vectorvein 0.2.13__tar.gz → 0.2.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. {vectorvein-0.2.13 → vectorvein-0.2.15}/PKG-INFO +1 -1
  2. {vectorvein-0.2.13 → vectorvein-0.2.15}/pyproject.toml +1 -1
  3. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/settings/__init__.py +38 -4
  4. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/types/__init__.py +10 -0
  5. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/types/defaults.py +24 -56
  6. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/types/settings.py +16 -5
  7. {vectorvein-0.2.13 → vectorvein-0.2.15}/README.md +0 -0
  8. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/__init__.py +0 -0
  9. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/api/__init__.py +0 -0
  10. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/api/client.py +0 -0
  11. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/api/exceptions.py +0 -0
  12. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/api/models.py +0 -0
  13. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/__init__.py +0 -0
  14. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  15. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  16. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/base_client.py +0 -0
  17. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  18. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  19. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/groq_client.py +0 -0
  20. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/local_client.py +0 -0
  21. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  22. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  23. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  24. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/openai_client.py +0 -0
  25. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  26. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/py.typed +0 -0
  27. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  28. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  29. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/utils.py +0 -0
  30. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/xai_client.py +0 -0
  31. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/yi_client.py +0 -0
  32. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  33. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/py.typed +0 -0
  34. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/server/token_server.py +0 -0
  35. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/settings/py.typed +0 -0
  36. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/types/enums.py +0 -0
  37. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/types/exception.py +0 -0
  38. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/types/llm_parameters.py +0 -0
  39. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/types/py.typed +0 -0
  40. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/utilities/media_processing.py +0 -0
  41. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/utilities/rate_limiter.py +0 -0
  42. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/utilities/retry.py +0 -0
  43. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/graph/edge.py +0 -0
  44. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/graph/node.py +0 -0
  45. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/graph/port.py +0 -0
  46. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/graph/workflow.py +0 -0
  47. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  48. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  49. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  50. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
  51. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
  52. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/llms.py +0 -0
  53. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
  54. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
  55. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/output.py +0 -0
  56. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  57. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  58. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/tools.py +0 -0
  59. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  60. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  61. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  62. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  63. {vectorvein-0.2.13 → vectorvein-0.2.15}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.2.13
3
+ Version: 0.2.15
4
4
  Summary: VectorVein python SDK
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -18,7 +18,7 @@ description = "VectorVein python SDK"
18
18
  name = "vectorvein"
19
19
  readme = "README.md"
20
20
  requires-python = ">=3.10"
21
- version = "0.2.13"
21
+ version = "0.2.15"
22
22
 
23
23
  [project.license]
24
24
  text = "MIT"
@@ -1,6 +1,7 @@
1
1
  # @Author: Bi Ying
2
2
  # @Date: 2024-07-27 00:30:56
3
3
  import warnings
4
+ from copy import deepcopy
4
5
  from typing import List, Optional, Literal
5
6
 
6
7
  from pydantic import BaseModel, Field
@@ -59,7 +60,7 @@ class Backends(BaseModel):
59
60
 
60
61
  class Settings(BaseModel):
61
62
  VERSION: Optional[str] = Field(
62
- default=None, description="Configuration version. If provided, will use the corresponding format."
63
+ default="2", description="Configuration version. If provided, will use the corresponding format."
63
64
  )
64
65
  endpoints: List[EndpointSetting] = Field(
65
66
  default_factory=list, description="Available endpoints for the LLM service."
@@ -114,8 +115,14 @@ class Settings(BaseModel):
114
115
  "xai": defs.XAI_MODELS,
115
116
  }
116
117
 
118
+ data = deepcopy(data)
119
+
117
120
  version = data.get("VERSION")
118
121
 
122
+ if len(data) == 0:
123
+ version = "2"
124
+ data["backends"] = {}
125
+
119
126
  # If V2 format, model configs are in the backends dictionary
120
127
  if version == "2":
121
128
  if "backends" not in data:
@@ -124,13 +131,12 @@ class Settings(BaseModel):
124
131
  backends = data["backends"]
125
132
  else:
126
133
  backends = data
127
- if len(data) > 0:
128
- warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.")
134
+ warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.")
129
135
 
130
136
  for model_type, default_models in model_types.items():
131
137
  if model_type in backends:
132
- model_settings = BackendSettings()
133
138
  user_models = backends[model_type].get("models", {})
139
+ model_settings = BackendSettings()
134
140
  model_settings.update_models(default_models, user_models)
135
141
  backends[model_type] = model_settings
136
142
  else:
@@ -149,6 +155,10 @@ class Settings(BaseModel):
149
155
  def load(self, settings_dict: SettingsDict):
150
156
  self.__init__(**settings_dict)
151
157
 
158
+ @classmethod
159
+ def load_from_dict(cls, settings_dict: SettingsDict):
160
+ return cls(**settings_dict)
161
+
152
162
  def get_endpoint(self, endpoint_id: str) -> EndpointSetting:
153
163
  for endpoint in self.endpoints:
154
164
  if endpoint.id == endpoint_id:
@@ -165,5 +175,29 @@ class Settings(BaseModel):
165
175
  # Compatible with VERSION 1 format
166
176
  return getattr(self, backend_name)
167
177
 
178
+ def export(self):
179
+ if self.VERSION == "2":
180
+ return super().model_dump(
181
+ exclude={
182
+ "anthropic",
183
+ "deepseek",
184
+ "gemini",
185
+ "groq",
186
+ "local",
187
+ "minimax",
188
+ "mistral",
189
+ "moonshot",
190
+ "openai",
191
+ "qwen",
192
+ "yi",
193
+ "zhipuai",
194
+ "baichuan",
195
+ "stepfun",
196
+ "xai",
197
+ }
198
+ )
199
+ else:
200
+ return super().model_dump(exclude={"backends"})
201
+
168
202
 
169
203
  settings = Settings()
@@ -61,8 +61,13 @@ from .settings import (
61
61
  BackendSettingsDict,
62
62
  EndpointSettingDict,
63
63
  SettingsDict,
64
+ SettingsV1Dict,
65
+ SettingsV2Dict,
64
66
  )
65
67
 
68
+ from anthropic.types import ThinkingConfigParam, ThinkingConfigEnabledParam
69
+ from openai.types.chat.completion_create_params import ResponseFormat
70
+
66
71
 
67
72
  __all__ = [
68
73
  "CONTEXT_LENGTH_CONTROL",
@@ -126,4 +131,9 @@ __all__ = [
126
131
  "BackendSettingsDict",
127
132
  "EndpointSettingDict",
128
133
  "SettingsDict",
134
+ "SettingsV1Dict",
135
+ "SettingsV2Dict",
136
+ "ThinkingConfigParam",
137
+ "ThinkingConfigEnabledParam",
138
+ "ResponseFormat",
129
139
  ]
@@ -176,62 +176,6 @@ GROQ_MODELS: Final[Dict[str, Dict[str, Any]]] = {
176
176
  # Qwen models
177
177
  QWEN_DEFAULT_MODEL: Final[str] = "qwen2.5-72b-instruct"
178
178
  QWEN_MODELS: Final[Dict[str, Dict[str, Any]]] = {
179
- "qwen1.5-1.8b-chat": {
180
- "id": "qwen1.5-1.8b-chat",
181
- "context_length": 30000,
182
- "max_output_tokens": 4096,
183
- "function_call_available": False,
184
- "response_format_available": True,
185
- },
186
- "qwen1.5-4b-chat": {
187
- "id": "qwen1.5-4b-chat",
188
- "context_length": 30000,
189
- "max_output_tokens": 4096,
190
- "function_call_available": False,
191
- "response_format_available": True,
192
- },
193
- "qwen1.5-7b-chat": {
194
- "id": "qwen1.5-7b-chat",
195
- "context_length": 30000,
196
- "max_output_tokens": 4096,
197
- "function_call_available": False,
198
- "response_format_available": True,
199
- },
200
- "qwen1.5-14b-chat": {
201
- "id": "qwen1.5-14b-chat",
202
- "context_length": 30000,
203
- "max_output_tokens": 4096,
204
- "function_call_available": False,
205
- "response_format_available": True,
206
- },
207
- "qwen1.5-32b-chat": {
208
- "id": "qwen1.5-32b-chat",
209
- "context_length": 30000,
210
- "max_output_tokens": 4096,
211
- "function_call_available": False,
212
- "response_format_available": True,
213
- },
214
- "qwen1.5-72b-chat": {
215
- "id": "qwen1.5-72b-chat",
216
- "context_length": 30000,
217
- "max_output_tokens": 4096,
218
- "function_call_available": False,
219
- "response_format_available": True,
220
- },
221
- "qwen1.5-110b-chat": {
222
- "id": "qwen1.5-110b-chat",
223
- "context_length": 30000,
224
- "max_output_tokens": 4096,
225
- "function_call_available": False,
226
- "response_format_available": True,
227
- },
228
- "qwen2-72b-instruct": {
229
- "id": "qwen2-72b-instruct",
230
- "context_length": 30000,
231
- "max_output_tokens": 4096,
232
- "function_call_available": False,
233
- "response_format_available": True,
234
- },
235
179
  "qwen2.5-7b-instruct": {
236
180
  "id": "qwen2.5-7b-instruct",
237
181
  "context_length": 131072,
@@ -282,6 +226,30 @@ QWEN_MODELS: Final[Dict[str, Dict[str, Any]]] = {
282
226
  "response_format_available": False,
283
227
  "native_multimodal": True,
284
228
  },
229
+ "qwen2.5-vl-72b-instruct": {
230
+ "id": "qwen2.5-vl-72b-instruct",
231
+ "context_length": 131072,
232
+ "max_output_tokens": 8192,
233
+ "function_call_available": False,
234
+ "response_format_available": False,
235
+ "native_multimodal": True,
236
+ },
237
+ "qwen2.5-vl-7b-instruct": {
238
+ "id": "qwen2.5-vl-7b-instruct",
239
+ "context_length": 131072,
240
+ "max_output_tokens": 8192,
241
+ "function_call_available": False,
242
+ "response_format_available": False,
243
+ "native_multimodal": True,
244
+ },
245
+ "qwen2.5-vl-3b-instruct": {
246
+ "id": "qwen2.5-vl-3b-instruct",
247
+ "context_length": 131072,
248
+ "max_output_tokens": 8192,
249
+ "function_call_available": False,
250
+ "response_format_available": False,
251
+ "native_multimodal": True,
252
+ },
285
253
  "qwen-max": {
286
254
  "id": "qwen-max",
287
255
  "context_length": 8096,
@@ -101,17 +101,13 @@ class BackendsDict(TypedDict):
101
101
  xai: NotRequired[BackendSettingsDict]
102
102
 
103
103
 
104
- class SettingsDict(TypedDict):
104
+ class SettingsV1Dict(TypedDict):
105
105
  """TypedDict representing the expected structure of the settings dictionary."""
106
106
 
107
- VERSION: NotRequired[str]
108
107
  endpoints: List[EndpointSettingDict]
109
108
  token_server: NotRequired[ServerDict]
110
109
  rate_limit: NotRequired[RateLimitConfigDict]
111
110
 
112
- # V2 format: all model backend configs in a single dictionary
113
- backends: NotRequired[BackendsDict]
114
-
115
111
  # V1 format: each model backend config
116
112
  anthropic: NotRequired[BackendSettingsDict]
117
113
  deepseek: NotRequired[BackendSettingsDict]
@@ -128,3 +124,18 @@ class SettingsDict(TypedDict):
128
124
  baichuan: NotRequired[BackendSettingsDict]
129
125
  stepfun: NotRequired[BackendSettingsDict]
130
126
  xai: NotRequired[BackendSettingsDict]
127
+
128
+
129
+ class SettingsV2Dict(TypedDict):
130
+ """TypedDict representing the expected structure of the settings dictionary."""
131
+
132
+ VERSION: NotRequired[str]
133
+ endpoints: List[EndpointSettingDict]
134
+ token_server: NotRequired[ServerDict]
135
+ rate_limit: NotRequired[RateLimitConfigDict]
136
+
137
+ # V2 format: all model backend configs in a single dictionary
138
+ backends: NotRequired[BackendsDict]
139
+
140
+
141
+ SettingsDict = Union[SettingsV1Dict, SettingsV2Dict]
File without changes