vectorvein 0.2.82__tar.gz → 0.2.84__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {vectorvein-0.2.82 → vectorvein-0.2.84}/PKG-INFO +1 -1
  2. {vectorvein-0.2.82 → vectorvein-0.2.84}/pyproject.toml +1 -1
  3. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/settings/__init__.py +10 -21
  4. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/types/defaults.py +64 -0
  5. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/types/llm_parameters.py +11 -3
  6. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/types/settings.py +1 -0
  7. {vectorvein-0.2.82 → vectorvein-0.2.84}/README.md +0 -0
  8. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/__init__.py +0 -0
  9. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/api/__init__.py +0 -0
  10. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/api/client.py +0 -0
  11. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/api/exceptions.py +0 -0
  12. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/api/models.py +0 -0
  13. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/__init__.py +0 -0
  14. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  15. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  16. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/base_client.py +0 -0
  17. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  18. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/ernie_client.py +0 -0
  19. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  20. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/groq_client.py +0 -0
  21. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/local_client.py +0 -0
  22. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  23. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  24. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  25. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/openai_client.py +0 -0
  26. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  27. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/py.typed +0 -0
  28. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  29. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  30. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/utils.py +0 -0
  31. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/xai_client.py +0 -0
  32. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/yi_client.py +0 -0
  33. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  34. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/py.typed +0 -0
  35. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/server/token_server.py +0 -0
  36. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/settings/py.typed +0 -0
  37. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/types/__init__.py +0 -0
  38. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/types/enums.py +0 -0
  39. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/types/exception.py +0 -0
  40. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/types/py.typed +0 -0
  41. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/utilities/media_processing.py +0 -0
  42. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/utilities/rate_limiter.py +0 -0
  43. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/utilities/retry.py +0 -0
  44. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/graph/edge.py +0 -0
  45. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/graph/node.py +0 -0
  46. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/graph/port.py +0 -0
  47. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/graph/workflow.py +0 -0
  48. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  49. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  50. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  51. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
  52. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
  53. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/llms.py +0 -0
  54. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
  55. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
  56. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/output.py +0 -0
  57. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  58. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  59. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/tools.py +0 -0
  60. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  61. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  62. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  63. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  64. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/utils/analyse.py +0 -0
  65. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/utils/check.py +0 -0
  66. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
  67. {vectorvein-0.2.82 → vectorvein-0.2.84}/src/vectorvein/workflow/utils/layout.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.2.82
3
+ Version: 0.2.84
4
4
  Summary: VectorVein Python SDK
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -17,7 +17,7 @@ description = "VectorVein Python SDK"
17
17
  name = "vectorvein"
18
18
  readme = "README.md"
19
19
  requires-python = ">=3.10"
20
- version = "0.2.82"
20
+ version = "0.2.84"
21
21
 
22
22
  [project.license]
23
23
  text = "MIT"
@@ -60,12 +60,8 @@ class Backends(BaseModel):
60
60
 
61
61
 
62
62
  class Settings(BaseModel):
63
- VERSION: Optional[str] = Field(
64
- default="2", description="Configuration version. If provided, will use the corresponding format."
65
- )
66
- endpoints: List[EndpointSetting] = Field(
67
- default_factory=list, description="Available endpoints for the LLM service."
68
- )
63
+ VERSION: Optional[str] = Field(default="2", description="Configuration version. If provided, will use the corresponding format.")
64
+ endpoints: List[EndpointSetting] = Field(default_factory=list, description="Available endpoints for the LLM service.")
69
65
  token_server: Optional[Server] = Field(default=None, description="Token server address. Format: host:port")
70
66
  rate_limit: Optional[RateLimitConfig] = Field(default=None, description="Rate limit settings.")
71
67
 
@@ -73,32 +69,22 @@ class Settings(BaseModel):
73
69
  backends: Optional[Backends] = Field(default=None, description="All model backends in one place (V2 format).")
74
70
 
75
71
  # V1 format: each model backend config
76
- anthropic: Optional[BackendSettings] = Field(
77
- default_factory=BackendSettings, description="Anthropic models settings."
78
- )
79
- deepseek: Optional[BackendSettings] = Field(
80
- default_factory=BackendSettings, description="Deepseek models settings."
81
- )
72
+ anthropic: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Anthropic models settings.")
73
+ deepseek: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Deepseek models settings.")
82
74
  gemini: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Gemini models settings.")
83
75
  groq: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Groq models settings.")
84
76
  local: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Local models settings.")
85
77
  minimax: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Minimax models settings.")
86
78
  mistral: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Mistral models settings.")
87
- moonshot: Optional[BackendSettings] = Field(
88
- default_factory=BackendSettings, description="Moonshot models settings."
89
- )
79
+ moonshot: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Moonshot models settings.")
90
80
  openai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="OpenAI models settings.")
91
81
  qwen: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Qwen models settings.")
92
82
  yi: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Yi models settings.")
93
83
  zhipuai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
94
- baichuan: Optional[BackendSettings] = Field(
95
- default_factory=BackendSettings, description="Baichuan models settings."
96
- )
84
+ baichuan: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Baichuan models settings.")
97
85
  stepfun: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="StepFun models settings.")
98
86
  xai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="XAI models settings.")
99
- ernie: Optional[BackendSettings] = Field(
100
- default_factory=BackendSettings, description="Baidu Ernie models settings."
101
- )
87
+ ernie: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Baidu Ernie models settings.")
102
88
 
103
89
  def __init__(self, **data):
104
90
  model_types = {
@@ -143,6 +129,9 @@ class Settings(BaseModel):
143
129
  user_models = backends[model_type].get("models", {})
144
130
  model_settings = BackendSettings()
145
131
  model_settings.update_models(default_models, user_models)
132
+ default_endpoint = backends[model_type].get("default_endpoint", None)
133
+ if default_endpoint is not None:
134
+ model_settings.default_endpoint = default_endpoint
146
135
  backends[model_type] = model_settings
147
136
  else:
148
137
  backends[model_type] = BackendSettings(models=default_models)
@@ -339,6 +339,70 @@ QWEN_MODELS: Final[Dict[str, ModelSettingDict]] = {
339
339
  "response_format_available": False,
340
340
  "native_multimodal": True,
341
341
  },
342
+ "qwen3-235b-a22b": {
343
+ "id": "qwen3-235b-a22b",
344
+ "context_length": 131072,
345
+ "max_output_tokens": 8192,
346
+ "function_call_available": True,
347
+ "response_format_available": True,
348
+ "native_multimodal": False,
349
+ },
350
+ "qwen3-32b": {
351
+ "id": "qwen3-32b",
352
+ "context_length": 131072,
353
+ "max_output_tokens": 8192,
354
+ "function_call_available": True,
355
+ "response_format_available": True,
356
+ "native_multimodal": False,
357
+ },
358
+ "qwen3-30b-a3b": {
359
+ "id": "qwen3-30b-a3b",
360
+ "context_length": 131072,
361
+ "max_output_tokens": 8192,
362
+ "function_call_available": True,
363
+ "response_format_available": True,
364
+ "native_multimodal": False,
365
+ },
366
+ "qwen3-14b": {
367
+ "id": "qwen3-14b",
368
+ "context_length": 131072,
369
+ "max_output_tokens": 8192,
370
+ "function_call_available": True,
371
+ "response_format_available": True,
372
+ "native_multimodal": False,
373
+ },
374
+ "qwen3-8b": {
375
+ "id": "qwen3-8b",
376
+ "context_length": 131072,
377
+ "max_output_tokens": 8192,
378
+ "function_call_available": True,
379
+ "response_format_available": True,
380
+ "native_multimodal": False,
381
+ },
382
+ "qwen3-4b": {
383
+ "id": "qwen3-4b",
384
+ "context_length": 131072,
385
+ "max_output_tokens": 8192,
386
+ "function_call_available": True,
387
+ "response_format_available": True,
388
+ "native_multimodal": False,
389
+ },
390
+ "qwen3-1.7b": {
391
+ "id": "qwen3-1.7b",
392
+ "context_length": 32768,
393
+ "max_output_tokens": 8192,
394
+ "function_call_available": True,
395
+ "response_format_available": True,
396
+ "native_multimodal": False,
397
+ },
398
+ "qwen3-0.6b": {
399
+ "id": "qwen3-0.6b",
400
+ "context_length": 32768,
401
+ "max_output_tokens": 8192,
402
+ "function_call_available": True,
403
+ "response_format_available": True,
404
+ "native_multimodal": False,
405
+ },
342
406
  }
343
407
 
344
408
  # Yi models
@@ -101,9 +101,7 @@ class EndpointSetting(BaseModel):
101
101
 
102
102
  class ModelSetting(BaseModel):
103
103
  id: str = Field(..., description="The id of the model.")
104
- endpoints: List[Union[str, EndpointOptionDict]] = Field(
105
- default_factory=list, description="Available endpoints for the model."
106
- )
104
+ endpoints: List[Union[str, EndpointOptionDict]] = Field(default_factory=list, description="Available endpoints for the model.")
107
105
  function_call_available: bool = Field(False, description="Indicates if function call is available.")
108
106
  response_format_available: bool = Field(False, description="Indicates if response format is available.")
109
107
  native_multimodal: bool = Field(False, description="Indicates if the model is a native multimodal model.")
@@ -113,6 +111,16 @@ class ModelSetting(BaseModel):
113
111
 
114
112
  class BackendSettings(BaseModel):
115
113
  models: Dict[str, ModelSetting] = Field(default_factory=dict)
114
+ default_endpoint: Optional[str] = Field(default_factory=lambda: None, description="The default endpoint for the model.")
115
+
116
+ def get_model_setting(self, model_name: str) -> ModelSetting:
117
+ if model_name in self.models:
118
+ model_setting = self.models[model_name]
119
+ if len(model_setting.endpoints) == 0 and self.default_endpoint is not None:
120
+ model_setting.endpoints = [self.default_endpoint]
121
+ return model_setting
122
+ else:
123
+ raise ValueError(f"Model {model_name} not found in {self.models}")
116
124
 
117
125
  def update_models(self, default_models: Dict[str, Dict], input_models: Dict[str, Dict]):
118
126
  updated_models = {}
@@ -61,6 +61,7 @@ class BackendSettingsDict(TypedDict):
61
61
  """TypedDict representing the BackendSettings structure."""
62
62
 
63
63
  models: Dict[str, ModelConfigDict]
64
+ default_endpoint: NotRequired[Optional[str]]
64
65
 
65
66
 
66
67
  class EndpointSettingDict(TypedDict):
File without changes