vectorvein-0.3.0-py3-none-any.whl → vectorvein-0.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. vectorvein/api/client.py +81 -103
  2. vectorvein/api/exceptions.py +1 -3
  3. vectorvein/api/models.py +11 -11
  4. vectorvein/chat_clients/anthropic_client.py +157 -169
  5. vectorvein/chat_clients/base_client.py +257 -198
  6. vectorvein/chat_clients/openai_compatible_client.py +150 -161
  7. vectorvein/chat_clients/utils.py +44 -24
  8. vectorvein/server/token_server.py +1 -1
  9. vectorvein/settings/__init__.py +27 -27
  10. vectorvein/types/defaults.py +18 -18
  11. vectorvein/types/llm_parameters.py +38 -34
  12. vectorvein/types/settings.py +10 -10
  13. vectorvein/utilities/media_processing.py +1 -1
  14. vectorvein/utilities/rate_limiter.py +5 -6
  15. vectorvein/utilities/retry.py +6 -5
  16. vectorvein/workflow/graph/edge.py +3 -3
  17. vectorvein/workflow/graph/node.py +14 -26
  18. vectorvein/workflow/graph/port.py +40 -39
  19. vectorvein/workflow/graph/workflow.py +13 -25
  20. vectorvein/workflow/nodes/audio_generation.py +5 -7
  21. vectorvein/workflow/nodes/control_flows.py +7 -9
  22. vectorvein/workflow/nodes/file_processing.py +4 -6
  23. vectorvein/workflow/nodes/image_generation.py +20 -22
  24. vectorvein/workflow/nodes/llms.py +13 -15
  25. vectorvein/workflow/nodes/media_editing.py +26 -40
  26. vectorvein/workflow/nodes/media_processing.py +19 -21
  27. vectorvein/workflow/nodes/output.py +10 -12
  28. vectorvein/workflow/nodes/relational_db.py +3 -5
  29. vectorvein/workflow/nodes/text_processing.py +8 -10
  30. vectorvein/workflow/nodes/tools.py +8 -10
  31. vectorvein/workflow/nodes/triggers.py +1 -3
  32. vectorvein/workflow/nodes/vector_db.py +3 -5
  33. vectorvein/workflow/nodes/video_generation.py +4 -6
  34. vectorvein/workflow/nodes/web_crawlers.py +4 -6
  35. vectorvein/workflow/utils/analyse.py +5 -13
  36. vectorvein/workflow/utils/check.py +6 -16
  37. vectorvein/workflow/utils/json_to_code.py +6 -14
  38. vectorvein/workflow/utils/layout.py +3 -5
  39. {vectorvein-0.3.0.dist-info → vectorvein-0.3.2.dist-info}/METADATA +1 -1
  40. vectorvein-0.3.2.dist-info/RECORD +68 -0
  41. vectorvein-0.3.0.dist-info/RECORD +0 -68
  42. {vectorvein-0.3.0.dist-info → vectorvein-0.3.2.dist-info}/WHEEL +0 -0
  43. {vectorvein-0.3.0.dist-info → vectorvein-0.3.2.dist-info}/entry_points.txt +0 -0
vectorvein/settings/__init__.py
@@ -2,7 +2,7 @@
 # @Date: 2024-07-27 00:30:56
 import warnings
 from copy import deepcopy
-from typing import List, Optional, Literal
+from typing import Literal
 
 from pydantic import BaseModel, Field
 
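Note: this import change sets the pattern for the whole release — typing.List/Dict/Optional/Union are dropped in favor of builtin generics (PEP 585, Python 3.9+) and X | Y unions (PEP 604, Python 3.10+). A minimal before/after sketch with hypothetical functions, not code from the package:

    # Old style (removed in 0.3.2):
    from typing import Dict, List, Optional, Union

    def pick_old(models: Dict[str, List[str]], proxy: Optional[str]) -> Union[int, float]:
        return len(models)

    # New style (adopted in 0.3.2) — no typing imports needed:
    def pick_new(models: dict[str, list[str]], proxy: str | None) -> int | float:
        return len(models)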
@@ -26,8 +26,8 @@ class RateLimitConfig(BaseModel):
     enabled: bool = False
 
     backend: Literal["memory", "redis", "diskcache"] = "memory"
-    redis: Optional[RedisConfig] = Field(default=None)
-    diskcache: Optional[DiskCacheConfig] = Field(default=None)
+    redis: RedisConfig | None = Field(default=None)
+    diskcache: DiskCacheConfig | None = Field(default=None)
     default_rpm: int = 60
     default_tpm: int = 1000000
 
@@ -35,7 +35,7 @@ class RateLimitConfig(BaseModel):
 class Server(BaseModel):
     host: str
     port: int
-    url: Optional[str]
+    url: str | None
 
 
 class Backends(BaseModel):
@@ -60,31 +60,31 @@ class Backends(BaseModel):
 
 
 class Settings(BaseModel):
-    VERSION: Optional[str] = Field(default="2", description="Configuration version. If provided, will use the corresponding format.")
-    endpoints: List[EndpointSetting] = Field(default_factory=list, description="Available endpoints for the LLM service.")
-    token_server: Optional[Server] = Field(default=None, description="Token server address. Format: host:port")
-    rate_limit: Optional[RateLimitConfig] = Field(default=None, description="Rate limit settings.")
+    VERSION: str | None = Field(default="2", description="Configuration version. If provided, will use the corresponding format.")
+    endpoints: list[EndpointSetting] = Field(default_factory=list, description="Available endpoints for the LLM service.")
+    token_server: Server | None = Field(default=None, description="Token server address. Format: host:port")
+    rate_limit: RateLimitConfig | None = Field(default=None, description="Rate limit settings.")
 
     # V2 format: all model backend configs in a single dictionary
-    backends: Optional[Backends] = Field(default=None, description="All model backends in one place (V2 format).")
+    backends: Backends | None = Field(default=None, description="All model backends in one place (V2 format).")
 
     # V1 format: each model backend config
-    anthropic: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Anthropic models settings.")
-    deepseek: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Deepseek models settings.")
-    gemini: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Gemini models settings.")
-    groq: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Groq models settings.")
-    local: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Local models settings.")
-    minimax: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Minimax models settings.")
-    mistral: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Mistral models settings.")
-    moonshot: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Moonshot models settings.")
-    openai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="OpenAI models settings.")
-    qwen: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Qwen models settings.")
-    yi: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Yi models settings.")
-    zhipuai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
-    baichuan: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Baichuan models settings.")
-    stepfun: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="StepFun models settings.")
-    xai: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="XAI models settings.")
-    ernie: Optional[BackendSettings] = Field(default_factory=BackendSettings, description="Baidu Ernie models settings.")
+    anthropic: BackendSettings | None = Field(default_factory=BackendSettings, description="Anthropic models settings.")
+    deepseek: BackendSettings | None = Field(default_factory=BackendSettings, description="Deepseek models settings.")
+    gemini: BackendSettings | None = Field(default_factory=BackendSettings, description="Gemini models settings.")
+    groq: BackendSettings | None = Field(default_factory=BackendSettings, description="Groq models settings.")
+    local: BackendSettings | None = Field(default_factory=BackendSettings, description="Local models settings.")
+    minimax: BackendSettings | None = Field(default_factory=BackendSettings, description="Minimax models settings.")
+    mistral: BackendSettings | None = Field(default_factory=BackendSettings, description="Mistral models settings.")
+    moonshot: BackendSettings | None = Field(default_factory=BackendSettings, description="Moonshot models settings.")
+    openai: BackendSettings | None = Field(default_factory=BackendSettings, description="OpenAI models settings.")
+    qwen: BackendSettings | None = Field(default_factory=BackendSettings, description="Qwen models settings.")
+    yi: BackendSettings | None = Field(default_factory=BackendSettings, description="Yi models settings.")
+    zhipuai: BackendSettings | None = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
+    baichuan: BackendSettings | None = Field(default_factory=BackendSettings, description="Baichuan models settings.")
+    stepfun: BackendSettings | None = Field(default_factory=BackendSettings, description="StepFun models settings.")
+    xai: BackendSettings | None = Field(default_factory=BackendSettings, description="XAI models settings.")
+    ernie: BackendSettings | None = Field(default_factory=BackendSettings, description="Baidu Ernie models settings.")
 
     def __init__(self, **data):
         model_types = {
@@ -122,7 +122,7 @@ class Settings(BaseModel):
             backends = data["backends"]
         else:
             backends = data
-            warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.")
+            warnings.warn("You're using vectorvein's deprecated V1 format. Please use V2 format.", stacklevel=2)
 
         for model_type, default_models in model_types.items():
             if model_type in backends:
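Note on the stacklevel=2 fix above: it makes Python attribute the deprecation warning to the caller that passed the V1-format dict, not to the warnings.warn() line inside Settings.__init__. A standalone sketch with hypothetical names:

    import warnings

    def load_config(data: dict):
        if "backends" not in data:
            # stacklevel=2 reports the warning at the caller's line,
            # which is where the outdated V1 dict actually comes from.
            warnings.warn("V1 format is deprecated; use V2.", stacklevel=2)

    load_config({})  # the warning points here, not inside load_config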
@@ -155,7 +155,7 @@ class Settings(BaseModel):
 
         super().__init__(**data)
 
-    def load(self, settings: SettingsDict | "Settings"):
+    def load(self, settings: "SettingsDict | Settings"):
        if isinstance(settings, Settings):
             settings_dict = settings.export()
         else:
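For orientation, a minimal sketch of the V2 format that Settings.__init__ and load() accept (the endpoint id, API key, and model entries are illustrative, not taken from the package):

    from vectorvein.settings import Settings

    v2_config = {
        "VERSION": "2",
        "endpoints": [
            {"id": "my-endpoint", "api_base": "https://api.example.com/v1", "api_key": "sk-..."},
        ],
        "backends": {
            "openai": {
                "models": {"gpt-4o": {"id": "gpt-4o", "endpoints": ["my-endpoint"]}},
            },
        },
    }

    settings = Settings(**v2_config)  # V2 path: no deprecation warning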
vectorvein/types/defaults.py
@@ -1,6 +1,6 @@
 # @Author: Bi Ying
 # @Date: 2024-07-27 00:02:34
-from typing import Final, Dict
+from typing import Final
 from typing_extensions import TypedDict, NotRequired
 
 from .enums import ContextLengthControlType
@@ -25,7 +25,7 @@ class ModelSettingDict(TypedDict):
 
 # Moonshot models
 MOONSHOT_DEFAULT_MODEL: Final[str] = "kimi-latest"
-MOONSHOT_MODELS: Final[Dict[str, ModelSettingDict]] = {
+MOONSHOT_MODELS: Final[dict[str, ModelSettingDict]] = {
     "moonshot-v1-8k": {
         "id": "moonshot-v1-8k",
         "context_length": 8192,
@@ -87,7 +87,7 @@ MOONSHOT_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Deepseek models
 DEEPSEEK_DEFAULT_MODEL: Final[str] = "deepseek-chat"
-DEEPSEEK_MODELS: Final[Dict[str, ModelSettingDict]] = {
+DEEPSEEK_MODELS: Final[dict[str, ModelSettingDict]] = {
     "deepseek-chat": {
         "id": "deepseek-chat",
         "context_length": 64000,
@@ -108,7 +108,7 @@ DEEPSEEK_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Baichuan models
 BAICHUAN_DEFAULT_MODEL: Final[str] = "Baichuan3-Turbo"
-BAICHUAN_MODELS: Final[Dict[str, ModelSettingDict]] = {
+BAICHUAN_MODELS: Final[dict[str, ModelSettingDict]] = {
     "Baichuan4": {
         "id": "Baichuan4",
         "context_length": 32768,
@@ -153,7 +153,7 @@ BAICHUAN_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Groq models
 GROQ_DEFAULT_MODEL: Final[str] = "llama3-70b-8192"
-GROQ_MODELS: Final[Dict[str, ModelSettingDict]] = {
+GROQ_MODELS: Final[dict[str, ModelSettingDict]] = {
     "mixtral-8x7b-32768": {
         "id": "mixtral-8x7b-32768",
         "context_length": 32768,
@@ -225,7 +225,7 @@ GROQ_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Qwen models
 QWEN_DEFAULT_MODEL: Final[str] = "qwen2.5-72b-instruct"
-QWEN_MODELS: Final[Dict[str, ModelSettingDict]] = {
+QWEN_MODELS: Final[dict[str, ModelSettingDict]] = {
     "qwen2.5-7b-instruct": {
         "id": "qwen2.5-7b-instruct",
         "context_length": 131072,
@@ -414,7 +414,7 @@ QWEN_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Yi models
 YI_DEFAULT_MODEL: Final[str] = "yi-lightning"
-YI_MODELS: Final[Dict[str, ModelSettingDict]] = {
+YI_MODELS: Final[dict[str, ModelSettingDict]] = {
     "yi-lightning": {
         "id": "yi-lightning",
         "context_length": 16000,
@@ -435,7 +435,7 @@ YI_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # ZhiPuAI models
 ZHIPUAI_DEFAULT_MODEL: Final[str] = "glm-4-air"
-ZHIPUAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
+ZHIPUAI_MODELS: Final[dict[str, ModelSettingDict]] = {
     "glm-3-turbo": {
         "id": "glm-3-turbo",
         "context_length": 128000,
@@ -568,7 +568,7 @@ ZHIPUAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Mistral models
 MISTRAL_DEFAULT_MODEL: Final[str] = "mistral-small"
-MISTRAL_MODELS: Final[Dict[str, ModelSettingDict]] = {
+MISTRAL_MODELS: Final[dict[str, ModelSettingDict]] = {
     "open-mistral-7b": {
         "id": "open-mistral-7b",
         "context_length": 32000,
@@ -629,7 +629,7 @@ MISTRAL_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # OpenAI models
 OPENAI_DEFAULT_MODEL: Final[str] = "gpt-4o"
-OPENAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
+OPENAI_MODELS: Final[dict[str, ModelSettingDict]] = {
     "gpt-35-turbo": {
         "id": "gpt-35-turbo",
         "context_length": 16385,
@@ -746,7 +746,7 @@ OPENAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Anthropic models
 ANTHROPIC_DEFAULT_MODEL: Final[str] = "claude-3-7-sonnet-20250219"
-ANTHROPIC_MODELS: Final[Dict[str, ModelSettingDict]] = {
+ANTHROPIC_MODELS: Final[dict[str, ModelSettingDict]] = {
     "claude-3-opus-20240229": {
         "id": "claude-3-opus-20240229",
         "context_length": 200000,
@@ -823,7 +823,7 @@ ANTHROPIC_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Minimax models
 MINIMAX_DEFAULT_MODEL: Final[str] = "MiniMax-Text-01"
-MINIMAX_MODELS: Final[Dict[str, ModelSettingDict]] = {
+MINIMAX_MODELS: Final[dict[str, ModelSettingDict]] = {
     "abab5-chat": {
         "id": "abab5-chat",
         "context_length": 6144,
@@ -867,7 +867,7 @@ MINIMAX_MODELS: Final[Dict[str, ModelSettingDict]] = {
     "MiniMax-Text-01": {
         "id": "MiniMax-Text-01",
         "context_length": 1000192,
-        "max_output_tokens": 1000192,
+        "max_output_tokens": 40000,
         "function_call_available": True,
         "response_format_available": True,
         "native_multimodal": False,
@@ -875,7 +875,7 @@ MINIMAX_MODELS: Final[Dict[str, ModelSettingDict]] = {
     "MiniMax-M1": {
         "id": "MiniMax-M1",
         "context_length": 1000192,
-        "max_output_tokens": 1000192,
+        "max_output_tokens": 40000,
         "function_call_available": True,
         "response_format_available": True,
         "native_multimodal": False,
@@ -884,7 +884,7 @@ MINIMAX_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Gemini models
 GEMINI_DEFAULT_MODEL: Final[str] = "gemini-2.0-flash"
-GEMINI_MODELS: Final[Dict[str, ModelSettingDict]] = {
+GEMINI_MODELS: Final[dict[str, ModelSettingDict]] = {
     "gemini-1.5-pro": {
         "id": "gemini-1.5-pro",
         "context_length": 2097152,
@@ -1032,7 +1032,7 @@ GEMINI_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 # Baidu ERNIE (百度文心一言) models
 ERNIE_DEFAULT_MODEL: Final[str] = "ernie-lite"
-ERNIE_MODELS: Final[Dict[str, ModelSettingDict]] = {
+ERNIE_MODELS: Final[dict[str, ModelSettingDict]] = {
     "ernie-lite": {
         "id": "ernie-lite",
         "context_length": 6144,
@@ -1109,7 +1109,7 @@ ERNIE_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 
 STEPFUN_DEFAULT_MODEL: Final[str] = "step-1-8k"
-STEPFUN_MODELS: Final[Dict[str, ModelSettingDict]] = {
+STEPFUN_MODELS: Final[dict[str, ModelSettingDict]] = {
     "step-1-8k": {
         "id": "step-1-8k",
         "context_length": 8192,
@@ -1170,7 +1170,7 @@ STEPFUN_MODELS: Final[Dict[str, ModelSettingDict]] = {
 
 
 XAI_DEFAULT_MODEL: Final[str] = "grok-2-latest"
-XAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
+XAI_MODELS: Final[dict[str, ModelSettingDict]] = {
     "grok-beta": {
         "id": "grok-beta",
         "context_length": 131072,
vectorvein/types/llm_parameters.py
@@ -1,6 +1,7 @@
 # @Author: Bi Ying
 # @Date: 2024-07-26 23:48:04
-from typing import List, Dict, Optional, Union, Iterable, Literal
+from collections.abc import Iterable
+from typing import Literal
 from typing_extensions import TypedDict, NotRequired
 
 import httpx
@@ -35,8 +36,8 @@ class VectorVeinTextMessage(TypedDict):
     create_time: NotRequired[int]
     update_time: NotRequired[int]
     metadata: NotRequired[dict]
-    content: Dict[str, str]
-    attachments: NotRequired[List[str]]
+    content: dict[str, str]
+    attachments: NotRequired[list[str]]
 
 
 class VectorVeinWorkflowMessage(TypedDict):
@@ -47,19 +48,20 @@ class VectorVeinWorkflowMessage(TypedDict):
     create_time: NotRequired[int]
     update_time: NotRequired[int]
     metadata: dict
-    content: Dict[str, str]
-    attachments: NotRequired[List[str]]
+    content: dict[str, str]
+    attachments: NotRequired[list[str]]
 
 
-VectorVeinMessage = Union[VectorVeinTextMessage, VectorVeinWorkflowMessage]
+VectorVeinMessage = VectorVeinTextMessage | VectorVeinWorkflowMessage
 
 
 class EndpointSetting(BaseModel):
     id: str = Field(..., description="The id of the endpoint.")
-    region: Optional[str] = Field(None, description="The region for the endpoint.")
-    api_base: Optional[str] = Field(None, description="The base URL for the API.")
-    api_key: Optional[str] = Field(None, description="The API key for authentication.")
-    endpoint_type: Optional[
+    enabled: bool = Field(True, description="Whether the endpoint is enabled.")
+    region: str | None = Field(None, description="The region for the endpoint.")
+    api_base: str | None = Field(None, description="The base URL for the API.")
+    api_key: str | None = Field(None, description="The API key for authentication.")
+    endpoint_type: (
         Literal[
             "default",
             "openai",
@@ -68,11 +70,12 @@ class EndpointSetting(BaseModel):
             "anthropic_vertex",
             "anthropic_bedrock",
         ]
-    ] = Field(
+        | None
+    ) = Field(
         "default",
         description="The type of endpoint. Set to 'default' will determine the type automatically.",
     )
-    credentials: Optional[dict] = Field(None, description="Additional credentials if needed.")
+    credentials: dict | None = Field(None, description="Additional credentials if needed.")
     is_azure: bool = Field(False, description="Indicates if the endpoint is for Azure.")
     is_vertex: bool = Field(False, description="Indicates if the endpoint is for Vertex.")
     is_bedrock: bool = Field(False, description="Indicates if the endpoint is for Bedrock.")
@@ -82,7 +85,7 @@ class EndpointSetting(BaseModel):
         description="Whether to use concurrent requests for the LLM service.",
         default=defs.ENDPOINT_CONCURRENT_REQUESTS,
     )
-    proxy: Optional[str] = Field(None, description="The proxy URL for the endpoint.")
+    proxy: str | None = Field(None, description="The proxy URL for the endpoint.")
 
     def model_list(self):
         http_client = httpx.Client(proxy=self.proxy) if self.proxy is not None else None
@@ -129,17 +132,18 @@ class EndpointSetting(BaseModel):
 
 class ModelSetting(BaseModel):
     id: str = Field(..., description="The id of the model.")
-    endpoints: List[Union[str, EndpointOptionDict]] = Field(default_factory=list, description="Available endpoints for the model.")
+    enabled: bool = Field(True, description="Whether the model is enabled.")
+    endpoints: list[str | EndpointOptionDict] = Field(default_factory=list, description="Available endpoints for the model.")
     function_call_available: bool = Field(False, description="Indicates if function call is available.")
     response_format_available: bool = Field(False, description="Indicates if response format is available.")
     native_multimodal: bool = Field(False, description="Indicates if the model is a native multimodal model.")
     context_length: int = Field(32768, description="The context length for the model.")
-    max_output_tokens: Optional[int] = Field(None, description="Maximum number of output tokens allowed.")
+    max_output_tokens: int | None = Field(None, description="Maximum number of output tokens allowed.")
 
 
 class BackendSettings(BaseModel):
-    models: Dict[str, ModelSetting] = Field(default_factory=dict)
-    default_endpoint: Optional[str] = Field(default_factory=lambda: None, description="The default endpoint for the model.")
+    models: dict[str, ModelSetting] = Field(default_factory=dict)
+    default_endpoint: str | None = Field(default_factory=lambda: None, description="The default endpoint for the model.")
 
     def get_model_setting(self, model_name: str) -> ModelSetting:
         if model_name in self.models:
@@ -150,8 +154,8 @@ class BackendSettings(BaseModel):
         else:
             raise ValueError(f"Model {model_name} not found in {self.models}")
 
-    def update_models(self, default_models: Dict[str, Dict], input_models: Dict[str, Dict]):
-        updated_models = {}
+    def update_models(self, default_models: dict[str, dict], input_models: dict[str, dict]):
+        updated_models: dict[str, ModelSetting] = {}
         for model_name, model_data in default_models.items():
             updated_model = ModelSetting(**model_data)
             if model_name in input_models:
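A sketch of the merge above, assuming update_models stores the merged result on self.models and lets user-supplied fields override the defaults (model names and values here are hypothetical):

    from vectorvein.types.llm_parameters import BackendSettings

    backend = BackendSettings()
    backend.update_models(
        default_models={"gpt-4o": {"id": "gpt-4o", "endpoints": [], "context_length": 128000}},
        input_models={"gpt-4o": {"id": "gpt-4o", "max_output_tokens": 4096}},
    )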
@@ -173,44 +177,44 @@ class Usage(BaseModel):
 
     total_tokens: int
 
-    completion_tokens_details: Optional[CompletionTokensDetails] = None
+    completion_tokens_details: CompletionTokensDetails | None = None
     """Breakdown of tokens used in a completion."""
 
-    prompt_tokens_details: Optional[PromptTokensDetails] = None
+    prompt_tokens_details: PromptTokensDetails | None = None
     """Breakdown of tokens used in the prompt."""
 
 
 class ChatCompletionMessage(BaseModel):
-    content: Optional[str] = None
+    content: str | None = None
 
-    reasoning_content: Optional[str] = None
+    reasoning_content: str | None = None
 
-    raw_content: Optional[List[Dict]] = None
+    raw_content: list[dict] | None = None
 
-    tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
+    tool_calls: list[ChatCompletionMessageToolCall] | None = None
     """The tool calls generated by the model, such as function calls."""
 
-    function_call_arguments: Optional[dict] = None
+    function_call_arguments: dict | None = None
 
-    usage: Optional[Usage] = None
+    usage: Usage | None = None
 
 
 class ChatCompletionDeltaMessage(BaseModel):
-    content: Optional[str] = None
+    content: str | None = None
 
-    reasoning_content: Optional[str] = None
+    reasoning_content: str | None = None
 
-    raw_content: Optional[Dict] = None
+    raw_content: dict | None = None
 
-    tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
+    tool_calls: list[ChoiceDeltaToolCall] | None = None
     """The tool calls generated by the model, such as function calls."""
 
-    function_call_arguments: Optional[dict] = None
+    function_call_arguments: dict | None = None
 
-    usage: Optional[Usage] = None
+    usage: Usage | None = None
 
 
-NotGiven = Union[AnthropicNotGiven, OpenAINotGiven]
+NotGiven = AnthropicNotGiven | OpenAINotGiven
 
 NOT_GIVEN = OPENAI_NOT_GIVEN
 
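Beyond the typing cleanup, this file gains two new fields: EndpointSetting.enabled and ModelSetting.enabled, both defaulting to True. A hypothetical construction (assuming the import path mirrors the file layout):

    from vectorvein.types.llm_parameters import EndpointSetting, ModelSetting

    endpoint = EndpointSetting(id="my-endpoint", api_key="sk-...")  # enabled=True by default
    model = ModelSetting(id="my-model", endpoints=["my-endpoint"], enabled=False)
    print(endpoint.enabled, model.enabled)  # True False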
vectorvein/types/settings.py
@@ -1,4 +1,4 @@
-from typing import Dict, List, Optional, Union, Literal
+from typing import Literal
 from typing_extensions import TypedDict, NotRequired  # Required by pydantic under Python < 3.12
 
 
@@ -49,26 +49,26 @@ class ModelConfigDict(TypedDict):
     """TypedDict representing the model configuration structure."""
 
     id: str
-    endpoints: List[Union[str, EndpointOptionDict]]
+    endpoints: list[str | EndpointOptionDict]
     function_call_available: NotRequired[bool]
     response_format_available: NotRequired[bool]
     native_multimodal: NotRequired[bool]
-    context_length: NotRequired[Optional[int]]
-    max_output_tokens: NotRequired[Optional[int]]
+    context_length: NotRequired[int | None]
+    max_output_tokens: NotRequired[int | None]
 
 
 class BackendSettingsDict(TypedDict):
     """TypedDict representing the BackendSettings structure."""
 
-    models: Dict[str, ModelConfigDict]
-    default_endpoint: NotRequired[Optional[str]]
+    models: dict[str, ModelConfigDict]
+    default_endpoint: NotRequired[str | None]
 
 
 class EndpointSettingDict(TypedDict):
     """TypedDict representing the EndpointSetting structure."""
 
     id: str
-    api_base: NotRequired[Optional[str]]
+    api_base: NotRequired[str | None]
     api_key: NotRequired[str]
     region: NotRequired[str]
     endpoint_type: NotRequired[
@@ -115,7 +115,7 @@ class BackendsDict(TypedDict):
 class SettingsV1Dict(TypedDict):
     """TypedDict representing the expected structure of the settings dictionary."""
 
-    endpoints: List[EndpointSettingDict]
+    endpoints: list[EndpointSettingDict]
     token_server: NotRequired[ServerDict]
     rate_limit: NotRequired[RateLimitConfigDict]
 
@@ -142,7 +142,7 @@ class SettingsV2Dict(TypedDict):
     """TypedDict representing the expected structure of the settings dictionary."""
 
     VERSION: NotRequired[str]
-    endpoints: List[EndpointSettingDict]
+    endpoints: list[EndpointSettingDict]
     token_server: NotRequired[ServerDict]
     rate_limit: NotRequired[RateLimitConfigDict]
 
@@ -150,4 +150,4 @@ class SettingsV2Dict(TypedDict):
     backends: NotRequired[BackendsDict]
 
 
-SettingsDict = Union[SettingsV1Dict, SettingsV2Dict]
+SettingsDict = SettingsV1Dict | SettingsV2Dict
vectorvein/utilities/media_processing.py
@@ -20,7 +20,7 @@ class ImageProcessor:
         max_height: int | None = None,
     ):
         self.image_source = image_source
-        if isinstance(image_source, (Image.Image, Path)):
+        if isinstance(image_source, Image.Image | Path):
            self.is_local = True
         else:
             self.is_local = not image_source.startswith("http")
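This one-line change relies on isinstance() accepting PEP 604 unions directly since Python 3.10; the union form is equivalent to the older tuple form:

    from pathlib import Path

    p = Path("image.png")
    assert isinstance(p, str | Path)   # union form, Python 3.10+
    assert isinstance(p, (str, Path))  # equivalent tuple form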
vectorvein/utilities/rate_limiter.py
@@ -1,6 +1,5 @@
 import time
 import asyncio
-from typing import Tuple
 from collections import defaultdict
 from abc import ABC, abstractmethod
 
@@ -9,7 +8,7 @@ class AsyncRateLimiterBackend(ABC):
     """Rate Limiter Backend Abstract Base Class"""
 
     @abstractmethod
-    async def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> Tuple[bool, float]:
+    async def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> tuple[bool, float]:
         """Returns (allowed, wait_time)"""
         pass
 
@@ -18,7 +17,7 @@ class SyncRateLimiterBackend(ABC):
     """Rate Limiter Backend Abstract Base Class"""
 
     @abstractmethod
-    def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> Tuple[bool, float]:
+    def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> tuple[bool, float]:
         """Returns (allowed, wait_time)"""
         pass
 
@@ -65,7 +64,7 @@ class SyncMemoryRateLimiter(SyncRateLimiterBackend):
     def _get_last_reset(self, key):
         return self.windows[key][0] if self.windows[key] else time.time()
 
-    def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> Tuple[bool, float]:
+    def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> tuple[bool, float]:
         """Sync Rate Limiter Check
 
         Args:
@@ -211,7 +210,7 @@ class AsyncDiskCacheRateLimiter(AsyncRateLimiterBackend):
     def _get_tpm_key(self, key: str) -> str:
         return f"{key}_tpm"
 
-    async def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> Tuple[bool, float]:
+    async def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> tuple[bool, float]:
         """Check whether the rate-limit threshold is exceeded (检查是否超出限流阈值)
 
         Args:
@@ -272,7 +271,7 @@ class SyncDiskCacheRateLimiter(SyncRateLimiterBackend):
     def _get_tpm_key(self, key: str) -> str:
         return f"{key}_tpm"
 
-    def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> Tuple[bool, float]:
+    def check_limit(self, key: str, rpm: int, tpm: int, request_cost: int = 1) -> tuple[bool, float]:
         """Check whether the rate-limit threshold is exceeded (检查是否超出限流阈值)
 
         Args:
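A usage sketch of the backend interface above; only the check_limit signature and its (allowed, wait_time) return contract appear in the diff, so the constructor call and key format are assumptions:

    import time
    from vectorvein.utilities.rate_limiter import SyncMemoryRateLimiter

    limiter = SyncMemoryRateLimiter()  # assumed no-arg constructor
    allowed, wait_time = limiter.check_limit("openai:gpt-4o", rpm=60, tpm=1_000_000)
    if not allowed:
        time.sleep(wait_time)  # back off for the interval the backend suggests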
vectorvein/utilities/retry.py
@@ -1,7 +1,8 @@
 # @Author: Bi Ying
 # @Date: 2024-06-07 16:16:49
 import time
-from typing import Optional, Any, Callable, Tuple, Union, TypeVar, Generic
+from collections.abc import Callable
+from typing import Any, TypeVar, Generic
 
 
 ResultType = TypeVar("ResultType")
@@ -11,9 +12,9 @@ class Retry(Generic[ResultType]):
     def __init__(self, function: Callable[..., ResultType]):
         self.function: Callable[..., ResultType] = function
         self.__retry_times: int = 3
-        self.__sleep_time: Union[int, float] = 1
+        self.__sleep_time: int | float = 1
         self.__timeout: int = 180
-        self.__result_check: Optional[Callable[[ResultType], bool]] = None
+        self.__result_check: Callable[[ResultType], bool] | None = None
         self.pargs: list = []
         self.kwargs: dict = {}
 
@@ -26,7 +27,7 @@
         self.__retry_times = retry_times
         return self
 
-    def sleep_time(self, sleep_time: Union[int, float]) -> "Retry[ResultType]":
+    def sleep_time(self, sleep_time: int | float) -> "Retry[ResultType]":
         self.__sleep_time = sleep_time
         return self
 
@@ -43,7 +44,7 @@
             print(f"Retry result check error: {e}")
             return False
 
-    def run(self) -> Tuple[bool, Optional[ResultType]]:
+    def run(self) -> tuple[bool, ResultType | None]:
         try_times = 0
         start_time = time.time()
 
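A sketch of the fluent Retry API based on the methods visible above; the retry_times method name and the import path are assumptions (only its body appears in the diff), and run() is assumed to treat raised exceptions as failed attempts:

    import random
    from vectorvein.utilities.retry import Retry

    def flaky() -> int:
        if random.random() < 0.5:
            raise RuntimeError("transient failure")  # stand-in for an unreliable call
        return 42

    success, result = Retry(flaky).retry_times(5).sleep_time(0.5).run()
    print(success, result)  # (True, 42) once an attempt succeeds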
vectorvein/workflow/graph/edge.py
@@ -1,4 +1,4 @@
-from typing import Dict, Any, Union
+from typing import Any
 
 
 class Edge:
@@ -19,9 +19,9 @@ class Edge:
         self.target_handle: str = target_handle
         self.animated: bool = animated
         self.type: str = type
-        self.style: Dict[str, Union[str, int]] = {"stroke": "#28c5e5", "strokeWidth": 3}
+        self.style: dict[str, str | int] = {"stroke": "#28c5e5", "strokeWidth": 3}
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             "id": self.id,
             "source": self.source,