vectorvein 0.2.11-py3-none-any.whl → 0.2.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/openai_compatible_client.py +2 -2
- vectorvein/types/llm_parameters.py +44 -0
- vectorvein/types/settings.py +38 -38
- {vectorvein-0.2.11.dist-info → vectorvein-0.2.12.dist-info}/METADATA +1 -1
- {vectorvein-0.2.11.dist-info → vectorvein-0.2.12.dist-info}/RECORD +7 -7
- {vectorvein-0.2.11.dist-info → vectorvein-0.2.12.dist-info}/WHEEL +0 -0
- {vectorvein-0.2.11.dist-info → vectorvein-0.2.12.dist-info}/entry_points.txt +0 -0
vectorvein/chat_clients/openai_compatible_client.py
CHANGED
@@ -99,7 +99,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             return AzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="
+                api_version="2025-01-01-preview",
                 http_client=self.http_client,
             )
         else:
@@ -574,7 +574,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             return AsyncAzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="
+                api_version="2025-01-01-preview",
                 http_client=self.http_client,
             )
         else:
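Net effect of both hunks: the Azure OpenAI api_version is now pinned to "2025-01-01-preview" in the sync and async clients, replacing the previous value (truncated in this diff view). A minimal sketch of the client the Azure branch now builds; the endpoint URL and key below are placeholders, not values shipped by the package:

from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint="https://example-resource.openai.azure.com",  # placeholder Azure endpoint
    api_key="<azure-api-key>",                                    # placeholder credential
    api_version="2025-01-01-preview",                             # version pinned in 0.2.12
)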
vectorvein/types/llm_parameters.py
CHANGED
@@ -2,6 +2,7 @@
 # @Date: 2024-07-26 23:48:04
 from typing import List, Dict, Optional, Union, Iterable
 
+import httpx
 from pydantic import BaseModel, Field
 
 from anthropic._types import NotGiven as AnthropicNotGiven
@@ -10,6 +11,7 @@ from anthropic.types import ToolParam as AnthropicToolParam
 from anthropic.types import ThinkingConfigParam, ThinkingConfigEnabledParam
 from anthropic.types.message_create_params import ToolChoice as AnthropicToolChoice
 
+from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
 from openai._types import NotGiven as OpenAINotGiven
 from openai._types import NOT_GIVEN as OPENAI_NOT_GIVEN
 from openai.types.chat.completion_create_params import ResponseFormat
@@ -45,6 +47,48 @@ class EndpointSetting(BaseModel):
     )
     proxy: Optional[str] = Field(None, description="The proxy URL for the endpoint.")
 
+    def model_list(self):
+        http_client = httpx.Client(proxy=self.proxy) if self.proxy is not None else None
+
+        if self.is_azure:
+            if self.api_base is None:
+                raise ValueError("Azure endpoint is not set")
+            _client = AzureOpenAI(
+                azure_endpoint=self.api_base,
+                api_key=self.api_key,
+                api_version="2025-01-01-preview",
+                http_client=http_client,
+            )
+        else:
+            _client = OpenAI(
+                api_key=self.api_key,
+                base_url=self.api_base,
+                http_client=http_client,
+            )
+
+        return _client.models.list().model_dump()
+
+    async def amodel_list(self):
+        http_client = httpx.AsyncClient(proxy=self.proxy) if self.proxy is not None else None
+
+        if self.is_azure:
+            if self.api_base is None:
+                raise ValueError("Azure endpoint is not set")
+            _client = AsyncAzureOpenAI(
+                azure_endpoint=self.api_base,
+                api_key=self.api_key,
+                api_version="2025-01-01-preview",
+                http_client=http_client,
+            )
+        else:
+            _client = AsyncOpenAI(
+                api_key=self.api_key,
+                base_url=self.api_base,
+                http_client=http_client,
+            )
+
+        return (await _client.models.list()).model_dump()
+
 
 class ModelSetting(BaseModel):
     id: str = Field(..., description="The id of the model.")
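The new model_list() / amodel_list() helpers build a sync or async OpenAI-compatible client from the endpoint's own api_base, api_key, proxy, and is_azure fields and return the provider's model listing via model_dump(). A hypothetical usage sketch; the endpoint id and credential values are placeholders, and any constructor fields not visible in this diff are assumptions:

from vectorvein.types.llm_parameters import EndpointSetting

endpoint = EndpointSetting(
    id="my-openai",                        # hypothetical endpoint id
    api_base="https://api.openai.com/v1",  # placeholder base URL
    api_key="sk-...",                      # placeholder key
)

models = endpoint.model_list()             # dict from client.models.list().model_dump()
# In async code: models = await endpoint.amodel_list()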
vectorvein/types/settings.py
CHANGED
@@ -21,8 +21,8 @@ class RateLimitConfigDict(TypedDict):
 
     enabled: bool
     backend: Literal["memory", "redis", "diskcache"]
-    redis:
-    diskcache:
+    redis: NotRequired[RedisConfigDict]
+    diskcache: NotRequired[DiskCacheConfigDict]
     default_rpm: int
     default_tpm: int
 
@@ -32,7 +32,7 @@ class ServerDict(TypedDict):
 
     host: str
     port: int
-    url:
+    url: NotRequired[str]
 
 
 class EndpointOptionDict(TypedDict):
@@ -48,13 +48,13 @@ class EndpointOptionDict(TypedDict):
 class ModelConfigDict(TypedDict):
     """TypedDict representing the model configuration structure."""
 
-    id: str
+    id: NotRequired[str]
     endpoints: List[Union[str, EndpointOptionDict]]
-    function_call_available: bool
-    response_format_available: bool
-    native_multimodal: bool
-    context_length: int
-    max_output_tokens:
+    function_call_available: NotRequired[bool]
+    response_format_available: NotRequired[bool]
+    native_multimodal: NotRequired[bool]
+    context_length: NotRequired[int]
+    max_output_tokens: NotRequired[int]
 
 
 class BackendSettingsDict(TypedDict):
@@ -67,39 +67,39 @@ class EndpointSettingDict(TypedDict):
     """TypedDict representing the EndpointSetting structure."""
 
     id: str
-    api_base: Optional[str]
-    api_key: str
-    region:
-    api_schema_type:
-    credentials:
-    is_azure:
-    is_vertex:
-    is_bedrock:
-    rpm:
-    tpm:
-    concurrent_requests:
-    proxy:
+    api_base: NotRequired[Optional[str]]
+    api_key: NotRequired[str]
+    region: NotRequired[str]
+    api_schema_type: NotRequired[str]
+    credentials: NotRequired[dict]
+    is_azure: NotRequired[bool]
+    is_vertex: NotRequired[bool]
+    is_bedrock: NotRequired[bool]
+    rpm: NotRequired[int]
+    tpm: NotRequired[int]
+    concurrent_requests: NotRequired[int]
+    proxy: NotRequired[str]
 
 
 class SettingsDict(TypedDict):
     """TypedDict representing the expected structure of the settings dictionary."""
 
     endpoints: List[EndpointSettingDict]
-    token_server:
-    rate_limit:
+    token_server: NotRequired[ServerDict]
+    rate_limit: NotRequired[RateLimitConfigDict]
     # 各模型后端配置
-    anthropic: BackendSettingsDict
-    deepseek: BackendSettingsDict
-    gemini: BackendSettingsDict
-    groq: BackendSettingsDict
-    local: BackendSettingsDict
-    minimax: BackendSettingsDict
-    mistral: BackendSettingsDict
-    moonshot: BackendSettingsDict
-    openai: BackendSettingsDict
-    qwen: BackendSettingsDict
-    yi: BackendSettingsDict
-    zhipuai: BackendSettingsDict
-    baichuan: BackendSettingsDict
-    stepfun: BackendSettingsDict
-    xai: BackendSettingsDict
+    anthropic: NotRequired[BackendSettingsDict]
+    deepseek: NotRequired[BackendSettingsDict]
+    gemini: NotRequired[BackendSettingsDict]
+    groq: NotRequired[BackendSettingsDict]
+    local: NotRequired[BackendSettingsDict]
+    minimax: NotRequired[BackendSettingsDict]
+    mistral: NotRequired[BackendSettingsDict]
+    moonshot: NotRequired[BackendSettingsDict]
+    openai: NotRequired[BackendSettingsDict]
+    qwen: NotRequired[BackendSettingsDict]
+    yi: NotRequired[BackendSettingsDict]
+    zhipuai: NotRequired[BackendSettingsDict]
+    baichuan: NotRequired[BackendSettingsDict]
+    stepfun: NotRequired[BackendSettingsDict]
+    xai: NotRequired[BackendSettingsDict]
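Every field touched here moves from required to NotRequired, so a settings dictionary no longer has to spell out every backend, server, or endpoint option to satisfy the TypedDict. A minimal sketch of a dictionary that now type-checks as SettingsDict; the endpoint values are placeholders, not defaults shipped by vectorvein:

from vectorvein.types.settings import SettingsDict

settings: SettingsDict = {
    "endpoints": [
        # Only "id" remains required; api_base, api_key, rpm, tpm, proxy, ... may be omitted.
        {"id": "my-openai", "api_key": "sk-..."},
    ],
    # token_server, rate_limit, and the per-backend keys (openai, anthropic, ...)
    # are NotRequired after this change and can be left out entirely.
}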
{vectorvein-0.2.11.dist-info → vectorvein-0.2.12.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
-vectorvein-0.2.
-vectorvein-0.2.
-vectorvein-0.2.
+vectorvein-0.2.12.dist-info/METADATA,sha256=tceoZCMvKLrKIQMxR0iZlKF1WHIHxgYS5Kqmozm0b8U,4414
+vectorvein-0.2.12.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+vectorvein-0.2.12.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
 vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
@@ -18,7 +18,7 @@ vectorvein/chat_clients/minimax_client.py,sha256=YOILWcsHsN5tihLTMbKJIyJr9TJREMI
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=
+vectorvein/chat_clients/openai_compatible_client.py,sha256=L8SXCRA7OO_eXh6b-oya8k6XZZJ-j8SrJSvy8BFhCgs,48498
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
@@ -34,9 +34,9 @@ vectorvein/types/__init__.py,sha256=ie7H3rTMq_Fg836vOmy96m3wzjDkqfekQecPXXEDbcM,
 vectorvein/types/defaults.py,sha256=VrkQoyHqC_eK3g1b6egpPYLLo0ltwMHqxDscCX4y-N0,27417
 vectorvein/types/enums.py,sha256=7KTJSVtQueImmbr1fSwv3rQVtc0RyMWXJmoE2tDOaso,1667
 vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E,69
-vectorvein/types/llm_parameters.py,sha256=
+vectorvein/types/llm_parameters.py,sha256=4SxDbJKVb9oGYymyxQtNZ66YZmUQd9_CpYYg81_Inkk,7650
 vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vectorvein/types/settings.py,sha256=
+vectorvein/types/settings.py,sha256=71D94qqG4PVY59NDp1M29ZGcINOByilGcnCrNs5rbX8,3214
 vectorvein/utilities/media_processing.py,sha256=7KtbLFzOYIn1e9QTN9G6C76NH8CBlV9kfAgiRKEIeXY,6263
 vectorvein/utilities/rate_limiter.py,sha256=dwolIUVw2wP83Odqpx0AAaE77de1GzxkYDGH4tM_u_4,10300
 vectorvein/utilities/retry.py,sha256=6KFS9R2HdhqM3_9jkjD4F36ZSpEx2YNFGOVlpOsUetM,2208
@@ -61,4 +61,4 @@ vectorvein/workflow/nodes/vector_db.py,sha256=t6I17q6iR3yQreiDHpRrksMdWDPIvgqJs0
 vectorvein/workflow/nodes/video_generation.py,sha256=qmdg-t_idpxq1veukd-jv_ChICMOoInKxprV9Z4Vi2w,4118
 vectorvein/workflow/nodes/web_crawlers.py,sha256=LsqomfXfqrXfHJDO1cl0Ox48f4St7X_SL12DSbAMSOw,5415
 vectorvein/workflow/utils/json_to_code.py,sha256=F7dhDy8kGc8ndOeihGLRLGFGlquoxVlb02ENtxnQ0C8,5914
-vectorvein-0.2.
+vectorvein-0.2.12.dist-info/RECORD,,
{vectorvein-0.2.11.dist-info → vectorvein-0.2.12.dist-info}/WHEEL
File without changes
{vectorvein-0.2.11.dist-info → vectorvein-0.2.12.dist-info}/entry_points.txt
File without changes