vectorvein 0.1.59__tar.gz → 0.1.61__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.1.59 → vectorvein-0.1.61}/PKG-INFO +2 -2
- {vectorvein-0.1.59 → vectorvein-0.1.61}/pyproject.toml +2 -2
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/anthropic_client.py +29 -3
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/base_client.py +20 -3
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/types/defaults.py +22 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/types/llm_parameters.py +1 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/README.md +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/xai_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/server/token_server.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/types/py.typed +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/utilities/retry.py +0 -0
{vectorvein-0.1.59 → vectorvein-0.1.61}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.59
+Version: 0.1.61
 Summary: VectorVein python SDK
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
@@ -8,7 +8,7 @@ Requires-Python: >=3.10
 Requires-Dist: openai>=1.37.1
 Requires-Dist: tiktoken>=0.7.0
 Requires-Dist: httpx>=0.27.0
-Requires-Dist: anthropic[vertex]>=0.31.2
+Requires-Dist: anthropic[bedrock,vertex]>=0.31.2
 Requires-Dist: pydantic>=2.8.2
 Requires-Dist: Pillow>=10.4.0
 Requires-Dist: deepseek-tokenizer>=0.1.0
{vectorvein-0.1.59 → vectorvein-0.1.61}/pyproject.toml

@@ -6,7 +6,7 @@ dependencies = [
     "openai>=1.37.1",
     "tiktoken>=0.7.0",
     "httpx>=0.27.0",
-    "anthropic[vertex]>=0.31.2",
+    "anthropic[vertex,bedrock]>=0.31.2",
     "pydantic>=2.8.2",
     "Pillow>=10.4.0",
     "deepseek-tokenizer>=0.1.0",
@@ -17,7 +17,7 @@ description = "VectorVein python SDK"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.1.59"
+version = "0.1.61"

 [project.license]
 text = "MIT"
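The only functional dependency change here is the added `bedrock` extra on `anthropic`. As an assumption about the upstream package, that extra pulls in the AWS SDK pieces (boto3/botocore) that the Bedrock client needs at request time. A minimal sketch to confirm those optional dependencies resolve in an environment built from this pyproject; the dependency names other than `anthropic` itself are assumptions about what the extras install:

```python
# Sketch only: check that the packages expected from anthropic[vertex,bedrock]
# are importable in the current environment.
import importlib.util

for dep in ("anthropic", "boto3", "botocore"):
    status = "ok" if importlib.util.find_spec(dep) else "MISSING"
    print(f"{dep}: {status}")
```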
{vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/anthropic_client.py

@@ -8,7 +8,14 @@ from typing import overload, Generator, AsyncGenerator, Any, Literal, Iterable
 import httpx
 from openai._types import NotGiven as OpenAINotGiven
 from openai._types import NOT_GIVEN as OPENAI_NOT_GIVEN
-from anthropic import Anthropic, AnthropicVertex, AsyncAnthropic, AsyncAnthropicVertex
+from anthropic import (
+    Anthropic,
+    AnthropicVertex,
+    AsyncAnthropic,
+    AsyncAnthropicVertex,
+    AnthropicBedrock,
+    AsyncAnthropicBedrock,
+)
 from anthropic._types import NOT_GIVEN
 from anthropic.types import (
     TextBlock,
@@ -219,6 +226,16 @@ class AnthropicChatClient(BaseChatClient):
                 access_token=self.creds.token,
                 http_client=self.http_client,
             )
+        elif self.endpoint.is_bedrock:
+            if self.endpoint.credentials is None:
+                raise ValueError("Anthropic Bedrock endpoint requires credentials")
+            return AnthropicBedrock(
+                aws_access_key=self.endpoint.credentials.get("access_key"),
+                aws_secret_key=self.endpoint.credentials.get("secret_key"),
+                aws_region=self.endpoint.region,
+                base_url=self.endpoint.api_base,
+                http_client=self.http_client,
+            )
         elif self.endpoint.api_schema_type == "default":
             return Anthropic(
                 api_key=self.endpoint.api_key,
@@ -384,7 +401,7 @@ class AnthropicChatClient(BaseChatClient):
                 **kwargs,
             )

-        assert isinstance(self.raw_client, Anthropic | AnthropicVertex)
+        assert isinstance(self.raw_client, Anthropic | AnthropicVertex | AnthropicBedrock)

        if isinstance(tools, OpenAINotGiven):
            tools = NOT_GIVEN
@@ -600,6 +617,15 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 access_token=self.creds.token,
                 http_client=self.http_client,
             )
+        elif self.endpoint.is_bedrock:
+            if self.endpoint.credentials is None:
+                raise ValueError("Anthropic Bedrock endpoint requires credentials")
+            return AsyncAnthropicBedrock(
+                aws_access_key=self.endpoint.credentials.get("aws_access_key"),
+                aws_secret_key=self.endpoint.credentials.get("aws_secret_key"),
+                aws_region=self.endpoint.region,
+                http_client=self.http_client,
+            )
         elif self.endpoint.api_schema_type == "default":
             return AsyncAnthropic(
                 api_key=self.endpoint.api_key,
@@ -767,7 +793,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 **kwargs,
             )

-        assert isinstance(self.raw_client, AsyncAnthropic | AsyncAnthropicVertex)
+        assert isinstance(self.raw_client, AsyncAnthropic | AsyncAnthropicVertex | AsyncAnthropicBedrock)

        if isinstance(tools, OpenAINotGiven):
            tools = NOT_GIVEN
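The bulk of this release is Bedrock support in the Anthropic client: when `endpoint.is_bedrock` is set, the factory builds an `AnthropicBedrock` / `AsyncAnthropicBedrock` client from the endpoint's `credentials` dict instead of the Vertex or default clients, and the `isinstance` asserts are widened to match. Note that the sync branch reads `access_key`/`secret_key` from the credentials dict while the async branch reads `aws_access_key`/`aws_secret_key`, and only the sync branch forwards `base_url`. Below is a standalone sketch of the sync selection logic, using a hypothetical `EndpointSketch` stand-in for the real endpoint object (it is not the package's `EndpointSetting`):

```python
from dataclasses import dataclass
from typing import Optional

import httpx
from anthropic import Anthropic, AnthropicBedrock


@dataclass
class EndpointSketch:
    """Hypothetical stand-in for the fields the factory reads from self.endpoint."""

    is_bedrock: bool = False
    region: Optional[str] = None
    api_base: Optional[str] = None
    api_key: Optional[str] = None
    credentials: Optional[dict] = None


def make_sync_client(endpoint: EndpointSketch, http_client: Optional[httpx.Client] = None):
    # Mirrors the branch added in this diff: Bedrock endpoints are handled
    # before falling back to the plain Anthropic client.
    if endpoint.is_bedrock:
        if endpoint.credentials is None:
            raise ValueError("Anthropic Bedrock endpoint requires credentials")
        return AnthropicBedrock(
            aws_access_key=endpoint.credentials.get("access_key"),
            aws_secret_key=endpoint.credentials.get("secret_key"),
            aws_region=endpoint.region,
            base_url=endpoint.api_base,
            http_client=http_client,
        )
    return Anthropic(api_key=endpoint.api_key, base_url=endpoint.api_base, http_client=http_client)


# Example: a Bedrock-flagged endpoint resolves to an AnthropicBedrock client.
bedrock_endpoint = EndpointSketch(
    is_bedrock=True,
    region="us-west-2",
    credentials={"access_key": "AKIA...", "secret_key": "..."},
)
client = make_sync_client(bedrock_endpoint)
print(type(client).__name__)  # AnthropicBedrock
```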
{vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/base_client.py

@@ -6,7 +6,14 @@ from typing import Generator, AsyncGenerator, Any, overload, Literal, Iterable

 import httpx
 from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
-from anthropic import Anthropic, AnthropicVertex, AsyncAnthropic, AsyncAnthropicVertex
+from anthropic import (
+    Anthropic,
+    AnthropicVertex,
+    AsyncAnthropic,
+    AsyncAnthropicVertex,
+    AnthropicBedrock,
+    AsyncAnthropicBedrock,
+)

 from ..settings import settings
 from ..types import defaults as defs
@@ -57,7 +64,9 @@ class BaseChatClient(ABC):

     @cached_property
     @abstractmethod
-    def raw_client(self) -> OpenAI | AzureOpenAI | Anthropic | AnthropicVertex | httpx.Client | None:
+    def raw_client(
+        self,
+    ) -> OpenAI | AzureOpenAI | Anthropic | AnthropicVertex | AnthropicBedrock | httpx.Client | None:
         pass

     @overload
@@ -199,7 +208,15 @@ class BaseAsyncChatClient(ABC):
     @abstractmethod
     def raw_client(
         self,
-    ) -> AsyncOpenAI | AsyncAzureOpenAI | AsyncAnthropic | AsyncAnthropicVertex | httpx.AsyncClient | None:
+    ) -> (
+        AsyncOpenAI
+        | AsyncAzureOpenAI
+        | AsyncAnthropic
+        | AsyncAnthropicVertex
+        | AsyncAnthropicBedrock
+        | httpx.AsyncClient
+        | None
+    ):
         pass

     @overload
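Both abstract `raw_client` signatures are widened to include the Bedrock client types, so downstream code that previously matched on `Anthropic | AnthropicVertex` must now account for `AnthropicBedrock` as well, which is exactly what the asserts in anthropic_client.py do. A small sketch of narrowing the widened union; the `describe` helper is hypothetical and the alias below omits the OpenAI clients for brevity:

```python
import httpx
from anthropic import Anthropic, AnthropicBedrock, AnthropicVertex

# Subset of the widened sync union from base_client.py.
RawAnthropicClient = Anthropic | AnthropicVertex | AnthropicBedrock | httpx.Client | None


def describe(client: RawAnthropicClient) -> str:
    # isinstance accepts X | Y unions on Python 3.10+, matching requires-python >= 3.10.
    if isinstance(client, Anthropic | AnthropicVertex | AnthropicBedrock):
        return f"anthropic-compatible client: {type(client).__name__}"
    if isinstance(client, httpx.Client):
        return "raw httpx client"
    return "no client configured"


print(describe(AnthropicBedrock(aws_region="us-east-1")))  # anthropic-compatible client: AnthropicBedrock
print(describe(None))                                      # no client configured
```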
{vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/types/defaults.py

@@ -231,6 +231,20 @@ QWEN_MODELS: Final[Dict[str, Dict[str, Any]]] = {
         "function_call_available": False,
         "response_format_available": True,
     },
+    "qwen2.5-coder-32b-instruct": {
+        "id": "qwen2.5-coder-32b-instruct",
+        "context_length": 30000,
+        "max_output_tokens": 4096,
+        "function_call_available": False,
+        "response_format_available": False,
+    },
+    "qwq-32b-preview": {
+        "id": "qwq-32b-preview",
+        "context_length": 30000,
+        "max_output_tokens": 4096,
+        "function_call_available": False,
+        "response_format_available": False,
+    },
     "qwen2.5-72b-instruct": {
         "id": "qwen2.5-72b-instruct",
         "context_length": 131072,
@@ -238,6 +252,14 @@ QWEN_MODELS: Final[Dict[str, Dict[str, Any]]] = {
         "function_call_available": False,
         "response_format_available": True,
     },
+    "qwen2-vl-72b-instruct": {
+        "id": "qwen2-vl-72b-instruct",
+        "context_length": 131072,
+        "max_output_tokens": 8192,
+        "function_call_available": False,
+        "response_format_available": False,
+        "native_multimodal": True,
+    },
     "qwen-max": {
         "id": "qwen-max",
         "context_length": 8096,
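The new QWEN entries follow the same per-model default schema used throughout defaults.py: `context_length`, `max_output_tokens`, `function_call_available`, `response_format_available`, plus `native_multimodal` for the vision model. A sketch of how such defaults can be consumed; the `clamp_max_tokens` helper is hypothetical, and the dict below copies just one of the entries added above:

```python
from typing import Any, Dict

# One entry copied from the diff above; the real QWEN_MODELS mapping lives in
# vectorvein.types.defaults.
QWEN_MODELS: Dict[str, Dict[str, Any]] = {
    "qwq-32b-preview": {
        "id": "qwq-32b-preview",
        "context_length": 30000,
        "max_output_tokens": 4096,
        "function_call_available": False,
        "response_format_available": False,
    },
}


def clamp_max_tokens(model: str, requested: int) -> int:
    """Never request more output tokens than the model's default allows."""
    limit = QWEN_MODELS.get(model, {}).get("max_output_tokens")
    return requested if limit is None else min(requested, limit)


print(clamp_max_tokens("qwq-32b-preview", 8192))  # -> 4096
```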
{vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/types/llm_parameters.py

@@ -37,6 +37,7 @@ class EndpointSetting(BaseModel):
     credentials: Optional[dict] = Field(None, description="Additional credentials if needed.")
     is_azure: bool = Field(False, description="Indicates if the endpoint is for Azure.")
     is_vertex: bool = Field(False, description="Indicates if the endpoint is for Vertex.")
+    is_bedrock: bool = Field(False, description="Indicates if the endpoint is for Bedrock.")
     rpm: int = Field(description="Requests per minute.", default=defs.ENDPOINT_RPM)
     tpm: int = Field(description="Tokens per minute.", default=defs.ENDPOINT_TPM)
     concurrent_requests: int = Field(
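With `is_bedrock` on `EndpointSetting`, an endpoint can be flagged for Bedrock the same way Azure and Vertex endpoints already are, and the client factories above branch on it. A minimal Pydantic sketch that reproduces only the fields visible in this diff (a stand-in, not the real `EndpointSetting`, whose remaining fields are not shown here):

```python
from typing import Optional

from pydantic import BaseModel, Field


class EndpointFlagsSketch(BaseModel):
    """Illustrative stand-in mirroring the flags visible in this diff."""

    credentials: Optional[dict] = Field(None, description="Additional credentials if needed.")
    is_azure: bool = Field(False, description="Indicates if the endpoint is for Azure.")
    is_vertex: bool = Field(False, description="Indicates if the endpoint is for Vertex.")
    is_bedrock: bool = Field(False, description="Indicates if the endpoint is for Bedrock.")


endpoint = EndpointFlagsSketch(
    is_bedrock=True,
    credentials={"access_key": "AKIA...", "secret_key": "..."},
)
assert endpoint.is_bedrock and not endpoint.is_vertex
```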
All other files listed above with +0 -0 (including {vectorvein-0.1.59 → vectorvein-0.1.61}/src/vectorvein/chat_clients/openai_compatible_client.py) are unchanged between the two releases; they appear in the diff only as renames of the versioned package directory.