vectorvein 0.2.78__py3-none-any.whl → 0.2.80__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/anthropic_client.py +8 -2
- vectorvein/chat_clients/openai_compatible_client.py +8 -2
- vectorvein/chat_clients/utils.py +7 -3
- vectorvein/types/defaults.py +8 -0
- {vectorvein-0.2.78.dist-info → vectorvein-0.2.80.dist-info}/METADATA +1 -1
- {vectorvein-0.2.78.dist-info → vectorvein-0.2.80.dist-info}/RECORD +8 -8
- {vectorvein-0.2.78.dist-info → vectorvein-0.2.80.dist-info}/WHEEL +0 -0
- {vectorvein-0.2.78.dist-info → vectorvein-0.2.80.dist-info}/entry_points.txt +0 -0
@@ -596,7 +596,10 @@ class AnthropicChatClient(BaseChatClient):
|
|
596
596
|
|
597
597
|
if max_tokens is None:
|
598
598
|
max_output_tokens = self.model_setting.max_output_tokens
|
599
|
-
|
599
|
+
native_multimodal = self.model_setting.native_multimodal
|
600
|
+
token_counts = get_message_token_counts(
|
601
|
+
messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
|
602
|
+
)
|
600
603
|
if max_output_tokens is not None:
|
601
604
|
max_tokens = self.model_setting.context_length - token_counts
|
602
605
|
max_tokens = min(max(max_tokens, 1), max_output_tokens)
|
@@ -1207,7 +1210,10 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
|
|
1207
1210
|
|
1208
1211
|
if max_tokens is None:
|
1209
1212
|
max_output_tokens = self.model_setting.max_output_tokens
|
1210
|
-
|
1213
|
+
native_multimodal = self.model_setting.native_multimodal
|
1214
|
+
token_counts = get_message_token_counts(
|
1215
|
+
messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
|
1216
|
+
)
|
1211
1217
|
if max_output_tokens is not None:
|
1212
1218
|
max_tokens = self.model_setting.context_length - token_counts
|
1213
1219
|
max_tokens = min(max(max_tokens, 1), max_output_tokens)
|
@@ -326,7 +326,10 @@ class OpenAICompatibleChatClient(BaseChatClient):
|
|
326
326
|
|
327
327
|
if not max_tokens and not max_completion_tokens:
|
328
328
|
max_output_tokens = self.model_setting.max_output_tokens
|
329
|
-
|
329
|
+
native_multimodal = self.model_setting.native_multimodal
|
330
|
+
token_counts = get_message_token_counts(
|
331
|
+
messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
|
332
|
+
)
|
330
333
|
if max_output_tokens is not None:
|
331
334
|
max_tokens = self.model_setting.context_length - token_counts - 64
|
332
335
|
max_tokens = min(max(max_tokens, 1), max_output_tokens)
|
@@ -805,7 +808,10 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
|
|
805
808
|
|
806
809
|
if not max_tokens and not max_completion_tokens:
|
807
810
|
max_output_tokens = self.model_setting.max_output_tokens
|
808
|
-
|
811
|
+
native_multimodal = self.model_setting.native_multimodal
|
812
|
+
token_counts = get_message_token_counts(
|
813
|
+
messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
|
814
|
+
)
|
809
815
|
if max_output_tokens is not None:
|
810
816
|
max_tokens = self.model_setting.context_length - token_counts - 64
|
811
817
|
max_tokens = min(max(max_tokens, 1), max_output_tokens)
|
vectorvein/chat_clients/utils.py
CHANGED
@@ -348,9 +348,10 @@ def get_message_token_counts(
|
|
348
348
|
messages: list,
|
349
349
|
tools: list | Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
350
350
|
model: str = "gpt-4o",
|
351
|
+
native_multimodal: bool = True,
|
351
352
|
) -> int:
|
352
353
|
tokens = 0
|
353
|
-
formatted_messages = format_messages(messages, backend=BackendType.OpenAI, native_multimodal=True)
|
354
|
+
formatted_messages = format_messages(messages, backend=BackendType.OpenAI, native_multimodal=native_multimodal)
|
354
355
|
for message in formatted_messages:
|
355
356
|
content = message["content"]
|
356
357
|
if isinstance(content, str):
|
@@ -360,8 +361,11 @@ def get_message_token_counts(
|
|
360
361
|
if isinstance(item, dict) and item["type"] == "text":
|
361
362
|
tokens += get_token_counts(item["text"], model)
|
362
363
|
elif isinstance(item, dict) and item["type"].startswith("image"):
|
363
|
-
|
364
|
-
|
364
|
+
if native_multimodal:
|
365
|
+
# TODO: Get real image size
|
366
|
+
tokens += calculate_image_tokens(2048, 2048, model)
|
367
|
+
else:
|
368
|
+
tokens += 1
|
365
369
|
if tools:
|
366
370
|
tokens += get_token_counts(str(tools), model)
|
367
371
|
|
vectorvein/types/defaults.py
CHANGED
@@ -607,6 +607,14 @@ OPENAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
|
|
607
607
|
"response_format_available": True,
|
608
608
|
"native_multimodal": False,
|
609
609
|
},
|
610
|
+
"gpt-4.1": {
|
611
|
+
"id": "gpt-4.1",
|
612
|
+
"context_length": 1047576,
|
613
|
+
"max_output_tokens": 32768,
|
614
|
+
"function_call_available": True,
|
615
|
+
"response_format_available": True,
|
616
|
+
"native_multimodal": True,
|
617
|
+
},
|
610
618
|
}
|
611
619
|
|
612
620
|
# Anthropic models
|
@@ -1,13 +1,13 @@
|
|
1
|
-
vectorvein-0.2.
|
2
|
-
vectorvein-0.2.
|
3
|
-
vectorvein-0.2.
|
1
|
+
vectorvein-0.2.80.dist-info/METADATA,sha256=i7b-zBRjuZqgVdWw8cAcBWpnM6WNy2WRwh4wLwsoJdU,4567
|
2
|
+
vectorvein-0.2.80.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
|
3
|
+
vectorvein-0.2.80.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
|
4
4
|
vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
5
5
|
vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
|
6
6
|
vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
|
7
7
|
vectorvein/api/exceptions.py,sha256=uS_PAdx0ksC0r3dgfSGWdbLMZm4qdLeWSSqCv1g3_Gc,772
|
8
8
|
vectorvein/api/models.py,sha256=xtPWMsB0yIJI7i-gY4B6MtvXv0ZIXnoeKspmeInH6fU,1449
|
9
9
|
vectorvein/chat_clients/__init__.py,sha256=UIytpIgwo8qkZpIyrHVxLYTyliUOTp4J7C4iHRjbtWE,23850
|
10
|
-
vectorvein/chat_clients/anthropic_client.py,sha256=
|
10
|
+
vectorvein/chat_clients/anthropic_client.py,sha256=xAB2JDCqm865mmCXK9jfN636-lNrICTDUcBypuoWu_0,69234
|
11
11
|
vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
|
12
12
|
vectorvein/chat_clients/base_client.py,sha256=p7s-G4Wh9MSpDKEfG8wuFAeWy5DGvj5Go31hqrpQPhM,38817
|
13
13
|
vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
|
@@ -19,11 +19,11 @@ vectorvein/chat_clients/minimax_client.py,sha256=YOILWcsHsN5tihLTMbKJIyJr9TJREMI
|
|
19
19
|
vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
|
20
20
|
vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
|
21
21
|
vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
|
22
|
-
vectorvein/chat_clients/openai_compatible_client.py,sha256=
|
22
|
+
vectorvein/chat_clients/openai_compatible_client.py,sha256=o0UP2jfTD8qDxd2ayeUaOS7p_yfaj7_S_YR2vmm2w28,49155
|
23
23
|
vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
24
24
|
vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
|
25
25
|
vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
|
26
|
-
vectorvein/chat_clients/utils.py,sha256=
|
26
|
+
vectorvein/chat_clients/utils.py,sha256=rd7snZT7RGSVTOlr159eJveo9ErUylAW0GLj9fjoWzQ,29652
|
27
27
|
vectorvein/chat_clients/xai_client.py,sha256=eLFJJrNRJ-ni3DpshODcr3S1EJQLbhVwxyO1E54LaqM,491
|
28
28
|
vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
|
29
29
|
vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
|
@@ -32,7 +32,7 @@ vectorvein/server/token_server.py,sha256=36F9PKSNOX8ZtYBXY_l-76GQTpUSmQ2Y8EMy1H7
|
|
32
32
|
vectorvein/settings/__init__.py,sha256=3Kw3hbvqcIQepAR6Q2m2UXbBnwyJTUm8yAz-aHmbUTg,11163
|
33
33
|
vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
34
34
|
vectorvein/types/__init__.py,sha256=ypg8c8AwF49FrFBMqmgH_eIBH4LFf0KN4kjqQa7zrvM,3376
|
35
|
-
vectorvein/types/defaults.py,sha256=
|
35
|
+
vectorvein/types/defaults.py,sha256=lb36arKD9hv3W49vZZzHH1FrsUjLeRz7JsZYkE4lI7U,32698
|
36
36
|
vectorvein/types/enums.py,sha256=LplSVkXLBK-t8TWtJKj_f7ktWTd6CSHWRLb67XKMm54,1716
|
37
37
|
vectorvein/types/exception.py,sha256=KtnqZ-1DstHm95SZAyZdHhkGq1bJ4A9Aw3Zfdu-VIFo,130
|
38
38
|
vectorvein/types/llm_parameters.py,sha256=Bdz9E_x0G96rvJ5TnEFPrU5QV4I2y0YFv7dY4Pq-MuU,7933
|
@@ -65,4 +65,4 @@ vectorvein/workflow/utils/analyse.py,sha256=msmvyz35UTYTwqQR5sg9H0sm1vxmGDSmep9X
|
|
65
65
|
vectorvein/workflow/utils/check.py,sha256=B_NdwqIqnc7Ko2HHqFpfOmWVaAu21tPITe0szKfiZKc,11414
|
66
66
|
vectorvein/workflow/utils/json_to_code.py,sha256=P8dhhSNgKhTnW17qXNjLO2aLdb0rA8qMAWxhObol2TU,7295
|
67
67
|
vectorvein/workflow/utils/layout.py,sha256=j0bRD3uaXu40xCS6U6BGahBI8FrHa5MiF55GbTrZ1LM,4565
|
68
|
-
vectorvein-0.2.78.dist-info/RECORD,,
|
68
|
+
vectorvein-0.2.80.dist-info/RECORD,,
|
File without changes
|
File without changes
|