vectorvein 0.1.21__py3-none-any.whl → 0.1.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/base_client.py +2 -0
- vectorvein/chat_clients/gemini_client.py +14 -0
- {vectorvein-0.1.21.dist-info → vectorvein-0.1.23.dist-info}/METADATA +1 -1
- {vectorvein-0.1.21.dist-info → vectorvein-0.1.23.dist-info}/RECORD +6 -6
- {vectorvein-0.1.21.dist-info → vectorvein-0.1.23.dist-info}/WHEEL +0 -0
- {vectorvein-0.1.21.dist-info → vectorvein-0.1.23.dist-info}/entry_points.txt +0 -0
vectorvein/chat_clients/base_client.py

@@ -59,6 +59,7 @@ class BaseChatClient(ABC):
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
         **kwargs,
     ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
         pass

@@ -130,6 +131,7 @@ class BaseAsyncChatClient(ABC):
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
         **kwargs,
     ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, None]:
         pass
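Both hunks above only extend the abstract interface: the synchronous and asynchronous base clients each gain an optional, OpenAI-style `response_format` dict that concrete clients are free to interpret. A minimal standalone sketch of that pattern is below; the class and method names (`SketchClient`, `EchoClient`, `complete`) are hypothetical and not the package's real API, and the real abstract method carries more parameters than this hunk shows.

```python
# Illustrative sketch only -- mirrors the shape of the change above, where an
# optional OpenAI-style response_format dict is threaded through an abstract
# chat-completion method. Names here are hypothetical.
from abc import ABC, abstractmethod
from typing import Any


class SketchClient(ABC):
    @abstractmethod
    def complete(
        self,
        max_tokens: int | None = None,
        tools: list | None = None,
        tool_choice: str | None = None,
        response_format: dict | None = None,  # e.g. {"type": "json_object"}
        **kwargs: Any,
    ) -> Any:
        """Concrete clients decide how to translate response_format for their backend."""


class EchoClient(SketchClient):
    def complete(
        self,
        max_tokens: int | None = None,
        tools: list | None = None,
        tool_choice: str | None = None,
        response_format: dict | None = None,
        **kwargs: Any,
    ) -> Any:
        # Trivial implementation that just reports whether JSON mode was requested.
        wants_json = bool(response_format) and response_format.get("type") == "json_object"
        return {"json_mode": wants_json, "max_tokens": max_tokens}


if __name__ == "__main__":
    print(EchoClient().complete(max_tokens=256, response_format={"type": "json_object"}))
```

The Gemini hunks below show the one concrete mapping added for this parameter in this release.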
vectorvein/chat_clients/gemini_client.py

@@ -52,6 +52,7 @@ class GeminiChatClient(BaseChatClient):
         max_tokens: int | None = None,
         tools: list | None = None,
         tool_choice: str | None = None,
+        response_format: dict | None = None,
         **kwargs,
     ):
         if model is not None:

@@ -82,6 +83,11 @@ class GeminiChatClient(BaseChatClient):
         else:
             tools_params = {}

+        response_format_params = {}
+        if response_format is not None:
+            if response_format.get("type") == "json_object":
+                response_format_params = {"response_mime_type": "application/json"}
+
         if self.random_endpoint:
             self.random_endpoint = True
             self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)

@@ -98,6 +104,7 @@ class GeminiChatClient(BaseChatClient):
             "generationConfig": {
                 "temperature": self.temperature,
                 "maxOutputTokens": max_tokens,
+                **response_format_params,
             },
             **tools_params,
             **kwargs,

@@ -228,6 +235,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
         max_tokens: int | None = None,
         tools: list | None = None,
         tool_choice: str | None = None,
+        response_format: dict | None = None,
         **kwargs,
     ):
         if model is not None:

@@ -258,6 +266,11 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
         else:
             tools_params = {}

+        response_format_params = {}
+        if response_format is not None:
+            if response_format.get("type") == "json_object":
+                response_format_params = {"response_mime_type": "application/json"}
+
         if self.random_endpoint:
             self.random_endpoint = True
             self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)

@@ -274,6 +287,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
             "generationConfig": {
                 "temperature": self.temperature,
                 "maxOutputTokens": max_tokens,
+                **response_format_params,
             },
             **tools_params,
             **kwargs,
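Taken together, the Gemini changes translate an OpenAI-style `response_format` into Gemini's generation config, identically in the sync and async clients. The sketch below restates that mapping as a standalone helper so it can be run in isolation; the function name `build_generation_config` is mine, but the keys and the `json_object` check come directly from the hunks above.

```python
# Minimal sketch of the mapping the Gemini clients now apply.
# build_generation_config is a hypothetical helper name; temperature,
# maxOutputTokens and response_mime_type are the keys used in the diff.
def build_generation_config(
    temperature: float,
    max_tokens: int | None,
    response_format: dict | None = None,
) -> dict:
    # An OpenAI-style {"type": "json_object"} request becomes Gemini's
    # response_mime_type = "application/json"; anything else adds nothing.
    response_format_params = {}
    if response_format is not None and response_format.get("type") == "json_object":
        response_format_params = {"response_mime_type": "application/json"}
    return {
        "temperature": temperature,
        "maxOutputTokens": max_tokens,
        **response_format_params,
    }


if __name__ == "__main__":
    print(build_generation_config(0.7, 1024, {"type": "json_object"}))
    # {'temperature': 0.7, 'maxOutputTokens': 1024, 'response_mime_type': 'application/json'}
```

Any `response_format` value other than `{"type": "json_object"}` leaves the request body untouched, which matches the guard added in the diff.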
{vectorvein-0.1.21.dist-info → vectorvein-0.1.23.dist-info}/RECORD

@@ -1,13 +1,13 @@
-vectorvein-0.1.
-vectorvein-0.1.
-vectorvein-0.1.
+vectorvein-0.1.23.dist-info/METADATA,sha256=WlWH7IJNvTkNwtDxV2IAtUEgfGvjhhfkLlGK7vZluPM,502
+vectorvein-0.1.23.dist-info/WHEEL,sha256=Yaoh9rlmnhBUmykFXwmFW_cmVAW3ZIWH8QkQVngJtyw,90
+vectorvein-0.1.23.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/__init__.py,sha256=lOGrIEBGN-EoxJ-dF5uMsO6viNCIFIeNL8whDwE6x3g,4657
 vectorvein/chat_clients/anthropic_client.py,sha256=VJQi7cKLbbLkJtmEFN9M41JUaGjwEuJaXIuQLK-3rdw,20332
 vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
-vectorvein/chat_clients/base_client.py,sha256=
+vectorvein/chat_clients/base_client.py,sha256=nGml8rGyKc71Wl2nMbqt_5L9gbX-MCRU2jvHJvxF4AY,5108
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
-vectorvein/chat_clients/gemini_client.py,sha256=
+vectorvein/chat_clients/gemini_client.py,sha256=N08OPbCDK-_jN_CzEG8rIKlrMMNqW6cq9ucjREqF6dY,15295
 vectorvein/chat_clients/groq_client.py,sha256=Uow4pgdmFi93ZQSoOol2-0PhhqkW-S0XuSldvppz5U4,498
 vectorvein/chat_clients/local_client.py,sha256=55nOsxzqUf79q3Y14MKROA71zxhsT7p7FsDZ89rts2M,422
 vectorvein/chat_clients/minimax_client.py,sha256=xykptVg0qzOrJKuswHLG3M7r8H37jEMOb0gAnYAUdKQ,13947

@@ -25,4 +25,4 @@ vectorvein/types/enums.py,sha256=PNK_pTIyjJFy-yAG2PHaMIO1ey3W6fReMCkH8M8VRW4,159
 vectorvein/types/llm_parameters.py,sha256=mmJjJZz4bPRi0nHzYNUNdWsQLHa9lbf3-MNVnU78vaY,3608
 vectorvein/utilities/media_processing.py,sha256=BujciRmw1GMmc3ELRvafL8STcy6r5b2rVnh27-uA7so,2256
 vectorvein/utilities/retry.py,sha256=9ePuJdeUUGx-qMWfaFxmlOvG_lQPwCQ4UB1z3Edlo34,993
-vectorvein-0.1.
+vectorvein-0.1.23.dist-info/RECORD,,
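For reference when reading the RECORD hunks: each row is `path,sha256=<digest>,<size-in-bytes>`, where the digest is an unpadded urlsafe-base64 SHA-256 of the file. This is standard wheel metadata, not anything specific to vectorvein; the small helper below (my own, for illustration) reproduces such a digest for a local file.

```python
# Compute a wheel-RECORD style digest: sha256, urlsafe base64, padding stripped.
import base64
import hashlib


def record_digest(path: str) -> str:
    with open(path, "rb") as f:
        raw = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode()


# record_digest("some_file.py") -> a string in the same format as the hashes above
```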
{vectorvein-0.1.21.dist-info → vectorvein-0.1.23.dist-info}/WHEEL: file without changes.

{vectorvein-0.1.21.dist-info → vectorvein-0.1.23.dist-info}/entry_points.txt: file without changes.