vectorvein 0.1.21.tar.gz → 0.1.22.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {vectorvein-0.1.21 → vectorvein-0.1.22}/PKG-INFO +1 -1
  2. {vectorvein-0.1.21 → vectorvein-0.1.22}/pyproject.toml +1 -1
  3. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/base_client.py +2 -0
  4. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/gemini_client.py +14 -0
  5. {vectorvein-0.1.21 → vectorvein-0.1.22}/README.md +0 -0
  6. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/__init__.py +0 -0
  8. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  9. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  10. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  11. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/groq_client.py +0 -0
  12. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/local_client.py +0 -0
  13. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  14. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  15. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  16. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/openai_client.py +0 -0
  17. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  18. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  19. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/utils.py +0 -0
  20. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/yi_client.py +0 -0
  21. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  22. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/settings/__init__.py +0 -0
  23. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/types/defaults.py +0 -0
  24. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/types/enums.py +0 -0
  25. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/types/llm_parameters.py +0 -0
  26. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/utilities/media_processing.py +0 -0
  27. {vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/utilities/retry.py +0 -0
{vectorvein-0.1.21 → vectorvein-0.1.22}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.21
+Version: 0.1.22
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
{vectorvein-0.1.21 → vectorvein-0.1.22}/pyproject.toml
@@ -16,7 +16,7 @@ description = "Default template for PDM package"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.1.21"
+version = "0.1.22"
 
 [project.license]
 text = "MIT"
{vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/base_client.py
@@ -59,6 +59,7 @@ class BaseChatClient(ABC):
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
         **kwargs,
     ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
         pass
@@ -130,6 +131,7 @@ class BaseAsyncChatClient(ABC):
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
         **kwargs,
     ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, None]:
         pass
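
Both abstract clients now accept an optional response_format dict in create_completion. A minimal usage sketch of how a caller might pass it through follows; the factory helper, backend value, and model name are illustrative assumptions, not taken from this diff.

# Hypothetical usage sketch: factory name, backend value, and model are assumptions;
# only the `response_format` keyword argument comes from this diff.
from vectorvein.chat_clients import create_chat_client  # assumed helper; adjust to the real API

client = create_chat_client(backend="gemini", model="gemini-1.5-pro", stream=False)
message = client.create_completion(
    messages=[{"role": "user", "content": "Return a JSON object with keys 'name' and 'age'."}],
    response_format={"type": "json_object"},  # any non-empty dict; each concrete client decides how to honor it
)
print(message)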
{vectorvein-0.1.21 → vectorvein-0.1.22}/src/vectorvein/chat_clients/gemini_client.py
@@ -52,6 +52,7 @@ class GeminiChatClient(BaseChatClient):
         max_tokens: int | None = None,
         tools: list | None = None,
         tool_choice: str | None = None,
+        response_format: dict | None = None,
         **kwargs,
     ):
         if model is not None:
@@ -82,6 +83,11 @@ class GeminiChatClient(BaseChatClient):
         else:
             tools_params = {}
 
+        if response_format:
+            response_format_params = {"generationConfig": {"response_mime_type": "application/json"}}
+        else:
+            response_format_params = {}
+
         if self.random_endpoint:
             self.random_endpoint = True
             self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
@@ -100,6 +106,7 @@ class GeminiChatClient(BaseChatClient):
                 "maxOutputTokens": max_tokens,
             },
             **tools_params,
+            **response_format_params,
             **kwargs,
         }
         if system_prompt:
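
Because the request body is assembled from a dict literal followed by ** unpacks, a duplicate "generationConfig" key coming from response_format_params replaces the earlier entry wholesale; Python dict literals do not deep-merge nested values. A self-contained sketch of that merge order, using placeholder values rather than the client's real request fields:

# Stand-alone illustration of the ** merge order used above; values are placeholders.
max_tokens = 1024
tools_params = {}  # as when no tools are supplied
response_format_params = {"generationConfig": {"response_mime_type": "application/json"}}

request_body = {
    "generationConfig": {"maxOutputTokens": max_tokens},
    **tools_params,
    **response_format_params,  # later duplicate key wins: the whole nested dict is replaced
}
print(request_body["generationConfig"])  # {'response_mime_type': 'application/json'}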
@@ -228,6 +235,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
         max_tokens: int | None = None,
         tools: list | None = None,
         tool_choice: str | None = None,
+        response_format: dict | None = None,
         **kwargs,
     ):
         if model is not None:
@@ -258,6 +266,11 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
         else:
             tools_params = {}
 
+        if response_format:
+            response_format_params = {"generationConfig": {"response_mime_type": "application/json"}}
+        else:
+            response_format_params = {}
+
         if self.random_endpoint:
             self.random_endpoint = True
             self.endpoint_id = random.choice(self.backend_settings.models[self.model].endpoints)
@@ -276,6 +289,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
                 "maxOutputTokens": max_tokens,
             },
             **tools_params,
+            **response_format_params,
             **kwargs,
         }
         if system_prompt:
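
The async client mirrors the synchronous path, threading response_format into the same generationConfig mapping. A hedged sketch of calling it; again, the factory helper, backend value, and model are illustrative assumptions rather than part of this diff.

# Hypothetical async usage sketch; factory name, backend, and model are assumptions.
import asyncio
from vectorvein.chat_clients import create_async_chat_client  # assumed helper; adjust to the real API

async def main():
    client = create_async_chat_client(backend="gemini", model="gemini-1.5-pro", stream=False)
    message = await client.create_completion(
        messages=[{"role": "user", "content": "List three colors as a JSON array."}],
        response_format={"type": "json_object"},  # truthy dict switches Gemini output to application/json
    )
    print(message)

asyncio.run(main())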