vectorvein 0.1.22__tar.gz → 0.1.23__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {vectorvein-0.1.22 → vectorvein-0.1.23}/PKG-INFO +1 -1
  2. {vectorvein-0.1.22 → vectorvein-0.1.23}/pyproject.toml +1 -1
  3. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/gemini_client.py +10 -10
  4. {vectorvein-0.1.22 → vectorvein-0.1.23}/README.md +0 -0
  5. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/__init__.py +0 -0
  6. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/__init__.py +0 -0
  7. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  8. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  9. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/base_client.py +0 -0
  10. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  11. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/groq_client.py +0 -0
  12. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/local_client.py +0 -0
  13. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  14. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  15. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  16. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/openai_client.py +0 -0
  17. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  18. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  19. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/utils.py +0 -0
  20. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/yi_client.py +0 -0
  21. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  22. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/settings/__init__.py +0 -0
  23. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/types/defaults.py +0 -0
  24. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/types/enums.py +0 -0
  25. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/types/llm_parameters.py +0 -0
  26. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/utilities/media_processing.py +0 -0
  27. {vectorvein-0.1.22 → vectorvein-0.1.23}/src/vectorvein/utilities/retry.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.1.22
3
+ Version: 0.1.23
4
4
  Summary: Default template for PDM package
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -16,7 +16,7 @@ description = "Default template for PDM package"
16
16
  name = "vectorvein"
17
17
  readme = "README.md"
18
18
  requires-python = ">=3.10"
19
- version = "0.1.22"
19
+ version = "0.1.23"
20
20
 
21
21
  [project.license]
22
22
  text = "MIT"
@@ -83,10 +83,10 @@ class GeminiChatClient(BaseChatClient):
83
83
  else:
84
84
  tools_params = {}
85
85
 
86
- if response_format:
87
- response_format_params = {"generationConfig": {"response_mime_type": "application/json"}}
88
- else:
89
- response_format_params = {}
86
+ response_format_params = {}
87
+ if response_format is not None:
88
+ if response_format.get("type") == "json_object":
89
+ response_format_params = {"response_mime_type": "application/json"}
90
90
 
91
91
  if self.random_endpoint:
92
92
  self.random_endpoint = True
@@ -104,9 +104,9 @@ class GeminiChatClient(BaseChatClient):
104
104
  "generationConfig": {
105
105
  "temperature": self.temperature,
106
106
  "maxOutputTokens": max_tokens,
107
+ **response_format_params,
107
108
  },
108
109
  **tools_params,
109
- **response_format_params,
110
110
  **kwargs,
111
111
  }
112
112
  if system_prompt:
@@ -266,10 +266,10 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
266
266
  else:
267
267
  tools_params = {}
268
268
 
269
- if response_format:
270
- response_format_params = {"generationConfig": {"response_mime_type": "application/json"}}
271
- else:
272
- response_format_params = {}
269
+ response_format_params = {}
270
+ if response_format is not None:
271
+ if response_format.get("type") == "json_object":
272
+ response_format_params = {"response_mime_type": "application/json"}
273
273
 
274
274
  if self.random_endpoint:
275
275
  self.random_endpoint = True
@@ -287,9 +287,9 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
287
287
  "generationConfig": {
288
288
  "temperature": self.temperature,
289
289
  "maxOutputTokens": max_tokens,
290
+ **response_format_params,
290
291
  },
291
292
  **tools_params,
292
- **response_format_params,
293
293
  **kwargs,
294
294
  }
295
295
  if system_prompt:
File without changes