vectorvein 0.1.19.tar.gz → 0.1.21.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {vectorvein-0.1.19 → vectorvein-0.1.21}/PKG-INFO +1 -1
  2. {vectorvein-0.1.19 → vectorvein-0.1.21}/pyproject.toml +1 -1
  3. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/gemini_client.py +12 -0
  4. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/minimax_client.py +8 -0
  5. {vectorvein-0.1.19 → vectorvein-0.1.21}/README.md +0 -0
  6. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/__init__.py +0 -0
  8. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  9. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  10. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/base_client.py +0 -0
  11. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  12. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/groq_client.py +0 -0
  13. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/local_client.py +0 -0
  14. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  15. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  16. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/openai_client.py +0 -0
  17. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  18. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  19. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/utils.py +0 -0
  20. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/yi_client.py +0 -0
  21. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  22. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/settings/__init__.py +0 -0
  23. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/types/defaults.py +0 -0
  24. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/types/enums.py +0 -0
  25. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/types/llm_parameters.py +0 -0
  26. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/utilities/media_processing.py +0 -0
  27. {vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/utilities/retry.py +0 -0
{vectorvein-0.1.19 → vectorvein-0.1.21}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.19
+Version: 0.1.21
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
{vectorvein-0.1.19 → vectorvein-0.1.21}/pyproject.toml
@@ -16,7 +16,7 @@ description = "Default template for PDM package"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.1.19"
+version = "0.1.21"
 
 [project.license]
 text = "MIT"
{vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/gemini_client.py
@@ -39,6 +39,10 @@ class GeminiChatClient(BaseChatClient):
             **kwargs,
         )
 
+    @property
+    def raw_client(self):
+        return self.http_client
+
     def create_completion(
         self,
         messages: list = list,
@@ -48,6 +52,7 @@ class GeminiChatClient(BaseChatClient):
         max_tokens: int | None = None,
         tools: list | None = None,
         tool_choice: str | None = None,
+        **kwargs,
     ):
         if model is not None:
             self.model = model
@@ -95,6 +100,7 @@ class GeminiChatClient(BaseChatClient):
                 "maxOutputTokens": max_tokens,
             },
             **tools_params,
+            **kwargs,
         }
         if system_prompt:
             request_body["systemInstruction"] = {"parts": [{"text": system_prompt}]}
@@ -209,6 +215,10 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
             **kwargs,
         )
 
+    @property
+    def raw_client(self):
+        return self.http_client
+
     async def create_completion(
         self,
         messages: list = list,
@@ -218,6 +228,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
         max_tokens: int | None = None,
         tools: list | None = None,
         tool_choice: str | None = None,
+        **kwargs,
     ):
         if model is not None:
             self.model = model
@@ -265,6 +276,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
                 "maxOutputTokens": max_tokens,
             },
             **tools_params,
+            **kwargs,
         }
         if system_prompt:
             request_body["systemInstruction"] = {"parts": [{"text": system_prompt}]}
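Both the sync and async Gemini clients gain the same two additions: a raw_client property that exposes the underlying httpx client, and a **kwargs passthrough on create_completion that is merged into the request body sent to the Gemini API. The sketch below is illustrative only; the constructor arguments, message format, and the safetySettings field are assumptions for illustration and are not part of this diff.

# Illustrative sketch only -- constructor arguments and message format are assumed.
from vectorvein.chat_clients.gemini_client import GeminiChatClient

client = GeminiChatClient(model="gemini-1.5-pro")  # hypothetical configuration

# New in 0.1.21: direct access to the underlying HTTP client (self.http_client).
print(client.raw_client)

# New in 0.1.21: extra keyword arguments are spread into the request body,
# so provider-specific fields can be forwarded without a dedicated parameter.
response = client.create_completion(
    messages=[{"role": "user", "content": "Hello"}],
    safetySettings=[  # assumed Gemini request-body field, shown only for illustration
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"}
    ],
)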
{vectorvein-0.1.19 → vectorvein-0.1.21}/src/vectorvein/chat_clients/minimax_client.py
@@ -66,6 +66,10 @@ class MiniMaxChatClient(BaseChatClient):
         else:
             self.http_client = httpx.Client()
 
+    @property
+    def raw_client(self):
+        return self.http_client
+
     def create_completion(
         self,
         messages: list = list,
@@ -231,6 +235,10 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
         else:
             self.http_client = httpx.AsyncClient()
 
+    @property
+    def raw_client(self):
+        return self.http_client
+
     async def create_completion(
         self,
         messages: list = list,
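The MiniMax clients get the same raw_client property, so the httpx.Client or httpx.AsyncClient created in __init__ is now reachable from outside the class. A minimal async sketch, assuming an AsyncMiniMaxChatClient can be constructed as shown (the constructor call and model name are assumptions, not part of this diff):

# Illustrative sketch only -- the constructor call is assumed, not shown in this diff.
import asyncio
import httpx
from vectorvein.chat_clients.minimax_client import AsyncMiniMaxChatClient

async def main():
    client = AsyncMiniMaxChatClient(model="abab6.5s-chat")  # hypothetical configuration
    # New in 0.1.21: raw_client returns the httpx.AsyncClient created in __init__,
    # useful e.g. for closing it explicitly when the application shuts down.
    assert isinstance(client.raw_client, httpx.AsyncClient)
    await client.raw_client.aclose()

asyncio.run(main())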