vectorvein 0.1.44__tar.gz → 0.1.45__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {vectorvein-0.1.44 → vectorvein-0.1.45}/PKG-INFO +1 -1
  2. {vectorvein-0.1.44 → vectorvein-0.1.45}/pyproject.toml +1 -1
  3. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/anthropic_client.py +9 -4
  4. {vectorvein-0.1.44 → vectorvein-0.1.45}/README.md +0 -0
  5. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/__init__.py +0 -0
  6. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/__init__.py +0 -0
  7. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  8. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/base_client.py +0 -0
  9. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  10. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  11. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/groq_client.py +0 -0
  12. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/local_client.py +0 -0
  13. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  14. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  15. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  16. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/openai_client.py +0 -0
  17. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  18. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/py.typed +0 -0
  19. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  20. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  21. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/utils.py +0 -0
  22. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/yi_client.py +0 -0
  23. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  24. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/py.typed +0 -0
  25. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/server/token_server.py +0 -0
  26. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/settings/__init__.py +0 -0
  27. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/settings/py.typed +0 -0
  28. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/types/defaults.py +0 -0
  29. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/types/enums.py +0 -0
  30. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/types/exception.py +0 -0
  31. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/types/llm_parameters.py +0 -0
  32. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/types/py.typed +0 -0
  33. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/utilities/media_processing.py +0 -0
  34. {vectorvein-0.1.44 → vectorvein-0.1.45}/src/vectorvein/utilities/retry.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.1.44
3
+ Version: 0.1.45
4
4
  Summary: Default template for PDM package
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -17,7 +17,7 @@ description = "Default template for PDM package"
17
17
  name = "vectorvein"
18
18
  readme = "README.md"
19
19
  requires-python = ">=3.10"
20
- version = "0.1.44"
20
+ version = "0.1.45"
21
21
 
22
22
  [project.license]
23
23
  text = "MIT"
@@ -7,6 +7,7 @@ from typing import overload, Generator, AsyncGenerator, Any, Literal, Iterable
7
7
 
8
8
  import httpx
9
9
  from openai._types import NotGiven as OpenAINotGiven
10
+ from openai._types import NOT_GIVEN as OPENAI_NOT_GIVEN
10
11
  from anthropic import Anthropic, AnthropicVertex, AsyncAnthropic, AsyncAnthropicVertex
11
12
  from anthropic._types import NOT_GIVEN
12
13
  from anthropic.types import (
@@ -282,6 +283,8 @@ class AnthropicChatClient(BaseChatClient):
282
283
  self.endpoint = settings.get_endpoint(self.endpoint_id)
283
284
 
284
285
  if self.endpoint.api_schema_type == "openai":
286
+ _tools = OPENAI_NOT_GIVEN if tools is NOT_GIVEN else tools
287
+ _tool_choice = OPENAI_NOT_GIVEN if tool_choice is NOT_GIVEN else tool_choice
285
288
  if self.stream:
286
289
 
287
290
  def _generator():
@@ -295,7 +298,7 @@ class AnthropicChatClient(BaseChatClient):
295
298
  http_client=self.http_client,
296
299
  backend_name=self.BACKEND_NAME,
297
300
  ).create_completion(
298
- messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
301
+ messages, model, True, temperature, max_tokens, _tools, _tool_choice, response_format, **kwargs
299
302
  )
300
303
  for chunk in response:
301
304
  yield chunk
@@ -312,7 +315,7 @@ class AnthropicChatClient(BaseChatClient):
312
315
  http_client=self.http_client,
313
316
  backend_name=self.BACKEND_NAME,
314
317
  ).create_completion(
315
- messages, model, False, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
318
+ messages, model, False, temperature, max_tokens, _tools, _tool_choice, response_format, **kwargs
316
319
  )
317
320
 
318
321
  assert isinstance(self.raw_client, Anthropic | AnthropicVertex)
@@ -620,6 +623,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
620
623
  self.endpoint = settings.get_endpoint(self.endpoint_id)
621
624
 
622
625
  if self.endpoint.api_schema_type == "openai":
626
+ _tools = OPENAI_NOT_GIVEN if tools is NOT_GIVEN else tools
627
+ _tool_choice = OPENAI_NOT_GIVEN if tool_choice is NOT_GIVEN else tool_choice
623
628
  if self.stream:
624
629
 
625
630
  async def _generator():
@@ -634,7 +639,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
634
639
  backend_name=self.BACKEND_NAME,
635
640
  )
636
641
  response = await client.create_completion(
637
- messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
642
+ messages, model, True, temperature, max_tokens, _tools, _tool_choice, response_format, **kwargs
638
643
  )
639
644
  async for chunk in response:
640
645
  yield chunk
@@ -652,7 +657,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
652
657
  backend_name=self.BACKEND_NAME,
653
658
  )
654
659
  return await client.create_completion(
655
- messages, model, False, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
660
+ messages, model, False, temperature, max_tokens, _tools, _tool_choice, response_format, **kwargs
656
661
  )
657
662
 
658
663
  assert isinstance(self.raw_client, AsyncAnthropic | AsyncAnthropicVertex)
File without changes