vectorvein 0.1.36__tar.gz → 0.1.37__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {vectorvein-0.1.36 → vectorvein-0.1.37}/PKG-INFO +1 -1
  2. {vectorvein-0.1.36 → vectorvein-0.1.37}/pyproject.toml +1 -1
  3. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/anthropic_client.py +40 -26
  4. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -1
  5. {vectorvein-0.1.36 → vectorvein-0.1.37}/README.md +0 -0
  6. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/__init__.py +0 -0
  8. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  9. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/base_client.py +0 -0
  10. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  11. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  12. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/groq_client.py +0 -0
  13. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/local_client.py +0 -0
  14. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  15. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  16. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  17. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/openai_client.py +0 -0
  18. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/py.typed +0 -0
  19. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  20. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  21. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/utils.py +0 -0
  22. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/yi_client.py +0 -0
  23. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  24. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/py.typed +0 -0
  25. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/settings/__init__.py +0 -0
  26. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/settings/py.typed +0 -0
  27. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/types/defaults.py +0 -0
  28. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/types/enums.py +0 -0
  29. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/types/exception.py +0 -0
  30. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/types/llm_parameters.py +0 -0
  31. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/types/py.typed +0 -0
  32. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/utilities/media_processing.py +0 -0
  33. {vectorvein-0.1.36 → vectorvein-0.1.37}/src/vectorvein/utilities/retry.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vectorvein
- Version: 0.1.36
+ Version: 0.1.37
  Summary: Default template for PDM package
  Author-Email: Anderson <andersonby@163.com>
  License: MIT
@@ -16,7 +16,7 @@ description = "Default template for PDM package"
  name = "vectorvein"
  readme = "README.md"
  requires-python = ">=3.10"
- version = "0.1.36"
+ version = "0.1.37"

  [project.license]
  text = "MIT"
@@ -270,18 +270,24 @@ class AnthropicChatClient(BaseChatClient):

          if self.endpoint.api_schema_type == "openai":
              if self.stream:
-                 return OpenAICompatibleChatClient(
-                     model=self.model,
-                     stream=True,
-                     temperature=self.temperature,
-                     context_length_control=self.context_length_control,
-                     random_endpoint=self.random_endpoint,
-                     endpoint_id=self.endpoint_id,
-                     http_client=self.http_client,
-                     backend_name=self.BACKEND_NAME,
-                 ).create_completion(
-                     messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
-                 )
+
+                 def _generator():
+                     response = OpenAICompatibleChatClient(
+                         model=self.model,
+                         stream=True,
+                         temperature=self.temperature,
+                         context_length_control=self.context_length_control,
+                         random_endpoint=self.random_endpoint,
+                         endpoint_id=self.endpoint_id,
+                         http_client=self.http_client,
+                         backend_name=self.BACKEND_NAME,
+                     ).create_completion(
+                         messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                     )
+                     for chunk in response:
+                         yield chunk
+
+                 return _generator()
              else:
                  return OpenAICompatibleChatClient(
                      model=self.model,
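
A note on the new streaming branch above: the body of a local _generator() only runs once the caller starts iterating, so any setup work that the wrapped create_completion call performs at call time is deferred until the first chunk is pulled. The sketch below is a minimal, self-contained illustration of that pattern; fake_streaming_call and the bare create_completion function are made-up stand-ins, not vectorvein APIs.

    # Sketch of the generator-wrapper pattern from the hunk above.
    # All names here are illustrative stand-ins, not vectorvein code.
    from typing import Iterator

    def fake_streaming_call() -> Iterator[str]:
        # Stand-in for OpenAICompatibleChatClient(...).create_completion(...):
        # does its setup at call time, then hands back an iterator of chunks.
        print("request sent")
        return iter(("chunk-1", "chunk-2", "chunk-3"))

    def create_completion(stream: bool) -> Iterator[str]:
        if stream:
            def _generator() -> Iterator[str]:
                # Runs on the first next(), not when create_completion returns.
                response = fake_streaming_call()
                for chunk in response:
                    yield chunk
            return _generator()
        return iter(["full response"])

    chunks = create_completion(stream=True)  # nothing printed yet
    for chunk in chunks:                     # "request sent" appears on first pull
        print(chunk)
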
@@ -586,20 +592,27 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):

          if self.endpoint.api_schema_type == "openai":
              if self.stream:
-                 return AsyncOpenAICompatibleChatClient(
-                     model=self.model,
-                     stream=True,
-                     temperature=self.temperature,
-                     context_length_control=self.context_length_control,
-                     random_endpoint=self.random_endpoint,
-                     endpoint_id=self.endpoint_id,
-                     http_client=self.http_client,
-                     backend_name=self.BACKEND_NAME,
-                 ).create_completion(
-                     messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
-                 )
+
+                 async def _generator():
+                     client = AsyncOpenAICompatibleChatClient(
+                         model=self.model,
+                         stream=True,
+                         temperature=self.temperature,
+                         context_length_control=self.context_length_control,
+                         random_endpoint=self.random_endpoint,
+                         endpoint_id=self.endpoint_id,
+                         http_client=self.http_client,
+                         backend_name=self.BACKEND_NAME,
+                     )
+                     response = await client.create_completion(
+                         messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                     )
+                     async for chunk in response:
+                         yield chunk
+
+                 return _generator()
              else:
-                 return AsyncOpenAICompatibleChatClient(
+                 client = AsyncOpenAICompatibleChatClient(
                      model=self.model,
                      stream=False,
                      temperature=self.temperature,
@@ -608,7 +621,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                      endpoint_id=self.endpoint_id,
                      http_client=self.http_client,
                      backend_name=self.BACKEND_NAME,
-                 ).create_completion(
+                 )
+                 return await client.create_completion(
                      messages, model, False, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
                  )

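
The async refactor above needs an extra step the sync one does not: an async def containing yield is an async generator, which cannot return a value, and the awaitable returned by create_completion must be awaited before its chunks can be consumed with async for. Hence the split into a client construction followed by response = await client.create_completion(...), and the explicit return await in the non-streaming branch. Below is a minimal runnable sketch of the same shape using only the standard library; every name in it is an illustrative stand-in, not a vectorvein API.

    # Sketch of the async wrapper pattern from the hunks above.
    # asyncio/stdlib only; all names are illustrative stand-ins.
    import asyncio
    from typing import AsyncIterator

    async def fake_create_completion() -> AsyncIterator[str]:
        # Stand-in for AsyncOpenAICompatibleChatClient.create_completion:
        # a coroutine that resolves to an async iterator of chunks.
        await asyncio.sleep(0)  # pretend to send the request

        async def _chunks() -> AsyncIterator[str]:
            for part in ("chunk-1", "chunk-2"):
                yield part

        return _chunks()

    def create_completion_stream() -> AsyncIterator[str]:
        # An async generator may not `return await ...`, so the coroutine
        # is awaited inside the generator and its chunks are re-yielded.
        async def _generator() -> AsyncIterator[str]:
            response = await fake_create_completion()
            async for chunk in response:
                yield chunk

        return _generator()  # calling it only builds the generator; no await needed

    async def main() -> None:
        async for chunk in create_completion_stream():
            print(chunk)

    asyncio.run(main())
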
@@ -25,7 +25,6 @@ from ..types.llm_parameters import (
      NOT_GIVEN,
      ToolParam,
      ToolChoice,
-     BackendSettings,
      ChatCompletionMessage,
      ChatCompletionDeltaMessage,
  )