vectorvein 0.1.13__tar.gz → 0.1.14__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {vectorvein-0.1.13 → vectorvein-0.1.14}/PKG-INFO +1 -1
  2. {vectorvein-0.1.13 → vectorvein-0.1.14}/pyproject.toml +1 -1
  3. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/__init__.py +3 -0
  4. vectorvein-0.1.14/src/vectorvein/chat_clients/baichuan_client.py +15 -0
  5. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/settings/__init__.py +2 -0
  6. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/types/defaults.py +40 -0
  7. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/types/enums.py +3 -0
  8. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/sample_settings.py +14 -0
  9. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/test_http_client.py +1 -1
  10. {vectorvein-0.1.13 → vectorvein-0.1.14}/README.md +0 -0
  11. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/__init__.py +0 -0
  12. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  13. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/base_client.py +0 -0
  14. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  15. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  16. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/groq_client.py +0 -0
  17. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/local_client.py +0 -0
  18. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  19. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  20. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  21. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/openai_client.py +0 -0
  22. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  23. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  24. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/utils.py +0 -0
  25. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/yi_client.py +0 -0
  26. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  27. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/types/llm_parameters.py +0 -0
  28. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/utilities/media_processing.py +0 -0
  29. {vectorvein-0.1.13 → vectorvein-0.1.14}/src/vectorvein/utilities/retry.py +0 -0
  30. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/__init__.py +0 -0
  31. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/cat.png +0 -0
  32. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/test_chat_prefix.py +0 -0
  33. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/test_create_chat_client.py +0 -0
  34. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/test_format_messages.py +0 -0
  35. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/test_image_input_chat_client.py +0 -0
  36. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/test_stop.py +0 -0
  37. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/test_tokens_count.py +0 -0
  38. {vectorvein-0.1.13 → vectorvein-0.1.14}/tests/test_tool_use_multi_turns.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.1.13
3
+ Version: 0.1.14
4
4
  Summary: Default template for PDM package
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -16,7 +16,7 @@ description = "Default template for PDM package"
16
16
  name = "vectorvein"
17
17
  readme = "README.md"
18
18
  requires-python = ">=3.10"
19
- version = "0.1.13"
19
+ version = "0.1.14"
20
20
 
21
21
  [project.license]
22
22
  text = "MIT"
@@ -13,6 +13,7 @@ from .openai_client import OpenAIChatClient, AsyncOpenAIChatClient
13
13
  from .zhipuai_client import ZhiPuAIChatClient, AsyncZhiPuAIChatClient
14
14
  from .minimax_client import MiniMaxChatClient, AsyncMiniMaxChatClient
15
15
  from .mistral_client import MistralChatClient, AsyncMistralChatClient
16
+ from .baichuan_client import BaichuanChatClient, AsyncBaichuanChatClient
16
17
  from .moonshot_client import MoonshotChatClient, AsyncMoonshotChatClient
17
18
  from .deepseek_client import DeepSeekChatClient, AsyncDeepSeekChatClient
18
19
 
@@ -36,6 +37,7 @@ BackendMap = {
36
37
  BackendType.Qwen: QwenChatClient,
37
38
  BackendType.Yi: YiChatClient,
38
39
  BackendType.ZhiPuAI: ZhiPuAIChatClient,
40
+ BackendType.Baichuan: BaichuanChatClient,
39
41
  },
40
42
  "async": {
41
43
  BackendType.Anthropic: AsyncAnthropicChatClient,
@@ -50,6 +52,7 @@ BackendMap = {
50
52
  BackendType.Qwen: AsyncQwenChatClient,
51
53
  BackendType.Yi: AsyncYiChatClient,
52
54
  BackendType.ZhiPuAI: AsyncZhiPuAIChatClient,
55
+ BackendType.Baichuan: AsyncBaichuanChatClient,
53
56
  },
54
57
  }
55
58
 
@@ -0,0 +1,15 @@
1
+ # @Author: Bi Ying
2
+ # @Date: 2024-07-26 14:48:55
3
+ from ..types.enums import BackendType
4
+ from ..types.defaults import BAICHUAN_DEFAULT_MODEL
5
+ from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient
6
+
7
+
8
+ class BaichuanChatClient(OpenAICompatibleChatClient):
9
+ DEFAULT_MODEL = BAICHUAN_DEFAULT_MODEL
10
+ BACKEND_NAME = BackendType.Baichuan
11
+
12
+
13
+ class AsyncBaichuanChatClient(AsyncOpenAICompatibleChatClient):
14
+ DEFAULT_MODEL = BAICHUAN_DEFAULT_MODEL
15
+ BACKEND_NAME = BackendType.Baichuan
@@ -26,6 +26,7 @@ class Settings(BaseModel):
26
26
  qwen: BackendSettings = Field(default_factory=BackendSettings, description="Qwen models settings.")
27
27
  yi: BackendSettings = Field(default_factory=BackendSettings, description="Yi models settings.")
28
28
  zhipuai: BackendSettings = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
29
+ baichuan: BackendSettings = Field(default_factory=BackendSettings, description="Baichuan models settings.")
29
30
 
30
31
  def __init__(self, **data):
31
32
  model_types = {
@@ -41,6 +42,7 @@ class Settings(BaseModel):
41
42
  "qwen": defs.QWEN_MODELS,
42
43
  "yi": defs.YI_MODELS,
43
44
  "zhipuai": defs.ZHIPUAI_MODELS,
45
+ "baichuan": defs.BAICHUAN_MODELS,
44
46
  }
45
47
 
46
48
  for model_type, default_models in model_types.items():
@@ -52,6 +52,46 @@ DEEPSEEK_MODELS = {
52
52
  }
53
53
  DEEPSEEK_DEFAULT_MODEL = "deepseek-chat"
54
54
 
55
+ # Baichuan models
56
+ BAICHUAN_MODELS = {
57
+ "Baichuan4": {
58
+ "id": "Baichuan4",
59
+ "context_length": 32768,
60
+ "max_output_tokens": 2048,
61
+ "function_call_available": True,
62
+ "response_format_available": True,
63
+ },
64
+ "Baichuan3-Turbo": {
65
+ "id": "Baichuan3-Turbo",
66
+ "context_length": 32768,
67
+ "max_output_tokens": 2048,
68
+ "function_call_available": True,
69
+ "response_format_available": True,
70
+ },
71
+ "Baichuan3-Turbo-128k": {
72
+ "id": "Baichuan3-Turbo-128k",
73
+ "context_length": 128000,
74
+ "max_output_tokens": 2048,
75
+ "function_call_available": True,
76
+ "response_format_available": True,
77
+ },
78
+ "Baichuan2-Turbo": {
79
+ "id": "Baichuan2-Turbo",
80
+ "context_length": 32768,
81
+ "max_output_tokens": 2048,
82
+ "function_call_available": True,
83
+ "response_format_available": False,
84
+ },
85
+ "Baichuan2-53B": {
86
+ "id": "Baichuan2-53B",
87
+ "context_length": 32768,
88
+ "max_output_tokens": 2048,
89
+ "function_call_available": False,
90
+ "response_format_available": False,
91
+ },
92
+ }
93
+ BAICHUAN_DEFAULT_MODEL = "Baichuan3-Turbo"
94
+
55
95
  # Groq models
56
96
  GROQ_DEFAULT_MODEL = "llama3-70b-8192"
57
97
  GROQ_MODELS = {
@@ -44,6 +44,9 @@ class BackendType(str, Enum):
44
44
  # Gemini
45
45
  Gemini = "gemini"
46
46
 
47
+ # Baichuan
48
+ Baichuan = "baichuan"
49
+
47
50
  def __repr__(self):
48
51
  """Get a string representation."""
49
52
  return f'"{self.value}"'
@@ -399,6 +399,11 @@ sample_settings = {
399
399
  "api_base": "https://api.siliconflow.cn/v1",
400
400
  "api_key": "sk-ewmzcvzrandjdqcpvtbmxnunklayxbrzcxzgbfegxbkiqucf",
401
401
  },
402
+ {
403
+ "id": "baichuan-default",
404
+ "api_base": "https://api.baichuan-ai.com/v1",
405
+ "api_key": "360b0c2e9b8bc3fa624c8eb666fcda08",
406
+ },
402
407
  ],
403
408
  "moonshot": {
404
409
  "models": {
@@ -597,4 +602,13 @@ sample_settings = {
597
602
  "glm-4v": {"id": "glm-4v", "endpoints": ["zhipuai-default"]},
598
603
  }
599
604
  },
605
+ "baichuan": {
606
+ "models": {
607
+ "Baichuan4": {"id": "Baichuan4", "endpoints": ["baichuan-default"]},
608
+ "Baichuan3-Turbo": {"id": "Baichuan3-Turbo", "endpoints": ["baichuan-default"]},
609
+ "Baichuan3-Turbo-128k": {"id": "Baichuan3-Turbo-128k", "endpoints": ["baichuan-default"]},
610
+ "Baichuan2-Turbo": {"id": "Baichuan2-Turbo", "endpoints": ["baichuan-default"]},
611
+ "Baichuan2-53B": {"id": "Baichuan2-53B", "endpoints": ["baichuan-default"]},
612
+ }
613
+ },
600
614
  }
@@ -17,7 +17,7 @@ messages = [
17
17
 
18
18
  start_time = time.perf_counter()
19
19
  http_client = httpx.Client()
20
- client = create_chat_client(backend=BackendType.DeepSeek, model="deepseek-chat", stream=False, http_client=http_client)
20
+ client = create_chat_client(backend=BackendType.Baichuan, stream=False, http_client=http_client)
21
21
  response = client.create_completion(messages=messages)
22
22
  print(response)
23
23
  end_time = time.perf_counter()
File without changes
File without changes