vectorvein-0.1.26-py3-none-any.whl → vectorvein-0.1.28-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vectorvein/chat_clients/__init__.py CHANGED
@@ -13,6 +13,7 @@ from .openai_client import OpenAIChatClient, AsyncOpenAIChatClient
 from .zhipuai_client import ZhiPuAIChatClient, AsyncZhiPuAIChatClient
 from .minimax_client import MiniMaxChatClient, AsyncMiniMaxChatClient
 from .mistral_client import MistralChatClient, AsyncMistralChatClient
+from .stepfun_client import StepFunChatClient, AsyncStepFunChatClient
 from .baichuan_client import BaichuanChatClient, AsyncBaichuanChatClient
 from .moonshot_client import MoonshotChatClient, AsyncMoonshotChatClient
 from .deepseek_client import DeepSeekChatClient, AsyncDeepSeekChatClient
@@ -38,6 +39,7 @@ BackendMap = {
         BackendType.Yi: YiChatClient,
         BackendType.ZhiPuAI: ZhiPuAIChatClient,
         BackendType.Baichuan: BaichuanChatClient,
+        BackendType.StepFun: StepFunChatClient,
     },
     "async": {
         BackendType.Anthropic: AsyncAnthropicChatClient,
@@ -53,6 +55,7 @@ BackendMap = {
         BackendType.Yi: AsyncYiChatClient,
         BackendType.ZhiPuAI: AsyncZhiPuAIChatClient,
         BackendType.Baichuan: AsyncBaichuanChatClient,
+        BackendType.StepFun: AsyncStepFunChatClient,
     },
 }
 
@@ -68,15 +71,13 @@ def create_chat_client(
     http_client: httpx.Client | None = None,
     **kwargs,
 ) -> BaseChatClient:
-    if backend.lower() not in BackendMap["sync"]:
+    if backend not in BackendMap["sync"]:
         raise ValueError(f"Unsupported backend: {backend}")
-    else:
-        backend_key = backend.lower()
 
-    ClientClass = BackendMap["sync"][backend_key]
+    ClientClass = BackendMap["sync"][backend]
     if model is None:
         model = ClientClass.DEFAULT_MODEL
-    return BackendMap["sync"][backend_key](
+    return BackendMap["sync"][backend](
         model=model,
         stream=stream,
         temperature=temperature,
@@ -99,15 +100,13 @@ def create_async_chat_client(
     http_client: httpx.AsyncClient | None = None,
     **kwargs,
 ) -> BaseAsyncChatClient:
-    if backend.lower() not in BackendMap["async"]:
+    if backend not in BackendMap["async"]:
         raise ValueError(f"Unsupported backend: {backend}")
-    else:
-        backend_key = backend.lower()
 
-    ClientClass = BackendMap["async"][backend_key]
+    ClientClass = BackendMap["async"][backend]
     if model is None:
         model = ClientClass.DEFAULT_MODEL
-    return BackendMap["async"][backend_key](
+    return BackendMap["async"][backend](
         model=model,
         stream=stream,
         temperature=temperature,
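
The dropped `.lower()` calls are safe because `BackendType` subclasses `str` (see the enums.py hunk below), so a member hashes and compares like its lowercase value and can index `BackendMap` directly. A minimal sketch of the semantics this relies on; the enum body here is abbreviated, not the package's full definition:

```python
from enum import Enum

class BackendType(str, Enum):
    Baichuan = "baichuan"
    StepFun = "stepfun"

BackendMap = {"sync": {BackendType.StepFun: "StepFunChatClient"}}

# A str-subclass enum member hashes and compares equal to its value,
# so both the member and the plain lowercase string hit the same key.
assert BackendType.StepFun == "stepfun"
assert BackendType.StepFun in BackendMap["sync"]
assert "stepfun" in BackendMap["sync"]
```

Note that the contract tightens slightly: a mixed-case plain string such as "StepFun" matched under the old `.lower()` normalization but now raises the `Unsupported backend` ValueError.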
vectorvein/chat_clients/base_client.py CHANGED
@@ -88,6 +88,22 @@ class BaseChatClient(ABC):
     ) -> Generator[ChatCompletionDeltaMessage, Any, None]:
         pass
 
+    @overload
+    @abstractmethod
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     @abstractmethod
     def create_completion(
         self,
@@ -196,6 +212,22 @@ class BaseAsyncChatClient(ABC):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, None]:
         pass
 
+    @overload
+    @abstractmethod
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int | None = None,
+        tools: list | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, None]:
+        pass
+
     @abstractmethod
     async def create_completion(
         self,
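
These catch-all overloads (repeated below for each concrete client) complement the existing `stream`-specific overloads whose tails appear as context lines above each hunk: without a `bool`-typed variant, type checkers reject call sites where `stream` is only known as `bool` at analysis time. A self-contained sketch of the pattern, with illustrative names rather than the package's real signatures:

```python
from typing import Any, Generator, Literal, overload

class Client:
    @overload
    def create_completion(self, stream: Literal[False] = False) -> str: ...
    @overload
    def create_completion(self, stream: Literal[True]) -> Generator[str, Any, None]: ...
    @overload
    def create_completion(self, stream: bool) -> str | Generator[str, Any, None]: ...

    def create_completion(self, stream: bool = False):
        # Single runtime implementation behind the typed overload stubs.
        if stream:
            return (chunk for chunk in ("partial ", "answer"))
        return "full answer"

flag: bool = True
result = Client().create_completion(flag)  # only the bool overload matches here
```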
vectorvein/chat_clients/gemini_client.py CHANGED
@@ -82,6 +82,21 @@ class GeminiChatClient(BaseChatClient):
     ) -> Generator[ChatCompletionDeltaMessage, None, None]:
         pass
 
+    @overload
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     def create_completion(
         self,
         messages: list,
@@ -295,6 +310,21 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
         pass
 
+    @overload
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
+        pass
+
     async def create_completion(
         self,
         messages: list,
vectorvein/chat_clients/minimax_client.py CHANGED
@@ -107,6 +107,21 @@ class MiniMaxChatClient(BaseChatClient):
     ) -> Generator[ChatCompletionDeltaMessage, None, None]:
         pass
 
+    @overload
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     def create_completion(
         self,
         messages: list,
@@ -307,6 +322,21 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
         pass
 
+    @overload
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
+        pass
+
     async def create_completion(
         self,
         messages: list,
vectorvein/chat_clients/openai_compatible_client.py CHANGED
@@ -107,6 +107,21 @@ class OpenAICompatibleChatClient(BaseChatClient):
     ) -> Generator[ChatCompletionDeltaMessage, None, None]:
         pass
 
+    @overload
+    def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | Generator[ChatCompletionDeltaMessage, Any, None]:
+        pass
+
     def create_completion(
         self,
         messages: list,
@@ -154,10 +169,10 @@ class OpenAICompatibleChatClient(BaseChatClient):
         max_output_tokens = self.model_setting.max_output_tokens
         token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model_setting.id)
         if max_output_tokens is not None:
-            max_tokens = self.model_setting.context_length - token_counts
+            max_tokens = self.model_setting.context_length - token_counts - 64
             max_tokens = min(max(max_tokens, 1), max_output_tokens)
         else:
-            max_tokens = self.model_setting.context_length - token_counts
+            max_tokens = self.model_setting.context_length - token_counts - 64
 
         if response_format and self.model_setting.response_format_available:
             self.response_format = {"response_format": response_format}
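
Both token-budget sites (here and in the async client below) now reserve 64 tokens of headroom, presumably to absorb chat-template and role-marker overhead that the local estimate from `get_message_token_counts` may undercount. Worked through with illustrative numbers for an 8K-window model:

```python
context_length = 8192        # model window (illustrative, e.g. an 8K model)
token_counts = 8000          # locally estimated prompt tokens
max_output_tokens = 2048

max_tokens = context_length - token_counts - 64        # 128
max_tokens = min(max(max_tokens, 1), max_output_tokens)
assert max_tokens == 128
# Without the margin the request would ask for 192 output tokens and could
# overflow the window whenever the local estimate runs a little low.
```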
@@ -314,6 +329,21 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
     ) -> AsyncGenerator[ChatCompletionDeltaMessage, Any]:
         pass
 
+    @overload
+    async def create_completion(
+        self,
+        messages: list,
+        model: str | None = None,
+        stream: bool | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
+        response_format: dict | None = None,
+        **kwargs,
+    ) -> ChatCompletionMessage | AsyncGenerator[ChatCompletionDeltaMessage, Any]:
+        pass
+
     async def create_completion(
         self,
         messages: list,
@@ -366,10 +396,10 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         max_output_tokens = self.model_setting.max_output_tokens
         token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model_setting.id)
         if max_output_tokens is not None:
-            max_tokens = self.model_setting.context_length - token_counts
+            max_tokens = self.model_setting.context_length - token_counts - 64
             max_tokens = min(max(max_tokens, 1), max_output_tokens)
         else:
-            max_tokens = self.model_setting.context_length - token_counts
+            max_tokens = self.model_setting.context_length - token_counts - 64
 
         if self.stream:
             stream_response: AsyncStream[ChatCompletionChunk] = await self.raw_client.chat.completions.create(
vectorvein/chat_clients/stepfun_client.py ADDED
@@ -0,0 +1,15 @@
+# @Author: Bi Ying
+# @Date:   2024-07-26 14:48:55
+from ..types.enums import BackendType
+from ..types.defaults import STEPFUN_DEFAULT_MODEL
+from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient
+
+
+class StepFunChatClient(OpenAICompatibleChatClient):
+    DEFAULT_MODEL = STEPFUN_DEFAULT_MODEL
+    BACKEND_NAME = BackendType.StepFun
+
+
+class AsyncStepFunChatClient(AsyncOpenAICompatibleChatClient):
+    DEFAULT_MODEL = STEPFUN_DEFAULT_MODEL
+    BACKEND_NAME = BackendType.StepFun
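
With the enum member and the `BackendMap` entries above, the new clients are reachable through the factory functions. A hedged usage sketch: the OpenAI-style message dict and the assumption that a StepFun endpoint with a valid API key is configured in settings are not shown in this diff.

```python
from vectorvein.chat_clients import create_chat_client
from vectorvein.types.enums import BackendType

# model defaults to StepFunChatClient.DEFAULT_MODEL ("step-1-8k") when omitted.
client = create_chat_client(backend=BackendType.StepFun, stream=False, temperature=0.7)
message = client.create_completion(messages=[{"role": "user", "content": "Hello"}])
```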
vectorvein/chat_clients/utils.py CHANGED
@@ -195,6 +195,31 @@ def get_token_counts(text: str | dict, model: str = "") -> int:
         return len(deepseek_tokenizer.encode(text))
     elif model.startswith("qwen"):
         return len(qwen_tokenizer.encode(text))
+    elif model.startswith("stepfun"):
+        model_setting = settings.stepfun.models[model]
+        if len(model_setting.endpoints) == 0:
+            return len(chatgpt_encoding.encode(text))
+        endpoint_id = model_setting.endpoints[0]
+        endpoint = settings.get_endpoint(endpoint_id)
+        tokenize_url = "https://api.stepfun.com/v1/token/count"
+        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {endpoint.api_key}"}
+        request_body = {
+            "model": model,
+            "messages": [
+                {"role": "user", "content": text},
+            ],
+        }
+        _, response = (
+            Retry(httpx.post)
+            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
+            .retry_times(5)
+            .sleep_time(10)
+            .run()
+        )
+        if response is None:
+            return 1000
+        result = response.json()
+        return result["data"]["total_tokens"]
     else:
         return len(chatgpt_encoding.encode(text))
 
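For StepFun models the token count is delegated to the provider's `/v1/token/count` endpoint, falling back to the local `chatgpt_encoding` when no endpoint is configured and to a flat 1000 after five failed attempts. Stripped of the `Retry` helper, the call reduces to the sketch below; the response shape (`data.total_tokens`) is inferred from the code above, and the API key is a placeholder:

```python
import httpx

resp = httpx.post(
    "https://api.stepfun.com/v1/token/count",
    headers={"Content-Type": "application/json", "Authorization": "Bearer YOUR_API_KEY"},
    json={"model": "step-1-8k", "messages": [{"role": "user", "content": "some text"}]},
    timeout=30.0,
)
total_tokens = resp.json()["data"]["total_tokens"]
```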
vectorvein/settings/__init__.py CHANGED
@@ -27,6 +27,7 @@ class Settings(BaseModel):
     yi: BackendSettings = Field(default_factory=BackendSettings, description="Yi models settings.")
     zhipuai: BackendSettings = Field(default_factory=BackendSettings, description="Zhipuai models settings.")
     baichuan: BackendSettings = Field(default_factory=BackendSettings, description="Baichuan models settings.")
+    stepfun: BackendSettings = Field(default_factory=BackendSettings, description="StepFun models settings.")
 
     def __init__(self, **data):
         model_types = {
@@ -43,6 +44,7 @@ class Settings(BaseModel):
             "yi": defs.YI_MODELS,
             "zhipuai": defs.ZHIPUAI_MODELS,
             "baichuan": defs.BAICHUAN_MODELS,
+            "stepfun": defs.STEPFUN_MODELS,
         }
 
         for model_type, default_models in model_types.items():
@@ -62,7 +64,7 @@ class Settings(BaseModel):
         for endpoint in self.endpoints:
             if endpoint.id == endpoint_id:
                 return endpoint
-        return EndpointSetting()
+        raise ValueError(f"Endpoint {endpoint_id} not found.")
 
     def get_backend(self, backend: BackendType) -> BackendSettings:
         return getattr(self, backend.value.lower())
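
`get_endpoint` now fails fast on an unknown id instead of returning a blank `EndpointSetting`, whose empty api_key previously deferred the failure to the first authenticated request. Callers that relied on the silent default need a guard; a hypothetical caller-side sketch:

```python
try:
    endpoint = settings.get_endpoint(endpoint_id)  # names here are placeholders
except ValueError:
    raise RuntimeError(f"No endpoint configured for {endpoint_id!r}") from None
```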
vectorvein/types/defaults.py CHANGED
@@ -588,3 +588,98 @@ GEMINI_MODELS = {
         "native_multimodal": True,
     },
 }
+
+# Baidu 文心一言 (ERNIE) models
+ERNIE_DEFAULT_MODEL = "ernie-lite"
+ERNIE_MODELS = {
+    "ernie-lite": {
+        "id": "ernie-lite",
+        "context_length": 6144,
+        "max_output_tokens": 2048,
+        "function_call_available": False,
+        "response_format_available": False,
+    },
+    "ernie-speed": {
+        "id": "ernie-speed",
+        "context_length": 126976,
+        "max_output_tokens": 4096,
+        "function_call_available": False,
+        "response_format_available": False,
+    },
+    "ernie-speed-pro-128k": {
+        "id": "ernie-speed-pro-128k",
+        "context_length": 126976,
+        "max_output_tokens": 4096,
+        "function_call_available": False,
+        "response_format_available": False,
+    },
+    "ernie-4.0-8k-latest": {
+        "id": "ernie-4.0-8k-latest",
+        "context_length": 5120,
+        "max_output_tokens": 2048,
+        "function_call_available": False,
+        "response_format_available": True,
+    },
+    "ernie-4.0-turbo-8k": {
+        "id": "ernie-4.0-turbo-8k",
+        "context_length": 5120,
+        "max_output_tokens": 2048,
+        "function_call_available": False,
+        "response_format_available": True,
+    },
+}
+
+
+STEPFUN_DEFAULT_MODEL = "step-1-8k"
+STEPFUN_MODELS = {
+    "step-1-8k": {
+        "id": "step-1-8k",
+        "context_length": 8192,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1-32k": {
+        "id": "step-1-32k",
+        "context_length": 32000,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1-128k": {
+        "id": "step-1-128k",
+        "context_length": 128000,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1-256k": {
+        "id": "step-1-256k",
+        "context_length": 256000,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-2-16k": {
+        "id": "step-2-16k",
+        "context_length": 16384,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1-flash": {
+        "id": "step-1-flash",
+        "context_length": 8192,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
+    "step-1v-8k": {
+        "id": "step-1v-8k",
+        "context_length": 8192,
+        "function_call_available": False,
+        "response_format_available": False,
+        "native_multimodal": True,
+    },
+    "step-1v-32k": {
+        "id": "step-1v-32k",
+        "context_length": 32768,
+        "function_call_available": False,
+        "response_format_available": False,
+        "native_multimodal": True,
+    },
+}
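
At `Settings` construction these tables seed each backend's `BackendSettings` through the `model_types` mapping shown in the settings hunk above, so the entries are reachable as attributes at runtime. An illustrative lookup; the shared `settings` instance and its import path are assumptions, not shown in this diff:

```python
from vectorvein.settings import settings  # hypothetical import of the shared instance

model_setting = settings.stepfun.models["step-1-8k"]
print(model_setting.context_length)  # 8192
print(model_setting.endpoints)       # endpoint ids configured for this model
```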
vectorvein/types/enums.py CHANGED
@@ -47,6 +47,9 @@ class BackendType(str, Enum):
     # Baichuan
     Baichuan = "baichuan"
 
+    # StepFun
+    StepFun = "stepfun"
+
     def __repr__(self):
         """Get a string representation."""
         return f'"{self.value}"'
vectorvein-0.1.26.dist-info/METADATA → vectorvein-0.1.28.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.26
+Version: 0.1.28
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
vectorvein-0.1.26.dist-info/RECORD → vectorvein-0.1.28.dist-info/RECORD CHANGED
@@ -1,29 +1,30 @@
-vectorvein-0.1.26.dist-info/METADATA,sha256=tVqLXUUSsa7mVULL_fiWHP0qk4uZ-gLyx2xPaLqLW0Q,502
-vectorvein-0.1.26.dist-info/WHEEL,sha256=Vza3XR51HW1KmFP0iIMUVYIvz0uQuKJpIXKYOBGQyFQ,90
-vectorvein-0.1.26.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+vectorvein-0.1.28.dist-info/METADATA,sha256=2YY41O0VggN_wq_vaj3NDEDrC2FkArNdZqaBOO0SViY,502
+vectorvein-0.1.28.dist-info/WHEEL,sha256=Vza3XR51HW1KmFP0iIMUVYIvz0uQuKJpIXKYOBGQyFQ,90
+vectorvein-0.1.28.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vectorvein/chat_clients/__init__.py,sha256=lOGrIEBGN-EoxJ-dF5uMsO6viNCIFIeNL8whDwE6x3g,4657
+vectorvein/chat_clients/__init__.py,sha256=zGdJXdzNC7usZfRSBVLQ0qZYmECbPTdCkdfFum8SDlI,4700
 vectorvein/chat_clients/anthropic_client.py,sha256=h82GxBi7h22B7leBuPofwBstxH_c12tEgGjpnKg6UDc,25007
 vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
-vectorvein/chat_clients/base_client.py,sha256=jSUSZNowUBg1Fl0Z2c9soPwx6glomoPyi7NFZIPLVBQ,7402
+vectorvein/chat_clients/base_client.py,sha256=wxh7WkzFG4cD4I4t4e6RGe1KiFZc8Z5llh2iVblXEZE,8415
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
-vectorvein/chat_clients/gemini_client.py,sha256=2SrREa_wJqTXOAvv-2LSo3DVR2vONsMzmBV9WFkNQuA,17640
+vectorvein/chat_clients/gemini_client.py,sha256=VxII45fMjE9JTlOuq4n7R0lNVQRoHTUyTNZE4ICXNrM,18685
 vectorvein/chat_clients/groq_client.py,sha256=Uow4pgdmFi93ZQSoOol2-0PhhqkW-S0XuSldvppz5U4,498
 vectorvein/chat_clients/local_client.py,sha256=55nOsxzqUf79q3Y14MKROA71zxhsT7p7FsDZ89rts2M,422
-vectorvein/chat_clients/minimax_client.py,sha256=dNMhCP74gRCnReR_xNosUkGc0_NP3IfNhp48WvBpU-4,16189
+vectorvein/chat_clients/minimax_client.py,sha256=ljnT9QtVUiySSQSECEv9g2vRfv88K2pPPNZH4sCh838,17204
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=Ojsxs7j6s4Hne7vMDbR0u-CtDx0WZQZ1NIsQIFm5BcA,17758
+vectorvein/chat_clients/openai_compatible_client.py,sha256=gfCTXji8pgFUiultiNDKcmPIGu7lFfQ9VmA8o2_Mm6c,18823
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
-vectorvein/chat_clients/utils.py,sha256=8Md6XOF_io0ACKRQ7ruqP1eJu7g9uo80eUiGbWrLx7k,23041
+vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
+vectorvein/chat_clients/utils.py,sha256=zwuXY7Bs14xXFdhBlnTmCTOezMeYr2cf5DeF0_5_WNE,24016
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
-vectorvein/settings/__init__.py,sha256=jVHbhHn1BuMcyfZGXrxWKiI4NdY9wzvYyGMvKYmUtqg,3378
-vectorvein/types/defaults.py,sha256=EouXmZvjbvDQhYJ-5FIz6Ee6Xyc7Ud1wlOPMIDnaAfY,17811
-vectorvein/types/enums.py,sha256=PNK_pTIyjJFy-yAG2PHaMIO1ey3W6fReMCkH8M8VRW4,1595
+vectorvein/settings/__init__.py,sha256=0L-2WicBq9ctaJRoSwx8ZhVtX4slS5tHrIlSGf-tJxg,3564
+vectorvein/types/defaults.py,sha256=gq0R_9QMsxJXE8cHrJPog9U81-XDWGZ4mbeQNLS1kOU,20609
+vectorvein/types/enums.py,sha256=x_S0IJiEWijOAEiMNdiGDGEWGtmt7TwMriJVDqrDmTo,1637
 vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E,69
 vectorvein/types/llm_parameters.py,sha256=N6RQ8tqO1RCywMFRWPooffeAEPd9x3JW6Bl4UgQtF5I,4379
 vectorvein/utilities/media_processing.py,sha256=BujciRmw1GMmc3ELRvafL8STcy6r5b2rVnh27-uA7so,2256
 vectorvein/utilities/retry.py,sha256=9ePuJdeUUGx-qMWfaFxmlOvG_lQPwCQ4UB1z3Edlo34,993
-vectorvein-0.1.26.dist-info/RECORD,,
+vectorvein-0.1.28.dist-info/RECORD,,