vectorvein 0.1.28.tar.gz → 0.1.29.tar.gz
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {vectorvein-0.1.28 → vectorvein-0.1.29}/PKG-INFO +1 -1
- {vectorvein-0.1.28 → vectorvein-0.1.29}/pyproject.toml +1 -1
- vectorvein-0.1.29/src/vectorvein/chat_clients/__init__.py +522 -0
- vectorvein-0.1.28/src/vectorvein/chat_clients/__init__.py +0 -129
- {vectorvein-0.1.28 → vectorvein-0.1.29}/README.md +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.28 → vectorvein-0.1.29}/src/vectorvein/utilities/retry.py +0 -0
vectorvein-0.1.29/src/vectorvein/chat_clients/__init__.py (new file, 522 lines):

```diff
@@ -0,0 +1,522 @@
+# @Author: Bi Ying
+# @Date: 2024-07-26 14:48:55
+import httpx
+from typing import overload, Literal
+
+from .base_client import BaseChatClient, BaseAsyncChatClient
+
+from .yi_client import YiChatClient, AsyncYiChatClient
+from .groq_client import GroqChatClient, AsyncGroqChatClient
+from .qwen_client import QwenChatClient, AsyncQwenChatClient
+from .local_client import LocalChatClient, AsyncLocalChatClient
+from .gemini_client import GeminiChatClient, AsyncGeminiChatClient
+from .openai_client import OpenAIChatClient, AsyncOpenAIChatClient
+from .zhipuai_client import ZhiPuAIChatClient, AsyncZhiPuAIChatClient
+from .minimax_client import MiniMaxChatClient, AsyncMiniMaxChatClient
+from .mistral_client import MistralChatClient, AsyncMistralChatClient
+from .stepfun_client import StepFunChatClient, AsyncStepFunChatClient
+from .baichuan_client import BaichuanChatClient, AsyncBaichuanChatClient
+from .moonshot_client import MoonshotChatClient, AsyncMoonshotChatClient
+from .deepseek_client import DeepSeekChatClient, AsyncDeepSeekChatClient
+
+from ..types import defaults as defs
+from ..types.enums import BackendType, ContextLengthControlType
+from .anthropic_client import AnthropicChatClient, AsyncAnthropicChatClient
+from .utils import format_messages, get_token_counts, get_message_token_counts, ToolCallContentProcessor
+
+# Backend mapping
+BackendMap = {
+    "sync": {
+        BackendType.Anthropic: AnthropicChatClient,
+        BackendType.DeepSeek: DeepSeekChatClient,
+        BackendType.Gemini: GeminiChatClient,
+        BackendType.Groq: GroqChatClient,
+        BackendType.Local: LocalChatClient,
+        BackendType.MiniMax: MiniMaxChatClient,
+        BackendType.Mistral: MistralChatClient,
+        BackendType.Moonshot: MoonshotChatClient,
+        BackendType.OpenAI: OpenAIChatClient,
+        BackendType.Qwen: QwenChatClient,
+        BackendType.Yi: YiChatClient,
+        BackendType.ZhiPuAI: ZhiPuAIChatClient,
+        BackendType.Baichuan: BaichuanChatClient,
+        BackendType.StepFun: StepFunChatClient,
+    },
+    "async": {
+        BackendType.Anthropic: AsyncAnthropicChatClient,
+        BackendType.DeepSeek: AsyncDeepSeekChatClient,
+        BackendType.Gemini: AsyncGeminiChatClient,
+        BackendType.Groq: AsyncGroqChatClient,
+        BackendType.Local: AsyncLocalChatClient,
+        BackendType.MiniMax: AsyncMiniMaxChatClient,
+        BackendType.Mistral: AsyncMistralChatClient,
+        BackendType.Moonshot: AsyncMoonshotChatClient,
+        BackendType.OpenAI: AsyncOpenAIChatClient,
+        BackendType.Qwen: AsyncQwenChatClient,
+        BackendType.Yi: AsyncYiChatClient,
+        BackendType.ZhiPuAI: AsyncZhiPuAIChatClient,
+        BackendType.Baichuan: AsyncBaichuanChatClient,
+        BackendType.StepFun: AsyncStepFunChatClient,
+    },
+}
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Anthropic],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> AnthropicChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.DeepSeek],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> DeepSeekChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Gemini],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> GeminiChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Groq],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> GroqChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Local],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> LocalChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.MiniMax],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> MiniMaxChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Mistral],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> MistralChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Moonshot],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> MoonshotChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.OpenAI],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> OpenAIChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Qwen],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> QwenChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Yi],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> YiChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.ZhiPuAI],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> ZhiPuAIChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Baichuan],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> BaichuanChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.StepFun],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> StepFunChatClient: ...
+
+
+def create_chat_client(
+    backend: BackendType,
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> BaseChatClient:
+    if backend not in BackendMap["sync"]:
+        raise ValueError(f"Unsupported backend: {backend}")
+
+    ClientClass = BackendMap["sync"][backend]
+    if model is None:
+        model = ClientClass.DEFAULT_MODEL
+    return ClientClass(
+        model=model,
+        stream=stream,
+        temperature=temperature,
+        context_length_control=context_length_control,
+        random_endpoint=random_endpoint,
+        endpoint_id=endpoint_id,
+        http_client=http_client,
+        **kwargs,
+    )
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Anthropic],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncAnthropicChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.DeepSeek],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncDeepSeekChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Gemini],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncGeminiChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Groq],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncGroqChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Local],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncLocalChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.MiniMax],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncMiniMaxChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Mistral],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncMistralChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Moonshot],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncMoonshotChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.OpenAI],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncOpenAIChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Qwen],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncQwenChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Yi],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncYiChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.ZhiPuAI],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncZhiPuAIChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Baichuan],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncBaichuanChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.StepFun],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncStepFunChatClient: ...
+
+
+def create_async_chat_client(
+    backend: BackendType,
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> BaseAsyncChatClient:
+    if backend not in BackendMap["async"]:
+        raise ValueError(f"Unsupported backend: {backend}")
+
+    ClientClass = BackendMap["async"][backend]
+    if model is None:
+        model = ClientClass.DEFAULT_MODEL
+    return ClientClass(
+        model=model,
+        stream=stream,
+        temperature=temperature,
+        context_length_control=context_length_control,
+        random_endpoint=random_endpoint,
+        endpoint_id=endpoint_id,
+        http_client=http_client,
+        **kwargs,
+    )
+
+
+__all__ = [
+    "BackendType",
+    "format_messages",
+    "get_token_counts",
+    "create_chat_client",
+    "create_async_chat_client",
+    "get_message_token_counts",
+    "ToolCallContentProcessor",
+]
```
vectorvein-0.1.28/src/vectorvein/chat_clients/__init__.py (removed, 129 lines):

```diff
@@ -1,129 +0,0 @@
-# @Author: Bi Ying
-# @Date: 2024-07-26 14:48:55
-import httpx
-
-from .base_client import BaseChatClient, BaseAsyncChatClient
-
-from .yi_client import YiChatClient, AsyncYiChatClient
-from .groq_client import GroqChatClient, AsyncGroqChatClient
-from .qwen_client import QwenChatClient, AsyncQwenChatClient
-from .local_client import LocalChatClient, AsyncLocalChatClient
-from .gemini_client import GeminiChatClient, AsyncGeminiChatClient
-from .openai_client import OpenAIChatClient, AsyncOpenAIChatClient
-from .zhipuai_client import ZhiPuAIChatClient, AsyncZhiPuAIChatClient
-from .minimax_client import MiniMaxChatClient, AsyncMiniMaxChatClient
-from .mistral_client import MistralChatClient, AsyncMistralChatClient
-from .stepfun_client import StepFunChatClient, AsyncStepFunChatClient
-from .baichuan_client import BaichuanChatClient, AsyncBaichuanChatClient
-from .moonshot_client import MoonshotChatClient, AsyncMoonshotChatClient
-from .deepseek_client import DeepSeekChatClient, AsyncDeepSeekChatClient
-
-from ..types import defaults as defs
-from ..types.enums import BackendType, ContextLengthControlType
-from .anthropic_client import AnthropicChatClient, AsyncAnthropicChatClient
-from .utils import format_messages, get_token_counts, get_message_token_counts, ToolCallContentProcessor
-
-
-BackendMap = {
-    "sync": {
-        BackendType.Anthropic: AnthropicChatClient,
-        BackendType.DeepSeek: DeepSeekChatClient,
-        BackendType.Gemini: GeminiChatClient,
-        BackendType.Groq: GroqChatClient,
-        BackendType.Local: LocalChatClient,
-        BackendType.MiniMax: MiniMaxChatClient,
-        BackendType.Mistral: MistralChatClient,
-        BackendType.Moonshot: MoonshotChatClient,
-        BackendType.OpenAI: OpenAIChatClient,
-        BackendType.Qwen: QwenChatClient,
-        BackendType.Yi: YiChatClient,
-        BackendType.ZhiPuAI: ZhiPuAIChatClient,
-        BackendType.Baichuan: BaichuanChatClient,
-        BackendType.StepFun: StepFunChatClient,
-    },
-    "async": {
-        BackendType.Anthropic: AsyncAnthropicChatClient,
-        BackendType.DeepSeek: AsyncDeepSeekChatClient,
-        BackendType.Gemini: AsyncGeminiChatClient,
-        BackendType.Groq: AsyncGroqChatClient,
-        BackendType.Local: AsyncLocalChatClient,
-        BackendType.MiniMax: AsyncMiniMaxChatClient,
-        BackendType.Mistral: AsyncMistralChatClient,
-        BackendType.Moonshot: AsyncMoonshotChatClient,
-        BackendType.OpenAI: AsyncOpenAIChatClient,
-        BackendType.Qwen: AsyncQwenChatClient,
-        BackendType.Yi: AsyncYiChatClient,
-        BackendType.ZhiPuAI: AsyncZhiPuAIChatClient,
-        BackendType.Baichuan: AsyncBaichuanChatClient,
-        BackendType.StepFun: AsyncStepFunChatClient,
-    },
-}
-
-
-def create_chat_client(
-    backend: BackendType,
-    model: str | None = None,
-    stream: bool = False,
-    temperature: float = 0.7,
-    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
-    random_endpoint: bool = True,
-    endpoint_id: str = "",
-    http_client: httpx.Client | None = None,
-    **kwargs,
-) -> BaseChatClient:
-    if backend not in BackendMap["sync"]:
-        raise ValueError(f"Unsupported backend: {backend}")
-
-    ClientClass = BackendMap["sync"][backend]
-    if model is None:
-        model = ClientClass.DEFAULT_MODEL
-    return BackendMap["sync"][backend](
-        model=model,
-        stream=stream,
-        temperature=temperature,
-        context_length_control=context_length_control,
-        random_endpoint=random_endpoint,
-        endpoint_id=endpoint_id,
-        http_client=http_client,
-        **kwargs,
-    )
-
-
-def create_async_chat_client(
-    backend: BackendType,
-    model: str | None = None,
-    stream: bool = False,
-    temperature: float = 0.7,
-    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
-    random_endpoint: bool = True,
-    endpoint_id: str = "",
-    http_client: httpx.AsyncClient | None = None,
-    **kwargs,
-) -> BaseAsyncChatClient:
-    if backend not in BackendMap["async"]:
-        raise ValueError(f"Unsupported backend: {backend}")
-
-    ClientClass = BackendMap["async"][backend]
-    if model is None:
-        model = ClientClass.DEFAULT_MODEL
-    return BackendMap["async"][backend](
-        model=model,
-        stream=stream,
-        temperature=temperature,
-        context_length_control=context_length_control,
-        random_endpoint=random_endpoint,
-        endpoint_id=endpoint_id,
-        http_client=http_client,
-        **kwargs,
-    )
-
-
-__all__ = [
-    "BackendType",
-    "format_messages",
-    "get_token_counts",
-    "create_chat_client",
-    "create_async_chat_client",
-    "get_message_token_counts",
-    "ToolCallContentProcessor",
-]
```
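Taken together, the 0.1.29 rewrite keeps the runtime behaviour of the two factory functions (backend lookup, default-model fallback, client construction) but adds per-backend `@overload` signatures and instantiates the looked-up `ClientClass` directly, so static type checkers can narrow the returned value to the concrete client class rather than the generic base class. Below is a minimal usage sketch, assuming the package is installed and any endpoint/settings configuration it requires is already in place; it uses only names exported in `__all__`, and no client methods are shown because they are not part of this diff.

```python
# Hedged usage sketch, not taken from the package's documentation.
from vectorvein.chat_clients import (
    BackendType,
    create_chat_client,
    create_async_chat_client,
)

# With 0.1.29 a type checker infers OpenAIChatClient here via the new
# @overload signatures; under 0.1.28 it could only infer BaseChatClient.
openai_client = create_chat_client(BackendType.OpenAI, temperature=0.7)

# The async factory mirrors the sync one and takes an optional
# httpx.AsyncClient as its http_client argument.
anthropic_client = create_async_chat_client(BackendType.Anthropic, stream=True)
```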
All remaining files listed above are unchanged between 0.1.28 and 0.1.29 (their paths differ only by the renamed top-level directory).