vectorvein 0.1.28__py3-none-any.whl → 0.1.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/__init__.py +396 -3
- {vectorvein-0.1.28.dist-info → vectorvein-0.1.29.dist-info}/METADATA +1 -1
- {vectorvein-0.1.28.dist-info → vectorvein-0.1.29.dist-info}/RECORD +5 -5
- {vectorvein-0.1.28.dist-info → vectorvein-0.1.29.dist-info}/WHEEL +0 -0
- {vectorvein-0.1.28.dist-info → vectorvein-0.1.29.dist-info}/entry_points.txt +0 -0
vectorvein/chat_clients/__init__.py:

```diff
@@ -1,6 +1,7 @@
 # @Author: Bi Ying
 # @Date: 2024-07-26 14:48:55
 import httpx
+from typing import overload, Literal

 from .base_client import BaseChatClient, BaseAsyncChatClient

```
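The new `typing` imports support the `@overload` declarations added further down: one `Literal`-typed overload per backend, for both the sync and async factories.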
```diff
@@ -23,7 +24,7 @@ from ..types.enums import BackendType, ContextLengthControlType
 from .anthropic_client import AnthropicChatClient, AsyncAnthropicChatClient
 from .utils import format_messages, get_token_counts, get_message_token_counts, ToolCallContentProcessor

-
+# Backend mapping
 BackendMap = {
     "sync": {
         BackendType.Anthropic: AnthropicChatClient,
```
```diff
@@ -60,6 +61,202 @@ BackendMap = {
 }


+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Anthropic],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> AnthropicChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.DeepSeek],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> DeepSeekChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Gemini],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> GeminiChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Groq],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> GroqChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Local],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> LocalChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.MiniMax],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> MiniMaxChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Mistral],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> MistralChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Moonshot],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> MoonshotChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.OpenAI],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> OpenAIChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Qwen],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> QwenChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Yi],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> YiChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.ZhiPuAI],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> ZhiPuAIChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.Baichuan],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> BaichuanChatClient: ...
+
+
+@overload
+def create_chat_client(
+    backend: Literal[BackendType.StepFun],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.Client | None = None,
+    **kwargs,
+) -> StepFunChatClient: ...
+
+
 def create_chat_client(
     backend: BackendType,
     model: str | None = None,
```
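With these overloads, calling the factory with a specific `BackendType` member gives type checkers a precise return type instead of a union over every client class. A minimal sketch of the effect (assumes vectorvein 0.1.29 is installed and that the client constructs with default endpoint settings, which this diff does not show):

```python
# Minimal sketch: the Literal overloads narrow the factory's return type.
# Runtime behavior is unchanged; the payoff is at type-check time.
from vectorvein.chat_clients import create_chat_client
from vectorvein.types.enums import BackendType

client = create_chat_client(backend=BackendType.OpenAI)
# mypy/pyright now infer OpenAIChatClient here, so backend-specific
# attributes type-check without a cast.
print(type(client).__name__)  # "OpenAIChatClient"
```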
```diff
@@ -77,7 +274,7 @@ def create_chat_client(
     ClientClass = BackendMap["sync"][backend]
     if model is None:
         model = ClientClass.DEFAULT_MODEL
-    return
+    return ClientClass(
         model=model,
         stream=stream,
         temperature=temperature,
```
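This is the functional fix in 0.1.29: in 0.1.28 the factory body ended in a bare `return`, leaving the `ClientClass(...)` construction below it orphaned, so the factory could not hand back a working client. A quick check under the new version (hypothetical snippet, same assumptions as above):

```python
# On 0.1.29 the sync factory returns the constructed client instance;
# under 0.1.28 the bare `return` broke this call path.
from vectorvein.chat_clients import create_chat_client
from vectorvein.types.enums import BackendType

client = create_chat_client(backend=BackendType.Anthropic)
assert client is not None
```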
```diff
@@ -89,6 +286,202 @@ def create_chat_client(
     )


+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Anthropic],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncAnthropicChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.DeepSeek],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncDeepSeekChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Gemini],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncGeminiChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Groq],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncGroqChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Local],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncLocalChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.MiniMax],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncMiniMaxChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Mistral],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncMistralChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Moonshot],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncMoonshotChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.OpenAI],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncOpenAIChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Qwen],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncQwenChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Yi],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncYiChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.ZhiPuAI],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncZhiPuAIChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.Baichuan],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncBaichuanChatClient: ...
+
+
+@overload
+def create_async_chat_client(
+    backend: Literal[BackendType.StepFun],
+    model: str | None = None,
+    stream: bool = False,
+    temperature: float = 0.7,
+    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
+    random_endpoint: bool = True,
+    endpoint_id: str = "",
+    http_client: httpx.AsyncClient | None = None,
+    **kwargs,
+) -> AsyncStepFunChatClient: ...
+
+
 def create_async_chat_client(
     backend: BackendType,
     model: str | None = None,
```
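The async factory gains the matching set of overloads, typed against `httpx.AsyncClient` and the `Async*ChatClient` classes. The same narrowing applies (sketch under the same assumptions as above; no request is issued):

```python
# The async factory narrows identically; constructing the client is an
# ordinary synchronous call.
from vectorvein.chat_clients import create_async_chat_client
from vectorvein.types.enums import BackendType

aclient = create_async_chat_client(backend=BackendType.Gemini)
print(type(aclient).__name__)  # "AsyncGeminiChatClient"
```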
```diff
@@ -106,7 +499,7 @@ def create_async_chat_client(
     ClientClass = BackendMap["async"][backend]
     if model is None:
         model = ClientClass.DEFAULT_MODEL
-    return
+    return ClientClass(
         model=model,
         stream=stream,
         temperature=temperature,
```
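`create_async_chat_client` had the identical bare-`return` bug; the fix mirrors the sync one.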
{vectorvein-0.1.28.dist-info → vectorvein-0.1.29.dist-info}/RECORD:

```diff
@@ -1,8 +1,8 @@
-vectorvein-0.1.
-vectorvein-0.1.
-vectorvein-0.1.
+vectorvein-0.1.29.dist-info/METADATA,sha256=Azvf3VhV-V6iJzO4lsm_85TFVZfvQirxRiSXQSUXVeY,502
+vectorvein-0.1.29.dist-info/WHEEL,sha256=Vza3XR51HW1KmFP0iIMUVYIvz0uQuKJpIXKYOBGQyFQ,90
+vectorvein-0.1.29.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vectorvein/chat_clients/__init__.py,sha256=
+vectorvein/chat_clients/__init__.py,sha256=4AfQql41X0705EstkbV783vT6N4bBrJhZ5DgMwRrt5s,16018
 vectorvein/chat_clients/anthropic_client.py,sha256=h82GxBi7h22B7leBuPofwBstxH_c12tEgGjpnKg6UDc,25007
 vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
 vectorvein/chat_clients/base_client.py,sha256=wxh7WkzFG4cD4I4t4e6RGe1KiFZc8Z5llh2iVblXEZE,8415
```
```diff
@@ -27,4 +27,4 @@ vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E
 vectorvein/types/llm_parameters.py,sha256=N6RQ8tqO1RCywMFRWPooffeAEPd9x3JW6Bl4UgQtF5I,4379
 vectorvein/utilities/media_processing.py,sha256=BujciRmw1GMmc3ELRvafL8STcy6r5b2rVnh27-uA7so,2256
 vectorvein/utilities/retry.py,sha256=9ePuJdeUUGx-qMWfaFxmlOvG_lQPwCQ4UB1z3Edlo34,993
-vectorvein-0.1.
+vectorvein-0.1.29.dist-info/RECORD,,
```
WHEEL and entry_points.txt: file without changes.