vectorvein 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -1,582 +1,582 @@
# @Author: Bi Ying
# @Date: 2024-07-26 14:48:55
import httpx
from typing import overload, Literal

from .base_client import BaseChatClient, BaseAsyncChatClient

from .yi_client import YiChatClient, AsyncYiChatClient
from .xai_client import XAIChatClient, AsyncXAIChatClient
from .groq_client import GroqChatClient, AsyncGroqChatClient
from .qwen_client import QwenChatClient, AsyncQwenChatClient
from .local_client import LocalChatClient, AsyncLocalChatClient
from .gemini_client import GeminiChatClient, AsyncGeminiChatClient
from .openai_client import OpenAIChatClient, AsyncOpenAIChatClient
from .zhipuai_client import ZhiPuAIChatClient, AsyncZhiPuAIChatClient
from .minimax_client import MiniMaxChatClient, AsyncMiniMaxChatClient
from .mistral_client import MistralChatClient, AsyncMistralChatClient
from .stepfun_client import StepFunChatClient, AsyncStepFunChatClient
from .baichuan_client import BaichuanChatClient, AsyncBaichuanChatClient
from .moonshot_client import MoonshotChatClient, AsyncMoonshotChatClient
from .deepseek_client import DeepSeekChatClient, AsyncDeepSeekChatClient

from ..types import defaults as defs
from ..types.llm_parameters import NOT_GIVEN, NotGiven
from ..types.enums import BackendType, ContextLengthControlType
from .anthropic_client import AnthropicChatClient, AsyncAnthropicChatClient
from .utils import format_messages, get_token_counts, get_message_token_counts, ToolCallContentProcessor

# Backend mapping
BackendMap = {
    "sync": {
        BackendType.Anthropic: AnthropicChatClient,
        BackendType.DeepSeek: DeepSeekChatClient,
        BackendType.Gemini: GeminiChatClient,
        BackendType.Groq: GroqChatClient,
        BackendType.Local: LocalChatClient,
        BackendType.MiniMax: MiniMaxChatClient,
        BackendType.Mistral: MistralChatClient,
        BackendType.Moonshot: MoonshotChatClient,
        BackendType.OpenAI: OpenAIChatClient,
        BackendType.Qwen: QwenChatClient,
        BackendType.Yi: YiChatClient,
        BackendType.ZhiPuAI: ZhiPuAIChatClient,
        BackendType.Baichuan: BaichuanChatClient,
        BackendType.StepFun: StepFunChatClient,
        BackendType.XAI: XAIChatClient,
    },
    "async": {
        BackendType.Anthropic: AsyncAnthropicChatClient,
        BackendType.DeepSeek: AsyncDeepSeekChatClient,
        BackendType.Gemini: AsyncGeminiChatClient,
        BackendType.Groq: AsyncGroqChatClient,
        BackendType.Local: AsyncLocalChatClient,
        BackendType.MiniMax: AsyncMiniMaxChatClient,
        BackendType.Mistral: AsyncMistralChatClient,
        BackendType.Moonshot: AsyncMoonshotChatClient,
        BackendType.OpenAI: AsyncOpenAIChatClient,
        BackendType.Qwen: AsyncQwenChatClient,
        BackendType.Yi: AsyncYiChatClient,
        BackendType.ZhiPuAI: AsyncZhiPuAIChatClient,
        BackendType.Baichuan: AsyncBaichuanChatClient,
        BackendType.StepFun: AsyncStepFunChatClient,
        BackendType.XAI: AsyncXAIChatClient,
    },
}


@overload
def create_chat_client(
    backend: Literal[BackendType.Anthropic],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> AnthropicChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.DeepSeek],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> DeepSeekChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.Gemini],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> GeminiChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.Groq],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> GroqChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.Local],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> LocalChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.MiniMax],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> MiniMaxChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.Mistral],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> MistralChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.Moonshot],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> MoonshotChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.OpenAI],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> OpenAIChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.Qwen],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> QwenChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.Yi],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> YiChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.ZhiPuAI],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> ZhiPuAIChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.Baichuan],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> BaichuanChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.StepFun],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> StepFunChatClient: ...


@overload
def create_chat_client(
    backend: Literal[BackendType.XAI],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> XAIChatClient: ...


@overload
def create_chat_client(
    backend: BackendType,
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> BaseChatClient: ...


def create_chat_client(
    backend: BackendType,
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.Client | None = None,
    **kwargs,
) -> BaseChatClient:
    if backend not in BackendMap["sync"]:
        raise ValueError(f"Unsupported backend: {backend}")

    ClientClass = BackendMap["sync"][backend]
    if model is None:
        model = ClientClass.DEFAULT_MODEL
    return ClientClass(
        model=model,
        stream=stream,
        temperature=temperature,
        context_length_control=context_length_control,
        random_endpoint=random_endpoint,
        endpoint_id=endpoint_id,
        http_client=http_client,
        **kwargs,
    )


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Anthropic],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncAnthropicChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.DeepSeek],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncDeepSeekChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Gemini],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncGeminiChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Groq],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncGroqChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Local],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncLocalChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.MiniMax],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncMiniMaxChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Mistral],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncMistralChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Moonshot],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncMoonshotChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.OpenAI],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncOpenAIChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Qwen],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncQwenChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Yi],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncYiChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.ZhiPuAI],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncZhiPuAIChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.Baichuan],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncBaichuanChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.StepFun],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncStepFunChatClient: ...


@overload
def create_async_chat_client(
    backend: Literal[BackendType.XAI],
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> AsyncXAIChatClient: ...


@overload
def create_async_chat_client(
    backend: BackendType,
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> BaseAsyncChatClient: ...


def create_async_chat_client(
    backend: BackendType,
    model: str | None = None,
    stream: bool = False,
    temperature: float | None | NotGiven = NOT_GIVEN,
    context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
    random_endpoint: bool = True,
    endpoint_id: str = "",
    http_client: httpx.AsyncClient | None = None,
    **kwargs,
) -> BaseAsyncChatClient:
    if backend not in BackendMap["async"]:
        raise ValueError(f"Unsupported backend: {backend}")

    ClientClass = BackendMap["async"][backend]
    if model is None:
        model = ClientClass.DEFAULT_MODEL
    return ClientClass(
        model=model,
        stream=stream,
        temperature=temperature,
        context_length_control=context_length_control,
        random_endpoint=random_endpoint,
        endpoint_id=endpoint_id,
        http_client=http_client,
        **kwargs,
    )


__all__ = [
    "BackendType",
    "format_messages",
    "get_token_counts",
    "create_chat_client",
    "create_async_chat_client",
    "get_message_token_counts",
    "ToolCallContentProcessor",
]
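
The hunk above defines two factory functions, create_chat_client and create_async_chat_client, which dispatch on BackendType through BackendMap and fall back to each client class's DEFAULT_MODEL when no model is given. The snippet below is a minimal usage sketch based only on the signatures shown in the diff; the vectorvein.chat_clients import path is an assumption (the wheel layout is not shown here), and constructing a client may additionally require endpoint configuration handled elsewhere in the package.

    # Hypothetical usage sketch; the import path is an assumption, not confirmed by this diff.
    import httpx

    from vectorvein.chat_clients import (
        BackendType,
        create_chat_client,
        create_async_chat_client,
    )

    # Sync client: omitting `model` falls back to the selected client class's DEFAULT_MODEL.
    openai_client = create_chat_client(backend=BackendType.OpenAI, stream=False, temperature=0.7)

    # Async client: same keyword arguments, but `http_client` must be an httpx.AsyncClient.
    anthropic_client = create_async_chat_client(
        backend=BackendType.Anthropic,
        http_client=httpx.AsyncClient(),  # optional; the caller is responsible for closing it
    )

    # A backend missing from BackendMap raises ValueError("Unsupported backend: ...").

Because the overloads narrow the return type per backend, type checkers see openai_client as OpenAIChatClient and anthropic_client as AsyncAnthropicChatClient rather than the generic base classes.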