vectorvein 0.1.66__py3-none-any.whl → 0.1.68__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/anthropic_client.py +14 -6
- vectorvein/chat_clients/base_client.py +18 -6
- vectorvein/chat_clients/gemini_client.py +14 -6
- vectorvein/chat_clients/openai_compatible_client.py +17 -10
- vectorvein/chat_clients/utils.py +2 -2
- vectorvein/types/defaults.py +18 -2
- {vectorvein-0.1.66.dist-info → vectorvein-0.1.68.dist-info}/METADATA +1 -1
- {vectorvein-0.1.66.dist-info → vectorvein-0.1.68.dist-info}/RECORD +10 -10
- {vectorvein-0.1.66.dist-info → vectorvein-0.1.68.dist-info}/WHEEL +0 -0
- {vectorvein-0.1.66.dist-info → vectorvein-0.1.68.dist-info}/entry_points.txt +0 -0
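At a glance, 0.1.68 makes the `create_completion`/`create_stream` parameters keyword-only (a `*,` marker added to every signature), tightens the `stream` overload annotations, forwards `top_p` through `create_stream`, switches the OpenAI-compatible clients from `max_tokens` to `max_completion_tokens`, and registers the experimental `gemini-2.0-flash-exp`, `gemini-2.0-flash-thinking-exp-1219`, and `gemini-exp-1206` models.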
vectorvein/chat_clients/anthropic_client.py
CHANGED
@@ -283,6 +283,7 @@ class AnthropicChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
@@ -301,9 +302,10 @@ class AnthropicChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: Literal[True]
+        stream: Literal[True],
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -319,9 +321,10 @@ class AnthropicChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: bool
+        stream: bool,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -336,9 +339,10 @@ class AnthropicChatClient(BaseChatClient):
 
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream:
+        stream: Literal[False] | Literal[True] = False,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -690,6 +694,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
@@ -708,9 +713,10 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: Literal[True]
+        stream: Literal[True],
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -726,9 +732,10 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: bool
+        stream: bool,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -743,9 +750,10 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
 
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream:
+        stream: Literal[False] | Literal[True] = False,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
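The recurring edit above is the bare `*` marker, which makes every parameter after it keyword-only. A minimal sketch of what this enforces for callers (a toy signature, not the real client method, which takes many more parameters):

```python
from typing import Literal

def create_completion(
    *,
    messages: list,
    model: str | None = None,
    stream: Literal[False] | Literal[True] = False,
) -> dict:
    # Toy body; the real method dispatches to the Anthropic API.
    return {"model": model, "stream": stream, "n_messages": len(messages)}

create_completion(messages=[{"role": "user", "content": "hi"}])  # OK
# create_completion([{"role": "user", "content": "hi"}])  # TypeError: takes 0 positional arguments
```

Existing positional calls therefore need to switch to keyword form when upgrading.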
vectorvein/chat_clients/base_client.py
CHANGED
@@ -73,6 +73,7 @@ class BaseChatClient(ABC):
     @abstractmethod
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
@@ -92,9 +93,10 @@ class BaseChatClient(ABC):
     @abstractmethod
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: Literal[True]
+        stream: Literal[True],
         temperature: float = 0.7,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -111,9 +113,10 @@ class BaseChatClient(ABC):
     @abstractmethod
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: bool
+        stream: bool,
         temperature: float = 0.7,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -129,9 +132,10 @@ class BaseChatClient(ABC):
     @abstractmethod
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream:
+        stream: Literal[False] | Literal[True] = False,
         temperature: float = 0.7,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -146,6 +150,7 @@ class BaseChatClient(ABC):
 
     def create_stream(
         self,
+        *,
         messages: list,
         model: str | None = None,
         temperature: float = 0.7,
@@ -167,6 +172,7 @@ class BaseChatClient(ABC):
             tool_choice=tool_choice,
             response_format=response_format,
             stream_options=stream_options,
+            top_p=top_p,
             **kwargs,
         )
 
@@ -223,6 +229,7 @@ class BaseAsyncChatClient(ABC):
     @abstractmethod
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
@@ -242,9 +249,10 @@ class BaseAsyncChatClient(ABC):
     @abstractmethod
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: Literal[True]
+        stream: Literal[True],
         temperature: float = 0.7,
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
@@ -261,9 +269,10 @@ class BaseAsyncChatClient(ABC):
     @abstractmethod
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: bool
+        stream: bool,
         temperature: float = 0.7,
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
@@ -279,9 +288,10 @@ class BaseAsyncChatClient(ABC):
     @abstractmethod
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream:
+        stream: Literal[False] | Literal[True] = False,
         temperature: float = 0.7,
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
@@ -296,6 +306,7 @@ class BaseAsyncChatClient(ABC):
 
     async def create_stream(
         self,
+        *,
         messages: list,
         model: str | None = None,
         temperature: float = 0.7,
@@ -317,5 +328,6 @@ class BaseAsyncChatClient(ABC):
             tool_choice=tool_choice,
             response_format=response_format,
             stream_options=stream_options,
+            top_p=top_p,
             **kwargs,
         )
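The `Literal[False]`/`Literal[True]` overloads let a type checker pick the return type from the `stream` argument alone, and because keyword-only parameters may be required without defaults, the `stream: Literal[True],` overload can now demand an explicit `stream=True`. A self-contained sketch of the pattern, with plain `dict`/iterator types standing in for the real response and stream objects:

```python
from typing import Iterator, Literal, overload

@overload
def create_completion(*, stream: Literal[False] = False) -> dict: ...
@overload
def create_completion(*, stream: Literal[True]) -> Iterator[dict]: ...
@overload
def create_completion(*, stream: bool) -> dict | Iterator[dict]: ...

def create_completion(*, stream: Literal[False] | Literal[True] = False):
    # Single runtime implementation shared by all overloads.
    if stream:
        return iter([{"delta": "hel"}, {"delta": "lo"}])  # chunk iterator
    return {"content": "hello"}  # one complete response

chunks = create_completion(stream=True)  # checker infers Iterator[dict]
response = create_completion()           # checker infers dict
```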
vectorvein/chat_clients/gemini_client.py
CHANGED
@@ -56,6 +56,7 @@ class GeminiChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
@@ -74,9 +75,10 @@ class GeminiChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: Literal[True]
+        stream: Literal[True],
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -92,9 +94,10 @@ class GeminiChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: bool
+        stream: bool,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -109,9 +112,10 @@ class GeminiChatClient(BaseChatClient):
 
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream:
+        stream: Literal[False] | Literal[True] = False,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -307,6 +311,7 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
@@ -325,9 +330,10 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: Literal[True]
+        stream: Literal[True],
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -343,9 +349,10 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: bool
+        stream: bool,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -360,9 +367,10 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
 
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream:
+        stream: Literal[False] | Literal[True] = False,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
vectorvein/chat_clients/openai_compatible_client.py
CHANGED
@@ -91,6 +91,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
@@ -109,9 +110,10 @@ class OpenAICompatibleChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: Literal[True]
+        stream: Literal[True],
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -127,9 +129,10 @@ class OpenAICompatibleChatClient(BaseChatClient):
     @overload
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: bool
+        stream: bool,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -144,9 +147,10 @@ class OpenAICompatibleChatClient(BaseChatClient):
 
     def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream:
+        stream: Literal[False] | Literal[True] = False,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -218,7 +222,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             messages=messages,
             stream=True,
             temperature=self.temperature,
-            max_tokens=max_tokens,
+            max_completion_tokens=max_tokens,
             top_p=top_p,
             **_stream_options_params,
             **self.response_format,
@@ -273,7 +277,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             messages=messages,
             stream=False,
             temperature=self.temperature,
-            max_tokens=max_tokens,
+            max_completion_tokens=max_tokens,
             top_p=top_p,
             **self.response_format,
             **tools_params,
@@ -359,6 +363,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
@@ -377,9 +382,10 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: Literal[True]
+        stream: Literal[True],
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -395,9 +401,10 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
     @overload
     async def create_completion(
         self,
+        *,
         messages: list,
         model: str | None = None,
-        stream: bool
+        stream: bool,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -414,7 +421,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         self,
         messages: list,
         model: str | None = None,
-        stream:
+        stream: Literal[False] | Literal[True] = False,
         temperature: float | None = None,
         max_tokens: int | None = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
@@ -486,7 +493,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             messages=messages,
             stream=self.stream,
             temperature=self.temperature,
-            max_tokens=max_tokens,
+            max_completion_tokens=max_tokens,
             top_p=top_p,
             **_stream_options_params,
             **self.response_format,
@@ -542,7 +549,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             messages=messages,
             stream=self.stream,
             temperature=self.temperature,
-            max_tokens=max_tokens,
+            max_completion_tokens=max_tokens,
             top_p=top_p,
             **self.response_format,
             **tools_params,
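Besides the keyword-only signatures, the OpenAI-compatible clients now send `max_completion_tokens` instead of `max_tokens` in the request payload, matching the parameter OpenAI introduced for its newer models. A hedged sketch of the idea; `build_request_kwargs` is a hypothetical helper, not a function in this package:

```python
from typing import Any

def build_request_kwargs(
    *,
    model: str,
    messages: list,
    max_tokens: int | None,
    stream: bool = False,
    temperature: float = 0.7,
    top_p: float = 1.0,
) -> dict[str, Any]:
    kwargs: dict[str, Any] = {
        "model": model,
        "messages": messages,
        "stream": stream,
        "temperature": temperature,
        "top_p": top_p,
    }
    if max_tokens is not None:
        # 0.1.68 sends the newer parameter name instead of max_tokens.
        kwargs["max_completion_tokens"] = max_tokens
    return kwargs

print(build_request_kwargs(model="gpt-4o", messages=[], max_tokens=256))
```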
vectorvein/chat_clients/utils.py
CHANGED
@@ -201,8 +201,8 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             result = response.json()
             return result["data"]["total_tokens"]
     elif model.startswith("gemini"):
-        # TODO: gemini-exp-
-        if model
+        # TODO: gemini-exp-1206 is not supported yet; use gemini-1.5-flash instead
+        if model in ("gemini-exp-1206", "gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp-1219"):
             model = "gemini-1.5-flash"
 
         model_setting = settings.gemini.models[model]
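The `utils.py` change aliases the experimental Gemini models to `gemini-1.5-flash` before the settings lookup, since they have no token-counting entry of their own yet. The same fallback expressed as a lookup table (`resolve_tokenizer_model` is a hypothetical helper, not the package's actual code):

```python
# Experimental Gemini models borrow the gemini-1.5-flash tokenizer settings.
_GEMINI_TOKENIZER_ALIASES = {
    "gemini-exp-1206": "gemini-1.5-flash",
    "gemini-2.0-flash-exp": "gemini-1.5-flash",
    "gemini-2.0-flash-thinking-exp-1219": "gemini-1.5-flash",
}

def resolve_tokenizer_model(model: str) -> str:
    return _GEMINI_TOKENIZER_ALIASES.get(model, model)

assert resolve_tokenizer_model("gemini-2.0-flash-exp") == "gemini-1.5-flash"
assert resolve_tokenizer_model("gemini-1.5-pro") == "gemini-1.5-pro"
```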
vectorvein/types/defaults.py
CHANGED
@@ -659,8 +659,24 @@ GEMINI_MODELS: Final[Dict[str, Dict[str, Any]]] = {
         "response_format_available": True,
         "native_multimodal": True,
     },
-    "gemini-exp
-        "id": "gemini-exp
+    "gemini-2.0-flash-exp": {
+        "id": "gemini-2.0-flash-exp",
+        "context_length": 1048576,
+        "max_output_tokens": 8192,
+        "function_call_available": True,
+        "response_format_available": True,
+        "native_multimodal": True,
+    },
+    "gemini-2.0-flash-thinking-exp-1219": {
+        "id": "gemini-2.0-flash-thinking-exp-1219",
+        "context_length": 1048576,
+        "max_output_tokens": 8192,
+        "function_call_available": True,
+        "response_format_available": True,
+        "native_multimodal": True,
+    },
+    "gemini-exp-1206": {
+        "id": "gemini-exp-1206",
         "context_length": 32767,
         "function_call_available": True,
         "response_format_available": True,
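Both new 2.0 entries advertise a 1,048,576-token context window and an 8,192-token output cap, using the same schema as the existing `GEMINI_MODELS` records. A hypothetical lookup showing how a caller might consume these defaults (the dict literal below copies one entry from the diff):

```python
GEMINI_MODELS = {
    "gemini-2.0-flash-exp": {
        "id": "gemini-2.0-flash-exp",
        "context_length": 1048576,
        "max_output_tokens": 8192,
        "function_call_available": True,
        "response_format_available": True,
        "native_multimodal": True,
    },
}

def clamp_output_tokens(model: str, requested: int) -> int:
    # Clamp a requested completion budget to the model's advertised cap.
    limit = GEMINI_MODELS[model].get("max_output_tokens", requested)
    return min(requested, limit)

print(clamp_output_tokens("gemini-2.0-flash-exp", 32000))  # -> 8192
```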
{vectorvein-0.1.66.dist-info → vectorvein-0.1.68.dist-info}/RECORD
CHANGED
@@ -1,24 +1,24 @@
-vectorvein-0.1.
-vectorvein-0.1.
-vectorvein-0.1.
+vectorvein-0.1.68.dist-info/METADATA,sha256=YshyXDUndjpayVQce8GU9I0oa1oUnj5_56BtQmACbzs,641
+vectorvein-0.1.68.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+vectorvein-0.1.68.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/__init__.py,sha256=Oev7Lv1DIEWCMD-2Pm7e2cwzX7JFQTnIK-j6o4iUuyQ,17725
-vectorvein/chat_clients/anthropic_client.py,sha256=
+vectorvein/chat_clients/anthropic_client.py,sha256=shRwgpbynExqX8l370_MTT7cGVhD4pE6VdPuf2AbI1E,40017
 vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
-vectorvein/chat_clients/base_client.py,sha256=
+vectorvein/chat_clients/base_client.py,sha256=Hl6xNnUnM_oS1uIg1sgyxb95aD6-yFxG51nTsFz8Whs,10671
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
-vectorvein/chat_clients/gemini_client.py,sha256=
+vectorvein/chat_clients/gemini_client.py,sha256=ROVwflFyczkOxbJieqQHPTJk6XogZDKl1TFAvwZVyMU,21192
 vectorvein/chat_clients/groq_client.py,sha256=Uow4pgdmFi93ZQSoOol2-0PhhqkW-S0XuSldvppz5U4,498
 vectorvein/chat_clients/local_client.py,sha256=55nOsxzqUf79q3Y14MKROA71zxhsT7p7FsDZ89rts2M,422
 vectorvein/chat_clients/minimax_client.py,sha256=uNYz3ccJOhTAgzkDNvWfM4_LaBaOpHjrne1YNieba28,20122
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=-
+vectorvein/chat_clients/openai_compatible_client.py,sha256=-JTJqOdHjlWumCmRRr-GbFWu2TByEYKigTIVXveOS-8,23450
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
-vectorvein/chat_clients/utils.py,sha256=
+vectorvein/chat_clients/utils.py,sha256=cB14q0Cyc8idBRII4UcgUKwnpZL2Vwim4Q2YO1gvc3w,26892
 vectorvein/chat_clients/xai_client.py,sha256=eLFJJrNRJ-ni3DpshODcr3S1EJQLbhVwxyO1E54LaqM,491
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
@@ -26,11 +26,11 @@ vectorvein/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/server/token_server.py,sha256=36F9PKSNOX8ZtYBXY_l-76GQTpUSmQ2Y8EMy1H7wtdQ,1353
 vectorvein/settings/__init__.py,sha256=g01y74x0k2JEAqNpRGG0PDs0NTULjOAZV6HRhydPX1c,3874
 vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vectorvein/types/defaults.py,sha256=
+vectorvein/types/defaults.py,sha256=4PdJ_MmVEdGlCtpSySM2thFHPVoX6pDCDbDj9uleOuY,25051
 vectorvein/types/enums.py,sha256=7KTJSVtQueImmbr1fSwv3rQVtc0RyMWXJmoE2tDOaso,1667
 vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E,69
 vectorvein/types/llm_parameters.py,sha256=g2Q0RKMC2vOcMI0tFpZ53xfVSfC8MLoK0YntPqce49U,5360
 vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/utilities/media_processing.py,sha256=CTRq-lGlFkFgP_FSRhNwF_qUgmOrXPf2_1Ok9HY42_g,5887
 vectorvein/utilities/retry.py,sha256=6KFS9R2HdhqM3_9jkjD4F36ZSpEx2YNFGOVlpOsUetM,2208
-vectorvein-0.1.
+vectorvein-0.1.68.dist-info/RECORD,,
{vectorvein-0.1.66.dist-info → vectorvein-0.1.68.dist-info}/WHEEL
File without changes
{vectorvein-0.1.66.dist-info → vectorvein-0.1.68.dist-info}/entry_points.txt
File without changes