vectorvein 0.1.36__py3-none-any.whl → 0.1.38__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/anthropic_client.py +88 -42
- vectorvein/chat_clients/openai_compatible_client.py +34 -15
- vectorvein/types/llm_parameters.py +9 -2
- {vectorvein-0.1.36.dist-info → vectorvein-0.1.38.dist-info}/METADATA +1 -1
- {vectorvein-0.1.36.dist-info → vectorvein-0.1.38.dist-info}/RECORD +7 -7
- {vectorvein-0.1.36.dist-info → vectorvein-0.1.38.dist-info}/WHEEL +0 -0
- {vectorvein-0.1.36.dist-info → vectorvein-0.1.38.dist-info}/entry_points.txt +0 -0
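The central change in 0.1.38: a model's `endpoints` list may now mix plain endpoint-id strings with dicts that also pin a per-endpoint `model_id` (the new `EndpointOptionDict` below). A minimal sketch of the two accepted shapes and how the updated `raw_client` logic resolves them; the endpoint and model ids here are hypothetical:

```python
import random

# Hypothetical settings fragment: each entry is either a plain endpoint id
# or an EndpointOptionDict-style dict carrying its own model_id.
model_endpoints = [
    "anthropic-default",
    {"endpoint_id": "vertex-us-east5", "model_id": "claude-3-5-sonnet@20240620"},
]

endpoint = random.choice(model_endpoints)
if isinstance(endpoint, dict):
    endpoint_id = endpoint["endpoint_id"]
    model_id = endpoint["model_id"]  # per-endpoint model id wins
else:
    endpoint_id = endpoint
    model_id = None  # later falls back to the ModelSetting's own id
```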
vectorvein/chat_clients/anthropic_client.py

@@ -135,12 +135,19 @@ class AnthropicChatClient(BaseChatClient):
             http_client,
             backend_name,
         )
+        self.model_id = None

     @cached_property
     def raw_client(self):
         if self.random_endpoint:
             self.random_endpoint = True
-
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
         self.endpoint = settings.get_endpoint(self.endpoint_id)

         if self.endpoint.is_vertex:
@@ -265,23 +272,35 @@ class AnthropicChatClient(BaseChatClient):

         if self.random_endpoint:
             self.random_endpoint = True
-
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
         self.endpoint = settings.get_endpoint(self.endpoint_id)

         if self.endpoint.api_schema_type == "openai":
             if self.stream:
-
-
-
-
-
-
-
-
-
-
-
-
+
+                def _generator():
+                    response = OpenAICompatibleChatClient(
+                        model=self.model,
+                        stream=True,
+                        temperature=self.temperature,
+                        context_length_control=self.context_length_control,
+                        random_endpoint=self.random_endpoint,
+                        endpoint_id=self.endpoint_id,
+                        http_client=self.http_client,
+                        backend_name=self.BACKEND_NAME,
+                    ).create_completion(
+                        messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                    )
+                    for chunk in response:
+                        yield chunk
+
+                return _generator()
             else:
                 return OpenAICompatibleChatClient(
                     model=self.model,
@@ -298,7 +317,10 @@ class AnthropicChatClient(BaseChatClient):

         assert isinstance(self.raw_client, Anthropic | AnthropicVertex)

+        raw_client = self.raw_client  # self.model_id is assigned once self.raw_client has been accessed
         self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id

         if messages[0].get("role") == "system":
             system_prompt: str = messages[0]["content"]
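Note the ordering both Anthropic clients now depend on: `raw_client` is a `cached_property` and `self.model_id` is only populated while it is being evaluated, so `create_completion` reads the property into a local before falling back to `model_setting.id`. A stripped-down sketch of that pattern; the class and names are illustrative, not the package API:

```python
import random
from functools import cached_property


class EndpointPicker:
    def __init__(self, endpoints, default_model_id):
        self._endpoints = endpoints
        self._default_model_id = default_model_id
        self.model_id = None

    @cached_property
    def raw_client(self):
        # Side effect: choosing a dict-style endpoint also pins a per-endpoint model id.
        endpoint = random.choice(self._endpoints)
        if isinstance(endpoint, dict):
            self.model_id = endpoint["model_id"]
        return object()  # stand-in for the real SDK client

    def create_completion(self):
        raw_client = self.raw_client  # must be evaluated before model_id is read
        if self.model_id is None:
            self.model_id = self._default_model_id
        return raw_client, self.model_id
```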
@@ -311,7 +333,7 @@ class AnthropicChatClient(BaseChatClient):
                 messages,
                 max_count=self.model_setting.context_length,
                 backend=self.BACKEND_NAME,
-                model=self.
+                model=self.model,
             )

         messages = format_messages_alternate(messages)
@@ -323,7 +345,7 @@ class AnthropicChatClient(BaseChatClient):

         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
-            token_counts = get_message_token_counts(messages=messages, tools=tools_params, model=self.
+            token_counts = get_message_token_counts(messages=messages, tools=tools_params, model=self.model)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -331,8 +353,8 @@ class AnthropicChatClient(BaseChatClient):
                 max_tokens = self.model_setting.context_length - token_counts

         if self.stream:
-            stream_response =
-                model=self.
+            stream_response = raw_client.messages.create(
+                model=self.model_id,
                 messages=messages,
                 system=system_prompt,
                 stream=True,
@@ -393,8 +415,8 @@ class AnthropicChatClient(BaseChatClient):

             return generator()
         else:
-            response =
-                model=self.
+            response = raw_client.messages.create(
+                model=self.model_id,
                 messages=messages,
                 system=system_prompt,
                 stream=False,
@@ -451,12 +473,19 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
             http_client,
             backend_name,
         )
+        self.model_id = None

     @cached_property
     def raw_client(self):
         if self.random_endpoint:
             self.random_endpoint = True
-
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
         self.endpoint = settings.get_endpoint(self.endpoint_id)

         if self.endpoint.is_vertex:
@@ -581,25 +610,38 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):

         if self.random_endpoint:
             self.random_endpoint = True
-
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
         self.endpoint = settings.get_endpoint(self.endpoint_id)

         if self.endpoint.api_schema_type == "openai":
             if self.stream:
-
-
-
-
-
-
-
-
-
-
-
-
+
+                async def _generator():
+                    client = AsyncOpenAICompatibleChatClient(
+                        model=self.model,
+                        stream=True,
+                        temperature=self.temperature,
+                        context_length_control=self.context_length_control,
+                        random_endpoint=self.random_endpoint,
+                        endpoint_id=self.endpoint_id,
+                        http_client=self.http_client,
+                        backend_name=self.BACKEND_NAME,
+                    )
+                    response = await client.create_completion(
+                        messages, model, True, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
+                    )
+                    async for chunk in response:
+                        yield chunk
+
+                return _generator()
             else:
-
+                client = AsyncOpenAICompatibleChatClient(
                     model=self.model,
                     stream=False,
                     temperature=self.temperature,
@@ -608,13 +650,17 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                     endpoint_id=self.endpoint_id,
                     http_client=self.http_client,
                     backend_name=self.BACKEND_NAME,
-                )
+                )
+                return await client.create_completion(
                     messages, model, False, temperature, max_tokens, tools, tool_choice, response_format, **kwargs
                 )

         assert isinstance(self.raw_client, AsyncAnthropic | AsyncAnthropicVertex)

+        raw_client = self.raw_client  # self.model_id is assigned once self.raw_client has been accessed
         self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id

         if messages[0].get("role") == "system":
             system_prompt = messages[0]["content"]
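On the async side, the streaming delegation wraps the awaited `create_completion` of the OpenAI-compatible client in a local `async def _generator()` and returns that generator from the enclosing coroutine. A self-contained sketch of the same shape, using a stand-in delegate instead of `AsyncOpenAICompatibleChatClient`:

```python
import asyncio
from typing import AsyncIterator


async def _delegate_create_completion() -> AsyncIterator[str]:
    # Stand-in for awaiting the wrapped client's create_completion(..., stream=True),
    # which resolves to an async iterator of chunks.
    async def _chunks():
        for piece in ("hel", "lo", "!"):
            yield piece

    return _chunks()


async def create_completion() -> AsyncIterator[str]:
    # Mirrors the 0.1.38 streaming branch: await the delegate inside a local
    # async generator and hand that generator back to the caller.
    async def _generator():
        response = await _delegate_create_completion()
        async for chunk in response:
            yield chunk

    return _generator()


async def main() -> None:
    stream = await create_completion()
    async for chunk in stream:
        print(chunk, end="")


asyncio.run(main())
```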
@@ -627,7 +673,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 messages,
                 max_count=self.model_setting.context_length,
                 backend=self.BACKEND_NAME,
-                model=self.
+                model=self.model,
             )

         messages = format_messages_alternate(messages)
@@ -639,7 +685,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):

         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
-            token_counts = get_message_token_counts(messages=messages, tools=tools_params, model=self.
+            token_counts = get_message_token_counts(messages=messages, tools=tools_params, model=self.model)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -647,8 +693,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 max_tokens = self.model_setting.context_length - token_counts

         if self.stream:
-            stream_response = await
-                model=self.
+            stream_response = await raw_client.messages.create(
+                model=self.model_id,
                 messages=messages,
                 system=system_prompt,
                 stream=True,
@@ -709,8 +755,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):

             return generator()
         else:
-            response = await
-                model=self.
+            response = await raw_client.messages.create(
+                model=self.model_id,
                 messages=messages,
                 system=system_prompt,
                 stream=False,
vectorvein/chat_clients/openai_compatible_client.py

@@ -25,7 +25,6 @@ from ..types.llm_parameters import (
     NOT_GIVEN,
     ToolParam,
     ToolChoice,
-    BackendSettings,
     ChatCompletionMessage,
     ChatCompletionDeltaMessage,
 )
@@ -56,12 +55,19 @@ class OpenAICompatibleChatClient(BaseChatClient):
             http_client,
             backend_name,
         )
+        self.model_id = None

     @cached_property
     def raw_client(self) -> OpenAI | AzureOpenAI:
         if self.random_endpoint:
             self.random_endpoint = True
-
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
         self.endpoint = settings.get_endpoint(self.endpoint_id)

         if self.endpoint.is_azure:
@@ -142,14 +148,17 @@ class OpenAICompatibleChatClient(BaseChatClient):
         if temperature is not None:
             self.temperature = temperature

+        raw_client = self.raw_client  # self.model_id is assigned once self.raw_client has been accessed
         self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id

         if self.context_length_control == ContextLengthControlType.Latest:
             messages = cutoff_messages(
                 messages,
                 max_count=self.model_setting.context_length,
                 backend=self.BACKEND_NAME,
-                model=self.
+                model=self.model,
             )

         if tools:
@@ -168,7 +177,7 @@ class OpenAICompatibleChatClient(BaseChatClient):

         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
-            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts - 64
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -181,8 +190,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
             self.response_format = {}

         if self.stream:
-            stream_response: Stream[ChatCompletionChunk] =
-                model=self.
+            stream_response: Stream[ChatCompletionChunk] = raw_client.chat.completions.create(
+                model=self.model_id,
                 messages=messages,
                 stream=True,
                 temperature=self.temperature,
@@ -222,8 +231,8 @@ class OpenAICompatibleChatClient(BaseChatClient):

             return generator()
         else:
-            response: ChatCompletion =
-                model=self.
+            response: ChatCompletion = raw_client.chat.completions.create(
+                model=self.model_id,
                 messages=messages,
                 stream=False,
                 temperature=self.temperature,
@@ -278,12 +287,19 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             http_client,
             backend_name,
         )
+        self.model_id = None

     @cached_property
     def raw_client(self):
         if self.random_endpoint:
             self.random_endpoint = True
-
+            endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
+            self.model_id = None
+            if isinstance(endpoint, dict):
+                self.endpoint_id = endpoint["endpoint_id"]
+                self.model_id = endpoint["model_id"]
+            else:
+                self.endpoint_id = endpoint
         self.endpoint = settings.get_endpoint(self.endpoint_id)

         if self.endpoint.is_azure:
@@ -364,14 +380,17 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         if temperature is not None:
             self.temperature = temperature

+        raw_client = self.raw_client  # self.model_id is assigned once self.raw_client has been accessed
         self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id

         if self.context_length_control == ContextLengthControlType.Latest:
             messages = cutoff_messages(
                 messages,
                 max_count=self.model_setting.context_length,
                 backend=self.BACKEND_NAME,
-                model=self.
+                model=self.model,
             )

         if tools:
@@ -395,7 +414,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):

         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
-            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts - 64
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -403,8 +422,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                 max_tokens = self.model_setting.context_length - token_counts - 64

         if self.stream:
-            stream_response: AsyncStream[ChatCompletionChunk] = await
-                model=self.
+            stream_response: AsyncStream[ChatCompletionChunk] = await raw_client.chat.completions.create(
+                model=self.model_id,
                 messages=messages,
                 stream=self.stream,
                 temperature=self.temperature,
@@ -444,8 +463,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):

             return generator()
         else:
-            response: ChatCompletion = await
-                model=self.
+            response: ChatCompletion = await raw_client.chat.completions.create(
+                model=self.model_id,
                 messages=messages,
                 stream=self.stream,
                 temperature=self.temperature,
vectorvein/types/llm_parameters.py

@@ -1,6 +1,6 @@
 # @Author: Bi Ying
 # @Date: 2024-07-26 23:48:04
-from typing import List, Dict, Optional, Union, Iterable
+from typing import List, Dict, Optional, Union, Iterable, TypedDict

 from pydantic import BaseModel, Field

@@ -18,6 +18,11 @@ from openai.types.chat.chat_completion_tool_choice_option_param import ChatCompl
 from . import defaults as defs


+class EndpointOptionDict(TypedDict):
+    endpoint_id: str
+    model_id: str
+
+
 class EndpointSetting(BaseModel):
     id: str = Field(..., description="The id of the endpoint.")
     region: Optional[str] = Field(None, description="The region for the endpoint.")
@@ -40,7 +45,9 @@ class EndpointSetting(BaseModel):

 class ModelSetting(BaseModel):
     id: str = Field(..., description="The id of the model.")
-    endpoints: List[str] = Field(
+    endpoints: List[Union[str, EndpointOptionDict]] = Field(
+        default_factory=list, description="Available endpoints for the model."
+    )
     function_call_available: bool = Field(False, description="Indicates if function call is available.")
     response_format_available: bool = Field(False, description="Indicates if response format is available.")
     native_multimodal: bool = Field(False, description="Indicates if the model is a native multimodal model.")
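With `endpoints` widened to `List[Union[str, EndpointOptionDict]]`, a `ModelSetting` can mix both entry forms. A small illustrative example; the ids and field values are made up rather than taken from the package defaults:

```python
from vectorvein.types.llm_parameters import EndpointOptionDict, ModelSetting

sonnet = ModelSetting(
    id="claude-3-5-sonnet-20240620",  # default model id, used when an endpoint entry is a plain string
    endpoints=[
        "anthropic-main",  # plain endpoint id
        EndpointOptionDict(endpoint_id="vertex-us-east5", model_id="claude-3-5-sonnet@20240620"),
    ],
    function_call_available=True,
)
```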
{vectorvein-0.1.36.dist-info → vectorvein-0.1.38.dist-info}/RECORD

@@ -1,9 +1,9 @@
-vectorvein-0.1.
-vectorvein-0.1.
-vectorvein-0.1.
+vectorvein-0.1.38.dist-info/METADATA,sha256=km7-NuiAPCx_ynlVDPTzPPgNDxg5S1C9pilzhrClOT0,502
+vectorvein-0.1.38.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+vectorvein-0.1.38.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/__init__.py,sha256=dW169oK1n3v8Z0uD8itghzlCP72rxiaS-XYn6fvI2xM,16788
-vectorvein/chat_clients/anthropic_client.py,sha256=
+vectorvein/chat_clients/anthropic_client.py,sha256=jF9pDlnkhjM6-OLPCQQxkh27xjzbTRaEY53olRd3_aY,32413
 vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
 vectorvein/chat_clients/base_client.py,sha256=7i456Yn-tqY0oPeIj_wHWNGGzCKPAbX5Ufxy2wWGMNY,8653
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
@@ -14,7 +14,7 @@ vectorvein/chat_clients/minimax_client.py,sha256=0MVMb4g0K_VKnPGHYX81jHiBaQUGWFG
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=
+vectorvein/chat_clients/openai_compatible_client.py,sha256=FVm_ZYL9UP6t6hTUNxPyoxGYqXHJMw37UGOQ-t63gaw,19709
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
@@ -27,8 +27,8 @@ vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 vectorvein/types/defaults.py,sha256=xefmRNYBGbnWA5kjLLFKN91UM5gnHZ5-kcCNlQRfznk,22095
 vectorvein/types/enums.py,sha256=x_S0IJiEWijOAEiMNdiGDGEWGtmt7TwMriJVDqrDmTo,1637
 vectorvein/types/exception.py,sha256=gnW4GnJ76jND6UGnodk9xmqkcbeS7Cz2rvncA2HpD5E,69
-vectorvein/types/llm_parameters.py,sha256=
+vectorvein/types/llm_parameters.py,sha256=02ik6XDm1-4lpZP4yIh6QRGkU8wdnxEMZTKeaFBBStw,5040
 vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/utilities/media_processing.py,sha256=BujciRmw1GMmc3ELRvafL8STcy6r5b2rVnh27-uA7so,2256
 vectorvein/utilities/retry.py,sha256=9ePuJdeUUGx-qMWfaFxmlOvG_lQPwCQ4UB1z3Edlo34,993
-vectorvein-0.1.
+vectorvein-0.1.38.dist-info/RECORD,,
{vectorvein-0.1.36.dist-info → vectorvein-0.1.38.dist-info}/WHEEL: file without changes

{vectorvein-0.1.36.dist-info → vectorvein-0.1.38.dist-info}/entry_points.txt: file without changes