vectorvein 0.1.63.tar.gz → 0.1.65.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.1.63 → vectorvein-0.1.65}/PKG-INFO +1 -1
- {vectorvein-0.1.63 → vectorvein-0.1.65}/pyproject.toml +1 -1
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/anthropic_client.py +69 -46
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/openai_compatible_client.py +4 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/README.md +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/xai_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/server/token_server.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/types/py.typed +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/utilities/retry.py +0 -0
{vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/anthropic_client.py
RENAMED

```diff
@@ -40,6 +40,7 @@ from ..types.llm_parameters import (
     NotGiven,
     ToolParam,
     ToolChoice,
+    EndpointSetting,
     AnthropicToolParam,
     AnthropicToolChoice,
     ChatCompletionMessage,
```
```diff
@@ -179,13 +180,23 @@ class AnthropicChatClient(BaseChatClient):
         self.model_id = None
         self.endpoint = None
 
-    @property
-    def raw_client(self):
+    def set_model_id_by_endpoint_id(self, endpoint_id: str):
+        for endpoint_option in self.backend_settings.models[self.model].endpoints:
+            if isinstance(endpoint_option, dict):
+                if endpoint_id == endpoint_option["endpoint_id"]:
+                    self.model_id = endpoint_option["model_id"]
+                    break
+            else:
+                if endpoint_id == endpoint_option:
+                    self.model_id = endpoint_option
+                    break
+        return self.model_id
+
+    def _set_endpoint(self):
         if self.endpoint is None:
             if self.random_endpoint:
                 self.random_endpoint = True
                 endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
-                self.model_id = None
                 if isinstance(endpoint, dict):
                     self.endpoint_id = endpoint["endpoint_id"]
                     self.model_id = endpoint["model_id"]
@@ -194,6 +205,18 @@
                 self.endpoint = settings.get_endpoint(self.endpoint_id)
             else:
                 self.endpoint = settings.get_endpoint(self.endpoint_id)
+                self.set_model_id_by_endpoint_id(self.endpoint_id)
+        elif isinstance(self.endpoint, EndpointSetting):
+            self.endpoint_id = self.endpoint.id
+            self.set_model_id_by_endpoint_id(self.endpoint_id)
+        else:
+            raise ValueError("Invalid endpoint")
+
+        return self.endpoint, self.model_id
+
+    @property
+    def raw_client(self):  # type: ignore
+        self.endpoint, self.model_id = self._set_endpoint()
 
         if self.endpoint.is_vertex:
             if self.endpoint.credentials is None:
```
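The two hunks above are the core of this release: endpoint selection moves out of `raw_client` into a reusable `_set_endpoint()`, and the new `set_model_id_by_endpoint_id()` lets a pinned or explicitly assigned endpoint resolve the `model_id` it should serve, where 0.1.63 only resolved it for randomly drawn dict endpoints. The sketch below restates that resolution rule outside the client class; the example ids are invented for illustration, and the return value stands in for the `self.model_id` attribute the real method mutates.

```python
# Illustration only: a standalone restatement of the rule implemented by
# set_model_id_by_endpoint_id. The `endpoints` layout matches what the diff
# shows for backend_settings.models[...].endpoints; the ids are made up.
from typing import Optional, Union


def resolve_model_id(
    endpoints: list[Union[str, dict]], endpoint_id: str
) -> Optional[str]:
    for option in endpoints:
        if isinstance(option, dict):
            # Dict entries pair an endpoint id with the model id that
            # this endpoint serves.
            if endpoint_id == option["endpoint_id"]:
                return option["model_id"]
        elif endpoint_id == option:
            # A bare string entry doubles as both endpoint id and model id.
            return option
    return None


endpoints = [
    "anthropic-default",
    {"endpoint_id": "my-vertex", "model_id": "claude-3-5-sonnet-v2"},
]
assert resolve_model_id(endpoints, "my-vertex") == "claude-3-5-sonnet-v2"
assert resolve_model_id(endpoints, "anthropic-default") == "anthropic-default"
```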
```diff
@@ -333,31 +356,24 @@ class AnthropicChatClient(BaseChatClient):
         if temperature is not None:
             self.temperature = temperature
 
-        if self.endpoint is None:
-            if self.random_endpoint:
-                self.random_endpoint = True
-                endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
-                self.model_id = None
-                if isinstance(endpoint, dict):
-                    self.endpoint_id = endpoint["endpoint_id"]
-                    self.model_id = endpoint["model_id"]
-                else:
-                    self.endpoint_id = endpoint
-                self.endpoint = settings.get_endpoint(self.endpoint_id)
-            else:
-                self.endpoint = settings.get_endpoint(self.endpoint_id)
+        self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id
+
+        self.endpoint, self.model_id = self._set_endpoint()
 
         if self.endpoint.api_schema_type == "openai":
             _tools = OPENAI_NOT_GIVEN if tools is NOT_GIVEN else tools
             _tool_choice = OPENAI_NOT_GIVEN if tool_choice is NOT_GIVEN else tool_choice
 
             formatted_messages = refactor_into_openai_messages(messages)
+            model_id = self.model_id
 
             if self.stream:
 
                 def _generator():
                     response = OpenAICompatibleChatClient(
-                        model=
+                        model=model_id,
                         stream=True,
                         temperature=self.temperature,
                         context_length_control=self.context_length_control,
```
```diff
@@ -385,7 +401,7 @@ class AnthropicChatClient(BaseChatClient):
                 return _generator()
             else:
                 return OpenAICompatibleChatClient(
-                    model=
+                    model=model_id,
                     stream=False,
                     temperature=self.temperature,
                     context_length_control=self.context_length_control,
```
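The `model_id = self.model_id` capture added in the hunks above looks redundant but matters: `_generator` is a closure that runs lazily, after `create_completion` has returned, and by then `self.model_id` may have been reassigned (for example by a second call that re-rolls a random endpoint). Binding the value to a local freezes it at call time. A minimal demonstration of the difference, independent of vectorvein:

```python
class Client:
    def __init__(self):
        self.model_id = "model-a"

    def stream_late_bound(self):
        def _generator():
            # Reads self.model_id when the generator RUNS, not when it was made.
            yield self.model_id

        return _generator()

    def stream_captured(self):
        model_id = self.model_id  # freeze the value at call time

        def _generator():
            yield model_id

        return _generator()


c = Client()
late = c.stream_late_bound()
captured = c.stream_captured()
c.model_id = "model-b"  # e.g. a later request picked another endpoint

print(next(late))      # model-b -- the mutation leaked into the stream
print(next(captured))  # model-a -- the value the caller actually asked for
```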
```diff
@@ -417,10 +433,6 @@ class AnthropicChatClient(BaseChatClient):
         if isinstance(top_p, OpenAINotGiven) or top_p is None:
             top_p = NOT_GIVEN
 
-        self.model_setting = self.backend_settings.models[self.model]
-        if self.model_id is None:
-            self.model_id = self.model_setting.id
-
         if messages[0].get("role") == "system":
             system_prompt: str = messages[0]["content"]
             messages = messages[1:]
```
```diff
@@ -577,13 +589,23 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         self.model_id = None
         self.endpoint = None
 
-    @property
-    def raw_client(self):
+    def set_model_id_by_endpoint_id(self, endpoint_id: str):
+        for endpoint_option in self.backend_settings.models[self.model].endpoints:
+            if isinstance(endpoint_option, dict):
+                if endpoint_id == endpoint_option["endpoint_id"]:
+                    self.model_id = endpoint_option["model_id"]
+                    break
+            else:
+                if endpoint_id == endpoint_option:
+                    self.model_id = endpoint_option
+                    break
+        return self.model_id
+
+    def _set_endpoint(self):
         if self.endpoint is None:
             if self.random_endpoint:
                 self.random_endpoint = True
                 endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
-                self.model_id = None
                 if isinstance(endpoint, dict):
                     self.endpoint_id = endpoint["endpoint_id"]
                     self.model_id = endpoint["model_id"]
@@ -592,6 +614,18 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 self.endpoint = settings.get_endpoint(self.endpoint_id)
             else:
                 self.endpoint = settings.get_endpoint(self.endpoint_id)
+                self.set_model_id_by_endpoint_id(self.endpoint_id)
+        elif isinstance(self.endpoint, EndpointSetting):
+            self.endpoint_id = self.endpoint.id
+            self.set_model_id_by_endpoint_id(self.endpoint_id)
+        else:
+            raise ValueError("Invalid endpoint")
+
+        return self.endpoint, self.model_id
+
+    @property
+    def raw_client(self):  # type: ignore
+        self.endpoint, self.model_id = self._set_endpoint()
 
         if self.endpoint.is_vertex:
             if self.endpoint.credentials is None:
```
```diff
@@ -631,8 +665,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         if self.endpoint.credentials is None:
             raise ValueError("Anthropic Bedrock endpoint requires credentials")
         return AsyncAnthropicBedrock(
-            aws_access_key=self.endpoint.credentials.get("
-            aws_secret_key=self.endpoint.credentials.get("
+            aws_access_key=self.endpoint.credentials.get("access_key"),
+            aws_secret_key=self.endpoint.credentials.get("secret_key"),
             aws_region=self.endpoint.region,
             http_client=self.http_client,
         )
```
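This hunk fixes the keys used to read AWS credentials from the endpoint's credentials mapping: the new code looks up `access_key` and `secret_key` (the removed lines are truncated in the rendered diff, so the old key names are not recoverable here). For reference, a Bedrock endpoint would carry a configuration shaped roughly like the sketch below; only the two key names are confirmed by the diff, the surrounding layout is an assumption for illustration.

```python
# Hypothetical endpoint settings for an Anthropic Bedrock endpoint.
# Only "access_key"/"secret_key" are confirmed by the diff above; the rest
# of the layout is illustrative, and the key values are placeholders.
endpoint_config = {
    "id": "anthropic-bedrock",
    "region": "us-east-1",
    "credentials": {
        "access_key": "AKIA...",  # placeholder, not a real key
        "secret_key": "wJal...",  # placeholder, not a real key
    },
}

credentials = endpoint_config["credentials"]
if credentials is None:
    raise ValueError("Anthropic Bedrock endpoint requires credentials")

# These keyword names match the anthropic SDK's (Async)AnthropicBedrock
# constructor, as used in the hunk above.
bedrock_kwargs = dict(
    aws_access_key=credentials.get("access_key"),
    aws_secret_key=credentials.get("secret_key"),
    aws_region=endpoint_config["region"],
)
```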
```diff
@@ -730,31 +764,24 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         if temperature is not None:
             self.temperature = temperature
 
-        if self.endpoint is None:
-            if self.random_endpoint:
-                self.random_endpoint = True
-                endpoint = random.choice(self.backend_settings.models[self.model].endpoints)
-                self.model_id = None
-                if isinstance(endpoint, dict):
-                    self.endpoint_id = endpoint["endpoint_id"]
-                    self.model_id = endpoint["model_id"]
-                else:
-                    self.endpoint_id = endpoint
-                self.endpoint = settings.get_endpoint(self.endpoint_id)
-            else:
-                self.endpoint = settings.get_endpoint(self.endpoint_id)
+        self.model_setting = self.backend_settings.models[self.model]
+        if self.model_id is None:
+            self.model_id = self.model_setting.id
+
+        self.endpoint, self.model_id = self._set_endpoint()
 
         if self.endpoint.api_schema_type == "openai":
             _tools = OPENAI_NOT_GIVEN if tools is NOT_GIVEN else tools
             _tool_choice = OPENAI_NOT_GIVEN if tool_choice is NOT_GIVEN else tool_choice
 
             formatted_messages = refactor_into_openai_messages(messages)
+            model_id = self.model_id
 
             if self.stream:
 
                 async def _generator():
                     client = AsyncOpenAICompatibleChatClient(
-                        model=
+                        model=model_id,
                         stream=True,
                         temperature=self.temperature,
                         context_length_control=self.context_length_control,
```
```diff
@@ -783,7 +810,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 return _generator()
             else:
                 client = AsyncOpenAICompatibleChatClient(
-                    model=
+                    model=model_id,
                     stream=False,
                     temperature=self.temperature,
                     context_length_control=self.context_length_control,
```
```diff
@@ -816,10 +843,6 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         if isinstance(top_p, OpenAINotGiven) or top_p is None:
             top_p = NOT_GIVEN
 
-        self.model_setting = self.backend_settings.models[self.model]
-        if self.model_id is None:
-            self.model_id = self.model_setting.id
-
         if messages[0].get("role") == "system":
             system_prompt = messages[0]["content"]
             messages = messages[1:]
```
{vectorvein-0.1.63 → vectorvein-0.1.65}/src/vectorvein/chat_clients/openai_compatible_client.py
RENAMED

```diff
@@ -73,6 +73,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
             self.endpoint = settings.get_endpoint(self.endpoint_id)
 
         if self.endpoint.is_azure:
+            if self.endpoint.api_base is None:
+                raise ValueError("Azure endpoint is not set")
             return AzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
@@ -339,6 +341,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             self.endpoint = settings.get_endpoint(self.endpoint_id)
 
         if self.endpoint.is_azure:
+            if self.endpoint.api_base is None:
+                raise ValueError("Azure endpoint is not set")
             return AsyncAzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
```
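Both hunks add the same guard: `api_base` is presumably optional on an endpoint setting, but `AzureOpenAI`/`AsyncAzureOpenAI` require a concrete `azure_endpoint` string, so failing fast with a precise `ValueError` beats letting `None` propagate into the client. The guard also narrows the type for static checkers. A minimal sketch of the idiom, with a hypothetical `Endpoint` dataclass standing in for vectorvein's settings class:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class Endpoint:
    # Stand-in for vectorvein's EndpointSetting; api_base is optional
    # because non-Azure endpoints can fall back to a default base URL.
    api_base: Optional[str] = None
    api_key: Optional[str] = None
    is_azure: bool = False


def azure_base_url(endpoint: Endpoint) -> str:
    if not endpoint.is_azure:
        raise ValueError("not an Azure endpoint")
    if endpoint.api_base is None:
        # Fail fast with a clear message instead of letting the Azure
        # client blow up on a None endpoint later.
        raise ValueError("Azure endpoint is not set")
    # After the guard, type checkers narrow api_base from Optional[str]
    # to str, matching the azure_endpoint parameter's expected type.
    return endpoint.api_base
```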