vectorvein 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/api/client.py +81 -103
- vectorvein/api/exceptions.py +1 -3
- vectorvein/api/models.py +11 -11
- vectorvein/chat_clients/anthropic_client.py +157 -169
- vectorvein/chat_clients/base_client.py +257 -198
- vectorvein/chat_clients/openai_compatible_client.py +150 -161
- vectorvein/chat_clients/utils.py +44 -24
- vectorvein/server/token_server.py +1 -1
- vectorvein/settings/__init__.py +27 -27
- vectorvein/types/defaults.py +32 -16
- vectorvein/types/llm_parameters.py +40 -34
- vectorvein/types/settings.py +10 -10
- vectorvein/utilities/media_processing.py +1 -1
- vectorvein/utilities/rate_limiter.py +5 -6
- vectorvein/utilities/retry.py +6 -5
- vectorvein/workflow/graph/edge.py +3 -3
- vectorvein/workflow/graph/node.py +14 -26
- vectorvein/workflow/graph/port.py +40 -39
- vectorvein/workflow/graph/workflow.py +13 -25
- vectorvein/workflow/nodes/audio_generation.py +5 -7
- vectorvein/workflow/nodes/control_flows.py +7 -9
- vectorvein/workflow/nodes/file_processing.py +4 -6
- vectorvein/workflow/nodes/image_generation.py +20 -22
- vectorvein/workflow/nodes/llms.py +13 -15
- vectorvein/workflow/nodes/media_editing.py +26 -40
- vectorvein/workflow/nodes/media_processing.py +19 -21
- vectorvein/workflow/nodes/output.py +10 -12
- vectorvein/workflow/nodes/relational_db.py +3 -5
- vectorvein/workflow/nodes/text_processing.py +8 -10
- vectorvein/workflow/nodes/tools.py +8 -10
- vectorvein/workflow/nodes/triggers.py +1 -3
- vectorvein/workflow/nodes/vector_db.py +3 -5
- vectorvein/workflow/nodes/video_generation.py +4 -6
- vectorvein/workflow/nodes/web_crawlers.py +4 -6
- vectorvein/workflow/utils/analyse.py +5 -13
- vectorvein/workflow/utils/check.py +6 -16
- vectorvein/workflow/utils/json_to_code.py +6 -14
- vectorvein/workflow/utils/layout.py +3 -5
- {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/METADATA +1 -1
- vectorvein-0.3.3.dist-info/RECORD +68 -0
- {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/WHEEL +1 -1
- vectorvein-0.3.1.dist-info/RECORD +0 -68
- {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/entry_points.txt +0 -0
vectorvein/chat_clients/anthropic_client.py

@@ -1,19 +1,8 @@
 # @Author: Bi Ying
 # @Date: 2024-07-26 14:48:55
 import json
-from typing import (
-    Any,
-    Dict,
-    List,
-    TYPE_CHECKING,
-    overload,
-    Generator,
-    AsyncGenerator,
-    Union,
-    Literal,
-    Iterable,
-    Optional,
-)
+from collections.abc import Generator, AsyncGenerator, Iterable
+from typing import Any, TYPE_CHECKING, overload, Literal
 
 import httpx
 from openai._types import NotGiven as OpenAINotGiven
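This import rewrite tracks the modern typing guidance: since Python 3.9 the ABCs `Generator`, `AsyncGenerator`, and `Iterable` are importable from `collections.abc`, and built-in generics plus PEP 604 `X | Y` unions replace `Dict`, `List`, `Union`, and `Optional`. A minimal sketch of the modernized style (illustrative names, not code from this package):

```python
from collections.abc import Generator, Iterable

# Built-in generics and `X | Y` unions replace typing.List/Dict/Union/Optional.
def chunk_lengths(chunks: Iterable[str | bytes]) -> Generator[int, None, None]:
    for chunk in chunks:
        yield len(chunk)

print(list(chunk_lengths(["ab", b"cde"])))  # [2, 3]
```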
@@ -154,9 +143,9 @@ def refactor_into_openai_messages(messages: Iterable[MessageParam]):
     elif isinstance(content, list):
         _content = []
         for item in content:
-            if isinstance(item, (TextBlock, ToolUseBlock)):
+            if isinstance(item, TextBlock | ToolUseBlock):
                 _content.append(item.model_dump())
-            elif isinstance(item, (ThinkingBlock, RedactedThinkingBlock)):
+            elif isinstance(item, ThinkingBlock | RedactedThinkingBlock):
                 continue
             elif item.get("type") == "image":
                 image_data = item.get("source", {}).get("data", "")
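The `isinstance` rewrite relies on Python 3.10+ accepting a PEP 604 union where a tuple of types was previously required; the two forms are equivalent. A quick demonstration with stand-in classes (not the real `anthropic` block types):

```python
class TextBlock: ...
class ToolUseBlock: ...

item = TextBlock()
# Since Python 3.10, isinstance accepts `X | Y` as well as the tuple form.
assert isinstance(item, TextBlock | ToolUseBlock)
assert isinstance(item, (TextBlock, ToolUseBlock))
```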
@@ -290,32 +279,32 @@ class AnthropicChatClient(BaseChatClient):
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
-        temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
-        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
-        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
-        response_format: ResponseFormat | NotGiven = NOT_GIVEN,
+        temperature: float | None | NotGiven = OPENAI_NOT_GIVEN,
+        max_tokens: int | None | NotGiven = None,
+        tools: Iterable[ToolParam] | NotGiven = OPENAI_NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = OPENAI_NOT_GIVEN,
+        response_format: ResponseFormat | NotGiven = OPENAI_NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
-        top_p: float | NotGiven | None = NOT_GIVEN,
+        top_p: float | NotGiven | None = OPENAI_NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio:
-        frequency_penalty:
-        logit_bias:
-        logprobs:
-        max_completion_tokens:
-        metadata:
-        modalities:
-        n:
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction:
-        presence_penalty:
-        reasoning_effort:
-        thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed:
-        service_tier:
-        stop:
-        store:
-        top_logprobs:
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        thinking: ThinkingConfigParam | None | NotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
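These overloads lean on the OpenAI SDK's `NotGiven` sentinel so the client can distinguish an omitted argument from an explicit `None`. A simplified sketch of that pattern (a stand-in class, not the SDK's actual implementation):

```python
class NotGiven:
    """Sentinel type: the caller never passed the argument at all."""

    def __bool__(self) -> bool:
        return False

    def __repr__(self) -> str:
        return "NOT_GIVEN"


NOT_GIVEN = NotGiven()


def complete(max_tokens: int | None | NotGiven = NOT_GIVEN) -> str:
    if isinstance(max_tokens, NotGiven):
        return "omitted: fall back to the model default"
    if max_tokens is None:
        return "explicit None: disable the cap"
    return f"cap at {max_tokens}"


print(complete())      # omitted: fall back to the model default
print(complete(None))  # explicit None: disable the cap
print(complete(1024))  # cap at 1024
```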
@@ -332,31 +321,31 @@ class AnthropicChatClient(BaseChatClient):
         model: str | None = None,
         stream: Literal[True],
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio:
-        frequency_penalty:
-        logit_bias:
-        logprobs:
-        max_completion_tokens:
-        metadata:
-        modalities:
-        n:
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction:
-        presence_penalty:
-        reasoning_effort:
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed:
-        service_tier:
-        stop:
-        store:
-        top_logprobs:
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -373,31 +362,31 @@ class AnthropicChatClient(BaseChatClient):
         model: str | None = None,
         stream: bool,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio:
-        frequency_penalty:
-        logit_bias:
-        logprobs:
-        max_completion_tokens:
-        metadata:
-        modalities:
-        n:
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction:
-        presence_penalty:
-        reasoning_effort:
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed:
-        service_tier:
-        stop:
-        store:
-        top_logprobs:
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -413,31 +402,31 @@ class AnthropicChatClient(BaseChatClient):
         model: str | None = None,
         stream: Literal[False] | Literal[True] = False,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio:
-        frequency_penalty:
-        logit_bias:
-        logprobs:
-        max_completion_tokens:
-        metadata:
-        modalities:
-        n:
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction:
-        presence_penalty:
-        reasoning_effort:
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed:
-        service_tier:
-        stop:
-        store:
-        top_logprobs:
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -510,8 +499,7 @@ class AnthropicChatClient(BaseChatClient):
                     extra_body=extra_body,
                     timeout=timeout,
                 )
-                for chunk in response:
-                    yield chunk
+                yield from response
 
             return _generator()
         else:
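`yield from` delegates to the underlying stream instead of re-yielding each chunk by hand; both spellings produce the same values, and when the inner object is itself a generator, delegation also forwards `send()` and `throw()`. A minimal comparison:

```python
def stream_manual(response):
    for chunk in response:
        yield chunk

def stream_delegated(response):
    yield from response  # same output, with full generator delegation

assert list(stream_manual(iter("abc"))) == list(stream_delegated(iter("abc")))
```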
@@ -594,7 +582,7 @@ class AnthropicChatClient(BaseChatClient):
         if tool_choice:
             tool_choice_param = refactor_tool_choice(tool_choice)
 
-        if max_tokens is None:
+        if not max_tokens:
            max_output_tokens = self.model_setting.max_output_tokens
            native_multimodal = self.model_setting.native_multimodal
            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
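One behavioral note on the new guard: `not max_tokens` is a truthiness test, so an explicit `max_tokens=0` now falls through to the computed default just like `None` does. A terse illustration (hypothetical helper, not the client's code):

```python
def resolve_max_tokens(max_tokens: int | None, model_default: int = 4096) -> int:
    # Truthiness check: both None and 0 count as "unset".
    if not max_tokens:
        return model_default
    return max_tokens

assert resolve_max_tokens(None) == 4096
assert resolve_max_tokens(0) == 4096   # an `is None` check would have kept the 0
assert resolve_max_tokens(512) == 512
```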
@@ -621,9 +609,9 @@ class AnthropicChatClient(BaseChatClient):
                 thinking=thinking,
             )
         except AnthropicAPIStatusError as e:
-            raise APIStatusError(message=e.message, response=e.response, body=e.body)
+            raise APIStatusError(message=e.message, response=e.response, body=e.body) from e
         except AnthropicAPIConnectionError as e:
-            raise APIConnectionError(message=e.message, request=e.request)
+            raise APIConnectionError(message=e.message, request=e.request) from e
 
         def generator():
             result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": [], "raw_content": []}
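Appending `from e` makes the exception chaining explicit: the SDK error is preserved as `__cause__`, and tracebacks read "The above exception was the direct cause of the following exception" instead of the implicit "During handling of the above exception, another exception occurred". A self-contained sketch with stand-in exception classes:

```python
class AnthropicAPIStatusError(Exception): ...
class APIStatusError(Exception): ...

def call():
    try:
        raise AnthropicAPIStatusError("upstream 500")
    except AnthropicAPIStatusError as e:
        # Explicit chaining keeps the SDK error attached as __cause__.
        raise APIStatusError("wrapped") from e

try:
    call()
except APIStatusError as e:
    assert isinstance(e.__cause__, AnthropicAPIStatusError)
```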
@@ -752,9 +740,9 @@ class AnthropicChatClient(BaseChatClient):
                 thinking=thinking,
             )
         except AnthropicAPIStatusError as e:
-            raise APIStatusError(message=e.message, response=e.response, body=e.body)
+            raise APIStatusError(message=e.message, response=e.response, body=e.body) from e
         except AnthropicAPIConnectionError as e:
-            raise APIConnectionError(message=e.message, request=e.request)
+            raise APIConnectionError(message=e.message, request=e.request) from e
 
         result = {
             "content": "",
@@ -897,31 +885,31 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         model: str | None = None,
         stream: Literal[False] = False,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio:
-        frequency_penalty:
-        logit_bias:
-        logprobs:
-        max_completion_tokens:
-        metadata:
-        modalities:
-        n:
+        audio: ChatCompletionAudioParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction:
-        presence_penalty:
-        reasoning_effort:
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed:
-        service_tier:
-        stop:
-        store:
-        top_logprobs:
+        seed: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -938,31 +926,31 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         model: str | None = None,
         stream: Literal[True],
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio:
-        frequency_penalty:
-        logit_bias:
-        logprobs:
-        max_completion_tokens:
-        metadata:
-        modalities:
-        n:
+        audio: ChatCompletionAudioParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction:
-        presence_penalty:
-        reasoning_effort:
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed:
-        service_tier:
-        stop:
-        store:
-        top_logprobs:
+        seed: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -979,31 +967,31 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         model: str | None = None,
         stream: bool,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio:
-        frequency_penalty:
-        logit_bias:
-        logprobs:
-        max_completion_tokens:
-        metadata:
-        modalities:
-        n:
+        audio: ChatCompletionAudioParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction:
-        presence_penalty:
-        reasoning_effort:
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed:
-        service_tier:
-        stop:
-        store:
-        top_logprobs:
+        seed: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -1019,31 +1007,31 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         model: str | None = None,
         stream: Literal[False] | Literal[True] = False,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio:
-        frequency_penalty:
-        logit_bias:
-        logprobs:
-        max_completion_tokens:
-        metadata:
-        modalities:
-        n:
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction:
-        presence_penalty:
-        reasoning_effort:
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed:
-        service_tier:
-        stop:
-        store:
-        top_logprobs:
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -1202,7 +1190,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         if tool_choice:
             tool_choice_param = refactor_tool_choice(tool_choice)
 
-        if max_tokens is None:
+        if not max_tokens:
            max_output_tokens = self.model_setting.max_output_tokens
            native_multimodal = self.model_setting.native_multimodal
            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
@@ -1229,9 +1217,9 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 thinking=thinking,
             )
         except AnthropicAPIStatusError as e:
-            raise APIStatusError(message=e.message, response=e.response, body=e.body)
+            raise APIStatusError(message=e.message, response=e.response, body=e.body) from e
         except AnthropicAPIConnectionError as e:
-            raise APIConnectionError(message=e.message, request=e.request)
+            raise APIConnectionError(message=e.message, request=e.request) from e
 
         async def generator():
             result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": [], "raw_content": []}
@@ -1360,9 +1348,9 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 thinking=thinking,
             )
         except AnthropicAPIStatusError as e:
-            raise APIStatusError(message=e.message, response=e.response, body=e.body)
+            raise APIStatusError(message=e.message, response=e.response, body=e.body) from e
         except AnthropicAPIConnectionError as e:
-            raise APIConnectionError(message=e.message, request=e.request)
+            raise APIConnectionError(message=e.message, request=e.request) from e
 
         result = {
             "content": "",