vectorvein 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. vectorvein/api/client.py +81 -103
  2. vectorvein/api/exceptions.py +1 -3
  3. vectorvein/api/models.py +11 -11
  4. vectorvein/chat_clients/anthropic_client.py +157 -169
  5. vectorvein/chat_clients/base_client.py +257 -198
  6. vectorvein/chat_clients/openai_compatible_client.py +150 -161
  7. vectorvein/chat_clients/utils.py +44 -24
  8. vectorvein/server/token_server.py +1 -1
  9. vectorvein/settings/__init__.py +27 -27
  10. vectorvein/types/defaults.py +32 -16
  11. vectorvein/types/llm_parameters.py +40 -34
  12. vectorvein/types/settings.py +10 -10
  13. vectorvein/utilities/media_processing.py +1 -1
  14. vectorvein/utilities/rate_limiter.py +5 -6
  15. vectorvein/utilities/retry.py +6 -5
  16. vectorvein/workflow/graph/edge.py +3 -3
  17. vectorvein/workflow/graph/node.py +14 -26
  18. vectorvein/workflow/graph/port.py +40 -39
  19. vectorvein/workflow/graph/workflow.py +13 -25
  20. vectorvein/workflow/nodes/audio_generation.py +5 -7
  21. vectorvein/workflow/nodes/control_flows.py +7 -9
  22. vectorvein/workflow/nodes/file_processing.py +4 -6
  23. vectorvein/workflow/nodes/image_generation.py +20 -22
  24. vectorvein/workflow/nodes/llms.py +13 -15
  25. vectorvein/workflow/nodes/media_editing.py +26 -40
  26. vectorvein/workflow/nodes/media_processing.py +19 -21
  27. vectorvein/workflow/nodes/output.py +10 -12
  28. vectorvein/workflow/nodes/relational_db.py +3 -5
  29. vectorvein/workflow/nodes/text_processing.py +8 -10
  30. vectorvein/workflow/nodes/tools.py +8 -10
  31. vectorvein/workflow/nodes/triggers.py +1 -3
  32. vectorvein/workflow/nodes/vector_db.py +3 -5
  33. vectorvein/workflow/nodes/video_generation.py +4 -6
  34. vectorvein/workflow/nodes/web_crawlers.py +4 -6
  35. vectorvein/workflow/utils/analyse.py +5 -13
  36. vectorvein/workflow/utils/check.py +6 -16
  37. vectorvein/workflow/utils/json_to_code.py +6 -14
  38. vectorvein/workflow/utils/layout.py +3 -5
  39. {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/METADATA +1 -1
  40. vectorvein-0.3.3.dist-info/RECORD +68 -0
  41. {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/WHEEL +1 -1
  42. vectorvein-0.3.1.dist-info/RECORD +0 -68
  43. {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/entry_points.txt +0 -0
@@ -1,19 +1,8 @@
 # @Author: Bi Ying
 # @Date: 2024-07-26 14:48:55
 import json
-from typing import (
-    Any,
-    Dict,
-    List,
-    TYPE_CHECKING,
-    overload,
-    Generator,
-    AsyncGenerator,
-    Union,
-    Literal,
-    Iterable,
-    Optional,
-)
+from collections.abc import Generator, AsyncGenerator, Iterable
+from typing import Any, TYPE_CHECKING, overload, Literal
 
 import httpx
 from openai._types import NotGiven as OpenAINotGiven
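
For context on this import change: since Python 3.9 (PEP 585), the generic aliases in typing (Dict, List, Generator, ...) are deprecated in favor of builtin generics and the ABCs in collections.abc, which is what 0.3.3 switches to. A minimal sketch of the same style (names here are illustrative, not from the package):

    from collections.abc import Generator, Iterable

    def chunk_texts(texts: Iterable[str], size: int) -> Generator[list[str], None, None]:
        # Builtin generics (list[str]) replace typing.List; the iterable and
        # generator ABCs now come from collections.abc instead of typing.
        batch: list[str] = []
        for text in texts:
            batch.append(text)
            if len(batch) == size:
                yield batch
                batch = []
        if batch:
            yield batch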
@@ -154,9 +143,9 @@ def refactor_into_openai_messages(messages: Iterable[MessageParam]):
         elif isinstance(content, list):
             _content = []
             for item in content:
-                if isinstance(item, (TextBlock, ToolUseBlock)):
+                if isinstance(item, TextBlock | ToolUseBlock):
                     _content.append(item.model_dump())
-                elif isinstance(item, (ThinkingBlock, RedactedThinkingBlock)):
+                elif isinstance(item, ThinkingBlock | RedactedThinkingBlock):
                     continue
                 elif item.get("type") == "image":
                     image_data = item.get("source", {}).get("data", "")
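
Since Python 3.10 (PEP 604), isinstance accepts a union type directly, so the X | Y form above is equivalent to the tuple form it replaces. A standalone sketch with illustrative stand-in classes:

    class TextBlock: ...
    class ToolUseBlock: ...

    item = TextBlock()
    # A types.UnionType works anywhere a tuple of types did (Python 3.10+).
    assert isinstance(item, TextBlock | ToolUseBlock)
    assert isinstance(item, (TextBlock, ToolUseBlock))  # older, equivalent form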
@@ -290,32 +279,32 @@ class AnthropicChatClient(BaseChatClient):
         messages: list,
         model: str | None = None,
         stream: Literal[False] = False,
-        temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
-        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
-        tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
-        response_format: ResponseFormat | NotGiven = NOT_GIVEN,
+        temperature: float | None | NotGiven = OPENAI_NOT_GIVEN,
+        max_tokens: int | None | NotGiven = None,
+        tools: Iterable[ToolParam] | NotGiven = OPENAI_NOT_GIVEN,
+        tool_choice: ToolChoice | NotGiven = OPENAI_NOT_GIVEN,
+        response_format: ResponseFormat | NotGiven = OPENAI_NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
-        top_p: float | NotGiven | None = NOT_GIVEN,
+        top_p: float | NotGiven | None = OPENAI_NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio: Optional[ChatCompletionAudioParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        frequency_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logprobs: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        max_completion_tokens: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        metadata: Optional[Metadata] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        n: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction: Optional[ChatCompletionPredictionContentParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        presence_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        store: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        top_logprobs: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        thinking: ThinkingConfigParam | None | NotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
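
The OPENAI_NOT_GIVEN defaults follow the sentinel pattern the openai SDK uses to distinguish "argument omitted" from an explicit None. A minimal sketch of that pattern (a hypothetical NotGiven reimplementation, not the SDK's own code):

    from typing import Literal

    class NotGiven:
        # Falsy sentinel: callers can still pass None deliberately, while the
        # library can detect that an argument was never supplied at all.
        def __bool__(self) -> Literal[False]:
            return False

    NOT_GIVEN = NotGiven()

    def create(max_tokens: int | None | NotGiven = NOT_GIVEN) -> str:
        if isinstance(max_tokens, NotGiven):
            return "omitted: apply the model default"
        if max_tokens is None:
            return "explicit None: disable the limit"
        return f"explicit value: {max_tokens}"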
@@ -332,31 +321,31 @@ class AnthropicChatClient(BaseChatClient):
         model: str | None = None,
         stream: Literal[True],
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio: Optional[ChatCompletionAudioParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        frequency_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logprobs: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        max_completion_tokens: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        metadata: Optional[Metadata] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        n: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction: Optional[ChatCompletionPredictionContentParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        presence_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        store: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        top_logprobs: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -373,31 +362,31 @@ class AnthropicChatClient(BaseChatClient):
         model: str | None = None,
         stream: bool,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio: Optional[ChatCompletionAudioParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        frequency_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logprobs: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        max_completion_tokens: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        metadata: Optional[Metadata] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        n: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction: Optional[ChatCompletionPredictionContentParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        presence_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        store: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        top_logprobs: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -413,31 +402,31 @@ class AnthropicChatClient(BaseChatClient):
         model: str | None = None,
         stream: Literal[False] | Literal[True] = False,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio: Optional[ChatCompletionAudioParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        frequency_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logprobs: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        max_completion_tokens: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        metadata: Optional[Metadata] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        n: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction: Optional[ChatCompletionPredictionContentParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        presence_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        store: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        top_logprobs: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
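
These four near-identical signatures exist because @overload with Literal stream flags lets type checkers infer the return type from the stream argument: False yields a response object, True yields a generator. A condensed sketch of the idiom (illustrative names and return types, not the package's real signatures):

    from collections.abc import Generator
    from typing import Literal, overload

    @overload
    def create_completion(stream: Literal[False] = False) -> dict: ...
    @overload
    def create_completion(stream: Literal[True]) -> Generator[dict, None, None]: ...
    @overload
    def create_completion(stream: bool) -> dict | Generator[dict, None, None]: ...

    def create_completion(stream: bool = False):
        # Single runtime implementation behind the typing-only overloads above.
        if stream:
            return ({"delta": c} for c in "hi")
        return {"content": "hi"}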
@@ -510,8 +499,7 @@ class AnthropicChatClient(BaseChatClient):
                     extra_body=extra_body,
                     timeout=timeout,
                 )
-                for chunk in response:
-                    yield chunk
+                yield from response
 
             return _generator()
         else:
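
yield from delegates to the sub-iterator directly and is the idiomatic replacement for a pass-through loop; it also forwards send() and propagates throw()/close() to the inner generator. A tiny equivalence sketch:

    def stream_words():
        yield from ["hello", "world"]   # replaces: for w in [...]: yield w

    assert list(stream_words()) == ["hello", "world"]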
@@ -594,7 +582,7 @@ class AnthropicChatClient(BaseChatClient):
         if tool_choice:
             tool_choice_param = refactor_tool_choice(tool_choice)
 
-        if max_tokens is None:
+        if not max_tokens:
             max_output_tokens = self.model_setting.max_output_tokens
             native_multimodal = self.model_setting.native_multimodal
             token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
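
Note the semantic widening here: with max_tokens now typed int | None | NotGiven, the truthiness test covers None, the falsy NotGiven sentinel, and also 0, whereas the old is-None test matched only the first case. A sketch of the difference (hypothetical falsy sentinel for illustration):

    class NotGiven:
        def __bool__(self):
            return False

    for value in (None, NotGiven(), 0, 1024):
        # `not value` is True for None, NotGiven(), and 0;
        # `value is None` is True only for None.
        print(value, not value, value is None)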
@@ -621,9 +609,9 @@ class AnthropicChatClient(BaseChatClient):
                 thinking=thinking,
             )
         except AnthropicAPIStatusError as e:
-            raise APIStatusError(message=e.message, response=e.response, body=e.body)
+            raise APIStatusError(message=e.message, response=e.response, body=e.body) from e
         except AnthropicAPIConnectionError as e:
-            raise APIConnectionError(message=e.message, request=e.request)
+            raise APIConnectionError(message=e.message, request=e.request) from e
 
         def generator():
             result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": [], "raw_content": []}
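
Appending from e sets __cause__ on the re-raised exception, so tracebacks read "The above exception was the direct cause of..." instead of the misleading "During handling of the above exception, another exception occurred". A minimal sketch with stand-in exception types:

    class UpstreamError(Exception): ...
    class WrappedStatusError(Exception): ...

    try:
        try:
            raise UpstreamError("provider returned 500")
        except UpstreamError as e:
            # Explicit chaining preserves the original traceback as __cause__.
            raise WrappedStatusError("wrapped for callers") from e
    except WrappedStatusError as err:
        assert isinstance(err.__cause__, UpstreamError)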
@@ -752,9 +740,9 @@ class AnthropicChatClient(BaseChatClient):
                 thinking=thinking,
             )
         except AnthropicAPIStatusError as e:
-            raise APIStatusError(message=e.message, response=e.response, body=e.body)
+            raise APIStatusError(message=e.message, response=e.response, body=e.body) from e
         except AnthropicAPIConnectionError as e:
-            raise APIConnectionError(message=e.message, request=e.request)
+            raise APIConnectionError(message=e.message, request=e.request) from e
 
         result = {
             "content": "",
@@ -897,31 +885,31 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         model: str | None = None,
         stream: Literal[False] = False,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio: Optional[ChatCompletionAudioParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        frequency_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logprobs: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        max_completion_tokens: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        metadata: Optional[Metadata] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        n: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        audio: ChatCompletionAudioParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction: Optional[ChatCompletionPredictionContentParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        presence_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        store: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        top_logprobs: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -938,31 +926,31 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         model: str | None = None,
         stream: Literal[True],
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio: Optional[ChatCompletionAudioParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        frequency_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logprobs: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        max_completion_tokens: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        metadata: Optional[Metadata] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        n: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        audio: ChatCompletionAudioParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction: Optional[ChatCompletionPredictionContentParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        presence_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        store: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        top_logprobs: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -979,31 +967,31 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         model: str | None = None,
         stream: bool,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio: Optional[ChatCompletionAudioParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        frequency_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logprobs: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        max_completion_tokens: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        metadata: Optional[Metadata] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        n: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        audio: ChatCompletionAudioParam | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction: Optional[ChatCompletionPredictionContentParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        presence_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        store: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        top_logprobs: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -1019,31 +1007,31 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         model: str | None = None,
         stream: Literal[False] | Literal[True] = False,
         temperature: float | None | NotGiven = NOT_GIVEN,
-        max_tokens: int | None = None,
+        max_tokens: int | None | NotGiven = None,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
         response_format: ResponseFormat | NotGiven = NOT_GIVEN,
         stream_options: ChatCompletionStreamOptionsParam | None = None,
         top_p: float | NotGiven | None = NOT_GIVEN,
         skip_cutoff: bool = False,
-        audio: Optional[ChatCompletionAudioParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        frequency_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logit_bias: Optional[Dict[str, int]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        logprobs: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        max_completion_tokens: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        metadata: Optional[Metadata] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        modalities: Optional[List[ChatCompletionModality]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        n: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        audio: ChatCompletionAudioParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        frequency_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logit_bias: dict[str, int] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        logprobs: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        max_completion_tokens: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        metadata: Metadata | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        modalities: list[ChatCompletionModality] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        n: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         parallel_tool_calls: bool | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        prediction: Optional[ChatCompletionPredictionContentParam] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        presence_penalty: Optional[float] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        reasoning_effort: Optional[ChatCompletionReasoningEffort] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        prediction: ChatCompletionPredictionContentParam | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        presence_penalty: float | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         thinking: ThinkingConfigParam | None | NotGiven = NOT_GIVEN,
-        seed: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        stop: Union[Optional[str], List[str]] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        store: Optional[bool] | OpenAINotGiven = OPENAI_NOT_GIVEN,
-        top_logprobs: Optional[int] | OpenAINotGiven = OPENAI_NOT_GIVEN,
+        seed: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        service_tier: Literal["auto", "default"] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        stop: str | list[str] | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        store: bool | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
+        top_logprobs: int | OpenAINotGiven | None = OPENAI_NOT_GIVEN,
         user: str | OpenAINotGiven = OPENAI_NOT_GIVEN,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -1202,7 +1190,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         if tool_choice:
             tool_choice_param = refactor_tool_choice(tool_choice)
 
-        if max_tokens is None:
+        if not max_tokens:
             max_output_tokens = self.model_setting.max_output_tokens
             native_multimodal = self.model_setting.native_multimodal
             token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
@@ -1229,9 +1217,9 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 thinking=thinking,
             )
         except AnthropicAPIStatusError as e:
-            raise APIStatusError(message=e.message, response=e.response, body=e.body)
+            raise APIStatusError(message=e.message, response=e.response, body=e.body) from e
         except AnthropicAPIConnectionError as e:
-            raise APIConnectionError(message=e.message, request=e.request)
+            raise APIConnectionError(message=e.message, request=e.request) from e
 
         async def generator():
             result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": [], "raw_content": []}
@@ -1360,9 +1348,9 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 thinking=thinking,
             )
         except AnthropicAPIStatusError as e:
-            raise APIStatusError(message=e.message, response=e.response, body=e.body)
+            raise APIStatusError(message=e.message, response=e.response, body=e.body) from e
         except AnthropicAPIConnectionError as e:
-            raise APIConnectionError(message=e.message, request=e.request)
+            raise APIConnectionError(message=e.message, request=e.request) from e
 
         result = {
             "content": "",