vectorvein 0.2.5__tar.gz → 0.2.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {vectorvein-0.2.5 → vectorvein-0.2.7}/PKG-INFO +1 -1
  2. {vectorvein-0.2.5 → vectorvein-0.2.7}/pyproject.toml +1 -1
  3. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/anthropic_client.py +28 -2
  4. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/types/defaults.py +16 -0
  5. {vectorvein-0.2.5 → vectorvein-0.2.7}/README.md +0 -0
  6. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/api/__init__.py +0 -0
  8. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/api/client.py +0 -0
  9. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/api/exceptions.py +0 -0
  10. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/api/models.py +0 -0
  11. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/__init__.py +0 -0
  12. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  13. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/base_client.py +0 -0
  14. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  15. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  16. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/groq_client.py +0 -0
  17. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/local_client.py +0 -0
  18. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  19. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  20. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  21. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/openai_client.py +0 -0
  22. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  23. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/py.typed +0 -0
  24. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  25. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  26. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/utils.py +0 -0
  27. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/xai_client.py +0 -0
  28. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/yi_client.py +0 -0
  29. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  30. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/py.typed +0 -0
  31. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/server/token_server.py +0 -0
  32. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/settings/__init__.py +0 -0
  33. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/settings/py.typed +0 -0
  34. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/types/enums.py +0 -0
  35. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/types/exception.py +0 -0
  36. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/types/llm_parameters.py +0 -0
  37. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/types/py.typed +0 -0
  38. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/utilities/media_processing.py +0 -0
  39. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/utilities/rate_limiter.py +0 -0
  40. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/utilities/retry.py +0 -0
  41. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/graph/edge.py +0 -0
  42. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/graph/node.py +0 -0
  43. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/graph/port.py +0 -0
  44. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/graph/workflow.py +0 -0
  45. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  46. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  47. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  48. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
  49. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
  50. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/llms.py +0 -0
  51. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
  52. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
  53. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/output.py +0 -0
  54. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  55. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  56. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/tools.py +0 -0
  57. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  58. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  59. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  60. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  61. {vectorvein-0.2.5 → vectorvein-0.2.7}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.2.5
3
+ Version: 0.2.7
4
4
  Summary: VectorVein python SDK
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -18,7 +18,7 @@ description = "VectorVein python SDK"
18
18
  name = "vectorvein"
19
19
  readme = "README.md"
20
20
  requires-python = ">=3.10"
21
- version = "0.2.5"
21
+ version = "0.2.7"
22
22
 
23
23
  [project.license]
24
24
  text = "MIT"
@@ -17,6 +17,8 @@ from anthropic import (
17
17
  from anthropic._types import NOT_GIVEN
18
18
  from anthropic.types import (
19
19
  TextBlock,
20
+ ThinkingBlock,
21
+ RedactedThinkingBlock,
20
22
  MessageParam,
21
23
  ToolUseBlock,
22
24
  RawMessageDeltaEvent,
@@ -128,6 +130,8 @@ def refactor_into_openai_messages(messages: Iterable[MessageParam]):
128
130
  for item in content:
129
131
  if isinstance(item, (TextBlock, ToolUseBlock)):
130
132
  _content.append(item.model_dump())
133
+ elif isinstance(item, (ThinkingBlock, RedactedThinkingBlock)):
134
+ continue
131
135
  elif item.get("type") == "image":
132
136
  image_data = item.get("source", {}).get("data", "")
133
137
  media_type = item.get("source", {}).get("media_type", "")
@@ -338,6 +342,9 @@ class AnthropicChatClient(BaseChatClient):
338
342
 
339
343
  formatted_messages = refactor_into_openai_messages(messages)
340
344
 
345
+ if "thinking" in kwargs:
346
+ kwargs.pop("thinking") # TODO: 暂时没看到如何处理 openai 接口的 thinking 参数,如 openrouter 中使用
347
+
341
348
  if self.stream:
342
349
 
343
350
  def _generator():
@@ -451,7 +458,7 @@ class AnthropicChatClient(BaseChatClient):
451
458
  )
452
459
 
453
460
  def generator():
454
- result = {"content": "", "usage": {}, "tool_calls": []}
461
+ result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": []}
455
462
  for chunk in stream_response:
456
463
  message = {"content": "", "tool_calls": []}
457
464
  if isinstance(chunk, RawMessageStartEvent):
@@ -472,11 +479,16 @@ class AnthropicChatClient(BaseChatClient):
472
479
  ]
473
480
  elif chunk.content_block.type == "text":
474
481
  message["content"] = chunk.content_block.text
482
+ elif chunk.content_block.type == "thinking":
483
+ message["reasoning_content"] = chunk.content_block.thinking
475
484
  yield ChatCompletionDeltaMessage(**message)
476
485
  elif isinstance(chunk, RawContentBlockDeltaEvent):
477
486
  if chunk.delta.type == "text_delta":
478
487
  message["content"] = chunk.delta.text
479
488
  result["content"] += chunk.delta.text
489
+ elif chunk.delta.type == "thinking_delta":
490
+ message["reasoning_content"] = chunk.delta.thinking
491
+ result["reasoning_content"] += chunk.delta.thinking
480
492
  elif chunk.delta.type == "input_json_delta":
481
493
  result["tool_calls"][0]["function"]["arguments"] += chunk.delta.partial_json
482
494
  message["tool_calls"] = [
@@ -521,6 +533,7 @@ class AnthropicChatClient(BaseChatClient):
521
533
 
522
534
  result = {
523
535
  "content": "",
536
+ "reasoning_content": "",
524
537
  "usage": {
525
538
  "prompt_tokens": response.usage.input_tokens,
526
539
  "completion_tokens": response.usage.output_tokens,
@@ -531,6 +544,8 @@ class AnthropicChatClient(BaseChatClient):
531
544
  for content_block in response.content:
532
545
  if isinstance(content_block, TextBlock):
533
546
  result["content"] += content_block.text
547
+ elif isinstance(content_block, ThinkingBlock):
548
+ result["reasoning_content"] = content_block.thinking
534
549
  elif isinstance(content_block, ToolUseBlock):
535
550
  tool_calls.append(content_block.model_dump())
536
551
 
@@ -728,6 +743,9 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
728
743
 
729
744
  formatted_messages = refactor_into_openai_messages(messages)
730
745
 
746
+ if "thinking" in kwargs:
747
+ kwargs.pop("thinking") # TODO: 暂时没看到如何处理 openai 接口的 thinking 参数,如 openrouter 中使用
748
+
731
749
  if self.stream:
732
750
 
733
751
  async def _generator():
@@ -843,7 +861,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
843
861
  )
844
862
 
845
863
  async def generator():
846
- result = {"content": "", "usage": {}, "tool_calls": []}
864
+ result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": []}
847
865
  async for chunk in stream_response:
848
866
  message = {"content": "", "tool_calls": []}
849
867
  if isinstance(chunk, RawMessageStartEvent):
@@ -864,11 +882,16 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
864
882
  ]
865
883
  elif chunk.content_block.type == "text":
866
884
  message["content"] = chunk.content_block.text
885
+ elif chunk.content_block.type == "thinking":
886
+ message["reasoning_content"] = chunk.content_block.thinking
867
887
  yield ChatCompletionDeltaMessage(**message)
868
888
  elif isinstance(chunk, RawContentBlockDeltaEvent):
869
889
  if chunk.delta.type == "text_delta":
870
890
  message["content"] = chunk.delta.text
871
891
  result["content"] += chunk.delta.text
892
+ elif chunk.delta.type == "thinking_delta":
893
+ message["reasoning_content"] = chunk.delta.thinking
894
+ result["reasoning_content"] += chunk.delta.thinking
872
895
  elif chunk.delta.type == "input_json_delta":
873
896
  result["tool_calls"][0]["function"]["arguments"] += chunk.delta.partial_json
874
897
  message["tool_calls"] = [
@@ -913,6 +936,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
913
936
 
914
937
  result = {
915
938
  "content": "",
939
+ "reasoning_content": "",
916
940
  "usage": {
917
941
  "prompt_tokens": response.usage.input_tokens,
918
942
  "completion_tokens": response.usage.output_tokens,
@@ -923,6 +947,8 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
923
947
  for content_block in response.content:
924
948
  if isinstance(content_block, TextBlock):
925
949
  result["content"] += content_block.text
950
+ elif isinstance(content_block, ThinkingBlock):
951
+ result["reasoning_content"] = content_block.thinking
926
952
  elif isinstance(content_block, ToolUseBlock):
927
953
  tool_calls.append(content_block.model_dump())
928
954
 
@@ -541,6 +541,14 @@ OPENAI_MODELS: Final[Dict[str, Dict[str, Any]]] = {
541
541
  "response_format_available": False,
542
542
  "native_multimodal": False,
543
543
  },
544
+ "o3-mini": {
545
+ "id": "o3-mini",
546
+ "context_length": 200000,
547
+ "max_output_tokens": 100000,
548
+ "function_call_available": True,
549
+ "response_format_available": True,
550
+ "native_multimodal": False,
551
+ },
544
552
  }
545
553
 
546
554
  # Anthropic models
@@ -594,6 +602,14 @@ ANTHROPIC_MODELS: Final[Dict[str, Dict[str, Any]]] = {
594
602
  "response_format_available": False,
595
603
  "native_multimodal": True,
596
604
  },
605
+ "claude-3-7-sonnet-20250219": {
606
+ "id": "claude-3-7-sonnet-20250219",
607
+ "context_length": 128000,
608
+ "max_output_tokens": 8192,
609
+ "function_call_available": True,
610
+ "response_format_available": False,
611
+ "native_multimodal": True,
612
+ },
597
613
  }
598
614
 
599
615
  # Minimax models
The remaining files listed above have no content changes between these versions.