vectorvein 0.2.80__tar.gz → 0.2.82__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67):
  1. {vectorvein-0.2.80 → vectorvein-0.2.82}/PKG-INFO +1 -1
  2. {vectorvein-0.2.80 → vectorvein-0.2.82}/pyproject.toml +1 -1
  3. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/openai_compatible_client.py +4 -4
  4. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/types/defaults.py +73 -1
  5. {vectorvein-0.2.80 → vectorvein-0.2.82}/README.md +0 -0
  6. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/api/__init__.py +0 -0
  8. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/api/client.py +0 -0
  9. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/api/exceptions.py +0 -0
  10. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/api/models.py +0 -0
  11. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/__init__.py +0 -0
  12. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  13. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  14. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/base_client.py +0 -0
  15. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  16. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/ernie_client.py +0 -0
  17. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  18. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/groq_client.py +0 -0
  19. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/local_client.py +0 -0
  20. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  21. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  22. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  23. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/openai_client.py +0 -0
  24. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/py.typed +0 -0
  25. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  26. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  27. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/utils.py +0 -0
  28. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/xai_client.py +0 -0
  29. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/yi_client.py +0 -0
  30. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  31. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/py.typed +0 -0
  32. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/server/token_server.py +0 -0
  33. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/settings/__init__.py +0 -0
  34. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/settings/py.typed +0 -0
  35. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/types/__init__.py +0 -0
  36. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/types/enums.py +0 -0
  37. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/types/exception.py +0 -0
  38. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/types/llm_parameters.py +0 -0
  39. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/types/py.typed +0 -0
  40. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/types/settings.py +0 -0
  41. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/utilities/media_processing.py +0 -0
  42. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/utilities/rate_limiter.py +0 -0
  43. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/utilities/retry.py +0 -0
  44. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/graph/edge.py +0 -0
  45. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/graph/node.py +0 -0
  46. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/graph/port.py +0 -0
  47. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/graph/workflow.py +0 -0
  48. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  49. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  50. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  51. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
  52. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
  53. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/llms.py +0 -0
  54. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
  55. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
  56. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/output.py +0 -0
  57. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  58. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  59. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/tools.py +0 -0
  60. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  61. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  62. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  63. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  64. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/utils/analyse.py +0 -0
  65. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/utils/check.py +0 -0
  66. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
  67. {vectorvein-0.2.80 → vectorvein-0.2.82}/src/vectorvein/workflow/utils/layout.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.2.80
3
+ Version: 0.2.82
4
4
  Summary: VectorVein Python SDK
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -17,7 +17,7 @@ description = "VectorVein Python SDK"
17
17
  name = "vectorvein"
18
18
  readme = "README.md"
19
19
  requires-python = ">=3.10"
20
- version = "0.2.80"
20
+ version = "0.2.82"
21
21
 
22
22
  [project.license]
23
23
  text = "MIT"
@@ -99,7 +99,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
99
99
  return AzureOpenAI(
100
100
  azure_endpoint=self.endpoint.api_base,
101
101
  api_key=self.endpoint.api_key,
102
- api_version="2025-01-01-preview",
102
+ api_version="2025-03-01-preview",
103
103
  http_client=self.http_client,
104
104
  )
105
105
  else:
@@ -336,7 +336,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
336
336
  else:
337
337
  max_tokens = self.model_setting.context_length - token_counts - 64
338
338
 
339
- if "o3-mini" in self.model_id:
339
+ if "o3-mini" in self.model_id or "o4-mini" in self.model_id:
340
340
  max_completion_tokens = max_tokens
341
341
  max_tokens = NOT_GIVEN
342
342
 
@@ -581,7 +581,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
581
581
  return AsyncAzureOpenAI(
582
582
  azure_endpoint=self.endpoint.api_base,
583
583
  api_key=self.endpoint.api_key,
584
- api_version="2025-01-01-preview",
584
+ api_version="2025-03-01-preview",
585
585
  http_client=self.http_client,
586
586
  )
587
587
  else:
@@ -818,7 +818,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
818
818
  else:
819
819
  max_tokens = self.model_setting.context_length - token_counts - 64
820
820
 
821
- if "o3-mini" in self.model_id:
821
+ if "o3-mini" in self.model_id or "o4-mini" in self.model_id:
822
822
  max_completion_tokens = max_tokens
823
823
  max_tokens = NOT_GIVEN
824
824
 
@@ -469,6 +469,30 @@ ZHIPUAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
469
469
  "max_output_tokens": 20480,
470
470
  "native_multimodal": False,
471
471
  },
472
+ "glm-z1-air": {
473
+ "id": "glm-z1-air",
474
+ "context_length": 32000,
475
+ "function_call_available": False,
476
+ "response_format_available": False,
477
+ "max_output_tokens": 32000,
478
+ "native_multimodal": False,
479
+ },
480
+ "glm-z1-airx": {
481
+ "id": "glm-z1-airx",
482
+ "context_length": 32000,
483
+ "function_call_available": False,
484
+ "response_format_available": False,
485
+ "max_output_tokens": 30000,
486
+ "native_multimodal": False,
487
+ },
488
+ "glm-z1-flash": {
489
+ "id": "glm-z1-flash",
490
+ "context_length": 32000,
491
+ "function_call_available": False,
492
+ "response_format_available": False,
493
+ "max_output_tokens": 32000,
494
+ "native_multimodal": False,
495
+ },
472
496
  }
473
497
 
474
498
  # Mistral models
@@ -599,6 +623,14 @@ OPENAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
599
623
  "response_format_available": False,
600
624
  "native_multimodal": False,
601
625
  },
626
+ "o3": {
627
+ "id": "o3",
628
+ "context_length": 200000,
629
+ "max_output_tokens": 100000,
630
+ "function_call_available": True,
631
+ "response_format_available": True,
632
+ "native_multimodal": True,
633
+ },
602
634
  "o3-mini": {
603
635
  "id": "o3-mini",
604
636
  "context_length": 200000,
@@ -607,6 +639,14 @@ OPENAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
607
639
  "response_format_available": True,
608
640
  "native_multimodal": False,
609
641
  },
642
+ "o4-mini": {
643
+ "id": "o4-mini",
644
+ "context_length": 200000,
645
+ "max_output_tokens": 100000,
646
+ "function_call_available": True,
647
+ "response_format_available": True,
648
+ "native_multimodal": True,
649
+ },
610
650
  "gpt-4.1": {
611
651
  "id": "gpt-4.1",
612
652
  "context_length": 1047576,
@@ -615,6 +655,22 @@ OPENAI_MODELS: Final[Dict[str, ModelSettingDict]] = {
615
655
  "response_format_available": True,
616
656
  "native_multimodal": True,
617
657
  },
658
+ "gpt-4.1-mini": {
659
+ "id": "gpt-4.1-mini",
660
+ "context_length": 1047576,
661
+ "max_output_tokens": 32768,
662
+ "function_call_available": True,
663
+ "response_format_available": True,
664
+ "native_multimodal": True,
665
+ },
666
+ "gpt-4.1-nano": {
667
+ "id": "gpt-4.1-nano",
668
+ "context_length": 1047576,
669
+ "max_output_tokens": 32768,
670
+ "function_call_available": True,
671
+ "response_format_available": True,
672
+ "native_multimodal": True,
673
+ },
618
674
  }
619
675
 
620
676
  # Anthropic models
@@ -816,7 +872,23 @@ GEMINI_MODELS: Final[Dict[str, ModelSettingDict]] = {
816
872
  "gemini-2.5-pro-exp-03-25": {
817
873
  "id": "gemini-2.5-pro-exp-03-25",
818
874
  "context_length": 1048576,
819
- "max_output_tokens": 8192,
875
+ "max_output_tokens": 65536,
876
+ "function_call_available": True,
877
+ "response_format_available": True,
878
+ "native_multimodal": True,
879
+ },
880
+ "gemini-2.5-pro-preview-03-25": {
881
+ "id": "gemini-2.5-pro-preview-03-25",
882
+ "context_length": 1048576,
883
+ "max_output_tokens": 65536,
884
+ "function_call_available": True,
885
+ "response_format_available": True,
886
+ "native_multimodal": True,
887
+ },
888
+ "gemini-2.5-flash-preview-04-17": {
889
+ "id": "gemini-2.5-flash-preview-04-17",
890
+ "context_length": 1048576,
891
+ "max_output_tokens": 65536,
820
892
  "function_call_available": True,
821
893
  "response_format_available": True,
822
894
  "native_multimodal": True,
(The remaining files listed above are unchanged between versions.)