vectorvein 0.1.73__tar.gz → 0.1.75__tar.gz

This diff shows the changes between two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Files changed (35)
  1. {vectorvein-0.1.73 → vectorvein-0.1.75}/PKG-INFO +1 -1
  2. {vectorvein-0.1.73 → vectorvein-0.1.75}/pyproject.toml +1 -1
  3. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/minimax_client.py +4 -4
  4. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/utils.py +1 -1
  5. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/types/defaults.py +7 -0
  6. {vectorvein-0.1.73 → vectorvein-0.1.75}/README.md +0 -0
  7. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/__init__.py +0 -0
  8. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/__init__.py +0 -0
  9. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  10. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  11. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/base_client.py +0 -0
  12. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  13. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  14. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/groq_client.py +0 -0
  15. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/local_client.py +0 -0
  16. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  17. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  18. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/openai_client.py +0 -0
  19. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  20. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/py.typed +0 -0
  21. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  22. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  23. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/xai_client.py +0 -0
  24. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/yi_client.py +0 -0
  25. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  26. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/py.typed +0 -0
  27. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/server/token_server.py +0 -0
  28. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/settings/__init__.py +0 -0
  29. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/settings/py.typed +0 -0
  30. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/types/enums.py +0 -0
  31. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/types/exception.py +0 -0
  32. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/types/llm_parameters.py +0 -0
  33. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/types/py.typed +0 -0
  34. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/utilities/media_processing.py +0 -0
  35. {vectorvein-0.1.73 → vectorvein-0.1.75}/src/vectorvein/utilities/retry.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.73
+Version: 0.1.75
 Summary: VectorVein python SDK
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
pyproject.toml
@@ -17,7 +17,7 @@ description = "VectorVein python SDK"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.1.73"
+version = "0.1.75"
 
 [project.license]
 text = "MIT"
src/vectorvein/chat_clients/minimax_client.py
@@ -244,7 +244,7 @@ class MiniMaxChatClient(BaseChatClient):
             url=self.url,
             headers=self.headers,
             json=request_body,
-            timeout=60,
+            timeout=300,
         ) as response:
             for chunk in response.iter_lines():
                 if chunk:
@@ -275,7 +275,7 @@ class MiniMaxChatClient(BaseChatClient):
             url=self.url,
             headers=self.headers,
             json=request_body,
-            timeout=60,
+            timeout=300,
         )
         result = response.json()
         tool_calls_params = extract_tool_calls(result)
@@ -491,7 +491,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
             url=self.url,
             headers=self.headers,
             json=request_body,
-            timeout=60,
+            timeout=300,
         ) as response:
             has_tool_calls = False
             async for chunk in response.aiter_lines():
@@ -523,7 +523,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
             url=self.url,
             headers=self.headers,
             json=request_body,
-            timeout=60,
+            timeout=300,
         )
         result = response.json()
         tool_calls_params = extract_tool_calls(result)
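
The four minimax_client.py hunks above make the same change: the per-request timeout is raised from 60 to 300 seconds in both the sync and async clients, for the streaming and non-streaming paths alike, so long generations are less likely to be cut off. Below is a minimal sketch of the streaming call pattern these hunks touch, assuming httpx as the HTTP backend (the iter_lines/aiter_lines calls suggest it); the URL, API key, and request body are illustrative placeholders, not the SDK's actual values.

    # Minimal sketch of the streaming call shown in the hunks above.
    # Assumes httpx; MINIMAX_API_URL, API_KEY and request_body are placeholders.
    import httpx

    MINIMAX_API_URL = "https://api.example.com/v1/text/chatcompletion_v2"  # placeholder
    API_KEY = "sk-..."  # placeholder
    request_body = {
        "model": "MiniMax-Text-01",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": True,
    }

    with httpx.Client() as client:
        with client.stream(
            "POST",
            MINIMAX_API_URL,
            headers={"Authorization": f"Bearer {API_KEY}"},
            json=request_body,
            timeout=300,  # was 60; a larger budget keeps slow generations from timing out
        ) as response:
            for chunk in response.iter_lines():
                if chunk:
                    print(chunk)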
src/vectorvein/chat_clients/utils.py
@@ -243,7 +243,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
         endpoint_id = endpoint_choice
         endpoint = settings.get_endpoint(endpoint_id)
 
-        if endpoint.is_vertex:
+        if endpoint.is_vertex or endpoint.is_bedrock:
             continue
         elif endpoint.api_schema_type == "default":
             return (
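
The utils.py hunk widens the skip condition in get_token_counts: Bedrock endpoints are now skipped along with Vertex ones when choosing an endpoint to serve a token-count request, presumably because neither exposes the default API schema the rest of the branch relies on. The following rough sketch shows that selection logic with simplified stand-in types; the Endpoint dataclass and pick_countable_endpoint helper are illustrative, not the SDK's real objects.

    # Rough sketch of the endpoint-selection loop this hunk changes.
    # The Endpoint dataclass below is a simplified stand-in, not the SDK's type.
    from dataclasses import dataclass

    @dataclass
    class Endpoint:
        id: str
        api_schema_type: str = "default"
        is_vertex: bool = False
        is_bedrock: bool = False

    def pick_countable_endpoint(endpoints: list[Endpoint]) -> Endpoint | None:
        for endpoint in endpoints:
            if endpoint.is_vertex or endpoint.is_bedrock:
                continue  # skip endpoints that can't serve the default-schema token count
            elif endpoint.api_schema_type == "default":
                return endpoint
        return None

    eps = [Endpoint("bedrock-ep", is_bedrock=True), Endpoint("openai-ep")]
    chosen = pick_countable_endpoint(eps)
    print(chosen.id if chosen else None)  # "openai-ep"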
src/vectorvein/types/defaults.py
@@ -647,6 +647,13 @@ MINIMAX_MODELS: Final[Dict[str, Dict[str, Any]]] = {
         "function_call_available": True,
         "response_format_available": True,
     },
+    "MiniMax-Text-01": {
+        "id": "MiniMax-Text-01",
+        "context_length": 1000192,
+        "max_output_tokens": 1000192,
+        "function_call_available": True,
+        "response_format_available": True,
+    },
 }
 
 # Gemini models
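
The defaults.py hunk registers MiniMax-Text-01 in MINIMAX_MODELS with a context window and output budget of 1,000,192 tokens and with function calling and response-format support enabled. Entries in this registry are plain dicts, so downstream code can read the limits directly; in the sketch below, only the import path and the dict keys come from the diff, and clamp_max_tokens is a hypothetical helper.

    # Illustrative lookup of the new registry entry. clamp_max_tokens is a
    # hypothetical helper; the import path and dict keys come from the diff above.
    from vectorvein.types.defaults import MINIMAX_MODELS

    def clamp_max_tokens(model_id: str, requested: int) -> int:
        model = MINIMAX_MODELS[model_id]
        return min(requested, model["max_output_tokens"])

    print(MINIMAX_MODELS["MiniMax-Text-01"]["context_length"])  # 1000192
    print(clamp_max_tokens("MiniMax-Text-01", 2_000_000))       # 1000192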