vectorvein 0.1.30__tar.gz → 0.1.32__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. {vectorvein-0.1.30 → vectorvein-0.1.32}/PKG-INFO +1 -1
  2. {vectorvein-0.1.30 → vectorvein-0.1.32}/pyproject.toml +1 -1
  3. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/__init__.py +28 -0
  4. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/types/defaults.py +16 -0
  5. {vectorvein-0.1.30 → vectorvein-0.1.32}/README.md +0 -0
  6. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  8. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  9. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/base_client.py +0 -0
  10. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  11. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  12. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/groq_client.py +0 -0
  13. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/local_client.py +0 -0
  14. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  15. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  16. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  17. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/openai_client.py +0 -0
  18. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  19. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  20. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  21. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/utils.py +0 -0
  22. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/yi_client.py +0 -0
  23. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  24. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/settings/__init__.py +0 -0
  25. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/types/enums.py +0 -0
  26. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/types/exception.py +0 -0
  27. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/types/llm_parameters.py +0 -0
  28. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/utilities/media_processing.py +0 -0
  29. {vectorvein-0.1.30 → vectorvein-0.1.32}/src/vectorvein/utilities/retry.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.1.30
3
+ Version: 0.1.32
4
4
  Summary: Default template for PDM package
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -16,7 +16,7 @@ description = "Default template for PDM package"
16
16
  name = "vectorvein"
17
17
  readme = "README.md"
18
18
  requires-python = ">=3.10"
19
- version = "0.1.30"
19
+ version = "0.1.32"
20
20
 
21
21
  [project.license]
22
22
  text = "MIT"
@@ -257,6 +257,20 @@ def create_chat_client(
257
257
  ) -> StepFunChatClient: ...
258
258
 
259
259
 
260
+ @overload
261
+ def create_chat_client(
262
+ backend: BackendType,
263
+ model: str | None = None,
264
+ stream: bool = False,
265
+ temperature: float = 0.7,
266
+ context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
267
+ random_endpoint: bool = True,
268
+ endpoint_id: str = "",
269
+ http_client: httpx.Client | None = None,
270
+ **kwargs,
271
+ ) -> BaseChatClient: ...
272
+
273
+
260
274
  def create_chat_client(
261
275
  backend: BackendType,
262
276
  model: str | None = None,
@@ -482,6 +496,20 @@ def create_async_chat_client(
482
496
  ) -> AsyncStepFunChatClient: ...
483
497
 
484
498
 
499
+ @overload
500
+ def create_async_chat_client(
501
+ backend: BackendType,
502
+ model: str | None = None,
503
+ stream: bool = False,
504
+ temperature: float = 0.7,
505
+ context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
506
+ random_endpoint: bool = True,
507
+ endpoint_id: str = "",
508
+ http_client: httpx.AsyncClient | None = None,
509
+ **kwargs,
510
+ ) -> BaseAsyncChatClient: ...
511
+
512
+
485
513
  def create_async_chat_client(
486
514
  backend: BackendType,
487
515
  model: str | None = None,
@@ -498,6 +498,22 @@ OPENAI_MODELS = {
498
498
  "function_call_available": True,
499
499
  "response_format_available": True,
500
500
  },
501
+ "o1-mini": {
502
+ "id": "o1-mini",
503
+ "context_length": 128000,
504
+ "max_output_tokens": 65536,
505
+ "function_call_available": False,
506
+ "response_format_available": False,
507
+ "native_multimodal": False,
508
+ },
509
+ "o1-preview": {
510
+ "id": "o1-preview",
511
+ "context_length": 128000,
512
+ "max_output_tokens": 32768,
513
+ "function_call_available": False,
514
+ "response_format_available": False,
515
+ "native_multimodal": False,
516
+ },
501
517
  }
502
518
 
503
519
  # Anthropic models
Remaining files are unchanged between versions.