vectorvein 0.2.18__tar.gz → 0.2.20__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. {vectorvein-0.2.18 → vectorvein-0.2.20}/PKG-INFO +1 -1
  2. {vectorvein-0.2.18 → vectorvein-0.2.20}/pyproject.toml +1 -1
  3. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/utils.py +18 -18
  4. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/types/defaults.py +10 -2
  5. {vectorvein-0.2.18 → vectorvein-0.2.20}/README.md +0 -0
  6. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/api/__init__.py +0 -0
  8. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/api/client.py +0 -0
  9. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/api/exceptions.py +0 -0
  10. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/api/models.py +0 -0
  11. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/__init__.py +0 -0
  12. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  13. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  14. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/base_client.py +0 -0
  15. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  16. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/ernie_client.py +0 -0
  17. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  18. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/groq_client.py +0 -0
  19. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/local_client.py +0 -0
  20. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  21. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  22. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  23. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/openai_client.py +0 -0
  24. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  25. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/py.typed +0 -0
  26. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  27. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  28. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/xai_client.py +0 -0
  29. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/yi_client.py +0 -0
  30. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  31. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/py.typed +0 -0
  32. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/server/token_server.py +0 -0
  33. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/settings/__init__.py +0 -0
  34. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/settings/py.typed +0 -0
  35. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/types/__init__.py +0 -0
  36. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/types/enums.py +0 -0
  37. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/types/exception.py +0 -0
  38. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/types/llm_parameters.py +0 -0
  39. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/types/py.typed +0 -0
  40. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/types/settings.py +0 -0
  41. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/utilities/media_processing.py +0 -0
  42. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/utilities/rate_limiter.py +0 -0
  43. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/utilities/retry.py +0 -0
  44. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/graph/edge.py +0 -0
  45. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/graph/node.py +0 -0
  46. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/graph/port.py +0 -0
  47. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/graph/workflow.py +0 -0
  48. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  49. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  50. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  51. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
  52. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
  53. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/llms.py +0 -0
  54. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
  55. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
  56. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/output.py +0 -0
  57. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  58. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  59. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/tools.py +0 -0
  60. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  61. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  62. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  63. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  64. {vectorvein-0.2.18 → vectorvein-0.2.20}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.2.18
3
+ Version: 0.2.20
4
4
  Summary: VectorVein Python SDK
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -18,7 +18,7 @@ description = "VectorVein Python SDK"
18
18
  name = "vectorvein"
19
19
  readme = "README.md"
20
20
  requires-python = ">=3.10"
21
- version = "0.2.18"
21
+ version = "0.2.20"
22
22
 
23
23
  [project.license]
24
24
  text = "MIT"
@@ -130,10 +130,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
130
130
  elif model.startswith(("gpt-4o", "o1-")):
131
131
  return len(get_gpt_4o_encoding().encode(text))
132
132
  elif model.startswith(("abab", "MiniMax")):
133
- model_setting = settings.minimax.models[model]
134
- if len(model_setting.endpoints) == 0:
133
+ backend_setting = settings.get_backend(BackendType.MiniMax).models[model]
134
+ if len(backend_setting.endpoints) == 0:
135
135
  return int(len(text) / 1.33)
136
- endpoint_id = model_setting.endpoints[0]
136
+ endpoint_id = backend_setting.endpoints[0]
137
137
  if isinstance(endpoint_id, dict):
138
138
  endpoint_id = endpoint_id["endpoint_id"]
139
139
  endpoint = settings.get_endpoint(endpoint_id)
@@ -160,10 +160,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
160
160
  result = response.json()
161
161
  return result["segments_num"]
162
162
  elif model.startswith("moonshot"):
163
- model_setting = settings.moonshot.models[model]
164
- if len(model_setting.endpoints) == 0:
163
+ backend_setting = settings.get_backend(BackendType.Moonshot).models[model]
164
+ if len(backend_setting.endpoints) == 0:
165
165
  return len(get_gpt_35_encoding().encode(text))
166
- endpoint_id = model_setting.endpoints[0]
166
+ endpoint_id = backend_setting.endpoints[0]
167
167
  if isinstance(endpoint_id, dict):
168
168
  endpoint_id = endpoint_id["endpoint_id"]
169
169
  endpoint = settings.get_endpoint(endpoint_id)
@@ -187,10 +187,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
187
187
  result = response.json()
188
188
  return result["data"]["total_tokens"]
189
189
  elif model.startswith("gemini"):
190
- model_setting = settings.gemini.models[model]
191
- if len(model_setting.endpoints) == 0:
190
+ backend_setting = settings.get_backend(BackendType.Gemini).models[model]
191
+ if len(backend_setting.endpoints) == 0:
192
192
  return len(get_gpt_35_encoding().encode(text))
193
- endpoint_id = model_setting.endpoints[0]
193
+ endpoint_id = backend_setting.endpoints[0]
194
194
  if isinstance(endpoint_id, dict):
195
195
  endpoint_id = endpoint_id["endpoint_id"]
196
196
  endpoint = settings.get_endpoint(endpoint_id)
@@ -200,7 +200,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
200
200
  if endpoint.api_base
201
201
  else "https://generativelanguage.googleapis.com/v1beta"
202
202
  )
203
- base_url = f"{api_base}/models/{model_setting.id}:countTokens"
203
+ base_url = f"{api_base}/models/{backend_setting.id}:countTokens"
204
204
  params = {"key": endpoint.api_key}
205
205
  request_body = {
206
206
  "contents": {
@@ -222,8 +222,8 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
222
222
  result = response.json()
223
223
  return result["totalTokens"]
224
224
  elif model.startswith("claude"):
225
- backend_settings = settings.get_backend(BackendType.Anthropic)
226
- for endpoint_choice in backend_settings.models[model].endpoints:
225
+ backend_setting = settings.get_backend(BackendType.Anthropic)
226
+ for endpoint_choice in backend_setting.models[model].endpoints:
227
227
  if isinstance(endpoint_choice, dict):
228
228
  endpoint_id = endpoint_choice["endpoint_id"]
229
229
  else:
@@ -255,10 +255,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
255
255
  qwen_tokenizer = get_tokenizer(model)
256
256
  return len(qwen_tokenizer.encode(text))
257
257
  elif model.startswith("stepfun"):
258
- model_setting = settings.moonshot.models[model]
259
- if len(model_setting.endpoints) == 0:
258
+ backend_setting = settings.get_backend(BackendType.StepFun).models[model]
259
+ if len(backend_setting.endpoints) == 0:
260
260
  return len(get_gpt_35_encoding().encode(text))
261
- endpoint_id = model_setting.endpoints[0]
261
+ endpoint_id = backend_setting.endpoints[0]
262
262
  if isinstance(endpoint_id, dict):
263
263
  endpoint_id = endpoint_id["endpoint_id"]
264
264
  endpoint = settings.get_endpoint(endpoint_id)
@@ -282,10 +282,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
282
282
  result = response.json()
283
283
  return result["data"]["total_tokens"]
284
284
  elif model.startswith("glm"):
285
- model_setting = settings.zhipuai.models[model]
286
- if len(model_setting.endpoints) == 0:
285
+ backend_setting = settings.get_backend(BackendType.ZhiPuAI).models[model]
286
+ if len(backend_setting.endpoints) == 0:
287
287
  return len(get_gpt_35_encoding().encode(text))
288
- endpoint_id = model_setting.endpoints[0]
288
+ endpoint_id = backend_setting.endpoints[0]
289
289
  if isinstance(endpoint_id, dict):
290
290
  endpoint_id = endpoint_id["endpoint_id"]
291
291
  endpoint = settings.get_endpoint(endpoint_id)
@@ -388,8 +388,16 @@ ZHIPUAI_MODELS: Final[Dict[str, Dict[str, Any]]] = {
388
388
  "context_length": 16000,
389
389
  "function_call_available": False,
390
390
  "response_format_available": False,
391
- "max_output_tokens": 16000,
392
- "native_multimodal": True,
391
+ "max_output_tokens": 12000,
392
+ "native_multimodal": False,
393
+ },
394
+ "glm-4-alltools": {
395
+ "id": "glm-4-alltools",
396
+ "context_length": 128000,
397
+ "function_call_available": False,
398
+ "response_format_available": False,
399
+ "max_output_tokens": 20480,
400
+ "native_multimodal": False,
393
401
  },
394
402
  }
395
403
 
(Remaining files listed above are unchanged between versions.)