vectorvein 0.1.70__tar.gz → 0.1.71__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {vectorvein-0.1.70 → vectorvein-0.1.71}/PKG-INFO +1 -1
  2. {vectorvein-0.1.70 → vectorvein-0.1.71}/pyproject.toml +1 -1
  3. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/utils.py +28 -1
  4. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/types/defaults.py +18 -9
  5. {vectorvein-0.1.70 → vectorvein-0.1.71}/README.md +0 -0
  6. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/__init__.py +0 -0
  8. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  9. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  10. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/base_client.py +0 -0
  11. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  12. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  13. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/groq_client.py +0 -0
  14. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/local_client.py +0 -0
  15. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  16. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  17. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  18. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/openai_client.py +0 -0
  19. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  20. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/py.typed +0 -0
  21. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  22. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  23. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/xai_client.py +0 -0
  24. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/yi_client.py +0 -0
  25. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  26. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/py.typed +0 -0
  27. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/server/token_server.py +0 -0
  28. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/settings/__init__.py +0 -0
  29. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/settings/py.typed +0 -0
  30. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/types/enums.py +0 -0
  31. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/types/exception.py +0 -0
  32. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/types/llm_parameters.py +0 -0
  33. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/types/py.typed +0 -0
  34. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/utilities/media_processing.py +0 -0
  35. {vectorvein-0.1.70 → vectorvein-0.1.71}/src/vectorvein/utilities/retry.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.1.70
3
+ Version: 0.1.71
4
4
  Summary: VectorVein python SDK
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -17,7 +17,7 @@ description = "VectorVein python SDK"
17
17
  name = "vectorvein"
18
18
  readme = "README.md"
19
19
  requires-python = ">=3.10"
20
- version = "0.1.70"
20
+ version = "0.1.71"
21
21
 
22
22
  [project.license]
23
23
  text = "MIT"
@@ -173,7 +173,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
173
173
  return 1000
174
174
  result = response.json()
175
175
  return result["segments_num"]
176
- elif model in ("moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"):
176
+ elif model.startswith("moonshot"):
177
177
  model_setting = settings.moonshot.models[model]
178
178
  if len(model_setting.endpoints) == 0:
179
179
  return len(get_gpt_35_encoding().encode(text))
@@ -294,6 +294,33 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
294
294
  return 1000
295
295
  result = response.json()
296
296
  return result["data"]["total_tokens"]
297
+ elif model.startswith("glm"):
298
+ model_setting = settings.zhipuai.models[model]
299
+ if len(model_setting.endpoints) == 0:
300
+ return len(get_gpt_35_encoding().encode(text))
301
+ endpoint_id = model_setting.endpoints[0]
302
+ if isinstance(endpoint_id, dict):
303
+ endpoint_id = endpoint_id["endpoint_id"]
304
+ endpoint = settings.get_endpoint(endpoint_id)
305
+ tokenize_url = f"{endpoint.api_base}/tokenizer"
306
+ headers = {"Content-Type": "application/json", "Authorization": f"Bearer {endpoint.api_key}"}
307
+ request_body = {
308
+ "model": model,
309
+ "messages": [
310
+ {"role": "user", "content": text},
311
+ ],
312
+ }
313
+ _, response = (
314
+ Retry(httpx.post)
315
+ .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
316
+ .retry_times(5)
317
+ .sleep_time(10)
318
+ .run()
319
+ )
320
+ if response is None:
321
+ return 1000
322
+ result = response.json()
323
+ return result["usage"]["prompt_tokens"]
297
324
  else:
298
325
  return len(get_gpt_35_encoding().encode(text))
299
326
 
@@ -44,13 +44,6 @@ DEEPSEEK_MODELS: Final[Dict[str, Dict[str, Any]]] = {
44
44
  "function_call_available": True,
45
45
  "response_format_available": True,
46
46
  },
47
- "deepseek-coder": {
48
- "id": "deepseek-chat",
49
- "context_length": 128000,
50
- "max_output_tokens": 4096,
51
- "function_call_available": True,
52
- "response_format_available": True,
53
- },
54
47
  }
55
48
  DEEPSEEK_DEFAULT_MODEL: Final[str] = "deepseek-chat"
56
49
 
@@ -427,6 +420,22 @@ ZHIPUAI_MODELS: Final[Dict[str, Dict[str, Any]]] = {
427
420
  "max_output_tokens": 1024,
428
421
  "native_multimodal": True,
429
422
  },
423
+ "glm-4v-flash": {
424
+ "id": "glm-4v-flash",
425
+ "context_length": 2000,
426
+ "function_call_available": False,
427
+ "response_format_available": False,
428
+ "max_output_tokens": 1024,
429
+ "native_multimodal": True,
430
+ },
431
+ "glm-zero-preview": {
432
+ "id": "glm-zero-preview",
433
+ "context_length": 16000,
434
+ "function_call_available": False,
435
+ "response_format_available": False,
436
+ "max_output_tokens": 16000,
437
+ "native_multimodal": True,
438
+ },
430
439
  }
431
440
 
432
441
  # Mistral models
@@ -669,7 +678,7 @@ GEMINI_MODELS: Final[Dict[str, Dict[str, Any]]] = {
669
678
  },
670
679
  "gemini-2.0-flash-thinking-exp-1219": {
671
680
  "id": "gemini-2.0-flash-thinking-exp-1219",
672
- "context_length": 1048576,
681
+ "context_length": 32767,
673
682
  "max_output_tokens": 8192,
674
683
  "function_call_available": True,
675
684
  "response_format_available": True,
@@ -677,7 +686,7 @@ GEMINI_MODELS: Final[Dict[str, Dict[str, Any]]] = {
677
686
  },
678
687
  "gemini-exp-1206": {
679
688
  "id": "gemini-exp-1206",
680
- "context_length": 32767,
689
+ "context_length": 2097152,
681
690
  "function_call_available": True,
682
691
  "response_format_available": True,
683
692
  "native_multimodal": True,
File without changes