vectorvein-0.2.19-py3-none-any.whl → vectorvein-0.2.21-py3-none-any.whl

This diff compares two publicly available versions of the package as released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
@@ -130,10 +130,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
     elif model.startswith(("gpt-4o", "o1-")):
         return len(get_gpt_4o_encoding().encode(text))
     elif model.startswith(("abab", "MiniMax")):
-        model_setting = settings.minimax.models[model]
-        if len(model_setting.endpoints) == 0:
+        backend_setting = settings.get_backend(BackendType.MiniMax).models[model]
+        if len(backend_setting.endpoints) == 0:
             return int(len(text) / 1.33)
-        endpoint_id = model_setting.endpoints[0]
+        endpoint_id = backend_setting.endpoints[0]
         if isinstance(endpoint_id, dict):
             endpoint_id = endpoint_id["endpoint_id"]
         endpoint = settings.get_endpoint(endpoint_id)
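
The pattern in this hunk repeats throughout get_token_counts: provider-specific attributes on the settings object (settings.minimax, settings.moonshot, settings.gemini, ...) are replaced by the generic settings.get_backend(BackendType.X) lookup, with the old per-provider fields migrated into the new backends container by Settings.upgrade_to_v2 further down. A minimal sketch of the new lookup, assuming the import paths below (inferred from the package layout in RECORD, not shown in this diff):

```python
# Sketch only: illustrates the lookup pattern used in the hunk above.
# The import paths and the endpoint-id shapes are assumptions for illustration.
from vectorvein.settings import settings
from vectorvein.types import BackendType


def first_endpoint_id(model: str) -> str | None:
    """Return the first configured endpoint id for a MiniMax model, if any."""
    backend_setting = settings.get_backend(BackendType.MiniMax).models[model]
    if len(backend_setting.endpoints) == 0:
        return None
    endpoint_id = backend_setting.endpoints[0]
    # Endpoint entries may be plain ids or dicts carrying extra options.
    if isinstance(endpoint_id, dict):
        endpoint_id = endpoint_id["endpoint_id"]
    return endpoint_id
```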
@@ -160,10 +160,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
         result = response.json()
         return result["segments_num"]
     elif model.startswith("moonshot"):
-        model_setting = settings.moonshot.models[model]
-        if len(model_setting.endpoints) == 0:
+        backend_setting = settings.get_backend(BackendType.Moonshot).models[model]
+        if len(backend_setting.endpoints) == 0:
             return len(get_gpt_35_encoding().encode(text))
-        endpoint_id = model_setting.endpoints[0]
+        endpoint_id = backend_setting.endpoints[0]
         if isinstance(endpoint_id, dict):
             endpoint_id = endpoint_id["endpoint_id"]
         endpoint = settings.get_endpoint(endpoint_id)
@@ -187,10 +187,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
         result = response.json()
         return result["data"]["total_tokens"]
     elif model.startswith("gemini"):
-        model_setting = settings.gemini.models[model]
-        if len(model_setting.endpoints) == 0:
+        backend_setting = settings.get_backend(BackendType.Gemini).models[model]
+        if len(backend_setting.endpoints) == 0:
             return len(get_gpt_35_encoding().encode(text))
-        endpoint_id = model_setting.endpoints[0]
+        endpoint_id = backend_setting.endpoints[0]
         if isinstance(endpoint_id, dict):
             endpoint_id = endpoint_id["endpoint_id"]
         endpoint = settings.get_endpoint(endpoint_id)
@@ -200,7 +200,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             if endpoint.api_base
             else "https://generativelanguage.googleapis.com/v1beta"
         )
-        base_url = f"{api_base}/models/{model_setting.id}:countTokens"
+        base_url = f"{api_base}/models/{backend_setting.id}:countTokens"
         params = {"key": endpoint.api_key}
         request_body = {
             "contents": {
@@ -222,8 +222,8 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
         result = response.json()
         return result["totalTokens"]
     elif model.startswith("claude"):
-        backend_settings = settings.get_backend(BackendType.Anthropic)
-        for endpoint_choice in backend_settings.models[model].endpoints:
+        backend_setting = settings.get_backend(BackendType.Anthropic)
+        for endpoint_choice in backend_setting.models[model].endpoints:
             if isinstance(endpoint_choice, dict):
                 endpoint_id = endpoint_choice["endpoint_id"]
             else:
@@ -255,10 +255,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
         qwen_tokenizer = get_tokenizer(model)
         return len(qwen_tokenizer.encode(text))
     elif model.startswith("stepfun"):
-        model_setting = settings.moonshot.models[model]
-        if len(model_setting.endpoints) == 0:
+        backend_setting = settings.get_backend(BackendType.StepFun).models[model]
+        if len(backend_setting.endpoints) == 0:
             return len(get_gpt_35_encoding().encode(text))
-        endpoint_id = model_setting.endpoints[0]
+        endpoint_id = backend_setting.endpoints[0]
         if isinstance(endpoint_id, dict):
             endpoint_id = endpoint_id["endpoint_id"]
         endpoint = settings.get_endpoint(endpoint_id)
@@ -282,10 +282,10 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
         result = response.json()
         return result["data"]["total_tokens"]
     elif model.startswith("glm"):
-        model_setting = settings.zhipuai.models[model]
-        if len(model_setting.endpoints) == 0:
+        backend_setting = settings.get_backend(BackendType.ZhiPuAI).models[model]
+        if len(backend_setting.endpoints) == 0:
             return len(get_gpt_35_encoding().encode(text))
-        endpoint_id = model_setting.endpoints[0]
+        endpoint_id = backend_setting.endpoints[0]
         if isinstance(endpoint_id, dict):
             endpoint_id = endpoint_id["endpoint_id"]
         endpoint = settings.get_endpoint(endpoint_id)
@@ -209,5 +209,52 @@ class Settings(BaseModel):
         else:
             return super().model_dump(exclude={"backends"})
 
+    def upgrade_to_v2(self) -> "Settings":
+        """
+        Upgrade settings from v1 format to v2 format.
+        In v2 format, all backend settings are stored in the 'backends' field.
+
+        Returns:
+            Settings: Self with updated format
+        """
+        # If already v2, no need to upgrade
+        if self.VERSION == "2" and self.backends is not None:
+            return self
+
+        # Initialize backends if not exists
+        if self.backends is None:
+            self.backends = Backends()
+
+        # Move all backend settings to backends field
+        backend_names = [
+            "anthropic",
+            "deepseek",
+            "gemini",
+            "groq",
+            "local",
+            "minimax",
+            "mistral",
+            "moonshot",
+            "openai",
+            "qwen",
+            "yi",
+            "zhipuai",
+            "baichuan",
+            "stepfun",
+            "xai",
+            "ernie",
+        ]
+
+        for backend_name in backend_names:
+            backend_setting = getattr(self, backend_name)
+            if backend_setting is not None:
+                setattr(self.backends, backend_name, backend_setting)
+                delattr(self, backend_name)
+
+        # Set version to 2
+        self.VERSION = "2"
+
+        return self
+
 
 settings = Settings()
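
The new upgrade_to_v2 method moves any v1-style per-provider fields (the backend_names listed above) into the backends container that get_backend reads from, then stamps VERSION = "2". A minimal usage sketch, assuming the import paths below (inferred from the package layout, not shown in this diff):

```python
# Sketch only: upgrading an already-constructed v1-style Settings object.
# Import paths are assumptions based on the package layout in RECORD.
from vectorvein.settings import Settings
from vectorvein.types import BackendType

settings = Settings()               # may still carry v1 per-provider fields (minimax, moonshot, ...)
settings = settings.upgrade_to_v2()

assert settings.VERSION == "2"      # provider settings now live under settings.backends
backend_setting = settings.get_backend(BackendType.MiniMax)  # the lookup used in the utils.py hunks
```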
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.2.19
+Version: 0.2.21
 Summary: VectorVein Python SDK
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
@@ -1,6 +1,6 @@
-vectorvein-0.2.19.dist-info/METADATA,sha256=YqX5NXbVy5wddHOOcIK3m3hadOVLutLxvjigyxNbP9E,4414
-vectorvein-0.2.19.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
-vectorvein-0.2.19.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+vectorvein-0.2.21.dist-info/METADATA,sha256=_UPhTjcys4IEhO-QgpwbPUzN8Ty0k4uCZ4Yz-eUQu5o,4414
+vectorvein-0.2.21.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+vectorvein-0.2.21.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
 vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
@@ -23,13 +23,13 @@ vectorvein/chat_clients/openai_compatible_client.py,sha256=L8SXCRA7OO_eXh6b-oya8
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
-vectorvein/chat_clients/utils.py,sha256=r8tlt-suUjISSRjc_tCCcBYmQT3zng1YFVnQgeAG1Co,25041
+vectorvein/chat_clients/utils.py,sha256=sXqp-RAxCeNmNoh3SKptPoYqS0ZNb_y2Eemuz919IJk,25195
 vectorvein/chat_clients/xai_client.py,sha256=eLFJJrNRJ-ni3DpshODcr3S1EJQLbhVwxyO1E54LaqM,491
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
 vectorvein/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/server/token_server.py,sha256=36F9PKSNOX8ZtYBXY_l-76GQTpUSmQ2Y8EMy1H7wtdQ,1353
-vectorvein/settings/__init__.py,sha256=1KHgIF3hAXFMHlNZcgB4IcQg9iRy9WMh0BZUcBq5dGI,9509
+vectorvein/settings/__init__.py,sha256=fqOfdTZ_b1P1Mx_6rFOUkgaSqS06hvrjzIQArRUPjU0,10849
 vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/types/__init__.py,sha256=DJYGhlshgUQgzXPfMfKW5sTpBClZUCLhqmCqF44lVuU,3329
 vectorvein/types/defaults.py,sha256=wNPrfYDwsXMNIxofgGdN-ix9-by6WQu3cGIRySl3Q2E,27350
@@ -62,4 +62,4 @@ vectorvein/workflow/nodes/vector_db.py,sha256=t6I17q6iR3yQreiDHpRrksMdWDPIvgqJs0
 vectorvein/workflow/nodes/video_generation.py,sha256=qmdg-t_idpxq1veukd-jv_ChICMOoInKxprV9Z4Vi2w,4118
 vectorvein/workflow/nodes/web_crawlers.py,sha256=LsqomfXfqrXfHJDO1cl0Ox48f4St7X_SL12DSbAMSOw,5415
 vectorvein/workflow/utils/json_to_code.py,sha256=F7dhDy8kGc8ndOeihGLRLGFGlquoxVlb02ENtxnQ0C8,5914
-vectorvein-0.2.19.dist-info/RECORD,,
+vectorvein-0.2.21.dist-info/RECORD,,