vectorvein 0.2.96__py3-none-any.whl → 0.2.98__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects only the changes between those published versions.
@@ -597,9 +597,7 @@ class AnthropicChatClient(BaseChatClient):
         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
             native_multimodal = self.model_setting.native_multimodal
-            token_counts = get_message_token_counts(
-                messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
-            )
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
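Aside from collapsing the call onto one line, the surrounding logic is the output-budget computation: the prompt's token count is subtracted from the model's context window, and the result is clamped between 1 and the model's max_output_tokens. A standalone sketch of that clamp, with purely illustrative numbers (not taken from any real model setting):

    context_length = 200_000      # illustrative context window
    max_output_tokens = 8_192     # illustrative per-model output cap
    token_counts = 150_000        # tokens consumed by messages + tools

    max_tokens = context_length - token_counts               # 50,000 left
    max_tokens = min(max(max_tokens, 1), max_output_tokens)  # capped by the output limit
    print(max_tokens)  # 8192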
@@ -707,9 +705,7 @@ class AnthropicChatClient(BaseChatClient):
                 result["raw_content"][i]["input"] = {}
                 try:
                     if result["tool_calls"][0]["function"]["arguments"]:
-                        result["raw_content"][i]["input"] = json.loads(
-                            result["tool_calls"][0]["function"]["arguments"]
-                        )
+                        result["raw_content"][i]["input"] = json.loads(result["tool_calls"][0]["function"]["arguments"])
                     else:
                         result["raw_content"][i]["input"] = {}
                 except json.JSONDecodeError:
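The single-line json.loads rewrite keeps the same guard as before: streamed tool-call arguments may be empty or truncated, so the input falls back to an empty dict on a decode error. A self-contained sketch of that defensive parse (the argument strings below are invented for illustration):

    import json

    def parse_tool_arguments(arguments: str) -> dict:
        # Empty string -> {}; malformed or truncated JSON -> {} as well.
        if not arguments:
            return {}
        try:
            return json.loads(arguments)
        except json.JSONDecodeError:
            return {}

    print(parse_tool_arguments('{"city": "Paris"}'))  # {'city': 'Paris'}
    print(parse_tool_arguments('{"city": '))          # {} (truncated mid-stream)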
@@ -727,9 +723,7 @@ class AnthropicChatClient(BaseChatClient):
                     yield ChatCompletionDeltaMessage(**message)
             elif isinstance(chunk, RawMessageDeltaEvent):
                 result["usage"]["completion_tokens"] = chunk.usage.output_tokens
-                result["usage"]["total_tokens"] = (
-                    result["usage"]["prompt_tokens"] + result["usage"]["completion_tokens"]
-                )
+                result["usage"]["total_tokens"] = result["usage"]["prompt_tokens"] + result["usage"]["completion_tokens"]
                 yield ChatCompletionDeltaMessage(
                     usage=Usage(
                         prompt_tokens=result["usage"]["prompt_tokens"],
@@ -1211,9 +1205,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         if max_tokens is None:
             max_output_tokens = self.model_setting.max_output_tokens
             native_multimodal = self.model_setting.native_multimodal
-            token_counts = get_message_token_counts(
-                messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
-            )
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -1321,9 +1313,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 result["raw_content"][i]["input"] = {}
                 try:
                     if result["tool_calls"][0]["function"]["arguments"]:
-                        result["raw_content"][i]["input"] = json.loads(
-                            result["tool_calls"][0]["function"]["arguments"]
-                        )
+                        result["raw_content"][i]["input"] = json.loads(result["tool_calls"][0]["function"]["arguments"])
                     else:
                         result["raw_content"][i]["input"] = {}
                 except json.JSONDecodeError:
@@ -1341,9 +1331,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                     yield ChatCompletionDeltaMessage(**message)
             elif isinstance(chunk, RawMessageDeltaEvent):
                 result["usage"]["completion_tokens"] = chunk.usage.output_tokens
-                result["usage"]["total_tokens"] = (
-                    result["usage"]["prompt_tokens"] + result["usage"]["completion_tokens"]
-                )
+                result["usage"]["total_tokens"] = result["usage"]["prompt_tokens"] + result["usage"]["completion_tokens"]
                 yield ChatCompletionDeltaMessage(
                     usage=Usage(
                         prompt_tokens=result["usage"]["prompt_tokens"],
@@ -381,8 +381,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
         usage = None
         buffer = ""
         in_reasoning = False
-        current_reasoning = []
-        current_content = []
+        accumulated_reasoning = []
+        accumulated_content = []

         for chunk in stream_response:
             if chunk.usage and chunk.usage.total_tokens:
@@ -404,63 +404,142 @@ class OpenAICompatibleChatClient(BaseChatClient):
                 for index, tool_call in enumerate(chunk.choices[0].delta.tool_calls):
                     tool_call.index = index
                     tool_call.type = "function"  # also caused by MiniMax's non-standard output
-                yield ChatCompletionDeltaMessage(**chunk.choices[0].delta.model_dump(), usage=usage)
+
+                # Even when function calls are supported, still handle <think> tags
+                message = chunk.choices[0].delta.model_dump()
+                delta_content = message.get("content", "")
+                if delta_content:
+                    buffer += delta_content
+
+                # Process the buffered content and extract <think> tags
+                current_output_content = ""
+                current_reasoning_content = ""
+
+                while buffer:
+                    if not in_reasoning:
+                        start_pos = buffer.find("<think>")
+                        if start_pos != -1:
+                            # Found the start of a <think> tag
+                            if start_pos > 0:
+                                current_output_content += buffer[:start_pos]
+                            buffer = buffer[start_pos + 7 :]  # skip "<think>"
+                            in_reasoning = True
+                        else:
+                            # No <think> tag found; output directly
+                            current_output_content += buffer
+                            buffer = ""
+                    else:
+                        end_pos = buffer.find("</think>")
+                        if end_pos != -1:
+                            # Found the closing </think> tag
+                            current_reasoning_content += buffer[:end_pos]
+                            buffer = buffer[end_pos + 8 :]  # skip "</think>"
+                            in_reasoning = False
+                        else:
+                            # No closing tag yet; keep accumulating into the reasoning content
+                            current_reasoning_content += buffer
+                            buffer = ""
+
+                # Accumulate content
+                if current_output_content:
+                    accumulated_content.append(current_output_content)
+                if current_reasoning_content:
+                    accumulated_reasoning.append(current_reasoning_content)
+
+                # Emit a delta whenever there is any content change
+                if current_output_content or current_reasoning_content:
+                    if current_output_content:
+                        message["content"] = current_output_content
+                    elif current_reasoning_content:
+                        message["reasoning_content"] = current_reasoning_content
+                        message["content"] = ""  # no normal content while reasoning
+                elif not current_output_content and not current_reasoning_content and not message.get("tool_calls"):
+                    # Skip this message if there is no content and no tool_calls
+                    continue
+
+                yield ChatCompletionDeltaMessage(**message, usage=usage)
             else:
                 message = chunk.choices[0].delta.model_dump()
                 delta_content = message.get("content", "")
-                buffer += delta_content or ""
+                if delta_content:
+                    buffer += delta_content

-                while True:
+                # Process the buffered content and extract <think> tags
+                current_output_content = ""
+                current_reasoning_content = ""
+
+                while buffer:
                     if not in_reasoning:
                         start_pos = buffer.find("<think>")
                         if start_pos != -1:
-                            current_content.append(buffer[:start_pos])
-                            buffer = buffer[start_pos + 7 :]
+                            # Found the start of a <think> tag
+                            if start_pos > 0:
+                                current_output_content += buffer[:start_pos]
+                            buffer = buffer[start_pos + 7 :]  # skip "<think>"
                             in_reasoning = True
                         else:
-                            current_content.append(buffer)
+                            # No <think> tag found; output directly
+                            current_output_content += buffer
                             buffer = ""
-                            break
                     else:
                         end_pos = buffer.find("</think>")
                         if end_pos != -1:
-                            current_reasoning.append(buffer[:end_pos])
-                            buffer = buffer[end_pos + 8 :]
+                            # Found the closing </think> tag
+                            current_reasoning_content += buffer[:end_pos]
+                            buffer = buffer[end_pos + 8 :]  # skip "</think>"
                             in_reasoning = False
                         else:
-                            current_reasoning.append(buffer)
+                            # No closing tag yet; keep accumulating into the reasoning content
+                            current_reasoning_content += buffer
                             buffer = ""
-                            break
-
-                message["content"] = "".join(current_content)
-                if current_reasoning:
-                    message["reasoning_content"] = "".join(current_reasoning)
-                current_content.clear()
-                current_reasoning.clear()
-
-                if tools:
-                    full_content += message["content"]
-                    tool_call_data = ToolCallContentProcessor(full_content).tool_calls
-                    if tool_call_data:
-                        message["tool_calls"] = tool_call_data["tool_calls"]
-
-                if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
-                    message["content"] = ""
-                    result = message
-                    continue
-
-                yield ChatCompletionDeltaMessage(**message, usage=usage)

+                # Accumulate content
+                if current_output_content:
+                    accumulated_content.append(current_output_content)
+                if current_reasoning_content:
+                    accumulated_reasoning.append(current_reasoning_content)
+
+                # Emit a delta whenever there is any content change
+                if current_output_content or current_reasoning_content:
+                    if current_output_content:
+                        message["content"] = current_output_content
+                    elif current_reasoning_content:
+                        message["reasoning_content"] = current_reasoning_content
+                        message["content"] = ""  # no normal content while reasoning
+
+                if tools:
+                    full_content += current_output_content
+                    tool_call_data = ToolCallContentProcessor(full_content).tool_calls
+                    if tool_call_data:
+                        message["tool_calls"] = tool_call_data["tool_calls"]
+
+                if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
+                    message["content"] = ""
+                    result = message
+                    continue
+
+                yield ChatCompletionDeltaMessage(**message, usage=usage)
+
+        # Handle any remaining buffer content at the end of the stream
         if buffer:
             if in_reasoning:
-                current_reasoning.append(buffer)
+                accumulated_reasoning.append(buffer)
             else:
-                current_content.append(buffer)
-        final_message = {
-            "content": "".join(current_content),
-            "reasoning_content": "".join(current_reasoning) if current_reasoning else None,
-        }
-        yield ChatCompletionDeltaMessage(**final_message, usage=usage)
+                accumulated_content.append(buffer)
+
+        final_message = {}
+        if accumulated_content:
+            final_content = "".join(accumulated_content)
+            if final_content.strip():  # only emit when the content is non-empty
+                final_message["content"] = final_content
+
+        if accumulated_reasoning:
+            final_reasoning = "".join(accumulated_reasoning)
+            if final_reasoning.strip():  # only emit when the reasoning content is non-empty
+                final_message["reasoning_content"] = final_reasoning
+
+        if final_message:
+            yield ChatCompletionDeltaMessage(**final_message, usage=usage)

         if result:
             yield ChatCompletionDeltaMessage(**result, usage=usage)
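The rewritten branch (mirrored in the async client further down) is a buffer-driven state machine: each delta is appended to a buffer, text inside <think>…</think> is routed to reasoning_content, everything else to content, and the in_reasoning flag persists across deltas, so reasoning that spans several chunks is still separated correctly. (As in the package code, a tag itself split across two deltas is not reassembled.) A minimal self-contained sketch of that loop, detached from the client classes; the chunks are hypothetical deltas:

    def split_think_stream(chunks):
        # Yield (content, reasoning) pairs; in_reasoning carries the
        # parser state from one delta to the next.
        buffer, in_reasoning = "", False
        for delta in chunks:
            buffer += delta
            content, reasoning = "", ""
            while buffer:
                if not in_reasoning:
                    start = buffer.find("<think>")
                    if start != -1:
                        content += buffer[:start]
                        buffer = buffer[start + 7:]   # skip "<think>"
                        in_reasoning = True
                    else:
                        content += buffer
                        buffer = ""
                else:
                    end = buffer.find("</think>")
                    if end != -1:
                        reasoning += buffer[:end]
                        buffer = buffer[end + 8:]     # skip "</think>"
                        in_reasoning = False
                    else:
                        reasoning += buffer
                        buffer = ""
            if content or reasoning:
                yield content, reasoning

    print(list(split_think_stream(["Hello ", "<think>step 1", " step 2</think> done"])))
    # [('Hello ', ''), ('', 'step 1'), (' done', ' step 2')]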
@@ -820,7 +899,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             messages=messages,
             stream=self.stream,
             temperature=self.temperature,
-            max_tokens=max_tokens,  # somehow Azure OpenAI's stream mode doesn't support max_completion_tokens
+            max_tokens=max_tokens,
             top_p=top_p,
             audio=audio,
             frequency_penalty=frequency_penalty,
@@ -855,8 +934,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         usage = None
         buffer = ""
         in_reasoning = False
-        current_reasoning = []
-        current_content = []
+        accumulated_reasoning = []
+        accumulated_content = []

         async for chunk in stream_response:
             if chunk.usage and chunk.usage.total_tokens:
@@ -878,63 +957,142 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                 for index, tool_call in enumerate(chunk.choices[0].delta.tool_calls):
                     tool_call.index = index
                     tool_call.type = "function"
-                yield ChatCompletionDeltaMessage(**chunk.choices[0].delta.model_dump(), usage=usage)
+
+                # Even when function calls are supported, still handle <think> tags
+                message = chunk.choices[0].delta.model_dump()
+                delta_content = message.get("content", "")
+                if delta_content:
+                    buffer += delta_content
+
+                # Process the buffered content and extract <think> tags
+                current_output_content = ""
+                current_reasoning_content = ""
+
+                while buffer:
+                    if not in_reasoning:
+                        start_pos = buffer.find("<think>")
+                        if start_pos != -1:
+                            # Found the start of a <think> tag
+                            if start_pos > 0:
+                                current_output_content += buffer[:start_pos]
+                            buffer = buffer[start_pos + 7 :]  # skip "<think>"
+                            in_reasoning = True
+                        else:
+                            # No <think> tag found; output directly
+                            current_output_content += buffer
+                            buffer = ""
+                    else:
+                        end_pos = buffer.find("</think>")
+                        if end_pos != -1:
+                            # Found the closing </think> tag
+                            current_reasoning_content += buffer[:end_pos]
+                            buffer = buffer[end_pos + 8 :]  # skip "</think>"
+                            in_reasoning = False
+                        else:
+                            # No closing tag yet; keep accumulating into the reasoning content
+                            current_reasoning_content += buffer
+                            buffer = ""
+
+                # Accumulate content
+                if current_output_content:
+                    accumulated_content.append(current_output_content)
+                if current_reasoning_content:
+                    accumulated_reasoning.append(current_reasoning_content)
+
+                # Emit a delta whenever there is any content change
+                if current_output_content or current_reasoning_content:
+                    if current_output_content:
+                        message["content"] = current_output_content
+                    elif current_reasoning_content:
+                        message["reasoning_content"] = current_reasoning_content
+                        message["content"] = ""  # no normal content while reasoning
+                elif not current_output_content and not current_reasoning_content and not message.get("tool_calls"):
+                    # Skip this message if there is no content and no tool_calls
+                    continue
+
+                yield ChatCompletionDeltaMessage(**message, usage=usage)
             else:
                 message = chunk.choices[0].delta.model_dump()
                 delta_content = message.get("content", "")
-                buffer += delta_content or ""
+                if delta_content:
+                    buffer += delta_content

-                while True:
+                # Process the buffered content and extract <think> tags
+                current_output_content = ""
+                current_reasoning_content = ""
+
+                while buffer:
                     if not in_reasoning:
                         start_pos = buffer.find("<think>")
                         if start_pos != -1:
-                            current_content.append(buffer[:start_pos])
-                            buffer = buffer[start_pos + 7 :]
+                            # Found the start of a <think> tag
+                            if start_pos > 0:
+                                current_output_content += buffer[:start_pos]
+                            buffer = buffer[start_pos + 7 :]  # skip "<think>"
                             in_reasoning = True
                         else:
-                            current_content.append(buffer)
+                            # No <think> tag found; output directly
+                            current_output_content += buffer
                             buffer = ""
-                            break
                     else:
                         end_pos = buffer.find("</think>")
                         if end_pos != -1:
-                            current_reasoning.append(buffer[:end_pos])
-                            buffer = buffer[end_pos + 8 :]
+                            # Found the closing </think> tag
+                            current_reasoning_content += buffer[:end_pos]
+                            buffer = buffer[end_pos + 8 :]  # skip "</think>"
                             in_reasoning = False
                         else:
-                            current_reasoning.append(buffer)
+                            # No closing tag yet; keep accumulating into the reasoning content
+                            current_reasoning_content += buffer
                             buffer = ""
-                            break
-
-                message["content"] = "".join(current_content)
-                if current_reasoning:
-                    message["reasoning_content"] = "".join(current_reasoning)
-                current_content.clear()
-                current_reasoning.clear()
-
-                if tools:
-                    full_content += message["content"]
-                    tool_call_data = ToolCallContentProcessor(full_content).tool_calls
-                    if tool_call_data:
-                        message["tool_calls"] = tool_call_data["tool_calls"]
-
-                if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
-                    message["content"] = ""
-                    result = message
-                    continue
-
-                yield ChatCompletionDeltaMessage(**message, usage=usage)

+                # Accumulate content
+                if current_output_content:
+                    accumulated_content.append(current_output_content)
+                if current_reasoning_content:
+                    accumulated_reasoning.append(current_reasoning_content)
+
+                # Emit a delta whenever there is any content change
+                if current_output_content or current_reasoning_content:
+                    if current_output_content:
+                        message["content"] = current_output_content
+                    elif current_reasoning_content:
+                        message["reasoning_content"] = current_reasoning_content
+                        message["content"] = ""  # no normal content while reasoning
+
+                if tools:
+                    full_content += current_output_content
+                    tool_call_data = ToolCallContentProcessor(full_content).tool_calls
+                    if tool_call_data:
+                        message["tool_calls"] = tool_call_data["tool_calls"]
+
+                if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
+                    message["content"] = ""
+                    result = message
+                    continue
+
+                yield ChatCompletionDeltaMessage(**message, usage=usage)
+
+        # Handle any remaining buffer content at the end of the stream
        if buffer:
            if in_reasoning:
-                accumulated_reasoning.append(buffer)
+                accumulated_reasoning.append(buffer)
            else:
-                current_content.append(buffer)
-        final_message = {
-            "content": "".join(current_content),
-            "reasoning_content": "".join(current_reasoning) if current_reasoning else None,
-        }
-        yield ChatCompletionDeltaMessage(**final_message, usage=usage)
+                accumulated_content.append(buffer)
+
+        final_message = {}
+        if accumulated_content:
+            final_content = "".join(accumulated_content)
+            if final_content.strip():  # only emit when the content is non-empty
+                final_message["content"] = final_content
+
+        if accumulated_reasoning:
+            final_reasoning = "".join(accumulated_reasoning)
+            if final_reasoning.strip():  # only emit when the reasoning content is non-empty
+                final_message["reasoning_content"] = final_reasoning
+
+        if final_message:
+            yield ChatCompletionDeltaMessage(**final_message, usage=usage)

         if result:
             yield ChatCompletionDeltaMessage(**result, usage=usage)
@@ -996,6 +996,14 @@ GEMINI_MODELS: Final[Dict[str, ModelSettingDict]] = {
         "response_format_available": True,
         "native_multimodal": True,
     },
+    "gemini-2.5-pro-preview-06-05": {
+        "id": "gemini-2.5-pro-preview-06-05",
+        "context_length": 1048576,
+        "max_output_tokens": 65536,
+        "function_call_available": True,
+        "response_format_available": True,
+        "native_multimodal": True,
+    },
 }

 # Baidu Wenxin Yiyan ERNIE models
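The new entry's numbers are what token budgeting reads from a model setting: with context_length 1048576 and max_output_tokens 65536, a prompt already consuming 1,000,000 tokens would leave a clamped budget of 48,576 output tokens. A quick check reusing the clamp from the Anthropic hunks above (whether the Gemini client applies the identical formula is not shown in this diff):

    gemini_25_pro = {"context_length": 1048576, "max_output_tokens": 65536}
    token_counts = 1_000_000  # illustrative prompt size

    max_tokens = gemini_25_pro["context_length"] - token_counts
    max_tokens = min(max(max_tokens, 1), gemini_25_pro["max_output_tokens"])
    print(max_tokens)  # 48576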
@@ -182,6 +182,16 @@ class Flux1(Node):
                     value="",
                     multiple=True,
                 ),
+                "input_image": InputPort(
+                    name="input_image",
+                    port_type=PortType.FILE,
+                    value=list(),
+                    support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
+                    multiple=True,
+                    show=True,
+                    condition="return fieldsData.model.value.startsWith('FLUX.1 Kontext')",
+                    condition_python=lambda ports: ports["model"].value.startswith("FLUX.1 Kontext"),
+                ),
                 "model": InputPort(
                     name="model",
                     port_type=PortType.SELECT,
@@ -191,6 +201,9 @@ class Flux1(Node):
                         {"value": "FLUX.1 [dev]", "label": "FLUX.1 [dev]"},
                         {"value": "FLUX.1 [pro]", "label": "FLUX.1 [pro]"},
                         {"value": "FLUX.1 [pro] ultra", "label": "FLUX.1 [pro] ultra"},
+                        {"value": "FLUX.1 Kontext [pro]", "label": "FLUX.1 Kontext [pro]"},
+                        {"value": "FLUX.1 Kontext [max]", "label": "FLUX.1 Kontext [max]"},
+                        {"value": "FLUX.1 Kontext [max] Multi", "label": "FLUX.1 Kontext [max] Multi"},
                     ],
                     multiple=True,
                 ),
@@ -199,16 +212,16 @@ class Flux1(Node):
                     port_type=PortType.NUMBER,
                     value=1024,
                     max=1536,
-                    condition="return fieldsData.model.value !== 'FLUX.1 [pro] ultra'",
-                    condition_python=lambda ports: ports["model"].value != "FLUX.1 [pro] ultra",
+                    condition="return fieldsData.model.value !== 'FLUX.1 [pro] ultra' && !fieldsData.model.value.startsWith('FLUX.1 Kontext')",
+                    condition_python=lambda ports: ports["model"].value != "FLUX.1 [pro] ultra" and not ports["model"].value.startswith("FLUX.1 Kontext"),
                 ),
                 "height": InputPort(
                     name="height",
                     port_type=PortType.NUMBER,
                     value=1024,
                     max=1536,
-                    condition="return fieldsData.model.value !== 'FLUX.1 [pro] ultra'",
-                    condition_python=lambda ports: ports["model"].value != "FLUX.1 [pro] ultra",
+                    condition="return fieldsData.model.value !== 'FLUX.1 [pro] ultra' && !fieldsData.model.value.startsWith('FLUX.1 Kontext')",
+                    condition_python=lambda ports: ports["model"].value != "FLUX.1 [pro] ultra" and not ports["model"].value.startswith("FLUX.1 Kontext"),
                 ),
                 "aspect_ratio": InputPort(
                     name="aspect_ratio",
@@ -374,10 +387,7 @@ class Kolors(Node):
                         {"value": "EulerDiscreteScheduler", "label": "EulerDiscreteScheduler"},
                         {"value": "EulerAncestralDiscreteScheduler", "label": "EulerAncestralDiscreteScheduler"},
                         {"value": "DPMSolverMultistepScheduler", "label": "DPMSolverMultistepScheduler"},
-                        {
-                            "value": "DPMSolverMultistepScheduler_SDE_karras",
-                            "label": "DPMSolverMultistepScheduler_SDE_karras",
-                        },
+                        {"value": "DPMSolverMultistepScheduler_SDE_karras", "label": "DPMSolverMultistepScheduler_SDE_karras"},
                         {"value": "UniPCMultistepScheduler", "label": "UniPCMultistepScheduler"},
                         {"value": "DEISMultistepScheduler", "label": "DEISMultistepScheduler"},
                     ],
@@ -566,8 +576,7 @@ class Recraft(Node):
                         {"value": "motion_blur", "label": "motion_blur"},
                     ],
                     condition="return fieldsData.generation_type.value === 'text_to_image' && fieldsData.base_style.value === 'realistic_image'",
-                    condition_python=lambda ports: ports["generation_type"].value == "text_to_image"
-                    and ports["base_style"].value == "realistic_image",
+                    condition_python=lambda ports: ports["generation_type"].value == "text_to_image" and ports["base_style"].value == "realistic_image",
                     multiple=True,
                 ),
                 "substyle_digital_illustration": InputPort(
@@ -587,8 +596,7 @@ class Recraft(Node):
                         {"value": "2d_art_poster_2", "label": "2d_art_poster_2"},
                     ],
                     condition="return fieldsData.generation_type.value === 'text_to_image' && fieldsData.base_style.value === 'digital_illustration'",
-                    condition_python=lambda ports: ports["generation_type"].value == "text_to_image"
-                    and ports["base_style"].value == "digital_illustration",
+                    condition_python=lambda ports: ports["generation_type"].value == "text_to_image" and ports["base_style"].value == "digital_illustration",
                     multiple=True,
                 ),
                 "substyle_vector_illustration": InputPort(
@@ -603,8 +611,7 @@ class Recraft(Node):
                         {"value": "linocut", "label": "linocut"},
                     ],
                     condition="return fieldsData.generation_type.value === 'text_to_image' && fieldsData.base_style.value === 'vector_illustration'",
-                    condition_python=lambda ports: ports["generation_type"].value == "text_to_image"
-                    and ports["base_style"].value == "vector_illustration",
+                    condition_python=lambda ports: ports["generation_type"].value == "text_to_image" and ports["base_style"].value == "vector_illustration",
                     multiple=True,
                 ),
                 "size": InputPort(
@@ -663,6 +670,107 @@ class Recraft(Node):
         )


+class GptImage(Node):
+    def __init__(self, id: Optional[str] = None):
+        super().__init__(
+            node_type="GptImage",
+            category="image_generation",
+            task_name="image_generation.gpt_image",
+            node_id=id,
+            ports={
+                "action": InputPort(
+                    name="action",
+                    port_type=PortType.SELECT,
+                    value="generation",
+                    options=[
+                        {"value": "generation", "label": "generation"},
+                        {"value": "edit", "label": "edit"},
+                    ],
+                ),
+                "prompt": InputPort(
+                    name="prompt",
+                    port_type=PortType.TEXTAREA,
+                    value="",
+                    multiple=True,
+                    show=True,
+                ),
+                "individual_images": InputPort(
+                    name="individual_images",
+                    port_type=PortType.CHECKBOX,
+                    value=False,
+                    condition="return fieldsData.action.value === 'edit'",
+                    condition_python=lambda ports: ports["action"].value == "edit",
+                ),
+                "image": InputPort(
+                    name="image",
+                    port_type=PortType.FILE,
+                    value=list(),
+                    support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
+                    multiple=False,
+                    condition="return fieldsData.action.value === 'edit'",
+                    condition_python=lambda ports: ports["action"].value == "edit",
+                ),
+                "mask": InputPort(
+                    name="mask",
+                    port_type=PortType.FILE,
+                    value=list(),
+                    support_file_types=[".png"],
+                    condition="return fieldsData.action.value === 'edit'",
+                    condition_python=lambda ports: ports["action"].value == "edit",
+                ),
+                "model": InputPort(
+                    name="model",
+                    port_type=PortType.SELECT,
+                    value="gpt-image-1",
+                    options=[
+                        {"value": "gpt-image-1", "label": "gpt-image-1"},
+                    ],
+                    multiple=True,
+                ),
+                "size": InputPort(
+                    name="size",
+                    port_type=PortType.SELECT,
+                    value="1024x1024",
+                    options=[
+                        {"value": "1024x1024", "label": "1024x1024"},
+                        {"value": "1024x1536", "label": "1024x1536"},
+                        {"value": "1536x1024", "label": "1536x1024"},
+                    ],
+                    multiple=True,
+                ),
+                "n": InputPort(
+                    name="n",
+                    port_type=PortType.NUMBER,
+                    value=1,
+                    min=1,
+                    max=10,
+                ),
+                "quality": InputPort(
+                    name="quality",
+                    port_type=PortType.SELECT,
+                    value="high",
+                    options=[
+                        {"value": "low", "label": "low"},
+                        {"value": "medium", "label": "medium"},
+                        {"value": "high", "label": "high"},
+                    ],
+                    multiple=True,
+                ),
+                "output_type": InputPort(
+                    name="output_type",
+                    port_type=PortType.SELECT,
+                    value="markdown",
+                    options=[
+                        {"value": "only_link", "label": "only_link"},
+                        {"value": "markdown", "label": "markdown"},
+                        {"value": "html", "label": "html"},
+                    ],
+                ),
+                "output": OutputPort(),
+            },
+        )
+
+
 class StableDiffusion(Node):
     def __init__(self, id: Optional[str] = None):
         special_width_height_models = [
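The new GptImage node follows the same Node/InputPort pattern as its neighbors, with the edit-only ports (image, mask, individual_images) gated on action == 'edit'. A hedged usage sketch, assuming the node stores its ports in a ports mapping whose entries expose a mutable .value, as the definitions above suggest (file names are invented):

    from vectorvein.workflow.nodes.image_generation import GptImage

    node = GptImage()
    node.ports["action"].value = "edit"    # reveals image/mask/individual_images
    node.ports["prompt"].value = "Replace the background with a sunset beach"
    node.ports["image"].value = ["photo.png"]  # hypothetical input file
    node.ports["size"].value = "1536x1024"
    node.ports["quality"].value = "medium"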
@@ -21,15 +21,31 @@ class AliyunQwen(Node):
                 "llm_model": InputPort(
                     name="llm_model",
                     port_type=PortType.SELECT,
-                    value="qwen2.5-72b-instruct",
+                    value="qwen3-32b",
                     options=[
                         {"value": "qwen2.5-72b-instruct", "label": "qwen2.5-72b-instruct"},
                         {"value": "qwen2.5-32b-instruct", "label": "qwen2.5-32b-instruct"},
                         {"value": "qwen2.5-coder-32b-instruct", "label": "qwen2.5-coder-32b-instruct"},
-                        {"value": "qwq-32b-preview", "label": "qwq-32b-preview"},
+                        {"value": "qwq-32b", "label": "qwq-32b"},
                         {"value": "qwen2.5-14b-instruct", "label": "qwen2.5-14b-instruct"},
                         {"value": "qwen2.5-7b-instruct", "label": "qwen2.5-7b-instruct"},
                         {"value": "qwen2.5-coder-7b-instruct", "label": "qwen2.5-coder-7b-instruct"},
+                        {"value": "qwen3-235b-a22b", "label": "qwen3-235b-a22b"},
+                        {"value": "qwen3-235b-a22b-thinking", "label": "qwen3-235b-a22b-thinking"},
+                        {"value": "qwen3-32b", "label": "qwen3-32b"},
+                        {"value": "qwen3-32b-thinking", "label": "qwen3-32b-thinking"},
+                        {"value": "qwen3-30b-a3b", "label": "qwen3-30b-a3b"},
+                        {"value": "qwen3-30b-a3b-thinking", "label": "qwen3-30b-a3b-thinking"},
+                        {"value": "qwen3-14b", "label": "qwen3-14b"},
+                        {"value": "qwen3-14b-thinking", "label": "qwen3-14b-thinking"},
+                        {"value": "qwen3-8b", "label": "qwen3-8b"},
+                        {"value": "qwen3-8b-thinking", "label": "qwen3-8b-thinking"},
+                        {"value": "qwen3-4b", "label": "qwen3-4b"},
+                        {"value": "qwen3-4b-thinking", "label": "qwen3-4b-thinking"},
+                        {"value": "qwen3-1.7b", "label": "qwen3-1.7b"},
+                        {"value": "qwen3-1.7b-thinking", "label": "qwen3-1.7b-thinking"},
+                        {"value": "qwen3-0.6b", "label": "qwen3-0.6b"},
+                        {"value": "qwen3-0.6b-thinking", "label": "qwen3-0.6b-thinking"},
                     ],
                 ),
                 "top_p": InputPort(
@@ -180,6 +196,7 @@ class BaiduWenxin(Node):
                         {"value": "ernie-speed", "label": "ernie-speed"},
                         {"value": "ernie-3.5", "label": "ernie-3.5"},
                         {"value": "ernie-4.0", "label": "ernie-4.0"},
+                        {"value": "ernie-4.5", "label": "ernie-4.5"},
                     ],
                 ),
                 "temperature": InputPort(
@@ -224,6 +241,9 @@ class ChatGLM(Node):
                         {"value": "glm-4-flash", "label": "glm-4-flash"},
                         {"value": "glm-4-long", "label": "glm-4-long"},
                         {"value": "glm-zero-preview", "label": "glm-zero-preview"},
+                        {"value": "glm-z1-air", "label": "glm-z1-air"},
+                        {"value": "glm-z1-airx", "label": "glm-z1-airx"},
+                        {"value": "glm-z1-flash", "label": "glm-z1-flash"},
                     ],
                 ),
                 "temperature": InputPort(
@@ -299,8 +319,12 @@ class Claude(Node):
                 "llm_model": InputPort(
                     name="llm_model",
                     port_type=PortType.SELECT,
-                    value="claude-3-5-haiku",
+                    value="claude-sonnet-4-20250514",
                     options=[
+                        {"value": "claude-opus-4-20250514-thinking", "label": "claude-opus-4-20250514-thinking"},
+                        {"value": "claude-opus-4-20250514", "label": "claude-opus-4-20250514"},
+                        {"value": "claude-sonnet-4-20250514-thinking", "label": "claude-sonnet-4-20250514-thinking"},
+                        {"value": "claude-sonnet-4-20250514", "label": "claude-sonnet-4-20250514"},
                         {"value": "claude-3-7-sonnet-thinking", "label": "claude-3-7-sonnet-thinking"},
                         {"value": "claude-3-7-sonnet", "label": "claude-3-7-sonnet"},
                         {"value": "claude-3-5-sonnet", "label": "claude-3-5-sonnet"},
@@ -441,21 +465,13 @@ class Gemini(Node):
                 "llm_model": InputPort(
                     name="llm_model",
                     port_type=PortType.SELECT,
-                    value="gemini-1.5-flash",
+                    value="gemini-2.5-pro-preview-06-05",
                     options=[
-                        {"value": "gemini-1.5-flash", "label": "gemini-1.5-flash"},
-                        {"value": "gemini-1.5-pro", "label": "gemini-1.5-pro"},
-                        {"value": "gemini-2.0-flash", "label": "gemini-2.0-flash"},
-                        {
-                            "value": "gemini-2.0-flash-thinking-exp-01-21",
-                            "label": "gemini-2.0-flash-thinking-exp-01-21",
-                        },
-                        {"value": "gemini-2.0-pro-exp-02-05", "label": "gemini-2.0-pro-exp-02-05"},
-                        {
-                            "value": "gemini-2.0-flash-lite-preview-02-05",
-                            "label": "gemini-2.0-flash-lite-preview-02-05",
-                        },
-                        {"value": "gemini-exp-1206", "label": "gemini-exp-1206"},
+                        {"label": "gemini-2.0-flash", "value": "gemini-2.0-flash"},
+                        {"label": "gemini-2.0-flash-lite-preview-02-05", "value": "gemini-2.0-flash-lite-preview-02-05"},
+                        {"value": "gemini-2.0-flash-thinking-exp-01-21", "label": "gemini-2.0-flash-thinking-exp-01-21"},
+                        {"label": "gemini-2.5-pro-preview-06-05", "value": "gemini-2.5-pro-preview-06-05"},
+                        {"label": "gemini-2.5-flash-preview-05-20", "value": "gemini-2.5-flash-preview-05-20"},
                     ],
                 ),
                 "temperature": InputPort(
@@ -678,6 +694,7 @@ class Moonshot(Node):
                         {"value": "moonshot-v1-8k", "label": "moonshot-v1-8k"},
                         {"value": "moonshot-v1-32k", "label": "moonshot-v1-32k"},
                         {"value": "moonshot-v1-128k", "label": "moonshot-v1-128k"},
+                        {"value": "kimi-latest", "label": "kimi-latest"},
                     ],
                 ),
                 "temperature": InputPort(
@@ -771,6 +788,10 @@ class OpenAI(Node):
                         {"value": "o1-mini", "label": "o1-mini"},
                         {"value": "o1-preview", "label": "o1-preview"},
                         {"value": "o3-mini", "label": "o3-mini"},
+                        {"value": "o3-mini-high", "label": "o3-mini-high"},
+                        {"value": "gpt-4.1", "label": "gpt-4.1"},
+                        {"value": "o4-mini", "label": "o4-mini"},
+                        {"value": "o4-mini-high", "label": "o4-mini-high"},
                     ],
                 ),
                 "temperature": InputPort(
@@ -855,9 +876,13 @@ class XAi(Node):
                 "llm_model": InputPort(
                     name="llm_model",
                     port_type=PortType.SELECT,
-                    value="grok-beta",
+                    value="grok-3-beta",
                     options=[
                         {"value": "grok-beta", "label": "grok-beta"},
+                        {"value": "grok-3-beta", "label": "grok-3-beta"},
+                        {"value": "grok-3-fast-beta", "label": "grok-3-fast-beta"},
+                        {"value": "grok-3-mini-beta", "label": "grok-3-mini-beta"},
+                        {"value": "grok-3-mini-fast-beta", "label": "grok-3-mini-fast-beta"},
                     ],
                 ),
                 "temperature": InputPort(
@@ -21,8 +21,14 @@ class ClaudeVision(Node):
                 "llm_model": InputPort(
                     name="llm_model",
                     port_type=PortType.SELECT,
-                    value="claude-3-5-sonnet",
+                    value="claude-sonnet-4-20250514",
                     options=[
+                        {"value": "claude-opus-4-20250514-thinking", "label": "claude-opus-4-20250514-thinking"},
+                        {"value": "claude-opus-4-20250514", "label": "claude-opus-4-20250514"},
+                        {"value": "claude-sonnet-4-20250514-thinking", "label": "claude-sonnet-4-20250514-thinking"},
+                        {"value": "claude-sonnet-4-20250514", "label": "claude-sonnet-4-20250514"},
+                        {"value": "claude-3-7-sonnet-thinking", "label": "claude-3-7-sonnet-thinking"},
+                        {"value": "claude-3-7-sonnet", "label": "claude-3-7-sonnet"},
                         {"value": "claude-3-5-sonnet", "label": "claude-3-5-sonnet"},
                         {"value": "claude-3-opus", "label": "claude-3-opus"},
                         {"value": "claude-3-sonnet", "label": "claude-3-sonnet"},
@@ -134,13 +140,13 @@ class GeminiVision(Node):
                 "llm_model": InputPort(
                     name="llm_model",
                     port_type=PortType.SELECT,
-                    value="gemini-1.5-pro",
+                    value="gemini-2.5-pro-preview-06-05",
                     options=[
-                        {"value": "gemini-1.5-pro", "label": "gemini-1.5-pro"},
-                        {"value": "gemini-1.5-flash", "label": "gemini-1.5-flash"},
-                        {"value": "gemini-2.0-flash-exp", "label": "gemini-2.0-flash-exp"},
-                        {"value": "gemini-2.0-flash-thinking-exp-1219", "label": "gemini-2.0-flash-thinking-exp-1219"},
-                        {"value": "gemini-exp-1206", "label": "gemini-exp-1206"},
+                        {"label": "gemini-2.0-flash", "value": "gemini-2.0-flash"},
+                        {"label": "gemini-2.0-flash-lite-preview-02-05", "value": "gemini-2.0-flash-lite-preview-02-05"},
+                        {"value": "gemini-2.0-flash-thinking-exp-01-21", "label": "gemini-2.0-flash-thinking-exp-01-21"},
+                        {"label": "gemini-2.5-pro-preview-06-05", "value": "gemini-2.5-pro-preview-06-05"},
+                        {"label": "gemini-2.5-flash-preview-05-20", "value": "gemini-2.5-flash-preview-05-20"},
                     ],
                 ),
                 "multiple_input": InputPort(
@@ -254,6 +260,9 @@ class GptVision(Node):
                     options=[
                         {"value": "gpt-4o", "label": "gpt-4o"},
                         {"value": "gpt-4o-mini", "label": "gpt-4o-mini"},
+                        {"value": "o4-mini", "label": "o4-mini"},
+                        {"value": "o4-mini-high", "label": "o4-mini-high"},
+                        {"value": "gpt-4.1", "label": "gpt-4.1"},
                     ],
                 ),
                 "images_or_urls": InputPort(
@@ -332,16 +332,14 @@ class PictureRender(Node):
                     port_type=PortType.NUMBER,
                     value=1200,
                     condition="return ['url', 'html_code', 'markdown', 'mindmap', 'mermaid'].includes(fieldsData.render_type.value)",
-                    condition_python=lambda ports: ports["render_type"].value
-                    in ["url", "html_code", "markdown", "mindmap", "mermaid"],
+                    condition_python=lambda ports: ports["render_type"].value in ["url", "html_code", "markdown", "mindmap", "mermaid"],
                 ),
                 "height": InputPort(
                     name="height",
                     port_type=PortType.NUMBER,
                     value=800,
                     condition="return ['url', 'html_code', 'markdown', 'mindmap', 'mermaid'].includes(fieldsData.render_type.value)",
-                    condition_python=lambda ports: ports["render_type"].value
-                    in ["url", "html_code", "markdown", "mindmap", "mermaid"],
+                    condition_python=lambda ports: ports["render_type"].value in ["url", "html_code", "markdown", "mindmap", "mermaid"],
                 ),
                 "base64_encode": InputPort(
                     name="base64_encode",
@@ -161,11 +161,30 @@ class TextSearch(Node):
                     options=[
                         {"value": "bing", "label": "bing"},
                         {"value": "bochaai", "label": "bochaai"},
+                        {"value": "exa.ai", "label": "exa.ai"},
                         {"value": "jina.ai", "label": "jina.ai"},
                         {"value": "zhipuai", "label": "zhipuai"},
                         {"value": "duckduckgo", "label": "duckduckgo"},
                     ],
                 ),
+                "result_category": InputPort(
+                    name="result_category",
+                    port_type=PortType.SELECT,
+                    value="all",
+                    options=[
+                        {"value": "all", "label": "all"},
+                        {"value": "company", "label": "company"},
+                        {"value": "research_paper", "label": "research_paper"},
+                        {"value": "news", "label": "news"},
+                        {"value": "pdf", "label": "pdf"},
+                        {"value": "github", "label": "github"},
+                        {"value": "personal_site", "label": "personal_site"},
+                        {"value": "linkedin_profile", "label": "linkedin_profile"},
+                        {"value": "financial_report", "label": "financial_report"},
+                    ],
+                    condition="return fieldsData.search_engine.value === 'exa.ai'",
+                    condition_python=lambda ports: ports["search_engine"].value == "exa.ai",
+                ),
                 "count": InputPort(
                     name="count",
                     port_type=PortType.NUMBER,
@@ -219,14 +238,26 @@ class TextSearch(Node):
                 "output_page_title": OutputPort(
                     name="output_page_title",
                     port_type=PortType.LIST,
+                    condition="!fieldsData.combine_result_in_text.value",
+                    condition_python=lambda ports: not ports["combine_result_in_text"].value,
                 ),
                 "output_page_url": OutputPort(
                     name="output_page_url",
                     port_type=PortType.LIST,
+                    condition="!fieldsData.combine_result_in_text.value",
+                    condition_python=lambda ports: not ports["combine_result_in_text"].value,
                 ),
                 "output_page_snippet": OutputPort(
                     name="output_page_snippet",
                     port_type=PortType.LIST,
+                    condition="!fieldsData.combine_result_in_text.value",
+                    condition_python=lambda ports: not ports["combine_result_in_text"].value,
+                ),
+                "output_combined": OutputPort(
+                    name="output_combined",
+                    port_type=PortType.LIST,
+                    condition="!fieldsData.combine_result_in_text.value",
+                    condition_python=lambda ports: not ports["combine_result_in_text"].value,
                 ),
             },
         )
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.2.96
+Version: 0.2.98
 Summary: VectorVein Python SDK
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
@@ -1,13 +1,13 @@
-vectorvein-0.2.96.dist-info/METADATA,sha256=CWBCAxqrKUZke23EwSmb7ycDvpIizYhCI3EoApv7t3s,4567
-vectorvein-0.2.96.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
-vectorvein-0.2.96.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+vectorvein-0.2.98.dist-info/METADATA,sha256=zg6ffo132eAMfqBZs8RY3ckHLA6uhs9IkcqNz5dDR6I,4567
+vectorvein-0.2.98.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+vectorvein-0.2.98.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
 vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
 vectorvein/api/exceptions.py,sha256=uS_PAdx0ksC0r3dgfSGWdbLMZm4qdLeWSSqCv1g3_Gc,772
 vectorvein/api/models.py,sha256=xtPWMsB0yIJI7i-gY4B6MtvXv0ZIXnoeKspmeInH6fU,1449
 vectorvein/chat_clients/__init__.py,sha256=UIytpIgwo8qkZpIyrHVxLYTyliUOTp4J7C4iHRjbtWE,23850
-vectorvein/chat_clients/anthropic_client.py,sha256=xAB2JDCqm865mmCXK9jfN636-lNrICTDUcBypuoWu_0,69234
+vectorvein/chat_clients/anthropic_client.py,sha256=OYqPjRzFcwhZjHSxmj0NlfZSwsVAo_PPD5sCqnMMBHA,68874
 vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
 vectorvein/chat_clients/base_client.py,sha256=p7s-G4Wh9MSpDKEfG8wuFAeWy5DGvj5Go31hqrpQPhM,38817
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
@@ -19,7 +19,7 @@ vectorvein/chat_clients/minimax_client.py,sha256=YOILWcsHsN5tihLTMbKJIyJr9TJREMI
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=oJeiAtnxUrfqnJtlk_niehLlzfd2yHGG5tKthqPsEFI,48919
+vectorvein/chat_clients/openai_compatible_client.py,sha256=p9jWjuNnrOg3-OwW_Q_1FjgtPHKsKL4_L3oD_7M7U2Y,58729
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
@@ -32,7 +32,7 @@ vectorvein/server/token_server.py,sha256=36F9PKSNOX8ZtYBXY_l-76GQTpUSmQ2Y8EMy1H7
 vectorvein/settings/__init__.py,sha256=j8BNRqJ23GWI83vFzOQJZvZuy-WtKMeOTJRghG4cG5I,11471
 vectorvein/settings/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/types/__init__.py,sha256=0XNY7FGPklSk0eKPR0ZgwG2kNqyZ0z3Z3G7oLP0ep8Y,3838
-vectorvein/types/defaults.py,sha256=aSGkKIP2crYylumrjng80e6XDNIppzl7_Jw7q1lhi5I,38444
+vectorvein/types/defaults.py,sha256=8FbCJpl2D0hg9Ezr6P0T7Azd5Ticgoddl2puAYjWoBQ,38733
 vectorvein/types/enums.py,sha256=LplSVkXLBK-t8TWtJKj_f7ktWTd6CSHWRLb67XKMm54,1716
 vectorvein/types/exception.py,sha256=KtnqZ-1DstHm95SZAyZdHhkGq1bJ4A9Aw3Zfdu-VIFo,130
 vectorvein/types/llm_parameters.py,sha256=q2Ilrh0mjERnI8qRDJ-2exQlHiMb-HEXVFTDiAVk6Dk,9452
@@ -49,14 +49,14 @@ vectorvein/workflow/nodes/__init__.py,sha256=dWrWtL3q0Vsn-MLgJ7gNgLCrwZ5BrqjrN2Q
 vectorvein/workflow/nodes/audio_generation.py,sha256=ZRFZ_ycMTSJ2LKmekctagQdJYTl-3q4TNOIKETpS9AM,5870
 vectorvein/workflow/nodes/control_flows.py,sha256=fDySWek8Isbfznwn0thmbTwTP4c99w68Up9dlASAtIo,6805
 vectorvein/workflow/nodes/file_processing.py,sha256=f4PlfgSAVFhwuqcEAvcLarNIkHUFP4FJucxnb3kekTU,4498
-vectorvein/workflow/nodes/image_generation.py,sha256=a1ObkmvM8dwMxQvsnoYJwaURn0WwLXsqVkPybRnXT9A,35708
-vectorvein/workflow/nodes/llms.py,sha256=ePnWAF4q-Uai5ZHgrYb7ZeoWzjIZ9B8XGAPPT5QEO10,40238
+vectorvein/workflow/nodes/image_generation.py,sha256=aH5TUc3Cjs07OcHJAA_fIsexwI9Jy5J2eX6wSgUl0B8,40708
+vectorvein/workflow/nodes/llms.py,sha256=iceW_AA0eyq701pcs5_pvNaDG9yR-zZoW2TJd7jMiCI,42684
 vectorvein/workflow/nodes/media_editing.py,sha256=ut4NN9_VUqnsqT2rlv0JrLhyxRLNUkvHb0c4QZDiKz8,34320
-vectorvein/workflow/nodes/media_processing.py,sha256=_YuoJur2EeIeZfg8dSigDtqYcUpN6uVjGXJSVNqa6uI,22067
-vectorvein/workflow/nodes/output.py,sha256=JHp-Y9EtuwD9qtZvVV2zHkH1OEK_6xlYh_DT1LrKuBs,13174
+vectorvein/workflow/nodes/media_processing.py,sha256=zfFMgKtggADJ1mbs9TAWKZK49rvpMHD3U7J0WOWgF4g,23013
+vectorvein/workflow/nodes/output.py,sha256=60Eef45OhyvSHhzbiotjBPYD1eIlJZqnUckJWQqPmvo,13132
 vectorvein/workflow/nodes/relational_db.py,sha256=Zg4G3xIQ94uoWE-Z4YER1bBhWgBQ6mYbJVQDeAN895I,5498
 vectorvein/workflow/nodes/text_processing.py,sha256=BRmFSyLPADFplbUqUNjoJdmHzQvrPknJvBvvgtzaklk,8744
-vectorvein/workflow/nodes/tools.py,sha256=ejIQO2hfuRr6m1jc9NMZEUK9ABEWPpX0PVO_UA5BtSc,13853
+vectorvein/workflow/nodes/tools.py,sha256=GDJnxv4fzlATlP5zACs_1CwMsNcssKLLHPgQEsVLnZA,15795
 vectorvein/workflow/nodes/triggers.py,sha256=BolH4X6S8HSuU2kwHmYKr-ozHbgKBmdZRcnXpK5EfGA,597
 vectorvein/workflow/nodes/vector_db.py,sha256=p9AT_E8ASbcYHZqHYTCIGvqkIqzxaFM4UxaUELJEe-c,6112
 vectorvein/workflow/nodes/video_generation.py,sha256=qmdg-t_idpxq1veukd-jv_ChICMOoInKxprV9Z4Vi2w,4118
@@ -65,4 +65,4 @@ vectorvein/workflow/utils/analyse.py,sha256=msmvyz35UTYTwqQR5sg9H0sm1vxmGDSmep9X
 vectorvein/workflow/utils/check.py,sha256=B_NdwqIqnc7Ko2HHqFpfOmWVaAu21tPITe0szKfiZKc,11414
 vectorvein/workflow/utils/json_to_code.py,sha256=P8dhhSNgKhTnW17qXNjLO2aLdb0rA8qMAWxhObol2TU,7295
 vectorvein/workflow/utils/layout.py,sha256=j0bRD3uaXu40xCS6U6BGahBI8FrHa5MiF55GbTrZ1LM,4565
-vectorvein-0.2.96.dist-info/RECORD,,
+vectorvein-0.2.98.dist-info/RECORD,,