vectorvein 0.3.17-py3-none-any.whl → 0.3.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,6 +8,8 @@ from .models import (
     WorkflowOutput,
     WorkflowRunResult,
     AccessKeyListResponse,
+    Workflow,
+    WorkflowTag,
 )
 from .exceptions import (
     VectorVeinAPIError,
@@ -27,6 +29,8 @@ __all__ = [
     "WorkflowOutput",
     "WorkflowRunResult",
     "AccessKeyListResponse",
+    "Workflow",
+    "WorkflowTag",
     "VectorVeinAPIError",
     "APIKeyError",
     "WorkflowError",
vectorvein/api/client.py CHANGED
@@ -24,6 +24,8 @@ from .models import (
     WorkflowOutput,
     WorkflowRunResult,
     AccessKeyListResponse,
+    Workflow,
+    WorkflowTag,
 )


@@ -223,6 +225,71 @@ class VectorVeinClient:
         else:
             raise WorkflowError(f"Workflow execution failed: {response['msg']}")

+    def create_workflow(
+        self,
+        title: str = "New workflow",
+        brief: str = "",
+        images: list[str] | None = None,
+        tags: list[dict[str, str]] | None = None,
+        data: dict[str, Any] | None = None,
+        language: str = "zh-CN",
+        tool_call_data: dict[str, Any] | None = None,
+        source_workflow_wid: str | None = None,
+    ) -> Workflow:
+        """Create a new workflow
+
+        Args:
+            title: Workflow title, default is "New workflow"
+            brief: Workflow brief description
+            images: List of image URLs
+            tags: List of workflow tags, each tag should have a 'tid' field
+            data: Workflow data containing nodes and edges, default is {"nodes": [], "edges": []}
+            language: Workflow language, default is "zh-CN"
+            tool_call_data: Tool call data
+            source_workflow_wid: Source workflow ID for copying
+
+        Returns:
+            Workflow: Created workflow information
+
+        Raises:
+            VectorVeinAPIError: Workflow creation error
+        """
+        payload = {
+            "title": title,
+            "brief": brief,
+            "images": images or [],
+            "tags": tags or [],
+            "data": data or {"nodes": [], "edges": []},
+            "language": language,
+            "tool_call_data": tool_call_data or {},
+        }
+
+        if source_workflow_wid:
+            payload["source_workflow_wid"] = source_workflow_wid
+
+        response = self._request("POST", "workflow/create", json=payload)
+
+        # Parse tags from response
+        workflow_tags = []
+        if response["data"].get("tags"):
+            for tag_data in response["data"]["tags"]:
+                if isinstance(tag_data, dict):
+                    workflow_tags.append(WorkflowTag(**tag_data))
+
+        return Workflow(
+            wid=response["data"]["wid"],
+            title=response["data"]["title"],
+            brief=response["data"]["brief"],
+            data=response["data"]["data"],
+            language=response["data"]["language"],
+            images=response["data"]["images"],
+            tags=workflow_tags,
+            source_workflow=response["data"].get("source_workflow"),
+            tool_call_data=response["data"].get("tool_call_data"),
+            create_time=response["data"].get("create_time"),
+            update_time=response["data"].get("update_time"),
+        )
+
     def get_access_keys(self, access_keys: list[str] | None = None, get_type: Literal["selected", "all"] = "selected") -> list[AccessKey]:
         """Get access key information

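For orientation, a minimal usage sketch of the new synchronous `create_workflow` (the `api_key` value and the tag `tid` below are placeholders, and the `VectorVeinClient` constructor argument is an assumption about the package's public API):

```python
from vectorvein.api import VectorVeinClient

client = VectorVeinClient(api_key="YOUR_API_KEY")  # hypothetical credentials

# data defaults to {"nodes": [], "edges": []}, i.e. an empty canvas
workflow = client.create_workflow(
    title="Data cleanup",
    brief="Normalize uploaded CSV files",
    tags=[{"tid": "tag-123"}],  # each tag dict carries a 'tid' field
    language="en-US",
)
print(workflow.wid, workflow.create_time)
```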
@@ -650,6 +717,71 @@ class AsyncVectorVeinClient:
         else:
             raise WorkflowError(f"Workflow execution failed: {response['msg']}")

+    async def create_workflow(
+        self,
+        title: str = "New workflow",
+        brief: str = "",
+        images: list[str] | None = None,
+        tags: list[dict[str, str]] | None = None,
+        data: dict[str, Any] | None = None,
+        language: str = "zh-CN",
+        tool_call_data: dict[str, Any] | None = None,
+        source_workflow_wid: str | None = None,
+    ) -> Workflow:
+        """Async create a new workflow
+
+        Args:
+            title: Workflow title, default is "New workflow"
+            brief: Workflow brief description
+            images: List of image URLs
+            tags: List of workflow tags, each tag should have a 'tid' field
+            data: Workflow data containing nodes and edges, default is {"nodes": [], "edges": []}
+            language: Workflow language, default is "zh-CN"
+            tool_call_data: Tool call data
+            source_workflow_wid: Source workflow ID for copying
+
+        Returns:
+            Workflow: Created workflow information
+
+        Raises:
+            VectorVeinAPIError: Workflow creation error
+        """
+        payload = {
+            "title": title,
+            "brief": brief,
+            "images": images or [],
+            "tags": tags or [],
+            "data": data or {"nodes": [], "edges": []},
+            "language": language,
+            "tool_call_data": tool_call_data or {},
+        }
+
+        if source_workflow_wid:
+            payload["source_workflow_wid"] = source_workflow_wid
+
+        response = await self._request("POST", "workflow/create", json=payload)
+
+        # Parse tags from response
+        workflow_tags = []
+        if response["data"].get("tags"):
+            for tag_data in response["data"]["tags"]:
+                if isinstance(tag_data, dict):
+                    workflow_tags.append(WorkflowTag(**tag_data))
+
+        return Workflow(
+            wid=response["data"]["wid"],
+            title=response["data"]["title"],
+            brief=response["data"]["brief"],
+            data=response["data"]["data"],
+            language=response["data"]["language"],
+            images=response["data"]["images"],
+            tags=workflow_tags,
+            source_workflow=response["data"].get("source_workflow"),
+            tool_call_data=response["data"].get("tool_call_data"),
+            create_time=response["data"].get("create_time"),
+            update_time=response["data"].get("update_time"),
+        )
+
     async def get_access_keys(self, access_keys: list[str] | None = None, get_type: Literal["selected", "all"] = "selected") -> list[AccessKey]:
         """Async get access key information

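The async client mirrors the synchronous signature; a sketch with the same placeholder caveats, additionally assuming `AsyncVectorVeinClient` is exported from `vectorvein.api`:

```python
import asyncio

from vectorvein.api import AsyncVectorVeinClient

async def main() -> None:
    client = AsyncVectorVeinClient(api_key="YOUR_API_KEY")  # hypothetical credentials
    # Passing source_workflow_wid asks the server to copy an existing workflow.
    copy = await client.create_workflow(
        title="Copy of data cleanup",
        source_workflow_wid="w_abc123",  # hypothetical source workflow ID
    )
    print(copy.wid)

asyncio.run(main())
```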
vectorvein/api/models.py CHANGED
@@ -72,3 +72,42 @@ class AccessKeyListResponse:
     total: int
     page_size: int
     page: int
+
+
+@dataclass
+class WorkflowTag:
+    """Workflow tag"""
+
+    tid: str
+    name: str
+
+
+@dataclass
+class Workflow:
+    """Workflow information"""
+
+    wid: str
+    title: str
+    brief: str
+    data: dict[str, Any]
+    language: str
+    images: list[str]
+    tags: list[WorkflowTag]
+    source_workflow: str | None = None
+    tool_call_data: dict[str, Any] | None = None
+    create_time: str | None = None
+    update_time: str | None = None
+
+
+@dataclass
+class WorkflowCreateRequest:
+    """Workflow creation request data"""
+
+    title: str = "New workflow"
+    brief: str = ""
+    images: list[str] | None = None
+    tags: list[dict[str, str]] | None = None
+    data: dict[str, Any] | None = None
+    language: str = "zh-CN"
+    tool_call_data: dict[str, Any] | None = None
+    source_workflow_wid: str | None = None
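The new dataclasses are plain containers, constructed the same way `create_workflow` assembles its return value; a small sketch with made-up field values:

```python
from vectorvein.api.models import Workflow, WorkflowTag

tag = WorkflowTag(tid="tag-123", name="etl")  # hypothetical tag
wf = Workflow(
    wid="w_abc123",  # hypothetical workflow ID
    title="Data cleanup",
    brief="",
    data={"nodes": [], "edges": []},
    language="zh-CN",
    images=[],
    tags=[tag],
)
# The trailing fields are optional and default to None.
assert wf.source_workflow is None and wf.create_time is None
```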
@@ -146,9 +146,10 @@ def refactor_into_openai_messages(messages: Iterable[MessageParam]):
                 _content.append(item.model_dump())
             elif isinstance(item, ThinkingBlock | RedactedThinkingBlock):
                 continue
-            elif item.get("type") == "image":
-                image_data = item.get("source", {}).get("data", "")
-                media_type = item.get("source", {}).get("media_type", "")
+            elif isinstance(item, dict) and item.get("type") == "image":
+                source = item.get("source", {})
+                image_data = source.get("data", "") if isinstance(source, dict) else ""
+                media_type = source.get("media_type", "") if isinstance(source, dict) else ""
                 data_url = f"data:{media_type};base64,{image_data}"
                 _content.append(
                     {
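The added `isinstance` guards matter because a content item may be a typed block object rather than a dict, and `source` may arrive malformed. A standalone sketch of the same guarded conversion (the function name is hypothetical):

```python
def image_item_to_data_url(item: object) -> str | None:
    """Mirror the guarded conversion: None unless item looks like an image dict."""
    if not (isinstance(item, dict) and item.get("type") == "image"):
        return None
    source = item.get("source", {})
    image_data = source.get("data", "") if isinstance(source, dict) else ""
    media_type = source.get("media_type", "") if isinstance(source, dict) else ""
    return f"data:{media_type};base64,{image_data}"

print(image_item_to_data_url({"type": "image", "source": {"data": "aGk=", "media_type": "image/png"}}))
# -> data:image/png;base64,aGk=
print(image_item_to_data_url({"type": "image", "source": "oops"}))
# -> data:;base64,  (a malformed source degrades gracefully instead of raising)
```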
@@ -72,7 +72,9 @@ class BaseChatClient(ABC):
         self.http_client = http_client

         if backend_name is not None:
-            self.BACKEND_NAME = BackendType(backend_name)
+            self.backend_name = BackendType(backend_name)  # type: ignore
+        else:
+            self.backend_name = self.BACKEND_NAME

         if settings is None:
             self.settings = default_settings
@@ -81,7 +83,7 @@ class BaseChatClient(ABC):
         else:
             self.settings = settings

-        self.backend_settings = self.settings.get_backend(self.BACKEND_NAME)
+        self.backend_settings = self.settings.get_backend(self.backend_name)

         self.rate_limiter = self._init_rate_limiter()
         self.active_requests = defaultdict(int)
@@ -493,7 +495,9 @@ class BaseAsyncChatClient(ABC):
         self.http_client = http_client

         if backend_name is not None:
-            self.BACKEND_NAME = BackendType(backend_name)
+            self.backend_name = BackendType(backend_name)  # type: ignore
+        else:
+            self.backend_name = self.BACKEND_NAME

         if settings is None:
             self.settings = default_settings
@@ -502,7 +506,7 @@ class BaseAsyncChatClient(ABC):
         else:
             self.settings = settings

-        self.backend_settings = self.settings.get_backend(self.BACKEND_NAME)
+        self.backend_settings = self.settings.get_backend(self.backend_name)

         self.rate_limiter = self._init_rate_limiter()
         self.active_requests = defaultdict(int)
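The motivation for the four hunks above: assigning `self.BACKEND_NAME = ...` shadowed what reads as a class-level constant with a per-instance value; routing the override through a separate `self.backend_name` leaves the class attribute untouched. A minimal illustration with a stand-in class:

```python
class Client:
    BACKEND_NAME = "openai"  # class-level default

    def __init__(self, backend_name: str | None = None) -> None:
        # New pattern: per-instance attribute, falling back to the class default.
        if backend_name is not None:
            self.backend_name = backend_name
        else:
            self.backend_name = self.BACKEND_NAME

a = Client("moonshot")
b = Client()
print(a.backend_name, b.backend_name)  # moonshot openai
print(Client.BACKEND_NAME)  # openai -- the class constant is never mutated
```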
@@ -46,6 +46,64 @@ if TYPE_CHECKING:
     from ..types.settings import SettingsDict


+def get_thinking_tags(backend: BackendType) -> tuple[str, str]:
+    """Get the appropriate thinking tags for the given backend.
+
+    Args:
+        backend: The backend type
+
+    Returns:
+        Tuple of (start_tag, end_tag) for thinking content
+    """
+    if backend == BackendType.Gemini:
+        return ("<thought>", "</thought>")
+    else:
+        return ("<think>", "</think>")
+
+
+def process_thinking_content(buffer: str, in_reasoning: bool, start_tag: str, end_tag: str) -> tuple[str, str, str, bool]:
+    """Process buffer content to extract thinking tags.
+
+    Args:
+        buffer: Content buffer to process
+        in_reasoning: Whether currently inside thinking tags
+        start_tag: Opening thinking tag
+        end_tag: Closing thinking tag
+
+    Returns:
+        Tuple of (remaining_buffer, output_content, reasoning_content, new_in_reasoning_state)
+    """
+    current_output_content = ""
+    current_reasoning_content = ""
+
+    while buffer:
+        if not in_reasoning:
+            start_pos = buffer.find(start_tag)
+            if start_pos != -1:
+                # Found start tag
+                if start_pos > 0:
+                    current_output_content += buffer[:start_pos]
+                buffer = buffer[start_pos + len(start_tag) :]
+                in_reasoning = True
+            else:
+                # No start tag found, output all content
+                current_output_content += buffer
+                buffer = ""
+        else:
+            end_pos = buffer.find(end_tag)
+            if end_pos != -1:
+                # Found end tag
+                current_reasoning_content += buffer[:end_pos]
+                buffer = buffer[end_pos + len(end_tag) :]
+                in_reasoning = False
+            else:
+                # No end tag found, accumulate as reasoning content
+                current_reasoning_content += buffer
+                buffer = ""
+
+    return buffer, current_output_content, current_reasoning_content, in_reasoning
+
+
 class OpenAICompatibleChatClient(BaseChatClient):
     DEFAULT_MODEL: str = ""
     BACKEND_NAME: BackendType
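Because the extraction logic is now a plain function, the tag state machine can be exercised in isolation; a sketch feeding a thinking block split across two stream deltas (assuming `process_thinking_content` is imported from the module above):

```python
chunks = ["Hello <think>step 1,", " step 2</think> world"]  # simulated stream deltas

buffer, in_reasoning = "", False
output, reasoning = "", ""
for delta in chunks:
    buffer += delta
    buffer, out, think, in_reasoning = process_thinking_content(buffer, in_reasoning, "<think>", "</think>")
    output += out
    reasoning += think

print(repr(output))     # 'Hello  world'
print(repr(reasoning))  # 'step 1, step 2'
```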
@@ -280,6 +338,9 @@ class OpenAICompatibleChatClient(BaseChatClient):
         if self.model_id is None:
             self.model_id = self.model_setting.id

+        # Get thinking tags for the current backend
+        start_tag, end_tag = get_thinking_tags(self.backend_name)
+
         if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
             messages = cutoff_messages(
                 messages,
@@ -404,40 +465,14 @@ class OpenAICompatibleChatClient(BaseChatClient):
                             tool_call.index = index
                             tool_call.type = "function"  # Also caused by MiniMax's non-conformant output

-                    # Even with function call support, <think> tags still need to be handled
+                    # Even with function call support, thinking tags still need to be handled
                     message = chunk.choices[0].delta.model_dump()
                     delta_content = message.get("content", "")
                     if delta_content:
                         buffer += delta_content

-                    # Process the buffered content and extract <think> tags
-                    current_output_content = ""
-                    current_reasoning_content = ""
-
-                    while buffer:
-                        if not in_reasoning:
-                            start_pos = buffer.find("<think>")
-                            if start_pos != -1:
-                                # Found the start of a <think> tag
-                                if start_pos > 0:
-                                    current_output_content += buffer[:start_pos]
-                                buffer = buffer[start_pos + 7 :]  # skip "<think>"
-                                in_reasoning = True
-                            else:
-                                # No <think> tag found, output directly
-                                current_output_content += buffer
-                                buffer = ""
-                        else:
-                            end_pos = buffer.find("</think>")
-                            if end_pos != -1:
-                                # Found the closing </think> tag
-                                current_reasoning_content += buffer[:end_pos]
-                                buffer = buffer[end_pos + 8 :]  # skip "</think>"
-                                in_reasoning = False
-                            else:
-                                # No closing tag yet, keep accumulating as reasoning content
-                                current_reasoning_content += buffer
-                                buffer = ""
+                    # Process the buffered content and extract thinking tags
+                    buffer, current_output_content, current_reasoning_content, in_reasoning = process_thinking_content(buffer, in_reasoning, start_tag, end_tag)

                     # Accumulate content
                     if current_output_content:
@@ -463,34 +498,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
                     if delta_content:
                         buffer += delta_content

-                    # Process the buffered content and extract <think> tags
-                    current_output_content = ""
-                    current_reasoning_content = ""
-
-                    while buffer:
-                        if not in_reasoning:
-                            start_pos = buffer.find("<think>")
-                            if start_pos != -1:
-                                # Found the start of a <think> tag
-                                if start_pos > 0:
-                                    current_output_content += buffer[:start_pos]
-                                buffer = buffer[start_pos + 7 :]  # skip "<think>"
-                                in_reasoning = True
-                            else:
-                                # No <think> tag found, output directly
-                                current_output_content += buffer
-                                buffer = ""
-                        else:
-                            end_pos = buffer.find("</think>")
-                            if end_pos != -1:
-                                # Found the closing </think> tag
-                                current_reasoning_content += buffer[:end_pos]
-                                buffer = buffer[end_pos + 8 :]  # skip "</think>"
-                                in_reasoning = False
-                            else:
-                                # No closing tag yet, keep accumulating as reasoning content
-                                current_reasoning_content += buffer
-                                buffer = ""
+                    # Process the buffered content and extract thinking tags
+                    buffer, current_output_content, current_reasoning_content, in_reasoning = process_thinking_content(buffer, in_reasoning, start_tag, end_tag)

                     # Accumulate content
                     if current_output_content:
@@ -597,7 +606,9 @@ class OpenAICompatibleChatClient(BaseChatClient):
             }

             if not result["reasoning_content"] and result["content"]:
-                think_match = re.search(r"<think>(.*?)</think>", result["content"], re.DOTALL)
+                # Create dynamic regex pattern based on backend
+                think_pattern = f"{re.escape(start_tag)}(.*?){re.escape(end_tag)}"
+                think_match = re.search(think_pattern, result["content"], re.DOTALL)
                 if think_match:
                     result["reasoning_content"] = think_match.group(1)
                     result["content"] = result["content"].replace(think_match.group(0), "", 1)
@@ -850,6 +861,9 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         if self.model_id is None:
             self.model_id = self.model_setting.id

+        # Get thinking tags for the current backend
+        start_tag, end_tag = get_thinking_tags(self.backend_name)
+
         if not skip_cutoff and self.context_length_control == ContextLengthControlType.Latest:
             messages = cutoff_messages(
                 messages,
@@ -974,40 +988,14 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                             tool_call.index = index
                             tool_call.type = "function"

-                    # Even with function call support, <think> tags still need to be handled
+                    # Even with function call support, thinking tags still need to be handled
                     message = chunk.choices[0].delta.model_dump()
                     delta_content = message.get("content", "")
                     if delta_content:
                         buffer += delta_content

-                    # Process the buffered content and extract <think> tags
-                    current_output_content = ""
-                    current_reasoning_content = ""
-
-                    while buffer:
-                        if not in_reasoning:
-                            start_pos = buffer.find("<think>")
-                            if start_pos != -1:
-                                # Found the start of a <think> tag
-                                if start_pos > 0:
-                                    current_output_content += buffer[:start_pos]
-                                buffer = buffer[start_pos + 7 :]  # skip "<think>"
-                                in_reasoning = True
-                            else:
-                                # No <think> tag found, output directly
-                                current_output_content += buffer
-                                buffer = ""
-                        else:
-                            end_pos = buffer.find("</think>")
-                            if end_pos != -1:
-                                # Found the closing </think> tag
-                                current_reasoning_content += buffer[:end_pos]
-                                buffer = buffer[end_pos + 8 :]  # skip "</think>"
-                                in_reasoning = False
-                            else:
-                                # No closing tag yet, keep accumulating as reasoning content
-                                current_reasoning_content += buffer
-                                buffer = ""
+                    # Process the buffered content and extract thinking tags
+                    buffer, current_output_content, current_reasoning_content, in_reasoning = process_thinking_content(buffer, in_reasoning, start_tag, end_tag)

                     # Accumulate content
                     if current_output_content:
@@ -1033,34 +1021,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                     if delta_content:
                         buffer += delta_content

-                    # Process the buffered content and extract <think> tags
-                    current_output_content = ""
-                    current_reasoning_content = ""
-
-                    while buffer:
-                        if not in_reasoning:
-                            start_pos = buffer.find("<think>")
-                            if start_pos != -1:
-                                # Found the start of a <think> tag
-                                if start_pos > 0:
-                                    current_output_content += buffer[:start_pos]
-                                buffer = buffer[start_pos + 7 :]  # skip "<think>"
-                                in_reasoning = True
-                            else:
-                                # No <think> tag found, output directly
-                                current_output_content += buffer
-                                buffer = ""
-                        else:
-                            end_pos = buffer.find("</think>")
-                            if end_pos != -1:
-                                # Found the closing </think> tag
-                                current_reasoning_content += buffer[:end_pos]
-                                buffer = buffer[end_pos + 8 :]  # skip "</think>"
-                                in_reasoning = False
-                            else:
-                                # No closing tag yet, keep accumulating as reasoning content
-                                current_reasoning_content += buffer
-                                buffer = ""
+                    # Process the buffered content and extract thinking tags
+                    buffer, current_output_content, current_reasoning_content, in_reasoning = process_thinking_content(buffer, in_reasoning, start_tag, end_tag)

                     # Accumulate content
                     if current_output_content:
@@ -1167,7 +1129,9 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             }

             if not result["reasoning_content"] and result["content"]:
-                think_match = re.search(r"<think>(.*?)</think>", result["content"], re.DOTALL)
+                # Create dynamic regex pattern based on backend
+                think_pattern = f"{re.escape(start_tag)}(.*?){re.escape(end_tag)}"
+                think_match = re.search(think_pattern, result["content"], re.DOTALL)
                 if think_match:
                     result["reasoning_content"] = think_match.group(1)
                     result["content"] = result["content"].replace(think_match.group(0), "", 1)