vectorvein 0.1.80__py3-none-any.whl → 0.1.82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,6 +3,7 @@
3
3
  import json
4
4
  from functools import cached_property
5
5
  from typing import overload, Generator, AsyncGenerator, Any, Literal, Iterable
6
+ import re
6
7
 
7
8
  import httpx
8
9
  from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
@@ -229,6 +230,11 @@ class OpenAICompatibleChatClient(BaseChatClient):
229
230
  full_content = ""
230
231
  result = {}
231
232
  usage = None
233
+ buffer = ""
234
+ in_reasoning = False
235
+ current_reasoning = []
236
+ current_content = []
237
+
232
238
  for chunk in stream_response:
233
239
  if chunk.usage and chunk.usage.total_tokens:
234
240
  usage = Usage(
@@ -239,14 +245,11 @@ class OpenAICompatibleChatClient(BaseChatClient):
239
245
  completion_tokens_details=chunk.usage.completion_tokens_details,
240
246
  )
241
247
 
242
- if len(chunk.choices) == 0:
243
- if usage:
244
- yield ChatCompletionDeltaMessage(usage=usage)
245
- continue
246
- if not chunk.choices[0].delta:
248
+ if len(chunk.choices) == 0 or not chunk.choices[0].delta:
247
249
  if usage:
248
250
  yield ChatCompletionDeltaMessage(usage=usage)
249
251
  continue
252
+
250
253
  if self.model_setting.function_call_available:
251
254
  if chunk.choices[0].delta.tool_calls:
252
255
  for index, tool_call in enumerate(chunk.choices[0].delta.tool_calls):
@@ -254,16 +257,61 @@ class OpenAICompatibleChatClient(BaseChatClient):
254
257
  yield ChatCompletionDeltaMessage(**chunk.choices[0].delta.model_dump(), usage=usage)
255
258
  else:
256
259
  message = chunk.choices[0].delta.model_dump()
257
- full_content += message["content"] if message["content"] else ""
260
+ delta_content = message.get("content", "")
261
+ buffer += delta_content
262
+
263
+ while True:
264
+ if not in_reasoning:
265
+ start_pos = buffer.find("<think>")
266
+ if start_pos != -1:
267
+ current_content.append(buffer[:start_pos])
268
+ buffer = buffer[start_pos + 7 :]
269
+ in_reasoning = True
270
+ else:
271
+ current_content.append(buffer)
272
+ buffer = ""
273
+ break
274
+ else:
275
+ end_pos = buffer.find("</think>")
276
+ if end_pos != -1:
277
+ current_reasoning.append(buffer[:end_pos])
278
+ buffer = buffer[end_pos + 8 :]
279
+ in_reasoning = False
280
+ else:
281
+ current_reasoning.append(buffer)
282
+ buffer = ""
283
+ break
284
+
285
+ message["content"] = "".join(current_content).strip()
286
+ if current_reasoning:
287
+ message["reasoning_content"] = "".join(current_reasoning).strip()
288
+ current_content.clear()
289
+ current_reasoning.clear()
290
+
258
291
  if tools:
292
+ full_content += message["content"]
259
293
  tool_call_data = ToolCallContentProcessor(full_content).tool_calls
260
294
  if tool_call_data:
261
295
  message["tool_calls"] = tool_call_data["tool_calls"]
296
+
262
297
  if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
263
298
  message["content"] = ""
264
299
  result = message
265
300
  continue
301
+
266
302
  yield ChatCompletionDeltaMessage(**message, usage=usage)
303
+
304
+ if buffer:
305
+ if in_reasoning:
306
+ current_reasoning.append(buffer)
307
+ else:
308
+ current_content.append(buffer)
309
+ final_message = {
310
+ "content": "".join(current_content).strip(),
311
+ "reasoning_content": "".join(current_reasoning).strip() if current_reasoning else None,
312
+ }
313
+ yield ChatCompletionDeltaMessage(**final_message, usage=usage)
314
+
267
315
  if result:
268
316
  yield ChatCompletionDeltaMessage(**result, usage=usage)
269
317
 
@@ -286,6 +334,13 @@ class OpenAICompatibleChatClient(BaseChatClient):
286
334
  "reasoning_content": getattr(response.choices[0].message, "reasoning_content", None),
287
335
  "usage": response.usage.model_dump() if response.usage else None,
288
336
  }
337
+
338
+ if not result["reasoning_content"] and result["content"]:
339
+ think_match = re.search(r"<think>(.*?)</think>", result["content"], re.DOTALL)
340
+ if think_match:
341
+ result["reasoning_content"] = think_match.group(1).strip()
342
+ result["content"] = result["content"].replace(think_match.group(0), "", 1).strip()
343
+
289
344
  if tools:
290
345
  if self.model_setting.function_call_available and response.choices[0].message.tool_calls:
291
346
  result["tool_calls"] = [
@@ -501,6 +556,11 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
501
556
  full_content = ""
502
557
  result = {}
503
558
  usage = None
559
+ buffer = ""
560
+ in_reasoning = False
561
+ current_reasoning = []
562
+ current_content = []
563
+
504
564
  async for chunk in stream_response:
505
565
  if chunk.usage and chunk.usage.total_tokens:
506
566
  usage = Usage(
@@ -511,11 +571,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
511
571
  prompt_tokens_details=chunk.usage.prompt_tokens_details,
512
572
  )
513
573
 
514
- if len(chunk.choices) == 0:
515
- if usage:
516
- yield ChatCompletionDeltaMessage(usage=usage)
517
- continue
518
- if not chunk.choices[0].delta:
574
+ if len(chunk.choices) == 0 or not chunk.choices[0].delta:
519
575
  if usage:
520
576
  yield ChatCompletionDeltaMessage(usage=usage)
521
577
  continue
@@ -527,16 +583,61 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
527
583
  yield ChatCompletionDeltaMessage(**chunk.choices[0].delta.model_dump(), usage=usage)
528
584
  else:
529
585
  message = chunk.choices[0].delta.model_dump()
530
- full_content += message["content"] if message["content"] else ""
586
+ delta_content = message.get("content", "")
587
+ buffer += delta_content
588
+
589
+ while True:
590
+ if not in_reasoning:
591
+ start_pos = buffer.find("<think>")
592
+ if start_pos != -1:
593
+ current_content.append(buffer[:start_pos])
594
+ buffer = buffer[start_pos + 7 :]
595
+ in_reasoning = True
596
+ else:
597
+ current_content.append(buffer)
598
+ buffer = ""
599
+ break
600
+ else:
601
+ end_pos = buffer.find("</think>")
602
+ if end_pos != -1:
603
+ current_reasoning.append(buffer[:end_pos])
604
+ buffer = buffer[end_pos + 8 :]
605
+ in_reasoning = False
606
+ else:
607
+ current_reasoning.append(buffer)
608
+ buffer = ""
609
+ break
610
+
611
+ message["content"] = "".join(current_content).strip()
612
+ if current_reasoning:
613
+ message["reasoning_content"] = "".join(current_reasoning).strip()
614
+ current_content.clear()
615
+ current_reasoning.clear()
616
+
531
617
  if tools:
618
+ full_content += message["content"]
532
619
  tool_call_data = ToolCallContentProcessor(full_content).tool_calls
533
620
  if tool_call_data:
534
621
  message["tool_calls"] = tool_call_data["tool_calls"]
622
+
535
623
  if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
536
624
  message["content"] = ""
537
625
  result = message
538
626
  continue
627
+
539
628
  yield ChatCompletionDeltaMessage(**message, usage=usage)
629
+
630
+ if buffer:
631
+ if in_reasoning:
632
+ current_reasoning.append(buffer)
633
+ else:
634
+ current_content.append(buffer)
635
+ final_message = {
636
+ "content": "".join(current_content).strip(),
637
+ "reasoning_content": "".join(current_reasoning).strip() if current_reasoning else None,
638
+ }
639
+ yield ChatCompletionDeltaMessage(**final_message, usage=usage)
640
+
540
641
  if result:
541
642
  yield ChatCompletionDeltaMessage(**result, usage=usage)
542
643
 
@@ -558,6 +659,13 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
558
659
  "reasoning_content": getattr(response.choices[0].message, "reasoning_content", None),
559
660
  "usage": response.usage.model_dump() if response.usage else None,
560
661
  }
662
+
663
+ if not result["reasoning_content"] and result["content"]:
664
+ think_match = re.search(r"<think>(.*?)</think>", result["content"], re.DOTALL)
665
+ if think_match:
666
+ result["reasoning_content"] = think_match.group(1).strip()
667
+ result["content"] = result["content"].replace(think_match.group(0), "", 1).strip()
668
+
561
669
  if tools:
562
670
  if self.model_setting.function_call_available and response.choices[0].message.tool_calls:
563
671
  result["tool_calls"] = [
@@ -0,0 +1,36 @@
1
+ from typing import Dict, Any, Union
2
+
3
+
4
class Edge:
    """A directed connection between two node ports in a workflow graph.

    Endpoints are referenced by node id plus the handle (port name) on each
    side; the visual style is fixed and identical for every edge.
    """

    def __init__(
        self,
        id: str,
        source: str,
        sourceHandle: str,
        target: str,
        targetHandle: str,
        animated: bool = True,
        type: str = "default",
    ) -> None:
        # Identity and endpoints.
        self.id: str = id
        self.source: str = source
        self.sourceHandle: str = sourceHandle
        self.target: str = target
        self.targetHandle: str = targetHandle
        # Front-end rendering hints.
        self.animated: bool = animated
        self.type: str = type
        # Fixed stroke style applied to all edges.
        self.style: Dict[str, Union[str, int]] = {"stroke": "#28c5e5", "strokeWidth": 3}

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the edge into the dict shape expected by the front end."""
        serialized: Dict[str, Any] = dict(
            id=self.id,
            source=self.source,
            sourceHandle=self.sourceHandle,
            target=self.target,
            targetHandle=self.targetHandle,
            animated=self.animated,
            type=self.type,
            style=self.style,
        )
        # "data" and "label" are always emitted empty for schema compatibility.
        serialized["data"] = {}
        serialized["label"] = ""
        return serialized
@@ -0,0 +1,82 @@
1
+ import uuid
2
+ from typing import Dict, Any, Optional, List, Union
3
+
4
+ from .port import PortType, Port, InputPort, OutputPort
5
+
6
+
7
class Node:
    """A workflow node: a typed task with named input/output ports.

    Holds the port templates plus the layout/UI metadata (position, lock,
    shadow, ...) the front end needs to render the node.  The misspelled
    attribute ``seleted_workflow_title`` (sic) is a serialized key and is
    preserved for compatibility.
    """

    def __init__(
        self,
        node_type: str,
        category: str,
        task_name: str,
        description: str = "",
        ports: Optional[Dict[str, Any]] = None,
        node_id: Optional[str] = None,
        position: Optional[Dict[str, float]] = None,
        seleted_workflow_title: str = "",
        is_template: bool = False,
        initialized: bool = False,
    ) -> None:
        # Generate a fresh UUID when no (truthy) id was supplied.
        if not node_id:
            node_id = str(uuid.uuid4())
        self.id: str = node_id
        self.type: str = node_type
        self.category: str = category
        self.task_name: str = task_name
        self.description: str = description
        self.ports: Dict[str, Port] = ports or {}
        self.position: Dict[str, float] = position or {"x": 0, "y": 0}
        self.seleted_workflow_title: str = seleted_workflow_title
        self.is_template: bool = is_template
        self.initialized: bool = initialized
        # Runtime/UI flags, always start cleared.
        self.ignored: bool = False
        self.lock: bool = False
        self.shadow: bool = False

    def add_port(
        self,
        name: str,
        port_type: Union[PortType, str],
        show: bool = False,
        value: Any = None,
        options: Optional[List[Any]] = None,
        is_output: bool = False,
        **kwargs,
    ):
        """Attach a new port; ``is_output`` selects Output vs Input port."""
        port_cls = OutputPort if is_output else InputPort
        self.ports[name] = port_cls(
            name=name, port_type=port_type, show=show, value=value, options=options, **kwargs
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the node for the front end.

        The ports named "debug", "seleted_workflow_title" and "is_template"
        are deliberately excluded from the serialized template.
        """
        hidden = ("debug", "seleted_workflow_title", "is_template")
        template = {
            port_name: port.to_dict()
            for port_name, port in self.ports.items()
            if port_name not in hidden
        }
        return {
            "id": self.id,
            "type": self.type,
            "data": {
                "task_name": self.task_name,
                "has_inputs": self.has_inputs(),
                "description": self.description,
                "seleted_workflow_title": self.seleted_workflow_title,
                "is_template": self.is_template,
                "template": template,
            },
            "category": self.category,
            "position": self.position,
            "initialized": self.initialized,
            "ignored": self.ignored,
            "lock": self.lock,
            "shadow": self.shadow,
        }

    def has_inputs(self) -> bool:
        """Return True if at least one port is an InputPort."""
        return any(isinstance(port, InputPort) for port in self.ports.values())
@@ -0,0 +1,173 @@
1
+ from enum import Enum
2
+ from typing import Optional, Any, Dict, List, Union
3
+
4
+
5
class PortType(Enum):
    """Closed set of widget/field kinds a port can be rendered as.

    Values are the lowercase field-type strings used in serialized port
    templates (see ``Port.to_dict``).
    """

    TEXT = "text"
    NUMBER = "number"
    CHECKBOX = "checkbox"
    SELECT = "select"
    RADIO = "radio"
    TEXTAREA = "textarea"
    INPUT = "input"
    FILE = "file"
    LIST = "list"
    COLOR = "color"
    TEMPERATURE = "temperature"
17
+
18
+
19
class Port:
    """Base description of a node port: its widget type, value and UI hints.

    Note: the parameter/attribute names ``max``, ``min`` and ``list`` shadow
    builtins, and ``group_collpased`` (sic) is misspelled — all are part of
    the public keyword interface and serialized schema, so they are kept.
    """

    def __init__(
        self,
        name: str,
        port_type: Union[PortType, str],
        required: bool = True,
        show: bool = False,
        value: Any = None,
        options: Optional[List[Any]] = None,
        field_type: Optional[str] = None,
        is_output: bool = False,
        condition: Optional[str] = None,
        max_length: Optional[int] = None,
        support_file_types: Optional[List[str]] = None,
        multiple: Optional[bool] = None,
        group: Optional[str] = None,
        group_collpased: bool = False,
        has_tooltip: bool = False,
        max: Optional[Union[int, float]] = None,
        min: Optional[Union[int, float]] = None,
        max_count: Optional[int] = None,
        list: bool = False,
    ) -> None:
        # Identity and rendering.
        self.name = name
        self.port_type = port_type
        self.field_type = field_type
        self.required = required
        self.show = show
        self.value = value
        self.options = options
        self.is_output = is_output
        # Validation / constraints.
        self.condition = condition
        self.max_length = max_length
        self.max = max
        self.min = min
        self.max_count = max_count
        # File and collection behaviour.
        self.support_file_types = support_file_types
        self.multiple = multiple
        self.list = list
        # Grouping / UI hints.
        self.group = group
        self.group_collpased = group_collpased
        self.has_tooltip = has_tooltip

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the port for the front end.

        ``max_count`` and ``condition`` are intentionally not serialized
        (the condition line below is kept disabled, as in the original).
        """
        # Normalize the port type to its string value.
        if isinstance(self.port_type, PortType):
            field_type = self.port_type.value
        else:
            field_type = self.port_type
        file_types = ", ".join(self.support_file_types) if self.support_file_types else None
        return {
            "name": self.name,
            "display_name": self.name,
            "field_type": field_type,
            "required": self.required,
            "show": self.show,
            "value": self.value,
            "options": self.options,
            "type": self.field_type,
            "is_output": self.is_output,
            # "condition": f"(fieldsData) => {{ {self.condition} }}" if self.condition else "",
            "max_length": self.max_length,
            "support_file_types": file_types,
            "multiple": self.multiple,
            "group": self.group,
            "group_collpased": self.group_collpased,
            "has_tooltip": self.has_tooltip,
            "max": self.max,
            "min": self.min,
            "list": self.list,
        }
84
+
85
+
86
class InputPort(Port):
    """Port on the input side of a node.

    Pure forwarder: passes everything through to ``Port`` with
    ``is_output`` pinned to ``False``.
    """

    def __init__(
        self,
        name: str,
        port_type: Union[PortType, str],
        required: bool = True,
        show: bool = False,
        value: Any = None,
        options: Optional[List[Any]] = None,
        field_type: Optional[str] = None,
        condition: Optional[str] = None,
        max_length: Optional[int] = None,
        support_file_types: Optional[List[str]] = None,
        multiple: Optional[bool] = None,
        group: Optional[str] = None,
        group_collpased: bool = False,
        has_tooltip: bool = False,
        max: Optional[Union[int, float]] = None,
        min: Optional[Union[int, float]] = None,
        max_count: Optional[int] = None,
        list: bool = False,
    ) -> None:
        # Forward positionally in Port.__init__ parameter order;
        # the literal False is the is_output argument.
        super().__init__(
            name,
            port_type,
            required,
            show,
            value,
            options,
            field_type,
            False,
            condition,
            max_length,
            support_file_types,
            multiple,
            group,
            group_collpased,
            has_tooltip,
            max,
            min,
            max_count,
            list,
        )
129
+
130
+
131
class OutputPort(Port):
    """Port on the output side of a node.

    Pure forwarder: passes everything through to ``Port`` with
    ``is_output`` pinned to ``True``.  Defaults to a text port
    named "output".
    """

    def __init__(
        self,
        name: str = "output",
        port_type: Union[PortType, str] = PortType.TEXT,
        required: bool = True,
        show: bool = False,
        value: Any = None,
        options: Optional[List[Any]] = None,
        field_type: Optional[str] = None,
        condition: Optional[str] = None,
        max_length: Optional[int] = None,
        support_file_types: Optional[List[str]] = None,
        multiple: Optional[bool] = None,
        group: Optional[str] = None,
        group_collpased: bool = False,
        has_tooltip: bool = False,
        max: Optional[Union[int, float]] = None,
        min: Optional[Union[int, float]] = None,
        max_count: Optional[int] = None,
        list: bool = False,
    ) -> None:
        # Forward positionally in Port.__init__ parameter order;
        # the literal True is the is_output argument.
        super().__init__(
            name,
            port_type,
            required,
            show,
            value,
            options,
            field_type,
            True,
            condition,
            max_length,
            support_file_types,
            multiple,
            group,
            group_collpased,
            has_tooltip,
            max,
            min,
            max_count,
            list,
        )
@@ -0,0 +1,87 @@
1
+ import json
2
+ from typing import List, Union
3
+
4
+ from .node import Node
5
+ from .edge import Edge
6
+
7
+
8
class Workflow:
    """A graph of nodes and edges, serializable for the front end."""

    def __init__(self) -> None:
        self.nodes: List[Node] = []
        self.edges: List[Edge] = []

    def add_node(self, node: Node):
        """Append a single node to the workflow."""
        self.nodes.append(node)

    def add_nodes(self, nodes: List[Node]):
        """Append several nodes at once."""
        self.nodes.extend(nodes)

    def add_edge(self, edge: Edge):
        """Append a pre-built edge to the workflow."""
        self.edges.append(edge)

    def connect(
        self,
        source_node: Union[str, Node],
        source_port: str,
        target_node: Union[str, Node],
        target_port: str,
    ):
        """Create an edge between two ports; accepts Node objects or raw ids."""
        source_node_id = source_node.id if isinstance(source_node, Node) else source_node
        target_node_id = target_node.id if isinstance(target_node, Node) else target_node
        # Edge id follows the vue-flow convention (ids and handles concatenated).
        edge_id = f"vueflow__edge-{source_node_id}{source_port}-{target_node_id}{target_port}"
        self.add_edge(Edge(edge_id, source_node_id, source_port, target_node_id, target_port))

    def to_dict(self):
        """Serialize the full graph (nodes, edges, default viewport)."""
        return {
            "nodes": [node.to_dict() for node in self.nodes],
            "edges": [edge.to_dict() for edge in self.edges],
            "viewport": {"x": 0, "y": 0, "zoom": 1},
        }

    def to_json(self, ensure_ascii=False):
        """Serialize the graph as a JSON string (non-ASCII kept by default)."""
        return json.dumps(self.to_dict(), ensure_ascii=ensure_ascii)

    def to_mermaid(self) -> str:
        """Render the workflow as a Mermaid flowchart (top-down).

        Returns:
            str: Mermaid flowchart source text.
        """
        # Give every node a readable label like "llm_0", "llm_1": nodes of
        # the same (lower-cased) type are numbered in insertion order.
        type_counters = {}
        node_id_to_label = {}
        for node in self.nodes:
            node_type = node.type.lower()
            count = type_counters.get(node_type, 0)
            node_id_to_label[node.id] = f"{node_type}_{count}"
            type_counters[node_type] = count + 1

        lines = ["flowchart TD"]
        # Node definitions.
        for node in self.nodes:
            node_label = node_id_to_label[node.id]
            lines.append(f' {node_label}["{node_label} ({node.type})"]')
        # Blank line separating node and edge definitions.
        lines.append("")
        # Edge definitions, labelled "sourceHandle → targetHandle".
        for edge in self.edges:
            source_label = node_id_to_label[edge.source]
            target_label = node_id_to_label[edge.target]
            label = f"{edge.sourceHandle} → {edge.targetHandle}"
            lines.append(f" {source_label} -->|{label}| {target_label}")

        return "\n".join(lines)