vectorvein 0.1.80__py3-none-any.whl → 0.1.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/openai_compatible_client.py +120 -12
- vectorvein/workflow/graph/edge.py +36 -0
- vectorvein/workflow/graph/node.py +82 -0
- vectorvein/workflow/graph/port.py +173 -0
- vectorvein/workflow/graph/workflow.py +87 -0
- vectorvein/workflow/nodes/__init__.py +136 -0
- vectorvein/workflow/nodes/audio_generation.py +154 -0
- vectorvein/workflow/nodes/control_flows.py +170 -0
- vectorvein/workflow/nodes/file_processing.py +106 -0
- vectorvein/workflow/nodes/image_generation.py +743 -0
- vectorvein/workflow/nodes/llms.py +802 -0
- vectorvein/workflow/nodes/media_editing.py +668 -0
- vectorvein/workflow/nodes/media_processing.py +478 -0
- vectorvein/workflow/nodes/output.py +357 -0
- vectorvein/workflow/nodes/relational_db.py +153 -0
- vectorvein/workflow/nodes/text_processing.py +218 -0
- vectorvein/workflow/nodes/tools.py +331 -0
- vectorvein/workflow/nodes/triggers.py +0 -0
- vectorvein/workflow/nodes/vector_db.py +156 -0
- vectorvein/workflow/nodes/video_generation.py +113 -0
- vectorvein/workflow/nodes/web_crawlers.py +157 -0
- vectorvein/workflow/utils/json_to_code.py +191 -0
- {vectorvein-0.1.80.dist-info → vectorvein-0.1.82.dist-info}/METADATA +1 -1
- {vectorvein-0.1.80.dist-info → vectorvein-0.1.82.dist-info}/RECORD +26 -5
- {vectorvein-0.1.80.dist-info → vectorvein-0.1.82.dist-info}/WHEEL +0 -0
- {vectorvein-0.1.80.dist-info → vectorvein-0.1.82.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,478 @@
|
|
1
|
+
from typing import Optional
|
2
|
+
|
3
|
+
from ..graph.node import Node
|
4
|
+
from ..graph.port import PortType, InputPort, OutputPort
|
5
|
+
|
6
|
+
|
7
|
+
class ClaudeVision(Node):
    """Workflow node that sends a text prompt plus images (uploads or URLs) to a Claude vision model."""

    def __init__(self, id: Optional[str] = None):
        # Radio options for choosing between uploaded files and remote URLs.
        source_options = [
            {"value": "images", "label": "images"},
            {"value": "urls", "label": "urls"},
        ]
        ports = {
            "text_prompt": InputPort(
                name="text_prompt",
                port_type=PortType.TEXTAREA,
                value="",
            ),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="claude-3-5-sonnet",
                options=[
                    {"value": model, "label": model}
                    for model in (
                        "claude-3-5-sonnet",
                        "claude-3-opus",
                        "claude-3-sonnet",
                        "claude-3-haiku",
                    )
                ],
            ),
            "images_or_urls": InputPort(
                name="images_or_urls",
                port_type=PortType.RADIO,
                value="images",
                options=source_options,
            ),
            # Only one of the next two ports is shown, driven by the radio above.
            "images": InputPort(
                name="images",
                port_type=PortType.FILE,
                value=[],
                multiple=True,
                support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                condition="fields_data.get('images_or_urls') == 'images'",
            ),
            "urls": InputPort(
                name="urls",
                port_type=PortType.TEXT,
                value="",
                condition="fields_data.get('images_or_urls') == 'urls'",
            ),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="ClaudeVision",
            category="media_processing",
            task_name="media_processing.claude_vision",
            node_id=id,
            ports=ports,
        )
|
57
|
+
|
58
|
+
|
59
|
+
class DeepseekVl(Node):
    """Workflow node that runs a text prompt plus images (uploads or URLs) through a DeepSeek-VL model."""

    def __init__(self, id: Optional[str] = None):
        # Radio options for choosing between uploaded files and remote URLs.
        source_options = [
            {"value": "images", "label": "images"},
            {"value": "urls", "label": "urls"},
        ]
        ports = {
            "text_prompt": InputPort(
                name="text_prompt",
                port_type=PortType.TEXTAREA,
                value="",
            ),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="deepseek-vl2",
                options=[
                    {"value": "deepseek-vl2", "label": "deepseek-vl2"},
                ],
            ),
            "images_or_urls": InputPort(
                name="images_or_urls",
                port_type=PortType.RADIO,
                value="images",
                options=source_options,
            ),
            # Only one of the next two ports is shown, driven by the radio above.
            "images": InputPort(
                name="images",
                port_type=PortType.FILE,
                value=[],
                multiple=True,
                support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                condition="fields_data.get('images_or_urls') == 'images'",
            ),
            "urls": InputPort(
                name="urls",
                port_type=PortType.TEXT,
                value="",
                condition="fields_data.get('images_or_urls') == 'urls'",
            ),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="DeepseekVl",
            category="media_processing",
            task_name="media_processing.deepseek_vl",
            node_id=id,
            ports=ports,
        )
|
106
|
+
|
107
|
+
|
108
|
+
class GeminiVision(Node):
    """Workflow node that sends a text prompt plus images (uploads or URLs) to a Gemini vision model."""

    def __init__(self, id: Optional[str] = None):
        # Radio options for choosing between uploaded files and remote URLs.
        source_options = [
            {"value": "images", "label": "images"},
            {"value": "urls", "label": "urls"},
        ]
        ports = {
            "text_prompt": InputPort(
                name="text_prompt",
                port_type=PortType.TEXTAREA,
                value="",
            ),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="gemini-1.5-pro",
                options=[
                    {"value": model, "label": model}
                    for model in (
                        "gemini-1.5-pro",
                        "gemini-1.5-flash",
                        "gemini-2.0-flash-exp",
                        "gemini-2.0-flash-thinking-exp-1219",
                        "gemini-exp-1206",
                    )
                ],
            ),
            "images_or_urls": InputPort(
                name="images_or_urls",
                port_type=PortType.RADIO,
                value="images",
                options=source_options,
            ),
            # Only one of the next two ports is shown, driven by the radio above.
            "images": InputPort(
                name="images",
                port_type=PortType.FILE,
                value=[],
                multiple=True,
                support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                condition="fields_data.get('images_or_urls') == 'images'",
            ),
            "urls": InputPort(
                name="urls",
                port_type=PortType.TEXT,
                value="",
                condition="fields_data.get('images_or_urls') == 'urls'",
            ),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="GeminiVision",
            category="media_processing",
            task_name="media_processing.gemini_vision",
            node_id=id,
            ports=ports,
        )
|
159
|
+
|
160
|
+
|
161
|
+
class GlmVision(Node):
    """Workflow node that sends a text prompt plus images (uploads or URLs) to a GLM-4V vision model.

    Fix: the `images` and `urls` port conditions previously used the
    attribute-style expression `fields_data.images_or_urls.value == ...`,
    unlike every other node in this module, which uses
    `fields_data.get('images_or_urls') == ...`. The conditions are now
    consistent with the rest of the file so the frontend condition
    evaluator treats this node the same as its siblings.
    """

    def __init__(self, id: Optional[str] = None):
        super().__init__(
            node_type="GlmVision",
            category="media_processing",
            task_name="media_processing.glm_vision",
            node_id=id,
            ports={
                "text_prompt": InputPort(
                    name="text_prompt",
                    port_type=PortType.TEXTAREA,
                    value="",
                ),
                "llm_model": InputPort(
                    name="llm_model",
                    port_type=PortType.SELECT,
                    value="glm-4v-plus",
                    options=[
                        {"value": "glm-4v", "label": "glm-4v"},
                        {"value": "glm-4v-plus", "label": "glm-4v-plus"},
                        {"value": "glm-4v-flash", "label": "glm-4v-flash"},
                    ],
                ),
                "images_or_urls": InputPort(
                    name="images_or_urls",
                    port_type=PortType.RADIO,
                    value="images",
                    options=[
                        {"value": "images", "label": "images"},
                        {"value": "urls", "label": "urls"},
                    ],
                ),
                # Only one of the next two ports is shown, driven by the radio above.
                "images": InputPort(
                    name="images",
                    port_type=PortType.FILE,
                    value=[],
                    multiple=True,
                    support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                    # Was: "fields_data.images_or_urls.value == 'images'" — normalized
                    # to the .get() form used by every other node in this module.
                    condition="fields_data.get('images_or_urls') == 'images'",
                ),
                "urls": InputPort(
                    name="urls",
                    port_type=PortType.TEXT,
                    value="",
                    # Was: "fields_data.images_or_urls.value == 'urls'" — normalized.
                    condition="fields_data.get('images_or_urls') == 'urls'",
                ),
                "output": OutputPort(),
            },
        )
|
210
|
+
|
211
|
+
|
212
|
+
class GptVision(Node):
    """Workflow node that sends a text prompt plus images (uploads or URLs) to a GPT-4o vision model.

    Unlike the other vision nodes here, it also exposes a `detail_type`
    selector (auto/low/high) for image detail level.
    """

    def __init__(self, id: Optional[str] = None):
        # Radio options for choosing between uploaded files and remote URLs.
        source_options = [
            {"value": "images", "label": "images"},
            {"value": "urls", "label": "urls"},
        ]
        ports = {
            "text_prompt": InputPort(
                name="text_prompt",
                port_type=PortType.TEXTAREA,
                value="",
            ),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="gpt-4o",
                options=[
                    {"value": model, "label": model}
                    for model in ("gpt-4o", "gpt-4o-mini")
                ],
            ),
            "images_or_urls": InputPort(
                name="images_or_urls",
                port_type=PortType.RADIO,
                value="images",
                options=source_options,
            ),
            # Only one of the next two ports is shown, driven by the radio above.
            "images": InputPort(
                name="images",
                port_type=PortType.FILE,
                value=[],
                multiple=True,
                support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                condition="fields_data.get('images_or_urls') == 'images'",
            ),
            "urls": InputPort(
                name="urls",
                port_type=PortType.TEXT,
                value="",
                condition="fields_data.get('images_or_urls') == 'urls'",
            ),
            "detail_type": InputPort(
                name="detail_type",
                port_type=PortType.SELECT,
                value="auto",
                options=[
                    {"value": level, "label": level}
                    for level in ("auto", "low", "high")
                ],
            ),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="GptVision",
            category="media_processing",
            task_name="media_processing.gpt_vision",
            node_id=id,
            ports=ports,
        )
|
270
|
+
|
271
|
+
|
272
|
+
class InternVision(Node):
    """Workflow node that sends a text prompt plus images (uploads or URLs) to an InternVL model."""

    def __init__(self, id: Optional[str] = None):
        # Radio options for choosing between uploaded files and remote URLs.
        source_options = [
            {"value": "images", "label": "images"},
            {"value": "urls", "label": "urls"},
        ]
        ports = {
            "text_prompt": InputPort(
                name="text_prompt",
                port_type=PortType.TEXTAREA,
                value="",
            ),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="internvl2-26b",
                options=[
                    {"value": model, "label": model}
                    for model in ("internvl2-26b", "internvl2-8b")
                ],
            ),
            "images_or_urls": InputPort(
                name="images_or_urls",
                port_type=PortType.RADIO,
                value="images",
                options=source_options,
            ),
            # Only one of the next two ports is shown, driven by the radio above.
            "images": InputPort(
                name="images",
                port_type=PortType.FILE,
                value=[],
                multiple=True,
                support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                condition="fields_data.get('images_or_urls') == 'images'",
            ),
            "urls": InputPort(
                name="urls",
                port_type=PortType.TEXT,
                value="",
                condition="fields_data.get('images_or_urls') == 'urls'",
            ),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="InternVision",
            category="media_processing",
            task_name="media_processing.intern_vision",
            node_id=id,
            ports=ports,
        )
|
320
|
+
|
321
|
+
|
322
|
+
class Ocr(Node):
    """Workflow node for OCR over images (uploads or URLs).

    The selected `ocr_type` drives which outputs are shown: `table` mode
    exposes `output_table`; `general`/`business_license` modes expose
    `output_content` and `output_words_info`.
    """

    def __init__(self, id: Optional[str] = None):
        # Radio options for choosing between uploaded files and remote URLs.
        source_options = [
            {"value": "images", "label": "images"},
            {"value": "urls", "label": "urls"},
        ]
        ports = {
            "ocr_type": InputPort(
                name="ocr_type",
                port_type=PortType.SELECT,
                value="general",
                options=[
                    {"value": kind, "label": kind}
                    for kind in ("general", "table", "business_license")
                ],
            ),
            "images_or_urls": InputPort(
                name="images_or_urls",
                port_type=PortType.RADIO,
                value="images",
                options=source_options,
            ),
            # Only one of the next two ports is shown, driven by the radio above.
            "images": InputPort(
                name="images",
                port_type=PortType.FILE,
                value=[],
                multiple=True,
                support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                condition="fields_data.get('images_or_urls') == 'images'",
            ),
            "urls": InputPort(
                name="urls",
                port_type=PortType.TEXT,
                value="",
                condition="fields_data.get('images_or_urls') == 'urls'",
            ),
            # Outputs are conditional on the OCR mode selected above.
            "output_table": OutputPort(
                name="output_table",
                condition="fields_data.get('ocr_type') == 'table'",
                has_tooltip=True,
            ),
            "output_content": OutputPort(
                name="output_content",
                condition="fields_data.get('ocr_type') in ['general', 'business_license']",
            ),
            "output_words_info": OutputPort(
                name="output_words_info",
                value=[],
                condition="fields_data.get('ocr_type') in ['general', 'business_license']",
                has_tooltip=True,
            ),
        }
        super().__init__(
            node_type="Ocr",
            category="media_processing",
            task_name="media_processing.ocr",
            node_id=id,
            ports=ports,
        )
|
380
|
+
|
381
|
+
|
382
|
+
class QwenVision(Node):
    """Workflow node that sends a text prompt plus images (uploads or URLs) to a Qwen vision model."""

    def __init__(self, id: Optional[str] = None):
        # Radio options for choosing between uploaded files and remote URLs.
        source_options = [
            {"value": "images", "label": "images"},
            {"value": "urls", "label": "urls"},
        ]
        ports = {
            "text_prompt": InputPort(
                name="text_prompt",
                port_type=PortType.TEXTAREA,
                value="",
            ),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="qwen2-vl-72b-instruct",
                options=[
                    {"value": model, "label": model}
                    for model in (
                        "qvq-72b-preview",
                        "qwen2-vl-72b-instruct",
                        "qwen2-vl-7b-instruct",
                        "qwen-vl-max",
                        "qwen-vl-plus",
                    )
                ],
            ),
            "images_or_urls": InputPort(
                name="images_or_urls",
                port_type=PortType.RADIO,
                value="images",
                options=source_options,
            ),
            # Only one of the next two ports is shown, driven by the radio above.
            "images": InputPort(
                name="images",
                port_type=PortType.FILE,
                value=[],
                multiple=True,
                support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                condition="fields_data.get('images_or_urls') == 'images'",
            ),
            "urls": InputPort(
                name="urls",
                port_type=PortType.TEXT,
                value="",
                condition="fields_data.get('images_or_urls') == 'urls'",
            ),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="QwenVision",
            category="media_processing",
            task_name="media_processing.qwen_vision",
            node_id=id,
            ports=ports,
        )
|
433
|
+
|
434
|
+
|
435
|
+
class SpeechRecognition(Node):
    """Workflow node that transcribes audio files (uploads or URLs) to text, a list, or SRT subtitles."""

    def __init__(self, id: Optional[str] = None):
        # Radio options for choosing between uploaded files and remote URLs.
        source_options = [
            {"value": "files", "label": "files"},
            {"value": "urls", "label": "urls"},
        ]
        ports = {
            "files_or_urls": InputPort(
                name="files_or_urls",
                port_type=PortType.RADIO,
                value="files",
                options=source_options,
            ),
            # Only one of the next two ports is shown, driven by the radio above.
            "files": InputPort(
                name="files",
                port_type=PortType.FILE,
                value=[],
                multiple=True,
                support_file_types=[".wav", ".mp3", ".mp4", ".m4a", ".wma", ".aac", ".ogg", ".amr", ".flac"],
                condition="fields_data.get('files_or_urls') == 'files'",
            ),
            "urls": InputPort(
                name="urls",
                port_type=PortType.TEXT,
                value="",
                condition="fields_data.get('files_or_urls') == 'urls'",
            ),
            "output_type": InputPort(
                name="output_type",
                port_type=PortType.SELECT,
                value="text",
                options=[
                    {"value": fmt, "label": fmt}
                    for fmt in ("text", "list", "srt")
                ],
            ),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="SpeechRecognition",
            category="media_processing",
            task_name="media_processing.speech_recognition",
            node_id=id,
            ports=ports,
        )
|