vectorvein 0.1.80__py3-none-any.whl → 0.1.81__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,802 @@
1
+ from typing import Optional
2
+
3
+ from ..graph.node import Node
4
+ from ..graph.port import PortType, InputPort, OutputPort
5
+
6
+
7
class AliyunQwen(Node):
    """Workflow node for the Aliyun Qwen LLM task (``llms.aliyun_qwen``).

    Exposes the usual chat-completion inputs (prompt, model selection,
    sampling parameters, stream toggle, system prompt, response format)
    and a single ``output`` port.
    """

    def __init__(self, id: Optional[str] = None):
        # Model identifiers for the llm_model dropdown; value and label are
        # identical for every entry.
        qwen_models = [
            "qwen2.5-72b-instruct",
            "qwen2.5-32b-instruct",
            "qwen2.5-coder-32b-instruct",
            "qwq-32b-preview",
            "qwen2.5-14b-instruct",
            "qwen2.5-7b-instruct",
            "qwen2.5-coder-7b-instruct",
        ]
        super().__init__(
            node_type="AliyunQwen",
            category="llms",
            task_name="llms.aliyun_qwen",
            node_id=id,
            ports={
                # Fix: every other LLM node in this module renders its prompt
                # as a TEXTAREA; the original PortType.TEXT was inconsistent.
                "prompt": InputPort(
                    name="prompt",
                    port_type=PortType.TEXTAREA,
                    value="",
                ),
                "llm_model": InputPort(
                    name="llm_model",
                    port_type=PortType.SELECT,
                    value="qwen2.5-72b-instruct",
                    options=[{"value": m, "label": m} for m in qwen_models],
                ),
                "top_p": InputPort(
                    name="top_p",
                    port_type=PortType.NUMBER,
                    value=0.95,
                ),
                "temperature": InputPort(
                    name="temperature",
                    port_type=PortType.TEMPERATURE,
                    value=0.7,
                ),
                "stream": InputPort(
                    name="stream",
                    port_type=PortType.CHECKBOX,
                    value=False,
                ),
                "system_prompt": InputPort(
                    name="system_prompt",
                    port_type=PortType.TEXTAREA,
                    value="",
                ),
                "response_format": InputPort(
                    name="response_format",
                    port_type=PortType.SELECT,
                    value="text",
                    options=[
                        {"value": "text", "label": "Text"},
                        {"value": "json_object", "label": "JSON"},
                    ],
                ),
                "output": OutputPort(),
            },
        )
66
+
67
+
68
class BaiduWenxin(Node):
    """Workflow node for the Baidu Wenxin (ERNIE) LLM task (``llms.baidu_wenxin``)."""

    def __init__(self, id: Optional[str] = None):
        # Dropdown entries: value and label are identical for every model.
        ernie_models = ["ernie-lite", "ernie-speed", "ernie-3.5", "ernie-4.0"]
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="ernie-3.5",
                options=[{"value": m, "label": m} for m in ernie_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="BaiduWenxin",
            category="llms",
            task_name="llms.baidu_wenxin",
            node_id=id,
            ports=port_map,
        )
105
+
106
+
107
class ChatGLM(Node):
    """Workflow node for the Zhipu ChatGLM LLM task (``llms.chat_glm``).

    Besides the standard chat inputs, it supports function calling: when
    ``use_function_call`` is enabled, the two extra output ports become
    visible via their ``condition`` expressions.
    """

    def __init__(self, id: Optional[str] = None):
        glm_models = [
            "glm-4-plus",
            "glm-4",
            "glm-4-0520",
            "glm-4-air",
            "glm-4-airx",
            "glm-4-flash",
            "glm-4-long",
            "glm-zero-preview",
        ]
        # Front-end visibility expression for the function-call output ports.
        fc_condition = "return fieldsData.use_function_call.value"
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="glm-4-air",
                options=[{"value": m, "label": m} for m in glm_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "top_p": InputPort(name="top_p", port_type=PortType.NUMBER, value=0.95),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "system_prompt": InputPort(
                name="system_prompt", port_type=PortType.TEXTAREA, value=""
            ),
            "use_function_call": InputPort(
                name="use_function_call", port_type=PortType.CHECKBOX, value=False
            ),
            "functions": InputPort(name="functions", port_type=PortType.SELECT, value=[]),
            "function_call_mode": InputPort(
                name="function_call_mode",
                port_type=PortType.SELECT,
                value="auto",
                options=[{"value": m, "label": m} for m in ("auto", "none")],
            ),
            "output": OutputPort(name="output"),
            "function_call_output": OutputPort(
                name="function_call_output", condition=fc_condition
            ),
            "function_call_arguments": OutputPort(
                name="function_call_arguments", condition=fc_condition
            ),
        }
        super().__init__(
            node_type="ChatGLM",
            category="llms",
            task_name="llms.chat_glm",
            node_id=id,
            ports=port_map,
        )
187
+
188
+
189
class Claude(Node):
    """Workflow node for the Anthropic Claude LLM task (``llms.claude``)."""

    def __init__(self, id: Optional[str] = None):
        claude_models = [
            "claude-3-5-sonnet",
            "claude-3-5-haiku",
            "claude-3-opus",
            "claude-3-sonnet",
            "claude-3-haiku",
        ]
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="claude-3-5-haiku",
                options=[{"value": m, "label": m} for m in claude_models],
            ),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "system_prompt": InputPort(
                name="system_prompt", port_type=PortType.TEXTAREA, value=""
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="Claude",
            category="llms",
            task_name="llms.claude",
            node_id=id,
            ports=port_map,
        )
232
+
233
+
234
class Deepseek(Node):
    """Workflow node for the DeepSeek LLM task (``llms.deepseek``).

    The ``reasoning_content`` output is only shown when the reasoner model
    is selected; the function-call outputs are gated on ``use_function_call``.
    """

    def __init__(self, id: Optional[str] = None):
        deepseek_models = ["deepseek-chat", "deepseek-reasoner", "deepseek-32k"]
        # Front-end visibility expression for the function-call output ports.
        fc_condition = "return fieldsData.use_function_call.value"
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="deepseek-chat",
                options=[{"value": m, "label": m} for m in deepseek_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "top_p": InputPort(name="top_p", port_type=PortType.NUMBER, value=0.95),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "system_prompt": InputPort(
                name="system_prompt", port_type=PortType.TEXTAREA, value=""
            ),
            "response_format": InputPort(
                name="response_format",
                port_type=PortType.SELECT,
                value="text",
                options=[
                    {"value": "text", "label": "Text"},
                    {"value": "json_object", "label": "JSON"},
                ],
            ),
            "use_function_call": InputPort(
                name="use_function_call", port_type=PortType.CHECKBOX, value=False
            ),
            "functions": InputPort(name="functions", port_type=PortType.SELECT, value=[]),
            "function_call_mode": InputPort(
                name="function_call_mode",
                port_type=PortType.SELECT,
                value="auto",
                options=[{"value": m, "label": m} for m in ("auto", "none")],
            ),
            "output": OutputPort(name="output"),
            "reasoning_content": OutputPort(
                name="reasoning_content",
                condition="return fieldsData.llm_model.value === 'deepseek-reasoner'",
            ),
            "function_call_output": OutputPort(
                name="function_call_output", condition=fc_condition
            ),
            "function_call_arguments": OutputPort(
                name="function_call_arguments", condition=fc_condition
            ),
        }
        super().__init__(
            node_type="Deepseek",
            category="llms",
            task_name="llms.deepseek",
            node_id=id,
            ports=port_map,
        )
322
+
323
+
324
class Gemini(Node):
    """Workflow node for the Google Gemini LLM task (``llms.gemini``).

    Function-call output ports are gated on the ``use_function_call`` checkbox
    via their ``condition`` expressions.
    """

    def __init__(self, id: Optional[str] = None):
        gemini_models = [
            "gemini-1.5-flash",
            "gemini-1.5-pro",
            "gemini-2.0-flash-exp",
            "gemini-2.0-flash-thinking-exp-01-21",
            "gemini-exp-1206",
        ]
        # Front-end visibility expression for the function-call output ports.
        fc_condition = "return fieldsData.use_function_call.value"
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="gemini-1.5-flash",
                options=[{"value": m, "label": m} for m in gemini_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "top_p": InputPort(name="top_p", port_type=PortType.NUMBER, value=0.95),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "system_prompt": InputPort(
                name="system_prompt", port_type=PortType.TEXTAREA, value=""
            ),
            "response_format": InputPort(
                name="response_format",
                port_type=PortType.SELECT,
                value="text",
                options=[
                    {"value": "text", "label": "Text"},
                    {"value": "json_object", "label": "JSON"},
                ],
            ),
            "use_function_call": InputPort(
                name="use_function_call", port_type=PortType.CHECKBOX, value=False
            ),
            "functions": InputPort(name="functions", port_type=PortType.SELECT, value=[]),
            "function_call_mode": InputPort(
                name="function_call_mode",
                port_type=PortType.SELECT,
                value="auto",
                options=[{"value": m, "label": m} for m in ("auto", "none")],
            ),
            "output": OutputPort(name="output"),
            "function_call_output": OutputPort(
                name="function_call_output", condition=fc_condition
            ),
            "function_call_arguments": OutputPort(
                name="function_call_arguments", condition=fc_condition
            ),
        }
        super().__init__(
            node_type="Gemini",
            category="llms",
            task_name="llms.gemini",
            node_id=id,
            ports=port_map,
        )
413
+
414
+
415
class LingYiWanWu(Node):
    """Workflow node for the 01.AI Yi LLM task (``llms.ling_yi_wan_wu``)."""

    def __init__(self, id: Optional[str] = None):
        yi_models = [
            "yi-lightning",
            "yi-large",
            "yi-large-turbo",
            "yi-medium",
            "yi-medium-200k",
            "yi-spark",
        ]
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="yi-lightning",
                options=[{"value": m, "label": m} for m in yi_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "top_p": InputPort(name="top_p", port_type=PortType.NUMBER, value=0.95),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "output": OutputPort(),
        }
        super().__init__(
            node_type="LingYiWanWu",
            category="llms",
            task_name="llms.ling_yi_wan_wu",
            node_id=id,
            ports=port_map,
        )
459
+
460
+
461
class MiniMax(Node):
    """Workflow node for the MiniMax LLM task (``llms.mini_max``).

    Function-call output ports are gated on the ``use_function_call`` checkbox
    via their ``condition`` expressions.
    """

    def __init__(self, id: Optional[str] = None):
        minimax_models = ["abab6.5s-chat", "MiniMax-Text-01"]
        # Front-end visibility expression for the function-call output ports.
        fc_condition = "return fieldsData.use_function_call.value"
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="MiniMax-Text-01",
                options=[{"value": m, "label": m} for m in minimax_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "top_p": InputPort(name="top_p", port_type=PortType.NUMBER, value=0.95),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "system_prompt": InputPort(
                name="system_prompt", port_type=PortType.TEXTAREA, value=""
            ),
            "response_format": InputPort(
                name="response_format",
                port_type=PortType.SELECT,
                value="text",
                options=[
                    {"value": "text", "label": "Text"},
                    {"value": "json_object", "label": "JSON"},
                ],
            ),
            "use_function_call": InputPort(
                name="use_function_call", port_type=PortType.CHECKBOX, value=False
            ),
            "functions": InputPort(name="functions", port_type=PortType.SELECT, value=[]),
            "function_call_mode": InputPort(
                name="function_call_mode",
                port_type=PortType.SELECT,
                value="auto",
                options=[{"value": m, "label": m} for m in ("auto", "none")],
            ),
            "output": OutputPort(name="output"),
            "function_call_output": OutputPort(
                name="function_call_output", condition=fc_condition
            ),
            "function_call_arguments": OutputPort(
                name="function_call_arguments", condition=fc_condition
            ),
        }
        super().__init__(
            node_type="MiniMax",
            category="llms",
            task_name="llms.mini_max",
            node_id=id,
            ports=port_map,
        )
544
+
545
+
546
class Moonshot(Node):
    """Workflow node for the Moonshot LLM task (``llms.moonshot``).

    Function-call output ports are gated on the ``use_function_call`` checkbox
    via their ``condition`` expressions.
    """

    def __init__(self, id: Optional[str] = None):
        moonshot_models = ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]
        # Front-end visibility expression for the function-call output ports.
        fc_condition = "return fieldsData.use_function_call.value"
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="moonshot-v1-8k",
                options=[{"value": m, "label": m} for m in moonshot_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "top_p": InputPort(name="top_p", port_type=PortType.NUMBER, value=0.95),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "system_prompt": InputPort(
                name="system_prompt", port_type=PortType.TEXTAREA, value=""
            ),
            "response_format": InputPort(
                name="response_format",
                port_type=PortType.SELECT,
                value="text",
                options=[
                    {"value": "text", "label": "Text"},
                    {"value": "json_object", "label": "JSON"},
                ],
            ),
            "use_function_call": InputPort(
                name="use_function_call", port_type=PortType.CHECKBOX, value=False
            ),
            "functions": InputPort(name="functions", port_type=PortType.SELECT, value=[]),
            "function_call_mode": InputPort(
                name="function_call_mode",
                port_type=PortType.SELECT,
                value="auto",
                options=[{"value": m, "label": m} for m in ("auto", "none")],
            ),
            "output": OutputPort(name="output"),
            "function_call_output": OutputPort(
                name="function_call_output", condition=fc_condition
            ),
            "function_call_arguments": OutputPort(
                name="function_call_arguments", condition=fc_condition
            ),
        }
        super().__init__(
            node_type="Moonshot",
            category="llms",
            task_name="llms.moonshot",
            node_id=id,
            ports=port_map,
        )
630
+
631
+
632
class OpenAI(Node):
    """Workflow node for the OpenAI LLM task (``llms.open_ai``).

    Function-call output ports are gated on the ``use_function_call`` checkbox
    via their ``condition`` expressions.
    """

    def __init__(self, id: Optional[str] = None):
        # (value, label) pairs — for two legacy entries the label differs
        # from the stored value.
        openai_models = [
            ("gpt-3.5", "gpt-3.5-turbo"),
            ("gpt-4", "gpt-4-turbo"),
            ("gpt-4o", "gpt-4o"),
            ("gpt-4o-mini", "gpt-4o-mini"),
            ("o1-mini", "o1-mini"),
            ("o1-preview", "o1-preview"),
        ]
        # Front-end visibility expression for the function-call output ports.
        fc_condition = "return fieldsData.use_function_call.value"
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="gpt-4o-mini",
                options=[{"value": v, "label": l} for v, l in openai_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "top_p": InputPort(name="top_p", port_type=PortType.NUMBER, value=0.95),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "system_prompt": InputPort(
                name="system_prompt", port_type=PortType.TEXTAREA, value=""
            ),
            "response_format": InputPort(
                name="response_format",
                port_type=PortType.SELECT,
                value="text",
                options=[
                    {"value": "text", "label": "Text"},
                    {"value": "json_object", "label": "JSON"},
                ],
            ),
            "use_function_call": InputPort(
                name="use_function_call", port_type=PortType.CHECKBOX, value=False
            ),
            "functions": InputPort(name="functions", port_type=PortType.SELECT, value=[]),
            "function_call_mode": InputPort(
                name="function_call_mode",
                port_type=PortType.SELECT,
                value="auto",
                options=[{"value": m, "label": m} for m in ("auto", "none")],
            ),
            "output": OutputPort(name="output"),
            "function_call_output": OutputPort(
                name="function_call_output", condition=fc_condition
            ),
            "function_call_arguments": OutputPort(
                name="function_call_arguments", condition=fc_condition
            ),
        }
        super().__init__(
            node_type="OpenAI",
            category="llms",
            task_name="llms.open_ai",
            node_id=id,
            ports=port_map,
        )
719
+
720
+
721
class XAi(Node):
    """Workflow node for the xAI Grok LLM task (``llms.x_ai``).

    Function-call output ports are gated on the ``use_function_call`` checkbox
    via their ``condition`` expressions.
    """

    def __init__(self, id: Optional[str] = None):
        xai_models = ["grok-beta"]
        # Front-end visibility expression for the function-call output ports.
        fc_condition = "return fieldsData.use_function_call.value"
        port_map = {
            "prompt": InputPort(name="prompt", port_type=PortType.TEXTAREA, value=""),
            "llm_model": InputPort(
                name="llm_model",
                port_type=PortType.SELECT,
                value="grok-beta",
                options=[{"value": m, "label": m} for m in xai_models],
            ),
            "temperature": InputPort(
                name="temperature", port_type=PortType.TEMPERATURE, value=0.7
            ),
            "top_p": InputPort(name="top_p", port_type=PortType.NUMBER, value=0.95),
            "stream": InputPort(name="stream", port_type=PortType.CHECKBOX, value=False),
            "system_prompt": InputPort(
                name="system_prompt", port_type=PortType.TEXTAREA, value=""
            ),
            "response_format": InputPort(
                name="response_format",
                port_type=PortType.SELECT,
                value="text",
                options=[
                    {"value": "text", "label": "Text"},
                    {"value": "json_object", "label": "JSON"},
                ],
            ),
            "use_function_call": InputPort(
                name="use_function_call", port_type=PortType.CHECKBOX, value=False
            ),
            "functions": InputPort(name="functions", port_type=PortType.SELECT, value=[]),
            "function_call_mode": InputPort(
                name="function_call_mode",
                port_type=PortType.SELECT,
                value="auto",
                options=[{"value": m, "label": m} for m in ("auto", "none")],
            ),
            "output": OutputPort(name="output"),
            "function_call_output": OutputPort(
                name="function_call_output", condition=fc_condition
            ),
            "function_call_arguments": OutputPort(
                name="function_call_arguments", condition=fc_condition
            ),
        }
        super().__init__(
            node_type="XAi",
            category="llms",
            task_name="llms.x_ai",
            node_id=id,
            ports=port_map,
        )