langflow-base-nightly 1.7.0.dev55__py3-none-any.whl → 1.7.0.dev57__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
- langflow/api/v2/files.py +6 -6
- langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +31 -1088
- langflow/initial_setup/starter_projects/Basic Prompting.json +196 -135
- langflow/initial_setup/starter_projects/Blog Writer.json +141 -84
- langflow/initial_setup/starter_projects/Custom Component Generator.json +133 -73
- langflow/initial_setup/starter_projects/Document Q&A.json +136 -81
- langflow/initial_setup/starter_projects/Financial Report Parser.json +12 -365
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json +19 -729
- langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +688 -733
- langflow/initial_setup/starter_projects/Instagram Copywriter.json +322 -203
- langflow/initial_setup/starter_projects/Invoice Summarizer.json +47 -21
- langflow/initial_setup/starter_projects/Market Research.json +63 -394
- langflow/initial_setup/starter_projects/Meeting Summary.json +266 -168
- langflow/initial_setup/starter_projects/Memory Chatbot.json +136 -81
- langflow/initial_setup/starter_projects/News Aggregator.json +49 -24
- langflow/initial_setup/starter_projects/Nvidia Remix.json +48 -23
- langflow/initial_setup/starter_projects/Pokédex Agent.json +49 -23
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +113 -418
- langflow/initial_setup/starter_projects/Price Deal Finder.json +48 -22
- langflow/initial_setup/starter_projects/Research Agent.json +319 -181
- langflow/initial_setup/starter_projects/Research Translation Loop.json +636 -615
- langflow/initial_setup/starter_projects/SEO Keyword Generator.json +145 -89
- langflow/initial_setup/starter_projects/SaaS Pricing.json +48 -22
- langflow/initial_setup/starter_projects/Search agent.json +47 -21
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +147 -54
- langflow/initial_setup/starter_projects/Simple Agent.json +47 -16
- langflow/initial_setup/starter_projects/Social Media Agent.json +47 -16
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +398 -251
- langflow/initial_setup/starter_projects/Travel Planning Agents.json +146 -53
- langflow/initial_setup/starter_projects/Twitter Thread Generator.json +137 -81
- langflow/initial_setup/starter_projects/Vector Store RAG.json +133 -82
- langflow/initial_setup/starter_projects/Youtube Analysis.json +182 -106
- langflow/services/storage/local.py +13 -8
- langflow/services/storage/s3.py +0 -6
- {langflow_base_nightly-1.7.0.dev55.dist-info → langflow_base_nightly-1.7.0.dev57.dist-info}/METADATA +2 -2
- {langflow_base_nightly-1.7.0.dev55.dist-info → langflow_base_nightly-1.7.0.dev57.dist-info}/RECORD +38 -38
- {langflow_base_nightly-1.7.0.dev55.dist-info → langflow_base_nightly-1.7.0.dev57.dist-info}/WHEEL +0 -0
- {langflow_base_nightly-1.7.0.dev55.dist-info → langflow_base_nightly-1.7.0.dev57.dist-info}/entry_points.txt +0 -0
langflow/initial_setup/starter_projects/Image Sentiment Analysis.json

Edges: every node ID suffix is regenerated, and the `reactflow__edge-*` / `xy-edge__*` edge IDs together with their serialized `sourceHandle`/`targetHandle` strings are rewritten to match (the handle-string format is sketched below). The new flow wires:

- ChatInput-7S2Wg `message` → LanguageModelComponent-KHx2J `input_value`
- Prompt-fx7aI `prompt` → LanguageModelComponent-KHx2J `system_message`
- LanguageModelComponent-KHx2J `text_output` → StructuredOutput-e4qlS `input_value`
- StructuredOutput-e4qlS `structured_output` (Data) → parser-IFSS9 `input_data` (accepts DataFrame or Data)
- parser-IFSS9 `parsed_text` → ChatOutput-Ou5RJ `input_value`

The edge that previously fed a second Language Model's `model_output` (LanguageModel) into the Structured Output's `llm` input (LanguageModelComponent-yEikN → StructuredOutput-bek9G) is removed, and the old ID suffixes (ChatInput-wjsFE, LanguageModelComponent-TSuC2, LanguageModelComponent-yEikN, StructuredOutput-bek9G, parser-mPcuh) disappear with it.
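The serialized handle strings inside these edge IDs are the handle dicts rendered as JSON with every double quote swapped for `œ`; the edge ID concatenates the source node ID, the compact source handle, the target node ID, and the compact target handle. A minimal sketch of that pattern; `escape_handle` is a hypothetical helper written for illustration, not a Langflow API:

```python
import json

# Handle dicts as they appear under data.sourceHandle / data.targetHandle in the flow JSON.
source_handle = {
    "dataType": "parser",
    "id": "parser-IFSS9",
    "name": "parsed_text",
    "output_types": ["Message"],
}
target_handle = {
    "fieldName": "input_value",
    "id": "ChatOutput-Ou5RJ",
    "inputTypes": ["Data", "DataFrame", "Message"],
    "type": "str",
}


def escape_handle(handle: dict, compact: bool = True) -> str:
    """Serialize a handle dict and replace double quotes with 'œ'.

    The edge "id" fields use the compact form (no spaces); the top-level
    "sourceHandle"/"targetHandle" strings keep ", " and ": " separators.
    """
    separators = (",", ":") if compact else (", ", ": ")
    return json.dumps(handle, separators=separators).replace('"', "œ")


edge_id = (
    f"reactflow__edge-{source_handle['id']}{escape_handle(source_handle)}"
    f"-{target_handle['id']}{escape_handle(target_handle)}"
)
print(edge_id)                                      # matches the rewritten edge "id" above
print(escape_handle(source_handle, compact=False))  # matches the "sourceHandle" string
```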
The Chat Input, Chat Output, note, Prompt, and Parser node entries keep their code and descriptions and pick up the regenerated IDs plus a version bump:

- ChatInput-7S2Wg: `lf_version` → "1.7.0", measured height → 204 (code_hash 7a26c54d89ed unchanged).
- ChatOutput-Ou5RJ: `lf_version` → "1.7.0", measured height → 204 (code_hash 8c87e536cca4 unchanged).
- note-30lqA: the quickstart note (OpenAI API key prerequisite, Playground instructions) is unchanged apart from its ID.
- Prompt-fx7aI: `lf_version` → "1.7.0", measured height → 222 (code_hash 3bf0b511e227 unchanged).
- parser-IFSS9: `lf_version` → "1.7.0" (measured height 361 unchanged).
The next node entry changes from a Language Model component (module `lfx.components.models.language_model.LanguageModelComponent`, with `provider`/`model_name` dropdowns, a temperature slider, a stream toggle, and langchain provider dependencies) to the Structured Output component, StructuredOutput-e4qlS:

- base_classes Data and DataFrame; description "Uses an LLM to generate structured data. Ideal for extraction and consistency."; documentation https://docs.langflow.org/components-processing#structured-output; icon "braces"; `last_updated` 2025-12-18T20:09:36.543Z; `lf_version` "1.7.0".
- module `lfx.components.llm_operations.structured_output.StructuredOutputComponent`; code_hash 058ca1f51e9f; dependencies pydantic 2.11.10, trustcall 0.0.39, and lfx (3 total).
- field_order: llm, input_value, system_prompt, schema_name, output_schema.
- Outputs `structured_output` (Data, method `build_structured_output`, selected) and `dataframe_output` (DataFrame, method `build_structured_dataframe`), both displayed as "Structured Output".
- The embedded component code builds a row model from the Output Schema table, wraps it in a list-valued `objects` field via `pydantic.create_model`, extracts with trustcall's `create_extractor`, and falls back to langchain's `with_structured_output` when tool calling is unavailable (the wrapper pattern is sketched after this list).
- Template: `api_key` (advanced SecretStrInput, empty value), `input_value` (required MultilineInput "Input Message" with tool_mode), `system_prompt` (required, advanced "Format Instructions" carrying the JSON-extraction instructions), `schema_name` (advanced MessageTextInput), and `output_schema` (required TableInput with name/description/type/multiple columns, preset to two rows: `sentiment`, "A Positive|Negative value that represents the image.", and `description`, "Brief Description of the image").
- `model` is a required ModelInput ("Language Model") with a "Connect other models" hook and per-model option metadata (category, icon, model_class, api_key_param, model_name_param, context_length) covering Anthropic (claude-opus-4-5-20251101, claude-haiku-4-5-20251001, claude-sonnet-4-5-20250929), OpenAI (gpt-5.1, gpt-4o-mini, o1), and Ollama (llama3.3, qwq), plus disabled-provider placeholders for Google Generative AI (GOOGLE_API_KEY) and IBM Watsonx (WATSONX_APIKEY); the stored value is gpt-5.1.
- Node box: measured height 387, position x ≈ 2013.29, y ≈ 328.83, `selected_output` "structured_output", type "StructuredOutput".
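The wrapper-model pattern from the embedded StructuredOutputComponent source, as a minimal self-contained sketch using this project's Output Schema rows; `build_row_model` stands in for lfx's `build_model_from_schema`, whose exact behavior is assumed here:

```python
from pydantic import BaseModel, Field, create_model

# Output Schema rows as configured for this starter project (see output_schema.value).
schema_rows = [
    {"name": "sentiment", "type": "str", "multiple": "False",
     "description": "A Positive|Negative value that represents the image."},
    {"name": "description", "type": "str", "multiple": "False",
     "description": "Brief Description of the image"},
]

_TYPES = {"str": str, "int": int, "float": float, "bool": bool, "dict": dict}


def build_row_model(rows: list[dict]) -> type[BaseModel]:
    """Stand-in for lfx.helpers.base_model.build_model_from_schema (assumed behavior)."""
    fields = {}
    for row in rows:
        base = _TYPES[row["type"]]
        annotation = list[base] if row.get("multiple") == "True" else base
        fields[row["name"]] = (annotation, Field(description=row["description"]))
    return create_model("OutputRow", **fields)


row_model = build_row_model(schema_rows)

# The component wraps the row model in a list-valued `objects` field so the LLM can
# return one or many matches; min_length=1 nudges it toward non-empty output.
wrapper = create_model(
    "OutputModel",
    __doc__="A list of OutputModel.",
    objects=(list[row_model], Field(description="A list of OutputModel.", min_length=1)),
)

parsed = wrapper.model_validate(
    {"objects": [{"sentiment": "Positive", "description": "A sunny beach photo."}]}
)
print([row.model_dump() for row in parsed.objects])
```

At run time the component hands a wrapper like this to trustcall's `create_extractor`, keeps `with_structured_output` as the fallback, and unpacks the `objects` list into the Data or DataFrame output.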
The Language Model node that remains in the flow is renamed LanguageModelComponent-KHx2J and reworked around the unified model selector:

- documentation → https://docs.langflow.org/components-models; `last_updated` 2025-12-18T20:09:41.555Z; `lf_version` "1.7.0"; code_hash → 7fe3257f2169; module → `lfx.components.models_and_agents.language_model.LanguageModelComponent`.
- field_order now starts with `model` and adds `base_url_ibm_watsonx`, `project_id`, and `ollama_base_url` after `api_key`; the old `provider` and `model_name` entries are gone.
- Dependencies shrink to lfx alone (1 total); langchain_anthropic 0.3.14, langchain_google_genai 2.0.6, and langchain_openai 0.3.23 are dropped.
- Both outputs, `text_output` ("Model Response") and `model_output` ("Language Model"), gain `"loop_types": null`.
- The template gains `_frontend_node_flow_id` (c3b4019e-b2fe-4b7e-8df7-c85a47b5188b) and `_frontend_node_folder_id` (afaca9ee-97e3-4211-8569-722a4058ea5e); `api_key` becomes an advanced input with `override_skip`/`track_in_telemetry` flags and an empty value; a `base_url_ibm_watsonx` DropdownInput is added with the six IBM watsonx.ai endpoints (default https://us-south.ml.cloud.ibm.com); `input_value` becomes a MessageInput with the same new flags; the embedded LanguageModelComponent source, unchanged here, resolves the selection through `get_llm`.

The final hunk replaces the template's old provider and model-name dropdowns with the unified `model` ModelInput; its per-model metadata is sketched next.
@@ -1573,107 +1697,262 @@
|
|
|
1573
1697
|
"tool_mode": false,
|
|
1574
1698
|
"trace_as_input": true,
|
|
1575
1699
|
"trace_as_metadata": true,
|
|
1700
|
+
"track_in_telemetry": false,
|
|
1576
1701
|
"type": "str",
|
|
1577
1702
|
"value": ""
|
|
1578
1703
|
},
|
|
1579
|
-
"
|
|
1580
|
-
|
|
1704
|
+
"is_refresh": false,
|
|
1705
|
+
"model": {
|
|
1706
|
+
"_input_type": "ModelInput",
|
|
1581
1707
|
"advanced": false,
|
|
1582
|
-
"
|
|
1583
|
-
"dialog_inputs": {},
|
|
1584
|
-
"display_name": "Model Name",
|
|
1708
|
+
"display_name": "Language Model",
|
|
1585
1709
|
"dynamic": false,
|
|
1586
|
-
"
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1599
|
-
"
|
|
1600
|
-
"gpt-5-nano",
|
|
1601
|
-
"gpt-5-chat-latest",
|
|
1602
|
-
"o1",
|
|
1603
|
-
"o3-mini",
|
|
1604
|
-
"o3",
|
|
1605
|
-
"o3-pro",
|
|
1606
|
-
"o4-mini",
|
|
1607
|
-
"o4-mini-high"
|
|
1710
|
+
"external_options": {
|
|
1711
|
+
"fields": {
|
|
1712
|
+
"data": {
|
|
1713
|
+
"node": {
|
|
1714
|
+
"display_name": "Connect other models",
|
|
1715
|
+
"icon": "CornerDownLeft",
|
|
1716
|
+
"name": "connect_other_models"
|
|
1717
|
+
}
|
|
1718
|
+
}
|
|
1719
|
+
}
|
|
1720
|
+
},
|
|
1721
|
+
"info": "Select your model provider",
|
|
1722
|
+
"input_types": [
|
|
1723
|
+
"LanguageModel"
|
|
1608
1724
|
],
|
|
1609
|
-
"
|
|
1610
|
-
"
|
|
1611
|
-
"
|
|
1612
|
-
"
|
|
1613
|
-
"title_case": false,
|
|
1614
|
-
"toggle": false,
|
|
1615
|
-
"tool_mode": false,
|
|
1616
|
-
"trace_as_metadata": true,
|
|
1617
|
-
"type": "str",
|
|
1618
|
-
"value": "gpt-4o-mini"
|
|
1619
|
-
},
|
|
1620
|
-
"provider": {
|
|
1621
|
-
"_input_type": "DropdownInput",
|
|
1622
|
-
"advanced": false,
|
|
1623
|
-
"combobox": false,
|
|
1624
|
-
"dialog_inputs": {},
|
|
1625
|
-
"display_name": "Model Provider",
|
|
1626
|
-
"dynamic": false,
|
|
1627
|
-
"info": "Select the model provider",
|
|
1628
|
-
"name": "provider",
|
|
1725
|
+
"list": false,
|
|
1726
|
+
"list_add_label": "Add More",
|
|
1727
|
+
"model_type": "language",
|
|
1728
|
+
"name": "model",
|
|
1629
1729
|
"options": [
|
|
1630
|
-
"OpenAI",
|
|
1631
|
-
"Anthropic",
|
|
1632
|
-
"Google"
|
|
1633
|
-
],
|
|
1634
|
-
"options_metadata": [
|
|
1635
1730
|
{
|
|
1636
|
-
"
|
|
1731
|
+
"category": "Anthropic",
|
|
1732
|
+
"icon": "Anthropic",
|
|
1733
|
+
"metadata": {
|
|
1734
|
+
"api_key_param": "api_key",
|
|
1735
|
+
"context_length": 128000,
|
|
1736
|
+
"model_class": "ChatAnthropic",
|
|
1737
|
+
"model_name_param": "model"
|
|
1738
|
+
},
|
|
1739
|
+
"name": "claude-opus-4-5-20251101",
|
|
1740
|
+
"provider": "Anthropic"
|
|
1741
|
+
},
|
|
1742
|
+
{
|
|
1743
|
+
"category": "Anthropic",
|
|
1744
|
+
"icon": "Anthropic",
|
|
1745
|
+
"metadata": {
|
|
1746
|
+
"api_key_param": "api_key",
|
|
1747
|
+
"context_length": 128000,
|
|
1748
|
+
"model_class": "ChatAnthropic",
|
|
1749
|
+
"model_name_param": "model"
|
|
1750
|
+
},
|
|
1751
|
+
"name": "claude-haiku-4-5-20251001",
|
|
1752
|
+
"provider": "Anthropic"
|
|
1753
|
+
},
|
|
1754
|
+
{
|
|
1755
|
+
"category": "Anthropic",
|
|
1756
|
+
"icon": "Anthropic",
|
|
1757
|
+
"metadata": {
|
|
1758
|
+
"api_key_param": "api_key",
|
|
1759
|
+
"context_length": 128000,
|
|
1760
|
+
"model_class": "ChatAnthropic",
|
|
1761
|
+
"model_name_param": "model"
|
|
1762
|
+
},
|
|
1763
|
+
"name": "claude-sonnet-4-5-20250929",
|
|
1764
|
+
"provider": "Anthropic"
|
|
1765
|
+
},
|
|
1766
|
+
{
|
|
1767
|
+
"category": "OpenAI",
|
|
1768
|
+
"icon": "OpenAI",
|
|
1769
|
+
"metadata": {
|
|
1770
|
+
"api_key_param": "api_key",
|
|
1771
|
+
"context_length": 128000,
|
|
1772
|
+
"model_class": "ChatOpenAI",
|
|
1773
|
+
"model_name_param": "model",
|
|
1774
|
+
"reasoning_models": [
|
|
1775
|
+
"gpt-5.1"
|
|
1776
|
+
]
|
|
1777
|
+
},
|
|
1778
|
+
"name": "gpt-5.1",
|
|
1779
|
+
"provider": "OpenAI"
|
|
1780
|
+
},
|
|
1781
|
+
{
|
|
1782
|
+
"category": "OpenAI",
|
|
1783
|
+
"icon": "OpenAI",
|
|
1784
|
+
"metadata": {
|
|
1785
|
+
"api_key_param": "api_key",
|
|
1786
|
+
"context_length": 128000,
|
|
1787
|
+
"model_class": "ChatOpenAI",
|
|
1788
|
+
"model_name_param": "model"
|
|
1789
|
+
},
|
|
1790
|
+
"name": "gpt-4o-mini",
|
|
1791
|
+
"provider": "OpenAI"
|
|
1792
|
+
},
|
|
1793
|
+
{
|
|
1794
|
+
"category": "OpenAI",
|
|
1795
|
+
"icon": "OpenAI",
|
|
1796
|
+
"metadata": {
|
|
1797
|
+
"api_key_param": "api_key",
|
|
1798
|
+
"context_length": 128000,
|
|
1799
|
+
"model_class": "ChatOpenAI",
|
|
1800
|
+
"model_name_param": "model",
|
|
1801
|
+
"reasoning_models": [
|
|
1802
|
+
"o1"
|
|
1803
|
+
]
|
|
1804
|
+
},
|
|
1805
|
+
"name": "o1",
|
|
1806
|
+
"provider": "OpenAI"
|
|
1807
|
+
},
|
|
1808
|
+
{
|
|
1809
|
+
"category": "Ollama",
|
|
1810
|
+
"icon": "Ollama",
|
|
1811
|
+
"metadata": {
|
|
1812
|
+
"api_key_param": "base_url",
|
|
1813
|
+
"base_url_param": "base_url",
|
|
1814
|
+
"context_length": 128000,
|
|
1815
|
+
"model_class": "ChatOllama",
|
|
1816
|
+
"model_name_param": "model"
|
|
1817
|
+
},
|
|
1818
|
+
"name": "llama3.3",
|
|
1819
|
+
"provider": "Ollama"
|
|
1820
|
+
},
|
|
1821
|
+
{
|
|
1822
|
+
"category": "Ollama",
|
|
1823
|
+
"icon": "Ollama",
|
|
1824
|
+
"metadata": {
|
|
1825
|
+
"api_key_param": "base_url",
|
|
1826
|
+
"base_url_param": "base_url",
|
|
1827
|
+
"context_length": 128000,
|
|
1828
|
+
"model_class": "ChatOllama",
|
|
1829
|
+
"model_name_param": "model"
|
|
1830
|
+
},
|
|
1831
|
+
"name": "qwq",
|
|
1832
|
+
"provider": "Ollama"
|
|
1637
1833
|
},
|
|
1638
1834
|
{
|
|
1639
|
-
"
|
|
1835
|
+
"category": "Google Generative AI",
|
|
1836
|
+
"icon": "GoogleGenerativeAI",
|
|
1837
|
+
"metadata": {
|
|
1838
|
+
"is_disabled_provider": true,
|
|
1839
|
+
"variable_name": "GOOGLE_API_KEY"
|
|
1840
|
+
},
|
|
1841
|
+
"name": "__enable_provider_Google Generative AI__",
|
|
1842
|
+
"provider": "Google Generative AI"
|
|
1640
1843
|
},
|
|
1641
1844
|
{
|
|
1642
|
-
"
|
|
1845
|
+
"category": "IBM Watsonx",
|
|
1846
|
+
"icon": "WatsonxAI",
|
|
1847
|
+
"metadata": {
|
|
1848
|
+
"is_disabled_provider": true,
|
|
1849
|
+
"variable_name": "WATSONX_APIKEY"
|
|
1850
|
+
},
|
|
1851
|
+
"name": "__enable_provider_IBM Watsonx__",
|
|
1852
|
+
"provider": "IBM Watsonx"
|
|
1643
1853
|
}
|
|
1644
1854
|
],
|
|
1645
|
-
"
|
|
1855
|
+
"override_skip": false,
|
|
1856
|
+
"placeholder": "Setup Provider",
|
|
1646
1857
|
"real_time_refresh": true,
|
|
1647
|
-
"
|
|
1858
|
+
"refresh_button": true,
|
|
1859
|
+
"required": true,
|
|
1648
1860
|
"show": true,
|
|
1649
1861
|
"title_case": false,
|
|
1650
|
-
"toggle": false,
|
|
1651
1862
|
"tool_mode": false,
|
|
1652
|
-
"
|
|
1653
|
-
"
|
|
1654
|
-
"
|
|
1863
|
+
"trace_as_input": true,
|
|
1864
|
+
"track_in_telemetry": false,
|
|
1865
|
+
"type": "model",
|
|
1866
|
+
"value": [
|
|
1867
|
+
{
|
|
1868
|
+
"category": "OpenAI",
|
|
1869
|
+
"icon": "OpenAI",
|
|
1870
|
+
"metadata": {
|
|
1871
|
+
"api_key_param": "api_key",
|
|
1872
|
+
"context_length": 128000,
|
|
1873
|
+
"model_class": "ChatOpenAI",
|
|
1874
|
+
"model_name_param": "model",
|
|
1875
|
+
"reasoning_models": [
|
|
1876
|
+
"gpt-5.1"
|
|
1877
|
+
]
|
|
1878
|
+
},
|
|
1879
|
+
"name": "gpt-5.1",
|
|
1880
|
+
"provider": "OpenAI"
|
|
1881
|
+
}
|
|
1882
|
+
]
|
|
1655
1883
|
},
|
|
1656
|
-
"
|
|
1657
|
-
"_input_type": "
|
|
1658
|
-
"advanced":
|
|
1659
|
-
"display_name": "
|
|
1884
|
+
"ollama_base_url": {
|
|
1885
|
+
"_input_type": "MessageInput",
|
|
1886
|
+
"advanced": false,
|
|
1887
|
+
"display_name": "Ollama API URL",
|
|
1660
1888
|
"dynamic": false,
|
|
1661
|
-
"info": "
|
|
1889
|
+
"info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434",
|
|
1890
|
+
"input_types": [
|
|
1891
|
+
"Message"
|
|
1892
|
+
],
|
|
1662
1893
|
"list": false,
|
|
1663
1894
|
"list_add_label": "Add More",
|
|
1664
|
-
"
|
|
1895
|
+
"load_from_db": false,
|
|
1896
|
+
"name": "ollama_base_url",
|
|
1897
|
+
"override_skip": false,
|
|
1665
1898
|
"placeholder": "",
|
|
1899
|
+
"real_time_refresh": true,
|
|
1666
1900
|
"required": false,
|
|
1667
|
-
"show":
|
|
1901
|
+
"show": false,
|
|
1668
1902
|
"title_case": false,
|
|
1669
1903
|
"tool_mode": false,
|
|
1904
|
+
"trace_as_input": true,
|
|
1670
1905
|
"trace_as_metadata": true,
|
|
1906
|
+
"track_in_telemetry": false,
|
|
1907
|
+
"type": "str",
|
|
1908
|
+
"value": ""
|
|
1909
|
+
},
|
|
1910
|
+
"project_id": {
|
|
1911
|
+
"_input_type": "StrInput",
|
|
1912
|
+
"advanced": false,
|
|
1913
|
+
"display_name": "watsonx Project ID",
|
|
1914
|
+
"dynamic": false,
|
|
1915
|
+
"info": "The project ID associated with the foundation model (IBM watsonx.ai only)",
|
|
1916
|
+
"list": false,
|
|
1917
|
+
"list_add_label": "Add More",
|
|
1918
|
+
"load_from_db": false,
|
|
1919
|
+
"name": "project_id",
|
|
1920
|
+
"override_skip": false,
|
|
1921
|
+
"placeholder": "",
|
|
1922
|
+
"required": false,
|
|
1923
|
+
"show": false,
|
|
1924
|
+
"title_case": false,
|
|
1925
|
+
"tool_mode": false,
|
|
1926
|
+
"trace_as_metadata": true,
|
|
1927
|
+
"track_in_telemetry": false,
|
|
1928
|
+
"type": "str",
|
|
1929
|
+
"value": ""
|
|
1930
|
+
},
|
|
1931
|
+
"stream": {
|
|
1932
|
+
"_input_type": "BoolInput",
|
|
1933
|
+
"advanced": true,
|
|
1934
|
+
"display_name": "Stream",
|
|
1935
|
+
"dynamic": false,
|
|
1936
|
+
"info": "Whether to stream the response",
|
|
1937
|
+
"list": false,
|
|
1938
|
+
"list_add_label": "Add More",
|
|
1939
|
+
"name": "stream",
|
|
1940
|
+
"override_skip": false,
|
|
1941
|
+
"placeholder": "",
|
|
1942
|
+
"required": false,
|
|
1943
|
+
"show": true,
|
|
1944
|
+
"title_case": false,
|
|
1945
|
+
"tool_mode": false,
|
|
1946
|
+
"trace_as_metadata": true,
|
|
1947
|
+
"track_in_telemetry": true,
|
|
1671
1948
|
"type": "bool",
|
|
1672
1949
|
"value": false
|
|
1673
1950
|
},
|
|
1674
1951
|
"system_message": {
|
|
1675
|
-
"_input_type": "
|
|
1676
|
-
"advanced":
|
|
1952
|
+
"_input_type": "MultilineInput",
|
|
1953
|
+
"advanced": false,
|
|
1954
|
+
"ai_enabled": false,
|
|
1955
|
+
"copy_field": false,
|
|
1677
1956
|
"display_name": "System Message",
|
|
1678
1957
|
"dynamic": false,
|
|
1679
1958
|
"info": "A system message that helps set the behavior of the assistant",
|
|
@@ -1683,7 +1962,9 @@
|
|
|
1683
1962
|
"list": false,
|
|
1684
1963
|
"list_add_label": "Add More",
|
|
1685
1964
|
"load_from_db": false,
|
|
1965
|
+
"multiline": true,
|
|
1686
1966
|
"name": "system_message",
|
|
1967
|
+
"override_skip": false,
|
|
1687
1968
|
"placeholder": "",
|
|
1688
1969
|
"required": false,
|
|
1689
1970
|
"show": true,
|
|
@@ -1691,6 +1972,7 @@
|
|
|
1691
1972
|
"tool_mode": false,
|
|
1692
1973
|
"trace_as_input": true,
|
|
1693
1974
|
"trace_as_metadata": true,
|
|
1975
|
+
"track_in_telemetry": false,
|
|
1694
1976
|
"type": "str",
|
|
1695
1977
|
"value": ""
|
|
1696
1978
|
},
|
|
@@ -1705,6 +1987,7 @@
|
|
|
1705
1987
|
"min_label": "",
|
|
1706
1988
|
"min_label_icon": "",
|
|
1707
1989
|
"name": "temperature",
|
|
1990
|
+
"override_skip": false,
|
|
1708
1991
|
"placeholder": "",
|
|
1709
1992
|
"range_spec": {
|
|
1710
1993
|
"max": 1,
|
|
@@ -1719,6 +2002,7 @@
|
|
|
1719
2002
|
"slider_input": false,
|
|
1720
2003
|
"title_case": false,
|
|
1721
2004
|
"tool_mode": false,
|
|
2005
|
+
"track_in_telemetry": false,
|
|
1722
2006
|
"type": "slider",
|
|
1723
2007
|
"value": 0.1
|
|
1724
2008
|
}
|
|
@@ -1730,359 +2014,30 @@
|
|
|
1730
2014
|
"type": "LanguageModelComponent"
|
|
1731
2015
|
},
|
|
1732
2016
|
"dragging": false,
|
|
1733
|
-
"id": "LanguageModelComponent-
|
|
2017
|
+
"id": "LanguageModelComponent-KHx2J",
|
|
1734
2018
|
"measured": {
|
|
1735
|
-
"height":
|
|
2019
|
+
"height": 369,
|
|
1736
2020
|
"width": 320
|
|
1737
2021
|
},
|
|
1738
2022
|
"position": {
|
|
1739
|
-
"x":
|
|
1740
|
-
"y":
|
|
2023
|
+
"x": 1631.5692052860056,
|
|
2024
|
+
"y": 392.7085006745582
|
|
1741
2025
|
},
|
|
1742
2026
|
"selected": false,
|
|
1743
2027
|
"type": "genericNode"
|
|
1744
|
-
},
|
|
1745
|
-
{
|
|
1746
|
-
"data": {
|
|
1747
|
-
"id": "StructuredOutput-bek9G",
|
|
1748
|
-
"node": {
|
|
1749
|
-
"base_classes": [
|
|
1750
|
-
"Data",
|
|
1751
|
-
"DataFrame"
|
|
1752
|
-
],
|
|
1753
|
-
"beta": false,
|
|
1754
|
-
"conditional_paths": [],
|
|
1755
|
-
"custom_fields": {},
|
|
1756
|
-
"description": "Uses an LLM to generate structured data. Ideal for extraction and consistency.",
|
|
1757
|
-
"display_name": "Structured Output",
|
|
1758
|
-
"documentation": "https://docs.langflow.org/components-processing#structured-output",
|
|
1759
|
-
"edited": false,
|
|
1760
|
-
"field_order": [
|
|
1761
|
-
"llm",
|
|
1762
|
-
"input_value",
|
|
1763
|
-
"system_prompt",
|
|
1764
|
-
"schema_name",
|
|
1765
|
-
"output_schema"
|
|
1766
|
-
],
|
|
1767
|
-
"frozen": false,
|
|
1768
|
-
"icon": "braces",
|
|
1769
|
-
"legacy": false,
|
|
1770
|
-
"lf_version": "1.6.0",
|
|
1771
|
-
"metadata": {
|
|
1772
|
-
"code_hash": "058ca1f51e9f",
|
|
1773
|
-
"dependencies": {
|
|
1774
|
-
"dependencies": [
|
|
1775
|
-
{
|
|
1776
|
-
"name": "pydantic",
|
|
1777
|
-
"version": "2.11.10"
|
|
1778
|
-
},
|
|
1779
|
-
{
|
|
1780
|
-
"name": "trustcall",
|
|
1781
|
-
"version": "0.0.39"
|
|
1782
|
-
},
|
|
1783
|
-
{
|
|
1784
|
-
"name": "lfx",
|
|
1785
|
-
"version": null
|
|
1786
|
-
}
|
|
1787
|
-
],
|
|
1788
|
-
"total_dependencies": 3
|
|
1789
|
-
},
|
|
1790
|
-
"module": "lfx.components.llm_operations.structured_output.StructuredOutputComponent"
|
|
1791
|
-
},
|
|
1792
|
-
"minimized": false,
|
|
1793
|
-
"output_types": [],
|
|
1794
|
-
"outputs": [
|
|
1795
|
-
{
|
|
1796
|
-
"allows_loop": false,
|
|
1797
|
-
"cache": true,
|
|
1798
|
-
"display_name": "Structured Output",
|
|
1799
|
-
"group_outputs": false,
|
|
1800
|
-
"method": "build_structured_output",
|
|
1801
|
-
"name": "structured_output",
|
|
1802
|
-
"selected": "Data",
|
|
1803
|
-
"tool_mode": true,
|
|
1804
|
-
"types": [
|
|
1805
|
-
"Data"
|
|
1806
|
-
],
|
|
1807
|
-
"value": "__UNDEFINED__"
|
|
1808
|
-
},
|
|
1809
|
-
{
|
|
1810
|
-
"allows_loop": false,
|
|
1811
|
-
"cache": true,
|
|
1812
|
-
"display_name": "Structured Output",
|
|
1813
|
-
"group_outputs": false,
|
|
1814
|
-
"method": "build_structured_dataframe",
|
|
1815
|
-
"name": "dataframe_output",
|
|
1816
|
-
"selected": null,
|
|
1817
|
-
"tool_mode": true,
|
|
1818
|
-
"types": [
|
|
1819
|
-
"DataFrame"
|
|
1820
|
-
],
|
|
1821
|
-
"value": "__UNDEFINED__"
|
|
1822
|
-
}
|
|
1823
|
-
],
|
|
1824
|
-
"pinned": false,
|
|
1825
|
-
"template": {
|
|
1826
|
-
"_type": "Component",
|
|
1827
|
-
"api_key": {
|
|
1828
|
-
"_input_type": "SecretStrInput",
|
|
1829
|
-
"advanced": true,
|
|
1830
|
-
"display_name": "API Key",
|
|
1831
|
-
"dynamic": false,
|
|
1832
|
-
"info": "Model Provider API key",
|
|
1833
|
-
"input_types": [],
|
|
1834
|
-
"load_from_db": true,
|
|
1835
|
-
"name": "api_key",
|
|
1836
|
-
"override_skip": false,
|
|
1837
|
-
"password": true,
|
|
1838
|
-
"placeholder": "",
|
|
1839
|
-
"real_time_refresh": true,
|
|
1840
|
-
"required": false,
|
|
1841
|
-
"show": true,
|
|
1842
|
-
"title_case": false,
|
|
1843
|
-
"track_in_telemetry": false,
|
|
1844
|
-
"type": "str",
|
|
1845
|
-
"value": ""
|
|
1846
|
-
},
|
|
1847
|
-
"code": {
|
|
1848
|
-
"advanced": true,
|
|
1849
|
-
"dynamic": true,
|
|
1850
|
-
"fileTypes": [],
|
|
1851
|
-
"file_path": "",
|
|
1852
|
-
"info": "",
|
|
1853
|
-
"list": false,
|
|
1854
|
-
"load_from_db": false,
|
|
1855
|
-
"multiline": true,
|
|
1856
|
-
"name": "code",
|
|
1857
|
-
"password": false,
|
|
1858
|
-
"placeholder": "",
|
|
1859
|
-
"required": true,
|
|
1860
|
-
"show": true,
|
|
1861
|
-
"title_case": false,
|
|
1862
|
-
"type": "code",
|
|
1863
|
-
"value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom lfx.base.models.chat_result import get_chat_result\nfrom lfx.base.models.unified_models import (\n get_language_model_options,\n get_llm,\n update_model_options_in_build_config,\n)\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.helpers.base_model import build_model_from_schema\nfrom lfx.io import (\n MessageTextInput,\n ModelInput,\n MultilineInput,\n Output,\n SecretStrInput,\n TableInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = \"Uses an LLM to generate structured data. Ideal for extraction and consistency.\"\n documentation: str = \"https://docs.langflow.org/structured-output\"\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n ModelInput(\n name=\"model\",\n display_name=\"Language Model\",\n info=\"Select your model provider\",\n real_time_refresh=True,\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Model Provider API key\",\n real_time_refresh=True,\n advanced=True,\n ),\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. 
\"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_output\",\n ),\n Output(\n name=\"dataframe_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_dataframe\",\n ),\n ]\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n \"\"\"Dynamically update build config with user-filtered model options.\"\"\"\n return update_model_options_in_build_config(\n component=self,\n build_config=build_config,\n cache_key_prefix=\"language_model_options\",\n get_options_func=get_language_model_options,\n field_name=field_name,\n field_value=field_value,\n )\n\n def build_structured_output_base(self):\n schema_name = self.schema_name or \"OutputModel\"\n\n llm = get_llm(model=self.model, user_id=self.user_id, api_key=self.api_key)\n\n if not hasattr(llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(\n list[output_model_],\n Field(\n description=f\"A list of {schema_name}.\", # type: ignore[valid-type]\n min_length=1, # help ensure non-empty output\n ),\n ),\n )\n # Tracing config\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n # Generate structured output using Trustcall first, then fallback to Langchain if it fails\n result = self._extract_output_with_trustcall(llm, output_model, config_dict)\n if result is None:\n result = self._extract_output_with_langchain(llm, output_model, config_dict)\n\n # OPTIMIZATION NOTE: 
Simplified processing based on trustcall response structure\n # Handle non-dict responses (shouldn't happen with trustcall, but defensive)\n if not isinstance(result, dict):\n return result\n\n # Extract first response and convert BaseModel to dict\n responses = result.get(\"responses\", [])\n if not responses:\n return result\n\n # Convert BaseModel to dict (creates the \"objects\" key)\n first_response = responses[0]\n structured_data = first_response\n if isinstance(first_response, BaseModel):\n structured_data = first_response.model_dump()\n # Extract the objects array (guaranteed to exist due to our Pydantic model structure)\n return structured_data.get(\"objects\", structured_data)\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n if not isinstance(output, list) or not output:\n # handle empty or unexpected type case\n msg = \"No structured output returned\"\n raise ValueError(msg)\n if len(output) == 1:\n return Data(data=output[0])\n if len(output) > 1:\n # Multiple outputs - wrap them in a results container\n return Data(data={\"results\": output})\n return Data()\n\n def build_structured_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if not isinstance(output, list) or not output:\n # handle empty or unexpected type case\n msg = \"No structured output returned\"\n raise ValueError(msg)\n if len(output) == 1:\n # For single dictionary, wrap in a list to create DataFrame with one row\n return DataFrame([output[0]])\n if len(output) > 1:\n # Multiple outputs - convert to DataFrame directly\n return DataFrame(output)\n return DataFrame()\n\n def _extract_output_with_trustcall(self, llm, schema: BaseModel, config_dict: dict) -> list[BaseModel] | None:\n try:\n llm_with_structured_output = create_extractor(llm, tools=[schema], tool_choice=schema.__name__)\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n except Exception as e: # noqa: BLE001\n logger.warning(\n f\"Trustcall extraction failed, falling back to Langchain: {e} \"\n \"(Note: This may not be an error—some models or configurations do not support tool calling. \"\n \"Falling back is normal in such cases.)\"\n )\n return None\n return result or None # langchain fallback is used if error occurs or the result is empty\n\n def _extract_output_with_langchain(self, llm, schema: BaseModel, config_dict: dict) -> list[BaseModel] | None:\n try:\n llm_with_structured_output = llm.with_structured_output(schema)\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n result = result.get(\"objects\", result)\n except Exception as fallback_error:\n msg = (\n f\"Model does not support tool calling (trustcall failed) \"\n f\"and fallback with_structured_output also failed: {fallback_error}\"\n )\n raise ValueError(msg) from fallback_error\n\n return result or None\n"
|
|
1864
|
-
},
|
|
1865
|
-
"input_value": {
|
|
1866
|
-
"_input_type": "MultilineInput",
|
|
1867
|
-
"advanced": false,
|
|
1868
|
-
"copy_field": false,
|
|
1869
|
-
"display_name": "Input Message",
|
|
1870
|
-
"dynamic": false,
|
|
1871
|
-
"info": "The input message to the language model.",
|
|
1872
|
-
"input_types": [
|
|
1873
|
-
"Message"
|
|
1874
|
-
],
|
|
1875
|
-
"list": false,
|
|
1876
|
-
"list_add_label": "Add More",
|
|
1877
|
-
"load_from_db": false,
|
|
1878
|
-
"multiline": true,
|
|
1879
|
-
"name": "input_value",
|
|
1880
|
-
"placeholder": "",
|
|
1881
|
-
"required": true,
|
|
1882
|
-
"show": true,
|
|
1883
|
-
"title_case": false,
|
|
1884
|
-
"tool_mode": true,
|
|
1885
|
-
"trace_as_input": true,
|
|
1886
|
-
"trace_as_metadata": true,
|
|
1887
|
-
"type": "str",
|
|
1888
|
-
"value": ""
|
|
1889
|
-
},
|
|
1890
|
-
"model": {
|
|
1891
|
-
"_input_type": "ModelInput",
|
|
1892
|
-
"advanced": false,
|
|
1893
|
-
"display_name": "Language Model",
|
|
1894
|
-
"dynamic": false,
|
|
1895
|
-
"external_options": {
|
|
1896
|
-
"fields": {
|
|
1897
|
-
"data": {
|
|
1898
|
-
"node": {
|
|
1899
|
-
"display_name": "Connect other models",
|
|
1900
|
-
"icon": "CornerDownLeft",
|
|
1901
|
-
"name": "connect_other_models"
|
|
1902
|
-
}
|
|
1903
|
-
}
|
|
1904
|
-
}
|
|
1905
|
-
},
|
|
1906
|
-
"info": "Select your model provider",
|
|
1907
|
-
"input_types": [
|
|
1908
|
-
"LanguageModel"
|
|
1909
|
-
],
|
|
1910
|
-
"list": false,
|
|
1911
|
-
"list_add_label": "Add More",
|
|
1912
|
-
"model_type": "language",
|
|
1913
|
-
"name": "model",
|
|
1914
|
-
"override_skip": false,
|
|
1915
|
-
"placeholder": "Setup Provider",
|
|
1916
|
-
"real_time_refresh": true,
|
|
1917
|
-
"refresh_button": true,
|
|
1918
|
-
"required": true,
|
|
1919
|
-
"show": true,
|
|
1920
|
-
"title_case": false,
|
|
1921
|
-
"tool_mode": false,
|
|
1922
|
-
"trace_as_input": true,
|
|
1923
|
-
"track_in_telemetry": false,
|
|
1924
|
-
"type": "model",
|
|
1925
|
-
"value": ""
|
|
1926
|
-
},
|
|
1927
|
-
"output_schema": {
|
|
1928
|
-
"_input_type": "TableInput",
|
|
1929
|
-
"advanced": false,
|
|
1930
|
-
"display_name": "Output Schema",
|
|
1931
|
-
"dynamic": false,
|
|
1932
|
-
"info": "Define the structure and data types for the model's output.",
|
|
1933
|
-
"is_list": true,
|
|
1934
|
-
"list_add_label": "Add More",
|
|
1935
|
-
"name": "output_schema",
|
|
1936
|
-
"placeholder": "",
|
|
1937
|
-
"required": true,
|
|
1938
|
-
"show": true,
|
|
1939
|
-
"table_icon": "Table",
|
|
1940
|
-
"table_schema": [
|
|
1941
|
-
{
|
|
1942
|
-
"default": "field",
|
|
1943
|
-
"description": "Specify the name of the output field.",
|
|
1944
|
-
"display_name": "Name",
|
|
1945
|
-
"edit_mode": "inline",
|
|
1946
|
-
"formatter": "text",
|
|
1947
|
-
"name": "name",
|
|
1948
|
-
"type": "str"
|
|
1949
|
-
},
|
|
1950
|
-
{
|
|
1951
|
-
"default": "description of field",
|
|
1952
|
-
"description": "Describe the purpose of the output field.",
|
|
1953
|
-
"display_name": "Description",
|
|
1954
|
-
"edit_mode": "popover",
|
|
1955
|
-
"formatter": "text",
|
|
1956
|
-
"name": "description",
|
|
1957
|
-
"type": "str"
|
|
1958
|
-
},
|
|
1959
|
-
{
|
|
1960
|
-
"default": "str",
|
|
1961
|
-
"description": "Indicate the data type of the output field (e.g., str, int, float, bool, dict).",
|
|
1962
|
-
"display_name": "Type",
|
|
1963
|
-
"edit_mode": "inline",
|
|
1964
|
-
"formatter": "text",
|
|
1965
|
-
"name": "type",
|
|
1966
|
-
"options": [
|
|
1967
|
-
"str",
|
|
1968
|
-
"int",
|
|
1969
|
-
"float",
|
|
1970
|
-
"bool",
|
|
1971
|
-
"dict"
|
|
1972
|
-
],
|
|
1973
|
-
"type": "str"
|
|
1974
|
-
},
|
|
1975
|
-
{
|
|
1976
|
-
"default": "False",
|
|
1977
|
-
"description": "Set to True if this output field should be a list of the specified type.",
|
|
1978
|
-
"display_name": "As List",
|
|
1979
|
-
"edit_mode": "inline",
|
|
1980
|
-
"formatter": "text",
|
|
1981
|
-
"name": "multiple",
|
|
1982
|
-
"type": "boolean"
|
|
1983
|
-
}
|
|
1984
|
-
],
|
|
1985
|
-
"title_case": false,
|
|
1986
|
-
"tool_mode": false,
|
|
1987
|
-
"trace_as_metadata": true,
|
|
1988
|
-
"trigger_icon": "Table",
|
|
1989
|
-
"trigger_text": "Open table",
|
|
1990
|
-
"type": "table",
|
|
1991
|
-
"value": [
|
|
1992
|
-
{
|
|
1993
|
-
"description": "A Positive|Negative value that represents the image.",
|
|
1994
|
-
"multiple": "False",
|
|
1995
|
-
"name": "sentiment",
|
|
1996
|
-
"type": "str"
|
|
1997
|
-
},
|
|
1998
|
-
{
|
|
1999
|
-
"description": "Brief Description of the image",
|
|
2000
|
-
"multiple": "False",
|
|
2001
|
-
"name": "description",
|
|
2002
|
-
"type": "str"
|
|
2003
|
-
}
|
|
2004
|
-
]
|
|
2005
|
-
},
|
|
2006
|
-
"schema_name": {
|
|
2007
|
-
"_input_type": "MessageTextInput",
|
|
2008
|
-
"advanced": true,
|
|
2009
|
-
"display_name": "Schema Name",
|
|
2010
|
-
"dynamic": false,
|
|
2011
|
-
"info": "Provide a name for the output data schema.",
|
|
2012
|
-
"input_types": [
|
|
2013
|
-
"Message"
|
|
2014
|
-
],
|
|
2015
|
-
"list": false,
|
|
2016
|
-
"list_add_label": "Add More",
|
|
2017
|
-
"load_from_db": false,
|
|
2018
|
-
"name": "schema_name",
|
|
2019
|
-
"placeholder": "",
|
|
2020
|
-
"required": false,
|
|
2021
|
-
"show": true,
|
|
2022
|
-
"title_case": false,
|
|
2023
|
-
"tool_mode": false,
|
|
2024
|
-
"trace_as_input": true,
|
|
2025
|
-
"trace_as_metadata": true,
|
|
2026
|
-
"type": "str",
|
|
2027
|
-
"value": ""
|
|
2028
|
-
},
|
|
2029
|
-
"system_prompt": {
|
|
2030
|
-
"_input_type": "MultilineInput",
|
|
2031
|
-
"advanced": true,
|
|
2032
|
-
"copy_field": false,
|
|
2033
|
-
"display_name": "Format Instructions",
|
|
2034
|
-
"dynamic": false,
|
|
2035
|
-
"info": "The instructions to the language model for formatting the output.",
|
|
2036
|
-
"input_types": [
|
|
2037
|
-
"Message"
|
|
2038
|
-
],
|
|
2039
|
-
"list": false,
|
|
2040
|
-
"list_add_label": "Add More",
|
|
2041
|
-
"load_from_db": false,
|
|
2042
|
-
"multiline": true,
|
|
2043
|
-
"name": "system_prompt",
|
|
2044
|
-
"placeholder": "",
|
|
2045
|
-
"required": true,
|
|
2046
|
-
"show": true,
|
|
2047
|
-
"title_case": false,
|
|
2048
|
-
"tool_mode": false,
|
|
2049
|
-
"trace_as_input": true,
|
|
2050
|
-
"trace_as_metadata": true,
|
|
2051
|
-
"type": "str",
|
|
2052
|
-
"value": "You are an AI that extracts structured JSON objects from unstructured text. Use a predefined schema with expected types (str, int, float, bool, dict). Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. Fill missing or ambiguous values with defaults: null for missing values. Remove exact duplicates but keep variations that have different field values. Always return valid JSON in the expected format, never throw errors. If multiple objects can be extracted, return them all in the structured format."
|
|
2053
|
-
}
|
|
2054
|
-
},
|
|
2055
|
-
"tool_mode": false
|
|
2056
|
-
},
|
|
2057
|
-
"selected_output": "structured_output",
|
|
2058
|
-
"showNode": true,
|
|
2059
|
-
"type": "StructuredOutput"
|
|
2060
|
-
},
|
|
2061
|
-
"dragging": false,
|
|
2062
|
-
"id": "StructuredOutput-bek9G",
|
|
2063
|
-
"measured": {
|
|
2064
|
-
"height": 349,
|
|
2065
|
-
"width": 320
|
|
2066
|
-
},
|
|
2067
|
-
"position": {
|
|
2068
|
-
"x": 2013.2943902301881,
|
|
2069
|
-
"y": 328.8311104097772
|
|
2070
|
-
},
|
|
2071
|
-
"selected": true,
|
|
2072
|
-
"type": "genericNode"
|
|
2073
2028
|
}
|
|
2074
2029
|
],
|
|
2075
2030
|
"viewport": {
|
|
2076
|
-
"x": -
|
|
2077
|
-
"y":
|
|
2078
|
-
"zoom": 0.
|
|
2031
|
+
"x": -467.72360111043963,
|
|
2032
|
+
"y": 34.34652370932338,
|
|
2033
|
+
"zoom": 0.6104392467426164
|
|
2079
2034
|
}
|
|
2080
2035
|
},
|
|
2081
2036
|
"description": "Analyzes images and categorizes them as positive, negative, or neutral using zero-shot learning.",
|
|
2082
2037
|
"endpoint_name": null,
|
|
2083
|
-
"id": "
|
|
2038
|
+
"id": "c3b4019e-b2fe-4b7e-8df7-c85a47b5188b",
|
|
2084
2039
|
"is_component": false,
|
|
2085
|
-
"last_tested_version": "1.
|
|
2040
|
+
"last_tested_version": "1.7.0",
|
|
2086
2041
|
"name": "Image Sentiment Analysis",
|
|
2087
2042
|
"tags": [
|
|
2088
2043
|
"classification"
|