@n8n/node-cli 0.20.0 → 0.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/build.tsbuildinfo +1 -1
- package/dist/commands/new/index.js +30 -1
- package/dist/commands/new/index.js.map +1 -1
- package/dist/commands/new/prompts.d.ts +2 -0
- package/dist/commands/new/prompts.js +46 -2
- package/dist/commands/new/prompts.js.map +1 -1
- package/dist/template/templates/index.d.ts +4 -0
- package/dist/template/templates/index.js +10 -2
- package/dist/template/templates/index.js.map +1 -1
- package/dist/template/templates/index.ts +8 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template/README.md +46 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template/nodes/ExampleChatMemory/ExampleChatMemory.node.json +18 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template/nodes/ExampleChatMemory/ExampleChatMemory.node.ts +84 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template/nodes/ExampleChatMemory/example.dark.svg +13 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template/nodes/ExampleChatMemory/example.svg +13 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template/nodes/ExampleChatMemory/memory.ts +29 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template/package.json +50 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template/tsconfig.json +25 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template.d.ts +1 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template.js +14 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template.js.map +1 -0
- package/dist/template/templates/programmatic/ai/memory-custom/template.ts +9 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/README.md +46 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/credentials/ExampleApi.credentials.ts +54 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/icons/example.dark.svg +13 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/icons/example.svg +13 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/nodes/ExampleChatModel/ExampleChatModel.node.json +18 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/nodes/ExampleChatModel/ExampleChatModel.node.ts +113 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/nodes/ExampleChatModel/model.ts +115 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/package.json +52 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template/tsconfig.json +25 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template.d.ts +1 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template.js +14 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template.js.map +1 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom/template.ts +9 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/README.md +46 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/credentials/ExampleApi.credentials.ts +52 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/icons/example.dark.svg +13 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/icons/example.svg +13 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/nodes/ExampleChatModel/ExampleChatModel.node.json +18 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/nodes/ExampleChatModel/ExampleChatModel.node.ts +114 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/nodes/ExampleChatModel/common.ts +43 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/nodes/ExampleChatModel/model.ts +534 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/nodes/ExampleChatModel/properties.ts +130 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/package.json +52 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template/tsconfig.json +25 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template.d.ts +1 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template.js +14 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template.js.map +1 -0
- package/dist/template/templates/programmatic/ai/model-ai-custom-example/template.ts +9 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template/README.md +46 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template/credentials/ExampleApi.credentials.ts +52 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template/icons/example.dark.svg +13 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template/icons/example.svg +13 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template/nodes/ExampleChatModel/ExampleChatModel.node.json +18 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template/nodes/ExampleChatModel/ExampleChatModel.node.ts +84 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template/package.json +52 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template/tsconfig.json +25 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template.d.ts +1 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template.js +14 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template.js.map +1 -0
- package/dist/template/templates/programmatic/ai/model-openai-compatible/template.ts +9 -0
- package/dist/template/templates/programmatic/example/template/nodes/Example/Example.node.ts +1 -1
- package/package.json +7 -6
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import type { IDataObject } from 'n8n-workflow';
|
|
2
|
+
import type { ProviderTool } from '@n8n/ai-node-sdk';
|
|
3
|
+
|
|
4
|
+
const toArray = (str: string) =>
|
|
5
|
+
str
|
|
6
|
+
.split(',')
|
|
7
|
+
.map((e) => e.trim())
|
|
8
|
+
.filter(Boolean);
|
|
9
|
+
|
|
10
|
+
/**
 * Converts the node's "Built-in Tools" collection parameter into provider-tool
 * descriptors understood by the AI node SDK.
 *
 * Currently only the `webSearch` sub-collection is handled: its options are
 * mapped onto a single `web_search` ProviderTool entry.
 *
 * @param builtInTools - Raw value of the `builtInTools` node parameter.
 * @returns Zero or one ProviderTool entries (empty array when web search is not configured).
 */
export const formatBuiltInTools = (builtInTools: IDataObject): ProviderTool[] => {
	const tools: ProviderTool[] = [];
	if (builtInTools) {
		const webSearchOptions = builtInTools.webSearch as IDataObject;
		if (webSearchOptions) {
			// Comma-separated domain list -> string[]; left undefined when the field is empty.
			let allowedDomains: string[] | undefined;
			const allowedDomainsRaw = webSearchOptions.allowedDomains as string;
			if (allowedDomainsRaw) {
				allowedDomains = toArray(allowedDomainsRaw);
			}

			// Only build a user_location object when at least one location field is set.
			let userLocation: IDataObject | undefined;
			if (webSearchOptions.country || webSearchOptions.city || webSearchOptions.region) {
				userLocation = {
					type: 'approximate',
					country: webSearchOptions.country as string,
					city: webSearchOptions.city as string,
					region: webSearchOptions.region as string,
				};
			}

			tools.push({
				type: 'provider',
				name: 'web_search',
				args: {
					search_context_size: webSearchOptions.searchContextSize as string,
					user_location: userLocation,
					// `filters` is only included when domains were provided (conditional spread).
					...(allowedDomains && { filters: { allowed_domains: allowedDomains } }),
				},
			});
		}
	}
	return tools;
};
|
|
@@ -0,0 +1,534 @@
|
|
|
1
|
+
import type { IHttpRequestMethods } from 'n8n-workflow';
|
|
2
|
+
import {
|
|
3
|
+
BaseChatModel,
|
|
4
|
+
getParametersJsonSchema,
|
|
5
|
+
parseSSEStream,
|
|
6
|
+
type TokenUsage,
|
|
7
|
+
type Tool,
|
|
8
|
+
type ToolCall,
|
|
9
|
+
type ChatModelConfig,
|
|
10
|
+
type GenerateResult,
|
|
11
|
+
type Message,
|
|
12
|
+
type MessageContent,
|
|
13
|
+
type ProviderTool,
|
|
14
|
+
type StreamChunk,
|
|
15
|
+
} from '@n8n/ai-node-sdk';
|
|
16
|
+
|
|
17
|
+
// Types
//
// Minimal local typings for the OpenAI Responses API
// (request/response shapes and stream events used by this node).

/** Tool definition as sent to the Responses API: a function tool or the built-in web search. */
type OpenAITool =
	| {
			type: 'function';
			name: string;
			description?: string;
			parameters: unknown;
			strict?: boolean;
	  }
	| {
			type: 'web_search';
	  };

/** Tool-choice directive accepted by the Responses API. */
type OpenAIToolChoice = 'auto' | 'required' | 'none' | { type: 'function'; name: string };

/** One item of the `input` array in a Responses API request. */
type ResponsesInputItem =
	| { role: 'user'; content: string }
	| { role: 'user'; content: Array<{ type: 'input_text'; text: string }> }
	| {
			type: 'message';
			role: 'assistant';
			content: Array<{ type: 'output_text'; text: string }>;
	  }
	| {
			type: 'function_call';
			call_id: string;
			name: string;
			arguments: string;
	  }
	| { type: 'function_call_output'; call_id: string; output: string };

/** Request body for POST /responses. */
interface OpenAIResponsesRequest {
	model: string;
	input: string | ResponsesInputItem[];
	instructions?: string;
	max_output_tokens?: number;
	temperature?: number;
	top_p?: number;
	tools?: OpenAITool[];
	tool_choice?: OpenAIToolChoice;
	parallel_tool_calls?: boolean;
	store?: boolean;
	stream?: boolean;
	metadata?: Record<string, unknown>;
}

/** Non-streaming response body returned by POST /responses. */
interface OpenAIResponsesResponse {
	id: string;
	object: string;
	created_at: string;
	model: string;
	output: ResponsesOutputItem[];
	status: string;
	usage?: {
		input_tokens: number;
		output_tokens: number;
		total_tokens: number;
		input_tokens_details?: {
			cached_tokens?: number;
		};
		output_tokens_details?: {
			reasoning_tokens?: number;
		};
	};
	incomplete_details?: Record<string, unknown>;
	metadata?: Record<string, unknown>;
	user?: string;
	service_tier?: string;
}

/** One item of the `output` array: assistant message, function call, or reasoning summary. */
type ResponsesOutputItem =
	| {
			type: 'message';
			role: 'assistant';
			id?: string;
			content: Array<{
				type: 'output_text';
				text: string;
			}>;
	  }
	| {
			type: 'function_call';
			id?: string;
			call_id: string;
			name: string;
			arguments: string;
	  }
	| {
			type: 'reasoning';
			id?: string;
			summary: Array<{
				type: string;
				text: string;
			}>;
	  };

/**
 * Loosely-typed server-sent event from the streaming Responses API.
 * Fields are optional because their presence depends on the event `type`.
 */
interface OpenAIStreamEvent {
	type: string;
	delta?: string;
	output_index?: number;
	item?: Record<string, unknown>;
	response?: Record<string, unknown>;
}
|
|
120
|
+
|
|
121
|
+
// Helpers
|
|
122
|
+
|
|
123
|
+
/**
 * Parses a raw SSE byte stream from the Responses API into typed stream events.
 *
 * Skips empty messages and the terminal `[DONE]` sentinel. Malformed JSON
 * payloads are deliberately ignored (best-effort streaming) rather than
 * aborting the whole stream.
 *
 * @param body - Async iterator over raw response chunks.
 * @yields One parsed {@link OpenAIStreamEvent} per valid SSE data payload.
 */
async function* parseOpenAIStreamEvents(
	body: AsyncIterableIterator<Buffer | Uint8Array>,
): AsyncIterable<OpenAIStreamEvent> {
	for await (const message of parseSSEStream(body)) {
		if (!message.data) continue;
		if (message.data === '[DONE]') continue;

		try {
			const event = JSON.parse(message.data);
			yield event as OpenAIStreamEvent;
			// eslint-disable-next-line @typescript-eslint/no-unused-vars
		} catch (e) {
			// ignore error
		}
	}
}
|
|
139
|
+
|
|
140
|
+
/**
 * Converts SDK-generic chat messages into the Responses API request shape.
 *
 * Mapping rules:
 * - `system` text parts are collected into the top-level `instructions` string
 *   (joined with blank lines), not into `input`.
 * - `user` text parts become `{ role: 'user', content }` items.
 * - `assistant` text and `reasoning` parts become assistant `message` items
 *   (reasoning is replayed as plain output text); `tool-call` parts become
 *   `function_call` items and require a tool-call ID.
 * - `tool` results become `function_call_output` items (non-string results are
 *   JSON-stringified).
 *
 * As a shorthand, a single plain user message is returned as a bare string
 * `input` instead of a one-element array.
 *
 * @throws Error when an assistant tool-call part is missing `toolCallId`.
 */
function genericMessagesToResponsesInput(messages: Message[]): {
	instructions?: string;
	input: string | ResponsesInputItem[];
} {
	const instructionsParts: string[] = [];
	const inputItems: ResponsesInputItem[] = [];

	for (const msg of messages) {
		if (msg.role === 'system') {
			for (const contentPart of msg.content) {
				if (contentPart.type === 'text') {
					instructionsParts.push(contentPart.text);
				}
			}
		}

		if (msg.role === 'user') {
			for (const contentPart of msg.content) {
				if (contentPart.type === 'text') {
					inputItems.push({
						role: 'user',
						content: contentPart.text,
					});
				}
			}
			continue;
		}

		if (msg.role === 'assistant') {
			for (const contentPart of msg.content) {
				if (contentPart.type === 'text') {
					inputItems.push({
						type: 'message',
						role: 'assistant',
						content: [
							{
								type: 'output_text',
								text: contentPart.text,
							},
						],
					});
				} else if (contentPart.type === 'tool-call') {
					if (!contentPart.toolCallId) {
						throw new Error('Tool call ID is required');
					}
					inputItems.push({
						type: 'function_call',
						call_id: contentPart.toolCallId,
						name: contentPart.toolName,
						arguments: contentPart.input,
					});
				} else if (contentPart.type === 'reasoning') {
					// Reasoning is replayed to the API as ordinary assistant output text.
					inputItems.push({
						type: 'message',
						role: 'assistant',
						content: [
							{
								type: 'output_text',
								text: contentPart.text,
							},
						],
					});
				}
			}
		}

		if (msg.role === 'tool') {
			for (const contentPart of msg.content) {
				if (contentPart.type === 'tool-result') {
					// The API expects a string output; serialize structured results.
					const output =
						typeof contentPart.result === 'string'
							? contentPart.result
							: JSON.stringify(contentPart.result);
					inputItems.push({
						type: 'function_call_output',
						call_id: contentPart.toolCallId,
						output,
					});
				}
			}
		}
	}

	const instructions = instructionsParts.length > 0 ? instructionsParts.join('\n\n') : undefined;

	// Collapse a lone plain user message to the string form of `input`.
	const single = inputItems[0];
	if (
		inputItems.length === 1 &&
		single &&
		'role' in single &&
		single.role === 'user' &&
		typeof single.content === 'string'
	) {
		return { instructions, input: single.content };
	}
	return { instructions, input: inputItems };
}
|
|
237
|
+
|
|
238
|
+
/**
 * Converts an SDK-generic tool definition into the Responses API tool format.
 *
 * Provider tools: only `web_search` is supported; its `args` are spread onto
 * the API tool object. Any other provider tool name throws.
 * Regular tools become `function` tools with a JSON-schema parameter spec.
 *
 * @throws Error for unsupported provider tool names.
 */
function genericToolToResponsesTool(tool: Tool): OpenAITool {
	if (tool.type === 'provider') {
		if (tool.name === 'web_search') {
			return {
				type: 'web_search',
				...tool.args,
			};
		}
		throw new Error(`Unsupported provider tool: ${tool.name}`);
	}
	const parameters = getParametersJsonSchema(tool);
	return {
		type: 'function',
		name: tool.name,
		description: tool.description,
		parameters,
		strict: tool.strict,
	};
}
|
|
257
|
+
|
|
258
|
+
function parseResponsesOutput(output: ResponsesOutputItem[]): {
|
|
259
|
+
text: string;
|
|
260
|
+
toolCalls: ToolCall[];
|
|
261
|
+
} {
|
|
262
|
+
let text = '';
|
|
263
|
+
const toolCalls: ToolCall[] = [];
|
|
264
|
+
|
|
265
|
+
for (const item of output) {
|
|
266
|
+
if (item.type === 'message' && item.role === 'assistant') {
|
|
267
|
+
for (const block of item.content) {
|
|
268
|
+
if (block.type === 'output_text') {
|
|
269
|
+
text += block.text;
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
}
|
|
273
|
+
if (item.type === 'function_call') {
|
|
274
|
+
try {
|
|
275
|
+
toolCalls.push({
|
|
276
|
+
id: item.call_id,
|
|
277
|
+
name: item.name,
|
|
278
|
+
arguments: JSON.parse(item.arguments) as Record<string, unknown>,
|
|
279
|
+
argumentsRaw: item.arguments,
|
|
280
|
+
});
|
|
281
|
+
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
|
282
|
+
} catch (e) {
|
|
283
|
+
throw new Error(`Failed to parse function call arguments: ${item.arguments}`);
|
|
284
|
+
}
|
|
285
|
+
}
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
return { text, toolCalls };
|
|
289
|
+
}
|
|
290
|
+
|
|
291
|
+
function parseTokenUsage(
|
|
292
|
+
usage: OpenAIResponsesResponse['usage'] | undefined,
|
|
293
|
+
): TokenUsage | undefined {
|
|
294
|
+
return usage
|
|
295
|
+
? {
|
|
296
|
+
promptTokens: usage.input_tokens ?? 0,
|
|
297
|
+
completionTokens: usage.output_tokens ?? 0,
|
|
298
|
+
totalTokens: usage.total_tokens ?? 0,
|
|
299
|
+
inputTokenDetails: {
|
|
300
|
+
...(!!usage.input_tokens_details?.cached_tokens && {
|
|
301
|
+
cacheRead: usage.input_tokens_details.cached_tokens,
|
|
302
|
+
}),
|
|
303
|
+
},
|
|
304
|
+
outputTokenDetails: {
|
|
305
|
+
...(!!usage.output_tokens_details?.reasoning_tokens && {
|
|
306
|
+
reasoning: usage.output_tokens_details.reasoning_tokens,
|
|
307
|
+
}),
|
|
308
|
+
},
|
|
309
|
+
}
|
|
310
|
+
: undefined;
|
|
311
|
+
}
|
|
312
|
+
|
|
313
|
+
/** Chat-model configuration extended with OpenAI-specific connection options. */
interface OpenAIChatModelConfig extends ChatModelConfig {
	/** API key — NOTE(review): not read in this file; presumably consumed by the request layer. */
	apiKey?: string;
	/** API base URL; defaults to https://api.openai.com/v1 when omitted. */
	baseURL?: string;

	/** Provider-native tools (e.g. web_search) to expose alongside regular tools. */
	providerTools?: ProviderTool[];
}

/**
 * Transport abstraction injected by the node: one function for plain JSON
 * requests and one that opens a raw byte stream (used for SSE).
 */
interface RequestConfig {
	httpRequest: (
		method: IHttpRequestMethods,
		url: string,
		body?: object,
		headers?: Record<string, string>,
	) => Promise<{ body: unknown }>;
	openStream: (
		method: IHttpRequestMethods,
		url: string,
		body?: object,
		headers?: Record<string, string>,
	) => Promise<{ body: AsyncIterableIterator<Buffer | Uint8Array> }>;
}
|
|
334
|
+
|
|
335
|
+
/**
 * Chat model implementation backed by the OpenAI Responses API.
 *
 * HTTP transport is injected via {@link RequestConfig}, so the class itself
 * holds no credentials — authentication headers are presumably added by the
 * injected request functions (not visible here).
 */
export class OpenAIChatModel extends BaseChatModel<OpenAIChatModelConfig> {
	// Resolved API root, e.g. https://api.openai.com/v1.
	private baseURL: string;

	constructor(
		modelId: string = 'gpt-4o',
		private requests: RequestConfig,
		config?: OpenAIChatModelConfig,
	) {
		super('openai', modelId, config);
		this.baseURL = config?.baseURL ?? 'https://api.openai.com/v1';
	}

	/**
	 * Combines the model's registered tools with provider tools from the
	 * per-call config (falling back to the default config) and converts them
	 * all to the Responses API tool format.
	 */
	private getTools(config?: OpenAIChatModelConfig) {
		const ownTools = this.tools;
		const providerTools = config?.providerTools ?? this.defaultConfig?.providerTools ?? [];
		return [...ownTools, ...providerTools].map(genericToolToResponsesTool);
	}

	/**
	 * Performs a single non-streaming completion via POST /responses.
	 *
	 * @param messages - Conversation history in SDK-generic form.
	 * @param config - Optional per-call overrides, merged over the defaults.
	 * @returns The assistant message (tool calls first, then text), token
	 *          usage, and provider metadata including any reasoning output.
	 */
	async generate(messages: Message[], config?: OpenAIChatModelConfig): Promise<GenerateResult> {
		const merged = this.mergeConfig(config);
		const { instructions, input } = genericMessagesToResponsesInput(messages);
		const tools = this.getTools(config);
		const requestBody: OpenAIResponsesRequest = {
			model: this.modelId,
			input,
			instructions,
			max_output_tokens: merged.maxTokens,
			temperature: merged.temperature,
			top_p: merged.topP,
			tools,
			parallel_tool_calls: true,
			store: false,
			stream: false,
		};

		const response = await this.requests.httpRequest(
			'POST',
			`${this.baseURL}/responses`,
			requestBody,
		);
		const body = response.body as OpenAIResponsesResponse;

		const { text, toolCalls } = parseResponsesOutput(body.output);

		const usage = parseTokenUsage(body.usage);

		const responseMetadata: Record<string, unknown> = {
			model_provider: 'openai',
			model: body.model,
			created_at: body.created_at,
			id: body.id,
			incomplete_details: body.incomplete_details,
			metadata: body.metadata,
			object: body.object,
			status: body.status,
			user: body.user,
			service_tier: body.service_tier,
			model_name: body.model,
			output: body.output,
		};

		// Surface the (last) reasoning output item in the metadata, if present.
		for (const item of body.output as unknown[]) {
			const o = item as Record<string, unknown>;
			if (o.type === 'reasoning') {
				responseMetadata.reasoning = o;
			}
		}

		// Assemble the assistant message: tool-call parts first, then the text part.
		const content: MessageContent[] = [];
		if (toolCalls.length) {
			for (const toolCall of toolCalls) {
				content.push({
					type: 'tool-call',
					toolCallId: toolCall.id,
					toolName: toolCall.name,
					input: JSON.stringify(toolCall.arguments),
				});
			}
		}
		content.push({ type: 'text', text });

		const message: Message = {
			role: 'assistant',
			content,
			id: body.id,
		};

		return {
			id: body.id,
			finishReason: body.status === 'completed' ? 'stop' : 'other',
			usage,
			message,
			rawResponse: body,
			providerMetadata: responseMetadata,
		};
	}

	/**
	 * Streams a completion via POST /responses with `stream: true`, yielding
	 * SDK stream chunks as SSE events arrive.
	 *
	 * Function-call arguments are accumulated per `output_index` in
	 * `toolCallBuffers` and emitted as one `tool-call-delta` when the item
	 * completes; a `finish` chunk with usage is emitted on response.done /
	 * response.completed.
	 */
	async *stream(messages: Message[], config?: OpenAIChatModelConfig): AsyncIterable<StreamChunk> {
		const merged = this.mergeConfig(config) as OpenAIChatModelConfig;
		const { instructions, input } = genericMessagesToResponsesInput(messages);

		const tools = this.getTools(config);

		const requestBody: OpenAIResponsesRequest = {
			model: this.modelId,
			input,
			instructions,
			max_output_tokens: merged.maxTokens,
			temperature: merged.temperature,
			top_p: merged.topP,
			tools,
			parallel_tool_calls: true,
			store: false,
			stream: true,
		};

		const streamResponse = await this.requests.openStream(
			'POST',
			`${this.baseURL}/responses`,
			requestBody,
		);
		const streamBody = streamResponse.body;

		// Accumulates partial function-call data keyed by the event's output_index.
		const toolCallBuffers: Record<number, { name: string; arguments: string }> = {};

		for await (const event of parseOpenAIStreamEvents(streamBody)) {
			const type = event.type;

			if (type === 'response.output_text.delta') {
				const delta = event.delta;
				if (delta) {
					yield { type: 'text-delta', delta };
				}
			}

			if (type === 'response.output_item.added') {
				const item = event.item;
				if (item?.type === 'function_call') {
					const idx = event.output_index ?? 0;
					toolCallBuffers[idx] = {
						name: (item.name as string) ?? '',
						arguments: (item.arguments as string) ?? '',
					};
				}
				if (item?.type === 'reasoning') {
					// Reasoning summaries may arrive whole on item.added; join and forward them.
					const summary = (item.summary as Array<Record<string, unknown>>) ?? [];
					const reasoningText = summary
						.map((s) => s.text)
						.filter(Boolean)
						.join('');
					if (reasoningText) {
						yield { type: 'reasoning-delta', delta: reasoningText };
					}
				}
			}

			if (type === 'response.reasoning_summary_text.delta') {
				const delta = event.delta;
				if (delta) {
					yield { type: 'reasoning-delta', delta };
				}
			}

			if (type === 'response.function_call_arguments.delta') {
				const idx = event.output_index ?? 0;
				const delta = event.delta;
				if (toolCallBuffers[idx] && delta) {
					toolCallBuffers[idx].arguments += delta;
				}
			}

			if (type === 'response.output_item.done') {
				const item = event.item;
				if (item?.type === 'function_call') {
					const idx = event.output_index ?? 0;
					const buf = toolCallBuffers[idx];
					if (buf) {
						// Emit the fully accumulated call as a single delta chunk.
						yield {
							type: 'tool-call-delta',
							id: (item.call_id as string) ?? (item.id as string),
							name: buf.name,
							argumentsDelta: buf.arguments,
						};
					}
				}
			}

			if (type === 'response.done' || type === 'response.completed') {
				// Some servers nest the final response under `event.response`; fall back to the event itself.
				const responseData =
					(event.response as unknown as OpenAIResponsesResponse) ??
					(event as unknown as OpenAIResponsesResponse);
				yield {
					type: 'finish',
					finishReason: 'stop',
					usage: parseTokenUsage(responseData.usage),
				};
			}
		}
	}
}
|
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
import type { INodeProperties } from 'n8n-workflow';
|
|
2
|
+
|
|
3
|
+
/**
 * Declarative n8n node properties for the OpenAI-compatible chat model node:
 * model selection (dynamically loaded from GET /models), built-in tool
 * configuration (web search), and sampling options.
 */
export const openAiProperties: INodeProperties[] = [
	{
		displayName: 'Model',
		name: 'model',
		type: 'options',
		description:
			'The model which will generate the completion. <a href="https://beta.openai.com/docs/models/overview">Learn more</a>.',
		typeOptions: {
			loadOptions: {
				routing: {
					request: {
						method: 'GET',
						// NOTE(review): takes only the LAST path segment of baseURL/credential URL
						// (falling back to "v1") and requests "<segment>/models" — looks like it relies
						// on the credential's base URL being applied by the request layer; verify.
						url: '={{ $parameter.options?.baseURL?.split("/").slice(-1).pop() || $credentials?.url?.split("/").slice(-1).pop() || "v1" }}/models',
					},
					output: {
						postReceive: [
							// Unwrap the model list from the response's `data` property.
							{
								type: 'rootProperty',
								properties: {
									property: 'data',
								},
							},
							// Use the model ID as both option name and value.
							{
								type: 'setKeyValue',
								properties: {
									name: '={{$responseItem.id}}',
									value: '={{$responseItem.id}}',
								},
							},
							{
								type: 'sort',
								properties: {
									key: 'name',
								},
							},
						],
					},
				},
			},
		},
		// Selected model is sent as the `model` field of the request body.
		routing: {
			send: {
				type: 'body',
				property: 'model',
			},
		},
		default: 'gpt-5-mini',
	},
	{
		displayName: 'Built-in Tools',
		name: 'builtInTools',
		placeholder: 'Add Built-in Tool',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Web Search',
				name: 'webSearch',
				type: 'collection',
				default: { searchContextSize: 'medium' },
				options: [
					{
						displayName: 'Search Context Size',
						name: 'searchContextSize',
						type: 'options',
						default: 'medium',
						description:
							'High level guidance for the amount of context window space to use for the search',
						options: [
							{ name: 'Low', value: 'low' },
							{ name: 'Medium', value: 'medium' },
							{ name: 'High', value: 'high' },
						],
					},
					{
						displayName: 'Web Search Allowed Domains',
						name: 'allowedDomains',
						type: 'string',
						default: '',
						description:
							'Comma-separated list of domains to search. Only domains in this list will be searched.',
						placeholder: 'e.g. google.com, wikipedia.org',
					},
					{
						displayName: 'Country',
						name: 'country',
						type: 'string',
						default: '',
						placeholder: 'e.g. US, GB',
					},
					{
						displayName: 'City',
						name: 'city',
						type: 'string',
						default: '',
						placeholder: 'e.g. New York, London',
					},
					{
						displayName: 'Region',
						name: 'region',
						type: 'string',
						default: '',
						placeholder: 'e.g. New York, London',
					},
				],
			},
		],
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		description: 'Additional options to add',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Sampling Temperature',
				name: 'temperature',
				default: 0.7,
				typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
				description:
					'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
				type: 'number',
			},
		],
	},
];
|