n8n-nodes-openai-compatible-wwrs 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +379 -0
- package/credentials/OpenAICompatibleApi.credentials.ts +148 -0
- package/dist/credentials/OpenAICompatibleApi.credentials.d.ts +86 -0
- package/dist/credentials/OpenAICompatibleApi.credentials.js +145 -0
- package/dist/credentials/OpenAICompatibleApi.credentials.ts +148 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +7 -0
- package/dist/nodes/OpenAICompatible/NodeDescription.js +377 -0
- package/dist/nodes/OpenAICompatible/OpenAICompatible.node.js +406 -0
- package/dist/nodes/OpenAICompatible/openai-compatible.svg +87 -0
- package/dist/nodes/types.d.ts +79 -0
- package/dist/nodes/types.js +6 -0
- package/dist/nodes/types.ts +90 -0
- package/nodes/OpenAICompatible/NodeDescription.js +377 -0
- package/nodes/OpenAICompatible/OpenAICompatible.node.js +406 -0
- package/nodes/OpenAICompatible/openai-compatible.svg +87 -0
- package/nodes/types.ts +90 -0
- package/package.json +65 -0
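The listing ships both the TypeScript sources and the compiled dist/ output that n8n actually loads. As a point of reference, an n8n community package like this one conventionally points n8n at those compiled files through an "n8n" block in package.json. The snippet below is a hypothetical sketch of that block only, written as a JavaScript object literal; the real package/package.json is part of this diff but not reproduced in this section, so the exact contents may differ. The file paths are taken from the dist layout listed above.

// Hypothetical sketch of the conventional n8n community-package manifest shape;
// the actual package/package.json in this release may differ.
const manifestExcerpt = {
  name: 'n8n-nodes-openai-compatible-wwrs',
  version: '0.0.2',
  // The keyword below is what marks a package as an n8n community node package.
  keywords: ['n8n-community-node-package'],
  n8n: {
    n8nNodesApiVersion: 1,
    credentials: ['dist/credentials/OpenAICompatibleApi.credentials.js'],
    nodes: ['dist/nodes/OpenAICompatible/OpenAICompatible.node.js'],
  },
};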
NodeDescription.js (new file):
@@ -0,0 +1,377 @@

/**
 * Node Description for OpenAI Compatible Node
 * Defines all UI fields and operations
 */

const resourceOperations = [
  {
    displayName: 'Resource',
    name: 'resource',
    type: 'options',
    noDataExpression: true,
    options: [
      { name: 'Language Model', value: 'languageModel' },
      { name: 'Message', value: 'message' },
    ],
    default: 'languageModel',
  },
];

const languageModelOperations = [
  {
    displayName: 'Mode',
    name: 'operation',
    type: 'options',
    noDataExpression: true,
    displayOptions: {
      show: {
        resource: ['languageModel'],
      },
    },
    options: [
      {
        name: 'Chat Model',
        value: 'chatModel',
        description: 'Use as a chat model in AI Agent (LangChain compatible)',
        action: 'Chat Model',
      },
    ],
    default: 'chatModel',
  },
];

const messageOperations = [
  {
    displayName: 'Operation',
    name: 'operation',
    type: 'options',
    noDataExpression: true,
    displayOptions: {
      show: {
        resource: ['message'],
      },
    },
    options: [
      {
        name: 'Create',
        value: 'create',
        description: 'Create a chat completion',
        action: 'Create Message',
      },
    ],
    default: 'create',
  },
];

const languageModelFields = [
  {
    displayName: 'Model Name',
    name: 'modelName',
    type: 'string',
    description: 'The model to use (e.g., gpt-4, deepseek-chat, llama2)',
    displayOptions: {
      show: {
        resource: ['languageModel'],
        operation: ['chatModel'],
      },
    },
    default: 'gpt-3.5-turbo',
    required: true,
  },
  {
    displayName: 'Options',
    name: 'options',
    placeholder: 'Add Option',
    description: 'Additional options for the language model',
    type: 'collection',
    displayOptions: {
      show: {
        resource: ['languageModel'],
        operation: ['chatModel'],
      },
    },
    default: {},
    options: [
      {
        displayName: 'Temperature',
        name: 'temperature',
        type: 'number',
        description: 'What sampling temperature to use',
        typeOptions: { minValue: 0, maxValue: 2, numberPrecision: 1 },
        default: 0.7,
      },
      {
        displayName: 'Max Tokens',
        name: 'maxTokens',
        type: 'number',
        description: 'The maximum number of tokens to generate',
        typeOptions: { minValue: 1 },
        default: 1000,
      },
      {
        displayName: 'Top P',
        name: 'topP',
        type: 'number',
        description: 'An alternative to sampling with temperature',
        typeOptions: { minValue: 0, maxValue: 1, numberPrecision: 2 },
        default: 1,
      },
      {
        displayName: 'Frequency Penalty',
        name: 'frequencyPenalty',
        type: 'number',
        description: 'Number between -2.0 and 2.0',
        typeOptions: { minValue: -2, maxValue: 2, numberPrecision: 2 },
        default: 0,
      },
      {
        displayName: 'Presence Penalty',
        name: 'presencePenalty',
        type: 'number',
        description: 'Number between -2.0 and 2.0',
        typeOptions: { minValue: -2, maxValue: 2, numberPrecision: 2 },
        default: 0,
      },
    ],
  },
];

const messageFields = [
  {
    displayName: 'Model',
    name: 'model',
    type: 'options',
    description: 'The model which will generate the completion',
    displayOptions: {
      show: {
        resource: ['message'],
        operation: ['create'],
      },
    },
    typeOptions: {
      loadOptionsDependsOn: ['credentials.provider'],
      loadOptionsMethod: 'getModels',
    },
    default: 'gpt-3.5-turbo',
    options: [
      { name: 'GPT-3.5 Turbo', value: 'gpt-3.5-turbo' },
      { name: 'GPT-4', value: 'gpt-4' },
      { name: 'GPT-4 Turbo', value: 'gpt-4-turbo-preview' },
      { name: 'GPT-4o', value: 'gpt-4o' },
      { name: 'GPT-4o Mini', value: 'gpt-4o-mini' },
      { name: 'DeepSeek Chat', value: 'deepseek-chat' },
      { name: 'DeepSeek Coder', value: 'deepseek-coder' },
      { name: 'Llama 2', value: 'llama2' },
      { name: 'Llama 3', value: 'llama3' },
      { name: 'Mistral', value: 'mistral' },
    ],
  },
  {
    displayName: 'Messages',
    name: 'messages',
    type: 'fixedCollection',
    typeOptions: {
      sortable: true,
      multipleValues: true,
    },
    description: 'The messages to generate chat completions for',
    displayOptions: {
      show: {
        resource: ['message'],
        operation: ['create'],
      },
    },
    placeholder: 'Add Message',
    default: {},
    options: [
      {
        displayName: 'Messages',
        name: 'messages',
        values: [
          {
            displayName: 'Role',
            name: 'role',
            type: 'options',
            options: [
              { name: 'System', value: 'system' },
              { name: 'User', value: 'user' },
              { name: 'Assistant', value: 'assistant' },
            ],
            default: 'user',
          },
          {
            displayName: 'Content',
            name: 'content',
            type: 'string',
            typeOptions: { rows: 4 },
            default: '',
          },
        ],
      },
    ],
  },
  {
    displayName: 'Options',
    name: 'options',
    placeholder: 'Add Option',
    description: 'Additional options to add',
    type: 'collection',
    displayOptions: {
      show: {
        resource: ['message'],
        operation: ['create'],
      },
    },
    default: {},
    options: [
      {
        displayName: 'Simplify Output',
        name: 'simplifyOutput',
        type: 'boolean',
        default: true,
        description: 'Whether to return a simplified version of the response',
      },
      {
        displayName: 'Temperature',
        name: 'temperature',
        type: 'number',
        description: 'What sampling temperature to use',
        typeOptions: { minValue: 0, maxValue: 2, numberPrecision: 1 },
        default: 0.7,
      },
      {
        displayName: 'Max Tokens',
        name: 'maxTokens',
        type: 'number',
        description: 'The maximum number of tokens to generate',
        typeOptions: { minValue: 1 },
        default: 1000,
      },
      {
        displayName: 'Top P',
        name: 'topP',
        type: 'number',
        description: 'An alternative to sampling with temperature',
        typeOptions: { minValue: 0, maxValue: 1, numberPrecision: 2 },
        default: 1,
      },
      {
        displayName: 'Top K',
        name: 'topK',
        type: 'number',
        description: 'Top-k sampling parameter (for some providers)',
        typeOptions: { minValue: 1 },
        default: 0,
      },
      {
        displayName: 'Frequency Penalty',
        name: 'frequencyPenalty',
        type: 'number',
        description: 'Number between -2.0 and 2.0',
        typeOptions: { minValue: -2, maxValue: 2, numberPrecision: 2 },
        default: 0,
      },
      {
        displayName: 'Presence Penalty',
        name: 'presencePenalty',
        type: 'number',
        description: 'Number between -2.0 and 2.0',
        typeOptions: { minValue: -2, maxValue: 2, numberPrecision: 2 },
        default: 0,
      },
    ],
  },
];

module.exports = {
  resourceOperations,
  languageModelOperations,
  messageOperations,
  languageModelFields,
  messageFields,
};
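For context on how these exports are typically consumed: the arrays above are UI property definitions, and in an n8n node they are normally spread into the node description's properties array, while the loadOptionsMethod: 'getModels' reference on the Model field requires a matching methods.loadOptions.getModels implementation. The sketch below is a hypothetical outline of that wiring, not the package's actual OpenAICompatible.node.js (which is listed in the diff above but not shown in this section); the class name, node name, and credential name are assumptions.

// Hypothetical wiring sketch only; the package's real OpenAICompatible.node.js may differ.
const {
  resourceOperations,
  languageModelOperations,
  messageOperations,
  languageModelFields,
  messageFields,
} = require('./NodeDescription');

class OpenAICompatible {
  constructor() {
    this.description = {
      displayName: 'OpenAI Compatible', // assumed display name
      name: 'openAICompatible', // assumed internal name
      group: ['transform'],
      version: 1,
      inputs: ['main'],
      outputs: ['main'],
      credentials: [{ name: 'openAICompatibleApi', required: true }], // assumed credential name
      // The exported arrays become the node's UI fields, in display order.
      properties: [
        ...resourceOperations,
        ...languageModelOperations,
        ...messageOperations,
        ...languageModelFields,
        ...messageFields,
      ],
    };
    // The Model dropdown declares loadOptionsMethod: 'getModels', so the node
    // must expose a loadOptions method with that exact name.
    this.methods = {
      loadOptions: {
        async getModels() {
          // A real implementation would query the provider's models endpoint
          // and map each entry to { name, value }; stubbed here.
          return [];
        },
      },
    };
  }
}

module.exports = { OpenAICompatible };

Keeping the property definitions in a separate NodeDescription module, as this package does, leaves the node class itself focused on execution and option-loading logic, which is why the diff ships the two files side by side.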