n8n-nodes-openai-compatible-wwrs 0.0.2 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/credentials/DeepSeekApi.credentials.ts +50 -0
  2. package/credentials/OllamaApi.credentials.ts +52 -0
  3. package/dist/credentials/DeepSeekApi.credentials.d.ts +12 -0
  4. package/dist/credentials/DeepSeekApi.credentials.js +46 -0
  5. package/dist/credentials/DeepSeekApi.credentials.ts +50 -0
  6. package/dist/credentials/OllamaApi.credentials.d.ts +12 -0
  7. package/dist/credentials/OllamaApi.credentials.js +48 -0
  8. package/dist/credentials/OllamaApi.credentials.ts +52 -0
  9. package/dist/nodes/llms/LMChatDeepSeek/LMChatDeepSeek.node.d.ts +9 -0
  10. package/dist/nodes/llms/LMChatDeepSeek/LMChatDeepSeek.node.js +151 -0
  11. package/dist/nodes/llms/LMChatDeepSeek/LMChatDeepSeek.node.ts +166 -0
  12. package/dist/nodes/llms/LMChatOllama/LMChatOllama.node.d.ts +9 -0
  13. package/dist/nodes/llms/LMChatOllama/LMChatOllama.node.js +157 -0
  14. package/dist/nodes/llms/LMChatOllama/LMChatOllama.node.ts +169 -0
  15. package/nodes/llms/LMChatDeepSeek/LMChatDeepSeek.node.ts +166 -0
  16. package/nodes/llms/LMChatOllama/LMChatOllama.node.ts +169 -0
  17. package/package.json +8 -4
  18. package/credentials/OpenAICompatibleApi.credentials.ts +0 -148
  19. package/dist/credentials/OpenAICompatibleApi.credentials.d.ts +0 -86
  20. package/dist/credentials/OpenAICompatibleApi.credentials.js +0 -145
  21. package/dist/credentials/OpenAICompatibleApi.credentials.ts +0 -148
  22. package/dist/nodes/OpenAICompatible/NodeDescription.js +0 -377
  23. package/dist/nodes/OpenAICompatible/OpenAICompatible.node.js +0 -406
  24. package/dist/nodes/OpenAICompatible/openai-compatible.svg +0 -87
  25. package/nodes/OpenAICompatible/NodeDescription.js +0 -377
  26. package/nodes/OpenAICompatible/OpenAICompatible.node.js +0 -406
  27. package/nodes/OpenAICompatible/openai-compatible.svg +0 -87
@@ -1,406 +0,0 @@
1
- /**
2
- * OpenAI Compatible Node for n8n
3
- * Supports multiple providers: OpenAI, Ollama, Gemini, Custom
4
- * Two modes:
5
- * 1. Language Model (Chat Model) - for AI Agent
6
- * 2. Message API - direct API calls
7
- */
8
-
9
- const { NodeConnectionTypes, NodeApiError, NodeOperationError } = require('n8n-workflow');
10
- const NodeDescription = require('./NodeDescription');
11
-
12
- class OpenAICompatible {
13
- constructor() {
14
- this.description = {
15
- displayName: 'OpenAI Compatible',
16
- name: 'openaiCompatible',
17
- icon: 'file:openai-compatible.svg',
18
- iconColor: 'dark-green',
19
- group: ['transform'],
20
- version: 2,
21
- subtitle: '={{$parameter["resource"] + ": " + $parameter["operation"]}}',
22
- description: 'Consume OpenAI-compatible APIs with multiple providers',
23
- defaults: {
24
- name: 'OpenAI Compatible',
25
- },
26
- inputs: [NodeConnectionTypes.Main],
27
- outputs: [NodeConnectionTypes.Main],
28
- credentials: [
29
- {
30
- name: 'openAICompatibleApi',
31
- required: true,
32
- },
33
- ],
34
- properties: [
35
- ...NodeDescription.resourceOperations,
36
- ...NodeDescription.languageModelOperations,
37
- ...NodeDescription.messageOperations,
38
- ...NodeDescription.languageModelFields,
39
- ...NodeDescription.messageFields,
40
- ],
41
- };
42
- }
43
-
44
- async execute() {
45
- const resource = this.getNodeParameter('resource', 0);
46
- const operation = this.getNodeParameter('operation', 0);
47
-
48
- if (resource === 'languageModel' && operation === 'chatModel') {
49
- return await this.executeChatModel();
50
- }
51
-
52
- if (resource === 'message' && operation === 'create') {
53
- return await this.executeMessageCreate();
54
- }
55
-
56
- throw new NodeOperationError(this.getNode(), `Unknown operation: ${resource}.${operation}`);
57
- }
58
-
59
- /**
60
- * Chat Model Mode - For AI Agent (LangChain compatible)
61
- */
62
- async executeChatModel() {
63
- const credentials = await this.getCredentials('openAICompatibleApi');
64
- const modelName = this.getNodeParameter('modelName', 0);
65
- const options = this.getNodeParameter('options', 0) || {};
66
-
67
- // Return a language model instance for AI Agent
68
- // This will be used by n8n's AI Agent node
69
- return {
70
- data: {
71
- modelName,
72
- temperature: options.temperature ?? 0.7,
73
- maxTokens: options.maxTokens ?? 1000,
74
- topP: options.topP ?? 1,
75
- frequencyPenalty: options.frequencyPenalty ?? 0,
76
- presencePenalty: options.presencePenalty ?? 0,
77
- provider: credentials.provider,
78
- baseUrl: credentials.baseUrl,
79
- },
80
- };
81
- }
82
-
83
- /**
84
- * Message API Mode - Direct API calls
85
- */
86
- async executeMessageCreate() {
87
- const credentials = await this.getCredentials('openAICompatibleApi');
88
- const model = this.getNodeParameter('model', 0);
89
- const messagesInput = this.getNodeParameter('messages', 0);
90
- const options = this.getNodeParameter('options', 0) || {};
91
-
92
- // Extract messages from fixedCollection
93
- let messages = [];
94
- if (messagesInput.messages && Array.isArray(messagesInput.messages)) {
95
- messages = messagesInput.messages.map((msg) => ({
96
- role: msg.role,
97
- content: msg.content,
98
- }));
99
- }
100
-
101
- if (messages.length === 0) {
102
- throw new NodeOperationError(this.getNode(), 'At least one message is required');
103
- }
104
-
105
- // Build request body based on provider
106
- const body = this.buildRequestBody(credentials.provider, model, messages, options);
107
-
108
- // Make the API request
109
- const response = await this.makeApiRequest(credentials, body);
110
-
111
- // Process response based on provider and options
112
- return this.processResponse(response, credentials.provider, options);
113
- }
114
-
115
- /**
116
- * Build request body based on provider
117
- */
118
- buildRequestBody(provider, model, messages, options) {
119
- const body = {
120
- model,
121
- messages,
122
- };
123
-
124
- // Add optional parameters
125
- if (options.temperature !== undefined) {
126
- body.temperature = options.temperature;
127
- }
128
- if (options.maxTokens !== undefined) {
129
- body.max_tokens = options.maxTokens;
130
- }
131
- if (options.topP !== undefined) {
132
- body.top_p = options.topP;
133
- }
134
- if (options.topK !== undefined && options.topK > 0) {
135
- body.top_k = options.topK;
136
- }
137
- if (options.frequencyPenalty !== undefined) {
138
- body.frequency_penalty = options.frequencyPenalty;
139
- }
140
- if (options.presencePenalty !== undefined) {
141
- body.presence_penalty = options.presencePenalty;
142
- }
143
-
144
- // Provider-specific adjustments
145
- if (provider === 'gemini') {
146
- // Gemini uses different format
147
- body.contents = messages.map((msg) => ({
148
- role: msg.role === 'system' ? 'user' : msg.role,
149
- parts: [{ text: msg.content }],
150
- }));
151
- delete body.messages;
152
- }
153
-
154
- if (provider === 'ollama') {
155
- // Ollama specific parameters
156
- body.stream = false;
157
- }
158
-
159
- return body;
160
- }
161
-
162
- /**
163
- * Make API request based on provider
164
- */
165
- async makeApiRequest(credentials, body) {
166
- const { provider, baseUrl, apiKey, customHeaders } = credentials;
167
-
168
- let url = baseUrl.replace(/\/$/, '');
169
- let endpoint = '';
170
- let headers = {
171
- 'Content-Type': 'application/json',
172
- };
173
-
174
- // Configure based on provider
175
- switch (provider) {
176
- case 'openai':
177
- case 'custom':
178
- endpoint = `${url}/chat/completions`;
179
- if (apiKey) {
180
- headers['Authorization'] = `Bearer ${apiKey}`;
181
- }
182
- break;
183
-
184
- case 'ollama':
185
- endpoint = `${url}/api/chat`;
186
- break;
187
-
188
- case 'gemini':
189
- endpoint = `${url}/v1beta/models/${body.model}:generateContent`;
190
- if (apiKey) {
191
- headers['x-goog-api-key'] = apiKey;
192
- }
193
- break;
194
- }
195
-
196
- // Add custom headers if provided
197
- if (customHeaders && customHeaders.headers && Array.isArray(customHeaders.headers)) {
198
- for (const header of customHeaders.headers) {
199
- if (header.name && header.value) {
200
- headers[header.name] = header.value;
201
- }
202
- }
203
- }
204
-
205
- try {
206
- const response = await this.helpers.httpRequest({
207
- method: 'POST',
208
- url: endpoint,
209
- headers,
210
- body,
211
- json: true,
212
- timeout: 60000,
213
- });
214
-
215
- return response;
216
- } catch (error) {
217
- if (error.response) {
218
- throw new NodeApiError(this.getNode(), error.response);
219
- }
220
- throw error;
221
- }
222
- }
223
-
224
- /**
225
- * Process API response based on provider
226
- */
227
- processResponse(response, provider, options) {
228
- const simplifyOutput = options.simplifyOutput !== false;
229
-
230
- if (provider === 'gemini') {
231
- return this.processGeminiResponse(response, simplifyOutput);
232
- }
233
-
234
- if (provider === 'ollama') {
235
- return this.processOllamaResponse(response, simplifyOutput);
236
- }
237
-
238
- // OpenAI-compatible response
239
- if (simplifyOutput) {
240
- const choices = response.choices || [];
241
- const results = choices.map((choice, index) => ({
242
- json: {
243
- message: choice.message || choice.delta || {},
244
- index: choice.index || index,
245
- finishReason: choice.finish_reason,
246
- usage: response.usage,
247
- model: response.model,
248
- id: response.id,
249
- },
250
- }));
251
-
252
- return results;
253
- } else {
254
- return [
255
- {
256
- json: response,
257
- },
258
- ];
259
- }
260
- }
261
-
262
- processGeminiResponse(response, simplifyOutput) {
263
- if (simplifyOutput) {
264
- const candidates = response.candidates || [];
265
- const results = candidates.map((candidate, index) => ({
266
- json: {
267
- message: {
268
- role: 'assistant',
269
- content: candidate.content?.parts?.[0]?.text || '',
270
- },
271
- index: candidate.index || index,
272
- finishReason: candidate.finishReason,
273
- usage: response.usageMetadata,
274
- model: response.model,
275
- },
276
- }));
277
-
278
- return results;
279
- } else {
280
- return [
281
- {
282
- json: response,
283
- },
284
- ];
285
- }
286
- }
287
-
288
- processOllamaResponse(response, simplifyOutput) {
289
- if (simplifyOutput) {
290
- return [
291
- {
292
- json: {
293
- message: {
294
- role: response.message?.role || 'assistant',
295
- content: response.message?.content || '',
296
- },
297
- model: response.model,
298
- done: response.done,
299
- usage: response.usage,
300
- },
301
- },
302
- ];
303
- } else {
304
- return [
305
- {
306
- json: response,
307
- },
308
- ];
309
- }
310
- }
311
-
312
- /**
313
- * Load available models for the provider
314
- */
315
- async getModels() {
316
- const credentials = await this.getCredentials('openAICompatibleApi');
317
- const { provider, baseUrl, apiKey } = credentials;
318
-
319
- let url = baseUrl.replace(/\/$/, '');
320
- let endpoint = '';
321
- let headers = {
322
- 'Content-Type': 'application/json',
323
- };
324
-
325
- switch (provider) {
326
- case 'openai':
327
- case 'custom':
328
- endpoint = `${url}/models`;
329
- if (apiKey) {
330
- headers['Authorization'] = `Bearer ${apiKey}`;
331
- }
332
- break;
333
-
334
- case 'ollama':
335
- endpoint = `${url}/api/tags`;
336
- break;
337
-
338
- case 'gemini':
339
- // Gemini doesn't have a models endpoint, return static list
340
- return [
341
- {
342
- name: 'gemini-pro',
343
- value: 'gemini-pro',
344
- },
345
- {
346
- name: 'gemini-pro-vision',
347
- value: 'gemini-pro-vision',
348
- },
349
- ];
350
- }
351
-
352
- try {
353
- const response = await this.helpers.httpRequest({
354
- method: 'GET',
355
- url: endpoint,
356
- headers,
357
- json: true,
358
- });
359
-
360
- // Parse models based on provider
361
- if (provider === 'ollama') {
362
- return (response.models || []).map((model) => ({
363
- name: model.name,
364
- value: model.name,
365
- }));
366
- }
367
-
368
- return (response.data || []).map((model) => ({
369
- name: model.id,
370
- value: model.id,
371
- }));
372
- } catch (error) {
373
- // If models endpoint fails, return default models
374
- return this.getDefaultModels(provider);
375
- }
376
- }
377
-
378
- getDefaultModels(provider) {
379
- switch (provider) {
380
- case 'openai':
381
- case 'custom':
382
- return [
383
- { name: 'gpt-3.5-turbo', value: 'gpt-3.5-turbo' },
384
- { name: 'gpt-4', value: 'gpt-4' },
385
- { name: 'gpt-4-turbo-preview', value: 'gpt-4-turbo-preview' },
386
- { name: 'gpt-4o', value: 'gpt-4o' },
387
- { name: 'gpt-4o-mini', value: 'gpt-4o-mini' },
388
- ];
389
-
390
- case 'ollama':
391
- return [
392
- { name: 'llama2', value: 'llama2' },
393
- { name: 'llama3', value: 'llama3' },
394
- { name: 'mistral', value: 'mistral' },
395
- { name: 'codellama', value: 'codellama' },
396
- ];
397
-
398
- default:
399
- return [];
400
- }
401
- }
402
- }
403
-
404
- module.exports = {
405
- OpenAICompatible,
406
- };
@@ -1,87 +0,0 @@
1
- <svg width="64" height="64" viewBox="0 0 64 64" fill="none" xmlns="http://www.w3.org/2000/svg">
2
- <defs>
3
- <!-- 主渐变:深绿到青色 -->
4
- <linearGradient id="mainGradient" x1="0%" y1="0%" x2="100%" y2="100%">
5
- <stop offset="0%" style="stop-color:#10A37F;stop-opacity:1" />
6
- <stop offset="50%" style="stop-color:#0D8A6A;stop-opacity:1" />
7
- <stop offset="100%" style="stop-color:#0A6B52;stop-opacity:1" />
8
- </linearGradient>
9
-
10
- <!-- 高光渐变 -->
11
- <linearGradient id="highlightGradient" x1="0%" y1="0%" x2="0%" y2="100%">
12
- <stop offset="0%" style="stop-color:#1DD8A9;stop-opacity:0.3" />
13
- <stop offset="100%" style="stop-color:#1DD8A9;stop-opacity:0" />
14
- </linearGradient>
15
-
16
- <!-- 内部发光 -->
17
- <filter id="glow">
18
- <feGaussianBlur stdDeviation="1.5" result="coloredBlur"/>
19
- <feMerge>
20
- <feMergeNode in="coloredBlur"/>
21
- <feMergeNode in="SourceGraphic"/>
22
- </feMerge>
23
- </filter>
24
- </defs>
25
-
26
- <!-- 背景圆角矩形 -->
27
- <rect x="2" y="2" width="60" height="60" rx="14" fill="url(#mainGradient)"/>
28
-
29
- <!-- 高光效果 -->
30
- <rect x="2" y="2" width="60" height="30" rx="14" fill="url(#highlightGradient)"/>
31
-
32
- <!-- AI 头像轮廓 -->
33
- <g filter="url(#glow)">
34
- <!-- 头部 -->
35
- <circle cx="32" cy="26" r="8" stroke="white" stroke-width="2.5" fill="none"/>
36
-
37
- <!-- 身体 -->
38
- <path d="M18 46
39
- C18 38.5 23 35 32 35
40
- C41 35 46 38.5 46 46"
41
- stroke="white"
42
- stroke-width="2.5"
43
- stroke-linecap="round"
44
- fill="none"/>
45
-
46
- <!-- 眼睛 - AI 特征 -->
47
- <circle cx="29" cy="25" r="1.5" fill="white"/>
48
- <circle cx="35" cy="25" r="1.5" fill="white"/>
49
-
50
- <!-- 大脑/AI 符号 -->
51
- <path d="M32 18
52
- L32 20
53
- M28 19 L29 21
54
- M36 19 L35 21"
55
- stroke="white"
56
- stroke-width="1.5"
57
- stroke-linecap="round"/>
58
- </g>
59
-
60
- <!-- 兼容性符号 - 右下角 -->
61
- <g transform="translate(42, 42)">
62
- <!-- 连接点背景 -->
63
- <circle cx="0" cy="0" r="10" fill="white" opacity="0.2"/>
64
-
65
- <!-- 插头图标 -->
66
- <path d="M-4 -2 L-4 4
67
- M4 -2 L4 4
68
- M-4 0 L4 0"
69
- stroke="white"
70
- stroke-width="2"
71
- stroke-linecap="round"
72
- fill="none"/>
73
-
74
- <!-- 小圆点表示连接 -->
75
- <circle cx="-4" cy="-2" r="1.5" fill="white"/>
76
- <circle cx="4" cy="-2" r="1.5" fill="white"/>
77
- </g>
78
-
79
- <!-- 装饰性电路纹路 - 左上角 -->
80
- <g opacity="0.15" stroke="white" stroke-width="1" fill="none">
81
- <circle cx="14" cy="14" r="2"/>
82
- <path d="M14 16 L14 22"/>
83
- <circle cx="14" cy="22" r="1.5"/>
84
- <path d="M14 14 L20 14"/>
85
- <circle cx="20" cy="14" r="1.5"/>
86
- </g>
87
- </svg>