@n8n/ai-utilities 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/LICENSE.md +88 -0
  2. package/README.md +468 -0
  3. package/dist/adapters/langchain-chat-model.d.ts +20 -0
  4. package/dist/adapters/langchain-chat-model.js +180 -0
  5. package/dist/adapters/langchain-chat-model.js.map +1 -0
  6. package/dist/adapters/langchain-history.d.ts +12 -0
  7. package/dist/adapters/langchain-history.js +27 -0
  8. package/dist/adapters/langchain-history.js.map +1 -0
  9. package/dist/adapters/langchain-memory.d.ts +11 -0
  10. package/dist/adapters/langchain-memory.js +36 -0
  11. package/dist/adapters/langchain-memory.js.map +1 -0
  12. package/dist/build.tsbuildinfo +1 -0
  13. package/dist/chat-model/base.d.ts +15 -0
  14. package/dist/chat-model/base.js +25 -0
  15. package/dist/chat-model/base.js.map +1 -0
  16. package/dist/converters/message.d.ts +5 -0
  17. package/dist/converters/message.js +365 -0
  18. package/dist/converters/message.js.map +1 -0
  19. package/dist/converters/tool.d.ts +5 -0
  20. package/dist/converters/tool.js +59 -0
  21. package/dist/converters/tool.js.map +1 -0
  22. package/dist/guards.d.ts +8 -0
  23. package/dist/guards.js +27 -0
  24. package/dist/guards.js.map +1 -0
  25. package/dist/index.d.ts +28 -0
  26. package/dist/index.js +56 -0
  27. package/dist/index.js.map +1 -0
  28. package/dist/memory/base-chat-history.d.ts +8 -0
  29. package/dist/memory/base-chat-history.js +12 -0
  30. package/dist/memory/base-chat-history.js.map +1 -0
  31. package/dist/memory/base-chat-memory.d.ts +8 -0
  32. package/dist/memory/base-chat-memory.js +7 -0
  33. package/dist/memory/base-chat-memory.js.map +1 -0
  34. package/dist/memory/windowed-chat-memory.d.ts +14 -0
  35. package/dist/memory/windowed-chat-memory.js +38 -0
  36. package/dist/memory/windowed-chat-memory.js.map +1 -0
  37. package/dist/suppliers/supplyMemory.d.ts +6 -0
  38. package/dist/suppliers/supplyMemory.js +14 -0
  39. package/dist/suppliers/supplyMemory.js.map +1 -0
  40. package/dist/suppliers/supplyModel.d.ts +15 -0
  41. package/dist/suppliers/supplyModel.js +78 -0
  42. package/dist/suppliers/supplyModel.js.map +1 -0
  43. package/dist/types/chat-model.d.ts +26 -0
  44. package/dist/types/chat-model.js +3 -0
  45. package/dist/types/chat-model.js.map +1 -0
  46. package/dist/types/json.d.ts +5 -0
  47. package/dist/types/json.js +3 -0
  48. package/dist/types/json.js.map +1 -0
  49. package/dist/types/memory.d.ts +13 -0
  50. package/dist/types/memory.js +3 -0
  51. package/dist/types/memory.js.map +1 -0
  52. package/dist/types/message.d.ts +49 -0
  53. package/dist/types/message.js +3 -0
  54. package/dist/types/message.js.map +1 -0
  55. package/dist/types/openai.d.ts +39 -0
  56. package/dist/types/openai.js +3 -0
  57. package/dist/types/openai.js.map +1 -0
  58. package/dist/types/output.d.ts +37 -0
  59. package/dist/types/output.js +3 -0
  60. package/dist/types/output.js.map +1 -0
  61. package/dist/types/tool.d.ts +28 -0
  62. package/dist/types/tool.js +3 -0
  63. package/dist/types/tool.js.map +1 -0
  64. package/dist/utils/embeddings-input-validation.d.ts +3 -0
  65. package/dist/utils/embeddings-input-validation.js +28 -0
  66. package/dist/utils/embeddings-input-validation.js.map +1 -0
  67. package/dist/utils/failed-attempt-handler/n8nDefaultFailedAttemptHandler.d.ts +1 -0
  68. package/dist/utils/failed-attempt-handler/n8nDefaultFailedAttemptHandler.js +30 -0
  69. package/dist/utils/failed-attempt-handler/n8nDefaultFailedAttemptHandler.js.map +1 -0
  70. package/dist/utils/failed-attempt-handler/n8nLlmFailedAttemptHandler.d.ts +3 -0
  71. package/dist/utils/failed-attempt-handler/n8nLlmFailedAttemptHandler.js +28 -0
  72. package/dist/utils/failed-attempt-handler/n8nLlmFailedAttemptHandler.js.map +1 -0
  73. package/dist/utils/helpers.d.ts +3 -0
  74. package/dist/utils/helpers.js +53 -0
  75. package/dist/utils/helpers.js.map +1 -0
  76. package/dist/utils/http-proxy-agent.d.ts +10 -0
  77. package/dist/utils/http-proxy-agent.js +48 -0
  78. package/dist/utils/http-proxy-agent.js.map +1 -0
  79. package/dist/utils/log-ai-event.d.ts +2 -0
  80. package/dist/utils/log-ai-event.js +13 -0
  81. package/dist/utils/log-ai-event.js.map +1 -0
  82. package/dist/utils/log-wrapper.d.ts +28 -0
  83. package/dist/utils/log-wrapper.js +329 -0
  84. package/dist/utils/log-wrapper.js.map +1 -0
  85. package/dist/utils/n8n-binary-loader.d.ts +18 -0
  86. package/dist/utils/n8n-binary-loader.js +159 -0
  87. package/dist/utils/n8n-binary-loader.js.map +1 -0
  88. package/dist/utils/n8n-json-loader.d.ts +11 -0
  89. package/dist/utils/n8n-json-loader.js +66 -0
  90. package/dist/utils/n8n-json-loader.js.map +1 -0
  91. package/dist/utils/n8n-llm-tracing.d.ts +46 -0
  92. package/dist/utils/n8n-llm-tracing.js +157 -0
  93. package/dist/utils/n8n-llm-tracing.js.map +1 -0
  94. package/dist/utils/sse.d.ts +8 -0
  95. package/dist/utils/sse.js +107 -0
  96. package/dist/utils/sse.js.map +1 -0
  97. package/dist/utils/tokenizer/cl100k_base.json +1 -0
  98. package/dist/utils/tokenizer/o200k_base.json +1 -0
  99. package/dist/utils/tokenizer/tiktoken.d.ts +4 -0
  100. package/dist/utils/tokenizer/tiktoken.js +40 -0
  101. package/dist/utils/tokenizer/tiktoken.js.map +1 -0
  102. package/dist/utils/tokenizer/token-estimator.d.ts +4 -0
  103. package/dist/utils/tokenizer/token-estimator.js +98 -0
  104. package/dist/utils/tokenizer/token-estimator.js.map +1 -0
  105. package/package.json +51 -0
package/LICENSE.md ADDED
@@ -0,0 +1,88 @@
# License

Portions of this software are licensed as follows:

- Content of branches other than the main branch (i.e. "master") are not licensed.
- Source code files that contain ".ee." in their filename or ".ee" in their dirname are NOT licensed under
  the Sustainable Use License.
  To use source code files that contain ".ee." in their filename or ".ee" in their dirname you must hold a
  valid n8n Enterprise License specifically allowing you access to such source code files and as defined
  in "LICENSE_EE.md".
- All third party components incorporated into the n8n Software are licensed under the original license
  provided by the owner of the applicable component.
- Content outside of the above mentioned files or restrictions is available under the "Sustainable Use
  License" as defined below.

## Sustainable Use License

Version 1.0

### Acceptance

By using the software, you agree to all of the terms and conditions below.

### Copyright License

The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license
to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject
to the limitations below.

### Limitations

You may use or modify the software only for your own internal business purposes or for non-commercial or
personal use. You may distribute the software or provide it to others only if you do so free of charge for
non-commercial purposes. You may not alter, remove, or obscure any licensing, copyright, or other notices of
the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law.

### Patents

The licensor grants you a license, under any patent claims the licensor can license, or becomes able to
license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case
subject to the limitations and conditions in this license. This license does not cover any patent claims that
you cause to be infringed by modifications or additions to the software. If you or your company make any
written claim that the software infringes or contributes to infringement of any patent, your patent license
for the software granted under these terms ends immediately. If your company makes such a claim, your patent
license ends immediately for work on behalf of your company.

### Notices

You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these
terms. If you modify the software, you must include in any modified copies of the software a prominent notice
stating that you have modified the software.

### No Other Rights

These terms do not imply any licenses other than those expressly granted in these terms.

### Termination

If you use the software in violation of these terms, such use is not licensed, and your license will
automatically terminate. If the licensor provides you with a notice of your violation, and you cease all
violation of this license no later than 30 days after you receive that notice, your license will be reinstated
retroactively. However, if you violate these terms after such reinstatement, any additional violation of these
terms will cause your license to terminate automatically and permanently.

### No Liability

As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will
not be liable to you for any damages arising out of these terms or the use or nature of the software, under
any kind of legal claim.

### Definitions

The “licensor” is the entity offering these terms.

The “software” is the software the licensor makes available under these terms, including any portion of it.

“You” refers to the individual or entity agreeing to these terms.

“Your company” is any legal entity, sole proprietorship, or other kind of organization that you work for, plus
all organizations that have control over, are under the control of, or are under common control with that
organization. Control means ownership of substantially all the assets of an entity, or the power to direct its
management and policies by vote, contract, or otherwise. Control can be direct or indirect.

“Your license” is the license granted to you for the software under these terms.

“Use” means anything you do with the software requiring your license.

“Trademark” means trademarks, service marks, and similar rights.
package/README.md ADDED
@@ -0,0 +1,468 @@
# @n8n/ai-utilities

Utilities for building AI nodes in n8n.

## Installation

This package is part of the n8n monorepo and should be installed via the workspace.

## Development

```bash
# Build the package
pnpm build

# Run tests
pnpm test

# Run in watch mode
pnpm dev
```

# Model SDK

## Core Pattern

### Option A: OpenAI-Compatible APIs (easiest)

Pass config directly to `supplyModel` for providers that follow the OpenAI API format:

```typescript
import { supplyModel } from '@n8n/ai-utilities';

return supplyModel(this, {
  type: 'openai',
  modelId: 'model-name',
  apiKey: 'your-api-key',
  baseURL: 'https://api.provider.com/v1', // OpenRouter, DeepSeek, etc.
});
```

### Option B: Custom API (full control)

Extend `BaseChatModel` and implement `generate()` + `stream()`:

```typescript
import { BaseChatModel, supplyModel, type Message, type GenerateResult, type StreamChunk } from '@n8n/ai-utilities';

class MyChatModel extends BaseChatModel {
  async generate(messages: Message[]): Promise<GenerateResult> {
    // Call your API and convert the messages to your provider's format...
    return { text: '...', toolCalls: [] };
  }

  async *stream(messages: Message[]): AsyncIterable<StreamChunk> {
    // Stream from your API...
    yield { type: 'text-delta', textDelta: '...' };
    yield { type: 'finish', finishReason: 'stop' };
  }
}

const model = new MyChatModel('my-provider', 'model-id', { apiKey: '...' });
return supplyModel(this, model);
```
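
The `stream()` body above is deliberately elided. As a reference point, here is a minimal sketch of one way to fill it in for `MyChatModel`, assuming a hypothetical endpoint that streams newline-delimited JSON events carrying a `delta` field; the URL, request body, and event shape are illustrative, not part of the SDK:

```typescript
// Inside MyChatModel: one possible stream() over a hypothetical NDJSON endpoint.
async *stream(messages: Message[]): AsyncIterable<StreamChunk> {
  const response = await fetch('https://api.provider.example.com/v1/stream', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages }),
  });
  if (!response.body) throw new Error('Response has no body to stream');

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    // Every complete line in the buffer is one JSON event.
    let newlineIndex = buffer.indexOf('\n');
    while (newlineIndex >= 0) {
      const line = buffer.slice(0, newlineIndex).trim();
      buffer = buffer.slice(newlineIndex + 1);
      if (line) {
        const event = JSON.parse(line) as { delta?: string };
        if (event.delta) yield { type: 'text-delta', textDelta: event.delta };
      }
      newlineIndex = buffer.indexOf('\n');
    }
  }

  yield { type: 'finish', finishReason: 'stop' };
}
```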

---

## Before/After Examples

### Example 1: LmChatOpenRouter

**Before (LangChain):**

```typescript
import { ChatOpenAI } from '@langchain/openai';
import { N8nLlmTracing } from '../N8nLlmTracing';

async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
  const credentials = await this.getCredentials<OpenAICompatibleCredential>('openRouterApi');
  const modelName = this.getNodeParameter('model', itemIndex) as string;
  const options = this.getNodeParameter('options', itemIndex, {}) as { ... };

  const model = new ChatOpenAI({
    apiKey: credentials.apiKey,
    model: modelName,
    ...options,
    configuration: { baseURL: credentials.url, ... },
    callbacks: [new N8nLlmTracing(this)],
    onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
  });

  return { response: model };
}
```

**After (SDK):**

```typescript
import { supplyModel } from '@n8n/ai-utilities';

async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
  const credentials = await this.getCredentials<{ url: string; apiKey: string }>('openRouterApi');
  const modelName = this.getNodeParameter('model', itemIndex) as string;
  const options = this.getNodeParameter('options', itemIndex, {}) as { temperature?: number };

  return supplyModel(this, {
    type: 'openai',
    modelId: modelName,
    apiKey: credentials.apiKey,
    baseURL: credentials.url,
    ...options,
  });
}
```

> **Note:** `type: 'openai'` uses the SDK's built-in OpenAI-compatible implementation.
> It works with OpenRouter, DeepSeek, Azure OpenAI, and any other provider that follows the OpenAI API format.

---

## Community Node Examples

### ImaginaryLLM Chat Model

```typescript
import {
  BaseChatModel,
  supplyModel,
  type Message,
  type GenerateResult,
  type StreamChunk,
  type ChatModelConfig,
} from '@n8n/ai-utilities';
import { NodeConnectionTypes, type INodeType, type ISupplyDataFunctions, type SupplyData } from 'n8n-workflow';

// Custom chat model extending BaseChatModel
class ImaginaryLlmChatModel extends BaseChatModel {
  constructor(
    private apiKey: string,
    modelId: string,
    config?: ChatModelConfig,
  ) {
    super('imaginary-llm', modelId, config);
  }

  async generate(messages: Message[], config?: ChatModelConfig): Promise<GenerateResult> {
    // Convert n8n messages to the provider format
    const providerMessages = messages.map((m) => ({
      speaker: m.role === 'human' ? 'user' : m.role === 'ai' ? 'bot' : m.role,
      text: m.content.find((c) => c.type === 'text')?.text ?? '',
    }));

    // Call the API
    const response = await fetch('https://api.imaginary-llm.example.com/v1/generate', {
      method: 'POST',
      headers: {
        Authorization: `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.modelId,
        conversation: providerMessages,
        settings: {
          creativity: config?.temperature ?? 0.7,
          max_length: config?.maxTokens,
        },
      }),
    });

    const data = await response.json();

    return {
      text: data.reply.text,
      toolCalls: data.reply.actions?.map((a: any) => ({
        id: a.id,
        name: a.name,
        arguments: a.params,
      })),
      usage: data.metrics
        ? {
            promptTokens: data.metrics.input_tokens,
            completionTokens: data.metrics.output_tokens,
            totalTokens: data.metrics.input_tokens + data.metrics.output_tokens,
          }
        : undefined,
    };
  }

  async *stream(messages: Message[], config?: ChatModelConfig): AsyncIterable<StreamChunk> {
    // Streaming implementation...
    yield { type: 'text-delta', textDelta: '...' };
    yield { type: 'finish', finishReason: 'stop' };
  }
}

// The n8n node
export class LmChatImaginaryLlm implements INodeType {
  description = {
    displayName: 'ImaginaryLLM Chat Model',
    name: 'lmChatImaginaryLlm',
    outputs: [NodeConnectionTypes.AiLanguageModel],
    credentials: [{ name: 'imaginaryLlmApi', required: true }],
    properties: [
      {
        displayName: 'Model',
        name: 'model',
        type: 'options',
        options: [
          { name: 'Imaginary Pro', value: 'imaginary-pro' },
          { name: 'Imaginary Fast', value: 'imaginary-fast' },
        ],
        default: 'imaginary-pro',
      },
      { displayName: 'Temperature', name: 'temperature', type: 'number', default: 0.7 },
    ],
  };

  async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
    const credentials = await this.getCredentials<{ apiKey: string }>('imaginaryLlmApi');
    const modelName = this.getNodeParameter('model', itemIndex) as string;
    const temperature = this.getNodeParameter('temperature', itemIndex) as number;

    const model = new ImaginaryLlmChatModel(credentials.apiKey, modelName, { temperature });

    return supplyModel(this, model);
  }
}
```

---

# Memory SDK

The Memory SDK provides abstractions for building conversation memory nodes without LangChain dependencies.

## Architecture

Memory uses a **two-layer design** (see the sketch after this list):

1. **ChatHistory** (Storage Layer) - Where messages are stored (your custom implementation)
2. **ChatMemory** (Logic Layer) - How messages are managed (windowing, session scoping)
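
A condensed sketch of the two layers, with method names taken from the examples later in this document (treat the exact SDK signatures as illustrative, not authoritative):

```typescript
import type { Message } from '@n8n/ai-utilities';

// Storage layer: where messages physically live (you implement this).
interface ChatHistoryShape {
  getMessages(): Promise<Message[]>;
  addMessage(message: Message): Promise<void>;
  addMessages(messages: Message[]): Promise<void>;
  clear(): Promise<void>;
}

// Logic layer: decides which messages reach the model and how each turn is persisted.
interface ChatMemoryShape {
  loadMessages(): Promise<Message[]>;
  saveTurn(input: string, output: string): Promise<void>;
  clear(): Promise<void>;
}
```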

### Naming Convention

The SDK uses n8n-specific naming to avoid confusion with the LangChain classes:

| n8n SDK | LangChain Equivalent |
|---------|---------------------|
| `ChatHistory` (interface) | `BaseChatMessageHistory` |
| `BaseChatHistory` (base class) | `BaseChatMessageHistory` |
| `ChatMemory` (interface) | `BaseChatMemory` |
| `BaseChatMemory` (base class) | `BaseChatMemory` |
| `WindowedChatMemory` | `BufferWindowMemory` |

## Core Pattern

### Option A: Custom Storage

For exotic databases not covered by the SDK, extend `BaseChatHistory`:

```typescript
import {
  BaseChatHistory,
  WindowedChatMemory,
  supplyMemory,
  type Message,
} from '@n8n/ai-utilities';

class MyChatHistory extends BaseChatHistory {
  constructor(private sessionId: string) {
    super();
  }

  async getMessages(): Promise<Message[]> {
    // Read from your storage...
    return [];
  }

  async addMessage(message: Message): Promise<void> {
    // Write to your storage...
  }

  async clear(): Promise<void> {
    // Clear your storage...
  }
}

const history = new MyChatHistory(sessionId);
const memory = new WindowedChatMemory(history, { windowSize: 10 });
return supplyMemory(this, memory);
```

### Option B: Custom Memory Logic

For custom memory behavior (not just storage), extend `BaseChatMemory`:

```typescript
import {
  BaseChatMemory,
  supplyMemory,
  type Message,
  type ChatHistory,
} from '@n8n/ai-utilities';

class MyCustomChatMemory extends BaseChatMemory {
  readonly chatHistory: ChatHistory;

  constructor(chatHistory: ChatHistory) {
    super();
    this.chatHistory = chatHistory;
  }

  async loadMessages(): Promise<Message[]> {
    const messages = await this.chatHistory.getMessages();
    // Apply your custom logic here...
    return messages;
  }

  async saveTurn(input: string, output: string): Promise<void> {
    await this.chatHistory.addMessages([
      { role: 'human', content: [{ type: 'text', text: input }] },
      { role: 'ai', content: [{ type: 'text', text: output }] },
    ]);
  }

  async clear(): Promise<void> {
    await this.chatHistory.clear();
  }
}

const history = new MyChatHistory(sessionId);
const memory = new MyCustomChatMemory(history);
return supplyMemory(this, memory);
```
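
As a concrete stand-in for the "custom logic" placeholder above, here is a sketch of a `loadMessages()` for `MyCustomChatMemory` that trims history to a rough character budget; the budget heuristic is illustrative, not an SDK feature:

```typescript
// Inside MyCustomChatMemory: keep the newest messages within a rough character budget.
async loadMessages(): Promise<Message[]> {
  const messages = await this.chatHistory.getMessages();

  const budget = 4000; // illustrative cap on total characters of text content
  let used = 0;
  const kept: Message[] = [];

  // Walk backwards from the most recent message, always keeping at least one.
  for (let i = messages.length - 1; i >= 0; i--) {
    const text = messages[i].content.find((c) => c.type === 'text')?.text ?? '';
    if (kept.length > 0 && used + text.length > budget) break;
    used += text.length;
    kept.unshift(messages[i]);
  }

  return kept;
}
```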

---

## Community Node Examples

### ImaginaryDB Memory Node

```typescript
import {
  BaseChatHistory,
  WindowedChatMemory,
  supplyMemory,
  type Message,
} from '@n8n/ai-utilities';
import {
  NodeConnectionTypes,
  type INodeType,
  type ISupplyDataFunctions,
  type SupplyData,
} from 'n8n-workflow';

// Custom storage implementation using n8n's HTTP helpers
class ImaginaryDbChatHistory extends BaseChatHistory {
  constructor(
    private sessionId: string,
    private baseUrl: string,
    private apiKey: string,
    private httpRequest: ISupplyDataFunctions['helpers']['httpRequest'],
  ) {
    super();
  }

  async getMessages(): Promise<Message[]> {
    const data = await this.httpRequest({
      method: 'GET',
      url: `${this.baseUrl}/sessions/${this.sessionId}/messages`,
      headers: { Authorization: `Bearer ${this.apiKey}` },
      json: true,
    });

    // Convert from the provider format to the n8n Message format
    return data.messages.map((m: any) => ({
      role: m.speaker === 'user' ? 'human' : m.speaker === 'bot' ? 'ai' : m.speaker,
      content: [{ type: 'text', text: m.text }],
    }));
  }

  async addMessage(message: Message): Promise<void> {
    const text = message.content.find((c) => c.type === 'text')?.text ?? '';
    await this.httpRequest({
      method: 'POST',
      url: `${this.baseUrl}/sessions/${this.sessionId}/messages`,
      headers: { Authorization: `Bearer ${this.apiKey}` },
      body: {
        speaker: message.role === 'human' ? 'user' : message.role === 'ai' ? 'bot' : message.role,
        text,
      },
      json: true,
    });
  }

  async clear(): Promise<void> {
    await this.httpRequest({
      method: 'DELETE',
      url: `${this.baseUrl}/sessions/${this.sessionId}`,
      headers: { Authorization: `Bearer ${this.apiKey}` },
    });
  }
}

// The n8n node
export class MemoryImaginaryDb implements INodeType {
  description = {
    displayName: 'ImaginaryDB Memory',
    name: 'memoryImaginaryDb',
    icon: 'file:imaginarydb.svg',
    group: ['transform'],
    version: 1,
    description: 'Use ImaginaryDB for chat memory storage',
    defaults: { name: 'ImaginaryDB Memory' },
    codex: { categories: ['AI'], subcategories: { AI: ['Memory'] } },
    inputs: [],
    outputs: [NodeConnectionTypes.AiMemory],
    outputNames: ['Memory'],
    credentials: [{ name: 'imaginaryDbApi', required: true }],
    properties: [
      {
        displayName: 'Session ID',
        name: 'sessionId',
        type: 'string',
        default: '={{ $json.sessionId }}',
        description: 'Unique identifier for the conversation session',
      },
      {
        displayName: 'Window Size',
        name: 'windowSize',
        type: 'number',
        default: 10,
        description: 'Number of recent message pairs to keep in context',
      },
    ],
  };

  async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
    const credentials = await this.getCredentials<{ apiKey: string; baseUrl: string }>('imaginaryDbApi');
    const sessionId = this.getNodeParameter('sessionId', itemIndex) as string;
    const windowSize = this.getNodeParameter('windowSize', itemIndex) as number;

    // Pass n8n's HTTP request helper directly
    const history = new ImaginaryDbChatHistory(
      sessionId,
      credentials.baseUrl,
      credentials.apiKey,
      this.helpers.httpRequest,
    );
    const memory = new WindowedChatMemory(history, { windowSize });

    return supplyMemory(this, memory);
  }
}
```

> **Note:** Community nodes must use `this.helpers.httpRequest` or `this.helpers.httpRequestWithAuthentication`
> for HTTP calls. Direct `fetch` and other global network APIs are not allowed.

---

## Summary

| Before (LangChain) | After (SDK) |
|--------------------|-------------|
| `import { ChatOpenAI } from '@langchain/openai'` | `import { supplyModel } from '@n8n/ai-utilities'` |
| `new ChatOpenAI({ ... })` | `supplyModel(this, { type: 'openai', ... })` |
| Custom model provider | `class MyModel extends BaseChatModel { ... }` |
| `return { response: model }` | `return supplyModel(this, model)` |
| `import { BufferWindowMemory } from '@langchain/classic/memory'` | `import { WindowedChatMemory } from '@n8n/ai-utilities'` |
| Custom storage backend | `class MyHistory extends BaseChatHistory { ... }` |
| `return { response: logWrapper(memory, this) }` | `return supplyMemory(this, memory)` |
| LangChain message types | `Message` with roles: `system`, `human`, `ai`, `tool` |
| `tool_calls[].args` | `toolCalls[].arguments` |
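
To illustrate the last row, a minimal before/after for reading tool-call arguments; the variable names are illustrative:

```typescript
// Before (LangChain): arguments live under `args` on the AIMessage's tool calls.
const beforeArgs = aiMessage.tool_calls?.[0]?.args;

// After (SDK): generate() returns a GenerateResult with `toolCalls[].arguments`.
const result = await model.generate(messages);
const afterArgs = result.toolCalls?.[0]?.arguments;
```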
package/dist/adapters/langchain-chat-model.d.ts ADDED
@@ -0,0 +1,20 @@
import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import type { BaseLanguageModelInput } from '@langchain/core/language_models/base';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { AIMessageChunk } from '@langchain/core/messages';
import type { ChatResult } from '@langchain/core/outputs';
import { ChatGenerationChunk } from '@langchain/core/outputs';
import type { Runnable } from '@langchain/core/runnables';
import type { ISupplyDataFunctions } from 'n8n-workflow';
import type { ChatModel, ChatModelConfig } from '../types/chat-model';
export declare class LangchainAdapter<CallOptions extends ChatModelConfig = ChatModelConfig> extends BaseChatModel<CallOptions> {
    private chatModel;
    private ctx?;
    constructor(chatModel: ChatModel, ctx?: ISupplyDataFunctions | undefined);
    _llmType(): string;
    _generate(messages: BaseMessage[], options: this['ParsedCallOptions']): Promise<ChatResult>;
    _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
    bindTools(tools: BindToolsInput[]): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>;
}