n8n-nodes-kafka-batch-consumer 1.0.11 → 1.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/.eslintrc.js +0 -15
- package/IMPLEMENTATION_VERIFICATION.md +0 -417
- package/PROJECT_STRUCTURE.md +0 -268
- package/QUICK_START.md +0 -181
- package/jest.config.js +0 -22
- package/src/index.ts +0 -1
- package/src/nodes/KafkaBatchConsumer/KafkaBatchConsumer.node.test.ts +0 -1113
- package/src/nodes/KafkaBatchConsumer/KafkaBatchConsumer.node.ts +0 -323
- package/src/nodes/KafkaBatchConsumer/kafka.svg +0 -3
- package/tsconfig.json +0 -20
package/src/nodes/KafkaBatchConsumer/KafkaBatchConsumer.node.ts
DELETED

@@ -1,323 +0,0 @@
/// <reference types="node" />

import {
  IExecuteFunctions,
  INodeExecutionData,
  INodeType,
  INodeTypeDescription,
  NodeOperationError,
} from 'n8n-workflow';

import { Kafka, Consumer, EachMessagePayload } from 'kafkajs';

/**
 * Step 1: Node Interface
 * Implements INodeType with complete node description
 * Defines all Kafka configuration properties
 * Includes optional credentials reference for authentication
 */
export class KafkaBatchConsumer implements INodeType {
  description: INodeTypeDescription = {
    displayName: 'Kafka Batch Consumer',
    name: 'kafkaBatchConsumer',
    icon: 'file:kafka.svg',
    group: ['transform'],
    version: 1,
    description: 'Consume messages from Kafka in batches',
    defaults: {
      name: 'Kafka Batch Consumer',
    },
    inputs: ['main'],
    outputs: ['main'],
    // Credentials reference - required for brokers and clientId configuration
    credentials: [
      {
        name: 'kafka',
        required: true,
      },
    ],
    // Define all Kafka configuration properties
    properties: [
      {
        displayName: 'Group ID',
        name: 'groupId',
        type: 'string',
        default: 'n8n-consumer-group',
        required: true,
        description: 'Consumer group identifier',
      },
      {
        displayName: 'Topic',
        name: 'topic',
        type: 'string',
        default: '',
        required: true,
        description: 'Kafka topic to consume from',
      },
      {
        displayName: 'Batch Size',
        name: 'batchSize',
        type: 'number',
        default: 10,
        required: true,
        description: 'Number of messages to consume in a batch',
      },
      {
        displayName: 'From Beginning',
        name: 'fromBeginning',
        type: 'boolean',
        default: false,
        description: 'Whether to read from the beginning of the topic',
      },
      {
        displayName: 'Session Timeout',
        name: 'sessionTimeout',
        type: 'number',
        default: 30000,
        description: 'Session timeout in milliseconds',
      },
      {
        displayName: 'Options',
        name: 'options',
        type: 'collection',
        placeholder: 'Add Option',
        default: {},
        options: [
          {
            displayName: 'Read Timeout',
            name: 'readTimeout',
            type: 'number',
            default: 60000,
            description: 'Maximum time to wait for messages in milliseconds',
          },
          {
            displayName: 'Parse JSON',
            name: 'parseJson',
            type: 'boolean',
            default: true,
            description: 'Whether to parse message values as JSON',
          },
        ],
      },
    ],
  };
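The `credentials` entry above points at n8n's built-in `kafka` credential type, and the `execute()` method below reads its fields directly. A minimal sketch of the shape that code assumes (the interface name and the optionality are ours, for illustration only):

// Hypothetical shape of the fields execute() reads from the 'kafka'
// credential; the interface name is illustrative, not part of the package.
interface KafkaCredentialFields {
  clientId?: string;
  brokers?: string | string[]; // 'host1:9092,host2:9092' or an array
  ssl?: boolean;
  ca?: string;   // PEM-encoded CA certificate
  cert?: string; // PEM-encoded client certificate
  key?: string;  // PEM-encoded client private key
  authentication?: string; // SASL mechanism name, if SASL is used
  username?: string;
  password?: string;
}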

  /**
   * Main execution method
   * Handles the complete workflow: credentials, connection, consumption, and error handling
   */
  async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
    const returnData: INodeExecutionData[] = [];

    // Get all node parameters from N8N configuration
    const groupId = this.getNodeParameter('groupId', 0) as string;
    const topic = this.getNodeParameter('topic', 0) as string;
    const batchSize = this.getNodeParameter('batchSize', 0) as number;
    const fromBeginning = this.getNodeParameter('fromBeginning', 0) as boolean;
    const sessionTimeout = this.getNodeParameter('sessionTimeout', 0) as number;
    const options = this.getNodeParameter('options', 0) as {
      readTimeout?: number;
      parseJson?: boolean;
    };

    const readTimeout = options.readTimeout || 60000;
    const parseJson = options.parseJson !== undefined ? options.parseJson : true;
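One nit on the defaulting just above: `options.readTimeout || 60000` also discards an explicit 0, because `||` falls back on any falsy value. Nullish coalescing keeps that distinction; a standalone illustration, with `opts` standing in for the node's `options` parameter:

// `||` vs `??` for defaulting an optional numeric option.
const opts: { readTimeout?: number } = { readTimeout: 0 };

const viaOr = opts.readTimeout || 60000;      // 60000 -- `||` discards the 0
const viaNullish = opts.readTimeout ?? 60000; // 0 -- `??` keeps the explicit 0

console.log(viaOr, viaNullish);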

    /**
     * Step 2: Credentials Retrieval and Kafka Configuration
     * Build KafkaJS configuration with optional authentication
     * Supports SASL (PLAIN, SCRAM-SHA-256, SCRAM-SHA-512) and SSL/TLS
     * Brokers and clientId are now taken from credentials
     */

    // Attempt to retrieve Kafka credentials (required for brokers and clientId)
    let credentials: any = null;
    try {
      credentials = await this.getCredentials('kafka');
    } catch (error) {
      throw new NodeOperationError(
        this.getNode(),
        'Kafka credentials are required to get brokers and clientId configuration'
      );
    }

    // Build base Kafka configuration from credentials
    const kafkaConfig: any = {
      clientId: credentials.clientId || 'n8n-kafka-batch-consumer',
      brokers: credentials.brokers ?
        (typeof credentials.brokers === 'string' ?
          credentials.brokers.split(',').map((b: string) => b.trim()) :
          credentials.brokers) :
        ['localhost:9092'],
    };

    // Debug: log credentials to understand SSL configuration
    console.log('🔍 Kafka Credentials Debug:', {
      ssl: credentials.ssl,
      hasCa: !!credentials.ca,
      hasCert: !!credentials.cert,
      hasKey: !!credentials.key,
      sslType: typeof credentials.ssl,
    });

    // Map N8N credential fields to KafkaJS authentication format
    // Add SASL authentication if provided
    if (credentials.authentication) {
      kafkaConfig.sasl = {
        mechanism: credentials.authentication, // PLAIN, SCRAM-SHA-256, or SCRAM-SHA-512
        username: credentials.username,
        password: credentials.password,
      };
    }

    // Add SSL/TLS configuration for encrypted connections
    // Only enable SSL if explicitly set to true or if SSL certificates are provided
    if (credentials.ssl === true || credentials.ca || credentials.cert || credentials.key) {
      kafkaConfig.ssl = {
        rejectUnauthorized: credentials.ssl !== false, // Default to true if SSL is enabled
      };

      // Add optional SSL certificates for mutual TLS authentication
      if (credentials.ca) {
        kafkaConfig.ssl.ca = credentials.ca; // Certificate Authority
      }
      if (credentials.cert) {
        kafkaConfig.ssl.cert = credentials.cert; // Client certificate
      }
      if (credentials.key) {
        kafkaConfig.ssl.key = credentials.key; // Client private key
      }
    }
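For reference, against a SASL-over-TLS cluster the branches above would produce a configuration like the sketch below. Broker host and account values are placeholders. Note that KafkaJS expects lowercase mechanism names ('plain', 'scram-sha-256', 'scram-sha-512'), so the value stored in the credential's `authentication` field has to match one of those.

import { Kafka, KafkaConfig } from 'kafkajs';

// Illustrative result of the config-building code above for a SASL_SSL
// cluster; every concrete value here is a placeholder.
const config: KafkaConfig = {
  clientId: 'n8n-kafka-batch-consumer',
  brokers: ['broker-1.example.com:9093'],
  ssl: { rejectUnauthorized: true },
  sasl: {
    mechanism: 'scram-sha-256',
    username: 'svc-n8n',
    password: 'changeme',
  },
};

const kafka = new Kafka(config);
console.log('configured client for', config.brokers);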

    /**
     * Step 3: Consumer Setup
     * Initialize Kafka client and consumer with configuration
     * Connect to brokers and subscribe to topic
     */
    // Create Kafka instance with complete configuration
    const kafka = new Kafka(kafkaConfig);
    // Create consumer with group ID and session timeout
    const consumer: Consumer = kafka.consumer({
      groupId, // Consumer group for load balancing and offset management
      sessionTimeout, // Session timeout in milliseconds
    });

    // Track connection state for proper cleanup
    let consumerConnected = false;

    try {
      // Establish connection to Kafka brokers
      await consumer.connect();
      consumerConnected = true;

      // Subscribe to the specified topic
      // fromBeginning: if true, read from start; if false, read from latest
      await consumer.subscribe({ topic, fromBeginning });

      /**
       * Step 4: Message Collection
       * Collect messages in batch with timeout support
       * Stop when batch size reached or timeout occurs
       */
      // Initialize message collection array
      const messages: INodeExecutionData[] = [];
      let timeoutHandle: NodeJS.Timeout | null = null;
      let resolvePromise: ((value: void) => void) | null = null;

      const collectionPromise = new Promise<void>((resolve) => {
        resolvePromise = resolve;
      });

      // Set maximum wait time for message collection
      timeoutHandle = setTimeout(() => {
        if (resolvePromise) {
          resolvePromise(); // Resolve with partial batch on timeout
        }
      }, readTimeout);
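The promise with an externally captured resolver gives `execute()` one awaitable that either the timer above or the batch-full check below can settle, whichever fires first. The same pattern extracted into a small helper (the helper name is ours, not from the package):

// A deferred that settles when finish() is called or after ms elapse,
// whichever happens first; resolving a promise twice is a no-op.
function batchWindow(ms: number): { done: Promise<void>; finish: () => void } {
  let resolve!: () => void;
  const done = new Promise<void>((r) => (resolve = r));
  const timer = setTimeout(resolve, ms);
  return {
    done,
    finish: () => {
      clearTimeout(timer);
      resolve();
    },
  };
}

// Usage: await `done`, and call finish() once the batch is full.
const { done, finish } = batchWindow(60_000);
void done.then(() => console.log('batch window closed'));
finish();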

      /**
       * Start message consumption
       * eachMessage callback processes messages one by one
       * Collects until batch size or timeout reached
       * Note: consumer.run() starts the consumer but doesn't block
       */
      consumer.run({
        eachMessage: async ({ topic, partition, message }: EachMessagePayload) => {
          /**
           * Step 6: Output Format
           * Process each message and format for N8N output
           * Parse JSON if configured, preserve metadata
           */
          // Parse message value from Buffer to string
          let value: any = message.value?.toString() || '';

          // Attempt JSON parsing if configured
          if (parseJson && value) {
            try {
              value = JSON.parse(value); // Parse valid JSON to object
            } catch (error) {
              // Keep as string if JSON parsing fails (invalid JSON)
            }
          }

          // Build N8N execution data with complete Kafka message metadata
          const messageData: INodeExecutionData = {
            json: {
              topic,
              partition,
              offset: message.offset,
              key: message.key?.toString() || null,
              value,
              timestamp: message.timestamp,
              headers: message.headers || {},
            },
          };

          messages.push(messageData);

          // Check if batch size reached
          if (messages.length >= batchSize) {
            if (timeoutHandle) {
              clearTimeout(timeoutHandle); // Cancel timeout
            }
            if (resolvePromise) {
              resolvePromise(); // Complete batch collection
            }
          }
        },
      });
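Two properties of this loop are worth calling out: `consumer.run()` is deliberately not awaited (per the comment, it starts the consumer without blocking), and `eachMessage` keeps firing between the moment the batch fills and the later disconnect, so a busy topic can return slightly more than `batchSize` items. Below is a standalone sketch of the same collect-then-stop pattern that closes that window with `consumer.pause()`, which is part of the KafkaJS consumer API; broker, topic, and group names are placeholders.

import { Kafka } from 'kafkajs';

// Collect one batch, pausing the topic once it is full so no further
// eachMessage callbacks arrive before disconnect.
async function consumeOneBatch(batchSize: number): Promise<string[]> {
  const kafka = new Kafka({ clientId: 'batch-sketch', brokers: ['localhost:9092'] });
  const consumer = kafka.consumer({ groupId: 'batch-sketch-group' });
  const batch: string[] = [];

  await consumer.connect();
  await consumer.subscribe({ topic: 'demo-topic', fromBeginning: true });

  await new Promise<void>((resolve) => {
    const timer = setTimeout(resolve, 60_000); // partial batch on timeout
    void consumer.run({
      eachMessage: async ({ topic, message }) => {
        batch.push(message.value?.toString() ?? '');
        if (batch.length >= batchSize) {
          consumer.pause([{ topic }]); // stop intake for this topic
          clearTimeout(timer);
          resolve();
        }
      },
    });
  });

  await consumer.disconnect();
  return batch;
}

void consumeOneBatch(10).then((b) => console.log(`collected ${b.length} messages`));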

      /**
       * Wait for collection to complete
       * Completes when: batch size reached OR timeout occurs
       * Partial batches are valid on timeout
       */
      await collectionPromise;

      // Gracefully disconnect consumer and cleanup resources
      await consumer.disconnect();
      consumerConnected = false;

      // Add collected messages to return data
      returnData.push(...messages);
    } catch (error) {
      // Ensure consumer is disconnected
      if (consumerConnected) {
        try {
          await consumer.disconnect();
        } catch (disconnectError) {
          // Ignore disconnect errors
        }
      }

      const errorMessage = error instanceof Error ? error.message : String(error);
      throw new NodeOperationError(
        this.getNode(),
        `Kafka error: ${errorMessage}`,
        { description: errorMessage }
      );
    }

    return [returnData];
  }
}
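End to end, every consumed message becomes one n8n item whose `json` combines the (optionally JSON-parsed) value with the Kafka metadata. An illustrative output item (values invented; KafkaJS reports `offset` and `timestamp` as strings):

// One output item as a downstream node would see it, with Parse JSON enabled.
const exampleItem = {
  json: {
    topic: 'orders',
    partition: 0,
    offset: '42',
    key: 'order-1001',
    value: { id: 1001, total: 19.99 }, // parsed object; raw string if parsing fails
    timestamp: '1700000000000', // epoch milliseconds, as a string
    headers: {},
  },
};

console.log(JSON.stringify(exampleItem, null, 2));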
package/src/nodes/KafkaBatchConsumer/kafka.svg
DELETED

@@ -1,3 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24">
  <path fill="currentColor" d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8zm-1-13h2v6h-2zm0 8h2v2h-2z"/>
</svg>
package/tsconfig.json
DELETED

@@ -1,20 +0,0 @@
{
  "compilerOptions": {
    "target": "ES2020",
    "module": "commonjs",
    "lib": ["ES2020"],
    "outDir": "./dist",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "declaration": true,
    "declarationMap": true,
    "sourceMap": true,
    "resolveJsonModule": true,
    "moduleResolution": "node"
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules", "dist", "**/*.test.ts"]
}