@backstage/plugin-events-backend-module-kafka 0.1.6-next.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/CHANGELOG.md +15 -0
  2. package/README.md +67 -25
  3. package/config.d.ts +504 -136
  4. package/dist/KafkaConsumingEventPublisher/KafkaConsumingEventPublisher.cjs.js +67 -0
  5. package/dist/KafkaConsumingEventPublisher/KafkaConsumingEventPublisher.cjs.js.map +1 -0
  6. package/dist/KafkaConsumingEventPublisher/config.cjs.js +71 -0
  7. package/dist/KafkaConsumingEventPublisher/config.cjs.js.map +1 -0
  8. package/dist/{service/eventsModuleKafkaConsumingEventPublisher.cjs.js → KafkaConsumingEventPublisher/module.cjs.js} +10 -9
  9. package/dist/KafkaConsumingEventPublisher/module.cjs.js.map +1 -0
  10. package/dist/KafkaPublishingEventConsumer/KafkaPublishingEventConsumer.cjs.js +73 -0
  11. package/dist/KafkaPublishingEventConsumer/KafkaPublishingEventConsumer.cjs.js.map +1 -0
  12. package/dist/KafkaPublishingEventConsumer/config.cjs.js +44 -0
  13. package/dist/KafkaPublishingEventConsumer/config.cjs.js.map +1 -0
  14. package/dist/KafkaPublishingEventConsumer/module.cjs.js +36 -0
  15. package/dist/KafkaPublishingEventConsumer/module.cjs.js.map +1 -0
  16. package/dist/index.cjs.js +10 -3
  17. package/dist/index.cjs.js.map +1 -1
  18. package/dist/index.d.ts +7 -4
  19. package/dist/utils/LoggerServiceAdapter.cjs.js.map +1 -0
  20. package/dist/utils/config.cjs.js +46 -0
  21. package/dist/utils/config.cjs.js.map +1 -0
  22. package/dist/utils/kafkaTransformers.cjs.js +24 -0
  23. package/dist/utils/kafkaTransformers.cjs.js.map +1 -0
  24. package/package.json +8 -8
  25. package/dist/publisher/KafkaConsumerClient.cjs.js +0 -44
  26. package/dist/publisher/KafkaConsumerClient.cjs.js.map +0 -1
  27. package/dist/publisher/KafkaConsumingEventPublisher.cjs.js +0 -63
  28. package/dist/publisher/KafkaConsumingEventPublisher.cjs.js.map +0 -1
  29. package/dist/publisher/LoggerServiceAdapter.cjs.js.map +0 -1
  30. package/dist/publisher/config.cjs.js +0 -102
  31. package/dist/publisher/config.cjs.js.map +0 -1
  32. package/dist/service/eventsModuleKafkaConsumingEventPublisher.cjs.js.map +0 -1
  33. /package/dist/{publisher → utils}/LoggerServiceAdapter.cjs.js +0 -0
package/config.d.ts CHANGED
@@ -13,7 +13,6 @@
13
13
  * See the License for the specific language governing permissions and
14
14
  * limitations under the License.
15
15
  */
16
-
17
16
  import { HumanDuration } from '@backstage/types';
18
17
 
19
18
  export interface Config {
@@ -25,179 +24,548 @@ export interface Config {
25
24
  kafka?: {
26
25
  /**
27
26
  * Configuration for KafkaConsumingEventPublisher
27
+ *
28
+ * Supports either:
29
+ * 1. Single configuration object (legacy format)
30
+ * 2. Multiple named instances as a record where each key is a unique name for the Kafka instance
28
31
  */
29
- kafkaConsumingEventPublisher?: {
30
- /**
31
- * (Required) Client ID used by Backstage to identify when connecting to the Kafka cluster.
32
- */
33
- clientId: string;
34
- /**
35
- * (Required) List of brokers in the Kafka cluster to connect to.
36
- */
37
- brokers: string[];
38
- /**
39
- * Optional SSL connection parameters to connect to the cluster. Passed directly to Node tls.connect.
40
- * See https://nodejs.org/dist/latest-v8.x/docs/api/tls.html#tls_tls_createsecurecontext_options
41
- */
42
- ssl?:
43
- | {
44
- ca?: string[];
32
+ kafkaConsumingEventPublisher?:
33
+ | {
34
+ /**
35
+ * (Required) Client ID used by Backstage to identify when connecting to the Kafka cluster.
36
+ */
37
+ clientId: string;
38
+ /**
39
+ * (Required) List of brokers in the Kafka cluster to connect to.
40
+ */
41
+ brokers: string[];
42
+ /**
43
+ * Optional SSL connection parameters to connect to the cluster. Passed directly to Node tls.connect.
44
+ * See https://nodejs.org/dist/latest-v8.x/docs/api/tls.html#tls_tls_createsecurecontext_options
45
+ */
46
+ ssl?:
47
+ | {
48
+ ca?: string[];
49
+ /** @visibility secret */
50
+ key?: string;
51
+ cert?: string;
52
+ rejectUnauthorized?: boolean;
53
+ }
54
+ | boolean;
55
+ /**
56
+ * Optional SASL connection parameters.
57
+ */
58
+ sasl?: {
59
+ mechanism: 'plain' | 'scram-sha-256' | 'scram-sha-512';
60
+ username: string;
45
61
  /** @visibility secret */
46
- key?: string;
47
- cert?: string;
48
- rejectUnauthorized?: boolean;
49
- }
50
- | boolean;
51
- /**
52
- * Optional SASL connection parameters.
53
- */
54
- sasl?: {
55
- mechanism: 'plain' | 'scram-sha-256' | 'scram-sha-512';
56
- username: string;
57
- /** @visibility secret */
58
- password: string;
59
- };
62
+ password: string;
63
+ };
60
64
 
61
- /**
62
- * Optional retry connection parameters.
63
- */
64
- retry: {
65
- /**
66
- * (Optional) Maximum wait time for a retry
67
- * Default: 30000 ms.
68
- */
69
- maxRetryTime: HumanDuration | string;
65
+ /**
66
+ * Optional retry connection parameters.
67
+ */
68
+ retry?: {
69
+ /**
70
+ * (Optional) Maximum wait time for a retry
71
+ * Default: 30000 ms.
72
+ */
73
+ maxRetryTime?: HumanDuration | string;
70
74
 
71
- /**
72
- * (Optional) Initial value used to calculate the retry (This is still randomized following the randomization factor)
73
- * Default: 300 ms.
74
- */
75
- initialRetryTime: HumanDuration | string;
75
+ /**
76
+ * (Optional) Initial value used to calculate the retry (This is still randomized following the randomization factor)
77
+ * Default: 300 ms.
78
+ */
79
+ initialRetryTime?: HumanDuration | string;
76
80
 
77
- /**
78
- * (Optional) Randomization factor
79
- * Default: 0.2.
80
- */
81
- factor: number;
81
+ /**
82
+ * (Optional) Randomization factor
83
+ * Default: 0.2.
84
+ */
85
+ factor?: number;
82
86
 
83
- /**
84
- * (Optional) Exponential factor
85
- * Default: 2.
86
- */
87
- multiplier: number;
87
+ /**
88
+ * (Optional) Exponential factor
89
+ * Default: 2.
90
+ */
91
+ multiplier?: number;
88
92
 
89
- /**
90
- * (Optional) Max number of retries per call
91
- * Default: 5.
92
- */
93
- retries: number;
94
- };
93
+ /**
94
+ * (Optional) Max number of retries per call
95
+ * Default: 5.
96
+ */
97
+ retries?: number;
98
+ };
95
99
 
96
- /**
97
- * (Optional) Timeout for authentication requests.
98
- * Default: 10000 ms.
99
- */
100
- authenticationTimeout: HumanDuration | string;
101
-
102
- /**
103
- * (Optional) Time to wait for a successful connection.
104
- * Default: 1000 ms.
105
- */
106
- connectionTimeout: HumanDuration | string;
107
-
108
- /**
109
- * (Optional) Time to wait for a successful request.
110
- * Default: 30000 ms.
111
- */
112
- requestTimeout: HumanDuration | string;
113
-
114
- /**
115
- * (Optional) The request timeout can be disabled by setting enforceRequestTimeout to false.
116
- * Default: true
117
- */
118
- enforceRequestTimeout: boolean;
119
-
120
- /**
121
- * Contains a object per topic for which an Kafka queue
122
- * should be used as source of events.
123
- */
124
- topics: Array<{
125
- /**
126
- * (Required) The Backstage topic to publish to
127
- */
128
- topic: string;
129
- /**
130
- * (Required) KafkaConsumer-related configuration.
131
- */
132
- kafka: {
133
100
  /**
134
- * (Required) The Kafka topics to subscribe to
101
+ * (Optional) Timeout for authentication requests.
102
+ * Default: 10000 ms.
135
103
  */
136
- topics: string[];
104
+ authenticationTimeout?: HumanDuration | string;
105
+
137
106
  /**
138
- * (Required) The GroupId to be used by the topic consumers
107
+ * (Optional) Time to wait for a successful connection.
108
+ * Default: 1000 ms.
139
109
  */
140
- groupId: string;
110
+ connectionTimeout?: HumanDuration | string;
141
111
 
142
112
  /**
143
- * (Optional) Timeout used to detect failures.
144
- * The consumer sends periodic heartbeats to indicate its liveness to the broker.
145
- * If no heartbeats are received by the broker before the expiration of this session timeout,
146
- * then the broker will remove this consumer from the group and initiate a rebalance
113
+ * (Optional) Time to wait for a successful request.
147
114
  * Default: 30000 ms.
148
115
  */
149
- sessionTimeout: HumanDuration | string;
116
+ requestTimeout?: HumanDuration | string;
150
117
 
151
118
  /**
152
- * (Optional) The maximum time that the coordinator will wait for each member to rejoin when rebalancing the group
153
- * Default: 60000 ms.
119
+ * (Optional) The request timeout can be disabled by setting enforceRequestTimeout to false.
120
+ * Default: true
154
121
  */
155
- rebalanceTimeout: HumanDuration | string;
122
+ enforceRequestTimeout?: boolean;
156
123
 
157
124
  /**
158
- * (Optional) The expected time between heartbeats to the consumer coordinator.
159
- * Heartbeats are used to ensure that the consumer's session stays active.
160
- * The value must be set lower than session timeout
161
- * Default: 3000 ms.
125
+ * Contains an object per topic for which a Kafka queue
126
+ * should be used as source of events.
162
127
  */
163
- heartbeatInterval: HumanDuration | string;
128
+ topics: Array<{
129
+ /**
130
+ * (Required) The Backstage topic to publish to
131
+ */
132
+ topic: string;
133
+ /**
134
+ * (Required) KafkaConsumer-related configuration.
135
+ */
136
+ kafka: {
137
+ /**
138
+ * (Required) The Kafka topics to subscribe to
139
+ */
140
+ topics: string[];
141
+ /**
142
+ * (Required) The GroupId to be used by the topic consumers
143
+ */
144
+ groupId: string;
145
+
146
+ /**
147
+ * (Optional) Timeout used to detect failures.
148
+ * The consumer sends periodic heartbeats to indicate its liveness to the broker.
149
+ * If no heartbeats are received by the broker before the expiration of this session timeout,
150
+ * then the broker will remove this consumer from the group and initiate a rebalance
151
+ * Default: 30000 ms.
152
+ */
153
+ sessionTimeout?: HumanDuration | string;
154
+
155
+ /**
156
+ * (Optional) The maximum time that the coordinator will wait for each member to rejoin when rebalancing the group
157
+ * Default: 60000 ms.
158
+ */
159
+ rebalanceTimeout?: HumanDuration | string;
160
+
161
+ /**
162
+ * (Optional) The expected time between heartbeats to the consumer coordinator.
163
+ * Heartbeats are used to ensure that the consumer's session stays active.
164
+ * The value must be set lower than session timeout
165
+ * Default: 3000 ms.
166
+ */
167
+ heartbeatInterval?: HumanDuration | string;
168
+
169
+ /**
170
+ * (Optional) The period of time after which we force a refresh of metadata
171
+ * even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions
172
+ * Default: 300000 ms (5 minutes).
173
+ */
174
+ metadataMaxAge?: HumanDuration | string;
175
+
176
+ /**
177
+ * (Optional) The maximum amount of data per-partition the server will return.
178
+ * This size must be at least as large as the maximum message size the server allows
179
+ * or else it is possible for the producer to send messages larger than the consumer can fetch.
180
+ * If that happens, the consumer can get stuck trying to fetch a large message on a certain partition
181
+ * Default: 1048576 (1MB)
182
+ */
183
+ maxBytesPerPartition?: number;
184
+
185
+ /**
186
+ * (Optional) Minimum amount of data the server should return for a fetch request, otherwise wait up to maxWaitTime for more data to accumulate.
187
+ * Default: 1
188
+ */
189
+ minBytes?: number;
190
+
191
+ /**
192
+ * (Optional) Maximum amount of bytes to accumulate in the response. Supported by Kafka >= 0.10.1.0
193
+ * Default: 10485760 (10MB)
194
+ */
195
+ maxBytes?: number;
196
+
197
+ /**
198
+ * (Optional) The maximum amount of time the server will block before answering the fetch request
199
+ * if there isn't sufficient data to immediately satisfy the requirement given by minBytes
200
+ * Default: 5000
201
+ */
202
+ maxWaitTime?: HumanDuration | string;
203
+ };
204
+ }>;
205
+ }
206
+ | {
207
+ [name: string]: {
208
+ /**
209
+ * (Required) Client ID used by Backstage to identify when connecting to the Kafka cluster.
210
+ */
211
+ clientId: string;
212
+ /**
213
+ * (Required) List of brokers in the Kafka cluster to connect to.
214
+ */
215
+ brokers: string[];
216
+ /**
217
+ * Optional SSL connection parameters to connect to the cluster. Passed directly to Node tls.connect.
218
+ * See https://nodejs.org/dist/latest-v8.x/docs/api/tls.html#tls_tls_createsecurecontext_options
219
+ */
220
+ ssl?:
221
+ | {
222
+ ca?: string[];
223
+ /** @visibility secret */
224
+ key?: string;
225
+ cert?: string;
226
+ rejectUnauthorized?: boolean;
227
+ }
228
+ | boolean;
229
+ /**
230
+ * Optional SASL connection parameters.
231
+ */
232
+ sasl?: {
233
+ mechanism: 'plain' | 'scram-sha-256' | 'scram-sha-512';
234
+ username: string;
235
+ /** @visibility secret */
236
+ password: string;
237
+ };
238
+
239
+ /**
240
+ * Optional retry connection parameters.
241
+ */
242
+ retry?: {
243
+ /**
244
+ * (Optional) Maximum wait time for a retry
245
+ * Default: 30000 ms.
246
+ */
247
+ maxRetryTime?: HumanDuration | string;
248
+
249
+ /**
250
+ * (Optional) Initial value used to calculate the retry (This is still randomized following the randomization factor)
251
+ * Default: 300 ms.
252
+ */
253
+ initialRetryTime?: HumanDuration | string;
254
+
255
+ /**
256
+ * (Optional) Randomization factor
257
+ * Default: 0.2.
258
+ */
259
+ factor?: number;
260
+
261
+ /**
262
+ * (Optional) Exponential factor
263
+ * Default: 2.
264
+ */
265
+ multiplier?: number;
266
+
267
+ /**
268
+ * (Optional) Max number of retries per call
269
+ * Default: 5.
270
+ */
271
+ retries?: number;
272
+ };
273
+
274
+ /**
275
+ * (Optional) Timeout for authentication requests.
276
+ * Default: 10000 ms.
277
+ */
278
+ authenticationTimeout?: HumanDuration | string;
279
+
280
+ /**
281
+ * (Optional) Time to wait for a successful connection.
282
+ * Default: 1000 ms.
283
+ */
284
+ connectionTimeout?: HumanDuration | string;
285
+
286
+ /**
287
+ * (Optional) Time to wait for a successful request.
288
+ * Default: 30000 ms.
289
+ */
290
+ requestTimeout?: HumanDuration | string;
291
+
292
+ /**
293
+ * (Optional) The request timeout can be disabled by setting enforceRequestTimeout to false.
294
+ * Default: true
295
+ */
296
+ enforceRequestTimeout?: boolean;
297
+
298
+ /**
299
+ * Contains an object per topic for which a Kafka queue
300
+ * should be used as source of events.
301
+ */
302
+ topics: Array<{
303
+ /**
304
+ * (Required) The Backstage topic to publish to
305
+ */
306
+ topic: string;
307
+ /**
308
+ * (Required) KafkaConsumer-related configuration.
309
+ */
310
+ kafka: {
311
+ /**
312
+ * (Required) The Kafka topics to subscribe to
313
+ */
314
+ topics: string[];
315
+ /**
316
+ * (Required) The GroupId to be used by the topic consumers
317
+ */
318
+ groupId: string;
319
+
320
+ /**
321
+ * (Optional) Timeout used to detect failures.
322
+ * The consumer sends periodic heartbeats to indicate its liveness to the broker.
323
+ * If no heartbeats are received by the broker before the expiration of this session timeout,
324
+ * then the broker will remove this consumer from the group and initiate a rebalance
325
+ * Default: 30000 ms.
326
+ */
327
+ sessionTimeout?: HumanDuration | string;
328
+
329
+ /**
330
+ * (Optional) The maximum time that the coordinator will wait for each member to rejoin when rebalancing the group
331
+ * Default: 60000 ms.
332
+ */
333
+ rebalanceTimeout?: HumanDuration | string;
334
+
335
+ /**
336
+ * (Optional) The expected time between heartbeats to the consumer coordinator.
337
+ * Heartbeats are used to ensure that the consumer's session stays active.
338
+ * The value must be set lower than session timeout
339
+ * Default: 3000 ms.
340
+ */
341
+ heartbeatInterval?: HumanDuration | string;
342
+
343
+ /**
344
+ * (Optional) The period of time after which we force a refresh of metadata
345
+ * even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions
346
+ * Default: 300000 ms (5 minutes).
347
+ */
348
+ metadataMaxAge?: HumanDuration | string;
164
349
 
350
+ /**
351
+ * (Optional) The maximum amount of data per-partition the server will return.
352
+ * This size must be at least as large as the maximum message size the server allows
353
+ * or else it is possible for the producer to send messages larger than the consumer can fetch.
354
+ * If that happens, the consumer can get stuck trying to fetch a large message on a certain partition
355
+ * Default: 1048576 (1MB)
356
+ */
357
+ maxBytesPerPartition?: number;
358
+
359
+ /**
360
+ * (Optional) Minimum amount of data the server should return for a fetch request, otherwise wait up to maxWaitTime for more data to accumulate.
361
+ * Default: 1
362
+ */
363
+ minBytes?: number;
364
+
365
+ /**
366
+ * (Optional) Maximum amount of bytes to accumulate in the response. Supported by Kafka >= 0.10.1.0
367
+ * Default: 10485760 (10MB)
368
+ */
369
+ maxBytes?: number;
370
+
371
+ /**
372
+ * (Optional) The maximum amount of time the server will block before answering the fetch request
373
+ * if there isn't sufficient data to immediately satisfy the requirement given by minBytes
374
+ * Default: 5000
375
+ */
376
+ maxWaitTime?: HumanDuration | string;
377
+ };
378
+ }>;
379
+ };
380
+ };
381
+
382
+ /**
383
+ * Configuration for KafkaPublishingEventConsumer
384
+ *
385
+ * Supports multiple named instances as a record where each key is a unique name
386
+ * for the Kafka producer configuration.
387
+ */
388
+ kafkaPublishingEventConsumer?: {
389
+ [name: string]: {
390
+ /**
391
+ * (Required) Client ID used by Backstage to identify when connecting to the Kafka cluster.
392
+ */
393
+ clientId: string;
394
+ /**
395
+ * (Required) List of brokers in the Kafka cluster to connect to.
396
+ */
397
+ brokers: string[];
398
+ /**
399
+ * Optional SSL connection parameters to connect to the cluster. Passed directly to Node tls.connect.
400
+ * See https://nodejs.org/dist/latest-v8.x/docs/api/tls.html#tls_tls_createsecurecontext_options
401
+ */
402
+ ssl?:
403
+ | {
404
+ ca?: string[];
405
+ /** @visibility secret */
406
+ key?: string;
407
+ cert?: string;
408
+ rejectUnauthorized?: boolean;
409
+ }
410
+ | boolean;
411
+ /**
412
+ * Optional SASL connection parameters.
413
+ */
414
+ sasl?: {
415
+ mechanism: 'plain' | 'scram-sha-256' | 'scram-sha-512';
416
+ username: string;
417
+ /** @visibility secret */
418
+ password: string;
419
+ };
420
+
421
+ /**
422
+ * Optional retry connection parameters.
423
+ */
424
+ retry?: {
165
425
  /**
166
- * (Optional) The period of time after which we force a refresh of metadata
167
- * even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions
168
- * Default: 300000 ms (5 minutes).
426
+ * (Optional) Maximum wait time for a retry
427
+ * Default: 30000 ms.
169
428
  */
170
- metadataMaxAge: HumanDuration | string;
429
+ maxRetryTime?: HumanDuration | string;
171
430
 
172
431
  /**
173
- * (Optional) The maximum amount of data per-partition the server will return.
174
- * This size must be at least as large as the maximum message size the server allows
175
- * or else it is possible for the producer to send messages larger than the consumer can fetch.
176
- * If that happens, the consumer can get stuck trying to fetch a large message on a certain partition
177
- * Default: 1048576 (1MB)
432
+ * (Optional) Initial value used to calculate the retry (This is still randomized following the randomization factor)
433
+ * Default: 300 ms.
178
434
  */
179
- maxBytesPerPartition: number;
435
+ initialRetryTime?: HumanDuration | string;
180
436
 
181
437
  /**
182
- * (Optional) Minimum amount of data the server should return for a fetch request, otherwise wait up to maxWaitTime for more data to accumulate.
183
- * Default: 1
438
+ * (Optional) Randomization factor
439
+ * Default: 0.2.
184
440
  */
185
- minBytes: number;
441
+ factor?: number;
186
442
 
187
443
  /**
188
- * (Optional) Maximum amount of bytes to accumulate in the response. Supported by Kafka >= 0.10.1.0
189
- * Default: 10485760 (10MB)
444
+ * (Optional) Exponential factor
445
+ * Default: 2.
190
446
  */
191
- maxBytes: number;
447
+ multiplier?: number;
192
448
 
193
449
  /**
194
- * (Optional) The maximum amount of time the server will block before answering the fetch request
195
- * if there isn’t sufficient data to immediately satisfy the requirement given by minBytes
196
- * Default: 5000
450
+ * (Optional) Max number of retries per call
451
+ * Default: 5.
197
452
  */
198
- maxWaitTime: HumanDuration | string;
453
+ retries?: number;
199
454
  };
200
- }>;
455
+
456
+ /**
457
+ * (Optional) Timeout for authentication requests.
458
+ * Default: 10000 ms.
459
+ */
460
+ authenticationTimeout?: HumanDuration | string;
461
+
462
+ /**
463
+ * (Optional) Time to wait for a successful connection.
464
+ * Default: 1000 ms.
465
+ */
466
+ connectionTimeout?: HumanDuration | string;
467
+
468
+ /**
469
+ * (Optional) Time to wait for a successful request.
470
+ * Default: 30000 ms.
471
+ */
472
+ requestTimeout?: HumanDuration | string;
473
+
474
+ /**
475
+ * (Optional) The request timeout can be disabled by setting enforceRequestTimeout to false.
476
+ * Default: true
477
+ */
478
+ enforceRequestTimeout?: boolean;
479
+
480
+ /**
481
+ * Contains an object per topic for which a Kafka queue
482
+ * should be used as destination for events.
483
+ */
484
+ topics: Array<{
485
+ /**
486
+ * (Required) The Backstage topic to consume from
487
+ */
488
+ topic: string;
489
+ /**
490
+ * (Required) KafkaProducer-related configuration.
491
+ */
492
+ kafka: {
493
+ /**
494
+ * (Required) The Kafka topic to publish to
495
+ */
496
+ topic: string;
497
+
498
+ /**
499
+ * (Optional) Allow topic creation when querying metadata for non-existent topics.
500
+ * Default: true
501
+ */
502
+ allowAutoTopicCreation?: boolean;
503
+
504
+ /**
505
+ * (Optional) The period of time after which we force a refresh of metadata
506
+ * even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions
507
+ * Default: 300000 ms (5 minutes).
508
+ */
509
+ metadataMaxAge?: HumanDuration | string;
510
+
511
+ /**
512
+ * (Optional) The maximum amount of time in ms that the transaction coordinator will wait for a transaction status update
513
+ * from the producer before proactively aborting the ongoing transaction.
514
+ * If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the request will fail with a `InvalidTransactionTimeout` error
515
+ * Default: 60000 ms.
516
+ */
517
+ transactionTimeout?: HumanDuration | string;
518
+
519
+ /**
520
+ * (Optional) Experimental. If enabled producer will ensure each message is written exactly once. Acks must be set to -1 ("all").
521
+ * Retries will default to MAX_SAFE_INTEGER.
522
+ * Default: false.
523
+ */
524
+ idempotent?: boolean;
525
+
526
+ /**
527
+ * (Optional) Max number of requests that may be in progress at any time. If falsey then no limit.
528
+ * Default: null.
529
+ */
530
+ maxInFlightRequests?: number;
531
+
532
+ /**
533
+ * Optional retry connection parameters.
534
+ */
535
+ retry?: {
536
+ /**
537
+ * (Optional) Maximum wait time for a retry
538
+ * Default: 30000 ms.
539
+ */
540
+ maxRetryTime?: HumanDuration | string;
541
+
542
+ /**
543
+ * (Optional) Initial value used to calculate the retry (This is still randomized following the randomization factor)
544
+ * Default: 300 ms.
545
+ */
546
+ initialRetryTime?: HumanDuration | string;
547
+
548
+ /**
549
+ * (Optional) Randomization factor
550
+ * Default: 0.2.
551
+ */
552
+ factor?: number;
553
+
554
+ /**
555
+ * (Optional) Exponential factor
556
+ * Default: 2.
557
+ */
558
+ multiplier?: number;
559
+
560
+ /**
561
+ * (Optional) Max number of retries per call
562
+ * Default: 5.
563
+ */
564
+ retries?: number;
565
+ };
566
+ };
567
+ }>;
568
+ };
201
569
  };
202
570
  };
203
571
  };