autotel-subscribers 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/LICENSE +21 -0
  2. package/README.md +669 -0
  3. package/dist/amplitude.cjs +2486 -0
  4. package/dist/amplitude.cjs.map +1 -0
  5. package/dist/amplitude.d.cts +49 -0
  6. package/dist/amplitude.d.ts +49 -0
  7. package/dist/amplitude.js +2463 -0
  8. package/dist/amplitude.js.map +1 -0
  9. package/dist/event-subscriber-base-CnF3V56W.d.cts +182 -0
  10. package/dist/event-subscriber-base-CnF3V56W.d.ts +182 -0
  11. package/dist/factories.cjs +16660 -0
  12. package/dist/factories.cjs.map +1 -0
  13. package/dist/factories.d.cts +304 -0
  14. package/dist/factories.d.ts +304 -0
  15. package/dist/factories.js +16624 -0
  16. package/dist/factories.js.map +1 -0
  17. package/dist/index.cjs +16575 -0
  18. package/dist/index.cjs.map +1 -0
  19. package/dist/index.d.cts +179 -0
  20. package/dist/index.d.ts +179 -0
  21. package/dist/index.js +16539 -0
  22. package/dist/index.js.map +1 -0
  23. package/dist/middleware.cjs +220 -0
  24. package/dist/middleware.cjs.map +1 -0
  25. package/dist/middleware.d.cts +227 -0
  26. package/dist/middleware.d.ts +227 -0
  27. package/dist/middleware.js +208 -0
  28. package/dist/middleware.js.map +1 -0
  29. package/dist/mixpanel.cjs +2940 -0
  30. package/dist/mixpanel.cjs.map +1 -0
  31. package/dist/mixpanel.d.cts +47 -0
  32. package/dist/mixpanel.d.ts +47 -0
  33. package/dist/mixpanel.js +2932 -0
  34. package/dist/mixpanel.js.map +1 -0
  35. package/dist/posthog.cjs +4115 -0
  36. package/dist/posthog.cjs.map +1 -0
  37. package/dist/posthog.d.cts +299 -0
  38. package/dist/posthog.d.ts +299 -0
  39. package/dist/posthog.js +4113 -0
  40. package/dist/posthog.js.map +1 -0
  41. package/dist/segment.cjs +6822 -0
  42. package/dist/segment.cjs.map +1 -0
  43. package/dist/segment.d.cts +49 -0
  44. package/dist/segment.d.ts +49 -0
  45. package/dist/segment.js +6794 -0
  46. package/dist/segment.js.map +1 -0
  47. package/dist/slack.cjs +368 -0
  48. package/dist/slack.cjs.map +1 -0
  49. package/dist/slack.d.cts +126 -0
  50. package/dist/slack.d.ts +126 -0
  51. package/dist/slack.js +366 -0
  52. package/dist/slack.js.map +1 -0
  53. package/dist/webhook.cjs +100 -0
  54. package/dist/webhook.cjs.map +1 -0
  55. package/dist/webhook.d.cts +53 -0
  56. package/dist/webhook.d.ts +53 -0
  57. package/dist/webhook.js +98 -0
  58. package/dist/webhook.js.map +1 -0
  59. package/examples/quickstart-custom-subscriber.ts +144 -0
  60. package/examples/subscriber-bigquery.ts +219 -0
  61. package/examples/subscriber-databricks.ts +280 -0
  62. package/examples/subscriber-kafka.ts +326 -0
  63. package/examples/subscriber-kinesis.ts +307 -0
  64. package/examples/subscriber-posthog.ts +421 -0
  65. package/examples/subscriber-pubsub.ts +336 -0
  66. package/examples/subscriber-snowflake.ts +232 -0
  67. package/package.json +141 -0
  68. package/src/amplitude.test.ts +231 -0
  69. package/src/amplitude.ts +148 -0
  70. package/src/event-subscriber-base.ts +325 -0
  71. package/src/factories.ts +197 -0
  72. package/src/index.ts +50 -0
  73. package/src/middleware.ts +489 -0
  74. package/src/mixpanel.test.ts +194 -0
  75. package/src/mixpanel.ts +134 -0
  76. package/src/mock-event-subscriber.ts +333 -0
  77. package/src/posthog.test.ts +629 -0
  78. package/src/posthog.ts +530 -0
  79. package/src/segment.test.ts +228 -0
  80. package/src/segment.ts +148 -0
  81. package/src/slack.ts +383 -0
  82. package/src/streaming-event-subscriber.ts +323 -0
  83. package/src/testing/index.ts +37 -0
  84. package/src/testing/mock-webhook-server.ts +242 -0
  85. package/src/testing/subscriber-test-harness.ts +365 -0
  86. package/src/webhook.test.ts +264 -0
  87. package/src/webhook.ts +158 -0
package/examples/subscriber-databricks.ts
@@ -0,0 +1,280 @@
+ /**
+  * Databricks Subscriber Example
+  *
+  * Sends events to Databricks Delta Lake via REST API.
+  * This is a complete, production-ready implementation.
+  *
+  * Installation:
+  * ```bash
+  * # No additional dependencies required (uses fetch)
+  * ```
+  *
+  * Set up the Databricks table:
+  * ```sql
+  * CREATE TABLE events.events (
+  *   event_id STRING NOT NULL,
+  *   event_type STRING NOT NULL,
+  *   event_name STRING NOT NULL,
+  *   attributes MAP<STRING, STRING>,
+  *   funnel STRING,
+  *   step STRING,
+  *   operation STRING,
+  *   outcome STRING,
+  *   value DECIMAL(18,2),
+  *   timestamp TIMESTAMP NOT NULL,
+  *   created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP()
+  * )
+  * USING DELTA
+  * PARTITIONED BY (DATE(timestamp))
+  * TBLPROPERTIES (
+  *   'delta.autoOptimize.optimizeWrite' = 'true',
+  *   'delta.autoOptimize.autoCompact' = 'true'
+  * );
+  * ```
+  *
+  * Set up authentication:
+  * 1. Generate a Personal Access Token in Databricks
+  * 2. Get your workspace URL (e.g., 'https://dbc-1234567-890.cloud.databricks.com')
+  * 3. Get your SQL warehouse ID
+  *
+  * Usage:
+  * ```typescript
+  * import { Events } from 'autotel/events';
+  * import { DatabricksSubscriber } from './subscriber-databricks';
+  *
+  * const events = new Events('app', {
+  *   subscribers: [
+  *     new DatabricksSubscriber({
+  *       host: 'https://dbc-1234567-890.cloud.databricks.com',
+  *       token: process.env.DATABRICKS_TOKEN!,
+  *       catalog: 'main',
+  *       schema: 'events',
+  *       table: 'events',
+  *       warehouseId: 'abc123def456' // SQL warehouse ID
+  *     })
+  *   ]
+  * });
+  *
+  * events.trackEvent('order.completed', { orderId: 'ord_123', amount: 99.99 });
+  * ```
+  */
+
+ import {
+   EventSubscriber,
+   type EventPayload,
+ } from '../src/event-subscriber-base';
+
+ export interface DatabricksSubscriberConfig {
+   /** Databricks workspace URL (e.g., 'https://dbc-1234567-890.cloud.databricks.com') */
+   host: string;
+   /** Personal Access Token */
+   token: string;
+   /** Unity Catalog name (default: 'main') */
+   catalog?: string;
+   /** Schema/database name */
+   schema: string;
+   /** Table name */
+   table: string;
+   /** SQL Warehouse ID (for SQL execution) */
+   warehouseId: string;
+   /** Enable/disable subscriber */
+   enabled?: boolean;
+   /** Batch size (default: 200) */
+   batchSize?: number;
+   /** Flush interval in ms (default: 10000) */
+   flushInterval?: number;
+   /** Request timeout in ms (default: 30000) */
+   timeout?: number;
+ }
+
+ interface SQLExecutionResponse {
+   statement_id: string;
+   status: {
+     state: 'PENDING' | 'RUNNING' | 'SUCCEEDED' | 'FAILED' | 'CANCELED';
+   };
+ }
+
+ export class DatabricksSubscriber extends EventSubscriber {
+   readonly name = 'DatabricksSubscriber';
+   readonly version = '1.0.0';
+
+   private config: Required<DatabricksSubscriberConfig>;
+   private buffer: EventPayload[] = [];
+   private flushIntervalHandle: NodeJS.Timeout | null = null;
+
+   constructor(config: DatabricksSubscriberConfig) {
+     super();
+
+     // Set defaults
+     this.config = {
+       catalog: 'main',
+       enabled: true,
+       batchSize: 200,
+       flushInterval: 10_000,
+       timeout: 30_000,
+       ...config,
+     };
+
+     this.enabled = this.config.enabled;
+
+     if (this.enabled) {
+       this.startFlushInterval();
+     }
+   }
+
+   private startFlushInterval(): void {
+     this.flushIntervalHandle = setInterval(() => {
+       void this.flushBuffer();
+     }, this.config.flushInterval);
+   }
+
+   protected async sendToDestination(payload: EventPayload): Promise<void> {
+     this.buffer.push(payload);
+
+     // Auto-flush at batch size
+     if (this.buffer.length >= this.config.batchSize) {
+       await this.flushBuffer();
+     }
+   }
+
+   private async flushBuffer(): Promise<void> {
+     if (this.buffer.length === 0) return;
+
+     const batch = [...this.buffer];
+     this.buffer = [];
+
+     try {
+       await this.insertBatch(batch);
+     } catch (error) {
+       console.error('[DatabricksSubscriber] Failed to flush batch:', error);
+       // Re-add to buffer for retry
+       this.buffer.unshift(...batch);
+     }
+   }
+
+   private async insertBatch(events: EventPayload[]): Promise<void> {
+     // Build VALUES clause
+     const values = events
+       .map((event) => {
+         const eventId = crypto.randomUUID();
+         const attributes = event.attributes
+           ? Object.entries(event.attributes)
+               .map(
+                 ([key, value]) =>
+                   `'${this.escapeSql(key)}', '${this.escapeSql(String(value))}'`
+               )
+               .join(', ')
+           : '';
+
+         return `(
+           '${eventId}',
+           '${this.escapeSql(event.type)}',
+           '${this.escapeSql(event.name)}',
+           ${attributes ? `map(${attributes})` : 'map()'},
+           ${event.funnel ? `'${this.escapeSql(event.funnel)}'` : 'NULL'},
+           ${event.step ? `'${this.escapeSql(event.step)}'` : 'NULL'},
+           ${event.operation ? `'${this.escapeSql(event.operation)}'` : 'NULL'},
+           ${event.outcome ? `'${this.escapeSql(event.outcome)}'` : 'NULL'},
+           ${event.value === undefined ? 'NULL' : event.value},
+           CAST('${event.timestamp}' AS TIMESTAMP)
+         )`;
+       })
+       .join(',\n');
+
+     const sql = `
+       INSERT INTO ${this.config.catalog}.${this.config.schema}.${this.config.table}
+         (event_id, event_type, event_name, attributes, funnel, step, operation, outcome, value, timestamp)
+       VALUES ${values}
+     `;
+
+     await this.executeSql(sql);
+   }
+
+   private async executeSql(sql: string): Promise<void> {
+     // Execute SQL via Databricks SQL API
+     const response = await fetch(
+       `${this.config.host}/api/2.0/sql/statements`,
+       {
+         method: 'POST',
+         headers: {
+           Authorization: `Bearer ${this.config.token}`,
+           'Content-Type': 'application/json',
+         },
+         body: JSON.stringify({
+           statement: sql,
+           warehouse_id: this.config.warehouseId,
+           wait_timeout: `${this.config.timeout / 1000}s`,
+         }),
+         signal: AbortSignal.timeout(this.config.timeout),
+       }
+     );
+
+     if (!response.ok) {
+       const error = await response.text();
+       throw new Error(
+         `Databricks API returned ${response.status}: ${error}`
+       );
+     }
+
+     const result: SQLExecutionResponse = await response.json();
+
+     // Check execution status
+     if (result.status.state === 'FAILED') {
+       throw new Error('SQL execution failed');
+     }
+
+     // For long-running queries, you might want to poll for completion
+     // This example assumes synchronous execution (wait_timeout)
+   }
+
+   private escapeSql(value: string): string {
+     // Escape single quotes for SQL
+     return value.replaceAll('\'', "''");
+   }
+
+   protected handleError(error: Error, payload: EventPayload): void {
+     console.error(
+       `[DatabricksSubscriber] Failed to send ${payload.type}:`,
+       error,
+       {
+         eventName: payload.name,
+         attributes: payload.attributes,
+       }
+     );
+
+     // Databricks-specific error handling
+     if (error.message.includes('401')) {
+       console.error(
+         '[DatabricksSubscriber] Authentication failed - check your token'
+       );
+     }
+
+     if (error.message.includes('warehouse')) {
+       console.error(
+         '[DatabricksSubscriber] SQL warehouse error - check warehouse ID and status'
+       );
+     }
+
+     if (error.message.includes('timeout')) {
+       console.error(
+         '[DatabricksSubscriber] Timeout - consider increasing timeout or reducing batch size'
+       );
+     }
+   }
+
+   async shutdown(): Promise<void> {
+     // Clear flush interval
+     if (this.flushIntervalHandle) {
+       clearInterval(this.flushIntervalHandle);
+       this.flushIntervalHandle = null;
+     }
+
+     // Flush remaining events
+     await this.flushBuffer();
+
+     // Wait for pending requests
+     await super.shutdown();
+
+     console.log('[DatabricksSubscriber] Shutdown complete');
+   }
+ }
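The executeSql() comment above notes that long-running statements may need to be polled for completion instead of relying on wait_timeout alone. The sketch below shows one way to do that; the pollStatement helper and the GET /api/2.0/sql/statements/{statement_id} status endpoint are assumptions for illustration and are not part of this package.

```typescript
// Hypothetical poll loop for long-running statements. The GET status endpoint
// path is an assumption based on the Databricks SQL Statement Execution API;
// verify it against your workspace's API version before relying on it.
async function pollStatement(
  host: string,
  token: string,
  statementId: string,
  intervalMs = 1000,
  maxAttempts = 30
): Promise<void> {
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const response = await fetch(`${host}/api/2.0/sql/statements/${statementId}`, {
      headers: { Authorization: `Bearer ${token}` },
    });
    if (!response.ok) {
      throw new Error(`Databricks API returned ${response.status}`);
    }
    // Same shape as the SQLExecutionResponse interface in the example above
    const result = (await response.json()) as {
      status: { state: 'PENDING' | 'RUNNING' | 'SUCCEEDED' | 'FAILED' | 'CANCELED' };
    };
    if (result.status.state === 'SUCCEEDED') return;
    if (result.status.state === 'FAILED' || result.status.state === 'CANCELED') {
      throw new Error(`SQL execution ${result.status.state.toLowerCase()}`);
    }
    // Still PENDING or RUNNING: wait before checking again
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
  throw new Error('Timed out waiting for SQL statement to complete');
}
```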
package/examples/subscriber-kafka.ts
@@ -0,0 +1,326 @@
+ /**
+  * Kafka Streaming Subscriber Example
+  *
+  * Production-ready Kafka subscriber for high-throughput, ordered event streaming.
+  *
+  * Installation:
+  * ```bash
+  * pnpm add kafkajs
+  * ```
+  *
+  * Features:
+  * - Partitioning by userId for ordered events per user
+  * - High-throughput batching (configurable up to 10,000+ events/batch)
+  * - Backpressure handling
+  * - Automatic retries with exponential backoff
+  * - Compression support (gzip, snappy, lz4, zstd)
+  * - Graceful shutdown with buffer draining
+  *
+  * Usage:
+  * ```typescript
+  * import { Events } from 'autotel/events';
+  * import { KafkaSubscriber } from './subscriber-kafka';
+  *
+  * const events = new Events('app', {
+  *   subscribers: [
+  *     new KafkaSubscriber({
+  *       clientId: 'events-producer',
+  *       brokers: ['kafka1:9092', 'kafka2:9092', 'kafka3:9092'],
+  *       topic: 'events.events',
+  *       partitionStrategy: 'userId', // or 'tenantId', 'eventType', 'round-robin'
+  *       compression: 'gzip',
+  *       maxBufferSize: 10000,
+  *       maxBatchSize: 1000,
+  *       bufferOverflowStrategy: 'block'
+  *     })
+  *   ]
+  * });
+  *
+  * // High-throughput: 10k+ events/sec
+  * for (let i = 0; i < 10000; i++) {
+  *   await events.trackEvent('page.viewed', { userId: `user_${i % 100}` });
+  * }
+  *
+  * // Graceful shutdown
+  * await events.flush();
+  * ```
+  */
+
+ import {
+   StreamingEventSubscriber,
+   type BufferOverflowStrategy,
+ } from '../src/streaming-event-subscriber';
+ import type { EventPayload } from '../src/event-subscriber-base';
+ import { Kafka, Producer, CompressionTypes, type ProducerRecord } from 'kafkajs';
+
+ type CompressionType = 'gzip' | 'snappy' | 'lz4' | 'zstd' | 'none';
+ type PartitionStrategy = 'userId' | 'tenantId' | 'eventType' | 'round-robin';
+
+ export interface KafkaSubscriberConfig {
+   /** Kafka client ID */
+   clientId: string;
+
+   /** Kafka broker addresses */
+   brokers: string[];
+
+   /** Topic to publish events to */
+   topic: string;
+
+   /** Partitioning strategy (default: 'userId') */
+   partitionStrategy?: PartitionStrategy;
+
+   /** Compression type (default: 'gzip') */
+   compression?: CompressionType;
+
+   /** Enable/disable subscriber */
+   enabled?: boolean;
+
+   /** Maximum buffer size (default: 10000) */
+   maxBufferSize?: number;
+
+   /** Maximum batch size (default: 1000) */
+   maxBatchSize?: number;
+
+   /** Buffer overflow strategy (default: 'block') */
+   bufferOverflowStrategy?: BufferOverflowStrategy;
+
+   /** Flush interval in ms (default: 1000) */
+   flushIntervalMs?: number;
+
+   /** SASL authentication (optional) */
+   sasl?: {
+     mechanism: 'plain' | 'scram-sha-256' | 'scram-sha-512';
+     username: string;
+     password: string;
+   };
+
+   /** SSL/TLS configuration (optional) */
+   ssl?: boolean;
+ }
+
+ export class KafkaSubscriber extends StreamingEventSubscriber {
+   readonly name = 'KafkaSubscriber';
+   readonly version = '1.0.0';
+
+   private kafka: Kafka;
+   private producer: Producer;
+   private subscriberConfig: Required<Omit<KafkaSubscriberConfig, 'sasl' | 'ssl'>> & {
+     sasl?: KafkaSubscriberConfig['sasl'];
+     ssl?: boolean;
+   };
+   private roundRobinCounter = 0;
+   private isConnected = false;
+
+   constructor(config: KafkaSubscriberConfig) {
+     super({
+       maxBufferSize: config.maxBufferSize ?? 10_000,
+       maxBatchSize: config.maxBatchSize ?? 1000,
+       bufferOverflowStrategy: config.bufferOverflowStrategy ?? 'block',
+       flushIntervalMs: config.flushIntervalMs ?? 1000,
+     });
+
+     // Set config defaults
+     this.subscriberConfig = {
+       clientId: config.clientId,
+       brokers: config.brokers,
+       topic: config.topic,
+       partitionStrategy: config.partitionStrategy ?? 'userId',
+       compression: config.compression ?? 'gzip',
+       enabled: config.enabled ?? true,
+       maxBufferSize: config.maxBufferSize ?? 10_000,
+       maxBatchSize: config.maxBatchSize ?? 1000,
+       bufferOverflowStrategy: config.bufferOverflowStrategy ?? 'block',
+       flushIntervalMs: config.flushIntervalMs ?? 1000,
+       sasl: config.sasl,
+       ssl: config.ssl,
+     };
+
+     this.enabled = this.subscriberConfig.enabled;
+
+     if (this.enabled) {
+       this.initializeKafka();
+     }
+   }
+
+   private initializeKafka(): void {
+     try {
+       // Initialize Kafka client
+       this.kafka = new Kafka({
+         clientId: this.subscriberConfig.clientId,
+         brokers: this.subscriberConfig.brokers,
+         sasl: this.subscriberConfig.sasl,
+         ssl: this.subscriberConfig.ssl,
+         retry: {
+           initialRetryTime: 100,
+           retries: 8,
+           maxRetryTime: 30_000,
+           multiplier: 2,
+         },
+       });
+
+       // Create producer (compression is applied per send() in sendBatch)
+       this.producer = this.kafka.producer({
+         allowAutoTopicCreation: false,
+         maxInFlightRequests: 5,
+         idempotent: true, // Exactly-once semantics
+       });
+
+       // Connect asynchronously
+       void this.connect();
+     } catch (error) {
+       console.error('[KafkaSubscriber] Failed to initialize:', error);
+       this.enabled = false;
+     }
+   }
+
+   private async connect(): Promise<void> {
+     try {
+       await this.producer.connect();
+       this.isConnected = true;
+       console.log('[KafkaSubscriber] Connected successfully');
+     } catch (error) {
+       console.error('[KafkaSubscriber] Failed to connect:', error);
+       this.enabled = false;
+       this.isConnected = false;
+     }
+   }
+
+   private getCompressionType(compression: CompressionType): CompressionTypes {
+     switch (compression) {
+       case 'gzip': {
+         return CompressionTypes.GZIP;
+       }
+       case 'snappy': {
+         return CompressionTypes.Snappy;
+       }
+       case 'lz4': {
+         return CompressionTypes.LZ4;
+       }
+       case 'zstd': {
+         return CompressionTypes.ZSTD;
+       }
+       default: {
+         return CompressionTypes.None;
+       }
+     }
+   }
+
+   /**
+    * Get partition key based on configured strategy
+    */
+   protected getPartitionKey(payload: EventPayload): string {
+     switch (this.subscriberConfig.partitionStrategy) {
+       case 'userId': {
+         return payload.attributes?.userId?.toString() || 'default';
+       }
+
+       case 'tenantId': {
+         return payload.attributes?.tenantId?.toString() || 'default';
+       }
+
+       case 'eventType': {
+         // 'event', 'funnel', 'outcome', 'value'
+         return payload.type;
+       }
+
+       case 'round-robin': {
+         // Round-robin across partitions
+         this.roundRobinCounter = (this.roundRobinCounter + 1) % 100;
+         return `partition-${this.roundRobinCounter}`;
+       }
+
+       default: {
+         return 'default';
+       }
+     }
+   }
+
+   /**
+    * Send batch of events to Kafka
+    */
+   protected async sendBatch(events: EventPayload[]): Promise<void> {
+     if (!this.isConnected) {
+       throw new Error('[KafkaSubscriber] Producer not connected');
+     }
+
+     // Build Kafka messages
+     const messages = events.map((event) => ({
+       key: this.getPartitionKey(event),
+       value: JSON.stringify(event),
+       headers: {
+         'event-type': event.type,
+         'event-name': event.name,
+         timestamp: event.timestamp,
+       },
+     }));
+
+     // Send to Kafka (compression is a per-send option in kafkajs)
+     const record: ProducerRecord = {
+       topic: this.subscriberConfig.topic,
+       compression: this.getCompressionType(this.subscriberConfig.compression),
+       messages,
+     };
+
+     try {
+       const result = await this.producer.send(record);
+
+       // Log successful send (debug)
+       if (process.env.DEBUG) {
+         console.log(
+           `[KafkaSubscriber] Sent ${messages.length} events to partition ${result[0].partition}`
+         );
+       }
+     } catch (error) {
+       console.error(
+         `[KafkaSubscriber] Failed to send ${messages.length} events:`,
+         error
+       );
+       throw error; // Re-throw for retry logic
+     }
+   }
+
+   /**
+    * Handle errors (override from EventSubscriber)
+    */
+   protected handleError(error: Error, payload: EventPayload): void {
+     console.error(
+       `[KafkaSubscriber] Failed to process ${payload.type} event:`,
+       error,
+       {
+         eventName: payload.name,
+         partitionKey: this.getPartitionKey(payload),
+       }
+     );
+
+     // Check for specific Kafka errors
+     if (error.message.includes('NOT_LEADER_FOR_PARTITION')) {
+       console.error(
+         '[KafkaSubscriber] Partition leadership changed - will retry'
+       );
+     }
+
+     if (error.message.includes('BROKER_NOT_AVAILABLE')) {
+       console.error('[KafkaSubscriber] Broker unavailable - check cluster health');
+     }
+   }
+
+   /**
+    * Graceful shutdown
+    */
+   async shutdown(): Promise<void> {
+     console.log('[KafkaSubscriber] Starting graceful shutdown...');
+
+     // Flush buffer and drain pending requests
+     await super.shutdown();
+
+     // Disconnect producer
+     if (this.isConnected && this.producer) {
+       try {
+         await this.producer.disconnect();
+         this.isConnected = false;
+         console.log('[KafkaSubscriber] Disconnected successfully');
+       } catch (error) {
+         console.error('[KafkaSubscriber] Error during disconnect:', error);
+       }
+     }
+   }
+ }
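Both subscribers expose an async shutdown() that drains buffered events before disconnecting. In a long-running service you would typically call it from a process signal handler so nothing buffered is lost on deploys; a minimal sketch is below, assuming the KafkaSubscriber constructor options shown in the usage block above (the signal wiring itself is not part of the package).

```typescript
import { KafkaSubscriber } from './subscriber-kafka';

const subscriber = new KafkaSubscriber({
  clientId: 'events-producer',
  brokers: ['kafka1:9092'],
  topic: 'events.events',
});

// Drain the buffer and disconnect the producer before the process exits.
async function shutdownGracefully(signal: string): Promise<void> {
  console.log(`Received ${signal}, shutting down KafkaSubscriber...`);
  await subscriber.shutdown();
  process.exit(0);
}

process.on('SIGTERM', () => void shutdownGracefully('SIGTERM'));
process.on('SIGINT', () => void shutdownGracefully('SIGINT'));
```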