@hotmeshio/hotmesh 0.10.2 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +1 -1
  2. package/build/modules/enums.d.ts +1 -0
  3. package/build/modules/enums.js +3 -1
  4. package/build/modules/errors.d.ts +2 -0
  5. package/build/modules/errors.js +2 -0
  6. package/build/modules/key.js +3 -2
  7. package/build/package.json +2 -2
  8. package/build/services/activities/worker.js +10 -0
  9. package/build/services/dba/index.d.ts +2 -1
  10. package/build/services/dba/index.js +11 -2
  11. package/build/services/durable/client.js +6 -1
  12. package/build/services/durable/exporter.d.ts +15 -0
  13. package/build/services/durable/exporter.js +384 -5
  14. package/build/services/durable/schemas/factory.d.ts +1 -1
  15. package/build/services/durable/schemas/factory.js +27 -4
  16. package/build/services/durable/worker.d.ts +2 -2
  17. package/build/services/durable/worker.js +15 -9
  18. package/build/services/durable/workflow/context.js +2 -0
  19. package/build/services/durable/workflow/execChild.js +5 -2
  20. package/build/services/durable/workflow/hook.js +6 -0
  21. package/build/services/durable/workflow/proxyActivities.js +3 -4
  22. package/build/services/engine/index.d.ts +2 -2
  23. package/build/services/engine/index.js +10 -5
  24. package/build/services/exporter/index.d.ts +16 -2
  25. package/build/services/exporter/index.js +76 -0
  26. package/build/services/hotmesh/index.d.ts +2 -2
  27. package/build/services/hotmesh/index.js +2 -2
  28. package/build/services/router/config/index.d.ts +2 -2
  29. package/build/services/router/config/index.js +2 -1
  30. package/build/services/router/consumption/index.js +80 -5
  31. package/build/services/store/index.d.ts +52 -0
  32. package/build/services/store/providers/postgres/exporter-sql.d.ts +40 -0
  33. package/build/services/store/providers/postgres/exporter-sql.js +92 -0
  34. package/build/services/store/providers/postgres/kvtables.js +6 -0
  35. package/build/services/store/providers/postgres/postgres.d.ts +42 -0
  36. package/build/services/store/providers/postgres/postgres.js +151 -0
  37. package/build/services/stream/index.d.ts +1 -0
  38. package/build/services/stream/providers/postgres/kvtables.d.ts +1 -1
  39. package/build/services/stream/providers/postgres/kvtables.js +235 -82
  40. package/build/services/stream/providers/postgres/lifecycle.d.ts +4 -3
  41. package/build/services/stream/providers/postgres/lifecycle.js +6 -5
  42. package/build/services/stream/providers/postgres/messages.d.ts +14 -6
  43. package/build/services/stream/providers/postgres/messages.js +153 -76
  44. package/build/services/stream/providers/postgres/notifications.d.ts +5 -2
  45. package/build/services/stream/providers/postgres/notifications.js +39 -35
  46. package/build/services/stream/providers/postgres/postgres.d.ts +21 -118
  47. package/build/services/stream/providers/postgres/postgres.js +87 -140
  48. package/build/services/stream/providers/postgres/scout.js +2 -2
  49. package/build/services/stream/providers/postgres/stats.js +3 -2
  50. package/build/services/stream/registry.d.ts +62 -0
  51. package/build/services/stream/registry.js +198 -0
  52. package/build/services/worker/index.js +20 -6
  53. package/build/types/durable.d.ts +6 -1
  54. package/build/types/error.d.ts +2 -0
  55. package/build/types/exporter.d.ts +84 -0
  56. package/build/types/hotmesh.d.ts +7 -1
  57. package/build/types/index.d.ts +1 -1
  58. package/build/types/stream.d.ts +2 -0
  59. package/package.json +2 -2
@@ -10,11 +10,9 @@ async function deploySchema(streamClient, appId, logger) {
10
10
  const releaseClient = isPool;
11
11
  try {
12
12
  const schemaName = appId.replace(/[^a-zA-Z0-9_]/g, '_');
13
- const tableName = `${schemaName}.streams`;
14
13
  // First, check if tables already exist (no lock needed)
15
- const tablesExist = await checkIfTablesExist(client, schemaName, tableName);
14
+ const tablesExist = await checkIfTablesExist(client, schemaName);
16
15
  if (tablesExist) {
17
- // Tables already exist, no need to acquire lock or create tables
18
16
  return;
19
17
  }
20
18
  // Tables don't exist, need to acquire lock and create them
@@ -24,10 +22,10 @@ async function deploySchema(streamClient, appId, logger) {
24
22
  try {
25
23
  await client.query('BEGIN');
26
24
  // Double-check tables don't exist (race condition safety)
27
- const tablesStillMissing = !(await checkIfTablesExist(client, schemaName, tableName));
25
+ const tablesStillMissing = !(await checkIfTablesExist(client, schemaName));
28
26
  if (tablesStillMissing) {
29
- await createTables(client, schemaName, tableName);
30
- await createNotificationTriggers(client, schemaName, tableName);
27
+ await createTables(client, schemaName);
28
+ await createNotificationTriggers(client, schemaName);
31
29
  }
32
30
  await client.query('COMMIT');
33
31
  }
@@ -41,7 +39,7 @@ async function deploySchema(streamClient, appId, logger) {
41
39
  await client.release();
42
40
  }
43
41
  // Wait for the deploy process to complete
44
- await waitForTablesCreation(streamClient, lockId, schemaName, tableName, logger);
42
+ await waitForTablesCreation(streamClient, lockId, schemaName, logger);
45
43
  return; // Already released client, don't release again in finally
46
44
  }
47
45
  }
@@ -72,16 +70,21 @@ function hashStringToInt(str) {
72
70
  }
73
71
  return Math.abs(hash);
74
72
  }
75
- async function checkIfTablesExist(client, schemaName, tableName) {
76
- // Check both streams table exists AND roles table (from store provider)
77
- // The roles table is created by the store provider and is used for scout role coordination
78
- const result = await client.query(`SELECT
79
- to_regclass($1) AS streams_table,
80
- to_regclass($2) AS roles_table`, [tableName, `${schemaName}.roles`]);
81
- return result.rows[0].streams_table !== null &&
73
+ async function checkIfTablesExist(client, schemaName) {
74
+ // Check engine_streams, worker_streams, AND roles table
75
+ const result = await client.query(`SELECT
76
+ to_regclass($1) AS engine_table,
77
+ to_regclass($2) AS worker_table,
78
+ to_regclass($3) AS roles_table`, [
79
+ `${schemaName}.engine_streams`,
80
+ `${schemaName}.worker_streams`,
81
+ `${schemaName}.roles`,
82
+ ]);
83
+ return result.rows[0].engine_table !== null &&
84
+ result.rows[0].worker_table !== null &&
82
85
  result.rows[0].roles_table !== null;
83
86
  }
84
- async function waitForTablesCreation(streamClient, lockId, schemaName, tableName, logger) {
87
+ async function waitForTablesCreation(streamClient, lockId, schemaName, logger) {
85
88
  let retries = 0;
86
89
  const maxRetries = Math.round(enums_1.HMSH_DEPLOYMENT_DELAY / enums_1.HMSH_DEPLOYMENT_PAUSE);
87
90
  while (retries < maxRetries) {
@@ -90,17 +93,14 @@ async function waitForTablesCreation(streamClient, lockId, schemaName, tableName
90
93
  streamClient?.idleCount !== undefined;
91
94
  const client = isPool ? await streamClient.connect() : streamClient;
92
95
  try {
93
- // Check if tables exist directly (most efficient check)
94
- const tablesExist = await checkIfTablesExist(client, schemaName, tableName);
96
+ const tablesExist = await checkIfTablesExist(client, schemaName);
95
97
  if (tablesExist) {
96
- // Tables now exist, deployment is complete
97
98
  return;
98
99
  }
99
100
  // Fallback: check if the lock has been released (indicates completion)
100
101
  const lockCheck = await client.query("SELECT NOT EXISTS (SELECT 1 FROM pg_locks WHERE locktype = 'advisory' AND objid = $1::bigint) AS unlocked", [lockId]);
101
102
  if (lockCheck.rows[0].unlocked) {
102
- // Lock has been released, tables should exist now
103
- const tablesExistAfterLock = await checkIfTablesExist(client, schemaName, tableName);
103
+ const tablesExistAfterLock = await checkIfTablesExist(client, schemaName);
104
104
  if (tablesExistAfterLock) {
105
105
  return;
106
106
  }
@@ -113,22 +113,23 @@ async function waitForTablesCreation(streamClient, lockId, schemaName, tableName
113
113
  }
114
114
  retries++;
115
115
  }
116
- logger.error('stream-table-create-timeout', { schemaName, tableName });
116
+ logger.error('stream-table-create-timeout', { schemaName });
117
117
  throw new Error('Timeout waiting for stream table creation');
118
118
  }
119
- async function createTables(client, schemaName, tableName) {
119
+ async function createTables(client, schemaName) {
120
120
  await client.query(`CREATE SCHEMA IF NOT EXISTS ${schemaName};`);
121
- // Main table creation with partitions
121
+ // ---- ENGINE_STREAMS table ----
122
+ const engineTable = `${schemaName}.engine_streams`;
122
123
  await client.query(`
123
- CREATE TABLE IF NOT EXISTS ${tableName} (
124
+ CREATE TABLE IF NOT EXISTS ${engineTable} (
124
125
  id BIGSERIAL,
125
126
  stream_name TEXT NOT NULL,
126
- group_name TEXT NOT NULL DEFAULT 'ENGINE',
127
127
  message TEXT NOT NULL,
128
128
  created_at TIMESTAMPTZ DEFAULT NOW(),
129
129
  reserved_at TIMESTAMPTZ,
130
130
  reserved_by TEXT,
131
131
  expired_at TIMESTAMPTZ,
132
+ dead_lettered_at TIMESTAMPTZ,
132
133
  max_retry_attempts INT DEFAULT 3,
133
134
  backoff_coefficient NUMERIC DEFAULT 10,
134
135
  maximum_interval_seconds INT DEFAULT 120,
@@ -138,86 +139,217 @@ async function createTables(client, schemaName, tableName) {
138
139
  ) PARTITION BY HASH (stream_name);
139
140
  `);
140
141
  for (let i = 0; i < 8; i++) {
141
- const partitionTableName = `${schemaName}.streams_part_${i}`;
142
142
  await client.query(`
143
- CREATE TABLE IF NOT EXISTS ${partitionTableName}
144
- PARTITION OF ${tableName}
143
+ CREATE TABLE IF NOT EXISTS ${schemaName}.engine_streams_part_${i}
144
+ PARTITION OF ${engineTable}
145
145
  FOR VALUES WITH (modulus 8, remainder ${i});
146
146
  `);
147
147
  }
148
- // Index for active messages (includes visible_at for visibility timeout support)
149
148
  await client.query(`
150
- CREATE INDEX IF NOT EXISTS idx_streams_active_messages
151
- ON ${tableName} (group_name, stream_name, reserved_at, visible_at, id)
149
+ CREATE INDEX IF NOT EXISTS idx_engine_streams_active_messages
150
+ ON ${engineTable} (stream_name, reserved_at, visible_at, id)
152
151
  WHERE reserved_at IS NULL AND expired_at IS NULL;
153
152
  `);
154
- // Optimized index for the simplified fetchMessages query (includes visible_at)
155
153
  await client.query(`
156
- CREATE INDEX IF NOT EXISTS idx_streams_message_fetch
157
- ON ${tableName} (stream_name, group_name, visible_at, id)
154
+ CREATE INDEX IF NOT EXISTS idx_engine_streams_message_fetch
155
+ ON ${engineTable} (stream_name, visible_at, id)
158
156
  WHERE expired_at IS NULL;
159
157
  `);
160
- // Index for expired messages
161
158
  await client.query(`
162
- CREATE INDEX IF NOT EXISTS idx_streams_expired_at
163
- ON ${tableName} (expired_at);
159
+ CREATE INDEX IF NOT EXISTS idx_engine_streams_expired_at
160
+ ON ${engineTable} (expired_at);
164
161
  `);
165
- // New index for stream stats optimization
166
162
  await client.query(`
167
- CREATE INDEX IF NOT EXISTS idx_stream_name_expired_at
168
- ON ${tableName} (stream_name)
163
+ CREATE INDEX IF NOT EXISTS idx_engine_stream_name_expired_at
164
+ ON ${engineTable} (stream_name)
169
165
  WHERE expired_at IS NULL;
170
166
  `);
171
- // TODO: revisit this index when solving automated cleanup
172
- // Optional index for querying by creation time, if needed
173
- // await client.query(`
174
- // CREATE INDEX IF NOT EXISTS idx_streams_created_at
175
- // ON ${tableName} (created_at);
176
- // `);
167
+ await client.query(`
168
+ CREATE INDEX IF NOT EXISTS idx_engine_streams_processed_volume
169
+ ON ${engineTable} (expired_at, stream_name)
170
+ WHERE expired_at IS NOT NULL;
171
+ `);
172
+ await client.query(`
173
+ CREATE INDEX IF NOT EXISTS idx_engine_streams_dead_lettered
174
+ ON ${engineTable} (dead_lettered_at, stream_name)
175
+ WHERE dead_lettered_at IS NOT NULL;
176
+ `);
177
+ // Migration: add dead_lettered_at column to existing tables
178
+ await client.query(`
179
+ DO $$ BEGIN
180
+ ALTER TABLE ${engineTable} ADD COLUMN IF NOT EXISTS dead_lettered_at TIMESTAMPTZ;
181
+ EXCEPTION WHEN duplicate_column THEN NULL;
182
+ END $$;
183
+ `);
184
+ // ---- WORKER_STREAMS table ----
185
+ const workerTable = `${schemaName}.worker_streams`;
186
+ await client.query(`
187
+ CREATE TABLE IF NOT EXISTS ${workerTable} (
188
+ id BIGSERIAL,
189
+ stream_name TEXT NOT NULL,
190
+ workflow_name TEXT NOT NULL DEFAULT '',
191
+ jid TEXT NOT NULL DEFAULT '',
192
+ aid TEXT NOT NULL DEFAULT '',
193
+ dad TEXT NOT NULL DEFAULT '',
194
+ msg_type TEXT NOT NULL DEFAULT '',
195
+ topic TEXT NOT NULL DEFAULT '',
196
+ message TEXT NOT NULL,
197
+ created_at TIMESTAMPTZ DEFAULT NOW(),
198
+ reserved_at TIMESTAMPTZ,
199
+ reserved_by TEXT,
200
+ expired_at TIMESTAMPTZ,
201
+ dead_lettered_at TIMESTAMPTZ,
202
+ max_retry_attempts INT DEFAULT 3,
203
+ backoff_coefficient NUMERIC DEFAULT 10,
204
+ maximum_interval_seconds INT DEFAULT 120,
205
+ visible_at TIMESTAMPTZ DEFAULT NOW(),
206
+ retry_attempt INT DEFAULT 0,
207
+ PRIMARY KEY (stream_name, id)
208
+ ) PARTITION BY HASH (stream_name);
209
+ `);
210
+ for (let i = 0; i < 8; i++) {
211
+ await client.query(`
212
+ CREATE TABLE IF NOT EXISTS ${schemaName}.worker_streams_part_${i}
213
+ PARTITION OF ${workerTable}
214
+ FOR VALUES WITH (modulus 8, remainder ${i});
215
+ `);
216
+ }
217
+ await client.query(`
218
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_active_messages
219
+ ON ${workerTable} (stream_name, reserved_at, visible_at, id)
220
+ WHERE reserved_at IS NULL AND expired_at IS NULL;
221
+ `);
222
+ await client.query(`
223
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_message_fetch
224
+ ON ${workerTable} (stream_name, visible_at, id)
225
+ WHERE expired_at IS NULL;
226
+ `);
227
+ await client.query(`
228
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_expired_at
229
+ ON ${workerTable} (expired_at);
230
+ `);
231
+ await client.query(`
232
+ CREATE INDEX IF NOT EXISTS idx_worker_stream_name_expired_at
233
+ ON ${workerTable} (stream_name)
234
+ WHERE expired_at IS NULL;
235
+ `);
236
+ await client.query(`
237
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_processed_volume
238
+ ON ${workerTable} (expired_at, stream_name)
239
+ WHERE expired_at IS NOT NULL;
240
+ `);
241
+ await client.query(`
242
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_dead_lettered
243
+ ON ${workerTable} (dead_lettered_at, stream_name)
244
+ WHERE dead_lettered_at IS NOT NULL;
245
+ `);
246
+ // Migration: add dead_lettered_at column to existing tables
247
+ await client.query(`
248
+ DO $$ BEGIN
249
+ ALTER TABLE ${workerTable} ADD COLUMN IF NOT EXISTS dead_lettered_at TIMESTAMPTZ;
250
+ EXCEPTION WHEN duplicate_column THEN NULL;
251
+ END $$;
252
+ `);
253
+ // ---- Export fidelity columns and indexes ----
254
+ // These columns surface stream message metadata for efficient job history queries.
255
+ // Migration: add columns to existing tables (no-op on fresh installs)
256
+ for (const col of ['jid', 'aid', 'dad', 'msg_type', 'topic']) {
257
+ await client.query(`
258
+ DO $$ BEGIN
259
+ ALTER TABLE ${workerTable} ADD COLUMN IF NOT EXISTS ${col} TEXT NOT NULL DEFAULT '';
260
+ EXCEPTION WHEN duplicate_column THEN NULL;
261
+ END $$;
262
+ `);
263
+ }
264
+ // All messages for a job, ordered by time
265
+ await client.query(`
266
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_jid_created
267
+ ON ${workerTable} (jid, created_at)
268
+ WHERE jid != '';
269
+ `);
270
+ // Activity-specific lookups within a job
271
+ await client.query(`
272
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_jid_aid
273
+ ON ${workerTable} (jid, aid, created_at)
274
+ WHERE jid != '';
275
+ `);
276
+ // Type-filtered queries (e.g., only worker invocations + responses)
277
+ await client.query(`
278
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_jid_type
279
+ ON ${workerTable} (jid, msg_type, created_at)
280
+ WHERE jid != '';
281
+ `);
177
282
  }
178
- async function createNotificationTriggers(client, schemaName, tableName) {
179
- // Create the notification function for INSERT events
283
+ async function createNotificationTriggers(client, schemaName) {
284
+ const engineTable = `${schemaName}.engine_streams`;
285
+ const workerTable = `${schemaName}.worker_streams`;
286
+ // ---- ENGINE notification trigger ----
287
+ await client.query(`
288
+ CREATE OR REPLACE FUNCTION ${schemaName}.notify_new_engine_stream_message()
289
+ RETURNS TRIGGER AS $$
290
+ DECLARE
291
+ channel_name TEXT;
292
+ payload JSON;
293
+ BEGIN
294
+ IF NEW.visible_at <= NOW() THEN
295
+ channel_name := 'eng_' || NEW.stream_name;
296
+ IF length(channel_name) > 63 THEN
297
+ channel_name := left(channel_name, 63);
298
+ END IF;
299
+
300
+ payload := json_build_object(
301
+ 'stream_name', NEW.stream_name,
302
+ 'table_type', 'engine'
303
+ );
304
+
305
+ PERFORM pg_notify(channel_name, payload::text);
306
+ END IF;
307
+
308
+ RETURN NEW;
309
+ END;
310
+ $$ LANGUAGE plpgsql;
311
+ `);
180
312
  await client.query(`
181
- CREATE OR REPLACE FUNCTION ${schemaName}.notify_new_stream_message()
313
+ DROP TRIGGER IF EXISTS notify_engine_stream_insert ON ${engineTable};
314
+ CREATE TRIGGER notify_engine_stream_insert
315
+ AFTER INSERT ON ${engineTable}
316
+ FOR EACH ROW
317
+ EXECUTE FUNCTION ${schemaName}.notify_new_engine_stream_message();
318
+ `);
319
+ // ---- WORKER notification trigger ----
320
+ await client.query(`
321
+ CREATE OR REPLACE FUNCTION ${schemaName}.notify_new_worker_stream_message()
182
322
  RETURNS TRIGGER AS $$
183
323
  DECLARE
184
324
  channel_name TEXT;
185
325
  payload JSON;
186
326
  BEGIN
187
- -- Only notify if message is immediately visible
188
- -- Messages with visibility timeout will be notified when they become visible
189
327
  IF NEW.visible_at <= NOW() THEN
190
- -- Create channel name: stream_{stream_name}_{group_name}
191
- -- Truncate if too long (PostgreSQL channel names limited to 63 chars)
192
- channel_name := 'stream_' || NEW.stream_name || '_' || NEW.group_name;
328
+ channel_name := 'wrk_' || NEW.stream_name;
193
329
  IF length(channel_name) > 63 THEN
194
330
  channel_name := left(channel_name, 63);
195
331
  END IF;
196
-
197
- -- Create minimal payload with only required fields
332
+
198
333
  payload := json_build_object(
199
334
  'stream_name', NEW.stream_name,
200
- 'group_name', NEW.group_name
335
+ 'table_type', 'worker'
201
336
  );
202
-
203
- -- Send notification
337
+
204
338
  PERFORM pg_notify(channel_name, payload::text);
205
339
  END IF;
206
-
340
+
207
341
  RETURN NEW;
208
342
  END;
209
343
  $$ LANGUAGE plpgsql;
210
344
  `);
211
- // Create trigger only on the main table - it will automatically apply to all partitions
212
345
  await client.query(`
213
- DROP TRIGGER IF EXISTS notify_stream_insert ON ${tableName};
214
- CREATE TRIGGER notify_stream_insert
215
- AFTER INSERT ON ${tableName}
346
+ DROP TRIGGER IF EXISTS notify_worker_stream_insert ON ${workerTable};
347
+ CREATE TRIGGER notify_worker_stream_insert
348
+ AFTER INSERT ON ${workerTable}
216
349
  FOR EACH ROW
217
- EXECUTE FUNCTION ${schemaName}.notify_new_stream_message();
350
+ EXECUTE FUNCTION ${schemaName}.notify_new_worker_stream_message();
218
351
  `);
219
- // Create helper function to notify about messages with expired visibility timeouts
220
- // This is called periodically by the router scout for responsive retry processing
352
+ // ---- Visibility timeout notification function (queries both tables) ----
221
353
  await client.query(`
222
354
  CREATE OR REPLACE FUNCTION ${schemaName}.notify_visible_messages()
223
355
  RETURNS INTEGER AS $$
@@ -227,39 +359,60 @@ async function createNotificationTriggers(client, schemaName, tableName) {
227
359
  payload JSON;
228
360
  notification_count INTEGER := 0;
229
361
  BEGIN
230
- -- Find all distinct streams with messages that are now visible
231
- -- Router will drain all messages when notified, so we just notify each channel once
232
- FOR msg IN
233
- SELECT DISTINCT stream_name, group_name
234
- FROM ${tableName}
362
+ -- Engine streams
363
+ FOR msg IN
364
+ SELECT DISTINCT stream_name
365
+ FROM ${engineTable}
366
+ WHERE visible_at <= NOW()
367
+ AND reserved_at IS NULL
368
+ AND expired_at IS NULL
369
+ LIMIT 50
370
+ LOOP
371
+ channel_name := 'eng_' || msg.stream_name;
372
+ IF length(channel_name) > 63 THEN
373
+ channel_name := left(channel_name, 63);
374
+ END IF;
375
+
376
+ payload := json_build_object(
377
+ 'stream_name', msg.stream_name,
378
+ 'table_type', 'engine'
379
+ );
380
+
381
+ PERFORM pg_notify(channel_name, payload::text);
382
+ notification_count := notification_count + 1;
383
+ END LOOP;
384
+
385
+ -- Worker streams
386
+ FOR msg IN
387
+ SELECT DISTINCT stream_name
388
+ FROM ${workerTable}
235
389
  WHERE visible_at <= NOW()
236
390
  AND reserved_at IS NULL
237
391
  AND expired_at IS NULL
238
- LIMIT 100 -- Prevent overwhelming the system
392
+ LIMIT 50
239
393
  LOOP
240
- -- Create channel name (same logic as INSERT trigger)
241
- channel_name := 'stream_' || msg.stream_name || '_' || msg.group_name;
394
+ channel_name := 'wrk_' || msg.stream_name;
242
395
  IF length(channel_name) > 63 THEN
243
396
  channel_name := left(channel_name, 63);
244
397
  END IF;
245
-
246
- -- Send minimal notification with only required fields
398
+
247
399
  payload := json_build_object(
248
400
  'stream_name', msg.stream_name,
249
- 'group_name', msg.group_name
401
+ 'table_type', 'worker'
250
402
  );
251
-
403
+
252
404
  PERFORM pg_notify(channel_name, payload::text);
253
405
  notification_count := notification_count + 1;
254
406
  END LOOP;
255
-
407
+
256
408
  RETURN notification_count;
257
409
  END;
258
410
  $$ LANGUAGE plpgsql;
259
411
  `);
260
412
  }
261
- function getNotificationChannelName(streamName, groupName) {
262
- const channelName = `stream_${streamName}_${groupName}`;
413
+ function getNotificationChannelName(streamName, isEngine) {
414
+ const prefix = isEngine ? 'eng_' : 'wrk_';
415
+ const channelName = `${prefix}${streamName}`;
263
416
  // PostgreSQL channel names are limited to 63 characters
264
417
  return channelName.length > 63 ? channelName.substring(0, 63) : channelName;
265
418
  }
@@ -6,7 +6,7 @@ import { ProviderClient } from '../../../../types/provider';
6
6
  */
7
7
  export declare function createStream(streamName: string): Promise<boolean>;
8
8
  /**
9
- * Delete a stream or all streams.
9
+ * Delete a stream or all streams from a specific table.
10
10
  */
11
11
  export declare function deleteStream(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, logger: ILogger): Promise<boolean>;
12
12
  /**
@@ -14,6 +14,7 @@ export declare function deleteStream(client: PostgresClientType & ProviderClient
14
14
  */
15
15
  export declare function createConsumerGroup(streamName: string, groupName: string): Promise<boolean>;
16
16
  /**
17
- * Delete a consumer group (removes all messages for that group).
17
+ * Delete messages for a stream from a specific table.
18
+ * No group_name needed since engine and worker are separate tables.
18
19
  */
19
- export declare function deleteConsumerGroup(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, logger: ILogger): Promise<boolean>;
20
+ export declare function deleteConsumerGroup(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, logger: ILogger): Promise<boolean>;
@@ -9,7 +9,7 @@ async function createStream(streamName) {
9
9
  }
10
10
  exports.createStream = createStream;
11
11
  /**
12
- * Delete a stream or all streams.
12
+ * Delete a stream or all streams from a specific table.
13
13
  */
14
14
  async function deleteStream(client, tableName, streamName, logger) {
15
15
  try {
@@ -39,15 +39,16 @@ async function createConsumerGroup(streamName, groupName) {
39
39
  }
40
40
  exports.createConsumerGroup = createConsumerGroup;
41
41
  /**
42
- * Delete a consumer group (removes all messages for that group).
42
+ * Delete messages for a stream from a specific table.
43
+ * No group_name needed since engine and worker are separate tables.
43
44
  */
44
- async function deleteConsumerGroup(client, tableName, streamName, groupName, logger) {
45
+ async function deleteConsumerGroup(client, tableName, streamName, logger) {
45
46
  try {
46
- await client.query(`DELETE FROM ${tableName} WHERE stream_name = $1 AND group_name = $2`, [streamName, groupName]);
47
+ await client.query(`DELETE FROM ${tableName} WHERE stream_name = $1`, [streamName]);
47
48
  return true;
48
49
  }
49
50
  catch (error) {
50
- logger.error(`postgres-stream-delete-group-error-${streamName}.${groupName}`, { error });
51
+ logger.error(`postgres-stream-delete-group-error-${streamName}`, { error });
51
52
  throw error;
52
53
  }
53
54
  }
@@ -8,20 +8,22 @@ import { ProviderClient, ProviderTransaction } from '../../../../types/provider'
8
8
  * When a transaction is provided, the SQL is added to the transaction
9
9
  * and executed atomically with other operations.
10
10
  */
11
- export declare function publishMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, messages: string[], options: PublishMessageConfig | undefined, logger: ILogger): Promise<string[] | ProviderTransaction>;
11
+ export declare function publishMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, isEngine: boolean, messages: string[], options: PublishMessageConfig | undefined, logger: ILogger): Promise<string[] | ProviderTransaction>;
12
12
  /**
13
13
  * Build SQL for publishing messages with retry policies and visibility delays.
14
- * Optimizes the INSERT statement based on whether retry config is present.
14
+ * Routes to engine_streams or worker_streams based on isEngine flag.
15
+ * Worker messages include a workflow_name column extracted from metadata.wfn.
15
16
  */
16
- export declare function buildPublishSQL(tableName: string, streamName: string, messages: string[], options?: PublishMessageConfig): {
17
+ export declare function buildPublishSQL(tableName: string, streamName: string, isEngine: boolean, messages: string[], options?: PublishMessageConfig): {
17
18
  sql: string;
18
19
  params: any[];
19
20
  };
20
21
  /**
21
22
  * Fetch messages from the stream with optional exponential backoff.
22
23
  * Uses SKIP LOCKED for high-concurrency consumption.
24
+ * No group_name filter needed - the table itself determines engine vs worker.
23
25
  */
24
- export declare function fetchMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, consumerName: string, options: {
26
+ export declare function fetchMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, isEngine: boolean, consumerName: string, options: {
25
27
  batchSize?: number;
26
28
  blockTimeout?: number;
27
29
  autoAck?: boolean;
@@ -37,12 +39,18 @@ export declare function fetchMessages(client: PostgresClientType & ProviderClien
37
39
  export declare function acknowledgeMessages(messageIds: string[]): Promise<number>;
38
40
  /**
39
41
  * Delete messages by soft-deleting them (setting expired_at).
42
+ * No group_name needed - stream_name + table is sufficient.
40
43
  */
41
- export declare function deleteMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, messageIds: string[], logger: ILogger): Promise<number>;
44
+ export declare function deleteMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, messageIds: string[], logger: ILogger): Promise<number>;
42
45
  /**
43
46
  * Acknowledge and delete messages in one operation.
44
47
  */
45
- export declare function ackAndDelete(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, messageIds: string[], logger: ILogger): Promise<number>;
48
+ export declare function ackAndDelete(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, messageIds: string[], logger: ILogger): Promise<number>;
49
+ /**
50
+ * Move messages to the dead-letter state by setting dead_lettered_at
51
+ * and expired_at. The message payload is preserved for inspection.
52
+ */
53
+ export declare function deadLetterMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, messageIds: string[], logger: ILogger): Promise<number>;
46
54
  /**
47
55
  * Retry messages (placeholder for future implementation).
48
56
  */