@hotmeshio/hotmesh 0.10.2 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/README.md +1 -1
  2. package/build/modules/errors.d.ts +2 -0
  3. package/build/modules/errors.js +2 -0
  4. package/build/modules/key.js +3 -2
  5. package/build/package.json +2 -2
  6. package/build/services/activities/worker.js +10 -0
  7. package/build/services/dba/index.d.ts +2 -1
  8. package/build/services/dba/index.js +11 -2
  9. package/build/services/durable/client.js +6 -1
  10. package/build/services/durable/exporter.d.ts +15 -0
  11. package/build/services/durable/exporter.js +343 -5
  12. package/build/services/durable/schemas/factory.d.ts +1 -1
  13. package/build/services/durable/schemas/factory.js +27 -4
  14. package/build/services/durable/worker.d.ts +2 -2
  15. package/build/services/durable/worker.js +15 -9
  16. package/build/services/durable/workflow/context.js +2 -0
  17. package/build/services/durable/workflow/execChild.js +5 -2
  18. package/build/services/durable/workflow/hook.js +6 -0
  19. package/build/services/durable/workflow/proxyActivities.js +3 -4
  20. package/build/services/engine/index.js +5 -3
  21. package/build/services/store/index.d.ts +40 -0
  22. package/build/services/store/providers/postgres/exporter-sql.d.ts +23 -0
  23. package/build/services/store/providers/postgres/exporter-sql.js +52 -0
  24. package/build/services/store/providers/postgres/kvtables.js +6 -0
  25. package/build/services/store/providers/postgres/postgres.d.ts +34 -0
  26. package/build/services/store/providers/postgres/postgres.js +99 -0
  27. package/build/services/stream/providers/postgres/kvtables.d.ts +1 -1
  28. package/build/services/stream/providers/postgres/kvtables.js +175 -82
  29. package/build/services/stream/providers/postgres/lifecycle.d.ts +4 -3
  30. package/build/services/stream/providers/postgres/lifecycle.js +6 -5
  31. package/build/services/stream/providers/postgres/messages.d.ts +9 -6
  32. package/build/services/stream/providers/postgres/messages.js +121 -75
  33. package/build/services/stream/providers/postgres/notifications.d.ts +5 -2
  34. package/build/services/stream/providers/postgres/notifications.js +39 -35
  35. package/build/services/stream/providers/postgres/postgres.d.ts +20 -118
  36. package/build/services/stream/providers/postgres/postgres.js +83 -140
  37. package/build/services/stream/registry.d.ts +62 -0
  38. package/build/services/stream/registry.js +198 -0
  39. package/build/services/worker/index.js +20 -6
  40. package/build/types/durable.d.ts +6 -1
  41. package/build/types/error.d.ts +2 -0
  42. package/build/types/exporter.d.ts +39 -0
  43. package/build/types/hotmesh.d.ts +7 -1
  44. package/build/types/stream.d.ts +2 -0
  45. package/package.json +2 -2
@@ -5,112 +5,20 @@ import { KeyStoreParams, StringAnyType } from '../../../../types';
5
5
  import { PostgresClientType } from '../../../../types/postgres';
6
6
  import { PublishMessageConfig, StreamConfig, StreamMessage, StreamStats } from '../../../../types/stream';
7
7
  import { ProviderClient, ProviderTransaction } from '../../../../types/provider';
8
+ /**
9
+ * Resolved stream target containing the table name and simplified stream name.
10
+ */
11
+ export interface StreamTarget {
12
+ tableName: string;
13
+ streamName: string;
14
+ isEngine: boolean;
15
+ }
8
16
  /**
9
17
  * PostgreSQL Stream Service
10
18
  *
11
- * High-performance stream message provider using PostgreSQL with LISTEN/NOTIFY.
12
- *
13
- * ## Module Organization
14
- *
15
- * This service is organized into focused modules following KISS principles:
16
- * - `postgres.ts` (this file) - Main orchestrator and service interface
17
- * - `kvtables.ts` - Schema deployment and table management
18
- * - `messages.ts` - Message CRUD operations (publish, fetch, ack, delete)
19
- * - `stats.ts` - Statistics and query operations
20
- * - `scout.ts` - Scout role coordination for polling visible messages
21
- * - `notifications.ts` - LISTEN/NOTIFY notification system with static state management
22
- * - `lifecycle.ts` - Stream and consumer group lifecycle operations
23
- *
24
- * ## Lifecycle
25
- *
26
- * ### Initialization (`init`)
27
- * 1. Deploy PostgreSQL schema (tables, indexes, triggers, functions)
28
- * 2. Create ScoutManager for coordinating visibility timeout polling
29
- * 3. Create NotificationManager for LISTEN/NOTIFY event handling
30
- * 4. Set up notification handler (once per client, shared across instances)
31
- * 5. Start fallback poller (backup for missed notifications)
32
- * 6. Start router scout poller (for visibility timeout processing)
33
- *
34
- * ### Shutdown (`cleanup`)
35
- * 1. Stop router scout polling loop
36
- * 2. Release scout role if held
37
- * 3. Stop notification consumers for this instance
38
- * 4. UNLISTEN from channels when last instance disconnects
39
- * 5. Clean up fallback poller when last instance disconnects
40
- * 6. Remove notification handlers when last instance disconnects
41
- *
42
- * ## Notification System (LISTEN/NOTIFY)
43
- *
44
- * ### Real-time Message Delivery
45
- * - PostgreSQL trigger on INSERT sends NOTIFY when messages are immediately visible
46
- * - Messages with visibility timeout are NOT notified on INSERT
47
- * - Multiple service instances share the same client and notification handlers
48
- * - Static state ensures only ONE LISTEN per channel across all instances
49
- *
50
- * ### Components
51
- * - **Notification Handler**: Listens for PostgreSQL NOTIFY events
52
- * - **Fallback Poller**: Polls every 30s (default) for missed messages
53
- * - **Router Scout**: Active role-holder polls visible messages frequently (~100ms)
54
- * - **Visibility Function**: `notify_visible_messages()` checks for expired timeouts
55
- *
56
- * ## Scout Role (Visibility Timeout Processing)
57
- *
58
- * When messages are published with visibility timeouts (delays), they need to be
59
- * processed when they become visible. The scout role ensures this happens efficiently:
60
- *
61
- * 1. **Role Acquisition**: One instance per app acquires "router" scout role
62
- * 2. **Fast Polling**: Scout polls `notify_visible_messages()` every ~100ms
63
- * 3. **Notification**: Function triggers NOTIFY for streams with visible messages
64
- * 4. **Role Rotation**: Role expires after interval, another instance can claim it
65
- * 5. **Fallback**: Non-scouts sleep longer, try to acquire role periodically
66
- *
67
- * ## Message Flow
68
- *
69
- * ### Publishing
70
- * 1. Messages inserted into partitioned table
71
- * 2. If immediately visible → INSERT trigger sends NOTIFY
72
- * 3. If visibility timeout → no NOTIFY (scout will handle when visible)
73
- *
74
- * ### Consuming (Event-Driven)
75
- * 1. Consumer calls `consumeMessages` with notification callback
76
- * 2. Service executes LISTEN on channel `stream_{name}_{group}`
77
- * 3. On NOTIFY → fetch messages → invoke callback
78
- * 4. Initial fetch done immediately (catch any queued messages)
79
- *
80
- * ### Consuming (Polling)
81
- * 1. Consumer calls `consumeMessages` without callback
82
- * 2. Service directly queries and reserves messages
83
- * 3. Returns messages synchronously
84
- *
85
- * ## Reliability Guarantees
86
- *
87
- * - **Notification Fallback**: Poller catches missed notifications every 30s
88
- * - **Visibility Scout**: Ensures delayed messages are processed when visible
89
- * - **Graceful Degradation**: Falls back to polling if LISTEN fails
90
- * - **Shared State**: Multiple instances coordinate via static maps
91
- * - **Race Condition Safe**: SKIP LOCKED prevents message duplication
92
- *
93
- * @example
94
- * ```typescript
95
- * // Initialize service
96
- * const service = new PostgresStreamService(client, storeClient, config);
97
- * await service.init('namespace', 'appId', logger);
98
- *
99
- * // Event-driven consumption (recommended)
100
- * await service.consumeMessages('stream', 'group', 'consumer', {
101
- * notificationCallback: (messages) => {
102
- * // Process messages in real-time
103
- * }
104
- * });
105
- *
106
- * // Polling consumption
107
- * const messages = await service.consumeMessages('stream', 'group', 'consumer', {
108
- * batchSize: 10
109
- * });
110
- *
111
- * // Cleanup on shutdown
112
- * await service.cleanup();
113
- * ```
19
+ * Uses separate `engine_streams` and `worker_streams` tables for
20
+ * security isolation and independent scaling. The `worker_streams`
21
+ * table includes a `workflow_name` column for routing.
114
22
  */
115
23
  declare class PostgresStreamService extends StreamService<PostgresClientType & ProviderClient, any> {
116
24
  namespace: string;
@@ -124,9 +32,15 @@ declare class PostgresStreamService extends StreamService<PostgresClientType & P
124
32
  private checkForMissedMessages;
125
33
  private fetchAndDeliverMessages;
126
34
  private getConsumerKey;
35
+ /**
36
+ * Resolves a conjoined stream key (e.g., `hmsh:appId:x:topic`) into
37
+ * the correct table name and simplified stream name.
38
+ */
39
+ resolveStreamTarget(streamKey: string): StreamTarget;
127
40
  mintKey(type: KeyType, params: KeyStoreParams): string;
128
41
  transact(): ProviderTransaction;
129
- getTableName(): string;
42
+ getEngineTableName(): string;
43
+ getWorkerTableName(): string;
130
44
  safeName(appId: string): string;
131
45
  createStream(streamName: string): Promise<boolean>;
132
46
  deleteStream(streamName: string): Promise<boolean>;
@@ -134,20 +48,8 @@ declare class PostgresStreamService extends StreamService<PostgresClientType & P
134
48
  deleteConsumerGroup(streamName: string, groupName: string): Promise<boolean>;
135
49
  /**
136
50
  * `publishMessages` can be roped into a transaction by the `store`
137
- * service. If so, it will add the SQL and params to the
138
- * transaction. [Process Overview]: The engine keeps a reference
139
- * to the `store` and `stream` providers; it asks the `store` to
140
- * create a transaction and then starts adding store commands to the
141
- * transaction. The engine then calls the router to publish a
142
- * message using the `stream` provider (which the router keeps
143
- * a reference to), and provides the transaction object.
144
- * The `stream` provider then calls this method to generate
145
- * the SQL and params for the transaction (but, of course, the sql
146
- * is not executed until the engine calls the `exec` method on
147
- * the transaction object provided by `store`).
148
- *
149
- * NOTE: this strategy keeps `stream` and `store` operations separate but
150
- * allows calls to the stream to be roped into a single SQL transaction.
51
+ * service. The `stream` provider generates SQL and params that are
52
+ * added to the transaction for atomic execution.
151
53
  */
152
54
  publishMessages(streamName: string, messages: string[], options?: PublishMessageConfig): Promise<string[] | ProviderTransaction>;
153
55
  _publishMessages(streamName: string, messages: string[], options?: PublishMessageConfig): {
@@ -35,109 +35,9 @@ const Lifecycle = __importStar(require("./lifecycle"));
35
35
  /**
36
36
  * PostgreSQL Stream Service
37
37
  *
38
- * High-performance stream message provider using PostgreSQL with LISTEN/NOTIFY.
39
- *
40
- * ## Module Organization
41
- *
42
- * This service is organized into focused modules following KISS principles:
43
- * - `postgres.ts` (this file) - Main orchestrator and service interface
44
- * - `kvtables.ts` - Schema deployment and table management
45
- * - `messages.ts` - Message CRUD operations (publish, fetch, ack, delete)
46
- * - `stats.ts` - Statistics and query operations
47
- * - `scout.ts` - Scout role coordination for polling visible messages
48
- * - `notifications.ts` - LISTEN/NOTIFY notification system with static state management
49
- * - `lifecycle.ts` - Stream and consumer group lifecycle operations
50
- *
51
- * ## Lifecycle
52
- *
53
- * ### Initialization (`init`)
54
- * 1. Deploy PostgreSQL schema (tables, indexes, triggers, functions)
55
- * 2. Create ScoutManager for coordinating visibility timeout polling
56
- * 3. Create NotificationManager for LISTEN/NOTIFY event handling
57
- * 4. Set up notification handler (once per client, shared across instances)
58
- * 5. Start fallback poller (backup for missed notifications)
59
- * 6. Start router scout poller (for visibility timeout processing)
60
- *
61
- * ### Shutdown (`cleanup`)
62
- * 1. Stop router scout polling loop
63
- * 2. Release scout role if held
64
- * 3. Stop notification consumers for this instance
65
- * 4. UNLISTEN from channels when last instance disconnects
66
- * 5. Clean up fallback poller when last instance disconnects
67
- * 6. Remove notification handlers when last instance disconnects
68
- *
69
- * ## Notification System (LISTEN/NOTIFY)
70
- *
71
- * ### Real-time Message Delivery
72
- * - PostgreSQL trigger on INSERT sends NOTIFY when messages are immediately visible
73
- * - Messages with visibility timeout are NOT notified on INSERT
74
- * - Multiple service instances share the same client and notification handlers
75
- * - Static state ensures only ONE LISTEN per channel across all instances
76
- *
77
- * ### Components
78
- * - **Notification Handler**: Listens for PostgreSQL NOTIFY events
79
- * - **Fallback Poller**: Polls every 30s (default) for missed messages
80
- * - **Router Scout**: Active role-holder polls visible messages frequently (~100ms)
81
- * - **Visibility Function**: `notify_visible_messages()` checks for expired timeouts
82
- *
83
- * ## Scout Role (Visibility Timeout Processing)
84
- *
85
- * When messages are published with visibility timeouts (delays), they need to be
86
- * processed when they become visible. The scout role ensures this happens efficiently:
87
- *
88
- * 1. **Role Acquisition**: One instance per app acquires "router" scout role
89
- * 2. **Fast Polling**: Scout polls `notify_visible_messages()` every ~100ms
90
- * 3. **Notification**: Function triggers NOTIFY for streams with visible messages
91
- * 4. **Role Rotation**: Role expires after interval, another instance can claim it
92
- * 5. **Fallback**: Non-scouts sleep longer, try to acquire role periodically
93
- *
94
- * ## Message Flow
95
- *
96
- * ### Publishing
97
- * 1. Messages inserted into partitioned table
98
- * 2. If immediately visible → INSERT trigger sends NOTIFY
99
- * 3. If visibility timeout → no NOTIFY (scout will handle when visible)
100
- *
101
- * ### Consuming (Event-Driven)
102
- * 1. Consumer calls `consumeMessages` with notification callback
103
- * 2. Service executes LISTEN on channel `stream_{name}_{group}`
104
- * 3. On NOTIFY → fetch messages → invoke callback
105
- * 4. Initial fetch done immediately (catch any queued messages)
106
- *
107
- * ### Consuming (Polling)
108
- * 1. Consumer calls `consumeMessages` without callback
109
- * 2. Service directly queries and reserves messages
110
- * 3. Returns messages synchronously
111
- *
112
- * ## Reliability Guarantees
113
- *
114
- * - **Notification Fallback**: Poller catches missed notifications every 30s
115
- * - **Visibility Scout**: Ensures delayed messages are processed when visible
116
- * - **Graceful Degradation**: Falls back to polling if LISTEN fails
117
- * - **Shared State**: Multiple instances coordinate via static maps
118
- * - **Race Condition Safe**: SKIP LOCKED prevents message duplication
119
- *
120
- * @example
121
- * ```typescript
122
- * // Initialize service
123
- * const service = new PostgresStreamService(client, storeClient, config);
124
- * await service.init('namespace', 'appId', logger);
125
- *
126
- * // Event-driven consumption (recommended)
127
- * await service.consumeMessages('stream', 'group', 'consumer', {
128
- * notificationCallback: (messages) => {
129
- * // Process messages in real-time
130
- * }
131
- * });
132
- *
133
- * // Polling consumption
134
- * const messages = await service.consumeMessages('stream', 'group', 'consumer', {
135
- * batchSize: 10
136
- * });
137
- *
138
- * // Cleanup on shutdown
139
- * await service.cleanup();
140
- * ```
38
+ * Uses separate `engine_streams` and `worker_streams` tables for
39
+ * security isolation and independent scaling. The `worker_streams`
40
+ * table includes a `workflow_name` column for routing.
141
41
  */
142
42
  class PostgresStreamService extends index_1.StreamService {
143
43
  constructor(streamClient, storeClient, config = {}) {
@@ -149,9 +49,9 @@ class PostgresStreamService extends index_1.StreamService {
149
49
  this.logger = logger;
150
50
  await (0, kvtables_1.deploySchema)(this.streamClient, this.appId, this.logger);
151
51
  // Initialize scout manager
152
- this.scoutManager = new scout_1.ScoutManager(this.streamClient, this.appId, this.getTableName.bind(this), this.mintKey.bind(this), this.logger);
52
+ this.scoutManager = new scout_1.ScoutManager(this.streamClient, this.appId, this.getEngineTableName.bind(this), this.mintKey.bind(this), this.logger);
153
53
  // Initialize notification manager
154
- this.notificationManager = new notifications_1.NotificationManager(this.streamClient, this.getTableName.bind(this), () => (0, notifications_1.getFallbackInterval)(this.config), this.logger);
54
+ this.notificationManager = new notifications_1.NotificationManager(this.streamClient, this.getEngineTableName.bind(this), () => (0, notifications_1.getFallbackInterval)(this.config), this.logger);
155
55
  // Set up notification handler if supported
156
56
  if (this.streamClient.on && this.isNotificationsEnabled()) {
157
57
  this.notificationManager.setupClientNotificationHandler(this);
@@ -185,6 +85,28 @@ class PostgresStreamService extends index_1.StreamService {
185
85
  getConsumerKey(streamName, groupName) {
186
86
  return `${streamName}:${groupName}`;
187
87
  }
88
+ /**
89
+ * Resolves a conjoined stream key (e.g., `hmsh:appId:x:topic`) into
90
+ * the correct table name and simplified stream name.
91
+ */
92
+ resolveStreamTarget(streamKey) {
93
+ const isEngine = streamKey.endsWith(':');
94
+ if (isEngine) {
95
+ return {
96
+ tableName: this.getEngineTableName(),
97
+ streamName: this.appId,
98
+ isEngine: true,
99
+ };
100
+ }
101
+ // Extract the bare topic from hmsh:appId:x:topicName
102
+ const parts = streamKey.split(':');
103
+ const topic = parts[parts.length - 1];
104
+ return {
105
+ tableName: this.getWorkerTableName(),
106
+ streamName: topic,
107
+ isEngine: false,
108
+ };
109
+ }
188
110
  mintKey(type, params) {
189
111
  if (!this.namespace)
190
112
  throw new Error('namespace not set');
@@ -196,8 +118,11 @@ class PostgresStreamService extends index_1.StreamService {
196
118
  transact() {
197
119
  return {};
198
120
  }
199
- getTableName() {
200
- return `${this.safeName(this.appId)}.streams`;
121
+ getEngineTableName() {
122
+ return `${this.safeName(this.appId)}.engine_streams`;
123
+ }
124
+ getWorkerTableName() {
125
+ return `${this.safeName(this.appId)}.worker_streams`;
201
126
  }
202
127
  safeName(appId) {
203
128
  return appId.replace(/[^a-zA-Z0-9_]/g, '_');
@@ -206,36 +131,33 @@ class PostgresStreamService extends index_1.StreamService {
206
131
  return Lifecycle.createStream(streamName);
207
132
  }
208
133
  async deleteStream(streamName) {
209
- return Lifecycle.deleteStream(this.streamClient, this.getTableName(), streamName, this.logger);
134
+ if (streamName === '*') {
135
+ // Delete from both tables
136
+ await Lifecycle.deleteStream(this.streamClient, this.getEngineTableName(), '*', this.logger);
137
+ return Lifecycle.deleteStream(this.streamClient, this.getWorkerTableName(), '*', this.logger);
138
+ }
139
+ const target = this.resolveStreamTarget(streamName);
140
+ return Lifecycle.deleteStream(this.streamClient, target.tableName, target.streamName, this.logger);
210
141
  }
211
142
  async createConsumerGroup(streamName, groupName) {
212
143
  return Lifecycle.createConsumerGroup(streamName, groupName);
213
144
  }
214
145
  async deleteConsumerGroup(streamName, groupName) {
215
- return Lifecycle.deleteConsumerGroup(this.streamClient, this.getTableName(), streamName, groupName, this.logger);
146
+ const target = this.resolveStreamTarget(streamName);
147
+ return Lifecycle.deleteConsumerGroup(this.streamClient, target.tableName, target.streamName, groupName, this.logger);
216
148
  }
217
149
  /**
218
150
  * `publishMessages` can be roped into a transaction by the `store`
219
- * service. If so, it will add the SQL and params to the
220
- * transaction. [Process Overview]: The engine keeps a reference
221
- * to the `store` and `stream` providers; it asks the `store` to
222
- * create a transaction and then starts adding store commands to the
223
- * transaction. The engine then calls the router to publish a
224
- * message using the `stream` provider (which the router keeps
225
- * a reference to), and provides the transaction object.
226
- * The `stream` provider then calls this method to generate
227
- * the SQL and params for the transaction (but, of course, the sql
228
- * is not executed until the engine calls the `exec` method on
229
- * the transaction object provided by `store`).
230
- *
231
- * NOTE: this strategy keeps `stream` and `store` operations separate but
232
- * allows calls to the stream to be roped into a single SQL transaction.
151
+ * service. The `stream` provider generates SQL and params that are
152
+ * added to the transaction for atomic execution.
233
153
  */
234
154
  async publishMessages(streamName, messages, options) {
235
- return Messages.publishMessages(this.streamClient, this.getTableName(), streamName, messages, options, this.logger);
155
+ const target = this.resolveStreamTarget(streamName);
156
+ return Messages.publishMessages(this.streamClient, target.tableName, target.streamName, target.isEngine, messages, options, this.logger);
236
157
  }
237
158
  _publishMessages(streamName, messages, options) {
238
- return Messages.buildPublishSQL(this.getTableName(), streamName, messages, options);
159
+ const target = this.resolveStreamTarget(streamName);
160
+ return Messages.buildPublishSQL(target.tableName, target.streamName, target.isEngine, messages, options);
239
161
  }
240
162
  async consumeMessages(streamName, groupName, consumerName, options) {
241
163
  // If notification callback is provided and notifications are enabled, set up listener
@@ -248,9 +170,7 @@ class PostgresStreamService extends index_1.StreamService {
248
170
  shouldUseNotifications(options) {
249
171
  const globalEnabled = this.isNotificationsEnabled();
250
172
  const optionEnabled = options?.enableNotifications;
251
- // If option is explicitly set, use that; otherwise use global setting
252
173
  const enabled = optionEnabled !== undefined ? optionEnabled : globalEnabled;
253
- // Also check if client supports notifications
254
174
  return enabled && this.streamClient.on !== undefined;
255
175
  }
256
176
  async setupNotificationConsumer(streamName, groupName, consumerName, callback, options) {
@@ -271,7 +191,6 @@ class PostgresStreamService extends index_1.StreamService {
271
191
  messageCount: initialMessages.length,
272
192
  fetchDuration: Date.now() - fetchStart,
273
193
  });
274
- // If we got messages, call the callback
275
194
  if (initialMessages.length > 0) {
276
195
  callback(initialMessages);
277
196
  }
@@ -284,11 +203,9 @@ class PostgresStreamService extends index_1.StreamService {
284
203
  });
285
204
  }
286
205
  });
287
- // Return empty array immediately to avoid blocking
288
206
  return [];
289
207
  }
290
208
  catch (error) {
291
- // Fall back to polling if setup fails
292
209
  return this.fetchMessages(streamName, groupName, consumerName, options);
293
210
  }
294
211
  }
@@ -296,42 +213,68 @@ class PostgresStreamService extends index_1.StreamService {
296
213
  await this.notificationManager.stopNotificationConsumer(this, streamName, groupName);
297
214
  }
298
215
  async fetchMessages(streamName, groupName, consumerName, options) {
299
- return Messages.fetchMessages(this.streamClient, this.getTableName(), streamName, groupName, consumerName, options || {}, this.logger);
216
+ const target = this.resolveStreamTarget(streamName);
217
+ return Messages.fetchMessages(this.streamClient, target.tableName, target.streamName, target.isEngine, consumerName, options || {}, this.logger);
300
218
  }
301
219
  async ackAndDelete(streamName, groupName, messageIds) {
302
- return Messages.ackAndDelete(this.streamClient, this.getTableName(), streamName, groupName, messageIds, this.logger);
220
+ const target = this.resolveStreamTarget(streamName);
221
+ return Messages.ackAndDelete(this.streamClient, target.tableName, target.streamName, messageIds, this.logger);
303
222
  }
304
223
  async acknowledgeMessages(streamName, groupName, messageIds, options) {
305
224
  return Messages.acknowledgeMessages(messageIds);
306
225
  }
307
226
  async deleteMessages(streamName, groupName, messageIds, options) {
308
- return Messages.deleteMessages(this.streamClient, this.getTableName(), streamName, groupName, messageIds, this.logger);
227
+ const target = this.resolveStreamTarget(streamName);
228
+ return Messages.deleteMessages(this.streamClient, target.tableName, target.streamName, messageIds, this.logger);
309
229
  }
310
230
  async retryMessages(streamName, groupName, options) {
311
231
  return Messages.retryMessages(streamName, groupName, options);
312
232
  }
313
233
  async getStreamStats(streamName) {
314
- return Stats.getStreamStats(this.streamClient, this.getTableName(), streamName, this.logger);
234
+ const target = this.resolveStreamTarget(streamName);
235
+ return Stats.getStreamStats(this.streamClient, target.tableName, target.streamName, this.logger);
315
236
  }
316
237
  async getStreamDepth(streamName) {
317
- return Stats.getStreamDepth(this.streamClient, this.getTableName(), streamName, this.logger);
238
+ const target = this.resolveStreamTarget(streamName);
239
+ return Stats.getStreamDepth(this.streamClient, target.tableName, target.streamName, this.logger);
318
240
  }
319
241
  async getStreamDepths(streamNames) {
320
- return Stats.getStreamDepths(this.streamClient, this.getTableName(), streamNames, this.logger);
242
+ // Partition stream names by table type and query each table separately
243
+ const engineStreams = [];
244
+ const workerStreams = [];
245
+ const streamNameMap = new Map(); // resolvedName -> originalKey (NOTE(review): populated below but never read — confirm intent or remove)
246
+ for (const s of streamNames) {
247
+ const target = this.resolveStreamTarget(s.stream);
248
+ streamNameMap.set(target.streamName, s.stream);
249
+ if (target.isEngine) {
250
+ engineStreams.push({ stream: target.streamName });
251
+ }
252
+ else {
253
+ workerStreams.push({ stream: target.streamName });
254
+ }
255
+ }
256
+ const results = [];
257
+ if (engineStreams.length > 0) {
258
+ const engineResults = await Stats.getStreamDepths(this.streamClient, this.getEngineTableName(), engineStreams, this.logger);
259
+ results.push(...engineResults);
260
+ }
261
+ if (workerStreams.length > 0) {
262
+ const workerResults = await Stats.getStreamDepths(this.streamClient, this.getWorkerTableName(), workerStreams, this.logger);
263
+ results.push(...workerResults);
264
+ }
265
+ return results;
321
266
  }
322
267
  async trimStream(streamName, options) {
323
- return Stats.trimStream(this.streamClient, this.getTableName(), streamName, options, this.logger);
268
+ const target = this.resolveStreamTarget(streamName);
269
+ return Stats.trimStream(this.streamClient, target.tableName, target.streamName, options, this.logger);
324
270
  }
325
271
  getProviderSpecificFeatures() {
326
272
  return Stats.getProviderSpecificFeatures(this.config);
327
273
  }
328
- // Cleanup method to be called when shutting down
329
274
  async cleanup() {
330
- // Stop router scout polling loop
331
275
  if (this.scoutManager) {
332
276
  await this.scoutManager.stopRouterScoutPoller();
333
277
  }
334
- // Clean up notification consumers
335
278
  if (this.notificationManager) {
336
279
  await this.notificationManager.cleanup(this);
337
280
  }
@@ -0,0 +1,62 @@
1
+ import { ILogger } from '../logger';
2
+ import { StreamService } from './index';
3
+ import { ProviderClient, ProviderTransaction } from '../../types/provider';
4
+ import { StreamData, StreamDataResponse } from '../../types/stream';
5
+ import { KeyType } from '../../modules/key';
6
+ type WorkerCallback = (data: StreamData) => Promise<StreamDataResponse | void>;
7
+ /**
8
+ * Process-wide singleton registry that manages one consumer per task queue (workers)
9
+ * and one per appId (engines). Instead of N consumers each polling independently,
10
+ * one consumer fetches batches from the stream and dispatches to registered callbacks
11
+ * based on the `workflow_name` column (workers) or to the registered engine callback (engines; see `createEngineDispatcher`).
12
+ */
13
+ declare class StreamConsumerRegistry {
14
+ private static workerConsumers;
15
+ private static engineConsumers;
16
+ /**
17
+ * Register a worker callback for a (taskQueue, workflowName) pair.
18
+ * If no consumer exists for this taskQueue, a singleton Router is created.
19
+ */
20
+ static registerWorker(namespace: string, appId: string, guid: string, taskQueue: string, workflowName: string, callback: WorkerCallback, stream: StreamService<ProviderClient, ProviderTransaction>, store: {
21
+ mintKey: (type: KeyType, params: any) => string;
22
+ getThrottleRate: (topic?: string) => Promise<number>;
23
+ }, logger: ILogger, config?: {
24
+ reclaimDelay?: number;
25
+ reclaimCount?: number;
26
+ retryPolicy?: any;
27
+ }): Promise<void>;
28
+ /**
29
+ * Register an engine callback for an appId.
30
+ * If no consumer exists for this appId, a singleton Router is created.
31
+ */
32
+ static registerEngine(namespace: string, appId: string, guid: string, callback: WorkerCallback, stream: StreamService<ProviderClient, ProviderTransaction>, store: {
33
+ mintKey: (type: KeyType, params: any) => string;
34
+ getThrottleRate: (topic?: string) => Promise<number>;
35
+ }, logger: ILogger, config?: {
36
+ reclaimDelay?: number;
37
+ reclaimCount?: number;
38
+ }): Promise<void>;
39
+ /**
40
+ * Creates a dispatch callback for worker consumers.
41
+ * Routes messages to the registered callback based on metadata.wfn (workflow_name).
42
+ */
43
+ private static createWorkerDispatcher;
44
+ /**
45
+ * Creates a dispatch callback for engine consumers.
46
+ * Engines are generic processors — the first registered callback handles the message.
47
+ */
48
+ private static createEngineDispatcher;
49
+ /**
50
+ * Unregister a worker callback.
51
+ */
52
+ static unregisterWorker(namespace: string, appId: string, taskQueue: string, workflowName: string): Promise<void>;
53
+ /**
54
+ * Unregister an engine callback.
55
+ */
56
+ static unregisterEngine(namespace: string, appId: string, callback: WorkerCallback): Promise<void>;
57
+ /**
58
+ * Stop all consumers and clear the registry.
59
+ */
60
+ static shutdown(): Promise<void>;
61
+ }
62
+ export { StreamConsumerRegistry };