@hotmeshio/hotmesh 0.5.3 → 0.5.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +67 -134
  2. package/build/index.d.ts +1 -3
  3. package/build/index.js +1 -5
  4. package/build/modules/enums.d.ts +4 -0
  5. package/build/modules/enums.js +5 -1
  6. package/build/modules/utils.d.ts +1 -9
  7. package/build/modules/utils.js +0 -6
  8. package/build/package.json +3 -4
  9. package/build/services/connector/factory.d.ts +2 -2
  10. package/build/services/connector/factory.js +11 -8
  11. package/build/services/connector/providers/postgres.d.ts +47 -0
  12. package/build/services/connector/providers/postgres.js +107 -0
  13. package/build/services/hotmesh/index.d.ts +8 -0
  14. package/build/services/hotmesh/index.js +27 -0
  15. package/build/services/memflow/client.d.ts +1 -1
  16. package/build/services/memflow/client.js +8 -6
  17. package/build/services/memflow/worker.js +3 -0
  18. package/build/services/pipe/functions/cron.js +1 -1
  19. package/build/services/store/providers/postgres/kvtables.js +19 -6
  20. package/build/services/store/providers/postgres/postgres.js +13 -2
  21. package/build/services/stream/providers/postgres/postgres.d.ts +6 -3
  22. package/build/services/stream/providers/postgres/postgres.js +169 -59
  23. package/build/services/sub/providers/postgres/postgres.d.ts +9 -0
  24. package/build/services/sub/providers/postgres/postgres.js +109 -18
  25. package/build/services/worker/index.js +4 -0
  26. package/build/types/hotmesh.d.ts +19 -5
  27. package/build/types/index.d.ts +0 -2
  28. package/env.example +11 -0
  29. package/index.ts +0 -4
  30. package/package.json +3 -4
  31. package/build/services/meshdata/index.d.ts +0 -795
  32. package/build/services/meshdata/index.js +0 -1235
  33. package/build/services/meshos/index.d.ts +0 -293
  34. package/build/services/meshos/index.js +0 -547
  35. package/build/types/manifest.d.ts +0 -52
  36. package/build/types/manifest.js +0 -2
  37. package/build/types/meshdata.d.ts +0 -252
  38. package/build/types/meshdata.js +0 -2
@@ -8,8 +8,8 @@ const kvtables_1 = require("./kvtables");
8
8
  class PostgresStreamService extends index_1.StreamService {
9
9
  constructor(streamClient, storeClient, config = {}) {
10
10
  super(streamClient, storeClient, config);
11
- this.notificationConsumers = new Map();
12
- this.fallbackIntervalId = null;
11
+ // Instance-level tracking for cleanup
12
+ this.instanceNotificationConsumers = new Set();
13
13
  this.notificationHandlerBound = this.handleNotification.bind(this);
14
14
  }
15
15
  async init(namespace, appId, logger) {
@@ -19,10 +19,34 @@ class PostgresStreamService extends index_1.StreamService {
19
19
  await (0, kvtables_1.deploySchema)(this.streamClient, this.appId, this.logger);
20
20
  // Set up notification handler if supported
21
21
  if (this.streamClient.on && this.isNotificationsEnabled()) {
22
- this.streamClient.on('notification', this.notificationHandlerBound);
23
- this.startFallbackPoller();
22
+ this.setupClientNotificationHandler();
23
+ this.startClientFallbackPoller();
24
24
  }
25
25
  }
26
+ setupClientNotificationHandler() {
27
+ // Check if notification handler is already set up for this client
28
+ if (PostgresStreamService.clientNotificationHandlers.get(this.streamClient)) {
29
+ return;
30
+ }
31
+ // Initialize notification consumer map for this client if it doesn't exist
32
+ if (!PostgresStreamService.clientNotificationConsumers.has(this.streamClient)) {
33
+ PostgresStreamService.clientNotificationConsumers.set(this.streamClient, new Map());
34
+ }
35
+ // Set up the notification handler for this client
36
+ this.streamClient.on('notification', this.handleNotification.bind(this));
37
+ // Mark this client as having a notification handler
38
+ PostgresStreamService.clientNotificationHandlers.set(this.streamClient, true);
39
+ }
40
+ startClientFallbackPoller() {
41
+ // Check if fallback poller already exists for this client
42
+ if (PostgresStreamService.clientFallbackPollers.has(this.streamClient)) {
43
+ return;
44
+ }
45
+ const fallbackIntervalId = setInterval(() => {
46
+ this.checkForMissedMessages();
47
+ }, this.getFallbackInterval());
48
+ PostgresStreamService.clientFallbackPollers.set(this.streamClient, fallbackIntervalId);
49
+ }
26
50
  isNotificationsEnabled() {
27
51
  return this.config?.postgres?.enableNotifications !== false; // Default: true
28
52
  }
@@ -32,42 +56,53 @@ class PostgresStreamService extends index_1.StreamService {
32
56
  getNotificationTimeout() {
33
57
  return this.config?.postgres?.notificationTimeout || 5000; // Default: 5 seconds
34
58
  }
35
- startFallbackPoller() {
36
- if (this.fallbackIntervalId) {
37
- clearInterval(this.fallbackIntervalId);
38
- }
39
- this.fallbackIntervalId = setInterval(() => {
40
- this.checkForMissedMessages();
41
- }, this.getFallbackInterval());
42
- }
43
59
  async checkForMissedMessages() {
44
60
  const now = Date.now();
45
- for (const [key, consumer] of this.notificationConsumers.entries()) {
46
- if (consumer.isListening && now - consumer.lastFallbackCheck > this.getFallbackInterval()) {
47
- try {
48
- const messages = await this.fetchMessages(consumer.streamName, consumer.groupName, consumer.consumerName, { batchSize: 10, enableBackoff: false, maxRetries: 1 });
49
- if (messages.length > 0) {
50
- this.logger.debug('postgres-stream-fallback-messages', {
61
+ const clientNotificationConsumers = PostgresStreamService.clientNotificationConsumers.get(this.streamClient);
62
+ if (!clientNotificationConsumers) {
63
+ return;
64
+ }
65
+ for (const [consumerKey, instanceMap] of clientNotificationConsumers.entries()) {
66
+ for (const [instance, consumer] of instanceMap.entries()) {
67
+ if (consumer.isListening && now - consumer.lastFallbackCheck > this.getFallbackInterval()) {
68
+ try {
69
+ const messages = await instance.fetchMessages(consumer.streamName, consumer.groupName, consumer.consumerName, { batchSize: 10, enableBackoff: false, maxRetries: 1 });
70
+ if (messages.length > 0) {
71
+ instance.logger.debug('postgres-stream-fallback-messages', {
72
+ streamName: consumer.streamName,
73
+ groupName: consumer.groupName,
74
+ messageCount: messages.length
75
+ });
76
+ consumer.callback(messages);
77
+ }
78
+ consumer.lastFallbackCheck = now;
79
+ }
80
+ catch (error) {
81
+ instance.logger.error('postgres-stream-fallback-error', {
51
82
  streamName: consumer.streamName,
52
83
  groupName: consumer.groupName,
53
- messageCount: messages.length
84
+ error
54
85
  });
55
- consumer.callback(messages);
56
86
  }
57
- consumer.lastFallbackCheck = now;
58
- }
59
- catch (error) {
60
- this.logger.error('postgres-stream-fallback-error', {
61
- streamName: consumer.streamName,
62
- groupName: consumer.groupName,
63
- error
64
- });
65
87
  }
66
88
  }
67
89
  }
68
90
  }
69
91
  handleNotification(notification) {
70
92
  try {
93
+ // Only handle stream notifications (channels starting with "stream_")
94
+ // Ignore pub/sub notifications from sub provider which use different channel names
95
+ if (!notification.channel.startsWith('stream_')) {
96
+ // This is likely a pub/sub notification from the sub provider, ignore it
97
+ this.logger.debug('postgres-stream-ignoring-sub-notification', {
98
+ channel: notification.channel,
99
+ payloadPreview: notification.payload.substring(0, 100)
100
+ });
101
+ return;
102
+ }
103
+ this.logger.debug('postgres-stream-processing-notification', {
104
+ channel: notification.channel
105
+ });
71
106
  const payload = JSON.parse(notification.payload);
72
107
  const { stream_name, group_name } = payload;
73
108
  if (!stream_name || !group_name) {
@@ -75,10 +110,19 @@ class PostgresStreamService extends index_1.StreamService {
75
110
  return;
76
111
  }
77
112
  const consumerKey = this.getConsumerKey(stream_name, group_name);
78
- const consumer = this.notificationConsumers.get(consumerKey);
79
- if (consumer && consumer.isListening) {
80
- // Trigger immediate message fetch for this consumer
81
- this.fetchAndDeliverMessages(consumer);
113
+ const clientNotificationConsumers = PostgresStreamService.clientNotificationConsumers.get(this.streamClient);
114
+ if (!clientNotificationConsumers) {
115
+ return;
116
+ }
117
+ const instanceMap = clientNotificationConsumers.get(consumerKey);
118
+ if (!instanceMap) {
119
+ return;
120
+ }
121
+ // Trigger immediate message fetch for all instances with this consumer
122
+ for (const [instance, consumer] of instanceMap.entries()) {
123
+ if (consumer.isListening) {
124
+ instance.fetchAndDeliverMessages(consumer);
125
+ }
82
126
  }
83
127
  }
84
128
  catch (error) {
@@ -235,8 +279,18 @@ class PostgresStreamService extends index_1.StreamService {
235
279
  const startTime = Date.now();
236
280
  const consumerKey = this.getConsumerKey(streamName, groupName);
237
281
  const channelName = (0, kvtables_1.getNotificationChannelName)(streamName, groupName);
238
- // Set up LISTEN if not already listening
239
- if (!this.notificationConsumers.has(consumerKey)) {
282
+ // Get or create notification consumer map for this client
283
+ let clientNotificationConsumers = PostgresStreamService.clientNotificationConsumers.get(this.streamClient);
284
+ if (!clientNotificationConsumers) {
285
+ clientNotificationConsumers = new Map();
286
+ PostgresStreamService.clientNotificationConsumers.set(this.streamClient, clientNotificationConsumers);
287
+ }
288
+ // Get or create instance map for this consumer key
289
+ let instanceMap = clientNotificationConsumers.get(consumerKey);
290
+ if (!instanceMap) {
291
+ instanceMap = new Map();
292
+ clientNotificationConsumers.set(consumerKey, instanceMap);
293
+ // Set up LISTEN for this channel (only once per channel across all instances)
240
294
  try {
241
295
  const listenStart = Date.now();
242
296
  await this.streamClient.query(`LISTEN "${channelName}"`);
@@ -258,18 +312,22 @@ class PostgresStreamService extends index_1.StreamService {
258
312
  return this.fetchMessages(streamName, groupName, consumerName, options);
259
313
  }
260
314
  }
261
- // Register or update consumer
262
- this.notificationConsumers.set(consumerKey, {
315
+ // Register or update consumer for this instance
316
+ const consumer = {
263
317
  streamName,
264
318
  groupName,
265
319
  consumerName,
266
320
  callback,
267
321
  isListening: true,
268
322
  lastFallbackCheck: Date.now()
269
- });
323
+ };
324
+ instanceMap.set(this, consumer);
325
+ // Track this consumer for cleanup
326
+ this.instanceNotificationConsumers.add(consumerKey);
270
327
  this.logger.debug('postgres-stream-notification-setup-complete', {
271
328
  streamName,
272
329
  groupName,
330
+ instanceCount: instanceMap.size,
273
331
  setupDuration: Date.now() - startTime
274
332
  });
275
333
  // Do an initial fetch asynchronously to avoid blocking setup
@@ -306,14 +364,23 @@ class PostgresStreamService extends index_1.StreamService {
306
364
  }
307
365
  async stopNotificationConsumer(streamName, groupName) {
308
366
  const consumerKey = this.getConsumerKey(streamName, groupName);
309
- const consumer = this.notificationConsumers.get(consumerKey);
367
+ const clientNotificationConsumers = PostgresStreamService.clientNotificationConsumers.get(this.streamClient);
368
+ if (!clientNotificationConsumers) {
369
+ return;
370
+ }
371
+ const instanceMap = clientNotificationConsumers.get(consumerKey);
372
+ if (!instanceMap) {
373
+ return;
374
+ }
375
+ const consumer = instanceMap.get(this);
310
376
  if (consumer) {
311
377
  consumer.isListening = false;
312
- this.notificationConsumers.delete(consumerKey);
313
- // If no more consumers for this channel, stop listening
314
- const hasOtherConsumers = Array.from(this.notificationConsumers.values())
315
- .some(c => c.streamName === streamName && c.groupName === groupName);
316
- if (!hasOtherConsumers) {
378
+ instanceMap.delete(this);
379
+ // Remove from instance tracking
380
+ this.instanceNotificationConsumers.delete(consumerKey);
381
+ // If no more instances for this consumer key, stop listening and clean up
382
+ if (instanceMap.size === 0) {
383
+ clientNotificationConsumers.delete(consumerKey);
317
384
  const channelName = (0, kvtables_1.getNotificationChannelName)(streamName, groupName);
318
385
  try {
319
386
  await this.streamClient.query(`UNLISTEN "${channelName}"`);
@@ -496,24 +563,67 @@ class PostgresStreamService extends index_1.StreamService {
496
563
  }
497
564
  // Cleanup method to be called when shutting down
498
565
  async cleanup() {
499
- // Stop fallback poller
500
- if (this.fallbackIntervalId) {
501
- clearInterval(this.fallbackIntervalId);
502
- this.fallbackIntervalId = null;
503
- }
504
- // Remove notification handler
505
- if (this.streamClient.removeAllListeners) {
506
- this.streamClient.removeAllListeners('notification');
507
- }
508
- else if (this.streamClient.off) {
509
- this.streamClient.off('notification', this.notificationHandlerBound);
566
+ // Clean up this instance's notification consumers
567
+ const clientNotificationConsumers = PostgresStreamService.clientNotificationConsumers.get(this.streamClient);
568
+ if (clientNotificationConsumers) {
569
+ // Remove this instance from all consumer maps
570
+ for (const consumerKey of this.instanceNotificationConsumers) {
571
+ const instanceMap = clientNotificationConsumers.get(consumerKey);
572
+ if (instanceMap) {
573
+ const consumer = instanceMap.get(this);
574
+ if (consumer) {
575
+ consumer.isListening = false;
576
+ instanceMap.delete(this);
577
+ // If no more instances for this consumer, stop listening
578
+ if (instanceMap.size === 0) {
579
+ clientNotificationConsumers.delete(consumerKey);
580
+ const channelName = (0, kvtables_1.getNotificationChannelName)(consumer.streamName, consumer.groupName);
581
+ try {
582
+ await this.streamClient.query(`UNLISTEN "${channelName}"`);
583
+ this.logger.debug('postgres-stream-cleanup-unlisten', {
584
+ streamName: consumer.streamName,
585
+ groupName: consumer.groupName,
586
+ channelName
587
+ });
588
+ }
589
+ catch (error) {
590
+ this.logger.error('postgres-stream-cleanup-unlisten-error', {
591
+ streamName: consumer.streamName,
592
+ groupName: consumer.groupName,
593
+ channelName,
594
+ error
595
+ });
596
+ }
597
+ }
598
+ }
599
+ }
600
+ }
510
601
  }
511
- // Stop all consumers and unlisten from channels
512
- const consumers = Array.from(this.notificationConsumers.entries());
513
- for (const [key, consumer] of consumers) {
514
- await this.stopNotificationConsumer(consumer.streamName, consumer.groupName);
602
+ // Clear instance tracking
603
+ this.instanceNotificationConsumers.clear();
604
+ // If no more consumers exist for this client, clean up static resources
605
+ if (clientNotificationConsumers && clientNotificationConsumers.size === 0) {
606
+ // Remove client from static maps
607
+ PostgresStreamService.clientNotificationConsumers.delete(this.streamClient);
608
+ PostgresStreamService.clientNotificationHandlers.delete(this.streamClient);
609
+ // Stop fallback poller for this client
610
+ const fallbackIntervalId = PostgresStreamService.clientFallbackPollers.get(this.streamClient);
611
+ if (fallbackIntervalId) {
612
+ clearInterval(fallbackIntervalId);
613
+ PostgresStreamService.clientFallbackPollers.delete(this.streamClient);
614
+ }
615
+ // Remove notification handler
616
+ if (this.streamClient.removeAllListeners) {
617
+ this.streamClient.removeAllListeners('notification');
618
+ }
619
+ else if (this.streamClient.off && this.notificationHandlerBound) {
620
+ this.streamClient.off('notification', this.notificationHandlerBound);
621
+ }
515
622
  }
516
- this.notificationConsumers.clear();
517
623
  }
518
624
  }
519
625
  exports.PostgresStreamService = PostgresStreamService;
626
+ // Static maps to manage notifications across all instances sharing the same client
627
+ PostgresStreamService.clientNotificationConsumers = new Map();
628
+ PostgresStreamService.clientNotificationHandlers = new Map();
629
+ PostgresStreamService.clientFallbackPollers = new Map();
@@ -5,13 +5,22 @@ import { SubscriptionCallback } from '../../../../types/quorum';
5
5
  import { ProviderClient, ProviderTransaction } from '../../../../types/provider';
6
6
  import { PostgresClientType } from '../../../../types/postgres';
7
7
  declare class PostgresSubService extends SubService<PostgresClientType & ProviderClient> {
8
+ private static clientSubscriptions;
9
+ private static clientHandlers;
10
+ private instanceSubscriptions;
8
11
  constructor(eventClient: PostgresClientType & ProviderClient, storeClient?: PostgresClientType & ProviderClient);
9
12
  init(namespace: string, appId: string, engineId: string, logger: ILogger): Promise<void>;
13
+ private setupNotificationHandler;
10
14
  transact(): ProviderTransaction;
11
15
  mintKey(type: KeyType, params: KeyStoreParams): string;
12
16
  mintSafeKey(type: KeyType, params: KeyStoreParams): [string, string];
13
17
  subscribe(keyType: KeyType.QUORUM, callback: SubscriptionCallback, appId: string, topic?: string): Promise<void>;
14
18
  unsubscribe(keyType: KeyType.QUORUM, appId: string, topic?: string): Promise<void>;
19
+ /**
20
+ * Cleanup method to remove all subscriptions for this instance.
21
+ * Should be called when the SubService instance is being destroyed.
22
+ */
23
+ cleanup(): Promise<void>;
15
24
  publish(keyType: KeyType.QUORUM, message: Record<string, any>, appId: string, topic?: string): Promise<boolean>;
16
25
  psubscribe(): Promise<void>;
17
26
  punsubscribe(): Promise<void>;
@@ -10,12 +10,49 @@ const index_1 = require("../../index");
10
10
  class PostgresSubService extends index_1.SubService {
11
11
  constructor(eventClient, storeClient) {
12
12
  super(eventClient, storeClient);
13
+ // Instance-level subscriptions for cleanup
14
+ this.instanceSubscriptions = new Set();
13
15
  }
14
16
  async init(namespace = key_1.HMNS, appId, engineId, logger) {
15
17
  this.namespace = namespace;
16
18
  this.logger = logger;
17
19
  this.appId = appId;
18
20
  this.engineId = engineId;
21
+ this.setupNotificationHandler();
22
+ }
23
+ setupNotificationHandler() {
24
+ // Check if notification handler is already set up for this client
25
+ if (PostgresSubService.clientHandlers.get(this.eventClient)) {
26
+ return;
27
+ }
28
+ // Initialize subscription map for this client if it doesn't exist
29
+ if (!PostgresSubService.clientSubscriptions.has(this.eventClient)) {
30
+ PostgresSubService.clientSubscriptions.set(this.eventClient, new Map());
31
+ }
32
+ // Set up the notification handler for this client
33
+ this.eventClient.on('notification', (msg) => {
34
+ const clientSubscriptions = PostgresSubService.clientSubscriptions.get(this.eventClient);
35
+ const callbacks = clientSubscriptions?.get(msg.channel);
36
+ if (callbacks && callbacks.size > 0) {
37
+ try {
38
+ const payload = JSON.parse(msg.payload || '{}');
39
+ // Call all callbacks registered for this channel across all SubService instances
40
+ callbacks.forEach(callback => {
41
+ try {
42
+ callback(msg.channel, payload);
43
+ }
44
+ catch (err) {
45
+ this.logger?.error(`Error in subscription callback for ${msg.channel}:`, err);
46
+ }
47
+ });
48
+ }
49
+ catch (err) {
50
+ this.logger?.error(`Error parsing message for topic ${msg.channel}:`, err);
51
+ }
52
+ }
53
+ });
54
+ // Mark this client as having a notification handler
55
+ PostgresSubService.clientHandlers.set(this.eventClient, true);
19
56
  }
20
57
  transact() {
21
58
  throw new Error('Transactions are not supported in lightweight pub/sub');
@@ -46,30 +83,81 @@ class PostgresSubService extends index_1.SubService {
46
83
  appId,
47
84
  engineId: topic,
48
85
  });
49
- // Start listening to the safe topic
50
- await this.eventClient.query(`LISTEN "${safeKey}"`);
51
- this.logger.debug(`postgres-subscribe`, { originalKey, safeKey });
52
- // Set up the notification handler
53
- this.eventClient.on('notification', (msg) => {
54
- if (msg.channel === safeKey) {
55
- try {
56
- const payload = JSON.parse(msg.payload || '{}');
57
- callback(safeKey, payload);
58
- }
59
- catch (err) {
60
- this.logger.error(`Error parsing message for topic ${safeKey}:`, err);
61
- }
62
- }
63
- });
86
+ // Get or create subscription map for this client
87
+ let clientSubscriptions = PostgresSubService.clientSubscriptions.get(this.eventClient);
88
+ if (!clientSubscriptions) {
89
+ clientSubscriptions = new Map();
90
+ PostgresSubService.clientSubscriptions.set(this.eventClient, clientSubscriptions);
91
+ }
92
+ // Get or create callback array for this channel
93
+ let callbacks = clientSubscriptions.get(safeKey);
94
+ if (!callbacks) {
95
+ callbacks = new Map();
96
+ clientSubscriptions.set(safeKey, callbacks);
97
+ // Start listening to the safe topic (only once per channel across all instances)
98
+ await this.eventClient.query(`LISTEN "${safeKey}"`);
99
+ }
100
+ // Add this callback to the list
101
+ callbacks.set(this, callback);
102
+ // Track this subscription for cleanup
103
+ this.instanceSubscriptions.add(safeKey);
104
+ this.logger.debug(`postgres-subscribe`, { originalKey, safeKey, totalCallbacks: callbacks.size });
64
105
  }
65
106
  async unsubscribe(keyType, appId, topic) {
66
107
  const [originalKey, safeKey] = this.mintSafeKey(keyType, {
67
108
  appId,
68
109
  engineId: topic,
69
110
  });
70
- // Stop listening to the safe topic
71
- await this.eventClient.query(`UNLISTEN "${safeKey}"`);
72
- this.logger.debug(`postgres-subscribe`, { originalKey, safeKey });
111
+ const clientSubscriptions = PostgresSubService.clientSubscriptions.get(this.eventClient);
112
+ if (!clientSubscriptions) {
113
+ return;
114
+ }
115
+ const callbacks = clientSubscriptions.get(safeKey);
116
+ if (!callbacks || callbacks.size === 0) {
117
+ return;
118
+ }
119
+ // Remove callback from this specific instance
120
+ callbacks.delete(this);
121
+ // Remove from instance tracking
122
+ this.instanceSubscriptions.delete(safeKey);
123
+ // Stop listening to the safe topic if no more callbacks exist
124
+ if (callbacks.size === 0) {
125
+ clientSubscriptions.delete(safeKey);
126
+ await this.eventClient.query(`UNLISTEN "${safeKey}"`);
127
+ }
128
+ this.logger.debug(`postgres-unsubscribe`, { originalKey, safeKey, remainingCallbacks: callbacks.size });
129
+ }
130
+ /**
131
+ * Cleanup method to remove all subscriptions for this instance.
132
+ * Should be called when the SubService instance is being destroyed.
133
+ */
134
+ async cleanup() {
135
+ const clientSubscriptions = PostgresSubService.clientSubscriptions.get(this.eventClient);
136
+ if (!clientSubscriptions) {
137
+ return;
138
+ }
139
+ for (const safeKey of this.instanceSubscriptions) {
140
+ const callbacks = clientSubscriptions.get(safeKey);
141
+ if (callbacks) {
142
+ callbacks.delete(this);
143
+ // If no more callbacks exist for this channel, stop listening
144
+ if (callbacks.size === 0) {
145
+ clientSubscriptions.delete(safeKey);
146
+ try {
147
+ await this.eventClient.query(`UNLISTEN "${safeKey}"`);
148
+ }
149
+ catch (err) {
150
+ this.logger?.error(`Error unlistening from ${safeKey}:`, err);
151
+ }
152
+ }
153
+ }
154
+ }
155
+ this.instanceSubscriptions.clear();
156
+ // If no more subscriptions exist for this client, remove it from static maps
157
+ if (clientSubscriptions.size === 0) {
158
+ PostgresSubService.clientSubscriptions.delete(this.eventClient);
159
+ PostgresSubService.clientHandlers.delete(this.eventClient);
160
+ }
73
161
  }
74
162
  async publish(keyType, message, appId, topic) {
75
163
  const [originalKey, safeKey] = this.mintSafeKey(keyType, {
@@ -90,3 +178,6 @@ class PostgresSubService extends index_1.SubService {
90
178
  }
91
179
  }
92
180
  exports.PostgresSubService = PostgresSubService;
181
+ // Static maps to manage subscriptions across all instances sharing the same client
182
+ PostgresSubService.clientSubscriptions = new Map();
183
+ PostgresSubService.clientHandlers = new Map();
@@ -24,6 +24,10 @@ class WorkerService {
24
24
  const services = [];
25
25
  if (Array.isArray(config.workers)) {
26
26
  for (const worker of config.workers) {
27
+ // Pass taskQueue from top-level config to worker for connection pooling
28
+ if (config.taskQueue) {
29
+ worker.taskQueue = config.taskQueue;
30
+ }
27
31
  await factory_1.ConnectorService.initClients(worker);
28
32
  const service = new WorkerService();
29
33
  service.verifyWorkerFields(worker);
@@ -72,11 +72,6 @@ type HotMeshEngine = {
72
72
  * @private
73
73
  */
74
74
  search?: ProviderClient;
75
- /**
76
- * redis connection options; replaced with 'connection'
77
- * @deprecated
78
- */
79
- redis?: ProviderConfig;
80
75
  /**
81
76
  * short-form format for the connection options for the
82
77
  * store, stream, sub, and search clients
@@ -107,6 +102,12 @@ type HotMeshEngine = {
107
102
  * @default false
108
103
  */
109
104
  readonly?: boolean;
105
+ /**
106
+ * Task queue identifier used for connection pooling optimization.
107
+ * When provided, connections will be reused across providers (store, sub, stream)
108
+ * that share the same task queue and database configuration.
109
+ */
110
+ taskQueue?: string;
110
111
  };
111
112
  type HotMeshWorker = {
112
113
  /**
@@ -173,6 +174,12 @@ type HotMeshWorker = {
173
174
  * from the target stream
174
175
  */
175
176
  callback: (payload: StreamData) => Promise<StreamDataResponse>;
177
+ /**
178
+ * Task queue identifier used for connection pooling optimization.
179
+ * When provided, connections will be reused across providers (store, sub, stream)
180
+ * that share the same task queue and database configuration.
181
+ */
182
+ taskQueue?: string;
176
183
  };
177
184
  type HotMeshConfig = {
178
185
  appId: string;
@@ -181,6 +188,13 @@ type HotMeshConfig = {
181
188
  guid?: string;
182
189
  logger?: ILogger;
183
190
  logLevel?: LogLevel;
191
+ /**
192
+ * Task queue identifier used for connection pooling optimization.
193
+ * When multiple engines/workers share the same task queue and database configuration,
194
+ * they will reuse the same connection instead of creating separate ones.
195
+ * This is particularly useful for PostgreSQL providers to reduce connection overhead.
196
+ */
197
+ taskQueue?: string;
184
198
  engine?: HotMeshEngine;
185
199
  workers?: HotMeshWorker[];
186
200
  };
@@ -10,12 +10,10 @@ export { HookCondition, HookConditions, HookGate, HookInterface, HookRule, HookR
10
10
  export { HotMesh, HotMeshEngine, HotMeshWorker, HotMeshSettings, HotMeshApp, HotMeshApps, HotMeshConfig, HotMeshManifest, HotMeshGraph, KeyType, KeyStoreParams, } from './hotmesh';
11
11
  export { ILogger, LogLevel } from './logger';
12
12
  export { ExtensionType, JobCompletionOptions, JobData, JobsData, JobInterruptOptions, JobMetadata, JobOutput, JobState, JobStatus, PartialJobState, } from './job';
13
- export { DB, Profile, Namespaces, Entity, EntityInstanceTypes, SubClassInstance, AllSubclassInstances, SubclassType, Namespace, Instance, Instances, Profiles, } from './manifest';
14
13
  export { MappingStatements } from './map';
15
14
  export { Pipe, PipeContext, PipeItem, PipeItems, PipeObject, ReduceObject, } from './pipe';
16
15
  export { ProviderClass, ProviderClient, ProviderConfig, ProviderTransaction, Providers, TransactionResultList, ProviderNativeClient, ProviderOptions, } from './provider';
17
16
  export { MeshCallConnectParams, MeshCallExecParams, MeshCallCronParams, MeshCallExecOptions, MeshCallCronOptions, MeshCallInterruptOptions, MeshCallInterruptParams, MeshCallFlushParams, } from './meshcall';
18
- export { CallOptions, MeshDataWorkflowOptions, ConnectOptions, ConnectionInput, ExecInput, } from './meshdata';
19
17
  export { PostgresClassType, PostgresClientOptions, PostgresClientType, PostgresConsumerGroup, PostgresPendingMessage, PostgresPoolClientType, PostgresQueryConfigType, PostgresQueryResultType, PostgresStreamMessage, PostgresStreamOptions, PostgresTransaction, } from './postgres';
20
18
  export { ActivateMessage, CronMessage, JobMessage, JobMessageCallback, PingMessage, PongMessage, QuorumMessage, QuorumMessageCallback, QuorumProfile, RollCallMessage, RollCallOptions, SubscriptionCallback, SubscriptionOptions, SystemHealth, ThrottleMessage, ThrottleOptions, WorkMessage, } from './quorum';
21
19
  export { NatsAckPolicy, NatsAckPolicyExplicitType, NatsClassType, NatsClientType, NatsClientOptions, NatsConsumerConfigType, NatsJetStreamManager, NatsConnection, NatsJetStreamType, NatsConnectionOptions, NatsConsumerConfig, NatsConsumerInfo, NatsConsumerManager, NatsDeliveryInfo, NatsJetStreamOptions, NatsError, NatsErrorType, NatsJetStreamClient, NatsJsMsg, NatsMessageType, NatsMsgExpect, NatsPubAck, NatsPubAckType, NatsPublishOptions, NatsRetentionPolicy, NatsRetentionPolicyWorkqueueType, NatsSequenceInfo, NatsStorageMemoryType, NatsStorageType, NatsStreamConfig, NatsStreamInfo, NatsStreamManager, NatsStreamConfigType, NatsStreamInfoType, NatsStreamOptions, NatsStreamState, NatsTransaction, } from './nats';
package/env.example ADDED
@@ -0,0 +1,11 @@
1
+ # Provide an openai api key to run the agent and pipeline tests
2
+ # OPENAI_API_KEY=sk-proj-qqVSdKZ...Mvl3aw7gFuAEMA
3
+
4
+ # Connect as follows to run the tests against a remote postgres instance (supabase)
5
+ # replace XXXXX and YYYYY with your supabase credentials
6
+ # POSTGRES_IS_REMOTE=true
7
+ # POSTGRES_HOST=aws-0-us-west-1.pooler.supabase.com
8
+ # POSTGRES_PORT=5432
9
+ # POSTGRES_DB=postgres
10
+ # POSTGRES_USER=postgres.XXXXX
11
+ # POSTGRES_PASSWORD=YYYYY
package/index.ts CHANGED
@@ -10,8 +10,6 @@ import { WorkerService as Worker } from './services/memflow/worker';
10
10
  import { WorkflowService as workflow } from './services/memflow/workflow';
11
11
  import { WorkflowHandleService as WorkflowHandle } from './services/memflow/handle';
12
12
  import { proxyActivities } from './services/memflow/workflow/proxyActivities';
13
- import { MeshData } from './services/meshdata';
14
- import { MeshOS } from './services/meshos';
15
13
  import * as Errors from './modules/errors';
16
14
  import * as Utils from './modules/utils';
17
15
  import * as Enums from './modules/enums';
@@ -34,9 +32,7 @@ export {
34
32
  HotMesh,
35
33
  HotMeshConfig,
36
34
  MeshCall,
37
- MeshData,
38
35
  MemFlow,
39
- MeshOS,
40
36
 
41
37
  //MemFlow Submodules
42
38
  Client,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hotmeshio/hotmesh",
3
- "version": "0.5.3",
3
+ "version": "0.5.4",
4
4
  "description": "Permanent-Memory Workflows & AI Agents",
5
5
  "main": "./build/index.js",
6
6
  "types": "./build/index.d.ts",
@@ -20,7 +20,7 @@
20
20
  "lint:fix": "eslint . --fix --ext .ts",
21
21
  "start": "ts-node src/index.ts",
22
22
  "test": "NODE_ENV=test jest --detectOpenHandles --forceExit --verbose",
23
- "test:await": "NODE_ENV=test jest ./tests/functional/awaiter/*.test.ts --detectOpenHandles --forceExit --verbose",
23
+ "test:await": "NODE_ENV=test jest ./tests/functional/awaiter/postgres.test.ts --detectOpenHandles --forceExit --verbose",
24
24
  "test:compile": "NODE_ENV=test jest ./tests/functional/compile/index.test.ts --detectOpenHandles --forceExit --verbose",
25
25
  "test:connect": "NODE_ENV=test jest ./tests/unit/services/connector/* --detectOpenHandles --forceExit --verbose",
26
26
  "test:connect:ioredis": "NODE_ENV=test jest ./tests/unit/services/connector/providers/ioredis.test.ts --detectOpenHandles --forceExit --verbose",
@@ -59,7 +59,7 @@
59
59
  "test:reporter": "NODE_ENV=test jest ./tests/unit/services/reporter/index.test.ts --detectOpenHandles --forceExit --verbose",
60
60
  "test:reentrant": "NODE_ENV=test jest ./tests/functional/reentrant/*.test.ts --detectOpenHandles --forceExit --verbose",
61
61
  "test:retry": "NODE_ENV=test jest ./tests/functional/retry/*.test.ts --detectOpenHandles --forceExit --verbose",
62
- "test:sequence": "NODE_ENV=test HMSH_LOGLEVEL=info jest ./tests/functional/sequence/*.test.ts --detectOpenHandles --forceExit --verbose",
62
+ "test:sequence": "NODE_ENV=test HMSH_LOGLEVEL=debug jest ./tests/functional/sequence/postgres.test.ts --detectOpenHandles --forceExit --verbose",
63
63
  "test:signal": "NODE_ENV=test jest ./tests/functional/signal/*.test.ts --detectOpenHandles --forceExit --verbose",
64
64
  "test:status": "NODE_ENV=test jest ./tests/functional/status/index.test.ts --detectOpenHandles --forceExit --verbose",
65
65
  "test:providers": "NODE_ENV=test jest ./tests/functional/*/providers/*/*.test.ts --detectOpenHandles --forceExit --verbose",
@@ -75,7 +75,6 @@
75
75
  "test:sub:postgres": "NODE_ENV=test jest ./tests/functional/sub/providers/postgres/postgres.test.ts --detectOpenHandles --forceExit --verbose",
76
76
  "test:sub:nats": "NODE_ENV=test jest ./tests/functional/sub/providers/nats/nats.test.ts --detectOpenHandles --forceExit --verbose",
77
77
  "test:trigger": "NODE_ENV=test jest ./tests/unit/services/activities/trigger.test.ts --detectOpenHandles --forceExit --verbose",
78
- "test:meshos": "HMSH_LOGLEVEL=info NODE_ENV=test HMSH_IS_CLUSTER=true jest ./tests/meshos/*.test.ts --forceExit --verbose --detectOpenHandles",
79
78
  "test:meshcall": "NODE_ENV=test jest ./tests/meshcall/*.test.ts --forceExit --verbose --detectOpenHandles",
80
79
  "test:unit": "NODE_ENV=test jest ./tests/unit/*/*/index.test.ts --detectOpenHandles --forceExit --verbose"
81
80
  },