@hotmeshio/hotmesh 0.6.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. package/README.md +179 -142
  2. package/build/index.d.ts +3 -1
  3. package/build/index.js +5 -1
  4. package/build/modules/enums.d.ts +18 -0
  5. package/build/modules/enums.js +27 -1
  6. package/build/modules/utils.d.ts +27 -0
  7. package/build/modules/utils.js +79 -1
  8. package/build/package.json +24 -10
  9. package/build/services/connector/factory.d.ts +1 -1
  10. package/build/services/connector/factory.js +15 -1
  11. package/build/services/connector/providers/ioredis.d.ts +9 -0
  12. package/build/services/connector/providers/ioredis.js +26 -0
  13. package/build/services/connector/providers/postgres.js +3 -0
  14. package/build/services/connector/providers/redis.d.ts +9 -0
  15. package/build/services/connector/providers/redis.js +38 -0
  16. package/build/services/hotmesh/index.d.ts +66 -15
  17. package/build/services/hotmesh/index.js +84 -15
  18. package/build/services/memflow/index.d.ts +100 -14
  19. package/build/services/memflow/index.js +100 -14
  20. package/build/services/memflow/worker.d.ts +97 -0
  21. package/build/services/memflow/worker.js +217 -0
  22. package/build/services/memflow/workflow/proxyActivities.d.ts +74 -3
  23. package/build/services/memflow/workflow/proxyActivities.js +81 -4
  24. package/build/services/router/consumption/index.d.ts +2 -1
  25. package/build/services/router/consumption/index.js +38 -2
  26. package/build/services/router/error-handling/index.d.ts +3 -3
  27. package/build/services/router/error-handling/index.js +48 -13
  28. package/build/services/router/index.d.ts +1 -0
  29. package/build/services/router/index.js +2 -1
  30. package/build/services/search/factory.js +8 -0
  31. package/build/services/search/providers/redis/ioredis.d.ts +23 -0
  32. package/build/services/search/providers/redis/ioredis.js +189 -0
  33. package/build/services/search/providers/redis/redis.d.ts +23 -0
  34. package/build/services/search/providers/redis/redis.js +202 -0
  35. package/build/services/store/factory.js +9 -1
  36. package/build/services/store/index.d.ts +3 -2
  37. package/build/services/store/providers/postgres/kvtypes/hash/basic.js +36 -6
  38. package/build/services/store/providers/postgres/kvtypes/hash/expire.js +12 -2
  39. package/build/services/store/providers/postgres/kvtypes/hash/scan.js +30 -10
  40. package/build/services/store/providers/postgres/kvtypes/list.js +68 -10
  41. package/build/services/store/providers/postgres/kvtypes/string.js +60 -10
  42. package/build/services/store/providers/postgres/kvtypes/zset.js +92 -22
  43. package/build/services/store/providers/postgres/postgres.d.ts +3 -3
  44. package/build/services/store/providers/redis/_base.d.ts +137 -0
  45. package/build/services/store/providers/redis/_base.js +980 -0
  46. package/build/services/store/providers/redis/ioredis.d.ts +20 -0
  47. package/build/services/store/providers/redis/ioredis.js +190 -0
  48. package/build/services/store/providers/redis/redis.d.ts +18 -0
  49. package/build/services/store/providers/redis/redis.js +199 -0
  50. package/build/services/stream/factory.js +17 -1
  51. package/build/services/stream/providers/postgres/kvtables.js +76 -23
  52. package/build/services/stream/providers/postgres/lifecycle.d.ts +19 -0
  53. package/build/services/stream/providers/postgres/lifecycle.js +54 -0
  54. package/build/services/stream/providers/postgres/messages.d.ts +56 -0
  55. package/build/services/stream/providers/postgres/messages.js +253 -0
  56. package/build/services/stream/providers/postgres/notifications.d.ts +59 -0
  57. package/build/services/stream/providers/postgres/notifications.js +357 -0
  58. package/build/services/stream/providers/postgres/postgres.d.ts +110 -11
  59. package/build/services/stream/providers/postgres/postgres.js +196 -488
  60. package/build/services/stream/providers/postgres/scout.d.ts +68 -0
  61. package/build/services/stream/providers/postgres/scout.js +233 -0
  62. package/build/services/stream/providers/postgres/stats.d.ts +49 -0
  63. package/build/services/stream/providers/postgres/stats.js +113 -0
  64. package/build/services/stream/providers/redis/ioredis.d.ts +61 -0
  65. package/build/services/stream/providers/redis/ioredis.js +272 -0
  66. package/build/services/stream/providers/redis/redis.d.ts +61 -0
  67. package/build/services/stream/providers/redis/redis.js +305 -0
  68. package/build/services/sub/factory.js +8 -0
  69. package/build/services/sub/providers/postgres/postgres.js +37 -5
  70. package/build/services/sub/providers/redis/ioredis.d.ts +20 -0
  71. package/build/services/sub/providers/redis/ioredis.js +161 -0
  72. package/build/services/sub/providers/redis/redis.d.ts +18 -0
  73. package/build/services/sub/providers/redis/redis.js +148 -0
  74. package/build/services/worker/index.d.ts +1 -0
  75. package/build/services/worker/index.js +2 -0
  76. package/build/types/hotmesh.d.ts +42 -2
  77. package/build/types/index.d.ts +4 -3
  78. package/build/types/index.js +4 -1
  79. package/build/types/memflow.d.ts +32 -0
  80. package/build/types/provider.d.ts +17 -1
  81. package/build/types/redis.d.ts +258 -0
  82. package/build/types/redis.js +11 -0
  83. package/build/types/stream.d.ts +92 -1
  84. package/index.ts +4 -0
  85. package/package.json +24 -10
@@ -0,0 +1,20 @@
1
+ import { IORedisClientType as RedisClientType, IORedisMultiType as RedisMultiType } from '../../../../types/redis';
2
+ import { StoreInitializable } from '../store-initializable';
3
+ import { RedisStoreBase } from './_base';
4
+ declare class IORedisStoreService extends RedisStoreBase<RedisClientType, RedisMultiType> implements StoreInitializable {
5
+ constructor(storeClient: RedisClientType);
6
+ /**
7
+ * When in cluster mode, the transact wrapper only
8
+ * sends commands to the same node/shard if they share a key.
9
+ * All other commands are sent simultaneously using Promise.all
10
+ * and are then collated. This is effectively a wrapper for
11
+ * `multi` but is closer to `pipeline` in terms of usage when
12
+ * promises are used.
13
+ */
14
+ transact(): RedisMultiType;
15
+ exec(...args: any[]): Promise<string | string[] | string[][]>;
16
+ setnxex(key: string, value: string, expireSeconds: number): Promise<boolean>;
17
+ hGetAllResult(result: any): any;
18
+ addTaskQueues(keys: string[]): Promise<void>;
19
+ }
20
+ export { IORedisStoreService };
@@ -0,0 +1,190 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.IORedisStoreService = void 0;
4
+ const key_1 = require("../../../../modules/key");
5
+ const enums_1 = require("../../../../modules/enums");
6
+ const _base_1 = require("./_base");
7
+ class IORedisStoreService extends _base_1.RedisStoreBase {
8
+ constructor(storeClient) {
9
+ super(storeClient);
10
+ this.commands = {
11
+ get: 'get',
12
+ set: 'set',
13
+ setnx: 'setnx',
14
+ del: 'del',
15
+ expire: 'expire',
16
+ hset: 'hset',
17
+ hscan: 'hscan',
18
+ hsetnx: 'hsetnx',
19
+ hincrby: 'hincrby',
20
+ hdel: 'hdel',
21
+ hget: 'hget',
22
+ hmget: 'hmget',
23
+ hgetall: 'hgetall',
24
+ hincrbyfloat: 'hincrbyfloat',
25
+ zrank: 'zrank',
26
+ zrange: 'zrange',
27
+ zrangebyscore_withscores: 'zrangebyscore',
28
+ zrangebyscore: 'zrangebyscore',
29
+ zrem: 'zrem',
30
+ zadd: 'zadd',
31
+ lmove: 'lmove',
32
+ lpop: 'lpop',
33
+ lrange: 'lrange',
34
+ rename: 'rename',
35
+ rpush: 'rpush',
36
+ scan: 'scan',
37
+ xack: 'xack',
38
+ xdel: 'xdel',
39
+ };
40
+ }
41
+ /**
42
+ * When in cluster mode, the transact wrapper only
43
+ * sends commands to the same node/shard if they share a key.
44
+ * All other commands are sent simultaneously using Promise.all
45
+ * and are then collated. This is effectively a wrapper for
46
+ * `multi` but is closer to `pipeline` in terms of usage when
47
+ * promises are used.
48
+ */
49
+ transact() {
50
+ const my = this;
51
+ if (enums_1.HMSH_IS_CLUSTER) {
52
+ const commands = [];
53
+ const addCommand = (command, args) => {
54
+ commands.push({ command, args });
55
+ return multiInstance;
56
+ };
57
+ const multiInstance = {
58
+ sendCommand(command) {
59
+ return my.storeClient.sendCommand(command);
60
+ },
61
+ async exec() {
62
+ if (commands.length === 0)
63
+ return [];
64
+ const sameKey = commands.every((cmd) => {
65
+ return cmd.args[0] === commands[0].args[0];
66
+ });
67
+ if (sameKey) {
68
+ const multi = my.storeClient.multi();
69
+ commands.forEach((cmd) => multi[cmd.command](...cmd.args));
70
+ const results = await multi.exec();
71
+ return results.map((item) => item);
72
+ }
73
+ else {
74
+ return Promise.all(commands.map((cmd) => my.storeClient[cmd.command](...cmd.args)));
75
+ }
76
+ },
77
+ xadd(key, id, fields, message) {
78
+ return addCommand('xadd', [key, id, fields, message]);
79
+ },
80
+ xack(key, group, id) {
81
+ return addCommand('xack', [key, group, id]);
82
+ },
83
+ xdel(key, id) {
84
+ return addCommand('xdel', [key, id]);
85
+ },
86
+ xlen(key) {
87
+ return addCommand('xlen', [key]);
88
+ },
89
+ xpending(key, group, start, end, count, consumer) {
90
+ return addCommand('xpending', [
91
+ key,
92
+ group,
93
+ start,
94
+ end,
95
+ count,
96
+ consumer,
97
+ ]);
98
+ },
99
+ xclaim(key, group, consumer, minIdleTime, id, ...args) {
100
+ return addCommand('xclaim', [
101
+ key,
102
+ group,
103
+ consumer,
104
+ minIdleTime,
105
+ id,
106
+ ...args,
107
+ ]);
108
+ },
109
+ del(key) {
110
+ return addCommand('del', [key]);
111
+ },
112
+ expire: function (key, seconds) {
113
+ return addCommand('expire', [key, seconds]);
114
+ },
115
+ hdel(key, itemId) {
116
+ return addCommand('hdel', [key, itemId]);
117
+ },
118
+ hget(key, itemId) {
119
+ return addCommand('hget', [key, itemId]);
120
+ },
121
+ hgetall(key) {
122
+ return addCommand('hgetall', [key]);
123
+ },
124
+ hincrbyfloat(key, itemId, value) {
125
+ return addCommand('hincrbyfloat', [key, itemId, value]);
126
+ },
127
+ hmget(key, itemIds) {
128
+ return addCommand('hmget', [key, itemIds]);
129
+ },
130
+ hset(key, values) {
131
+ return addCommand('hset', [key, values]);
132
+ },
133
+ lrange(key, start, end) {
134
+ return addCommand('lrange', [key, start, end]);
135
+ },
136
+ rpush(key, value) {
137
+ return addCommand('rpush', [key, value]);
138
+ },
139
+ zadd(...args) {
140
+ return addCommand('zadd', args);
141
+ },
142
+ xgroup(command, key, groupName, id, mkStream) {
143
+ return addCommand('xgroup', [command, key, groupName, id, mkStream]);
144
+ },
145
+ };
146
+ return multiInstance;
147
+ }
148
+ return this.storeClient.multi();
149
+ }
150
+ async exec(...args) {
151
+ try {
152
+ const response = await this.storeClient.call.apply(this.storeClient, args);
153
+ if (typeof response === 'string') {
154
+ return response;
155
+ }
156
+ else if (Array.isArray(response)) {
157
+ if (Array.isArray(response[0])) {
158
+ return response;
159
+ }
160
+ return response;
161
+ }
162
+ return response;
163
+ }
164
+ catch (error) {
165
+ // Connection closed during test cleanup - swallow the error and return an empty response
166
+ if (error?.message?.includes('Connection is closed')) {
167
+ return [];
168
+ }
169
+ // Re-throw unexpected errors
170
+ throw error;
171
+ }
172
+ }
173
+ async setnxex(key, value, expireSeconds) {
174
+ const status = await this.storeClient[this.commands.set](key, value, 'NX', 'EX', expireSeconds.toString());
175
+ return this.isSuccessful(status);
176
+ }
177
+ hGetAllResult(result) {
178
+ //ioredis response signature is [null, {}] or [null, null]
179
+ return result[1];
180
+ }
181
+ async addTaskQueues(keys) {
182
+ const multi = this.storeClient.multi();
183
+ const zsetKey = this.mintKey(key_1.KeyType.WORK_ITEMS, { appId: this.appId });
184
+ for (const key of keys) {
185
+ multi.zadd(zsetKey, 'NX', Date.now(), key);
186
+ }
187
+ await multi.exec();
188
+ }
189
+ }
190
+ exports.IORedisStoreService = IORedisStoreService;
@@ -0,0 +1,18 @@
1
+ import { StoreInitializable } from '../store-initializable';
2
+ import { RedisRedisClientType as RedisClientType, RedisRedisMultiType as RedisMultiType } from '../../../../types/redis';
3
+ import { RedisStoreBase } from './_base';
4
+ declare class RedisStoreService extends RedisStoreBase<RedisClientType, RedisMultiType> implements StoreInitializable {
5
+ constructor(storeClient: RedisClientType);
6
+ /**
7
+ * When in cluster mode, the transact wrapper only
8
+ * sends commands to the same node/shard if they share a key.
9
+ * All other commands are sent simultaneously using Promise.all
10
+ * and are then collated
11
+ */
12
+ transact(): RedisMultiType;
13
+ exec(...args: any[]): Promise<string | string[] | string[][]>;
14
+ setnxex(key: string, value: string, expireSeconds: number): Promise<boolean>;
15
+ zAdd(key: string, score: number | string, value: string | number, redisMulti?: RedisMultiType): Promise<any>;
16
+ zRangeByScore(key: string, score: number | string, value: string | number): Promise<string | null>;
17
+ }
18
+ export { RedisStoreService };
@@ -0,0 +1,199 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.RedisStoreService = void 0;
4
+ const enums_1 = require("../../../../modules/enums");
5
+ const _base_1 = require("./_base");
6
+ class RedisStoreService extends _base_1.RedisStoreBase {
7
+ constructor(storeClient) {
8
+ super(storeClient);
9
+ this.commands = {
10
+ get: 'GET',
11
+ set: 'SET',
12
+ setnx: 'SETNX',
13
+ del: 'DEL',
14
+ expire: 'EXPIRE',
15
+ hscan: 'HSCAN',
16
+ hset: 'HSET',
17
+ hsetnx: 'HSETNX',
18
+ hincrby: 'HINCRBY',
19
+ hdel: 'HDEL',
20
+ hget: 'HGET',
21
+ hmget: 'HMGET',
22
+ hgetall: 'HGETALL',
23
+ hincrbyfloat: 'HINCRBYFLOAT',
24
+ zrange: 'ZRANGE',
25
+ zrangebyscore_withscores: 'ZRANGEBYSCORE_WITHSCORES',
26
+ zrangebyscore: 'ZRANGEBYSCORE',
27
+ zrem: 'ZREM',
28
+ zadd: 'ZADD',
29
+ lmove: 'LMOVE',
30
+ lrange: 'LRANGE',
31
+ lpop: 'LPOP',
32
+ rename: 'RENAME',
33
+ rpush: 'RPUSH',
34
+ scan: 'SCAN',
35
+ xack: 'XACK',
36
+ xdel: 'XDEL',
37
+ xlen: 'XLEN',
38
+ };
39
+ }
40
+ /**
41
+ * When in cluster mode, the transact wrapper only
42
+ * sends commands to the same node/shard if they share a key.
43
+ * All other commands are sent simultaneously using Promise.all
44
+ * and are then collated
45
+ */
46
+ transact() {
47
+ const my = this;
48
+ if (enums_1.HMSH_IS_CLUSTER) {
49
+ const commands = [];
50
+ const addCommand = (command, args) => {
51
+ commands.push({ command: command.toUpperCase(), args });
52
+ return multiInstance;
53
+ };
54
+ const multiInstance = {
55
+ sendCommand(command, ...args) {
56
+ return my.storeClient.sendCommand([command, ...args]);
57
+ },
58
+ async exec() {
59
+ if (commands.length === 0)
60
+ return [];
61
+ const sameKey = commands.every((cmd) => {
62
+ return cmd.args[0] === commands[0].args[0];
63
+ });
64
+ if (sameKey) {
65
+ const multi = my.storeClient.multi();
66
+ commands.forEach((cmd) => {
67
+ if (cmd.command === 'ZADD') {
68
+ return multi.ZADD(cmd.args[0], cmd.args[1], cmd.args[2]);
69
+ }
70
+ return multi[cmd.command](...cmd.args);
71
+ });
72
+ const results = await multi.exec();
73
+ return results.map((item) => item);
74
+ }
75
+ else {
76
+ return Promise.all(commands.map((cmd) => {
77
+ if (cmd.command === 'ZADD') {
78
+ return my.storeClient.ZADD(cmd.args[0], cmd.args[1], cmd.args[2]);
79
+ }
80
+ return my.storeClient[cmd.command](...cmd.args);
81
+ }));
82
+ }
83
+ },
84
+ XADD(key, id, fields, message) {
85
+ return addCommand('XADD', [key, id, fields, message]);
86
+ },
87
+ XACK(key, group, id) {
88
+ return addCommand('XACK', [key, group, id]);
89
+ },
90
+ XDEL(key, id) {
91
+ return addCommand('XDEL', [key, id]);
92
+ },
93
+ XLEN(key) {
94
+ return addCommand('XLEN', [key]);
95
+ },
96
+ XCLAIM(key, group, consumer, minIdleTime, id, ...args) {
97
+ return addCommand('XCLAIM', [
98
+ key,
99
+ group,
100
+ consumer,
101
+ minIdleTime,
102
+ id,
103
+ ...args,
104
+ ]);
105
+ },
106
+ XPENDING(key, group, start, end, count, consumer) {
107
+ return addCommand('XPENDING', [
108
+ key,
109
+ group,
110
+ start,
111
+ end,
112
+ count,
113
+ consumer,
114
+ ]);
115
+ },
116
+ DEL: function (key) {
117
+ return addCommand('DEL', [key]);
118
+ },
119
+ EXPIRE: function (key, seconds) {
120
+ return addCommand('EXPIRE', [key, seconds]);
121
+ },
122
+ HDEL(key, itemId) {
123
+ return addCommand('HDEL', [key, itemId]);
124
+ },
125
+ HGET(key, itemId) {
126
+ return addCommand('HGET', [key, itemId]);
127
+ },
128
+ HGETALL(key) {
129
+ return addCommand('HGETALL', [key]);
130
+ },
131
+ HINCRBYFLOAT(key, itemId, value) {
132
+ return addCommand('HINCRBYFLOAT', [key, itemId, value]);
133
+ },
134
+ HMGET(key, itemIds) {
135
+ return addCommand('HMGET', [key, itemIds]);
136
+ },
137
+ HSET(key, values) {
138
+ return addCommand('HSET', [key, values]);
139
+ },
140
+ LRANGE(key, start, end) {
141
+ return addCommand('LRANGE', [key, start, end]);
142
+ },
143
+ RPUSH(key, items) {
144
+ return addCommand('RPUSH', [key, items]);
145
+ },
146
+ ZADD(key, args, opts) {
147
+ return addCommand('ZADD', [key, args, opts]);
148
+ },
149
+ XGROUP(command, key, groupName, id, mkStream) {
150
+ return addCommand('XGROUP', [command, key, groupName, id, mkStream]);
151
+ },
152
+ EXISTS: function (key) {
153
+ throw new Error('Function not implemented.');
154
+ },
155
+ HMPUSH: function (key, values) {
156
+ throw new Error('Function not implemented.');
157
+ },
158
+ LPUSH: function (key, items) {
159
+ throw new Error('Function not implemented.');
160
+ },
161
+ SET: function (key, value) {
162
+ throw new Error('Function not implemented.');
163
+ },
164
+ ZRANGE_WITHSCORES: function (key, start, end) {
165
+ throw new Error('Function not implemented.');
166
+ },
167
+ ZRANK: function (key, member) {
168
+ throw new Error('Function not implemented.');
169
+ },
170
+ ZSCORE: function (key, value) {
171
+ throw new Error('Function not implemented.');
172
+ },
173
+ };
174
+ return multiInstance;
175
+ }
176
+ return this.storeClient.multi();
177
+ }
178
+ async exec(...args) {
179
+ return await this.storeClient.sendCommand(args);
180
+ }
181
+ async setnxex(key, value, expireSeconds) {
182
+ const status = await this.storeClient[this.commands.set](key, value, { NX: true, EX: expireSeconds });
183
+ return this.isSuccessful(status);
184
+ }
185
+ async zAdd(key, score, value, redisMulti) {
186
+ return await (redisMulti || this.storeClient)[this.commands.zadd](key, {
187
+ score: score,
188
+ value: value.toString(),
189
+ });
190
+ }
191
+ async zRangeByScore(key, score, value) {
192
+ const result = await this.storeClient[this.commands.zrangebyscore](key, score, value);
193
+ if (result?.length > 0) {
194
+ return result[0];
195
+ }
196
+ return null;
197
+ }
198
+ }
199
+ exports.RedisStoreService = RedisStoreService;
@@ -2,6 +2,8 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.StreamServiceFactory = void 0;
4
4
  const utils_1 = require("../../modules/utils");
5
+ const ioredis_1 = require("./providers/redis/ioredis");
6
+ const redis_1 = require("./providers/redis/redis");
5
7
  const nats_1 = require("./providers/nats/nats");
6
8
  const postgres_1 = require("./providers/postgres/postgres");
7
9
  class StreamServiceFactory {
@@ -9,10 +11,24 @@ class StreamServiceFactory {
9
11
  let service;
10
12
  const providerType = (0, utils_1.identifyProvider)(provider);
11
13
  if (providerType === 'nats') {
12
- service = new nats_1.NatsStreamService(provider, storeProvider);
14
+ let redisStoreProvider;
15
+ if ((0, utils_1.identifyProvider)(storeProvider) === 'redis') {
16
+ redisStoreProvider = storeProvider;
17
+ }
18
+ else {
19
+ //ioredis
20
+ redisStoreProvider = storeProvider;
21
+ }
22
+ service = new nats_1.NatsStreamService(provider, redisStoreProvider);
13
23
  }
14
24
  else if (providerType === 'postgres') {
15
25
  service = new postgres_1.PostgresStreamService(provider, storeProvider);
26
+ }
27
+ else if (providerType === 'redis') {
28
+ service = new redis_1.RedisStreamService(provider, storeProvider);
29
+ }
30
+ else if (providerType === 'ioredis') {
31
+ service = new ioredis_1.IORedisStreamService(provider, storeProvider);
16
32
  } //etc register other providers here
17
33
  await service.init(namespace, appId, logger);
18
34
  return service;
@@ -73,8 +73,13 @@ function hashStringToInt(str) {
73
73
  return Math.abs(hash);
74
74
  }
75
75
  async function checkIfTablesExist(client, schemaName, tableName) {
76
- const result = await client.query(`SELECT to_regclass('${tableName}') AS t`);
77
- return result.rows[0].t !== null;
76
+ // Check both streams table exists AND roles table (from store provider)
77
+ // The roles table is created by the store provider and is used for scout role coordination
78
+ const result = await client.query(`SELECT
79
+ to_regclass($1) AS streams_table,
80
+ to_regclass($2) AS roles_table`, [tableName, `${schemaName}.roles`]);
81
+ return result.rows[0].streams_table !== null &&
82
+ result.rows[0].roles_table !== null;
78
83
  }
79
84
  async function waitForTablesCreation(streamClient, lockId, schemaName, tableName, logger) {
80
85
  let retries = 0;
@@ -124,6 +129,11 @@ async function createTables(client, schemaName, tableName) {
124
129
  reserved_at TIMESTAMPTZ,
125
130
  reserved_by TEXT,
126
131
  expired_at TIMESTAMPTZ,
132
+ max_retry_attempts INT DEFAULT 3,
133
+ backoff_coefficient NUMERIC DEFAULT 10,
134
+ maximum_interval_seconds INT DEFAULT 120,
135
+ visible_at TIMESTAMPTZ DEFAULT NOW(),
136
+ retry_attempt INT DEFAULT 0,
127
137
  PRIMARY KEY (stream_name, id)
128
138
  ) PARTITION BY HASH (stream_name);
129
139
  `);
@@ -135,16 +145,16 @@ async function createTables(client, schemaName, tableName) {
135
145
  FOR VALUES WITH (modulus 8, remainder ${i});
136
146
  `);
137
147
  }
138
- // Index for active messages
148
+ // Index for active messages (includes visible_at for visibility timeout support)
139
149
  await client.query(`
140
150
  CREATE INDEX IF NOT EXISTS idx_streams_active_messages
141
- ON ${tableName} (group_name, stream_name, reserved_at, id)
151
+ ON ${tableName} (group_name, stream_name, reserved_at, visible_at, id)
142
152
  WHERE reserved_at IS NULL AND expired_at IS NULL;
143
153
  `);
144
- // Optimized index for the simplified fetchMessages query
154
+ // Optimized index for the simplified fetchMessages query (includes visible_at)
145
155
  await client.query(`
146
156
  CREATE INDEX IF NOT EXISTS idx_streams_message_fetch
147
- ON ${tableName} (stream_name, group_name, id)
157
+ ON ${tableName} (stream_name, group_name, visible_at, id)
148
158
  WHERE expired_at IS NULL;
149
159
  `);
150
160
  // Index for expired messages
@@ -166,7 +176,7 @@ async function createTables(client, schemaName, tableName) {
166
176
  // `);
167
177
  }
168
178
  async function createNotificationTriggers(client, schemaName, tableName) {
169
- // Create the notification function
179
+ // Create the notification function for INSERT events
170
180
  await client.query(`
171
181
  CREATE OR REPLACE FUNCTION ${schemaName}.notify_new_stream_message()
172
182
  RETURNS TRIGGER AS $$
@@ -174,24 +184,26 @@ async function createNotificationTriggers(client, schemaName, tableName) {
174
184
  channel_name TEXT;
175
185
  payload JSON;
176
186
  BEGIN
177
- -- Create channel name: stream_{stream_name}_{group_name}
178
- -- Truncate if too long (PostgreSQL channel names limited to 63 chars)
179
- channel_name := 'stream_' || NEW.stream_name || '_' || NEW.group_name;
180
- IF length(channel_name) > 63 THEN
181
- channel_name := left(channel_name, 63);
187
+ -- Only notify if message is immediately visible
188
+ -- Messages with visibility timeout will be notified when they become visible
189
+ IF NEW.visible_at <= NOW() THEN
190
+ -- Create channel name: stream_{stream_name}_{group_name}
191
+ -- Truncate if too long (PostgreSQL channel names limited to 63 chars)
192
+ channel_name := 'stream_' || NEW.stream_name || '_' || NEW.group_name;
193
+ IF length(channel_name) > 63 THEN
194
+ channel_name := left(channel_name, 63);
195
+ END IF;
196
+
197
+ -- Create minimal payload with only required fields
198
+ payload := json_build_object(
199
+ 'stream_name', NEW.stream_name,
200
+ 'group_name', NEW.group_name
201
+ );
202
+
203
+ -- Send notification
204
+ PERFORM pg_notify(channel_name, payload::text);
182
205
  END IF;
183
206
 
184
- -- Create payload with message details
185
- payload := json_build_object(
186
- 'id', NEW.id,
187
- 'stream_name', NEW.stream_name,
188
- 'group_name', NEW.group_name,
189
- 'created_at', extract(epoch from NEW.created_at)
190
- );
191
-
192
- -- Send notification
193
- PERFORM pg_notify(channel_name, payload::text);
194
-
195
207
  RETURN NEW;
196
208
  END;
197
209
  $$ LANGUAGE plpgsql;
@@ -204,6 +216,47 @@ async function createNotificationTriggers(client, schemaName, tableName) {
204
216
  FOR EACH ROW
205
217
  EXECUTE FUNCTION ${schemaName}.notify_new_stream_message();
206
218
  `);
219
+ // Create helper function to notify about messages with expired visibility timeouts
220
+ // This is called periodically by the router scout for responsive retry processing
221
+ await client.query(`
222
+ CREATE OR REPLACE FUNCTION ${schemaName}.notify_visible_messages()
223
+ RETURNS INTEGER AS $$
224
+ DECLARE
225
+ msg RECORD;
226
+ channel_name TEXT;
227
+ payload JSON;
228
+ notification_count INTEGER := 0;
229
+ BEGIN
230
+ -- Find all distinct streams with messages that are now visible
231
+ -- Router will drain all messages when notified, so we just notify each channel once
232
+ FOR msg IN
233
+ SELECT DISTINCT stream_name, group_name
234
+ FROM ${tableName}
235
+ WHERE visible_at <= NOW()
236
+ AND reserved_at IS NULL
237
+ AND expired_at IS NULL
238
+ LIMIT 100 -- Prevent overwhelming the system
239
+ LOOP
240
+ -- Create channel name (same logic as INSERT trigger)
241
+ channel_name := 'stream_' || msg.stream_name || '_' || msg.group_name;
242
+ IF length(channel_name) > 63 THEN
243
+ channel_name := left(channel_name, 63);
244
+ END IF;
245
+
246
+ -- Send minimal notification with only required fields
247
+ payload := json_build_object(
248
+ 'stream_name', msg.stream_name,
249
+ 'group_name', msg.group_name
250
+ );
251
+
252
+ PERFORM pg_notify(channel_name, payload::text);
253
+ notification_count := notification_count + 1;
254
+ END LOOP;
255
+
256
+ RETURN notification_count;
257
+ END;
258
+ $$ LANGUAGE plpgsql;
259
+ `);
207
260
  }
208
261
  function getNotificationChannelName(streamName, groupName) {
209
262
  const channelName = `stream_${streamName}_${groupName}`;
@@ -0,0 +1,19 @@
1
+ import { ILogger } from '../../../logger';
2
+ import { PostgresClientType } from '../../../../types/postgres';
3
+ import { ProviderClient } from '../../../../types/provider';
4
+ /**
5
+ * Create a stream (no-op for PostgreSQL - streams are created implicitly).
6
+ */
7
+ export declare function createStream(streamName: string): Promise<boolean>;
8
+ /**
9
+ * Delete a stream or all streams.
10
+ */
11
+ export declare function deleteStream(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, logger: ILogger): Promise<boolean>;
12
+ /**
13
+ * Create a consumer group (no-op for PostgreSQL - groups are created implicitly).
14
+ */
15
+ export declare function createConsumerGroup(streamName: string, groupName: string): Promise<boolean>;
16
+ /**
17
+ * Delete a consumer group (removes all messages for that group).
18
+ */
19
+ export declare function deleteConsumerGroup(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, logger: ILogger): Promise<boolean>;