@hotmeshio/hotmesh 0.6.1 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +179 -142
- package/build/modules/enums.d.ts +7 -0
- package/build/modules/enums.js +16 -1
- package/build/modules/utils.d.ts +27 -0
- package/build/modules/utils.js +52 -1
- package/build/package.json +10 -8
- package/build/services/connector/providers/postgres.js +3 -0
- package/build/services/hotmesh/index.d.ts +66 -15
- package/build/services/hotmesh/index.js +84 -15
- package/build/services/memflow/index.d.ts +100 -14
- package/build/services/memflow/index.js +100 -14
- package/build/services/memflow/worker.d.ts +97 -0
- package/build/services/memflow/worker.js +217 -0
- package/build/services/memflow/workflow/proxyActivities.d.ts +74 -3
- package/build/services/memflow/workflow/proxyActivities.js +81 -4
- package/build/services/router/consumption/index.d.ts +2 -1
- package/build/services/router/consumption/index.js +38 -2
- package/build/services/router/error-handling/index.d.ts +3 -3
- package/build/services/router/error-handling/index.js +48 -13
- package/build/services/router/index.d.ts +1 -0
- package/build/services/router/index.js +2 -1
- package/build/services/store/index.d.ts +3 -2
- package/build/services/store/providers/postgres/kvtypes/hash/basic.js +36 -6
- package/build/services/store/providers/postgres/kvtypes/hash/expire.js +12 -2
- package/build/services/store/providers/postgres/kvtypes/hash/scan.js +30 -10
- package/build/services/store/providers/postgres/kvtypes/list.js +68 -10
- package/build/services/store/providers/postgres/kvtypes/string.js +60 -10
- package/build/services/store/providers/postgres/kvtypes/zset.js +92 -22
- package/build/services/store/providers/postgres/postgres.d.ts +3 -3
- package/build/services/store/providers/redis/_base.d.ts +3 -3
- package/build/services/store/providers/redis/ioredis.js +17 -7
- package/build/services/stream/providers/postgres/kvtables.js +76 -23
- package/build/services/stream/providers/postgres/lifecycle.d.ts +19 -0
- package/build/services/stream/providers/postgres/lifecycle.js +54 -0
- package/build/services/stream/providers/postgres/messages.d.ts +56 -0
- package/build/services/stream/providers/postgres/messages.js +253 -0
- package/build/services/stream/providers/postgres/notifications.d.ts +59 -0
- package/build/services/stream/providers/postgres/notifications.js +357 -0
- package/build/services/stream/providers/postgres/postgres.d.ts +110 -11
- package/build/services/stream/providers/postgres/postgres.js +196 -488
- package/build/services/stream/providers/postgres/scout.d.ts +68 -0
- package/build/services/stream/providers/postgres/scout.js +233 -0
- package/build/services/stream/providers/postgres/stats.d.ts +49 -0
- package/build/services/stream/providers/postgres/stats.js +113 -0
- package/build/services/sub/providers/postgres/postgres.js +37 -5
- package/build/services/sub/providers/redis/ioredis.js +13 -2
- package/build/services/sub/providers/redis/redis.js +13 -2
- package/build/services/worker/index.d.ts +1 -0
- package/build/services/worker/index.js +2 -0
- package/build/types/hotmesh.d.ts +42 -2
- package/build/types/index.d.ts +3 -3
- package/build/types/memflow.d.ts +32 -0
- package/build/types/provider.d.ts +16 -0
- package/build/types/stream.d.ts +92 -1
- package/package.json +10 -8
|
@@ -3,7 +3,7 @@ import { ILogger } from '../../../logger';
|
|
|
3
3
|
import { ActivityType, Consumes } from '../../../../types/activity';
|
|
4
4
|
import { AppVID } from '../../../../types/app';
|
|
5
5
|
import { HookRule, HookSignal } from '../../../../types/hook';
|
|
6
|
-
import { HotMeshApp, HotMeshApps, HotMeshSettings } from '../../../../types/hotmesh';
|
|
6
|
+
import { HotMeshApp, HotMeshApps, HotMeshSettings, ScoutType } from '../../../../types/hotmesh';
|
|
7
7
|
import { ProviderClient, ProviderTransaction } from '../../../../types/provider';
|
|
8
8
|
import { SymbolSets, StringStringType, StringAnyType, Symbols } from '../../../../types/serializer';
|
|
9
9
|
import { IdsData, JobStatsRange, StatsType } from '../../../../types/stats';
|
|
@@ -38,8 +38,8 @@ declare class PostgresStoreService extends StoreService<ProviderClient, Provider
|
|
|
38
38
|
* check for and process work items in the
|
|
39
39
|
* time and signal task queues.
|
|
40
40
|
*/
|
|
41
|
-
reserveScoutRole(scoutType:
|
|
42
|
-
releaseScoutRole(scoutType:
|
|
41
|
+
reserveScoutRole(scoutType: ScoutType, delay?: number): Promise<boolean>;
|
|
42
|
+
releaseScoutRole(scoutType: ScoutType): Promise<boolean>;
|
|
43
43
|
getSettings(bCreate?: boolean): Promise<HotMeshSettings>;
|
|
44
44
|
setSettings(manifest: HotMeshSettings): Promise<any>;
|
|
45
45
|
reserveSymbolRange(target: string, size: number, type: 'JOB' | 'ACTIVITY', tryCount?: number): Promise<[number, number, Symbols]>;
|
|
@@ -3,7 +3,7 @@ import { ILogger } from '../../../logger';
|
|
|
3
3
|
import { ActivityType, Consumes } from '../../../../types/activity';
|
|
4
4
|
import { AppVID } from '../../../../types/app';
|
|
5
5
|
import { HookRule, HookSignal } from '../../../../types/hook';
|
|
6
|
-
import { HotMeshApp, HotMeshApps, HotMeshSettings } from '../../../../types/hotmesh';
|
|
6
|
+
import { HotMeshApp, HotMeshApps, HotMeshSettings, ScoutType } from '../../../../types/hotmesh';
|
|
7
7
|
import { ProviderClient, ProviderTransaction } from '../../../../types/provider';
|
|
8
8
|
import { SymbolSets, StringStringType, StringAnyType, Symbols } from '../../../../types/serializer';
|
|
9
9
|
import { IdsData, JobStatsRange, StatsType } from '../../../../types/stats';
|
|
@@ -30,8 +30,8 @@ declare abstract class RedisStoreBase<ClientProvider extends ProviderClient, Tra
|
|
|
30
30
|
* check for and process work items in the
|
|
31
31
|
* time and signal task queues.
|
|
32
32
|
*/
|
|
33
|
-
reserveScoutRole(scoutType:
|
|
34
|
-
releaseScoutRole(scoutType:
|
|
33
|
+
reserveScoutRole(scoutType: ScoutType, delay?: number): Promise<boolean>;
|
|
34
|
+
releaseScoutRole(scoutType: ScoutType): Promise<boolean>;
|
|
35
35
|
getSettings(bCreate?: boolean): Promise<HotMeshSettings>;
|
|
36
36
|
setSettings(manifest: HotMeshSettings): Promise<any>;
|
|
37
37
|
reserveSymbolRange(target: string, size: number, type: 'JOB' | 'ACTIVITY', tryCount?: number): Promise<[number, number, Symbols]>;
|
|
@@ -148,17 +148,27 @@ class IORedisStoreService extends _base_1.RedisStoreBase {
|
|
|
148
148
|
return this.storeClient.multi();
|
|
149
149
|
}
|
|
150
150
|
async exec(...args) {
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
if (Array.isArray(response
|
|
151
|
+
try {
|
|
152
|
+
const response = await this.storeClient.call.apply(this.storeClient, args);
|
|
153
|
+
if (typeof response === 'string') {
|
|
154
|
+
return response;
|
|
155
|
+
}
|
|
156
|
+
else if (Array.isArray(response)) {
|
|
157
|
+
if (Array.isArray(response[0])) {
|
|
158
|
+
return response;
|
|
159
|
+
}
|
|
157
160
|
return response;
|
|
158
161
|
}
|
|
159
162
|
return response;
|
|
160
163
|
}
|
|
161
|
-
|
|
164
|
+
catch (error) {
|
|
165
|
+
// Connection closed during test cleanup - log and return empty response
|
|
166
|
+
if (error?.message?.includes('Connection is closed')) {
|
|
167
|
+
return [];
|
|
168
|
+
}
|
|
169
|
+
// Re-throw unexpected errors
|
|
170
|
+
throw error;
|
|
171
|
+
}
|
|
162
172
|
}
|
|
163
173
|
async setnxex(key, value, expireSeconds) {
|
|
164
174
|
const status = await this.storeClient[this.commands.set](key, value, 'NX', 'EX', expireSeconds.toString());
|
|
@@ -73,8 +73,13 @@ function hashStringToInt(str) {
|
|
|
73
73
|
return Math.abs(hash);
|
|
74
74
|
}
|
|
75
75
|
async function checkIfTablesExist(client, schemaName, tableName) {
|
|
76
|
-
|
|
77
|
-
|
|
76
|
+
// Check both streams table exists AND roles table (from store provider)
|
|
77
|
+
// The roles table is created by the store provider and is used for scout role coordination
|
|
78
|
+
const result = await client.query(`SELECT
|
|
79
|
+
to_regclass($1) AS streams_table,
|
|
80
|
+
to_regclass($2) AS roles_table`, [tableName, `${schemaName}.roles`]);
|
|
81
|
+
return result.rows[0].streams_table !== null &&
|
|
82
|
+
result.rows[0].roles_table !== null;
|
|
78
83
|
}
|
|
79
84
|
async function waitForTablesCreation(streamClient, lockId, schemaName, tableName, logger) {
|
|
80
85
|
let retries = 0;
|
|
@@ -124,6 +129,11 @@ async function createTables(client, schemaName, tableName) {
|
|
|
124
129
|
reserved_at TIMESTAMPTZ,
|
|
125
130
|
reserved_by TEXT,
|
|
126
131
|
expired_at TIMESTAMPTZ,
|
|
132
|
+
max_retry_attempts INT DEFAULT 3,
|
|
133
|
+
backoff_coefficient NUMERIC DEFAULT 10,
|
|
134
|
+
maximum_interval_seconds INT DEFAULT 120,
|
|
135
|
+
visible_at TIMESTAMPTZ DEFAULT NOW(),
|
|
136
|
+
retry_attempt INT DEFAULT 0,
|
|
127
137
|
PRIMARY KEY (stream_name, id)
|
|
128
138
|
) PARTITION BY HASH (stream_name);
|
|
129
139
|
`);
|
|
@@ -135,16 +145,16 @@ async function createTables(client, schemaName, tableName) {
|
|
|
135
145
|
FOR VALUES WITH (modulus 8, remainder ${i});
|
|
136
146
|
`);
|
|
137
147
|
}
|
|
138
|
-
// Index for active messages
|
|
148
|
+
// Index for active messages (includes visible_at for visibility timeout support)
|
|
139
149
|
await client.query(`
|
|
140
150
|
CREATE INDEX IF NOT EXISTS idx_streams_active_messages
|
|
141
|
-
ON ${tableName} (group_name, stream_name, reserved_at, id)
|
|
151
|
+
ON ${tableName} (group_name, stream_name, reserved_at, visible_at, id)
|
|
142
152
|
WHERE reserved_at IS NULL AND expired_at IS NULL;
|
|
143
153
|
`);
|
|
144
|
-
// Optimized index for the simplified fetchMessages query
|
|
154
|
+
// Optimized index for the simplified fetchMessages query (includes visible_at)
|
|
145
155
|
await client.query(`
|
|
146
156
|
CREATE INDEX IF NOT EXISTS idx_streams_message_fetch
|
|
147
|
-
ON ${tableName} (stream_name, group_name, id)
|
|
157
|
+
ON ${tableName} (stream_name, group_name, visible_at, id)
|
|
148
158
|
WHERE expired_at IS NULL;
|
|
149
159
|
`);
|
|
150
160
|
// Index for expired messages
|
|
@@ -166,7 +176,7 @@ async function createTables(client, schemaName, tableName) {
|
|
|
166
176
|
// `);
|
|
167
177
|
}
|
|
168
178
|
async function createNotificationTriggers(client, schemaName, tableName) {
|
|
169
|
-
// Create the notification function
|
|
179
|
+
// Create the notification function for INSERT events
|
|
170
180
|
await client.query(`
|
|
171
181
|
CREATE OR REPLACE FUNCTION ${schemaName}.notify_new_stream_message()
|
|
172
182
|
RETURNS TRIGGER AS $$
|
|
@@ -174,24 +184,26 @@ async function createNotificationTriggers(client, schemaName, tableName) {
|
|
|
174
184
|
channel_name TEXT;
|
|
175
185
|
payload JSON;
|
|
176
186
|
BEGIN
|
|
177
|
-
--
|
|
178
|
-
--
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
187
|
+
-- Only notify if message is immediately visible
|
|
188
|
+
-- Messages with visibility timeout will be notified when they become visible
|
|
189
|
+
IF NEW.visible_at <= NOW() THEN
|
|
190
|
+
-- Create channel name: stream_{stream_name}_{group_name}
|
|
191
|
+
-- Truncate if too long (PostgreSQL channel names limited to 63 chars)
|
|
192
|
+
channel_name := 'stream_' || NEW.stream_name || '_' || NEW.group_name;
|
|
193
|
+
IF length(channel_name) > 63 THEN
|
|
194
|
+
channel_name := left(channel_name, 63);
|
|
195
|
+
END IF;
|
|
196
|
+
|
|
197
|
+
-- Create minimal payload with only required fields
|
|
198
|
+
payload := json_build_object(
|
|
199
|
+
'stream_name', NEW.stream_name,
|
|
200
|
+
'group_name', NEW.group_name
|
|
201
|
+
);
|
|
202
|
+
|
|
203
|
+
-- Send notification
|
|
204
|
+
PERFORM pg_notify(channel_name, payload::text);
|
|
182
205
|
END IF;
|
|
183
206
|
|
|
184
|
-
-- Create payload with message details
|
|
185
|
-
payload := json_build_object(
|
|
186
|
-
'id', NEW.id,
|
|
187
|
-
'stream_name', NEW.stream_name,
|
|
188
|
-
'group_name', NEW.group_name,
|
|
189
|
-
'created_at', extract(epoch from NEW.created_at)
|
|
190
|
-
);
|
|
191
|
-
|
|
192
|
-
-- Send notification
|
|
193
|
-
PERFORM pg_notify(channel_name, payload::text);
|
|
194
|
-
|
|
195
207
|
RETURN NEW;
|
|
196
208
|
END;
|
|
197
209
|
$$ LANGUAGE plpgsql;
|
|
@@ -204,6 +216,47 @@ async function createNotificationTriggers(client, schemaName, tableName) {
|
|
|
204
216
|
FOR EACH ROW
|
|
205
217
|
EXECUTE FUNCTION ${schemaName}.notify_new_stream_message();
|
|
206
218
|
`);
|
|
219
|
+
// Create helper function to notify about messages with expired visibility timeouts
|
|
220
|
+
// This is called periodically by the router scout for responsive retry processing
|
|
221
|
+
await client.query(`
|
|
222
|
+
CREATE OR REPLACE FUNCTION ${schemaName}.notify_visible_messages()
|
|
223
|
+
RETURNS INTEGER AS $$
|
|
224
|
+
DECLARE
|
|
225
|
+
msg RECORD;
|
|
226
|
+
channel_name TEXT;
|
|
227
|
+
payload JSON;
|
|
228
|
+
notification_count INTEGER := 0;
|
|
229
|
+
BEGIN
|
|
230
|
+
-- Find all distinct streams with messages that are now visible
|
|
231
|
+
-- Router will drain all messages when notified, so we just notify each channel once
|
|
232
|
+
FOR msg IN
|
|
233
|
+
SELECT DISTINCT stream_name, group_name
|
|
234
|
+
FROM ${tableName}
|
|
235
|
+
WHERE visible_at <= NOW()
|
|
236
|
+
AND reserved_at IS NULL
|
|
237
|
+
AND expired_at IS NULL
|
|
238
|
+
LIMIT 100 -- Prevent overwhelming the system
|
|
239
|
+
LOOP
|
|
240
|
+
-- Create channel name (same logic as INSERT trigger)
|
|
241
|
+
channel_name := 'stream_' || msg.stream_name || '_' || msg.group_name;
|
|
242
|
+
IF length(channel_name) > 63 THEN
|
|
243
|
+
channel_name := left(channel_name, 63);
|
|
244
|
+
END IF;
|
|
245
|
+
|
|
246
|
+
-- Send minimal notification with only required fields
|
|
247
|
+
payload := json_build_object(
|
|
248
|
+
'stream_name', msg.stream_name,
|
|
249
|
+
'group_name', msg.group_name
|
|
250
|
+
);
|
|
251
|
+
|
|
252
|
+
PERFORM pg_notify(channel_name, payload::text);
|
|
253
|
+
notification_count := notification_count + 1;
|
|
254
|
+
END LOOP;
|
|
255
|
+
|
|
256
|
+
RETURN notification_count;
|
|
257
|
+
END;
|
|
258
|
+
$$ LANGUAGE plpgsql;
|
|
259
|
+
`);
|
|
207
260
|
}
|
|
208
261
|
function getNotificationChannelName(streamName, groupName) {
|
|
209
262
|
const channelName = `stream_${streamName}_${groupName}`;
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { ILogger } from '../../../logger';
|
|
2
|
+
import { PostgresClientType } from '../../../../types/postgres';
|
|
3
|
+
import { ProviderClient } from '../../../../types/provider';
|
|
4
|
+
/**
|
|
5
|
+
* Create a stream (no-op for PostgreSQL - streams are created implicitly).
|
|
6
|
+
*/
|
|
7
|
+
export declare function createStream(streamName: string): Promise<boolean>;
|
|
8
|
+
/**
|
|
9
|
+
* Delete a stream or all streams.
|
|
10
|
+
*/
|
|
11
|
+
export declare function deleteStream(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, logger: ILogger): Promise<boolean>;
|
|
12
|
+
/**
|
|
13
|
+
* Create a consumer group (no-op for PostgreSQL - groups are created implicitly).
|
|
14
|
+
*/
|
|
15
|
+
export declare function createConsumerGroup(streamName: string, groupName: string): Promise<boolean>;
|
|
16
|
+
/**
|
|
17
|
+
* Delete a consumer group (removes all messages for that group).
|
|
18
|
+
*/
|
|
19
|
+
export declare function deleteConsumerGroup(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, logger: ILogger): Promise<boolean>;
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.deleteConsumerGroup = exports.createConsumerGroup = exports.deleteStream = exports.createStream = void 0;
|
|
4
|
+
/**
|
|
5
|
+
* Create a stream (no-op for PostgreSQL - streams are created implicitly).
|
|
6
|
+
*/
|
|
7
|
+
async function createStream(streamName) {
|
|
8
|
+
return true;
|
|
9
|
+
}
|
|
10
|
+
exports.createStream = createStream;
|
|
11
|
+
/**
|
|
12
|
+
* Delete a stream or all streams.
|
|
13
|
+
*/
|
|
14
|
+
async function deleteStream(client, tableName, streamName, logger) {
|
|
15
|
+
try {
|
|
16
|
+
if (streamName === '*') {
|
|
17
|
+
await client.query(`DELETE FROM ${tableName}`);
|
|
18
|
+
}
|
|
19
|
+
else {
|
|
20
|
+
await client.query(`DELETE FROM ${tableName} WHERE stream_name = $1`, [
|
|
21
|
+
streamName,
|
|
22
|
+
]);
|
|
23
|
+
}
|
|
24
|
+
return true;
|
|
25
|
+
}
|
|
26
|
+
catch (error) {
|
|
27
|
+
logger.error(`postgres-stream-delete-error-${streamName}`, {
|
|
28
|
+
error,
|
|
29
|
+
});
|
|
30
|
+
throw error;
|
|
31
|
+
}
|
|
32
|
+
}
|
|
33
|
+
exports.deleteStream = deleteStream;
|
|
34
|
+
/**
|
|
35
|
+
* Create a consumer group (no-op for PostgreSQL - groups are created implicitly).
|
|
36
|
+
*/
|
|
37
|
+
async function createConsumerGroup(streamName, groupName) {
|
|
38
|
+
return true;
|
|
39
|
+
}
|
|
40
|
+
exports.createConsumerGroup = createConsumerGroup;
|
|
41
|
+
/**
|
|
42
|
+
* Delete a consumer group (removes all messages for that group).
|
|
43
|
+
*/
|
|
44
|
+
async function deleteConsumerGroup(client, tableName, streamName, groupName, logger) {
|
|
45
|
+
try {
|
|
46
|
+
await client.query(`DELETE FROM ${tableName} WHERE stream_name = $1 AND group_name = $2`, [streamName, groupName]);
|
|
47
|
+
return true;
|
|
48
|
+
}
|
|
49
|
+
catch (error) {
|
|
50
|
+
logger.error(`postgres-stream-delete-group-error-${streamName}.${groupName}`, { error });
|
|
51
|
+
throw error;
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
exports.deleteConsumerGroup = deleteConsumerGroup;
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import { ILogger } from '../../../logger';
|
|
2
|
+
import { PostgresClientType } from '../../../../types/postgres';
|
|
3
|
+
import { PublishMessageConfig, StreamMessage } from '../../../../types/stream';
|
|
4
|
+
import { ProviderClient, ProviderTransaction } from '../../../../types/provider';
|
|
5
|
+
/**
|
|
6
|
+
* Publish messages to a stream. Can be used within a transaction.
|
|
7
|
+
*
|
|
8
|
+
* When a transaction is provided, the SQL is added to the transaction
|
|
9
|
+
* and executed atomically with other operations.
|
|
10
|
+
*/
|
|
11
|
+
export declare function publishMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, messages: string[], options: PublishMessageConfig | undefined, logger: ILogger): Promise<string[] | ProviderTransaction>;
|
|
12
|
+
/**
|
|
13
|
+
* Build SQL for publishing messages with retry policies and visibility delays.
|
|
14
|
+
* Optimizes the INSERT statement based on whether retry config is present.
|
|
15
|
+
*/
|
|
16
|
+
export declare function buildPublishSQL(tableName: string, streamName: string, messages: string[], options?: PublishMessageConfig): {
|
|
17
|
+
sql: string;
|
|
18
|
+
params: any[];
|
|
19
|
+
};
|
|
20
|
+
/**
|
|
21
|
+
* Fetch messages from the stream with optional exponential backoff.
|
|
22
|
+
* Uses SKIP LOCKED for high-concurrency consumption.
|
|
23
|
+
*/
|
|
24
|
+
export declare function fetchMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, consumerName: string, options: {
|
|
25
|
+
batchSize?: number;
|
|
26
|
+
blockTimeout?: number;
|
|
27
|
+
autoAck?: boolean;
|
|
28
|
+
reservationTimeout?: number;
|
|
29
|
+
enableBackoff?: boolean;
|
|
30
|
+
initialBackoff?: number;
|
|
31
|
+
maxBackoff?: number;
|
|
32
|
+
maxRetries?: number;
|
|
33
|
+
}, logger: ILogger): Promise<StreamMessage[]>;
|
|
34
|
+
/**
|
|
35
|
+
* Acknowledge messages (no-op for PostgreSQL - uses soft delete pattern).
|
|
36
|
+
*/
|
|
37
|
+
export declare function acknowledgeMessages(messageIds: string[]): Promise<number>;
|
|
38
|
+
/**
|
|
39
|
+
* Delete messages by soft-deleting them (setting expired_at).
|
|
40
|
+
*/
|
|
41
|
+
export declare function deleteMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, messageIds: string[], logger: ILogger): Promise<number>;
|
|
42
|
+
/**
|
|
43
|
+
* Acknowledge and delete messages in one operation.
|
|
44
|
+
*/
|
|
45
|
+
export declare function ackAndDelete(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, messageIds: string[], logger: ILogger): Promise<number>;
|
|
46
|
+
/**
|
|
47
|
+
* Retry messages (placeholder for future implementation).
|
|
48
|
+
*/
|
|
49
|
+
export declare function retryMessages(streamName: string, groupName: string, options?: {
|
|
50
|
+
consumerName?: string;
|
|
51
|
+
minIdleTime?: number;
|
|
52
|
+
messageIds?: string[];
|
|
53
|
+
delay?: number;
|
|
54
|
+
maxRetries?: number;
|
|
55
|
+
limit?: number;
|
|
56
|
+
}): Promise<StreamMessage[]>;
|
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.retryMessages = exports.ackAndDelete = exports.deleteMessages = exports.acknowledgeMessages = exports.fetchMessages = exports.buildPublishSQL = exports.publishMessages = void 0;
|
|
4
|
+
const utils_1 = require("../../../../modules/utils");
|
|
5
|
+
/**
|
|
6
|
+
* Publish messages to a stream. Can be used within a transaction.
|
|
7
|
+
*
|
|
8
|
+
* When a transaction is provided, the SQL is added to the transaction
|
|
9
|
+
* and executed atomically with other operations.
|
|
10
|
+
*/
|
|
11
|
+
async function publishMessages(client, tableName, streamName, messages, options, logger) {
|
|
12
|
+
const { sql, params } = buildPublishSQL(tableName, streamName, messages, options);
|
|
13
|
+
if (options?.transaction &&
|
|
14
|
+
typeof options.transaction.addCommand === 'function') {
|
|
15
|
+
// Add to transaction and return the transaction object
|
|
16
|
+
options.transaction.addCommand(sql, params, 'array', (rows) => rows.map((row) => row.id.toString()));
|
|
17
|
+
return options.transaction;
|
|
18
|
+
}
|
|
19
|
+
else {
|
|
20
|
+
try {
|
|
21
|
+
const ids = [];
|
|
22
|
+
const res = await client.query(sql, params);
|
|
23
|
+
for (const row of res.rows) {
|
|
24
|
+
ids.push(row.id.toString());
|
|
25
|
+
}
|
|
26
|
+
return ids;
|
|
27
|
+
}
|
|
28
|
+
catch (error) {
|
|
29
|
+
logger.error(`postgres-stream-publish-error-${streamName}`, {
|
|
30
|
+
error,
|
|
31
|
+
});
|
|
32
|
+
throw error;
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
exports.publishMessages = publishMessages;
|
|
37
|
+
/**
|
|
38
|
+
* Build SQL for publishing messages with retry policies and visibility delays.
|
|
39
|
+
* Optimizes the INSERT statement based on whether retry config is present.
|
|
40
|
+
*/
|
|
41
|
+
function buildPublishSQL(tableName, streamName, messages, options) {
|
|
42
|
+
const groupName = streamName.endsWith(':') ? 'ENGINE' : 'WORKER';
|
|
43
|
+
// Parse messages to extract retry config and visibility options
|
|
44
|
+
const parsedMessages = messages.map(msg => {
|
|
45
|
+
const data = JSON.parse(msg);
|
|
46
|
+
const retryConfig = data._streamRetryConfig;
|
|
47
|
+
const visibilityDelayMs = data._visibilityDelayMs;
|
|
48
|
+
const retryAttempt = data._retryAttempt;
|
|
49
|
+
// Remove internal fields from message payload
|
|
50
|
+
delete data._streamRetryConfig;
|
|
51
|
+
delete data._visibilityDelayMs;
|
|
52
|
+
delete data._retryAttempt;
|
|
53
|
+
// Determine if this message has explicit retry config
|
|
54
|
+
const hasExplicitConfig = (retryConfig && 'max_retry_attempts' in retryConfig) || options?.retryPolicy;
|
|
55
|
+
let normalizedPolicy = null;
|
|
56
|
+
if (retryConfig && 'max_retry_attempts' in retryConfig) {
|
|
57
|
+
normalizedPolicy = retryConfig;
|
|
58
|
+
}
|
|
59
|
+
else if (options?.retryPolicy) {
|
|
60
|
+
normalizedPolicy = (0, utils_1.normalizeRetryPolicy)(options.retryPolicy, {
|
|
61
|
+
maximumAttempts: 3,
|
|
62
|
+
backoffCoefficient: 10,
|
|
63
|
+
maximumInterval: 120,
|
|
64
|
+
});
|
|
65
|
+
}
|
|
66
|
+
return {
|
|
67
|
+
message: JSON.stringify(data),
|
|
68
|
+
hasExplicitConfig,
|
|
69
|
+
retryPolicy: normalizedPolicy,
|
|
70
|
+
visibilityDelayMs: visibilityDelayMs || 0,
|
|
71
|
+
retryAttempt: retryAttempt || 0,
|
|
72
|
+
};
|
|
73
|
+
});
|
|
74
|
+
const params = [streamName, groupName];
|
|
75
|
+
let valuesClauses = [];
|
|
76
|
+
let insertColumns;
|
|
77
|
+
// Check if ALL messages have explicit config or ALL don't
|
|
78
|
+
const allHaveConfig = parsedMessages.every(pm => pm.hasExplicitConfig);
|
|
79
|
+
const noneHaveConfig = parsedMessages.every(pm => !pm.hasExplicitConfig);
|
|
80
|
+
const hasVisibilityDelays = parsedMessages.some(pm => pm.visibilityDelayMs > 0);
|
|
81
|
+
if (noneHaveConfig && !hasVisibilityDelays) {
|
|
82
|
+
// Omit retry columns entirely - let DB defaults apply
|
|
83
|
+
insertColumns = '(stream_name, group_name, message)';
|
|
84
|
+
parsedMessages.forEach((pm, idx) => {
|
|
85
|
+
const base = idx * 1;
|
|
86
|
+
valuesClauses.push(`($1, $2, $${base + 3})`);
|
|
87
|
+
params.push(pm.message);
|
|
88
|
+
});
|
|
89
|
+
}
|
|
90
|
+
else if (noneHaveConfig && hasVisibilityDelays) {
|
|
91
|
+
// Only visibility delays, no retry config
|
|
92
|
+
insertColumns = '(stream_name, group_name, message, visible_at, retry_attempt)';
|
|
93
|
+
parsedMessages.forEach((pm, idx) => {
|
|
94
|
+
const base = idx * 2;
|
|
95
|
+
if (pm.visibilityDelayMs > 0) {
|
|
96
|
+
const visibleAtSQL = `NOW() + INTERVAL '${pm.visibilityDelayMs} milliseconds'`;
|
|
97
|
+
valuesClauses.push(`($1, $2, $${base + 3}, ${visibleAtSQL}, $${base + 4})`);
|
|
98
|
+
params.push(pm.message, pm.retryAttempt);
|
|
99
|
+
}
|
|
100
|
+
else {
|
|
101
|
+
valuesClauses.push(`($1, $2, $${base + 3}, DEFAULT, $${base + 4})`);
|
|
102
|
+
params.push(pm.message, pm.retryAttempt);
|
|
103
|
+
}
|
|
104
|
+
});
|
|
105
|
+
}
|
|
106
|
+
else {
|
|
107
|
+
// Include retry columns and optionally visibility
|
|
108
|
+
insertColumns = '(stream_name, group_name, message, max_retry_attempts, backoff_coefficient, maximum_interval_seconds, visible_at, retry_attempt)';
|
|
109
|
+
parsedMessages.forEach((pm, idx) => {
|
|
110
|
+
const visibleAtClause = pm.visibilityDelayMs > 0
|
|
111
|
+
? `NOW() + INTERVAL '${pm.visibilityDelayMs} milliseconds'`
|
|
112
|
+
: 'DEFAULT';
|
|
113
|
+
if (pm.hasExplicitConfig) {
|
|
114
|
+
const paramOffset = params.length + 1; // Current param count + 1 for next param
|
|
115
|
+
valuesClauses.push(`($1, $2, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, ${visibleAtClause}, $${paramOffset + 4})`);
|
|
116
|
+
params.push(pm.message, pm.retryPolicy.max_retry_attempts, pm.retryPolicy.backoff_coefficient, pm.retryPolicy.maximum_interval_seconds, pm.retryAttempt);
|
|
117
|
+
}
|
|
118
|
+
else {
|
|
119
|
+
// This message doesn't have config but others do - use DEFAULT keyword
|
|
120
|
+
const paramOffset = params.length + 1;
|
|
121
|
+
valuesClauses.push(`($1, $2, $${paramOffset}, DEFAULT, DEFAULT, DEFAULT, ${visibleAtClause}, $${paramOffset + 1})`);
|
|
122
|
+
params.push(pm.message, pm.retryAttempt);
|
|
123
|
+
}
|
|
124
|
+
});
|
|
125
|
+
}
|
|
126
|
+
return {
|
|
127
|
+
sql: `INSERT INTO ${tableName} ${insertColumns}
|
|
128
|
+
VALUES ${valuesClauses.join(', ')}
|
|
129
|
+
RETURNING id`,
|
|
130
|
+
params,
|
|
131
|
+
};
|
|
132
|
+
}
|
|
133
|
+
exports.buildPublishSQL = buildPublishSQL;
|
|
134
|
+
/**
|
|
135
|
+
* Fetch messages from the stream with optional exponential backoff.
|
|
136
|
+
* Uses SKIP LOCKED for high-concurrency consumption.
|
|
137
|
+
*/
|
|
138
|
+
async function fetchMessages(client, tableName, streamName, groupName, consumerName, options = {}, logger) {
|
|
139
|
+
const enableBackoff = options?.enableBackoff ?? false;
|
|
140
|
+
const initialBackoff = options?.initialBackoff ?? 100; // Default initial backoff: 100ms
|
|
141
|
+
const maxBackoff = options?.maxBackoff ?? 3000; // Default max backoff: 3 seconds
|
|
142
|
+
const maxRetries = options?.maxRetries ?? 3; // Set a finite default, e.g., 3 retries
|
|
143
|
+
let backoff = initialBackoff;
|
|
144
|
+
let retries = 0;
|
|
145
|
+
try {
|
|
146
|
+
while (retries < maxRetries) {
|
|
147
|
+
retries++;
|
|
148
|
+
const batchSize = options?.batchSize || 1;
|
|
149
|
+
const reservationTimeout = options?.reservationTimeout || 30;
|
|
150
|
+
// Simplified query for better performance - especially for notification-triggered fetches
|
|
151
|
+
const res = await client.query(`UPDATE ${tableName}
|
|
152
|
+
SET reserved_at = NOW(), reserved_by = $4
|
|
153
|
+
WHERE id IN (
|
|
154
|
+
SELECT id FROM ${tableName}
|
|
155
|
+
WHERE stream_name = $1
|
|
156
|
+
AND group_name = $2
|
|
157
|
+
AND (reserved_at IS NULL OR reserved_at < NOW() - INTERVAL '${reservationTimeout} seconds')
|
|
158
|
+
AND expired_at IS NULL
|
|
159
|
+
AND visible_at <= NOW()
|
|
160
|
+
ORDER BY id
|
|
161
|
+
LIMIT $3
|
|
162
|
+
FOR UPDATE SKIP LOCKED
|
|
163
|
+
)
|
|
164
|
+
RETURNING id, message, max_retry_attempts, backoff_coefficient, maximum_interval_seconds, retry_attempt`, [streamName, groupName, batchSize, consumerName]);
|
|
165
|
+
const messages = res.rows.map((row) => {
|
|
166
|
+
const data = (0, utils_1.parseStreamMessage)(row.message);
|
|
167
|
+
// Inject retry policy only if not using default values
|
|
168
|
+
// Default values indicate old retry mechanism should be used (policies.retry)
|
|
169
|
+
const hasDefaultRetryPolicy = (row.max_retry_attempts === 3 || row.max_retry_attempts === 5) &&
|
|
170
|
+
parseFloat(row.backoff_coefficient) === 10 &&
|
|
171
|
+
row.maximum_interval_seconds === 120;
|
|
172
|
+
if (row.max_retry_attempts !== null && !hasDefaultRetryPolicy) {
|
|
173
|
+
data._streamRetryConfig = {
|
|
174
|
+
max_retry_attempts: row.max_retry_attempts,
|
|
175
|
+
backoff_coefficient: parseFloat(row.backoff_coefficient),
|
|
176
|
+
maximum_interval_seconds: row.maximum_interval_seconds,
|
|
177
|
+
};
|
|
178
|
+
}
|
|
179
|
+
// Inject retry_attempt from database
|
|
180
|
+
if (row.retry_attempt !== undefined && row.retry_attempt !== null) {
|
|
181
|
+
data._retryAttempt = row.retry_attempt;
|
|
182
|
+
}
|
|
183
|
+
return {
|
|
184
|
+
id: row.id.toString(),
|
|
185
|
+
data,
|
|
186
|
+
retryPolicy: (row.max_retry_attempts !== null && !hasDefaultRetryPolicy) ? {
|
|
187
|
+
maximumAttempts: row.max_retry_attempts,
|
|
188
|
+
backoffCoefficient: parseFloat(row.backoff_coefficient),
|
|
189
|
+
maximumInterval: row.maximum_interval_seconds,
|
|
190
|
+
} : undefined,
|
|
191
|
+
};
|
|
192
|
+
});
|
|
193
|
+
if (messages.length > 0 || !enableBackoff) {
|
|
194
|
+
return messages;
|
|
195
|
+
}
|
|
196
|
+
// Apply backoff if enabled and no messages found
|
|
197
|
+
await (0, utils_1.sleepFor)(backoff);
|
|
198
|
+
backoff = Math.min(backoff * 2, maxBackoff); // Exponential backoff
|
|
199
|
+
}
|
|
200
|
+
// Return empty array if maxRetries is reached and still no messages
|
|
201
|
+
return [];
|
|
202
|
+
}
|
|
203
|
+
catch (error) {
|
|
204
|
+
logger.error(`postgres-stream-consumer-error-${streamName}`, {
|
|
205
|
+
error,
|
|
206
|
+
});
|
|
207
|
+
throw error;
|
|
208
|
+
}
|
|
209
|
+
}
|
|
210
|
+
exports.fetchMessages = fetchMessages;
|
|
211
|
+
/**
|
|
212
|
+
* Acknowledge messages (no-op for PostgreSQL - uses soft delete pattern).
|
|
213
|
+
*/
|
|
214
|
+
async function acknowledgeMessages(messageIds) {
|
|
215
|
+
// No-op for this implementation
|
|
216
|
+
return messageIds.length;
|
|
217
|
+
}
|
|
218
|
+
exports.acknowledgeMessages = acknowledgeMessages;
|
|
219
|
+
/**
|
|
220
|
+
* Delete messages by soft-deleting them (setting expired_at).
|
|
221
|
+
*/
|
|
222
|
+
async function deleteMessages(client, tableName, streamName, groupName, messageIds, logger) {
|
|
223
|
+
try {
|
|
224
|
+
const ids = messageIds.map((id) => parseInt(id));
|
|
225
|
+
// Perform a soft delete by setting `expired_at` to the current timestamp
|
|
226
|
+
await client.query(`UPDATE ${tableName}
|
|
227
|
+
SET expired_at = NOW()
|
|
228
|
+
WHERE stream_name = $1 AND id = ANY($2::bigint[]) AND group_name = $3`, [streamName, ids, groupName]);
|
|
229
|
+
return messageIds.length;
|
|
230
|
+
}
|
|
231
|
+
catch (error) {
|
|
232
|
+
logger.error(`postgres-stream-delete-error-${streamName}`, {
|
|
233
|
+
error,
|
|
234
|
+
});
|
|
235
|
+
throw error;
|
|
236
|
+
}
|
|
237
|
+
}
|
|
238
|
+
exports.deleteMessages = deleteMessages;
|
|
239
|
+
/**
 * Acknowledge and delete messages in one operation.
 *
 * Acknowledging is a no-op for PostgreSQL, so the combined operation
 * reduces to the soft delete; its count of processed ids is forwarded.
 */
async function ackAndDelete(client, tableName, streamName, groupName, messageIds, logger) {
    const deletedCount = await deleteMessages(client, tableName, streamName, groupName, messageIds, logger);
    return deletedCount;
}
exports.ackAndDelete = ackAndDelete;
|
|
246
|
+
/**
|
|
247
|
+
* Retry messages (placeholder for future implementation).
|
|
248
|
+
*/
|
|
249
|
+
async function retryMessages(streamName, groupName, options) {
|
|
250
|
+
// Implement retry logic if needed
|
|
251
|
+
return [];
|
|
252
|
+
}
|
|
253
|
+
exports.retryMessages = retryMessages;
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { ILogger } from '../../../logger';
|
|
2
|
+
import { PostgresClientType } from '../../../../types/postgres';
|
|
3
|
+
import { NotificationConsumer, StreamMessage } from '../../../../types/stream';
|
|
4
|
+
import { ProviderClient } from '../../../../types/provider';
|
|
5
|
+
/**
 * Manages PostgreSQL LISTEN/NOTIFY for stream message notifications.
 * Handles static state shared across all service instances using the same client.
 */
export declare class NotificationManager<TService> {
    private client;
    private getTableName;
    private getFallbackInterval;
    private logger;
    // Client-scoped state held statically so that service instances sharing
    // one PostgreSQL client share a single handler/poller (per class doc).
    private static clientNotificationConsumers;
    private static clientNotificationHandlers;
    private static clientFallbackPollers;
    // Consumers registered by this particular instance; drained by cleanup().
    private instanceNotificationConsumers;
    // NOTE(review): presumably a pre-bound handleNotification reference so the
    // same function can be detached from the client later — confirm in the impl.
    private notificationHandlerBound;
    constructor(client: PostgresClientType & ProviderClient, getTableName: () => string, getFallbackInterval: () => number, logger: ILogger);
    /**
     * Set up notification handler for this client (once per client).
     */
    setupClientNotificationHandler(serviceInstance: TService): void;
    /**
     * Start fallback poller for missed notifications (once per client).
     */
    startClientFallbackPoller(checkForMissedMessages: () => Promise<void>): void;
    /**
     * Check for missed messages (fallback polling).
     * Handles errors gracefully to avoid noise during shutdown.
     */
    checkForMissedMessages(fetchMessages: (instance: TService, consumer: NotificationConsumer) => Promise<StreamMessage[]>): Promise<void>;
    /**
     * Handle incoming PostgreSQL notification.
     */
    private handleNotification;
    /**
     * Set up notification consumer for a stream/group.
     */
    setupNotificationConsumer(serviceInstance: TService, streamName: string, groupName: string, consumerName: string, callback: (messages: StreamMessage[]) => void): Promise<void>;
    /**
     * Stop notification consumer for a stream/group.
     */
    stopNotificationConsumer(serviceInstance: TService, streamName: string, groupName: string): Promise<void>;
    /**
     * Clean up notification consumers for this instance.
     * Stops fallback poller FIRST to prevent race conditions during shutdown.
     */
    cleanup(serviceInstance: TService): Promise<void>;
    /**
     * Get consumer key from stream and group names.
     */
    private getConsumerKey;
}
|
|
55
|
+
/**
 * Get configuration values for notification settings.
 *
 * NOTE(review): `config` is typed `any` in this generated declaration, so its
 * expected shape is not constrained here; the units of the returned numbers
 * (presumably milliseconds for both interval and timeout) should be confirmed
 * against the .js implementation.
 */
export declare function getFallbackInterval(config: any): number;
export declare function getNotificationTimeout(config: any): number;
|