@hotmeshio/hotmesh 0.6.0 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +179 -142
- package/build/index.d.ts +3 -1
- package/build/index.js +5 -1
- package/build/modules/enums.d.ts +18 -0
- package/build/modules/enums.js +27 -1
- package/build/modules/utils.d.ts +27 -0
- package/build/modules/utils.js +79 -1
- package/build/package.json +24 -10
- package/build/services/connector/factory.d.ts +1 -1
- package/build/services/connector/factory.js +15 -1
- package/build/services/connector/providers/ioredis.d.ts +9 -0
- package/build/services/connector/providers/ioredis.js +26 -0
- package/build/services/connector/providers/postgres.js +3 -0
- package/build/services/connector/providers/redis.d.ts +9 -0
- package/build/services/connector/providers/redis.js +38 -0
- package/build/services/hotmesh/index.d.ts +66 -15
- package/build/services/hotmesh/index.js +84 -15
- package/build/services/memflow/index.d.ts +100 -14
- package/build/services/memflow/index.js +100 -14
- package/build/services/memflow/worker.d.ts +97 -0
- package/build/services/memflow/worker.js +217 -0
- package/build/services/memflow/workflow/proxyActivities.d.ts +74 -3
- package/build/services/memflow/workflow/proxyActivities.js +81 -4
- package/build/services/router/consumption/index.d.ts +2 -1
- package/build/services/router/consumption/index.js +38 -2
- package/build/services/router/error-handling/index.d.ts +3 -3
- package/build/services/router/error-handling/index.js +48 -13
- package/build/services/router/index.d.ts +1 -0
- package/build/services/router/index.js +2 -1
- package/build/services/search/factory.js +8 -0
- package/build/services/search/providers/redis/ioredis.d.ts +23 -0
- package/build/services/search/providers/redis/ioredis.js +189 -0
- package/build/services/search/providers/redis/redis.d.ts +23 -0
- package/build/services/search/providers/redis/redis.js +202 -0
- package/build/services/store/factory.js +9 -1
- package/build/services/store/index.d.ts +3 -2
- package/build/services/store/providers/postgres/kvtypes/hash/basic.js +36 -6
- package/build/services/store/providers/postgres/kvtypes/hash/expire.js +12 -2
- package/build/services/store/providers/postgres/kvtypes/hash/scan.js +30 -10
- package/build/services/store/providers/postgres/kvtypes/list.js +68 -10
- package/build/services/store/providers/postgres/kvtypes/string.js +60 -10
- package/build/services/store/providers/postgres/kvtypes/zset.js +92 -22
- package/build/services/store/providers/postgres/postgres.d.ts +3 -3
- package/build/services/store/providers/redis/_base.d.ts +137 -0
- package/build/services/store/providers/redis/_base.js +980 -0
- package/build/services/store/providers/redis/ioredis.d.ts +20 -0
- package/build/services/store/providers/redis/ioredis.js +190 -0
- package/build/services/store/providers/redis/redis.d.ts +18 -0
- package/build/services/store/providers/redis/redis.js +199 -0
- package/build/services/stream/factory.js +17 -1
- package/build/services/stream/providers/postgres/kvtables.js +76 -23
- package/build/services/stream/providers/postgres/lifecycle.d.ts +19 -0
- package/build/services/stream/providers/postgres/lifecycle.js +54 -0
- package/build/services/stream/providers/postgres/messages.d.ts +56 -0
- package/build/services/stream/providers/postgres/messages.js +253 -0
- package/build/services/stream/providers/postgres/notifications.d.ts +59 -0
- package/build/services/stream/providers/postgres/notifications.js +357 -0
- package/build/services/stream/providers/postgres/postgres.d.ts +110 -11
- package/build/services/stream/providers/postgres/postgres.js +196 -488
- package/build/services/stream/providers/postgres/scout.d.ts +68 -0
- package/build/services/stream/providers/postgres/scout.js +233 -0
- package/build/services/stream/providers/postgres/stats.d.ts +49 -0
- package/build/services/stream/providers/postgres/stats.js +113 -0
- package/build/services/stream/providers/redis/ioredis.d.ts +61 -0
- package/build/services/stream/providers/redis/ioredis.js +272 -0
- package/build/services/stream/providers/redis/redis.d.ts +61 -0
- package/build/services/stream/providers/redis/redis.js +305 -0
- package/build/services/sub/factory.js +8 -0
- package/build/services/sub/providers/postgres/postgres.js +37 -5
- package/build/services/sub/providers/redis/ioredis.d.ts +20 -0
- package/build/services/sub/providers/redis/ioredis.js +161 -0
- package/build/services/sub/providers/redis/redis.d.ts +18 -0
- package/build/services/sub/providers/redis/redis.js +148 -0
- package/build/services/worker/index.d.ts +1 -0
- package/build/services/worker/index.js +2 -0
- package/build/types/hotmesh.d.ts +42 -2
- package/build/types/index.d.ts +4 -3
- package/build/types/index.js +4 -1
- package/build/types/memflow.d.ts +32 -0
- package/build/types/provider.d.ts +17 -1
- package/build/types/redis.d.ts +258 -0
- package/build/types/redis.js +11 -0
- package/build/types/stream.d.ts +92 -1
- package/index.ts +4 -0
- package/package.json +24 -10
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.deleteConsumerGroup = exports.createConsumerGroup = exports.deleteStream = exports.createStream = void 0;
|
|
4
|
+
/**
 * Create a stream. The PostgreSQL provider needs no explicit creation —
 * stream rows materialize in the shared table on first publish — so this
 * is a no-op that simply reports success.
 */
const createStream = async (streamName) => true;
exports.createStream = createStream;
|
|
11
|
+
/**
 * Delete one stream's rows, or every row in the table when the
 * wildcard '*' is given. Failures are logged with stream context and
 * re-thrown so the caller can react.
 */
async function deleteStream(client, tableName, streamName, logger) {
  const wildcard = streamName === '*';
  try {
    if (wildcard) {
      await client.query(`DELETE FROM ${tableName}`);
    } else {
      await client.query(`DELETE FROM ${tableName} WHERE stream_name = $1`, [
        streamName,
      ]);
    }
    return true;
  } catch (error) {
    logger.error(`postgres-stream-delete-error-${streamName}`, {
      error,
    });
    throw error;
  }
}
exports.deleteStream = deleteStream;
|
|
34
|
+
/**
 * Create a consumer group. Groups exist implicitly in the PostgreSQL
 * implementation (a group_name column on each message row), so there is
 * nothing to do here beyond signalling success.
 */
const createConsumerGroup = async (streamName, groupName) => true;
exports.createConsumerGroup = createConsumerGroup;
|
|
41
|
+
/**
 * Delete a consumer group by removing every message row addressed to
 * that group on the given stream. Failures are logged with
 * `stream.group` context and re-thrown.
 */
async function deleteConsumerGroup(client, tableName, streamName, groupName, logger) {
  const sql = `DELETE FROM ${tableName} WHERE stream_name = $1 AND group_name = $2`;
  try {
    await client.query(sql, [streamName, groupName]);
    return true;
  } catch (error) {
    logger.error(`postgres-stream-delete-group-error-${streamName}.${groupName}`, { error });
    throw error;
  }
}
exports.deleteConsumerGroup = deleteConsumerGroup;
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import { ILogger } from '../../../logger';
import { PostgresClientType } from '../../../../types/postgres';
import { PublishMessageConfig, StreamMessage } from '../../../../types/stream';
import { ProviderClient, ProviderTransaction } from '../../../../types/provider';
/**
 * Publish messages to a stream. Can be used within a transaction.
 *
 * When a transaction is provided, the SQL is added to the transaction
 * and executed atomically with other operations; in that case the
 * transaction object itself is returned instead of the inserted ids.
 */
export declare function publishMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, messages: string[], options: PublishMessageConfig | undefined, logger: ILogger): Promise<string[] | ProviderTransaction>;
/**
 * Build SQL for publishing messages with retry policies and visibility delays.
 * Optimizes the INSERT statement based on whether retry config is present.
 * Returns a parameterized statement plus its positional parameters.
 */
export declare function buildPublishSQL(tableName: string, streamName: string, messages: string[], options?: PublishMessageConfig): {
    sql: string;
    params: any[];
};
/**
 * Fetch messages from the stream with optional exponential backoff.
 * Uses SKIP LOCKED for high-concurrency consumption.
 * All option fields are optional; defaults are applied by the implementation.
 */
export declare function fetchMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, consumerName: string, options: {
    batchSize?: number;
    blockTimeout?: number;
    autoAck?: boolean;
    reservationTimeout?: number;
    enableBackoff?: boolean;
    initialBackoff?: number;
    maxBackoff?: number;
    maxRetries?: number;
}, logger: ILogger): Promise<StreamMessage[]>;
/**
 * Acknowledge messages (no-op for PostgreSQL - uses soft delete pattern).
 * Resolves to the number of ids it was asked to acknowledge.
 */
export declare function acknowledgeMessages(messageIds: string[]): Promise<number>;
/**
 * Delete messages by soft-deleting them (setting expired_at).
 */
export declare function deleteMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, messageIds: string[], logger: ILogger): Promise<number>;
/**
 * Acknowledge and delete messages in one operation.
 */
export declare function ackAndDelete(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, groupName: string, messageIds: string[], logger: ILogger): Promise<number>;
/**
 * Retry messages (placeholder for future implementation).
 * Currently always resolves to an empty list.
 */
export declare function retryMessages(streamName: string, groupName: string, options?: {
    consumerName?: string;
    minIdleTime?: number;
    messageIds?: string[];
    delay?: number;
    maxRetries?: number;
    limit?: number;
}): Promise<StreamMessage[]>;
|
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.retryMessages = exports.ackAndDelete = exports.deleteMessages = exports.acknowledgeMessages = exports.fetchMessages = exports.buildPublishSQL = exports.publishMessages = void 0;
|
|
4
|
+
const utils_1 = require("../../../../modules/utils");
|
|
5
|
+
/**
 * Publish one or more serialized messages onto a stream.
 *
 * When `options.transaction` exposes `addCommand`, the INSERT is queued
 * on that transaction (ids are mapped from the RETURNING rows when it
 * commits) and the transaction object is returned. Otherwise the INSERT
 * runs immediately and the new message ids are returned as strings.
 */
async function publishMessages(client, tableName, streamName, messages, options, logger) {
  const { sql, params } = buildPublishSQL(tableName, streamName, messages, options);
  const txn = options?.transaction;
  if (txn && typeof txn.addCommand === 'function') {
    // Deferred execution: atomically batched with the caller's other commands.
    txn.addCommand(sql, params, 'array', (rows) => rows.map((row) => row.id.toString()));
    return txn;
  }
  try {
    const res = await client.query(sql, params);
    return res.rows.map((row) => row.id.toString());
  } catch (error) {
    logger.error(`postgres-stream-publish-error-${streamName}`, {
      error,
    });
    throw error;
  }
}
exports.publishMessages = publishMessages;
|
|
37
|
+
/**
 * Build a parameterized INSERT statement for publishing messages.
 *
 * Message payloads may carry transient control fields that are stripped
 * from the stored payload and mapped onto table columns:
 *   - `_streamRetryConfig`  per-message retry policy (snake_case column form)
 *   - `_visibilityDelayMs`  delay before the message becomes consumable
 *   - `_retryAttempt`       current retry attempt counter
 * The INSERT column list is minimized when no message in the batch needs
 * retry or visibility columns, letting database defaults apply.
 *
 * @param {string} tableName  target table (trusted, caller-controlled)
 * @param {string} streamName stream routing key; a trailing ':' targets ENGINE
 * @param {string[]} messages JSON-serialized payloads
 * @param {object} [options]  may carry a `retryPolicy` applied to all messages
 * @returns {{ sql: string, params: any[] }} statement and positional params
 */
function buildPublishSQL(tableName, streamName, messages, options) {
  const groupName = streamName.endsWith(':') ? 'ENGINE' : 'WORKER';
  // Parse messages to extract retry config and visibility options
  const parsedMessages = messages.map((msg) => {
    const data = JSON.parse(msg);
    const retryConfig = data._streamRetryConfig;
    // Coerce to a safe non-negative integer: this value is interpolated
    // into an INTERVAL literal below, so it must never carry raw text.
    const visibilityDelayMs = Math.max(0, Math.floor(Number(data._visibilityDelayMs) || 0));
    const retryAttempt = data._retryAttempt;
    // Remove internal fields from message payload
    delete data._streamRetryConfig;
    delete data._visibilityDelayMs;
    delete data._retryAttempt;
    // Explicit config comes from the message itself or from publish options.
    const hasExplicitConfig = Boolean(
      (retryConfig && 'max_retry_attempts' in retryConfig) || options?.retryPolicy,
    );
    let normalizedPolicy = null;
    if (retryConfig && 'max_retry_attempts' in retryConfig) {
      // Already in column (snake_case) form; use verbatim.
      normalizedPolicy = retryConfig;
    } else if (options?.retryPolicy) {
      normalizedPolicy = (0, utils_1.normalizeRetryPolicy)(options.retryPolicy, {
        maximumAttempts: 3,
        backoffCoefficient: 10,
        maximumInterval: 120,
      });
    }
    return {
      message: JSON.stringify(data),
      hasExplicitConfig,
      retryPolicy: normalizedPolicy,
      visibilityDelayMs,
      retryAttempt: retryAttempt || 0,
    };
  });
  const params = [streamName, groupName];
  const valuesClauses = [];
  let insertColumns;
  // Choose the narrowest column list the whole batch allows.
  const noneHaveConfig = parsedMessages.every((pm) => !pm.hasExplicitConfig);
  const hasVisibilityDelays = parsedMessages.some((pm) => pm.visibilityDelayMs > 0);
  if (noneHaveConfig && !hasVisibilityDelays) {
    // Omit retry columns entirely - let DB defaults apply
    insertColumns = '(stream_name, group_name, message)';
    parsedMessages.forEach((pm, idx) => {
      valuesClauses.push(`($1, $2, $${idx + 3})`);
      params.push(pm.message);
    });
  } else if (noneHaveConfig && hasVisibilityDelays) {
    // Only visibility delays, no retry config
    insertColumns = '(stream_name, group_name, message, visible_at, retry_attempt)';
    parsedMessages.forEach((pm, idx) => {
      const base = idx * 2;
      const visibleAtSQL = pm.visibilityDelayMs > 0
        ? `NOW() + INTERVAL '${pm.visibilityDelayMs} milliseconds'`
        : 'DEFAULT';
      valuesClauses.push(`($1, $2, $${base + 3}, ${visibleAtSQL}, $${base + 4})`);
      params.push(pm.message, pm.retryAttempt);
    });
  } else {
    // Include retry columns and optionally visibility
    insertColumns = '(stream_name, group_name, message, max_retry_attempts, backoff_coefficient, maximum_interval_seconds, visible_at, retry_attempt)';
    parsedMessages.forEach((pm) => {
      const visibleAtClause = pm.visibilityDelayMs > 0
        ? `NOW() + INTERVAL '${pm.visibilityDelayMs} milliseconds'`
        : 'DEFAULT';
      const paramOffset = params.length + 1; // 1-based index of the next placeholder
      if (pm.hasExplicitConfig) {
        valuesClauses.push(`($1, $2, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, ${visibleAtClause}, $${paramOffset + 4})`);
        params.push(pm.message, pm.retryPolicy.max_retry_attempts, pm.retryPolicy.backoff_coefficient, pm.retryPolicy.maximum_interval_seconds, pm.retryAttempt);
      } else {
        // This message doesn't have config but others do - use DEFAULT keyword
        valuesClauses.push(`($1, $2, $${paramOffset}, DEFAULT, DEFAULT, DEFAULT, ${visibleAtClause}, $${paramOffset + 1})`);
        params.push(pm.message, pm.retryAttempt);
      }
    });
  }
  return {
    sql: `INSERT INTO ${tableName} ${insertColumns}
      VALUES ${valuesClauses.join(', ')}
      RETURNING id`,
    params,
  };
}
exports.buildPublishSQL = buildPublishSQL;
|
|
134
|
+
/**
 * Fetch (reserve) up to `batchSize` messages for a consumer group, with
 * optional client-side exponential backoff between empty polls.
 *
 * Reservation uses a single UPDATE ... WHERE id IN (SELECT ... FOR UPDATE
 * SKIP LOCKED) so concurrent consumers never block on each other's rows.
 * A row is eligible when it is unreserved (or its reservation is older
 * than `reservationTimeout` seconds), not soft-deleted, and visible.
 *
 * @param client       PostgreSQL client (must expose `query`)
 * @param tableName    messages table (trusted, caller-controlled)
 * @param streamName   stream to consume from
 * @param groupName    consumer group
 * @param consumerName recorded in `reserved_by` for the claimed rows
 * @param options      batchSize/reservationTimeout/backoff tuning knobs
 * @param logger       error sink; failures are logged then re-thrown
 * @returns StreamMessage[] ({ id, data, retryPolicy? }); empty when no
 *          eligible rows were found within `maxRetries` attempts
 */
async function fetchMessages(client, tableName, streamName, groupName, consumerName, options = {}, logger) {
    const enableBackoff = options?.enableBackoff ?? false;
    const initialBackoff = options?.initialBackoff ?? 100; // Default initial backoff: 100ms
    const maxBackoff = options?.maxBackoff ?? 3000; // Default max backoff: 3 seconds
    const maxRetries = options?.maxRetries ?? 3; // Set a finite default, e.g., 3 retries
    let backoff = initialBackoff;
    let retries = 0;
    try {
        while (retries < maxRetries) {
            retries++;
            const batchSize = options?.batchSize || 1;
            // NOTE(review): reservationTimeout is interpolated into the SQL
            // below; assumed to always be a number from internal config — confirm.
            const reservationTimeout = options?.reservationTimeout || 30;
            // Simplified query for better performance - especially for notification-triggered fetches
            const res = await client.query(`UPDATE ${tableName}
         SET reserved_at = NOW(), reserved_by = $4
         WHERE id IN (
           SELECT id FROM ${tableName}
           WHERE stream_name = $1
             AND group_name = $2
             AND (reserved_at IS NULL OR reserved_at < NOW() - INTERVAL '${reservationTimeout} seconds')
             AND expired_at IS NULL
             AND visible_at <= NOW()
           ORDER BY id
           LIMIT $3
           FOR UPDATE SKIP LOCKED
         )
         RETURNING id, message, max_retry_attempts, backoff_coefficient, maximum_interval_seconds, retry_attempt`, [streamName, groupName, batchSize, consumerName]);
            const messages = res.rows.map((row) => {
                const data = (0, utils_1.parseStreamMessage)(row.message);
                // Inject retry policy only if not using default values
                // Default values indicate old retry mechanism should be used (policies.retry)
                // NOTE(review): heuristic — rows matching (3|5, 10, 120) are treated
                // as "defaulted" and get no injected policy; an explicit policy with
                // exactly those values is indistinguishable from the default.
                const hasDefaultRetryPolicy = (row.max_retry_attempts === 3 || row.max_retry_attempts === 5) &&
                    parseFloat(row.backoff_coefficient) === 10 &&
                    row.maximum_interval_seconds === 120;
                if (row.max_retry_attempts !== null && !hasDefaultRetryPolicy) {
                    // Snake_case mirror of the stored columns, re-attached for republish.
                    data._streamRetryConfig = {
                        max_retry_attempts: row.max_retry_attempts,
                        backoff_coefficient: parseFloat(row.backoff_coefficient),
                        maximum_interval_seconds: row.maximum_interval_seconds,
                    };
                }
                // Inject retry_attempt from database
                if (row.retry_attempt !== undefined && row.retry_attempt !== null) {
                    data._retryAttempt = row.retry_attempt;
                }
                return {
                    id: row.id.toString(),
                    data,
                    // camelCase policy surfaced to callers alongside the payload copy.
                    retryPolicy: (row.max_retry_attempts !== null && !hasDefaultRetryPolicy) ? {
                        maximumAttempts: row.max_retry_attempts,
                        backoffCoefficient: parseFloat(row.backoff_coefficient),
                        maximumInterval: row.maximum_interval_seconds,
                    } : undefined,
                };
            });
            if (messages.length > 0 || !enableBackoff) {
                return messages;
            }
            // Apply backoff if enabled and no messages found
            // NOTE(review): this also sleeps once after the final empty attempt,
            // delaying the empty return by one backoff period.
            await (0, utils_1.sleepFor)(backoff);
            backoff = Math.min(backoff * 2, maxBackoff); // Exponential backoff
        }
        // Return empty array if maxRetries is reached and still no messages
        return [];
    }
    catch (error) {
        logger.error(`postgres-stream-consumer-error-${streamName}`, {
            error,
        });
        throw error;
    }
}
exports.fetchMessages = fetchMessages;
|
|
211
|
+
/**
 * Acknowledge messages. The PostgreSQL provider acknowledges through the
 * soft-delete path instead, so this is a no-op that resolves to the
 * number of ids it was asked to acknowledge.
 */
const acknowledgeMessages = async (messageIds) => messageIds.length;
exports.acknowledgeMessages = acknowledgeMessages;
|
|
219
|
+
/**
 * Soft-delete messages by stamping `expired_at = NOW()`; rows stay in
 * place (for retention/diagnostics) and are excluded from future fetches
 * by the `expired_at IS NULL` predicate.
 *
 * @param client     PostgreSQL client (must expose `query`)
 * @param tableName  messages table (trusted, caller-controlled)
 * @param streamName stream scope for the delete
 * @param groupName  consumer-group scope for the delete
 * @param messageIds decimal id strings to expire
 * @param logger     error sink; failures are logged then re-thrown
 * @returns the number of ids requested (not the number of rows touched)
 */
async function deleteMessages(client, tableName, streamName, groupName, messageIds, logger) {
  try {
    // Ids are decimal strings; always parse with an explicit radix.
    const ids = messageIds.map((id) => parseInt(id, 10));
    // Perform a soft delete by setting `expired_at` to the current timestamp
    await client.query(`UPDATE ${tableName}
       SET expired_at = NOW()
       WHERE stream_name = $1 AND id = ANY($2::bigint[]) AND group_name = $3`, [streamName, ids, groupName]);
    return messageIds.length;
  } catch (error) {
    logger.error(`postgres-stream-delete-error-${streamName}`, {
      error,
    });
    throw error;
  }
}
exports.deleteMessages = deleteMessages;
|
|
239
|
+
/**
 * Acknowledge and delete in a single step. With the soft-delete pattern
 * the ack is implicit, so this simply delegates to deleteMessages and
 * resolves to the same count.
 */
const ackAndDelete = async (client, tableName, streamName, groupName, messageIds, logger) =>
  deleteMessages(client, tableName, streamName, groupName, messageIds, logger);
exports.ackAndDelete = ackAndDelete;
|
|
246
|
+
/**
 * Retry messages (placeholder). A dedicated re-drive is not implemented
 * in this provider; the function always resolves to an empty list.
 */
const retryMessages = async (streamName, groupName, options) => [];
exports.retryMessages = retryMessages;
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { ILogger } from '../../../logger';
import { PostgresClientType } from '../../../../types/postgres';
import { NotificationConsumer, StreamMessage } from '../../../../types/stream';
import { ProviderClient } from '../../../../types/provider';
/**
 * Manages PostgreSQL LISTEN/NOTIFY for stream message notifications.
 * Handles static state shared across all service instances using the same client:
 * consumers, notification handlers, and fallback pollers are tracked per client
 * (static maps) while per-instance consumer registrations are tracked locally.
 */
export declare class NotificationManager<TService> {
    private client;
    private getTableName;
    private getFallbackInterval;
    private logger;
    private static clientNotificationConsumers;
    private static clientNotificationHandlers;
    private static clientFallbackPollers;
    private instanceNotificationConsumers;
    private notificationHandlerBound;
    constructor(client: PostgresClientType & ProviderClient, getTableName: () => string, getFallbackInterval: () => number, logger: ILogger);
    /**
     * Set up notification handler for this client (once per client).
     */
    setupClientNotificationHandler(serviceInstance: TService): void;
    /**
     * Start fallback poller for missed notifications (once per client).
     */
    startClientFallbackPoller(checkForMissedMessages: () => Promise<void>): void;
    /**
     * Check for missed messages (fallback polling).
     * Handles errors gracefully to avoid noise during shutdown.
     */
    checkForMissedMessages(fetchMessages: (instance: TService, consumer: NotificationConsumer) => Promise<StreamMessage[]>): Promise<void>;
    /**
     * Handle incoming PostgreSQL notification.
     */
    private handleNotification;
    /**
     * Set up notification consumer for a stream/group.
     */
    setupNotificationConsumer(serviceInstance: TService, streamName: string, groupName: string, consumerName: string, callback: (messages: StreamMessage[]) => void): Promise<void>;
    /**
     * Stop notification consumer for a stream/group.
     */
    stopNotificationConsumer(serviceInstance: TService, streamName: string, groupName: string): Promise<void>;
    /**
     * Clean up notification consumers for this instance.
     * Stops fallback poller FIRST to prevent race conditions during shutdown.
     */
    cleanup(serviceInstance: TService): Promise<void>;
    /**
     * Get consumer key from stream and group names.
     */
    private getConsumerKey;
}
/**
 * Get configuration values for notification settings.
 */
export declare function getFallbackInterval(config: any): number;
export declare function getNotificationTimeout(config: any): number;
|