@hotmeshio/hotmesh 0.5.8 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/index.d.ts +3 -1
- package/build/index.js +5 -1
- package/build/modules/enums.d.ts +17 -0
- package/build/modules/enums.js +18 -1
- package/build/modules/utils.js +27 -0
- package/build/package.json +15 -3
- package/build/services/connector/factory.d.ts +1 -1
- package/build/services/connector/factory.js +15 -1
- package/build/services/connector/providers/ioredis.d.ts +9 -0
- package/build/services/connector/providers/ioredis.js +26 -0
- package/build/services/connector/providers/redis.d.ts +9 -0
- package/build/services/connector/providers/redis.js +38 -0
- package/build/services/engine/index.js +12 -2
- package/build/services/quorum/index.js +18 -1
- package/build/services/search/factory.js +8 -0
- package/build/services/search/providers/redis/ioredis.d.ts +23 -0
- package/build/services/search/providers/redis/ioredis.js +189 -0
- package/build/services/search/providers/redis/redis.d.ts +23 -0
- package/build/services/search/providers/redis/redis.js +202 -0
- package/build/services/store/factory.js +9 -1
- package/build/services/store/providers/postgres/postgres.js +3 -5
- package/build/services/store/providers/postgres/time-notify.d.ts +7 -0
- package/build/services/store/providers/postgres/time-notify.js +163 -0
- package/build/services/store/providers/redis/_base.d.ts +137 -0
- package/build/services/store/providers/redis/_base.js +980 -0
- package/build/services/store/providers/redis/ioredis.d.ts +20 -0
- package/build/services/store/providers/redis/ioredis.js +180 -0
- package/build/services/store/providers/redis/redis.d.ts +18 -0
- package/build/services/store/providers/redis/redis.js +199 -0
- package/build/services/stream/factory.js +17 -1
- package/build/services/stream/providers/postgres/kvtables.js +81 -14
- package/build/services/stream/providers/redis/ioredis.d.ts +61 -0
- package/build/services/stream/providers/redis/ioredis.js +272 -0
- package/build/services/stream/providers/redis/redis.d.ts +61 -0
- package/build/services/stream/providers/redis/redis.js +305 -0
- package/build/services/sub/factory.js +8 -0
- package/build/services/sub/providers/postgres/postgres.js +28 -1
- package/build/services/sub/providers/redis/ioredis.d.ts +20 -0
- package/build/services/sub/providers/redis/ioredis.js +150 -0
- package/build/services/sub/providers/redis/redis.d.ts +18 -0
- package/build/services/sub/providers/redis/redis.js +137 -0
- package/build/types/index.d.ts +1 -0
- package/build/types/index.js +4 -1
- package/build/types/provider.d.ts +1 -1
- package/build/types/quorum.d.ts +2 -0
- package/build/types/redis.d.ts +258 -0
- package/build/types/redis.js +11 -0
- package/index.ts +4 -0
- package/package.json +15 -3
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import { SearchService } from '../../index';
|
|
2
|
+
import { ILogger } from '../../../logger';
|
|
3
|
+
import { RedisRedisClientType } from '../../../../types/redis';
|
|
4
|
+
declare class RedisSearchService extends SearchService<RedisRedisClientType> {
|
|
5
|
+
constructor(searchClient: RedisRedisClientType, storeClient?: RedisRedisClientType);
|
|
6
|
+
init(namespace: string, appId: string, logger: ILogger): Promise<void>;
|
|
7
|
+
createSearchIndex(indexName: string, prefixes: string[], schema: string[]): Promise<void>;
|
|
8
|
+
listSearchIndexes(): Promise<string[]>;
|
|
9
|
+
updateContext(key: string, fields: Record<string, string>): Promise<any>;
|
|
10
|
+
setFields(key: string, fields: Record<string, string>): Promise<number>;
|
|
11
|
+
getField(key: string, field: string): Promise<string>;
|
|
12
|
+
getFields(key: string, fields: string[]): Promise<string[]>;
|
|
13
|
+
getAllFields(key: string): Promise<Record<string, string>>;
|
|
14
|
+
deleteFields(key: string, fields: string[]): Promise<number>;
|
|
15
|
+
incrementFieldByFloat(key: string, field: string, increment: number): Promise<number>;
|
|
16
|
+
sendQuery(...query: any[]): Promise<any>;
|
|
17
|
+
sendIndexedQuery(index: string, query: string[]): Promise<string[]>;
|
|
18
|
+
findEntities(): Promise<any[]>;
|
|
19
|
+
findEntityById(): Promise<any>;
|
|
20
|
+
findEntitiesByCondition(): Promise<any[]>;
|
|
21
|
+
createEntityIndex(): Promise<void>;
|
|
22
|
+
}
|
|
23
|
+
export { RedisSearchService };
|
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.RedisSearchService = void 0;
|
|
4
|
+
const index_1 = require("../../index");
|
|
5
|
+
class RedisSearchService extends index_1.SearchService {
|
|
6
|
+
constructor(searchClient, storeClient) {
|
|
7
|
+
super(searchClient, storeClient);
|
|
8
|
+
}
|
|
9
|
+
async init(namespace, appId, logger) {
|
|
10
|
+
this.namespace = namespace;
|
|
11
|
+
this.appId = appId;
|
|
12
|
+
this.logger = logger;
|
|
13
|
+
}
|
|
14
|
+
async createSearchIndex(indexName, prefixes, schema) {
|
|
15
|
+
try {
|
|
16
|
+
await this.searchClient.sendCommand([
|
|
17
|
+
'FT.CREATE',
|
|
18
|
+
indexName,
|
|
19
|
+
'ON',
|
|
20
|
+
'HASH',
|
|
21
|
+
'PREFIX',
|
|
22
|
+
prefixes.length.toString(),
|
|
23
|
+
...prefixes,
|
|
24
|
+
'SCHEMA',
|
|
25
|
+
...schema,
|
|
26
|
+
]);
|
|
27
|
+
}
|
|
28
|
+
catch (error) {
|
|
29
|
+
this.logger.info('Error creating search index', { error });
|
|
30
|
+
throw error;
|
|
31
|
+
}
|
|
32
|
+
}
|
|
33
|
+
async listSearchIndexes() {
|
|
34
|
+
try {
|
|
35
|
+
const indexes = await this.searchClient.sendCommand(['FT._LIST']);
|
|
36
|
+
return indexes;
|
|
37
|
+
}
|
|
38
|
+
catch (error) {
|
|
39
|
+
this.logger.info('Error listing search indexes', { error });
|
|
40
|
+
throw error;
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
async updateContext(key, fields) {
|
|
44
|
+
// Find replay ID if present (field with hyphen, not the @udata field)
|
|
45
|
+
const replayId = Object.keys(fields).find((k) => k.includes('-') && !k.startsWith('@'));
|
|
46
|
+
// Route based on @udata operation
|
|
47
|
+
if ('@udata:set' in fields) {
|
|
48
|
+
const udata = JSON.parse(fields['@udata:set']);
|
|
49
|
+
const fieldsToSet = Array.isArray(udata)
|
|
50
|
+
? Object.fromEntries(Array.from({ length: udata.length / 2 }, (_, i) => [
|
|
51
|
+
udata[i * 2],
|
|
52
|
+
udata[i * 2 + 1],
|
|
53
|
+
]))
|
|
54
|
+
: udata;
|
|
55
|
+
const result = await this.setFields(key, fieldsToSet);
|
|
56
|
+
if (replayId)
|
|
57
|
+
await this.searchClient.HSET(key, { [replayId]: String(result) });
|
|
58
|
+
return result;
|
|
59
|
+
}
|
|
60
|
+
if ('@udata:get' in fields) {
|
|
61
|
+
const result = await this.getField(key, fields['@udata:get']);
|
|
62
|
+
if (replayId)
|
|
63
|
+
await this.searchClient.HSET(key, { [replayId]: result });
|
|
64
|
+
return result;
|
|
65
|
+
}
|
|
66
|
+
if ('@udata:mget' in fields) {
|
|
67
|
+
const result = await this.getFields(key, JSON.parse(fields['@udata:mget']));
|
|
68
|
+
if (replayId)
|
|
69
|
+
await this.searchClient.HSET(key, { [replayId]: result.join('|||') });
|
|
70
|
+
return result;
|
|
71
|
+
}
|
|
72
|
+
if ('@udata:delete' in fields) {
|
|
73
|
+
const result = await this.deleteFields(key, JSON.parse(fields['@udata:delete']));
|
|
74
|
+
if (replayId)
|
|
75
|
+
await this.searchClient.HSET(key, { [replayId]: String(result) });
|
|
76
|
+
return result;
|
|
77
|
+
}
|
|
78
|
+
if ('@udata:increment' in fields) {
|
|
79
|
+
const { field, value } = JSON.parse(fields['@udata:increment']);
|
|
80
|
+
const result = await this.incrementFieldByFloat(key, field, value);
|
|
81
|
+
if (replayId)
|
|
82
|
+
await this.searchClient.HSET(key, { [replayId]: String(result) });
|
|
83
|
+
return result;
|
|
84
|
+
}
|
|
85
|
+
if ('@udata:multiply' in fields) {
|
|
86
|
+
const { field, value } = JSON.parse(fields['@udata:multiply']);
|
|
87
|
+
const result = await this.incrementFieldByFloat(key, field, Math.log(value));
|
|
88
|
+
if (replayId)
|
|
89
|
+
await this.searchClient.HSET(key, { [replayId]: String(result) });
|
|
90
|
+
return result;
|
|
91
|
+
}
|
|
92
|
+
if ('@udata:all' in fields) {
|
|
93
|
+
const all = await this.getAllFields(key);
|
|
94
|
+
const result = Object.fromEntries(Object.entries(all).filter(([k]) => k.startsWith('_')));
|
|
95
|
+
if (replayId)
|
|
96
|
+
await this.searchClient.HSET(key, { [replayId]: JSON.stringify(result) });
|
|
97
|
+
return result;
|
|
98
|
+
}
|
|
99
|
+
// Default: call setFields
|
|
100
|
+
return await this.setFields(key, fields);
|
|
101
|
+
}
|
|
102
|
+
async setFields(key, fields) {
|
|
103
|
+
try {
|
|
104
|
+
const result = await this.searchClient.HSET(key, fields);
|
|
105
|
+
return Number(result);
|
|
106
|
+
}
|
|
107
|
+
catch (error) {
|
|
108
|
+
this.logger.error(`Error setting fields for key: ${key}`, { error });
|
|
109
|
+
throw error;
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
async getField(key, field) {
|
|
113
|
+
try {
|
|
114
|
+
return await this.searchClient.HGET(key, field);
|
|
115
|
+
}
|
|
116
|
+
catch (error) {
|
|
117
|
+
this.logger.error(`Error getting field ${field} for key: ${key}`, {
|
|
118
|
+
error,
|
|
119
|
+
});
|
|
120
|
+
throw error;
|
|
121
|
+
}
|
|
122
|
+
}
|
|
123
|
+
async getFields(key, fields) {
|
|
124
|
+
try {
|
|
125
|
+
return await this.searchClient.HMGET(key, [...fields]);
|
|
126
|
+
}
|
|
127
|
+
catch (error) {
|
|
128
|
+
this.logger.error(`Error getting fields for key: ${key}`, { error });
|
|
129
|
+
throw error;
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
async getAllFields(key) {
|
|
133
|
+
try {
|
|
134
|
+
return await this.searchClient.HGETALL(key);
|
|
135
|
+
}
|
|
136
|
+
catch (error) {
|
|
137
|
+
this.logger.error(`Error getting fields for key: ${key}`, { error });
|
|
138
|
+
throw error;
|
|
139
|
+
}
|
|
140
|
+
}
|
|
141
|
+
async deleteFields(key, fields) {
|
|
142
|
+
try {
|
|
143
|
+
const result = await this.searchClient.HDEL(key, fields);
|
|
144
|
+
return Number(result);
|
|
145
|
+
}
|
|
146
|
+
catch (error) {
|
|
147
|
+
this.logger.error(`Error deleting fields for key: ${key}`, { error });
|
|
148
|
+
throw error;
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
async incrementFieldByFloat(key, field, increment) {
|
|
152
|
+
try {
|
|
153
|
+
const result = await this.searchClient.HINCRBYFLOAT(key, field, increment);
|
|
154
|
+
return Number(result);
|
|
155
|
+
}
|
|
156
|
+
catch (error) {
|
|
157
|
+
this.logger.error(`Error incrementing field ${field} for key: ${key}`, {
|
|
158
|
+
error,
|
|
159
|
+
});
|
|
160
|
+
throw error;
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
async sendQuery(...query) {
|
|
164
|
+
try {
|
|
165
|
+
return await this.searchClient.sendCommand(query);
|
|
166
|
+
}
|
|
167
|
+
catch (error) {
|
|
168
|
+
this.logger.error('Error executing query', { error });
|
|
169
|
+
throw error;
|
|
170
|
+
}
|
|
171
|
+
}
|
|
172
|
+
async sendIndexedQuery(index, query) {
|
|
173
|
+
try {
|
|
174
|
+
if (query[0]?.startsWith('FT.')) {
|
|
175
|
+
return (await this.searchClient.sendCommand(query));
|
|
176
|
+
}
|
|
177
|
+
return (await this.searchClient.sendCommand([
|
|
178
|
+
'FT.SEARCH',
|
|
179
|
+
index,
|
|
180
|
+
...query,
|
|
181
|
+
]));
|
|
182
|
+
}
|
|
183
|
+
catch (error) {
|
|
184
|
+
this.logger.error('Error executing query', { error });
|
|
185
|
+
throw error;
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
// Entity methods - not implemented for Redis (postgres-specific JSONB operations)
|
|
189
|
+
async findEntities() {
|
|
190
|
+
throw new Error('Entity findEntities not supported in Redis - use PostgreSQL');
|
|
191
|
+
}
|
|
192
|
+
async findEntityById() {
|
|
193
|
+
throw new Error('Entity findEntityById not supported in Redis - use PostgreSQL');
|
|
194
|
+
}
|
|
195
|
+
async findEntitiesByCondition() {
|
|
196
|
+
throw new Error('Entity findEntitiesByCondition not supported in Redis - use PostgreSQL');
|
|
197
|
+
}
|
|
198
|
+
async createEntityIndex() {
|
|
199
|
+
throw new Error('Entity createEntityIndex not supported in Redis - use PostgreSQL');
|
|
200
|
+
}
|
|
201
|
+
}
|
|
202
|
+
exports.RedisSearchService = RedisSearchService;
|
|
@@ -2,11 +2,19 @@
|
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
3
|
exports.StoreServiceFactory = void 0;
|
|
4
4
|
const utils_1 = require("../../modules/utils");
|
|
5
|
+
const ioredis_1 = require("./providers/redis/ioredis");
|
|
6
|
+
const redis_1 = require("./providers/redis/redis");
|
|
5
7
|
const postgres_1 = require("./providers/postgres/postgres");
|
|
6
8
|
class StoreServiceFactory {
|
|
7
9
|
static async init(providerClient, namespace, appId, logger) {
|
|
8
10
|
let service;
|
|
9
|
-
if ((0, utils_1.identifyProvider)(providerClient) === '
|
|
11
|
+
if ((0, utils_1.identifyProvider)(providerClient) === 'redis') {
|
|
12
|
+
service = new redis_1.RedisStoreService(providerClient);
|
|
13
|
+
}
|
|
14
|
+
else if ((0, utils_1.identifyProvider)(providerClient) === 'ioredis') {
|
|
15
|
+
service = new ioredis_1.IORedisStoreService(providerClient);
|
|
16
|
+
}
|
|
17
|
+
else if ((0, utils_1.identifyProvider)(providerClient) === 'postgres') {
|
|
10
18
|
service = new postgres_1.PostgresStoreService(providerClient);
|
|
11
19
|
} //etc
|
|
12
20
|
await service.init(namespace, appId, logger);
|
|
@@ -33,6 +33,7 @@ const cache_1 = require("../../cache");
|
|
|
33
33
|
const __1 = require("../..");
|
|
34
34
|
const kvsql_1 = require("./kvsql");
|
|
35
35
|
const kvtables_1 = require("./kvtables");
|
|
36
|
+
const time_notify_1 = require("./time-notify");
|
|
36
37
|
class PostgresStoreService extends __1.StoreService {
|
|
37
38
|
transact() {
|
|
38
39
|
return this.storeClient.transact();
|
|
@@ -1039,11 +1040,8 @@ class PostgresStoreService extends __1.StoreService {
|
|
|
1039
1040
|
const schemaName = this.kvsql().safeName(appId);
|
|
1040
1041
|
const client = this.pgClient;
|
|
1041
1042
|
try {
|
|
1042
|
-
//
|
|
1043
|
-
const
|
|
1044
|
-
const path = await Promise.resolve().then(() => __importStar(require('path')));
|
|
1045
|
-
const sqlTemplate = fs.readFileSync(path.join(__dirname, 'time-notify.sql'), 'utf8');
|
|
1046
|
-
const sql = sqlTemplate.replace(/{schema}/g, schemaName);
|
|
1043
|
+
// Get the SQL with schema placeholder replaced
|
|
1044
|
+
const sql = (0, time_notify_1.getTimeNotifySql)(schemaName);
|
|
1047
1045
|
// Execute the entire SQL as one statement (functions contain $$ blocks with semicolons)
|
|
1048
1046
|
await client.query(sql);
|
|
1049
1047
|
this.logger.info('postgres-time-notifications-deployed', {
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Time-aware notification system for PostgreSQL
|
|
3
|
+
* This system minimizes polling by using LISTEN/NOTIFY for time-based task awakening
|
|
4
|
+
*
|
|
5
|
+
* Exported as a function that returns the SQL with schema placeholder replaced.
|
|
6
|
+
*/
|
|
7
|
+
export declare function getTimeNotifySql(schema: string): string;
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.getTimeNotifySql = void 0;
|
|
4
|
+
/**
|
|
5
|
+
* Time-aware notification system for PostgreSQL
|
|
6
|
+
* This system minimizes polling by using LISTEN/NOTIFY for time-based task awakening
|
|
7
|
+
*
|
|
8
|
+
* Exported as a function that returns the SQL with schema placeholder replaced.
|
|
9
|
+
*/
|
|
10
|
+
function getTimeNotifySql(schema) {
|
|
11
|
+
return `
|
|
12
|
+
-- Time-aware notification system for PostgreSQL
|
|
13
|
+
-- This system minimizes polling by using LISTEN/NOTIFY for time-based task awakening
|
|
14
|
+
|
|
15
|
+
-- Function to calculate the next awakening time from the sorted set
|
|
16
|
+
CREATE OR REPLACE FUNCTION ${schema}.get_next_awakening_time(app_key TEXT)
|
|
17
|
+
RETURNS TIMESTAMP WITH TIME ZONE AS $$
|
|
18
|
+
DECLARE
|
|
19
|
+
next_score DOUBLE PRECISION;
|
|
20
|
+
next_time TIMESTAMP WITH TIME ZONE;
|
|
21
|
+
BEGIN
|
|
22
|
+
-- Get the earliest (lowest score) entry from the time range ZSET
|
|
23
|
+
SELECT score INTO next_score
|
|
24
|
+
FROM ${schema}.task_schedules
|
|
25
|
+
WHERE key = app_key
|
|
26
|
+
AND (expiry IS NULL OR expiry > NOW())
|
|
27
|
+
ORDER BY score ASC
|
|
28
|
+
LIMIT 1;
|
|
29
|
+
|
|
30
|
+
IF next_score IS NULL THEN
|
|
31
|
+
RETURN NULL;
|
|
32
|
+
END IF;
|
|
33
|
+
|
|
34
|
+
-- Convert epoch milliseconds to timestamp
|
|
35
|
+
next_time := to_timestamp(next_score / 1000.0);
|
|
36
|
+
|
|
37
|
+
-- Only return if it's in the future
|
|
38
|
+
IF next_time > NOW() THEN
|
|
39
|
+
RETURN next_time;
|
|
40
|
+
END IF;
|
|
41
|
+
|
|
42
|
+
RETURN NULL;
|
|
43
|
+
END;
|
|
44
|
+
$$ LANGUAGE plpgsql;
|
|
45
|
+
|
|
46
|
+
-- Function to schedule a notification for the next awakening time
|
|
47
|
+
CREATE OR REPLACE FUNCTION ${schema}.schedule_time_notification(
|
|
48
|
+
app_id TEXT,
|
|
49
|
+
new_awakening_time TIMESTAMP WITH TIME ZONE DEFAULT NULL
|
|
50
|
+
)
|
|
51
|
+
RETURNS VOID AS $$
|
|
52
|
+
DECLARE
|
|
53
|
+
channel_name TEXT;
|
|
54
|
+
current_next_time TIMESTAMP WITH TIME ZONE;
|
|
55
|
+
app_key TEXT;
|
|
56
|
+
BEGIN
|
|
57
|
+
-- Build the time range key for this app
|
|
58
|
+
app_key := app_id || ':time_range';
|
|
59
|
+
channel_name := 'time_hooks_' || app_id;
|
|
60
|
+
|
|
61
|
+
-- Get the current next awakening time
|
|
62
|
+
current_next_time := ${schema}.get_next_awakening_time(app_key);
|
|
63
|
+
|
|
64
|
+
-- If we have a specific new awakening time, check if it's earlier
|
|
65
|
+
IF new_awakening_time IS NOT NULL THEN
|
|
66
|
+
IF current_next_time IS NULL OR new_awakening_time < current_next_time THEN
|
|
67
|
+
current_next_time := new_awakening_time;
|
|
68
|
+
END IF;
|
|
69
|
+
END IF;
|
|
70
|
+
|
|
71
|
+
-- If there's a next awakening time, schedule immediate notification
|
|
72
|
+
-- The application will handle the timing logic
|
|
73
|
+
IF current_next_time IS NOT NULL THEN
|
|
74
|
+
PERFORM pg_notify(channel_name, json_build_object(
|
|
75
|
+
'type', 'time_schedule_updated',
|
|
76
|
+
'app_id', app_id,
|
|
77
|
+
'next_awakening', extract(epoch from current_next_time) * 1000,
|
|
78
|
+
'updated_at', extract(epoch from NOW()) * 1000
|
|
79
|
+
)::text);
|
|
80
|
+
END IF;
|
|
81
|
+
END;
|
|
82
|
+
$$ LANGUAGE plpgsql;
|
|
83
|
+
|
|
84
|
+
-- Function to notify when time hooks are ready
|
|
85
|
+
CREATE OR REPLACE FUNCTION ${schema}.notify_time_hooks_ready(app_id TEXT)
|
|
86
|
+
RETURNS VOID AS $$
|
|
87
|
+
DECLARE
|
|
88
|
+
channel_name TEXT;
|
|
89
|
+
BEGIN
|
|
90
|
+
channel_name := 'time_hooks_' || app_id;
|
|
91
|
+
|
|
92
|
+
PERFORM pg_notify(channel_name, json_build_object(
|
|
93
|
+
'type', 'time_hooks_ready',
|
|
94
|
+
'app_id', app_id,
|
|
95
|
+
'ready_at', extract(epoch from NOW()) * 1000
|
|
96
|
+
)::text);
|
|
97
|
+
END;
|
|
98
|
+
$$ LANGUAGE plpgsql;
|
|
99
|
+
|
|
100
|
+
-- Trigger function for when time hooks are added/updated
|
|
101
|
+
CREATE OR REPLACE FUNCTION ${schema}.on_time_hook_change()
|
|
102
|
+
RETURNS TRIGGER AS $$
|
|
103
|
+
DECLARE
|
|
104
|
+
app_id_extracted TEXT;
|
|
105
|
+
awakening_time TIMESTAMP WITH TIME ZONE;
|
|
106
|
+
BEGIN
|
|
107
|
+
-- Extract app_id from the key (assumes format: app_id:time_range)
|
|
108
|
+
app_id_extracted := split_part(NEW.key, ':time_range', 1);
|
|
109
|
+
|
|
110
|
+
-- Convert the score (epoch milliseconds) to timestamp
|
|
111
|
+
awakening_time := to_timestamp(NEW.score / 1000.0);
|
|
112
|
+
|
|
113
|
+
-- Schedule notification for this new awakening time
|
|
114
|
+
PERFORM ${schema}.schedule_time_notification(app_id_extracted, awakening_time);
|
|
115
|
+
|
|
116
|
+
RETURN NEW;
|
|
117
|
+
END;
|
|
118
|
+
$$ LANGUAGE plpgsql;
|
|
119
|
+
|
|
120
|
+
-- Trigger function for when time hooks are removed
|
|
121
|
+
CREATE OR REPLACE FUNCTION ${schema}.on_time_hook_remove()
|
|
122
|
+
RETURNS TRIGGER AS $$
|
|
123
|
+
DECLARE
|
|
124
|
+
app_id_extracted TEXT;
|
|
125
|
+
BEGIN
|
|
126
|
+
-- Extract app_id from the key
|
|
127
|
+
app_id_extracted := split_part(OLD.key, ':time_range', 1);
|
|
128
|
+
|
|
129
|
+
-- Recalculate and notify about the schedule update
|
|
130
|
+
PERFORM ${schema}.schedule_time_notification(app_id_extracted);
|
|
131
|
+
|
|
132
|
+
RETURN OLD;
|
|
133
|
+
END;
|
|
134
|
+
$$ LANGUAGE plpgsql;
|
|
135
|
+
|
|
136
|
+
-- Create triggers on the sorted_set table for time hooks
|
|
137
|
+
-- Note: These will be created per app schema
|
|
138
|
+
-- Drop existing triggers first to avoid conflicts
|
|
139
|
+
DROP TRIGGER IF EXISTS trg_time_hook_insert ON ${schema}.task_schedules;
|
|
140
|
+
DROP TRIGGER IF EXISTS trg_time_hook_update ON ${schema}.task_schedules;
|
|
141
|
+
DROP TRIGGER IF EXISTS trg_time_hook_delete ON ${schema}.task_schedules;
|
|
142
|
+
|
|
143
|
+
-- Create new triggers
|
|
144
|
+
CREATE TRIGGER trg_time_hook_insert
|
|
145
|
+
AFTER INSERT ON ${schema}.task_schedules
|
|
146
|
+
FOR EACH ROW
|
|
147
|
+
WHEN (NEW.key LIKE '%:time_range')
|
|
148
|
+
EXECUTE FUNCTION ${schema}.on_time_hook_change();
|
|
149
|
+
|
|
150
|
+
CREATE TRIGGER trg_time_hook_update
|
|
151
|
+
AFTER UPDATE ON ${schema}.task_schedules
|
|
152
|
+
FOR EACH ROW
|
|
153
|
+
WHEN (NEW.key LIKE '%:time_range')
|
|
154
|
+
EXECUTE FUNCTION ${schema}.on_time_hook_change();
|
|
155
|
+
|
|
156
|
+
CREATE TRIGGER trg_time_hook_delete
|
|
157
|
+
AFTER DELETE ON ${schema}.task_schedules
|
|
158
|
+
FOR EACH ROW
|
|
159
|
+
WHEN (OLD.key LIKE '%:time_range')
|
|
160
|
+
EXECUTE FUNCTION ${schema}.on_time_hook_remove();
|
|
161
|
+
`;
|
|
162
|
+
}
|
|
163
|
+
exports.getTimeNotifySql = getTimeNotifySql;
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import { KeyStoreParams, KeyType } from '../../../../modules/key';
|
|
2
|
+
import { ILogger } from '../../../logger';
|
|
3
|
+
import { ActivityType, Consumes } from '../../../../types/activity';
|
|
4
|
+
import { AppVID } from '../../../../types/app';
|
|
5
|
+
import { HookRule, HookSignal } from '../../../../types/hook';
|
|
6
|
+
import { HotMeshApp, HotMeshApps, HotMeshSettings } from '../../../../types/hotmesh';
|
|
7
|
+
import { ProviderClient, ProviderTransaction } from '../../../../types/provider';
|
|
8
|
+
import { SymbolSets, StringStringType, StringAnyType, Symbols } from '../../../../types/serializer';
|
|
9
|
+
import { IdsData, JobStatsRange, StatsType } from '../../../../types/stats';
|
|
10
|
+
import { Transitions } from '../../../../types/transition';
|
|
11
|
+
import { JobInterruptOptions } from '../../../../types/job';
|
|
12
|
+
import { WorkListTaskType } from '../../../../types/task';
|
|
13
|
+
import { ThrottleOptions } from '../../../../types/quorum';
|
|
14
|
+
import { StoreService } from '../..';
|
|
15
|
+
declare abstract class RedisStoreBase<ClientProvider extends ProviderClient, TransactionProvider extends ProviderTransaction> extends StoreService<ClientProvider, TransactionProvider> {
|
|
16
|
+
commands: Record<string, string>;
|
|
17
|
+
abstract transact(): TransactionProvider;
|
|
18
|
+
abstract exec(...args: any[]): Promise<any>;
|
|
19
|
+
abstract setnxex(key: string, value: string, expireSeconds: number): Promise<boolean>;
|
|
20
|
+
constructor(storeClient: ClientProvider);
|
|
21
|
+
init(namespace: string, appId: string, logger: ILogger): Promise<HotMeshApps>;
|
|
22
|
+
isSuccessful(result: any): boolean;
|
|
23
|
+
delistSignalKey(key: string, target: string): Promise<void>;
|
|
24
|
+
zAdd(key: string, score: number | string, value: string | number, redisMulti?: TransactionProvider): Promise<any>;
|
|
25
|
+
zRangeByScore(key: string, score: number | string, value: string | number): Promise<string | null>;
|
|
26
|
+
mintKey(type: KeyType, params: KeyStoreParams): string;
|
|
27
|
+
invalidateCache(): void;
|
|
28
|
+
/**
|
|
29
|
+
* At any given time only a single engine will
|
|
30
|
+
* check for and process work items in the
|
|
31
|
+
* time and signal task queues.
|
|
32
|
+
*/
|
|
33
|
+
reserveScoutRole(scoutType: 'time' | 'signal' | 'activate', delay?: number): Promise<boolean>;
|
|
34
|
+
releaseScoutRole(scoutType: 'time' | 'signal' | 'activate'): Promise<boolean>;
|
|
35
|
+
getSettings(bCreate?: boolean): Promise<HotMeshSettings>;
|
|
36
|
+
setSettings(manifest: HotMeshSettings): Promise<any>;
|
|
37
|
+
reserveSymbolRange(target: string, size: number, type: 'JOB' | 'ACTIVITY', tryCount?: number): Promise<[number, number, Symbols]>;
|
|
38
|
+
getAllSymbols(): Promise<Symbols>;
|
|
39
|
+
getSymbols(activityId: string): Promise<Symbols>;
|
|
40
|
+
addSymbols(activityId: string, symbols: Symbols): Promise<boolean>;
|
|
41
|
+
seedSymbols(target: string, type: 'JOB' | 'ACTIVITY', startIndex: number): StringStringType;
|
|
42
|
+
seedJobSymbols(startIndex: number): StringStringType;
|
|
43
|
+
seedActivitySymbols(startIndex: number, activityId: string): StringStringType;
|
|
44
|
+
getSymbolValues(): Promise<Symbols>;
|
|
45
|
+
addSymbolValues(symvals: Symbols): Promise<boolean>;
|
|
46
|
+
getSymbolKeys(symbolNames: string[]): Promise<SymbolSets>;
|
|
47
|
+
getApp(id: string, refresh?: boolean): Promise<HotMeshApp>;
|
|
48
|
+
setApp(id: string, version: string): Promise<HotMeshApp>;
|
|
49
|
+
activateAppVersion(id: string, version: string): Promise<boolean>;
|
|
50
|
+
registerAppVersion(appId: string, version: string): Promise<any>;
|
|
51
|
+
setStats(jobKey: string, jobId: string, dateTime: string, stats: StatsType, appVersion: AppVID, transaction?: TransactionProvider): Promise<any>;
|
|
52
|
+
hGetAllResult(result: any): any;
|
|
53
|
+
getJobStats(jobKeys: string[]): Promise<JobStatsRange>;
|
|
54
|
+
getJobIds(indexKeys: string[], idRange: [number, number]): Promise<IdsData>;
|
|
55
|
+
setStatus(collationKeyStatus: number, jobId: string, appId: string, transaction?: TransactionProvider): Promise<any>;
|
|
56
|
+
getStatus(jobId: string, appId: string): Promise<number>;
|
|
57
|
+
setState({ ...state }: StringAnyType, status: number | null, jobId: string, symbolNames: string[], dIds: StringStringType, transaction?: TransactionProvider): Promise<string>;
|
|
58
|
+
/**
|
|
59
|
+
* Returns custom search fields and values.
|
|
60
|
+
* NOTE: The `fields` param should NOT prefix items with an underscore.
|
|
61
|
+
* NOTE: Literals are allowed if quoted.
|
|
62
|
+
*/
|
|
63
|
+
getQueryState(jobId: string, fields: string[]): Promise<StringAnyType>;
|
|
64
|
+
getState(jobId: string, consumes: Consumes, dIds: StringStringType): Promise<[StringAnyType, number] | undefined>;
|
|
65
|
+
getRaw(jobId: string): Promise<StringStringType>;
|
|
66
|
+
/**
|
|
67
|
+
* collate is a generic method for incrementing a value in a hash
|
|
68
|
+
* in order to track their progress during processing.
|
|
69
|
+
*/
|
|
70
|
+
collate(jobId: string, activityId: string, amount: number, dIds: StringStringType, transaction?: TransactionProvider): Promise<number>;
|
|
71
|
+
/**
|
|
72
|
+
 * Synthetic collation affects those activities in the graph
|
|
73
|
+
* that represent the synthetic DAG that was materialized during compilation;
|
|
74
|
+
* Synthetic collation distinguishes `re-entry due to failure` from
|
|
75
|
+
* `purposeful re-entry`.
|
|
76
|
+
*/
|
|
77
|
+
collateSynthetic(jobId: string, guid: string, amount: number, transaction?: TransactionProvider): Promise<number>;
|
|
78
|
+
setStateNX(jobId: string, appId: string, status?: number, entity?: string): Promise<boolean>;
|
|
79
|
+
getSchema(activityId: string, appVersion: AppVID): Promise<ActivityType>;
|
|
80
|
+
getSchemas(appVersion: AppVID): Promise<Record<string, ActivityType>>;
|
|
81
|
+
setSchemas(schemas: Record<string, ActivityType>, appVersion: AppVID): Promise<any>;
|
|
82
|
+
setSubscriptions(subscriptions: Record<string, any>, appVersion: AppVID): Promise<boolean>;
|
|
83
|
+
getSubscriptions(appVersion: AppVID): Promise<Record<string, string>>;
|
|
84
|
+
getSubscription(topic: string, appVersion: AppVID): Promise<string | undefined>;
|
|
85
|
+
setTransitions(transitions: Record<string, any>, appVersion: AppVID): Promise<any>;
|
|
86
|
+
getTransitions(appVersion: AppVID): Promise<Transitions>;
|
|
87
|
+
setHookRules(hookRules: Record<string, HookRule[]>): Promise<any>;
|
|
88
|
+
getHookRules(): Promise<Record<string, HookRule[]>>;
|
|
89
|
+
setHookSignal(hook: HookSignal, transaction?: TransactionProvider): Promise<any>;
|
|
90
|
+
getHookSignal(topic: string, resolved: string): Promise<string | undefined>;
|
|
91
|
+
deleteHookSignal(topic: string, resolved: string): Promise<number | undefined>;
|
|
92
|
+
addTaskQueues(keys: string[]): Promise<void>;
|
|
93
|
+
getActiveTaskQueue(): Promise<string | null>;
|
|
94
|
+
deleteProcessedTaskQueue(workItemKey: string, key: string, processedKey: string, scrub?: boolean): Promise<void>;
|
|
95
|
+
processTaskQueue(sourceKey: string, destinationKey: string): Promise<any>;
|
|
96
|
+
expireJob(jobId: string, inSeconds: number, redisMulti?: TransactionProvider): Promise<void>;
|
|
97
|
+
getDependencies(jobId: string): Promise<string[]>;
|
|
98
|
+
/**
|
|
99
|
+
* registers a hook activity to be awakened (uses ZSET to
|
|
100
|
+
* store the 'sleep group' and LIST to store the events
|
|
101
|
+
* for the given sleep group. Sleep groups are
|
|
102
|
+
* organized into 'n'-second blocks (LISTS))
|
|
103
|
+
*/
|
|
104
|
+
registerTimeHook(jobId: string, gId: string, activityId: string, type: WorkListTaskType, deletionTime: number, dad: string, transaction?: TransactionProvider): Promise<void>;
|
|
105
|
+
getNextTask(listKey?: string): Promise<[
|
|
106
|
+
listKey: string,
|
|
107
|
+
jobId: string,
|
|
108
|
+
gId: string,
|
|
109
|
+
activityId: string,
|
|
110
|
+
type: WorkListTaskType
|
|
111
|
+
] | boolean>;
|
|
112
|
+
/**
|
|
113
|
+
* when processing time jobs, the target LIST ID returned
|
|
114
|
+
* from the ZSET query can be prefixed to denote what to
|
|
115
|
+
* do with the work list. (not everything is known in advance,
|
|
116
|
+
* so the ZSET key defines HOW to approach the work in the
|
|
117
|
+
* generic LIST (lists typically contain target job ids)
|
|
118
|
+
* @param {string} listKey - composite key
|
|
119
|
+
*/
|
|
120
|
+
resolveTaskKeyContext(listKey: string): [WorkListTaskType, string];
|
|
121
|
+
/**
|
|
122
|
+
 * Interrupts a job and sets a job error (410), if 'throw'!=false.
|
|
123
|
+
* This method is called by the engine and not by an activity and is
|
|
124
|
+
* followed by a call to execute job completion/cleanup tasks
|
|
125
|
+
* associated with a job completion event.
|
|
126
|
+
*
|
|
127
|
+
* Todo: move most of this logic to the engine (too much logic for the store)
|
|
128
|
+
*/
|
|
129
|
+
interrupt(topic: string, jobId: string, options?: JobInterruptOptions): Promise<void>;
|
|
130
|
+
scrub(jobId: string): Promise<void>;
|
|
131
|
+
findJobs(queryString?: string, limit?: number, batchSize?: number, cursor?: string): Promise<[string, string[]]>;
|
|
132
|
+
findJobFields(jobId: string, fieldMatchPattern?: string, limit?: number, batchSize?: number, cursor?: string): Promise<[string, StringStringType]>;
|
|
133
|
+
setThrottleRate(options: ThrottleOptions): Promise<void>;
|
|
134
|
+
getThrottleRates(): Promise<StringStringType>;
|
|
135
|
+
getThrottleRate(topic: string): Promise<number>;
|
|
136
|
+
}
|
|
137
|
+
export { RedisStoreBase };
|