@hotmeshio/hotmesh 0.5.8 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/index.d.ts +3 -1
- package/build/index.js +5 -1
- package/build/modules/enums.d.ts +17 -0
- package/build/modules/enums.js +18 -1
- package/build/modules/utils.js +27 -0
- package/build/package.json +15 -3
- package/build/services/connector/factory.d.ts +1 -1
- package/build/services/connector/factory.js +15 -1
- package/build/services/connector/providers/ioredis.d.ts +9 -0
- package/build/services/connector/providers/ioredis.js +26 -0
- package/build/services/connector/providers/redis.d.ts +9 -0
- package/build/services/connector/providers/redis.js +38 -0
- package/build/services/engine/index.js +12 -2
- package/build/services/quorum/index.js +18 -1
- package/build/services/search/factory.js +8 -0
- package/build/services/search/providers/redis/ioredis.d.ts +23 -0
- package/build/services/search/providers/redis/ioredis.js +189 -0
- package/build/services/search/providers/redis/redis.d.ts +23 -0
- package/build/services/search/providers/redis/redis.js +202 -0
- package/build/services/store/factory.js +9 -1
- package/build/services/store/providers/postgres/postgres.js +3 -5
- package/build/services/store/providers/postgres/time-notify.d.ts +7 -0
- package/build/services/store/providers/postgres/time-notify.js +163 -0
- package/build/services/store/providers/redis/_base.d.ts +137 -0
- package/build/services/store/providers/redis/_base.js +980 -0
- package/build/services/store/providers/redis/ioredis.d.ts +20 -0
- package/build/services/store/providers/redis/ioredis.js +180 -0
- package/build/services/store/providers/redis/redis.d.ts +18 -0
- package/build/services/store/providers/redis/redis.js +199 -0
- package/build/services/stream/factory.js +17 -1
- package/build/services/stream/providers/postgres/kvtables.js +81 -14
- package/build/services/stream/providers/redis/ioredis.d.ts +61 -0
- package/build/services/stream/providers/redis/ioredis.js +272 -0
- package/build/services/stream/providers/redis/redis.d.ts +61 -0
- package/build/services/stream/providers/redis/redis.js +305 -0
- package/build/services/sub/factory.js +8 -0
- package/build/services/sub/providers/postgres/postgres.js +28 -1
- package/build/services/sub/providers/redis/ioredis.d.ts +20 -0
- package/build/services/sub/providers/redis/ioredis.js +150 -0
- package/build/services/sub/providers/redis/redis.d.ts +18 -0
- package/build/services/sub/providers/redis/redis.js +137 -0
- package/build/types/index.d.ts +1 -0
- package/build/types/index.js +4 -1
- package/build/types/provider.d.ts +1 -1
- package/build/types/quorum.d.ts +2 -0
- package/build/types/redis.d.ts +258 -0
- package/build/types/redis.js +11 -0
- package/index.ts +4 -0
- package/package.json +15 -3
package/build/index.d.ts
CHANGED
|
@@ -16,7 +16,9 @@ import * as Enums from './modules/enums';
|
|
|
16
16
|
import * as KeyStore from './modules/key';
|
|
17
17
|
import { ConnectorService as Connector } from './services/connector/factory';
|
|
18
18
|
import { PostgresConnection as ConnectorPostgres } from './services/connector/providers/postgres';
|
|
19
|
+
import { RedisConnection as ConnectorIORedis } from './services/connector/providers/ioredis';
|
|
20
|
+
import { RedisConnection as ConnectorRedis } from './services/connector/providers/redis';
|
|
19
21
|
import { NatsConnection as ConnectorNATS } from './services/connector/providers/nats';
|
|
20
22
|
export { Connector, //factory
|
|
21
|
-
ConnectorNATS, ConnectorPostgres, HotMesh, HotMeshConfig, MeshCall, MemFlow, Client, Connection, proxyActivities, Search, Entity, Worker, workflow, WorkflowHandle, Enums, Errors, Utils, KeyStore, };
|
|
23
|
+
ConnectorIORedis, ConnectorNATS, ConnectorPostgres, ConnectorRedis, HotMesh, HotMeshConfig, MeshCall, MemFlow, Client, Connection, proxyActivities, Search, Entity, Worker, workflow, WorkflowHandle, Enums, Errors, Utils, KeyStore, };
|
|
22
24
|
export * as Types from './types';
|
package/build/index.js
CHANGED
|
@@ -23,7 +23,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
|
|
|
23
23
|
return result;
|
|
24
24
|
};
|
|
25
25
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
26
|
-
exports.Types = exports.KeyStore = exports.Utils = exports.Errors = exports.Enums = exports.WorkflowHandle = exports.workflow = exports.Worker = exports.Entity = exports.Search = exports.proxyActivities = exports.Connection = exports.Client = exports.MemFlow = exports.MeshCall = exports.HotMesh = exports.ConnectorPostgres = exports.ConnectorNATS = exports.Connector = void 0;
|
|
26
|
+
exports.Types = exports.KeyStore = exports.Utils = exports.Errors = exports.Enums = exports.WorkflowHandle = exports.workflow = exports.Worker = exports.Entity = exports.Search = exports.proxyActivities = exports.Connection = exports.Client = exports.MemFlow = exports.MeshCall = exports.HotMesh = exports.ConnectorRedis = exports.ConnectorPostgres = exports.ConnectorNATS = exports.ConnectorIORedis = exports.Connector = void 0;
|
|
27
27
|
const hotmesh_1 = require("./services/hotmesh");
|
|
28
28
|
Object.defineProperty(exports, "HotMesh", { enumerable: true, get: function () { return hotmesh_1.HotMesh; } });
|
|
29
29
|
const meshcall_1 = require("./services/meshcall");
|
|
@@ -58,6 +58,10 @@ const factory_1 = require("./services/connector/factory");
|
|
|
58
58
|
Object.defineProperty(exports, "Connector", { enumerable: true, get: function () { return factory_1.ConnectorService; } });
|
|
59
59
|
const postgres_1 = require("./services/connector/providers/postgres");
|
|
60
60
|
Object.defineProperty(exports, "ConnectorPostgres", { enumerable: true, get: function () { return postgres_1.PostgresConnection; } });
|
|
61
|
+
const ioredis_1 = require("./services/connector/providers/ioredis");
|
|
62
|
+
Object.defineProperty(exports, "ConnectorIORedis", { enumerable: true, get: function () { return ioredis_1.RedisConnection; } });
|
|
63
|
+
const redis_1 = require("./services/connector/providers/redis");
|
|
64
|
+
Object.defineProperty(exports, "ConnectorRedis", { enumerable: true, get: function () { return redis_1.RedisConnection; } });
|
|
61
65
|
const nats_1 = require("./services/connector/providers/nats");
|
|
62
66
|
Object.defineProperty(exports, "ConnectorNATS", { enumerable: true, get: function () { return nats_1.NatsConnection; } });
|
|
63
67
|
exports.Types = __importStar(require("./types"));
|
package/build/modules/enums.d.ts
CHANGED
|
@@ -11,6 +11,10 @@ export declare const HMSH_TELEMETRY: "debug" | "info";
|
|
|
11
11
|
* Default cleanup time for signal in the db when its associated job is completed.
|
|
12
12
|
*/
|
|
13
13
|
export declare const HMSH_SIGNAL_EXPIRE = 3600;
|
|
14
|
+
/**
|
|
15
|
+
* Determines if the system is running in cluster mode (for Redis cluster support).
|
|
16
|
+
*/
|
|
17
|
+
export declare const HMSH_IS_CLUSTER: boolean;
|
|
14
18
|
export declare const HMSH_CODE_SUCCESS = 200;
|
|
15
19
|
export declare const HMSH_CODE_PENDING = 202;
|
|
16
20
|
export declare const HMSH_CODE_NOTFOUND = 404;
|
|
@@ -107,3 +111,16 @@ export declare const HMSH_GUID_SIZE: number;
|
|
|
107
111
|
* Default task queue name used when no task queue is specified
|
|
108
112
|
*/
|
|
109
113
|
export declare const DEFAULT_TASK_QUEUE = "default";
|
|
114
|
+
/**
|
|
115
|
+
* PostgreSQL NOTIFY payload limit. If a job message exceeds this size,
|
|
116
|
+
* a reference message is sent instead and the subscriber fetches via getState.
|
|
117
|
+
* PostgreSQL hard limit is 8000 bytes; default 7500 provides safety margin.
|
|
118
|
+
*/
|
|
119
|
+
export declare const HMSH_NOTIFY_PAYLOAD_LIMIT: number;
|
|
120
|
+
/**
|
|
121
|
+
* Serializer compression threshold. When a stringified object exceeds this size
|
|
122
|
+
* in bytes, it will be gzipped and base64 encoded (with /b prefix) to reduce
|
|
123
|
+
* Redis hash storage size. Default 100 bytes - small enough to catch most
|
|
124
|
+
* workflow state but compression only applies if it actually reduces size.
|
|
125
|
+
*/
|
|
126
|
+
export declare const HMSH_SERIALIZER_COMPRESSION_THRESHOLD: number;
|
package/build/modules/enums.js
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.DEFAULT_TASK_QUEUE = exports.HMSH_GUID_SIZE = exports.HMSH_SCOUT_INTERVAL_SECONDS = exports.HMSH_FIDELITY_SECONDS = exports.HMSH_EXPIRE_DURATION = exports.HMSH_XPENDING_COUNT = exports.HMSH_XCLAIM_COUNT = exports.HMSH_XCLAIM_DELAY_MS = exports.HMSH_BLOCK_TIME_MS = exports.HMSH_MEMFLOW_EXP_BACKOFF = exports.HMSH_MEMFLOW_MAX_INTERVAL = exports.HMSH_MEMFLOW_MAX_ATTEMPTS = exports.HMSH_GRADUATED_INTERVAL_MS = exports.HMSH_MAX_TIMEOUT_MS = exports.HMSH_MAX_RETRIES = exports.MAX_DELAY = exports.MAX_STREAM_RETRIES = exports.INITIAL_STREAM_BACKOFF = exports.MAX_STREAM_BACKOFF = exports.HMSH_EXPIRE_JOB_SECONDS = exports.HMSH_OTT_WAIT_TIME = exports.HMSH_DEPLOYMENT_PAUSE = exports.HMSH_DEPLOYMENT_DELAY = exports.HMSH_ACTIVATION_MAX_RETRY = exports.HMSH_QUORUM_DELAY_MS = exports.HMSH_QUORUM_ROLLCALL_CYCLES = exports.HMSH_STATUS_UNKNOWN = exports.HMSH_CODE_MEMFLOW_RETRYABLE = exports.HMSH_CODE_MEMFLOW_FATAL = exports.HMSH_CODE_MEMFLOW_MAXED = exports.HMSH_CODE_MEMFLOW_TIMEOUT = exports.HMSH_CODE_MEMFLOW_WAIT = exports.HMSH_CODE_MEMFLOW_PROXY = exports.HMSH_CODE_MEMFLOW_CHILD = exports.HMSH_CODE_MEMFLOW_ALL = exports.HMSH_CODE_MEMFLOW_SLEEP = exports.HMSH_CODE_UNACKED = exports.HMSH_CODE_TIMEOUT = exports.HMSH_CODE_UNKNOWN = exports.HMSH_CODE_INTERRUPT = exports.HMSH_CODE_NOTFOUND = exports.HMSH_CODE_PENDING = exports.HMSH_CODE_SUCCESS = exports.HMSH_SIGNAL_EXPIRE = exports.HMSH_TELEMETRY = exports.HMSH_LOGLEVEL = void 0;
|
|
3
|
+
exports.HMSH_SERIALIZER_COMPRESSION_THRESHOLD = exports.HMSH_NOTIFY_PAYLOAD_LIMIT = exports.DEFAULT_TASK_QUEUE = exports.HMSH_GUID_SIZE = exports.HMSH_SCOUT_INTERVAL_SECONDS = exports.HMSH_FIDELITY_SECONDS = exports.HMSH_EXPIRE_DURATION = exports.HMSH_XPENDING_COUNT = exports.HMSH_XCLAIM_COUNT = exports.HMSH_XCLAIM_DELAY_MS = exports.HMSH_BLOCK_TIME_MS = exports.HMSH_MEMFLOW_EXP_BACKOFF = exports.HMSH_MEMFLOW_MAX_INTERVAL = exports.HMSH_MEMFLOW_MAX_ATTEMPTS = exports.HMSH_GRADUATED_INTERVAL_MS = exports.HMSH_MAX_TIMEOUT_MS = exports.HMSH_MAX_RETRIES = exports.MAX_DELAY = exports.MAX_STREAM_RETRIES = exports.INITIAL_STREAM_BACKOFF = exports.MAX_STREAM_BACKOFF = exports.HMSH_EXPIRE_JOB_SECONDS = exports.HMSH_OTT_WAIT_TIME = exports.HMSH_DEPLOYMENT_PAUSE = exports.HMSH_DEPLOYMENT_DELAY = exports.HMSH_ACTIVATION_MAX_RETRY = exports.HMSH_QUORUM_DELAY_MS = exports.HMSH_QUORUM_ROLLCALL_CYCLES = exports.HMSH_STATUS_UNKNOWN = exports.HMSH_CODE_MEMFLOW_RETRYABLE = exports.HMSH_CODE_MEMFLOW_FATAL = exports.HMSH_CODE_MEMFLOW_MAXED = exports.HMSH_CODE_MEMFLOW_TIMEOUT = exports.HMSH_CODE_MEMFLOW_WAIT = exports.HMSH_CODE_MEMFLOW_PROXY = exports.HMSH_CODE_MEMFLOW_CHILD = exports.HMSH_CODE_MEMFLOW_ALL = exports.HMSH_CODE_MEMFLOW_SLEEP = exports.HMSH_CODE_UNACKED = exports.HMSH_CODE_TIMEOUT = exports.HMSH_CODE_UNKNOWN = exports.HMSH_CODE_INTERRUPT = exports.HMSH_CODE_NOTFOUND = exports.HMSH_CODE_PENDING = exports.HMSH_CODE_SUCCESS = exports.HMSH_IS_CLUSTER = exports.HMSH_SIGNAL_EXPIRE = exports.HMSH_TELEMETRY = exports.HMSH_LOGLEVEL = void 0;
|
|
4
4
|
/**
|
|
5
5
|
* Determines the log level for the application. The default is 'info'.
|
|
6
6
|
*/
|
|
@@ -13,6 +13,10 @@ exports.HMSH_TELEMETRY = process.env.HMSH_TELEMETRY || 'info';
|
|
|
13
13
|
* Default cleanup time for signal in the db when its associated job is completed.
|
|
14
14
|
*/
|
|
15
15
|
exports.HMSH_SIGNAL_EXPIRE = 3600; //seconds
|
|
16
|
+
/**
|
|
17
|
+
* Determines if the system is running in cluster mode (for Redis cluster support).
|
|
18
|
+
*/
|
|
19
|
+
exports.HMSH_IS_CLUSTER = process.env.HMSH_IS_CLUSTER === 'true';
|
|
16
20
|
// HOTMESH STATUS CODES
|
|
17
21
|
exports.HMSH_CODE_SUCCESS = 200;
|
|
18
22
|
exports.HMSH_CODE_PENDING = 202;
|
|
@@ -131,3 +135,16 @@ exports.HMSH_GUID_SIZE = Math.min(parseInt(process.env.HMSH_GUID_SIZE, 10) || 22
|
|
|
131
135
|
* Default task queue name used when no task queue is specified
|
|
132
136
|
*/
|
|
133
137
|
exports.DEFAULT_TASK_QUEUE = 'default';
|
|
138
|
+
/**
|
|
139
|
+
* PostgreSQL NOTIFY payload limit. If a job message exceeds this size,
|
|
140
|
+
* a reference message is sent instead and the subscriber fetches via getState.
|
|
141
|
+
* PostgreSQL hard limit is 8000 bytes; default 7500 provides safety margin.
|
|
142
|
+
*/
|
|
143
|
+
exports.HMSH_NOTIFY_PAYLOAD_LIMIT = parseInt(process.env.HMSH_NOTIFY_PAYLOAD_LIMIT, 10) || 7500;
|
|
144
|
+
/**
|
|
145
|
+
* Serializer compression threshold. When a stringified object exceeds this size
|
|
146
|
+
* in bytes, it will be gzipped and base64 encoded (with /b prefix) to reduce
|
|
147
|
+
* Redis hash storage size. Default 100 bytes - small enough to catch most
|
|
148
|
+
* workflow state but compression only applies if it actually reduces size.
|
|
149
|
+
*/
|
|
150
|
+
exports.HMSH_SERIALIZER_COMPRESSION_THRESHOLD = parseInt(process.env.HMSH_SERIALIZER_COMPRESSION_THRESHOLD, 10) || 100000000;
|
package/build/modules/utils.js
CHANGED
|
@@ -79,11 +79,38 @@ function identifyProvider(provider) {
|
|
|
79
79
|
else if (provider.toString().toLowerCase().includes('nats')) {
|
|
80
80
|
return 'nats';
|
|
81
81
|
}
|
|
82
|
+
else if ('defineCommand' in prototype ||
|
|
83
|
+
Object.keys(prototype).includes('multi')) {
|
|
84
|
+
return 'ioredis';
|
|
85
|
+
}
|
|
86
|
+
else if (Object.keys(prototype).includes('Multi')) {
|
|
87
|
+
return 'redis';
|
|
88
|
+
}
|
|
89
|
+
if (provider.constructor) {
|
|
90
|
+
if (provider.constructor.name === 'Redis' ||
|
|
91
|
+
provider.constructor.name === 'EventEmitter') {
|
|
92
|
+
if ('hset' in provider) {
|
|
93
|
+
return 'ioredis';
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
else if (provider.constructor.name === 'ProviderClient' ||
|
|
97
|
+
provider.constructor.name === 'Commander') {
|
|
98
|
+
if ('HSET' in provider) {
|
|
99
|
+
return 'redis';
|
|
100
|
+
}
|
|
101
|
+
}
|
|
102
|
+
}
|
|
82
103
|
let type = null;
|
|
83
104
|
if (Object.keys(provider).includes('connection') ||
|
|
84
105
|
!isNaN(provider.totalCount) && !isNaN(provider.idleCount)) {
|
|
85
106
|
type = 'postgres';
|
|
86
107
|
}
|
|
108
|
+
else if (Object.keys(provider).includes('Pipeline')) {
|
|
109
|
+
type = 'ioredis';
|
|
110
|
+
}
|
|
111
|
+
else if (Object.keys(provider).includes('createClient')) {
|
|
112
|
+
type = 'redis';
|
|
113
|
+
}
|
|
87
114
|
else if (prototype.constructor.toString().includes('NatsConnectionImpl')) {
|
|
88
115
|
type = 'nats';
|
|
89
116
|
}
|
package/build/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@hotmeshio/hotmesh",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.6.1",
|
|
4
4
|
"description": "Permanent-Memory Workflows & AI Agents",
|
|
5
5
|
"main": "./build/index.js",
|
|
6
6
|
"types": "./build/index.d.ts",
|
|
@@ -23,9 +23,11 @@
|
|
|
23
23
|
"test:await": "NODE_ENV=test jest ./tests/functional/awaiter/postgres.test.ts --detectOpenHandles --forceExit --verbose",
|
|
24
24
|
"test:compile": "NODE_ENV=test jest ./tests/functional/compile/index.test.ts --detectOpenHandles --forceExit --verbose",
|
|
25
25
|
"test:connect": "NODE_ENV=test jest ./tests/unit/services/connector/* --detectOpenHandles --forceExit --verbose",
|
|
26
|
+
"test:connect:ioredis": "NODE_ENV=test jest ./tests/unit/services/connector/providers/ioredis.test.ts --detectOpenHandles --forceExit --verbose",
|
|
26
27
|
"test:connect:postgres": "NODE_ENV=test jest ./tests/unit/services/connector/providers/postgres.test.ts --detectOpenHandles --forceExit --verbose",
|
|
28
|
+
"test:connect:redis": "NODE_ENV=test jest ./tests/unit/services/connector/providers/ioredis.test.ts --detectOpenHandles --forceExit --verbose",
|
|
27
29
|
"test:connect:nats": "NODE_ENV=test jest ./tests/unit/services/connector/providers/nats.test.ts --detectOpenHandles --forceExit --verbose",
|
|
28
|
-
"test:memflow": "NODE_ENV=test jest ./tests/memflow
|
|
30
|
+
"test:memflow": "NODE_ENV=test jest ./tests/memflow/*/*.test.ts --detectOpenHandles --forceExit --verbose",
|
|
29
31
|
"test:memflow:basic": "HMSH_LOGLEVEL=info NODE_ENV=test jest ./tests/memflow/basic/postgres.test.ts --detectOpenHandles --forceExit --verbose",
|
|
30
32
|
"test:memflow:collision": "NODE_ENV=test jest ./tests/memflow/collision/*.test.ts --detectOpenHandles --forceExit --verbose",
|
|
31
33
|
"test:memflow:fatal": "NODE_ENV=test jest ./tests/memflow/fatal/*.test.ts --detectOpenHandles --forceExit --verbose",
|
|
@@ -62,9 +64,15 @@
|
|
|
62
64
|
"test:signal": "NODE_ENV=test jest ./tests/functional/signal/*.test.ts --detectOpenHandles --forceExit --verbose",
|
|
63
65
|
"test:status": "NODE_ENV=test jest ./tests/functional/status/index.test.ts --detectOpenHandles --forceExit --verbose",
|
|
64
66
|
"test:providers": "NODE_ENV=test jest ./tests/functional/*/providers/*/*.test.ts --detectOpenHandles --forceExit --verbose",
|
|
67
|
+
"test:store:ioredis": "NODE_ENV=test jest ./tests/functional/store/providers/redis/ioredis.test.ts --detectOpenHandles --forceExit --verbose",
|
|
68
|
+
"test:store:redis": "NODE_ENV=test jest ./tests/functional/store/providers/redis/redis.test.ts --detectOpenHandles --forceExit --verbose",
|
|
65
69
|
"test:store:postgres": "NODE_ENV=test jest ./tests/functional/store/providers/postgres/postgres.test.ts --detectOpenHandles --forceExit --verbose",
|
|
70
|
+
"test:stream:ioredis": "NODE_ENV=test jest ./tests/functional/stream/providers/redis/ioredis.test.ts --detectOpenHandles --forceExit --verbose",
|
|
71
|
+
"test:stream:redis": "NODE_ENV=test jest ./tests/functional/stream/providers/redis/redis.test.ts --detectOpenHandles --forceExit --verbose",
|
|
66
72
|
"test:stream:postgres": "NODE_ENV=test jest ./tests/functional/stream/providers/postgres/postgres.test.ts --detectOpenHandles --forceExit --verbose",
|
|
67
73
|
"test:stream:nats": "NODE_ENV=test jest ./tests/functional/stream/providers/nats/nats.test.ts --detectOpenHandles --forceExit --verbose",
|
|
74
|
+
"test:sub:ioredis": "NODE_ENV=test jest ./tests/functional/sub/providers/redis/ioredis.test.ts --detectOpenHandles --forceExit --verbose",
|
|
75
|
+
"test:sub:redis": "NODE_ENV=test jest ./tests/functional/sub/providers/redis/redis.test.ts --detectOpenHandles --forceExit --verbose",
|
|
68
76
|
"test:sub:postgres": "NODE_ENV=test jest ./tests/functional/sub/providers/postgres/postgres.test.ts --detectOpenHandles --forceExit --verbose",
|
|
69
77
|
"test:sub:nats": "NODE_ENV=test jest ./tests/functional/sub/providers/nats/nats.test.ts --detectOpenHandles --forceExit --verbose",
|
|
70
78
|
"test:trigger": "NODE_ENV=test jest ./tests/unit/services/activities/trigger.test.ts --detectOpenHandles --forceExit --verbose",
|
|
@@ -106,11 +114,13 @@
|
|
|
106
114
|
"eslint-config-prettier": "^9.1.0",
|
|
107
115
|
"eslint-plugin-import": "^2.29.1",
|
|
108
116
|
"eslint-plugin-prettier": "^5.1.3",
|
|
117
|
+
"ioredis": "^5.3.2",
|
|
109
118
|
"javascript-obfuscator": "^0.6.2",
|
|
110
119
|
"jest": "^29.5.0",
|
|
111
120
|
"nats": "^2.28.0",
|
|
112
121
|
"openai": "^5.9.0",
|
|
113
122
|
"pg": "^8.10.0",
|
|
123
|
+
"redis": "^4.6.13",
|
|
114
124
|
"rimraf": "^4.4.1",
|
|
115
125
|
"terser": "^5.37.0",
|
|
116
126
|
"ts-jest": "^29.0.5",
|
|
@@ -120,7 +130,9 @@
|
|
|
120
130
|
"typescript": "^5.0.4"
|
|
121
131
|
},
|
|
122
132
|
"peerDependencies": {
|
|
133
|
+
"ioredis": "^4.0.0 || ^5.0.0",
|
|
123
134
|
"nats": "^2.0.0",
|
|
124
|
-
"pg": "^8.0.0"
|
|
135
|
+
"pg": "^8.0.0",
|
|
136
|
+
"redis": "^4.0.0"
|
|
125
137
|
}
|
|
126
138
|
}
|
|
@@ -3,7 +3,7 @@ import { ProviderConfig, ProviderNativeClient } from '../../types/provider';
|
|
|
3
3
|
export declare class ConnectorService {
|
|
4
4
|
static disconnectAll(): Promise<void>;
|
|
5
5
|
/**
|
|
6
|
-
* Connect to a provider (
|
|
6
|
+
* Connect to a provider (redis, nats, postgres) and return the native
|
|
7
7
|
* client. Connections are handled by the engine and worker routers at
|
|
8
8
|
* initialization, but the factory method provided here is useful
|
|
9
9
|
* for testing provider configurations.
|
|
@@ -2,15 +2,19 @@
|
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
3
|
exports.ConnectorService = void 0;
|
|
4
4
|
const utils_1 = require("../../modules/utils");
|
|
5
|
+
const ioredis_1 = require("./providers/ioredis");
|
|
5
6
|
const nats_1 = require("./providers/nats");
|
|
6
7
|
const postgres_1 = require("./providers/postgres");
|
|
8
|
+
const redis_1 = require("./providers/redis");
|
|
7
9
|
class ConnectorService {
|
|
8
10
|
static async disconnectAll() {
|
|
11
|
+
await redis_1.RedisConnection.disconnectAll();
|
|
12
|
+
await ioredis_1.RedisConnection.disconnectAll();
|
|
9
13
|
await postgres_1.PostgresConnection.disconnectAll();
|
|
10
14
|
await nats_1.NatsConnection.disconnectAll();
|
|
11
15
|
}
|
|
12
16
|
/**
|
|
13
|
-
* Connect to a provider (
|
|
17
|
+
* Connect to a provider (redis, nats, postgres) and return the native
|
|
14
18
|
* client. Connections are handled by the engine and worker routers at
|
|
15
19
|
* initialization, but the factory method provided here is useful
|
|
16
20
|
* for testing provider configurations.
|
|
@@ -69,10 +73,20 @@ class ConnectorService {
|
|
|
69
73
|
const providerClass = ProviderConfig.class;
|
|
70
74
|
const options = ProviderConfig.options;
|
|
71
75
|
const providerName = ProviderConfig.provider || (0, utils_1.identifyProvider)(providerClass); //e.g. 'postgres.poolclient'
|
|
76
|
+
if (!providerName) {
|
|
77
|
+
throw new Error(`Unable to identify provider type. Please explicitly set the 'provider' field in your connection config. ` +
|
|
78
|
+
`Received class: ${providerClass?.constructor?.name || 'unknown'}`);
|
|
79
|
+
}
|
|
72
80
|
const providerType = providerName.split('.')[0]; //e.g. 'postgres'
|
|
73
81
|
let clientInstance;
|
|
74
82
|
const id = (0, utils_1.guid)();
|
|
75
83
|
switch (providerType) {
|
|
84
|
+
case 'redis':
|
|
85
|
+
clientInstance = await redis_1.RedisConnection.connect(id, providerClass, options, { provider: providerName });
|
|
86
|
+
break;
|
|
87
|
+
case 'ioredis':
|
|
88
|
+
clientInstance = await ioredis_1.RedisConnection.connect(id, providerClass, options, { provider: providerName });
|
|
89
|
+
break;
|
|
76
90
|
case 'nats':
|
|
77
91
|
clientInstance = await nats_1.NatsConnection.connect(id, providerClass, options, { provider: providerName });
|
|
78
92
|
break;
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import { AbstractConnection } from '..';
|
|
2
|
+
import { IORedisClientOptions as RedisClientOptions, IORedisClassType as RedisClassType, IORedisClientType as RedisClientType } from '../../../types/redis';
|
|
3
|
+
declare class RedisConnection extends AbstractConnection<RedisClassType, RedisClientOptions> {
|
|
4
|
+
defaultOptions: RedisClientOptions;
|
|
5
|
+
createConnection(Redis: RedisClassType, options: RedisClientOptions): Promise<RedisClientType>;
|
|
6
|
+
getClient(): RedisClientType;
|
|
7
|
+
closeConnection(connection: RedisClientType): Promise<void>;
|
|
8
|
+
}
|
|
9
|
+
export { RedisConnection };
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.RedisConnection = void 0;
|
|
4
|
+
const __1 = require("..");
|
|
5
|
+
class RedisConnection extends __1.AbstractConnection {
|
|
6
|
+
constructor() {
|
|
7
|
+
super(...arguments);
|
|
8
|
+
this.defaultOptions = {
|
|
9
|
+
host: 'localhost',
|
|
10
|
+
port: 6379,
|
|
11
|
+
};
|
|
12
|
+
}
|
|
13
|
+
async createConnection(Redis, options) {
|
|
14
|
+
return new Redis(options);
|
|
15
|
+
}
|
|
16
|
+
getClient() {
|
|
17
|
+
if (!this.connection) {
|
|
18
|
+
throw new Error('Redis client is not connected');
|
|
19
|
+
}
|
|
20
|
+
return this.connection;
|
|
21
|
+
}
|
|
22
|
+
async closeConnection(connection) {
|
|
23
|
+
await connection.quit();
|
|
24
|
+
}
|
|
25
|
+
}
|
|
26
|
+
exports.RedisConnection = RedisConnection;
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import { AbstractConnection } from '..';
|
|
2
|
+
import { RedisRedisClassType as RedisClassType, RedisRedisClientType as RedisClientType, RedisRedisClientOptions as RedisClientOptions } from '../../../types/redis';
|
|
3
|
+
declare class RedisConnection extends AbstractConnection<RedisClassType, RedisClientOptions> {
|
|
4
|
+
defaultOptions: RedisClientOptions;
|
|
5
|
+
createConnection(Redis: Partial<RedisClassType>, options: RedisClientOptions): Promise<Partial<RedisClientType>>;
|
|
6
|
+
getClient(): RedisClientType;
|
|
7
|
+
closeConnection(connection: any): Promise<void>;
|
|
8
|
+
}
|
|
9
|
+
export { RedisConnection, RedisClientType };
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.RedisConnection = void 0;
|
|
4
|
+
const __1 = require("..");
|
|
5
|
+
class RedisConnection extends __1.AbstractConnection {
|
|
6
|
+
constructor() {
|
|
7
|
+
super(...arguments);
|
|
8
|
+
this.defaultOptions = {
|
|
9
|
+
socket: {
|
|
10
|
+
host: 'localhost',
|
|
11
|
+
port: 6379,
|
|
12
|
+
tls: false,
|
|
13
|
+
},
|
|
14
|
+
};
|
|
15
|
+
}
|
|
16
|
+
async createConnection(Redis, options) {
|
|
17
|
+
return new Promise((resolve, reject) => {
|
|
18
|
+
const client = Redis.createClient(options);
|
|
19
|
+
client.on('error', (error) => {
|
|
20
|
+
reject(error);
|
|
21
|
+
});
|
|
22
|
+
client.on('ready', () => {
|
|
23
|
+
resolve(client);
|
|
24
|
+
});
|
|
25
|
+
client.connect();
|
|
26
|
+
});
|
|
27
|
+
}
|
|
28
|
+
getClient() {
|
|
29
|
+
if (!this.connection) {
|
|
30
|
+
throw new Error('Redis client is not connected');
|
|
31
|
+
}
|
|
32
|
+
return this.connection;
|
|
33
|
+
}
|
|
34
|
+
async closeConnection(connection) {
|
|
35
|
+
await connection.quit();
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
exports.RedisConnection = RedisConnection;
|
|
@@ -547,7 +547,12 @@ class EngineService {
|
|
|
547
547
|
*/
|
|
548
548
|
async sub(topic, callback) {
|
|
549
549
|
const subscriptionCallback = async (topic, message) => {
|
|
550
|
-
|
|
550
|
+
let jobOutput = message.job;
|
|
551
|
+
// If _ref is true, payload was too large - fetch full job data via getState
|
|
552
|
+
if (message._ref && message.job?.metadata) {
|
|
553
|
+
jobOutput = await this.getState(message.job.metadata.tpc, message.job.metadata.jid);
|
|
554
|
+
}
|
|
555
|
+
callback(message.topic, jobOutput);
|
|
551
556
|
};
|
|
552
557
|
return await this.subscribe.subscribe(key_1.KeyType.QUORUM, subscriptionCallback, this.appId, topic);
|
|
553
558
|
}
|
|
@@ -562,7 +567,12 @@ class EngineService {
|
|
|
562
567
|
*/
|
|
563
568
|
async psub(wild, callback) {
|
|
564
569
|
const subscriptionCallback = async (topic, message) => {
|
|
565
|
-
|
|
570
|
+
let jobOutput = message.job;
|
|
571
|
+
// If _ref is true, payload was too large - fetch full job data via getState
|
|
572
|
+
if (message._ref && message.job?.metadata) {
|
|
573
|
+
jobOutput = await this.getState(message.job.metadata.tpc, message.job.metadata.jid);
|
|
574
|
+
}
|
|
575
|
+
callback(message.topic, jobOutput);
|
|
566
576
|
};
|
|
567
577
|
return await this.subscribe.psubscribe(key_1.KeyType.QUORUM, subscriptionCallback, this.appId, wild);
|
|
568
578
|
}
|
|
@@ -88,7 +88,24 @@ class QuorumService {
|
|
|
88
88
|
self.engine.processWebHooks();
|
|
89
89
|
}
|
|
90
90
|
else if (message.type === 'job') {
|
|
91
|
-
|
|
91
|
+
let jobOutput = message.job;
|
|
92
|
+
// If _ref is true, payload was too large - fetch full job data via getState
|
|
93
|
+
if (message._ref && message.job?.metadata) {
|
|
94
|
+
try {
|
|
95
|
+
jobOutput = await self.engine.getState(message.job.metadata.tpc, message.job.metadata.jid);
|
|
96
|
+
self.logger.debug('quorum-job-ref-resolved', {
|
|
97
|
+
jid: message.job.metadata.jid,
|
|
98
|
+
});
|
|
99
|
+
}
|
|
100
|
+
catch (err) {
|
|
101
|
+
self.logger.error('quorum-job-ref-error', {
|
|
102
|
+
jid: message.job.metadata.jid,
|
|
103
|
+
error: err,
|
|
104
|
+
});
|
|
105
|
+
return; // Can't route without job data
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
self.engine.routeToSubscribers(message.topic, jobOutput);
|
|
92
109
|
}
|
|
93
110
|
else if (message.type === 'cron') {
|
|
94
111
|
self.engine.processTimeHooks();
|
|
@@ -3,12 +3,20 @@ Object.defineProperty(exports, "__esModule", { value: true });
|
|
|
3
3
|
exports.SearchServiceFactory = void 0;
|
|
4
4
|
const utils_1 = require("../../modules/utils");
|
|
5
5
|
const postgres_1 = require("./providers/postgres/postgres");
|
|
6
|
+
const ioredis_1 = require("./providers/redis/ioredis");
|
|
7
|
+
const redis_1 = require("./providers/redis/redis");
|
|
6
8
|
class SearchServiceFactory {
|
|
7
9
|
static async init(providerClient, storeProviderClient, namespace, appId, logger) {
|
|
8
10
|
let service;
|
|
9
11
|
if ((0, utils_1.identifyProvider)(providerClient) === 'postgres') {
|
|
10
12
|
service = new postgres_1.PostgresSearchService(providerClient, storeProviderClient);
|
|
11
13
|
}
|
|
14
|
+
else if ((0, utils_1.identifyProvider)(providerClient) === 'redis') {
|
|
15
|
+
service = new redis_1.RedisSearchService(providerClient, storeProviderClient);
|
|
16
|
+
}
|
|
17
|
+
else {
|
|
18
|
+
service = new ioredis_1.IORedisSearchService(providerClient, storeProviderClient);
|
|
19
|
+
}
|
|
12
20
|
await service.init(namespace, appId, logger);
|
|
13
21
|
return service;
|
|
14
22
|
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import { SearchService } from '../../index';
|
|
2
|
+
import { ILogger } from '../../../logger';
|
|
3
|
+
import { IORedisClientType } from '../../../../types/redis';
|
|
4
|
+
declare class IORedisSearchService extends SearchService<IORedisClientType> {
|
|
5
|
+
constructor(searchClient: IORedisClientType, storeClient?: IORedisClientType);
|
|
6
|
+
init(namespace: string, appId: string, logger: ILogger): Promise<void>;
|
|
7
|
+
createSearchIndex(indexName: string, prefixes: string[], schema: string[]): Promise<void>;
|
|
8
|
+
listSearchIndexes(): Promise<string[]>;
|
|
9
|
+
updateContext(key: string, fields: Record<string, string>): Promise<any>;
|
|
10
|
+
setFields(key: string, fields: Record<string, string>): Promise<number>;
|
|
11
|
+
getField(key: string, field: string): Promise<string>;
|
|
12
|
+
getFields(key: string, fields: string[]): Promise<string[]>;
|
|
13
|
+
getAllFields(key: string): Promise<Record<string, string>>;
|
|
14
|
+
deleteFields(key: string, fields: string[]): Promise<number>;
|
|
15
|
+
incrementFieldByFloat(key: string, field: string, increment: number): Promise<number>;
|
|
16
|
+
sendQuery(...query: [string, ...string[]]): Promise<any>;
|
|
17
|
+
sendIndexedQuery(index: string, query: string[]): Promise<string[]>;
|
|
18
|
+
findEntities(): Promise<any[]>;
|
|
19
|
+
findEntityById(): Promise<any>;
|
|
20
|
+
findEntitiesByCondition(): Promise<any[]>;
|
|
21
|
+
createEntityIndex(): Promise<void>;
|
|
22
|
+
}
|
|
23
|
+
export { IORedisSearchService };
|
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.IORedisSearchService = void 0;
|
|
4
|
+
const index_1 = require("../../index");
|
|
5
|
+
/**
 * Search-provider implementation backed by the `ioredis` client.
 *
 * Index management goes through raw RediSearch (FT.*) commands via
 * `client.call(...)`; workflow user data ("udata") is stored in plain Redis
 * hashes and manipulated with HSET/HGET/HMGET/HGETALL/HDEL/HINCRBYFLOAT.
 * Entity-level query methods are PostgreSQL-only and throw here.
 */
class IORedisSearchService extends index_1.SearchService {
    /**
     * @param {object} searchClient - ioredis client used for all search/hash commands
     * @param {object} [storeClient] - optional secondary client forwarded to the base class
     */
    constructor(searchClient, storeClient) {
        super(searchClient, storeClient);
    }
    /**
     * Binds the service to a namespace/app and installs the logger.
     * Performs no I/O.
     */
    async init(namespace, appId, logger) {
        this.namespace = namespace;
        this.appId = appId;
        this.logger = logger;
    }
    /**
     * Creates a RediSearch index over HASH keys matching `prefixes`.
     * Rethrows any FT.CREATE failure (including "index already exists").
     */
    async createSearchIndex(indexName, prefixes, schema) {
        try {
            await this.searchClient.call('FT.CREATE', indexName, 'ON', 'HASH', 'PREFIX', prefixes.length.toString(), ...prefixes, 'SCHEMA', ...schema);
        }
        catch (error) {
            // FIX: was logged at `info`; a rethrown failure is an error,
            // matching the logging level of every sibling method.
            this.logger.error('Error creating search index', { error });
            throw error;
        }
    }
    /** Lists all RediSearch indexes (FT._LIST). */
    async listSearchIndexes() {
        try {
            const indexes = await this.searchClient.call('FT._LIST');
            return indexes;
        }
        catch (error) {
            // FIX: was logged at `info`; use `error` for consistency.
            this.logger.error('Error listing search indexes', { error });
            throw error;
        }
    }
    /**
     * Dispatches a user-data operation against the hash at `key`, routed by
     * the presence of an `@udata:<op>` field. When a replay-id field is
     * present (a field name containing a hyphen and not starting with `@`),
     * the operation's result is also written back to the hash under that
     * field so the step can be replayed without re-executing.
     * Falls through to a plain `setFields` when no `@udata` op is present.
     */
    async updateContext(key, fields) {
        // Find replay ID if present (field with hyphen, not the @udata field)
        const replayId = Object.keys(fields).find((k) => k.includes('-') && !k.startsWith('@'));
        // Route based on @udata operation
        if ('@udata:set' in fields) {
            // Payload may be a flat [k1, v1, k2, v2, ...] array or a plain object.
            const udata = JSON.parse(fields['@udata:set']);
            const fieldsToSet = Array.isArray(udata)
                ? Object.fromEntries(Array.from({ length: udata.length / 2 }, (_, i) => [
                    udata[i * 2],
                    udata[i * 2 + 1],
                ]))
                : udata;
            const result = await this.setFields(key, fieldsToSet);
            if (replayId)
                await this.searchClient.hset(key, { [replayId]: String(result) });
            return result;
        }
        if ('@udata:get' in fields) {
            // Value is the field name itself (not JSON).
            const result = await this.getField(key, fields['@udata:get']);
            if (replayId)
                await this.searchClient.hset(key, { [replayId]: result });
            return result;
        }
        if ('@udata:mget' in fields) {
            const result = await this.getFields(key, JSON.parse(fields['@udata:mget']));
            if (replayId)
                // '|||' delimiter joins the multi-value replay payload into one string
                await this.searchClient.hset(key, { [replayId]: result.join('|||') });
            return result;
        }
        if ('@udata:delete' in fields) {
            const result = await this.deleteFields(key, JSON.parse(fields['@udata:delete']));
            if (replayId)
                await this.searchClient.hset(key, { [replayId]: String(result) });
            return result;
        }
        if ('@udata:increment' in fields) {
            const { field, value } = JSON.parse(fields['@udata:increment']);
            const result = await this.incrementFieldByFloat(key, field, value);
            if (replayId)
                await this.searchClient.hset(key, { [replayId]: String(result) });
            return result;
        }
        if ('@udata:multiply' in fields) {
            // Multiply is implemented in log space: HINCRBYFLOAT by ln(value).
            // NOTE(review): presumably the stored value is decoded with Math.exp
            // by the reader — confirm against the consuming code.
            const { field, value } = JSON.parse(fields['@udata:multiply']);
            const result = await this.incrementFieldByFloat(key, field, Math.log(value));
            if (replayId)
                await this.searchClient.hset(key, { [replayId]: String(result) });
            return result;
        }
        if ('@udata:all' in fields) {
            // Return only underscore-prefixed fields (the user-data namespace
            // within the hash), excluding system/process fields.
            const all = await this.getAllFields(key);
            const result = Object.fromEntries(Object.entries(all).filter(([k]) => k.startsWith('_')));
            if (replayId)
                await this.searchClient.hset(key, { [replayId]: JSON.stringify(result) });
            return result;
        }
        // Default: call setFields
        return await this.setFields(key, fields);
    }
    /**
     * HSET; returns the number of newly created fields.
     * @throws rethrows after logging on client failure
     */
    async setFields(key, fields) {
        try {
            const result = await this.searchClient.hset(key, fields);
            return Number(result);
        }
        catch (error) {
            this.logger.error(`Error setting fields for key: ${key}`, { error });
            throw error;
        }
    }
    /** HGET a single field (null when absent). */
    async getField(key, field) {
        try {
            return await this.searchClient.hget(key, field);
        }
        catch (error) {
            this.logger.error(`Error getting field ${field} for key: ${key}`, {
                error,
            });
            throw error;
        }
    }
    /** HMGET the named fields, in order. */
    async getFields(key, fields) {
        try {
            return await this.searchClient.hmget(key, [...fields]);
        }
        catch (error) {
            this.logger.error(`Error getting fields for key: ${key}`, { error });
            throw error;
        }
    }
    /** HGETALL the hash at `key`. */
    async getAllFields(key) {
        try {
            return await this.searchClient.hgetall(key);
        }
        catch (error) {
            this.logger.error(`Error getting fields for key: ${key}`, { error });
            throw error;
        }
    }
    /** HDEL; returns the number of fields removed. */
    async deleteFields(key, fields) {
        try {
            const result = await this.searchClient.hdel(key, ...fields);
            return Number(result);
        }
        catch (error) {
            this.logger.error(`Error deleting fields for key: ${key}`, { error });
            throw error;
        }
    }
    /** HINCRBYFLOAT; returns the field's new numeric value. */
    async incrementFieldByFloat(key, field, increment) {
        try {
            const result = await this.searchClient.hincrbyfloat(key, field, increment);
            return Number(result);
        }
        catch (error) {
            this.logger.error(`Error incrementing field ${field} for key: ${key}`, {
                error,
            });
            throw error;
        }
    }
    /** Sends a raw command (e.g. an FT.* query) via `client.call`. */
    async sendQuery(...query) {
        try {
            return await this.searchClient.call(...query);
        }
        catch (error) {
            this.logger.error('Error executing query', { error });
            throw error;
        }
    }
    /**
     * Runs FT.SEARCH against `index`; if `query` already begins with an
     * FT.* command it is sent verbatim instead.
     */
    async sendIndexedQuery(index, query) {
        try {
            if (query[0]?.startsWith('FT.')) {
                const [cmd, ...rest] = query;
                return (await this.searchClient.call(cmd, ...rest));
            }
            return (await this.searchClient.call('FT.SEARCH', index, ...query));
        }
        catch (error) {
            this.logger.error('Error executing query', { error });
            throw error;
        }
    }
    // Entity methods - not implemented for Redis (postgres-specific JSONB operations)
    async findEntities() {
        throw new Error('Entity findEntities not supported in Redis - use PostgreSQL');
    }
    async findEntityById() {
        throw new Error('Entity findEntityById not supported in Redis - use PostgreSQL');
    }
    async findEntitiesByCondition() {
        throw new Error('Entity findEntitiesByCondition not supported in Redis - use PostgreSQL');
    }
    async createEntityIndex() {
        throw new Error('Entity createEntityIndex not supported in Redis - use PostgreSQL');
    }
}
exports.IORedisSearchService = IORedisSearchService;
|