@hotmeshio/hotmesh 0.5.6 → 0.5.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/README.md +6 -32
  2. package/build/index.d.ts +1 -3
  3. package/build/index.js +1 -5
  4. package/build/modules/enums.d.ts +0 -5
  5. package/build/modules/enums.js +1 -6
  6. package/build/modules/utils.d.ts +1 -1
  7. package/build/modules/utils.js +2 -29
  8. package/build/package.json +4 -16
  9. package/build/services/activities/hook.js +1 -5
  10. package/build/services/compiler/index.d.ts +2 -2
  11. package/build/services/compiler/index.js +4 -4
  12. package/build/services/connector/factory.d.ts +1 -1
  13. package/build/services/connector/factory.js +1 -11
  14. package/build/services/exporter/index.d.ts +8 -8
  15. package/build/services/exporter/index.js +8 -8
  16. package/build/services/memflow/client.js +1 -8
  17. package/build/services/memflow/connection.d.ts +0 -2
  18. package/build/services/memflow/connection.js +0 -2
  19. package/build/services/memflow/exporter.d.ts +3 -3
  20. package/build/services/memflow/exporter.js +3 -3
  21. package/build/services/memflow/index.d.ts +1 -1
  22. package/build/services/memflow/index.js +1 -1
  23. package/build/services/memflow/schemas/factory.js +1 -1
  24. package/build/services/memflow/search.d.ts +11 -4
  25. package/build/services/memflow/search.js +98 -71
  26. package/build/services/memflow/worker.d.ts +1 -1
  27. package/build/services/memflow/worker.js +1 -1
  28. package/build/services/meshcall/index.d.ts +1 -1
  29. package/build/services/meshcall/index.js +1 -1
  30. package/build/services/reporter/index.d.ts +1 -1
  31. package/build/services/reporter/index.js +12 -12
  32. package/build/services/search/factory.js +0 -8
  33. package/build/services/search/providers/postgres/postgres.js +1 -1
  34. package/build/services/store/cache.d.ts +1 -1
  35. package/build/services/store/cache.js +1 -1
  36. package/build/services/store/factory.js +1 -9
  37. package/build/services/store/index.d.ts +1 -1
  38. package/build/services/store/providers/postgres/kvtypes/hash/index.js +57 -0
  39. package/build/services/store/providers/postgres/kvtypes/hash/udata.d.ts +10 -0
  40. package/build/services/store/providers/postgres/kvtypes/hash/udata.js +384 -0
  41. package/build/services/store/providers/postgres/postgres.js +2 -6
  42. package/build/services/stream/factory.js +0 -16
  43. package/build/services/sub/factory.js +0 -8
  44. package/build/services/sub/providers/nats/nats.js +0 -1
  45. package/build/services/task/index.js +0 -1
  46. package/build/types/activity.d.ts +1 -5
  47. package/build/types/hotmesh.d.ts +0 -5
  48. package/build/types/index.d.ts +0 -1
  49. package/build/types/index.js +1 -4
  50. package/build/types/job.d.ts +1 -1
  51. package/build/types/memflow.d.ts +5 -4
  52. package/build/types/meshcall.d.ts +0 -25
  53. package/build/types/provider.d.ts +1 -1
  54. package/build/types/stream.d.ts +1 -6
  55. package/index.ts +0 -4
  56. package/package.json +4 -16
  57. package/build/services/connector/providers/ioredis.d.ts +0 -9
  58. package/build/services/connector/providers/ioredis.js +0 -26
  59. package/build/services/connector/providers/redis.d.ts +0 -9
  60. package/build/services/connector/providers/redis.js +0 -38
  61. package/build/services/search/providers/redis/ioredis.d.ts +0 -23
  62. package/build/services/search/providers/redis/ioredis.js +0 -134
  63. package/build/services/search/providers/redis/redis.d.ts +0 -23
  64. package/build/services/search/providers/redis/redis.js +0 -147
  65. package/build/services/store/providers/redis/_base.d.ts +0 -137
  66. package/build/services/store/providers/redis/_base.js +0 -980
  67. package/build/services/store/providers/redis/ioredis.d.ts +0 -20
  68. package/build/services/store/providers/redis/ioredis.js +0 -180
  69. package/build/services/store/providers/redis/redis.d.ts +0 -18
  70. package/build/services/store/providers/redis/redis.js +0 -199
  71. package/build/services/stream/providers/redis/ioredis.d.ts +0 -61
  72. package/build/services/stream/providers/redis/ioredis.js +0 -272
  73. package/build/services/stream/providers/redis/redis.d.ts +0 -61
  74. package/build/services/stream/providers/redis/redis.js +0 -305
  75. package/build/services/sub/providers/redis/ioredis.d.ts +0 -17
  76. package/build/services/sub/providers/redis/ioredis.js +0 -81
  77. package/build/services/sub/providers/redis/redis.d.ts +0 -17
  78. package/build/services/sub/providers/redis/redis.js +0 -72
  79. package/build/types/redis.d.ts +0 -258
  80. package/build/types/redis.js +0 -11
@@ -0,0 +1,384 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.createUdataOperations = void 0;
4
+ const utils_1 = require("./utils");
5
+ function createUdataOperations(context) {
6
+ return {
7
+ handleUdataSet,
8
+ handleUdataGet,
9
+ handleUdataMget,
10
+ handleUdataDelete,
11
+ handleUdataIncrement,
12
+ handleUdataMultiply,
13
+ handleUdataAll,
14
+ };
15
+ function handleUdataSet(key, fields, options) {
16
+ const tableName = context.tableForKey(key, 'hash');
17
+ const replayId = Object.keys(fields).find((k) => k.includes('-') && k !== '@udata:set');
18
+ const udata = JSON.parse(fields['@udata:set']);
19
+ const params = [];
20
+ let sql = '';
21
+ // Extract the fields to set (can be object or key-value pairs)
22
+ const fieldsToSet = {};
23
+ if (typeof udata === 'object' && !Array.isArray(udata)) {
24
+ // Object format: { field1: 'value1', field2: 'value2' }
25
+ for (const [fieldName, value] of Object.entries(udata)) {
26
+ fieldsToSet[fieldName] = String(value);
27
+ }
28
+ }
29
+ else if (Array.isArray(udata)) {
30
+ // Array format: ['field1', 'value1', 'field2', 'value2']
31
+ for (let i = 0; i < udata.length; i += 2) {
32
+ const fieldName = udata[i];
33
+ const value = udata[i + 1];
34
+ fieldsToSet[fieldName] = String(value);
35
+ }
36
+ }
37
+ const fieldEntries = Object.entries(fieldsToSet);
38
+ if (fieldEntries.length === 0) {
39
+ // No fields to set, return a no-op
40
+ return { sql: 'SELECT 0 as count', params: [] };
41
+ }
42
+ const schemaName = context.safeName(context.appId);
43
+ if (replayId) {
44
+ // Version with replay storage
45
+ const placeholders = fieldEntries
46
+ .map(([fieldName, value], index) => {
47
+ const baseIndex = index * 3 + 3;
48
+ params.push(fieldName, value, 'udata');
49
+ return `($${baseIndex}, $${baseIndex + 1}, $${baseIndex + 2}::${schemaName}.type_enum)`;
50
+ })
51
+ .join(', ');
52
+ sql = `
53
+ WITH valid_job AS (
54
+ SELECT id FROM ${tableName} WHERE key = $1 AND is_live
55
+ ),
56
+ upsert_fields AS (
57
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
58
+ SELECT
59
+ job.id,
60
+ vals.field,
61
+ vals.value,
62
+ vals.type
63
+ FROM valid_job job
64
+ CROSS JOIN (
65
+ VALUES ${placeholders}
66
+ ) AS vals(field, value, type)
67
+ ON CONFLICT (job_id, field) DO UPDATE SET value = EXCLUDED.value
68
+ RETURNING 1 as field_count
69
+ ),
70
+ count_result AS (
71
+ SELECT COUNT(*) as new_fields_count FROM upsert_fields
72
+ ),
73
+ replay_insert AS (
74
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
75
+ SELECT job.id, $2, new_fields_count::text, $${2 + fieldEntries.length * 3 + 1}::${schemaName}.type_enum
76
+ FROM valid_job job, count_result
77
+ ON CONFLICT (job_id, field) DO UPDATE
78
+ SET value = EXCLUDED.value
79
+ RETURNING 1
80
+ )
81
+ SELECT new_fields_count FROM count_result
82
+ `;
83
+ params.unshift(key, replayId);
84
+ params.push((0, utils_1.deriveType)(replayId));
85
+ }
86
+ else {
87
+ // Version without replay storage
88
+ const placeholders = fieldEntries
89
+ .map(([fieldName, value], index) => {
90
+ const baseIndex = index * 3 + 2;
91
+ params.push(fieldName, value, 'udata');
92
+ return `($${baseIndex}, $${baseIndex + 1}, $${baseIndex + 2}::${schemaName}.type_enum)`;
93
+ })
94
+ .join(', ');
95
+ sql = `
96
+ WITH valid_job AS (
97
+ SELECT id FROM ${tableName} WHERE key = $1 AND is_live
98
+ )
99
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
100
+ SELECT
101
+ job.id,
102
+ vals.field,
103
+ vals.value,
104
+ vals.type
105
+ FROM valid_job job
106
+ CROSS JOIN (
107
+ VALUES ${placeholders}
108
+ ) AS vals(field, value, type)
109
+ ON CONFLICT (job_id, field) DO UPDATE SET value = EXCLUDED.value
110
+ RETURNING 1 as count
111
+ `;
112
+ params.unshift(key);
113
+ }
114
+ return { sql, params };
115
+ }
116
+ function handleUdataGet(key, fields, options) {
117
+ const tableName = context.tableForKey(key, 'hash');
118
+ const fieldName = fields['@udata:get'];
119
+ const replayId = Object.keys(fields).find((k) => k.includes('-') && k !== '@udata:get');
120
+ const params = [];
121
+ let sql = '';
122
+ if (replayId) {
123
+ sql = `
124
+ WITH field_data AS (
125
+ SELECT COALESCE(a.value, '') as field_value
126
+ FROM ${tableName} j
127
+ LEFT JOIN ${tableName}_attributes a ON j.id = a.job_id AND a.field = $2
128
+ WHERE j.key = $1 AND j.is_live
129
+ ),
130
+ replay_insert AS (
131
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
132
+ SELECT j.id, $3, field_value, $4
133
+ FROM ${tableName} j, field_data
134
+ WHERE j.key = $1 AND j.is_live
135
+ ON CONFLICT (job_id, field) DO UPDATE
136
+ SET value = EXCLUDED.value
137
+ RETURNING 1
138
+ )
139
+ SELECT field_value as new_value FROM field_data
140
+ `;
141
+ params.push(key, fieldName, replayId, (0, utils_1.deriveType)(replayId));
142
+ }
143
+ else {
144
+ sql = `
145
+ SELECT COALESCE(a.value, '') as new_value
146
+ FROM ${tableName} j
147
+ LEFT JOIN ${tableName}_attributes a ON j.id = a.job_id AND a.field = $2
148
+ WHERE j.key = $1 AND j.is_live
149
+ `;
150
+ params.push(key, fieldName);
151
+ }
152
+ return { sql, params };
153
+ }
154
+ function handleUdataMget(key, fields, options) {
155
+ const tableName = context.tableForKey(key, 'hash');
156
+ const fieldNames = JSON.parse(fields['@udata:mget']);
157
+ const replayId = Object.keys(fields).find((k) => k.includes('-') && k !== '@udata:mget');
158
+ const params = [];
159
+ let sql = '';
160
+ if (replayId) {
161
+ sql = `
162
+ WITH field_data AS (
163
+ SELECT array_agg(COALESCE(a.value, '') ORDER BY field_order.idx) as field_values
164
+ FROM ${tableName} j
165
+ CROSS JOIN (
166
+ SELECT unnest($2::text[]) as field_name, generate_subscripts($2::text[], 1) as idx
167
+ ) as field_order
168
+ LEFT JOIN ${tableName}_attributes a ON j.id = a.job_id AND a.field = field_order.field_name
169
+ WHERE j.key = $1 AND j.is_live
170
+ ),
171
+ replay_insert AS (
172
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
173
+ SELECT j.id, $3, array_to_string(field_values, '|||'), $4
174
+ FROM ${tableName} j, field_data
175
+ WHERE j.key = $1 AND j.is_live
176
+ ON CONFLICT (job_id, field) DO UPDATE
177
+ SET value = EXCLUDED.value
178
+ RETURNING 1
179
+ )
180
+ SELECT field_values as new_value FROM field_data
181
+ `;
182
+ params.push(key, fieldNames, replayId, (0, utils_1.deriveType)(replayId));
183
+ }
184
+ else {
185
+ sql = `
186
+ SELECT array_agg(COALESCE(a.value, '') ORDER BY field_order.idx) as new_value
187
+ FROM ${tableName} j
188
+ CROSS JOIN (
189
+ SELECT unnest($2::text[]) as field_name, generate_subscripts($2::text[], 1) as idx
190
+ ) as field_order
191
+ LEFT JOIN ${tableName}_attributes a ON j.id = a.job_id AND a.field = field_order.field_name
192
+ WHERE j.key = $1 AND j.is_live
193
+ `;
194
+ params.push(key, fieldNames);
195
+ }
196
+ return { sql, params };
197
+ }
198
+ function handleUdataDelete(key, fields, options) {
199
+ const tableName = context.tableForKey(key, 'hash');
200
+ const fieldNames = JSON.parse(fields['@udata:delete']);
201
+ const replayId = Object.keys(fields).find((k) => k.includes('-') && k !== '@udata:delete');
202
+ const params = [];
203
+ let sql = '';
204
+ if (replayId) {
205
+ sql = `
206
+ WITH deleted_fields AS (
207
+ DELETE FROM ${tableName}_attributes
208
+ WHERE job_id = (
209
+ SELECT id FROM ${tableName} WHERE key = $1 AND is_live
210
+ )
211
+ AND field = ANY($2::text[])
212
+ RETURNING 1 as deleted_count
213
+ ),
214
+ count_result AS (
215
+ SELECT COUNT(*) as total_deleted FROM deleted_fields
216
+ ),
217
+ replay_insert AS (
218
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
219
+ SELECT j.id, $3, total_deleted::text, $4
220
+ FROM ${tableName} j, count_result
221
+ WHERE j.key = $1 AND j.is_live
222
+ ON CONFLICT (job_id, field) DO UPDATE
223
+ SET value = EXCLUDED.value
224
+ RETURNING 1
225
+ )
226
+ SELECT total_deleted as new_value FROM count_result
227
+ `;
228
+ params.push(key, fieldNames, replayId, (0, utils_1.deriveType)(replayId));
229
+ }
230
+ else {
231
+ sql = `
232
+ WITH deleted_fields AS (
233
+ DELETE FROM ${tableName}_attributes
234
+ WHERE job_id = (
235
+ SELECT id FROM ${tableName} WHERE key = $1 AND is_live
236
+ )
237
+ AND field = ANY($2::text[])
238
+ RETURNING 1 as deleted_count
239
+ )
240
+ SELECT COUNT(*) as new_value FROM deleted_fields
241
+ `;
242
+ params.push(key, fieldNames);
243
+ }
244
+ return { sql, params };
245
+ }
246
+ function handleUdataIncrement(key, fields, options) {
247
+ const tableName = context.tableForKey(key, 'hash');
248
+ const { field, value } = JSON.parse(fields['@udata:increment']);
249
+ const replayId = Object.keys(fields).find((k) => k.includes('-') && k !== '@udata:increment');
250
+ const schemaName = context.safeName(context.appId);
251
+ const params = [];
252
+ let sql = '';
253
+ if (replayId) {
254
+ sql = `
255
+ WITH valid_job AS (
256
+ SELECT id FROM ${tableName} WHERE key = $1 AND is_live
257
+ ),
258
+ increment_result AS (
259
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
260
+ SELECT id, $2, $3::text, $4::${schemaName}.type_enum
261
+ FROM valid_job
262
+ ON CONFLICT (job_id, field) DO UPDATE
263
+ SET value = ((COALESCE(${tableName}_attributes.value, '0')::double precision) + $3::double precision)::text
264
+ RETURNING value
265
+ ),
266
+ replay_insert AS (
267
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
268
+ SELECT job.id, $5, inc.value, $6::${schemaName}.type_enum
269
+ FROM valid_job job, increment_result inc
270
+ ON CONFLICT (job_id, field) DO UPDATE
271
+ SET value = EXCLUDED.value
272
+ RETURNING 1
273
+ )
274
+ SELECT value as new_value FROM increment_result
275
+ `;
276
+ params.push(key, field, value, 'udata', replayId, (0, utils_1.deriveType)(replayId));
277
+ }
278
+ else {
279
+ sql = `
280
+ WITH valid_job AS (
281
+ SELECT id FROM ${tableName} WHERE key = $1 AND is_live
282
+ )
283
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
284
+ SELECT id, $2, $3::text, $4::${schemaName}.type_enum
285
+ FROM valid_job
286
+ ON CONFLICT (job_id, field) DO UPDATE
287
+ SET value = ((COALESCE(${tableName}_attributes.value, '0')::double precision) + $3::double precision)::text
288
+ RETURNING value as new_value
289
+ `;
290
+ params.push(key, field, value, 'udata');
291
+ }
292
+ return { sql, params };
293
+ }
294
+ function handleUdataMultiply(key, fields, options) {
295
+ const tableName = context.tableForKey(key, 'hash');
296
+ const { field, value } = JSON.parse(fields['@udata:multiply']);
297
+ const replayId = Object.keys(fields).find((k) => k.includes('-') && k !== '@udata:multiply');
298
+ const schemaName = context.safeName(context.appId);
299
+ const params = [];
300
+ let sql = '';
301
+ // For multiplication, we work with logarithms to support exponential multiplication
302
+ // log(a * b) = log(a) + log(b), so exp(log(a) + log(b)) = a * b
303
+ if (replayId) {
304
+ sql = `
305
+ WITH valid_job AS (
306
+ SELECT id FROM ${tableName} WHERE key = $1 AND is_live
307
+ ),
308
+ multiply_result AS (
309
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
310
+ SELECT id, $2, ln($3::double precision)::text, $4::${schemaName}.type_enum
311
+ FROM valid_job
312
+ ON CONFLICT (job_id, field) DO UPDATE
313
+ SET value = (COALESCE(${tableName}_attributes.value::double precision, 0) + ln($3::double precision))::text
314
+ RETURNING value
315
+ ),
316
+ replay_insert AS (
317
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
318
+ SELECT job.id, $5, mult.value, $6::${schemaName}.type_enum
319
+ FROM valid_job job, multiply_result mult
320
+ ON CONFLICT (job_id, field) DO UPDATE
321
+ SET value = EXCLUDED.value
322
+ RETURNING 1
323
+ )
324
+ SELECT value as new_value FROM multiply_result
325
+ `;
326
+ params.push(key, field, value, 'udata', replayId, (0, utils_1.deriveType)(replayId));
327
+ }
328
+ else {
329
+ sql = `
330
+ WITH valid_job AS (
331
+ SELECT id FROM ${tableName} WHERE key = $1 AND is_live
332
+ )
333
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
334
+ SELECT id, $2, ln($3::double precision)::text, $4::${schemaName}.type_enum
335
+ FROM valid_job
336
+ ON CONFLICT (job_id, field) DO UPDATE
337
+ SET value = (COALESCE(${tableName}_attributes.value::double precision, 0) + ln($3::double precision))::text
338
+ RETURNING value as new_value
339
+ `;
340
+ params.push(key, field, value, 'udata');
341
+ }
342
+ return { sql, params };
343
+ }
344
+ function handleUdataAll(key, fields, options) {
345
+ const tableName = context.tableForKey(key, 'hash');
346
+ const replayId = Object.keys(fields).find((k) => k.includes('-') && k !== '@udata:all');
347
+ const params = [];
348
+ let sql = '';
349
+ if (replayId) {
350
+ sql = `
351
+ WITH field_data AS (
352
+ SELECT jsonb_object_agg(a.field, a.value) as field_values
353
+ FROM ${tableName} j
354
+ LEFT JOIN ${tableName}_attributes a ON j.id = a.job_id
355
+ WHERE j.key = $1 AND j.is_live
356
+ AND a.type = 'udata' AND a.field LIKE '\\_%'
357
+ ),
358
+ replay_insert AS (
359
+ INSERT INTO ${tableName}_attributes (job_id, field, value, type)
360
+ SELECT j.id, $2, field_values::text, $3
361
+ FROM ${tableName} j, field_data
362
+ WHERE j.key = $1 AND j.is_live
363
+ ON CONFLICT (job_id, field) DO UPDATE
364
+ SET value = EXCLUDED.value
365
+ RETURNING 1
366
+ )
367
+ SELECT field_values as new_value FROM field_data
368
+ `;
369
+ params.push(key, replayId, (0, utils_1.deriveType)(replayId));
370
+ }
371
+ else {
372
+ sql = `
373
+ SELECT jsonb_object_agg(a.field, a.value) as new_value
374
+ FROM ${tableName} j
375
+ LEFT JOIN ${tableName}_attributes a ON j.id = a.job_id
376
+ WHERE j.key = $1 AND j.is_live
377
+ AND a.type = 'udata' AND a.field LIKE '\\_%'
378
+ `;
379
+ params.push(key);
380
+ }
381
+ return { sql, params };
382
+ }
383
+ }
384
+ exports.createUdataOperations = createUdataOperations;
@@ -42,10 +42,7 @@ class PostgresStoreService extends __1.StoreService {
42
42
  this.isScout = false;
43
43
  //Instead of directly referencing the 'pg' package and methods like 'query',
44
44
  // the PostgresStore wraps the 'pg' client in a class that implements
45
- // the Redis client interface. This allows the same methods to be called
46
- // that were used when authoring the Redis client store provider.
47
- //In general, this.storeClient will behave like Redis, but will
48
- // use the 'pg' package and will read/write to a Postgres database.
45
+ // an entity/attribute interface.
49
46
  this.pgClient = storeClient;
50
47
  this.storeClient = new kvsql_1.KVSQL(storeClient, this.namespace, this.appId);
51
48
  //kvTables will provision tables and indexes in the Postgres db as necessary
@@ -75,7 +72,6 @@ class PostgresStoreService extends __1.StoreService {
75
72
  await this.kvsql().del(`${key}:${target}`);
76
73
  }
77
74
  async zAdd(key, score, value, transaction) {
78
- //default call signature uses 'ioredis' NPM Package format
79
75
  return await this.kvsql(transaction).zadd(key, Number(score), value.toString());
80
76
  }
81
77
  async zRangeByScore(key, score, value) {
@@ -393,7 +389,7 @@ class PostgresStoreService extends __1.StoreService {
393
389
  }
394
390
  }
395
391
  hGetAllResult(result) {
396
- //default response signature uses 'redis' NPM Package format
392
+ //default response signature
397
393
  return result;
398
394
  }
399
395
  async getJobStats(jobKeys) {
@@ -2,8 +2,6 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.StreamServiceFactory = void 0;
4
4
  const utils_1 = require("../../modules/utils");
5
- const ioredis_1 = require("./providers/redis/ioredis");
6
- const redis_1 = require("./providers/redis/redis");
7
5
  const nats_1 = require("./providers/nats/nats");
8
6
  const postgres_1 = require("./providers/postgres/postgres");
9
7
  class StreamServiceFactory {
@@ -11,24 +9,10 @@ class StreamServiceFactory {
11
9
  let service;
12
10
  const providerType = (0, utils_1.identifyProvider)(provider);
13
11
  if (providerType === 'nats') {
14
- let storeProvider;
15
- if ((0, utils_1.identifyProvider)(storeProvider) === 'redis') {
16
- storeProvider = storeProvider;
17
- }
18
- else {
19
- //ioredis
20
- storeProvider = storeProvider;
21
- }
22
12
  service = new nats_1.NatsStreamService(provider, storeProvider);
23
13
  }
24
14
  else if (providerType === 'postgres') {
25
15
  service = new postgres_1.PostgresStreamService(provider, storeProvider);
26
- }
27
- else if (providerType === 'redis') {
28
- service = new redis_1.RedisStreamService(provider, storeProvider);
29
- }
30
- else if (providerType === 'ioredis') {
31
- service = new ioredis_1.IORedisStreamService(provider, storeProvider);
32
16
  } //etc register other providers here
33
17
  await service.init(namespace, appId, logger);
34
18
  return service;
@@ -2,10 +2,8 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.SubServiceFactory = void 0;
4
4
  const utils_1 = require("../../modules/utils");
5
- const redis_1 = require("./providers/redis/redis");
6
5
  const postgres_1 = require("./providers/postgres/postgres");
7
6
  const nats_1 = require("./providers/nats/nats");
8
- const ioredis_1 = require("./providers/redis/ioredis");
9
7
  class SubServiceFactory {
10
8
  static async init(providerSubClient, providerPubClient, namespace, appId, engineId, logger) {
11
9
  let service;
@@ -13,15 +11,9 @@ class SubServiceFactory {
13
11
  if (providerType === 'nats') {
14
12
  service = new nats_1.NatsSubService(providerSubClient, providerPubClient);
15
13
  }
16
- else if (providerType === 'redis') {
17
- service = new redis_1.RedisSubService(providerSubClient, providerPubClient);
18
- }
19
14
  else if (providerType === 'postgres') {
20
15
  service = new postgres_1.PostgresSubService(providerSubClient, providerPubClient);
21
16
  }
22
- else {
23
- service = new ioredis_1.IORedisSubService(providerSubClient, providerPubClient);
24
- }
25
17
  await service.init(namespace, appId, engineId, logger);
26
18
  return service;
27
19
  }
@@ -15,7 +15,6 @@ class NatsSubService extends index_1.SubService {
15
15
  this.engineId = engineId;
16
16
  }
17
17
  transact() {
18
- // NATS does not support transactions like Redis.
19
18
  // Return an empty object or throw an error if not supported.
20
19
  return {};
21
20
  }
@@ -113,7 +113,6 @@ class TaskService {
113
113
  }
114
114
  catch (err) {
115
115
  //most common reasons: deleted job not found; container stopping; test stopping
116
- //less common: redis/cluster down; retry with fallback (5s max main reassignment)
117
116
  this.logger.warn('task-process-timehooks-error', err);
118
117
  await (0, utils_1.sleepFor)(1000 * this.errorCount++);
119
118
  if (this.errorCount < 5) {
@@ -58,11 +58,7 @@ interface TriggerActivityStats {
58
58
  } | string;
59
59
  /**
60
60
  * @deprecated
61
- * return 'infinity' to disable; default behavior
62
- * is to always segment keys by time to ensure
63
- * indexes (Redis LIST) never grow unbounded
64
- * as a default behavior; for now, 5m is default
65
- * and infinity can be set to override
61
+ * return 'infinity' to disable
66
62
  */
67
63
  granularity?: string;
68
64
  /**
@@ -140,11 +140,6 @@ type HotMeshWorker = {
140
140
  * @private
141
141
  */
142
142
  search?: ProviderClient;
143
- /**
144
- * redis connection options; replaced with 'connection'
145
- * @deprecated
146
- */
147
- redis?: ProviderConfig;
148
143
  /**
149
144
  * short-form format for the connection options for the
150
145
  * store, stream, sub, and search clients
@@ -17,7 +17,6 @@ export { MeshCallConnectParams, MeshCallExecParams, MeshCallCronParams, MeshCall
17
17
  export { PostgresClassType, PostgresClientOptions, PostgresClientType, PostgresConsumerGroup, PostgresPendingMessage, PostgresPoolClientType, PostgresQueryConfigType, PostgresQueryResultType, PostgresStreamMessage, PostgresStreamOptions, PostgresTransaction, } from './postgres';
18
18
  export { ActivateMessage, CronMessage, JobMessage, JobMessageCallback, PingMessage, PongMessage, QuorumMessage, QuorumMessageCallback, QuorumProfile, RollCallMessage, RollCallOptions, SubscriptionCallback, SubscriptionOptions, SystemHealth, ThrottleMessage, ThrottleOptions, WorkMessage, } from './quorum';
19
19
  export { NatsAckPolicy, NatsAckPolicyExplicitType, NatsClassType, NatsClientType, NatsClientOptions, NatsConsumerConfigType, NatsJetStreamManager, NatsConnection, NatsJetStreamType, NatsConnectionOptions, NatsConsumerConfig, NatsConsumerInfo, NatsConsumerManager, NatsDeliveryInfo, NatsJetStreamOptions, NatsError, NatsErrorType, NatsJetStreamClient, NatsJsMsg, NatsMessageType, NatsMsgExpect, NatsPubAck, NatsPubAckType, NatsPublishOptions, NatsRetentionPolicy, NatsRetentionPolicyWorkqueueType, NatsSequenceInfo, NatsStorageMemoryType, NatsStorageType, NatsStreamConfig, NatsStreamInfo, NatsStreamManager, NatsStreamConfigType, NatsStreamInfoType, NatsStreamOptions, NatsStreamState, NatsTransaction, } from './nats';
20
- export { RedisClass, RedisRedisClientType, RedisRedisClientOptions, RedisRedisClassType, IORedisClientType, RedisClient, RedisMulti, RedisRedisMultiType, IORedisClientOptions, IORedisClassType, IORedisMultiType, RedisOptions, isRedisClient, isIORedisClient, } from './redis';
21
20
  export { JSONSchema, StringAnyType, StringScalarType, StringStringType, SymbolMap, SymbolMaps, SymbolRanges, Symbols, SymbolSets, } from './serializer';
22
21
  export { AggregatedData, CountByFacet, GetStatsOptions, IdsData, Measure, MeasureIds, MetricTypes, StatType, StatsType, IdsResponse, JobStats, JobStatsInput, JobStatsRange, StatsResponse, Segment, TimeSegment, } from './stats';
23
22
  export { ReclaimedMessageType, RouterConfig, StreamCode, StreamConfig, StreamData, StreamDataType, StreamError, StreamDataResponse, StreamMessage, StreamMessageMetadata, StreamProviderType, StreamRetryPolicy, StreamRole, StreamStats, StreamStatus, } from './stream';
@@ -1,15 +1,12 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.ValueType = exports.trace = exports.SpanKind = exports.SpanStatusCode = exports.propagation = exports.metrics = exports.context = exports.StreamStatus = exports.StreamRole = exports.StreamDataType = exports.isIORedisClient = exports.isRedisClient = exports.KeyType = exports.HookGate = exports.CollationFaultType = void 0;
3
+ exports.ValueType = exports.trace = exports.SpanKind = exports.SpanStatusCode = exports.propagation = exports.metrics = exports.context = exports.StreamStatus = exports.StreamRole = exports.StreamDataType = exports.KeyType = exports.HookGate = exports.CollationFaultType = void 0;
4
4
  var collator_1 = require("./collator");
5
5
  Object.defineProperty(exports, "CollationFaultType", { enumerable: true, get: function () { return collator_1.CollationFaultType; } });
6
6
  var hook_1 = require("./hook");
7
7
  Object.defineProperty(exports, "HookGate", { enumerable: true, get: function () { return hook_1.HookGate; } });
8
8
  var hotmesh_1 = require("./hotmesh");
9
9
  Object.defineProperty(exports, "KeyType", { enumerable: true, get: function () { return hotmesh_1.KeyType; } });
10
- var redis_1 = require("./redis");
11
- Object.defineProperty(exports, "isRedisClient", { enumerable: true, get: function () { return redis_1.isRedisClient; } });
12
- Object.defineProperty(exports, "isIORedisClient", { enumerable: true, get: function () { return redis_1.isIORedisClient; } });
13
10
  var stream_1 = require("./stream");
14
11
  Object.defineProperty(exports, "StreamDataType", { enumerable: true, get: function () { return stream_1.StreamDataType; } });
15
12
  Object.defineProperty(exports, "StreamRole", { enumerable: true, get: function () { return stream_1.StreamRole; } });
@@ -140,7 +140,7 @@ type JobInterruptOptions = {
140
140
  */
141
141
  suppress?: boolean;
142
142
  /**
143
- * how long to wait in seconds before fully expiring/removing the hash from Redis;
143
+ * how long to wait in seconds before fully softdeleting the job;
144
144
  * the job is inactive, but can remain in the cache indefinitely;
145
145
  * @default 1 second.
146
146
  */
@@ -91,7 +91,7 @@ type WorkflowContext = {
91
91
  */
92
92
  raw: StreamData;
93
93
  /**
94
- * the HotMesh connection configuration (io/redis NPM package reference and login credentials)
94
+ * the HotMesh connection configuration
95
95
  */
96
96
  connection: Connection;
97
97
  /**
@@ -100,7 +100,8 @@ type WorkflowContext = {
100
100
  expire?: number;
101
101
  };
102
102
  /**
103
- * The schema for the full-text-search (RediSearch) index.
103
+ * The schema for the full-text-search
104
+ * @deprecated
104
105
  */
105
106
  export type WorkflowSearchSchema = Record<string, {
106
107
  /**
@@ -252,7 +253,7 @@ type WorkflowOptions = {
252
253
  */
253
254
  workflowSpan?: string;
254
255
  /**
255
- * the full-text-search (RediSearch) options for the workflow
256
+ * the full-text-search
256
257
  */
257
258
  search?: WorkflowSearchOptions;
258
259
  /**
@@ -414,7 +415,7 @@ type FindWhereOptions = {
414
415
  };
415
416
  };
416
417
  type FindJobsOptions = {
417
- /** The workflow name; include an asterisk for wilcard search; refer to Redis SCAN for the allowed format */
418
+ /** The workflow name; include an asterisk for wilcard search */
418
419
  match?: string;
419
420
  /**
420
421
  * application namespace
@@ -31,11 +31,6 @@ interface MeshCallConnectParams {
31
31
  * Unique topic for the worker function
32
32
  */
33
33
  topic: string;
34
- /**
35
- * Redis configuration; use 'connection' instead of 'redis'
36
- * @deprecated
37
- */
38
- redis?: ProviderConfig;
39
34
  /**
40
35
  * Provider configuration
41
36
  */
@@ -58,11 +53,6 @@ interface MeshCallExecParams {
58
53
  * Arguments to pass to the worker function
59
54
  */
60
55
  args: any[];
61
- /**
62
- * Redis configuration; use 'connection' instead of 'redis'
63
- * @deprecated
64
- */
65
- redis?: ProviderConfig;
66
56
  /**
67
57
  * Provider configuration
68
58
  */
@@ -91,11 +81,6 @@ interface MeshCallFlushParams {
91
81
  * topic assigned to the worker when it was connected
92
82
  */
93
83
  topic: string;
94
- /**
95
- * Redis configuration; use 'connection' instead of 'redis'
96
- * @deprecated
97
- */
98
- redis?: ProviderConfig;
99
84
  /**
100
85
  * Provider configuration
101
86
  */
@@ -151,11 +136,6 @@ interface MeshCallCronParams {
151
136
  * Unique topic for the cron function to identify the worker
152
137
  */
153
138
  topic: string;
154
- /**
155
- * Redis configuration; use 'connection' instead of 'redis'
156
- * @deprecated
157
- */
158
- redis?: ProviderConfig;
159
139
  /**
160
140
  * Provider configuration
161
141
  */
@@ -198,11 +178,6 @@ interface MeshCallInterruptParams {
198
178
  * topic assigned to the cron worker when it was connected
199
179
  */
200
180
  topic: string;
201
- /**
202
- * Redis configuration; use 'connection' instead of 'redis'
203
- * @deprecated
204
- */
205
- redis?: ProviderConfig;
206
181
  /**
207
182
  * Provider configuration
208
183
  */
@@ -12,7 +12,7 @@ export interface ProviderClass {
12
12
  export interface ProviderOptions {
13
13
  [key: string]: any;
14
14
  }
15
- export type Providers = 'redis' | 'nats' | 'postgres' | 'ioredis';
15
+ export type Providers = 'nats' | 'postgres';
16
16
  /**
17
17
  * A provider transaction is a set of operations that are executed
18
18
  * atomically by the provider. The transaction is created by calling