@hotmeshio/hotmesh 0.6.1 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112) hide show
  1. package/.claude/settings.local.json +7 -0
  2. package/README.md +179 -142
  3. package/build/index.d.ts +1 -3
  4. package/build/index.js +1 -5
  5. package/build/modules/enums.d.ts +7 -0
  6. package/build/modules/enums.js +16 -1
  7. package/build/modules/utils.d.ts +27 -0
  8. package/build/modules/utils.js +55 -32
  9. package/build/package.json +18 -27
  10. package/build/services/activities/activity.d.ts +43 -6
  11. package/build/services/activities/activity.js +262 -54
  12. package/build/services/activities/await.js +2 -2
  13. package/build/services/activities/cycle.js +1 -1
  14. package/build/services/activities/hook.d.ts +5 -0
  15. package/build/services/activities/hook.js +22 -19
  16. package/build/services/activities/interrupt.js +17 -25
  17. package/build/services/activities/signal.d.ts +4 -2
  18. package/build/services/activities/signal.js +27 -24
  19. package/build/services/activities/worker.js +2 -2
  20. package/build/services/collator/index.d.ts +123 -25
  21. package/build/services/collator/index.js +224 -101
  22. package/build/services/connector/factory.d.ts +1 -1
  23. package/build/services/connector/factory.js +1 -11
  24. package/build/services/connector/providers/postgres.js +3 -0
  25. package/build/services/engine/index.d.ts +5 -5
  26. package/build/services/engine/index.js +36 -15
  27. package/build/services/hotmesh/index.d.ts +66 -15
  28. package/build/services/hotmesh/index.js +84 -15
  29. package/build/services/memflow/index.d.ts +100 -14
  30. package/build/services/memflow/index.js +100 -14
  31. package/build/services/memflow/worker.d.ts +97 -0
  32. package/build/services/memflow/worker.js +217 -0
  33. package/build/services/memflow/workflow/proxyActivities.d.ts +74 -3
  34. package/build/services/memflow/workflow/proxyActivities.js +81 -4
  35. package/build/services/router/consumption/index.d.ts +2 -1
  36. package/build/services/router/consumption/index.js +39 -3
  37. package/build/services/router/error-handling/index.d.ts +3 -3
  38. package/build/services/router/error-handling/index.js +48 -13
  39. package/build/services/router/index.d.ts +1 -0
  40. package/build/services/router/index.js +2 -1
  41. package/build/services/search/factory.js +1 -9
  42. package/build/services/store/factory.js +1 -9
  43. package/build/services/store/index.d.ts +8 -2
  44. package/build/services/store/providers/postgres/kvsql.d.ts +4 -0
  45. package/build/services/store/providers/postgres/kvsql.js +4 -0
  46. package/build/services/store/providers/postgres/kvtransaction.d.ts +2 -0
  47. package/build/services/store/providers/postgres/kvtransaction.js +23 -0
  48. package/build/services/store/providers/postgres/kvtypes/hash/basic.d.ts +51 -0
  49. package/build/services/store/providers/postgres/kvtypes/hash/basic.js +229 -7
  50. package/build/services/store/providers/postgres/kvtypes/hash/expire.js +12 -2
  51. package/build/services/store/providers/postgres/kvtypes/hash/index.d.ts +4 -0
  52. package/build/services/store/providers/postgres/kvtypes/hash/index.js +6 -0
  53. package/build/services/store/providers/postgres/kvtypes/hash/scan.js +30 -10
  54. package/build/services/store/providers/postgres/kvtypes/list.js +68 -10
  55. package/build/services/store/providers/postgres/kvtypes/string.js +60 -10
  56. package/build/services/store/providers/postgres/kvtypes/zset.js +92 -22
  57. package/build/services/store/providers/postgres/postgres.d.ts +23 -3
  58. package/build/services/store/providers/postgres/postgres.js +38 -1
  59. package/build/services/stream/factory.js +1 -17
  60. package/build/services/stream/providers/postgres/kvtables.js +76 -23
  61. package/build/services/stream/providers/postgres/lifecycle.d.ts +19 -0
  62. package/build/services/stream/providers/postgres/lifecycle.js +54 -0
  63. package/build/services/stream/providers/postgres/messages.d.ts +56 -0
  64. package/build/services/stream/providers/postgres/messages.js +253 -0
  65. package/build/services/stream/providers/postgres/notifications.d.ts +59 -0
  66. package/build/services/stream/providers/postgres/notifications.js +357 -0
  67. package/build/services/stream/providers/postgres/postgres.d.ts +110 -11
  68. package/build/services/stream/providers/postgres/postgres.js +196 -488
  69. package/build/services/stream/providers/postgres/scout.d.ts +68 -0
  70. package/build/services/stream/providers/postgres/scout.js +233 -0
  71. package/build/services/stream/providers/postgres/stats.d.ts +49 -0
  72. package/build/services/stream/providers/postgres/stats.js +113 -0
  73. package/build/services/sub/factory.js +1 -9
  74. package/build/services/sub/index.d.ts +1 -1
  75. package/build/services/sub/providers/postgres/postgres.d.ts +1 -1
  76. package/build/services/sub/providers/postgres/postgres.js +53 -6
  77. package/build/services/task/index.d.ts +1 -1
  78. package/build/services/task/index.js +2 -6
  79. package/build/services/worker/index.d.ts +1 -0
  80. package/build/services/worker/index.js +2 -0
  81. package/build/types/hotmesh.d.ts +42 -2
  82. package/build/types/index.d.ts +3 -4
  83. package/build/types/index.js +1 -4
  84. package/build/types/memflow.d.ts +32 -0
  85. package/build/types/provider.d.ts +17 -1
  86. package/build/types/stream.d.ts +92 -1
  87. package/index.ts +0 -4
  88. package/package.json +18 -27
  89. package/build/services/connector/providers/ioredis.d.ts +0 -9
  90. package/build/services/connector/providers/ioredis.js +0 -26
  91. package/build/services/connector/providers/redis.d.ts +0 -9
  92. package/build/services/connector/providers/redis.js +0 -38
  93. package/build/services/search/providers/redis/ioredis.d.ts +0 -23
  94. package/build/services/search/providers/redis/ioredis.js +0 -189
  95. package/build/services/search/providers/redis/redis.d.ts +0 -23
  96. package/build/services/search/providers/redis/redis.js +0 -202
  97. package/build/services/store/providers/redis/_base.d.ts +0 -137
  98. package/build/services/store/providers/redis/_base.js +0 -980
  99. package/build/services/store/providers/redis/ioredis.d.ts +0 -20
  100. package/build/services/store/providers/redis/ioredis.js +0 -180
  101. package/build/services/store/providers/redis/redis.d.ts +0 -18
  102. package/build/services/store/providers/redis/redis.js +0 -199
  103. package/build/services/stream/providers/redis/ioredis.d.ts +0 -61
  104. package/build/services/stream/providers/redis/ioredis.js +0 -272
  105. package/build/services/stream/providers/redis/redis.d.ts +0 -61
  106. package/build/services/stream/providers/redis/redis.js +0 -305
  107. package/build/services/sub/providers/redis/ioredis.d.ts +0 -20
  108. package/build/services/sub/providers/redis/ioredis.js +0 -150
  109. package/build/services/sub/providers/redis/redis.d.ts +0 -18
  110. package/build/services/sub/providers/redis/redis.js +0 -137
  111. package/build/types/redis.d.ts +0 -258
  112. package/build/types/redis.js +0 -11
@@ -0,0 +1,7 @@
1
+ {
2
+ "permissions": {
3
+ "allow": [
4
+ "Bash(npx tsc:*)"
5
+ ]
6
+ }
7
+ }
package/README.md CHANGED
@@ -1,189 +1,226 @@
1
1
  # HotMesh
2
2
 
3
- **Integrate AI automation into your current stack — without breaking it**
4
-
5
3
  ![beta release](https://img.shields.io/badge/release-beta-blue.svg)
6
4
 
7
- HotMesh modernizes existing business systems by introducing a durable workflow layer that connects AI, automation, and human-in-the-loop steps — **without replacing your current stack**.
8
- Each process runs with persistent memory in Postgres, surviving retries, crashes, and human delays.
9
-
10
- ```bash
11
- npm install @hotmeshio/hotmesh
12
- ```
13
-
14
- ---
15
-
16
- ## What It Solves
17
-
18
- Modernization often stalls where systems meet people and AI.
19
- HotMesh builds a **durable execution bridge** across those seams — linking your database, APIs, RPA, and AI agents into one recoverable process.
20
-
21
- * **AI that can fail safely** — retries, resumable state, and confidence tracking
22
- * **Human steps that don’t block** — pause for days, resume instantly
23
- * **Legacy systems that stay connected** — SQL and RPA coexist seamlessly
24
- * **Full visibility** — query workflows and outcomes directly in SQL
25
-
26
- ---
27
-
28
- ## Core Model
5
+ Run durable workflows on Postgres. No servers, no queues, just your database.
29
6
 
30
- ### Entity — the Business Process Record
31
7
 
32
- Every workflow writes to a durable JSON document in Postgres called an **Entity**.
33
- It becomes the shared memory between APIs, RPA jobs, LLM agents, and human operators.
8
+ ## Common Use Cases
34
9
 
35
- ```ts
36
- const e = await MemFlow.workflow.entity();
10
+ ### 1. Pipeline Database
11
+ Transform Postgres into a durable pipeline processor. Orchestrate long-running, multi-step pipelines transactionally and durably.
37
12
 
38
- // initialize from a source event
39
- await e.set({
40
- caseId: "A42",
41
- stage: "verification",
42
- retries: 0,
43
- notes: []
44
- });
13
+ ### 2. Temporal You Own
14
+ Get the power of Temporal without the infrastructure. HotMesh includes MemFlow, a Temporal-compatible API that runs directly on your Postgres database. No app server required.
45
15
 
46
- // AI step adds structured output
47
- await e.merge({
48
- aiSummary: { result: "Verified coverage", confidence: 0.93 },
49
- stage: "approval",
50
- });
16
+ ### 3. Distributed State Machine
17
+ Build resilient, stateful applications where every component can [fail and recover](https://github.com/hotmeshio/sdk-typescript/blob/main/services/collator/README.md). HotMesh manages state transitions, retries, and coordination.
51
18
 
52
- // human operator review
53
- await e.append("notes", { reviewer: "ops1", comment: "ok to proceed" });
19
+ ### 4. Workflow-as-Code Platform
20
+ Choose your style: procedural workflows with MemFlow's Temporal API, or functional workflows with HotMesh's YAML syntax.
54
21
 
55
- // maintain counters
56
- await e.increment("retries", 1);
22
+ ## Installation
57
23
 
58
- // retrieve current process state
59
- const data = await e.get();
24
+ ```bash
25
+ npm install @hotmeshio/hotmesh
60
26
  ```
61
27
 
62
- **Minimal surface contract**
28
+ ## Two ways to write workflows
63
29
 
64
- | Command | Purpose |
65
- | ------------- | ---------------------------------- |
66
- | `set()` | Initialize workflow state |
67
- | `merge()` | Update any JSON path |
68
- | `append()` | Add entries to lists (logs, notes) |
69
- | `increment()` | Maintain counters or metrics |
70
- | `get()` | Retrieve current state |
30
+ Both approaches reuse your activity functions:
71
31
 
72
- Entities are stored in plain SQL tables, directly queryable:
32
+ ```typescript
33
+ // activities.ts (shared between both approaches)
34
+ export async function checkInventory(itemId: string): Promise<number> {
35
+ return getInventoryCount(itemId);
36
+ }
73
37
 
74
- ```sql
75
- SELECT id, context->>'stage', context->'aiSummary'->>'result'
76
- FROM my_app.jobs
77
- WHERE entity = 'claims-review'
78
- AND context->>'stage' != 'complete';
79
- ```
38
+ export async function reserveItem(itemId: string, quantity: number): Promise<string> {
39
+ return createReservation(itemId, quantity);
40
+ }
80
41
 
81
- ---
42
+ export async function notifyBackorder(itemId: string): Promise<void> {
43
+ await sendBackorderEmail(itemId);
44
+ }
45
+ ```
82
46
 
83
- ### Hook Parallel Work Units
47
+ ### Option 1: Code (Temporal-compatible API)
48
+
49
+ ```typescript
50
+ // workflows.ts
51
+ import { MemFlow } from '@hotmeshio/hotmesh';
52
+ import * as activities from './activities';
53
+
54
+ export async function orderWorkflow(itemId: string, qty: number) {
55
+ const { checkInventory, reserveItem, notifyBackorder } =
56
+ MemFlow.workflow.proxyActivities<typeof activities>({
57
+ taskQueue: 'inventory-tasks'
58
+ });
59
+
60
+ const available = await checkInventory(itemId);
61
+
62
+ if (available >= qty) {
63
+ return await reserveItem(itemId, qty);
64
+ } else {
65
+ await notifyBackorder(itemId);
66
+ return 'backordered';
67
+ }
68
+ }
84
69
 
85
- Hooks are stateless functions that operate on the shared Entity.
86
- Each hook executes independently (API, RPA, or AI), retrying automatically until success.
70
+ // main.ts
71
+ const connection = {
72
+ class: Postgres,
73
+ options: { connectionString: 'postgresql://localhost:5432/mydb' }
74
+ };
75
+
76
+ await MemFlow.registerActivityWorker({
77
+ connection,
78
+ taskQueue: 'inventory-tasks'
79
+ }, activities, 'inventory-activities');
80
+
81
+ await MemFlow.Worker.create({
82
+ connection,
83
+ taskQueue: 'orders',
84
+ workflow: orderWorkflow
85
+ });
87
86
 
88
- ```ts
89
- await MemFlow.workflow.execHook({
90
- workflowName: "verifyCoverage",
91
- args: ["A42"]
87
+ const client = new MemFlow.Client({ connection });
88
+ const handle = await client.workflow.start({
89
+ args: ['item-123', 5],
90
+ taskQueue: 'orders',
91
+ workflowName: 'orderWorkflow',
92
+ workflowId: 'order-456'
92
93
  });
93
- ```
94
94
 
95
- To run independent work in parallel, use a **batch execution** pattern:
96
-
97
- ```ts
98
- // Run independent research perspectives in parallel using batch execution
99
- await MemFlow.workflow.execHookBatch([
100
- {
101
- key: 'optimistic',
102
- options: {
103
- taskQueue: 'agents',
104
- workflowName: 'optimisticPerspective',
105
- args: [query],
106
- signalId: 'optimistic-complete'
107
- }
108
- },
109
- {
110
- key: 'skeptical',
111
- options: {
112
- taskQueue: 'agents',
113
- workflowName: 'skepticalPerspective',
114
- args: [query],
115
- signalId: 'skeptical-complete'
116
- }
117
- }
118
- ]);
95
+ const result = await handle.result();
119
96
  ```
120
97
 
121
- Each hook runs in its own recoverable context, allowing AI, API, and RPA agents to operate independently while writing to the same durable Entity.
122
-
123
- ---
124
-
125
- ## Example — AI-Assisted Claims Review
98
+ ### Option 2: YAML (functional approach)
99
+
100
+ ```yaml
101
+ # order.yaml
102
+ activities:
103
+ trigger:
104
+ type: trigger
105
+
106
+ checkInventory:
107
+ type: worker
108
+ topic: inventory.check
109
+
110
+ reserveItem:
111
+ type: worker
112
+ topic: inventory.reserve
113
+
114
+ notifyBackorder:
115
+ type: worker
116
+ topic: inventory.backorder.notify
117
+
118
+ transitions:
119
+ trigger:
120
+ - to: checkInventory
121
+
122
+ checkInventory:
123
+ - to: reserveItem
124
+ conditions:
125
+ match:
126
+ - expected: true
127
+ actual:
128
+ '@pipe':
129
+ - ['{checkInventory.output.data.availableQty}', '{trigger.output.data.requestedQty}']
130
+ - ['{@conditional.gte}']
131
+
132
+ - to: notifyBackorder
133
+ conditions:
134
+ match:
135
+ - expected: false
136
+ actual:
137
+ '@pipe':
138
+ - ['{checkInventory.output.data.availableQty}', '{trigger.output.data.requestedQty}']
139
+ - ['{@conditional.gte}']
140
+ ```
126
141
 
127
- ```ts
128
- export async function claimsWorkflow(caseId: string) {
129
- const e = await MemFlow.workflow.entity();
130
- await e.set({ caseId, stage: "intake", approved: false });
142
+ ```typescript
143
+ // main.ts (reuses same activities.ts)
144
+ import * as activities from './activities';
131
145
 
132
- // Run verification and summarization in parallel
133
- await MemFlow.workflow.execHookBatch([
146
+ const hotMesh = await HotMesh.init({
147
+ appId: 'orders',
148
+ engine: { connection },
149
+ workers: [
150
+ {
151
+ topic: 'inventory.check',
152
+ connection,
153
+ callback: async (data) => {
154
+ const availableQty = await activities.checkInventory(data.data.itemId);
155
+ return { metadata: { ...data.metadata }, data: { availableQty } };
156
+ }
157
+ },
134
158
  {
135
- key: 'verifyCoverage',
136
- options: {
137
- taskQueue: 'agents',
138
- workflowName: 'verifyCoverage',
139
- args: [caseId],
140
- signalId: 'verify-complete'
159
+ topic: 'inventory.reserve',
160
+ connection,
161
+ callback: async (data) => {
162
+ const reservationId = await activities.reserveItem(data.data.itemId, data.data.quantity);
163
+ return { metadata: { ...data.metadata }, data: { reservationId } };
141
164
  }
142
165
  },
143
166
  {
144
- key: 'generateSummary',
145
- options: {
146
- taskQueue: 'agents',
147
- workflowName: 'generateSummary',
148
- args: [caseId],
149
- signalId: 'summary-complete'
167
+ topic: 'inventory.backorder.notify',
168
+ connection,
169
+ callback: async (data) => {
170
+ await activities.notifyBackorder(data.data.itemId);
171
+ return { metadata: { ...data.metadata } };
150
172
  }
151
173
  }
152
- ]);
174
+ ]
175
+ });
153
176
 
154
- // Wait for human sign-off
155
- const approval = await MemFlow.workflow.waitFor("human-approval");
156
- await e.merge({ approved: approval === true, stage: "complete" });
177
+ await hotMesh.deploy('./order.yaml');
178
+ await hotMesh.activate('1');
157
179
 
158
- return await e.get();
159
- }
180
+ const result = await hotMesh.pubsub('order.requested', {
181
+ itemId: 'item-123',
182
+ requestedQty: 5
183
+ });
160
184
  ```
161
185
 
162
- This bridges:
186
+ Both compile to the same distributed execution model.
163
187
 
164
- * an existing insurance or EHR system (status + audit trail)
165
- * LLM agents for data validation and summarization
166
- * a human reviewer for final sign-off
188
+ ## Core features
167
189
 
168
- —all within one recoverable workflow record.
190
+ - **Durable execution** - Survives crashes, retries automatically
191
+ - **No infrastructure** - Runs on your existing Postgres
192
+ - **Temporal compatible** - Drop-in replacement for many use cases
193
+ - **Distributed** - Every client participates in execution
194
+ - **Observable** - Full execution history in your database
169
195
 
170
- ---
196
+ ## Common patterns
171
197
 
172
- ## Why It Fits Integration Work
198
+ **Long-running workflows**
173
199
 
174
- HotMesh is purpose-built for **incremental modernization**.
200
+ ```typescript
201
+ await sleep('30 days');
202
+ await sendFollowUp();
203
+ ```
175
204
 
176
- | Need | What HotMesh Provides |
177
- | ----------------------------- | ---------------------------------------- |
178
- | Tie AI into legacy apps | Durable SQL bridge with full visibility |
179
- | Keep human review steps | Wait-for-signal workflows |
180
- | Handle unstable APIs | Built-in retries and exponential backoff |
181
- | Trace process across systems | Unified JSON entity per workflow |
182
- | Store long-running AI results | Durable state for agents and automations |
205
+ **Parallel execution**
183
206
 
184
- ---
207
+ ```typescript
208
+ const results = await Promise.all([
209
+ processPayment(),
210
+ updateInventory(),
211
+ notifyWarehouse()
212
+ ]);
213
+ ```
214
+
215
+ **Child workflows**
216
+
217
+ ```typescript
218
+ const childHandle = await startChild(validateOrder, { args: [orderId] });
219
+ const validation = await childHandle.result();
220
+ ```
185
221
 
186
222
  ## License
187
223
 
188
- Apache 2.0 free to build, integrate, and deploy.
189
- Do not resell the core engine as a hosted service.
224
+ HotMesh is licensed under the Apache License, Version 2.0.
225
+
226
+ You may use, modify, and distribute HotMesh in accordance with the license, including as part of your own applications and services. However, offering HotMesh itself as a standalone, hosted commercial orchestration service (or a substantially similar service) requires prior written permission from the author.
package/build/index.d.ts CHANGED
@@ -16,9 +16,7 @@ import * as Enums from './modules/enums';
16
16
  import * as KeyStore from './modules/key';
17
17
  import { ConnectorService as Connector } from './services/connector/factory';
18
18
  import { PostgresConnection as ConnectorPostgres } from './services/connector/providers/postgres';
19
- import { RedisConnection as ConnectorIORedis } from './services/connector/providers/ioredis';
20
- import { RedisConnection as ConnectorRedis } from './services/connector/providers/redis';
21
19
  import { NatsConnection as ConnectorNATS } from './services/connector/providers/nats';
22
20
  export { Connector, //factory
23
- ConnectorIORedis, ConnectorNATS, ConnectorPostgres, ConnectorRedis, HotMesh, HotMeshConfig, MeshCall, MemFlow, Client, Connection, proxyActivities, Search, Entity, Worker, workflow, WorkflowHandle, Enums, Errors, Utils, KeyStore, };
21
+ ConnectorNATS, ConnectorPostgres, HotMesh, HotMeshConfig, MeshCall, MemFlow, Client, Connection, proxyActivities, Search, Entity, Worker, workflow, WorkflowHandle, Enums, Errors, Utils, KeyStore, };
24
22
  export * as Types from './types';
package/build/index.js CHANGED
@@ -23,7 +23,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
23
23
  return result;
24
24
  };
25
25
  Object.defineProperty(exports, "__esModule", { value: true });
26
- exports.Types = exports.KeyStore = exports.Utils = exports.Errors = exports.Enums = exports.WorkflowHandle = exports.workflow = exports.Worker = exports.Entity = exports.Search = exports.proxyActivities = exports.Connection = exports.Client = exports.MemFlow = exports.MeshCall = exports.HotMesh = exports.ConnectorRedis = exports.ConnectorPostgres = exports.ConnectorNATS = exports.ConnectorIORedis = exports.Connector = void 0;
26
+ exports.Types = exports.KeyStore = exports.Utils = exports.Errors = exports.Enums = exports.WorkflowHandle = exports.workflow = exports.Worker = exports.Entity = exports.Search = exports.proxyActivities = exports.Connection = exports.Client = exports.MemFlow = exports.MeshCall = exports.HotMesh = exports.ConnectorPostgres = exports.ConnectorNATS = exports.Connector = void 0;
27
27
  const hotmesh_1 = require("./services/hotmesh");
28
28
  Object.defineProperty(exports, "HotMesh", { enumerable: true, get: function () { return hotmesh_1.HotMesh; } });
29
29
  const meshcall_1 = require("./services/meshcall");
@@ -58,10 +58,6 @@ const factory_1 = require("./services/connector/factory");
58
58
  Object.defineProperty(exports, "Connector", { enumerable: true, get: function () { return factory_1.ConnectorService; } });
59
59
  const postgres_1 = require("./services/connector/providers/postgres");
60
60
  Object.defineProperty(exports, "ConnectorPostgres", { enumerable: true, get: function () { return postgres_1.PostgresConnection; } });
61
- const ioredis_1 = require("./services/connector/providers/ioredis");
62
- Object.defineProperty(exports, "ConnectorIORedis", { enumerable: true, get: function () { return ioredis_1.RedisConnection; } });
63
- const redis_1 = require("./services/connector/providers/redis");
64
- Object.defineProperty(exports, "ConnectorRedis", { enumerable: true, get: function () { return redis_1.RedisConnection; } });
65
61
  const nats_1 = require("./services/connector/providers/nats");
66
62
  Object.defineProperty(exports, "ConnectorNATS", { enumerable: true, get: function () { return nats_1.NatsConnection; } });
67
63
  exports.Types = __importStar(require("./types"));
@@ -106,6 +106,8 @@ export declare const HMSH_XPENDING_COUNT: number;
106
106
  export declare const HMSH_EXPIRE_DURATION: number;
107
107
  export declare const HMSH_FIDELITY_SECONDS: number;
108
108
  export declare const HMSH_SCOUT_INTERVAL_SECONDS: number;
109
+ export declare const HMSH_ROUTER_SCOUT_INTERVAL_SECONDS: number;
110
+ export declare const HMSH_ROUTER_SCOUT_INTERVAL_MS: number;
109
111
  export declare const HMSH_GUID_SIZE: number;
110
112
  /**
111
113
  * Default task queue name used when no task queue is specified
@@ -117,6 +119,11 @@ export declare const DEFAULT_TASK_QUEUE = "default";
117
119
  * PostgreSQL hard limit is 8000 bytes; default 7500 provides safety margin.
118
120
  */
119
121
  export declare const HMSH_NOTIFY_PAYLOAD_LIMIT: number;
122
+ /**
123
+ * PostgreSQL LISTEN/NOTIFY fallback polling interval in milliseconds.
124
+ * Used when LISTEN/NOTIFY is unavailable or fails. Default 30 seconds.
125
+ */
126
+ export declare const HMSH_ROUTER_POLL_FALLBACK_INTERVAL: number;
120
127
  /**
121
128
  * Serializer compression threshold. When a stringified object exceeds this size
122
129
  * in bytes, it will be gzipped and base64 encoded (with /b prefix) to reduce
@@ -1,6 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.HMSH_SERIALIZER_COMPRESSION_THRESHOLD = exports.HMSH_NOTIFY_PAYLOAD_LIMIT = exports.DEFAULT_TASK_QUEUE = exports.HMSH_GUID_SIZE = exports.HMSH_SCOUT_INTERVAL_SECONDS = exports.HMSH_FIDELITY_SECONDS = exports.HMSH_EXPIRE_DURATION = exports.HMSH_XPENDING_COUNT = exports.HMSH_XCLAIM_COUNT = exports.HMSH_XCLAIM_DELAY_MS = exports.HMSH_BLOCK_TIME_MS = exports.HMSH_MEMFLOW_EXP_BACKOFF = exports.HMSH_MEMFLOW_MAX_INTERVAL = exports.HMSH_MEMFLOW_MAX_ATTEMPTS = exports.HMSH_GRADUATED_INTERVAL_MS = exports.HMSH_MAX_TIMEOUT_MS = exports.HMSH_MAX_RETRIES = exports.MAX_DELAY = exports.MAX_STREAM_RETRIES = exports.INITIAL_STREAM_BACKOFF = exports.MAX_STREAM_BACKOFF = exports.HMSH_EXPIRE_JOB_SECONDS = exports.HMSH_OTT_WAIT_TIME = exports.HMSH_DEPLOYMENT_PAUSE = exports.HMSH_DEPLOYMENT_DELAY = exports.HMSH_ACTIVATION_MAX_RETRY = exports.HMSH_QUORUM_DELAY_MS = exports.HMSH_QUORUM_ROLLCALL_CYCLES = exports.HMSH_STATUS_UNKNOWN = exports.HMSH_CODE_MEMFLOW_RETRYABLE = exports.HMSH_CODE_MEMFLOW_FATAL = exports.HMSH_CODE_MEMFLOW_MAXED = exports.HMSH_CODE_MEMFLOW_TIMEOUT = exports.HMSH_CODE_MEMFLOW_WAIT = exports.HMSH_CODE_MEMFLOW_PROXY = exports.HMSH_CODE_MEMFLOW_CHILD = exports.HMSH_CODE_MEMFLOW_ALL = exports.HMSH_CODE_MEMFLOW_SLEEP = exports.HMSH_CODE_UNACKED = exports.HMSH_CODE_TIMEOUT = exports.HMSH_CODE_UNKNOWN = exports.HMSH_CODE_INTERRUPT = exports.HMSH_CODE_NOTFOUND = exports.HMSH_CODE_PENDING = exports.HMSH_CODE_SUCCESS = exports.HMSH_IS_CLUSTER = exports.HMSH_SIGNAL_EXPIRE = exports.HMSH_TELEMETRY = exports.HMSH_LOGLEVEL = void 0;
3
+ exports.HMSH_NOTIFY_PAYLOAD_LIMIT = exports.DEFAULT_TASK_QUEUE = exports.HMSH_GUID_SIZE = exports.HMSH_ROUTER_SCOUT_INTERVAL_MS = exports.HMSH_ROUTER_SCOUT_INTERVAL_SECONDS = exports.HMSH_SCOUT_INTERVAL_SECONDS = exports.HMSH_FIDELITY_SECONDS = exports.HMSH_EXPIRE_DURATION = exports.HMSH_XPENDING_COUNT = exports.HMSH_XCLAIM_COUNT = exports.HMSH_XCLAIM_DELAY_MS = exports.HMSH_BLOCK_TIME_MS = exports.HMSH_MEMFLOW_EXP_BACKOFF = exports.HMSH_MEMFLOW_MAX_INTERVAL = exports.HMSH_MEMFLOW_MAX_ATTEMPTS = exports.HMSH_GRADUATED_INTERVAL_MS = exports.HMSH_MAX_TIMEOUT_MS = exports.HMSH_MAX_RETRIES = exports.MAX_DELAY = exports.MAX_STREAM_RETRIES = exports.INITIAL_STREAM_BACKOFF = exports.MAX_STREAM_BACKOFF = exports.HMSH_EXPIRE_JOB_SECONDS = exports.HMSH_OTT_WAIT_TIME = exports.HMSH_DEPLOYMENT_PAUSE = exports.HMSH_DEPLOYMENT_DELAY = exports.HMSH_ACTIVATION_MAX_RETRY = exports.HMSH_QUORUM_DELAY_MS = exports.HMSH_QUORUM_ROLLCALL_CYCLES = exports.HMSH_STATUS_UNKNOWN = exports.HMSH_CODE_MEMFLOW_RETRYABLE = exports.HMSH_CODE_MEMFLOW_FATAL = exports.HMSH_CODE_MEMFLOW_MAXED = exports.HMSH_CODE_MEMFLOW_TIMEOUT = exports.HMSH_CODE_MEMFLOW_WAIT = exports.HMSH_CODE_MEMFLOW_PROXY = exports.HMSH_CODE_MEMFLOW_CHILD = exports.HMSH_CODE_MEMFLOW_ALL = exports.HMSH_CODE_MEMFLOW_SLEEP = exports.HMSH_CODE_UNACKED = exports.HMSH_CODE_TIMEOUT = exports.HMSH_CODE_UNKNOWN = exports.HMSH_CODE_INTERRUPT = exports.HMSH_CODE_NOTFOUND = exports.HMSH_CODE_PENDING = exports.HMSH_CODE_SUCCESS = exports.HMSH_IS_CLUSTER = exports.HMSH_SIGNAL_EXPIRE = exports.HMSH_TELEMETRY = exports.HMSH_LOGLEVEL = void 0;
4
+ exports.HMSH_SERIALIZER_COMPRESSION_THRESHOLD = exports.HMSH_ROUTER_POLL_FALLBACK_INTERVAL = void 0;
4
5
  /**
5
6
  * Determines the log level for the application. The default is 'info'.
6
7
  */
@@ -129,6 +130,15 @@ exports.HMSH_FIDELITY_SECONDS = process.env.HMSH_FIDELITY_SECONDS
129
130
  ? TEST_FIDELITY_SECONDS
130
131
  : BASE_FIDELITY_SECONDS;
131
132
  exports.HMSH_SCOUT_INTERVAL_SECONDS = parseInt(process.env.HMSH_SCOUT_INTERVAL_SECONDS, 10) || 60;
133
+ // ROUTER SCOUT - polls for visible messages when LISTEN/NOTIFY is insufficient
134
+ exports.HMSH_ROUTER_SCOUT_INTERVAL_SECONDS = parseInt(process.env.HMSH_ROUTER_SCOUT_INTERVAL_SECONDS, 10) || 60;
135
+ const BASE_ROUTER_SCOUT_INTERVAL_MS = 7000;
136
+ const TEST_ROUTER_SCOUT_INTERVAL_MS = 7000;
137
+ exports.HMSH_ROUTER_SCOUT_INTERVAL_MS = process.env.HMSH_ROUTER_SCOUT_INTERVAL_MS
138
+ ? parseInt(process.env.HMSH_ROUTER_SCOUT_INTERVAL_MS, 10)
139
+ : process.env.NODE_ENV === 'test'
140
+ ? TEST_ROUTER_SCOUT_INTERVAL_MS
141
+ : BASE_ROUTER_SCOUT_INTERVAL_MS;
132
142
  // UTILS
133
143
  exports.HMSH_GUID_SIZE = Math.min(parseInt(process.env.HMSH_GUID_SIZE, 10) || 22, 32);
134
144
  /**
@@ -141,6 +151,11 @@ exports.DEFAULT_TASK_QUEUE = 'default';
141
151
  * PostgreSQL hard limit is 8000 bytes; default 7500 provides safety margin.
142
152
  */
143
153
  exports.HMSH_NOTIFY_PAYLOAD_LIMIT = parseInt(process.env.HMSH_NOTIFY_PAYLOAD_LIMIT, 10) || 7500;
154
+ /**
155
+ * PostgreSQL LISTEN/NOTIFY fallback polling interval in milliseconds.
156
+ * Used when LISTEN/NOTIFY is unavailable or fails. Default 30 seconds.
157
+ */
158
+ exports.HMSH_ROUTER_POLL_FALLBACK_INTERVAL = parseInt(process.env.HOTMESH_POSTGRES_FALLBACK_INTERVAL, 10) || 30000;
144
159
  /**
145
160
  * Serializer compression threshold. When a stringified object exceeds this size
146
161
  * in bytes, it will be gzipped and base64 encoded (with /b prefix) to reduce
@@ -97,6 +97,33 @@ export declare function isValidCron(cronExpression: string): boolean;
97
97
  * used by the `ms` npm package as the input.
98
98
  */
99
99
  export declare const s: (input: string) => number;
100
+ /**
101
+ * Normalizes retry policy configuration to a consistent format.
102
+ * Converts maximumInterval to seconds and applies defaults.
103
+ *
104
+ * @param policy - Retry policy to normalize
105
+ * @param defaults - Default values to use if not specified
106
+ * @returns Normalized retry policy with numeric values
107
+ *
108
+ * @example
109
+ * ```typescript
110
+ * const normalized = normalizeRetryPolicy({
111
+ * maximumAttempts: 5,
112
+ * backoffCoefficient: 2,
113
+ * maximumInterval: '300s',
114
+ * });
115
+ * // Returns: { max_retry_attempts: 5, backoff_coefficient: 2, maximum_interval_seconds: 300 }
116
+ * ```
117
+ */
118
+ export declare function normalizeRetryPolicy(policy?: import('../types/stream').RetryPolicy, defaults?: {
119
+ maximumAttempts: number;
120
+ backoffCoefficient: number;
121
+ maximumInterval: number;
122
+ }): {
123
+ max_retry_attempts: number;
124
+ backoff_coefficient: number;
125
+ maximum_interval_seconds: number;
126
+ };
100
127
  /**
101
128
  * @private
102
129
  */
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
3
3
  return (mod && mod.__esModule) ? mod : { "default": mod };
4
4
  };
5
5
  Object.defineProperty(exports, "__esModule", { value: true });
6
- exports.arrayToHash = exports.isStreamMessage = exports.parseStreamMessage = exports.s = exports.isValidCron = exports.restoreHierarchy = exports.getValueByPath = exports.getIndexedHash = exports.getSymVal = exports.getSymKey = exports.formatISODate = exports.getTimeSeries = exports.getSubscriptionTopic = exports.findSubscriptionForTrigger = exports.findTopKey = exports.matchesStatus = exports.matchesStatusCode = exports.polyfill = exports.identifyProvider = exports.XSleepFor = exports.sleepImmediate = exports.sleepFor = exports.guid = exports.deterministicRandom = exports.deepCopy = exports.getSystemHealth = exports.hashOptions = void 0;
6
+ exports.arrayToHash = exports.isStreamMessage = exports.parseStreamMessage = exports.normalizeRetryPolicy = exports.s = exports.isValidCron = exports.restoreHierarchy = exports.getValueByPath = exports.getIndexedHash = exports.getSymVal = exports.getSymKey = exports.formatISODate = exports.getTimeSeries = exports.getSubscriptionTopic = exports.findSubscriptionForTrigger = exports.findTopKey = exports.matchesStatus = exports.matchesStatusCode = exports.polyfill = exports.identifyProvider = exports.XSleepFor = exports.sleepImmediate = exports.sleepFor = exports.guid = exports.deterministicRandom = exports.deepCopy = exports.getSystemHealth = exports.hashOptions = void 0;
7
7
  const os_1 = __importDefault(require("os"));
8
8
  const crypto_1 = require("crypto");
9
9
  const nanoid_1 = require("nanoid");
@@ -79,42 +79,14 @@ function identifyProvider(provider) {
79
79
  else if (provider.toString().toLowerCase().includes('nats')) {
80
80
  return 'nats';
81
81
  }
82
- else if ('defineCommand' in prototype ||
83
- Object.keys(prototype).includes('multi')) {
84
- return 'ioredis';
85
- }
86
- else if (Object.keys(prototype).includes('Multi')) {
87
- return 'redis';
88
- }
89
- if (provider.constructor) {
90
- if (provider.constructor.name === 'Redis' ||
91
- provider.constructor.name === 'EventEmitter') {
92
- if ('hset' in provider) {
93
- return 'ioredis';
94
- }
95
- }
96
- else if (provider.constructor.name === 'ProviderClient' ||
97
- provider.constructor.name === 'Commander') {
98
- if ('HSET' in provider) {
99
- return 'redis';
100
- }
101
- }
102
- }
103
- let type = null;
104
82
  if (Object.keys(provider).includes('connection') ||
105
83
  !isNaN(provider.totalCount) && !isNaN(provider.idleCount)) {
106
- type = 'postgres';
107
- }
108
- else if (Object.keys(provider).includes('Pipeline')) {
109
- type = 'ioredis';
110
- }
111
- else if (Object.keys(provider).includes('createClient')) {
112
- type = 'redis';
84
+ return 'postgres';
113
85
  }
114
86
  else if (prototype.constructor.toString().includes('NatsConnectionImpl')) {
115
- type = 'nats';
87
+ return 'nats';
116
88
  }
117
- return type;
89
+ return null;
118
90
  }
119
91
  exports.identifyProvider = identifyProvider;
120
92
  /**
@@ -316,6 +288,57 @@ const s = (input) => {
316
288
  return (0, ms_1.default)(input) / 1000;
317
289
  };
318
290
  exports.s = s;
291
/**
 * Produces a normalized retry-policy record with snake_case numeric fields.
 * String durations in `maximumInterval` (e.g. '300s') are converted to whole
 * seconds via the module's `s()` helper; any field missing from `policy`
 * falls back to the corresponding entry in `defaults`.
 *
 * @param policy - Optional caller-supplied retry policy.
 * @param defaults - Fallback values applied for any unspecified field.
 * @returns `{ max_retry_attempts, backoff_coefficient, maximum_interval_seconds }`
 *
 * @example
 * ```typescript
 * const normalized = normalizeRetryPolicy({
 *   maximumAttempts: 5,
 *   backoffCoefficient: 2,
 *   maximumInterval: '300s',
 * });
 * // => { max_retry_attempts: 5, backoff_coefficient: 2, maximum_interval_seconds: 300 }
 * ```
 */
function normalizeRetryPolicy(policy, defaults = {
    maximumAttempts: 3,
    backoffCoefficient: 10,
    maximumInterval: 120,
}) {
    // No policy supplied: every field comes straight from the defaults.
    if (!policy) {
        return {
            max_retry_attempts: defaults.maximumAttempts,
            backoff_coefficient: defaults.backoffCoefficient,
            maximum_interval_seconds: defaults.maximumInterval,
        };
    }
    const rawInterval = policy.maximumInterval;
    // Strings are parsed to seconds by `s()`; numbers pass through unchanged;
    // an absent interval falls back to the default (already in seconds).
    const intervalSeconds = rawInterval === undefined
        ? defaults.maximumInterval
        : typeof rawInterval === 'string'
            ? (0, exports.s)(rawInterval)
            : rawInterval;
    // `??` (not `||`) so explicit zeroes in the policy are respected.
    return {
        max_retry_attempts: policy.maximumAttempts ?? defaults.maximumAttempts,
        backoff_coefficient: policy.backoffCoefficient ?? defaults.backoffCoefficient,
        maximum_interval_seconds: intervalSeconds,
    };
}
341
+ exports.normalizeRetryPolicy = normalizeRetryPolicy;
319
342
  /**
320
343
  * @private
321
344
  */