@hotmeshio/hotmesh 0.5.5 → 0.5.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/README.md +7 -19
  2. package/build/package.json +3 -2
  3. package/build/services/activities/trigger.js +1 -1
  4. package/build/services/connector/factory.js +2 -1
  5. package/build/services/connector/providers/postgres.js +11 -6
  6. package/build/services/memflow/client.js +4 -2
  7. package/build/services/memflow/index.d.ts +154 -34
  8. package/build/services/memflow/index.js +165 -33
  9. package/build/services/memflow/interceptor.d.ts +241 -0
  10. package/build/services/memflow/interceptor.js +256 -0
  11. package/build/services/memflow/worker.js +10 -1
  12. package/build/services/memflow/workflow/execChild.js +3 -1
  13. package/build/services/memflow/workflow/execHook.js +1 -1
  14. package/build/services/memflow/workflow/hook.js +4 -2
  15. package/build/services/memflow/workflow/proxyActivities.js +2 -1
  16. package/build/services/router/consumption/index.js +23 -9
  17. package/build/services/router/error-handling/index.js +3 -3
  18. package/build/services/search/providers/postgres/postgres.js +47 -19
  19. package/build/services/store/providers/postgres/kvtypes/hash/basic.js +1 -1
  20. package/build/services/store/providers/postgres/kvtypes/hash/index.js +2 -2
  21. package/build/services/store/providers/postgres/kvtypes/hash/jsonb.js +11 -11
  22. package/build/services/store/providers/postgres/postgres.js +8 -8
  23. package/build/services/stream/providers/postgres/postgres.js +23 -20
  24. package/build/services/sub/providers/postgres/postgres.js +11 -3
  25. package/build/services/task/index.js +4 -4
  26. package/build/types/memflow.d.ts +78 -0
  27. package/package.json +3 -2
package/README.md CHANGED
@@ -4,30 +4,18 @@
4
4
 
5
5
  ![beta release](https://img.shields.io/badge/release-beta-blue.svg) ![made with typescript](https://img.shields.io/badge/built%20with-typescript-lightblue.svg)
6
6
 
7
- HotMesh removes the repetitive glue of building durable agents, pipelines, and long‑running workflows. Instead of you designing queues, schedulers, cache layers, and ad‑hoc state stores for each agent or pipeline, HotMesh standardizes the pattern:
8
-
9
- * **Entity (Core Memory)**: The authoritative JSONB document + its indexable “surface” fields.
10
- * **Hooks (Durable Units of Work)**: Re‑entrant, idempotent functions that *maintain* the entity over time.
11
- * **Workflow (Coordinator)**: The thin orchestration entry that seeds state, spawns hooks, and optionally synthesizes results.
12
- * **Commands (State Mutation API)**: Atomic `set / merge / append / increment / tag / signal` updates with optimistic invariants handled by Postgres transactions.
13
-
14
- You focus on *what should change in memory*; HotMesh handles *how it changes safely and durably.*
7
+ HotMesh removes the repetitive glue of building durable agents, pipelines, and long‑running workflows. You focus on *what should change*; HotMesh handles *how it changes safely and durably.*
15
8
 
16
9
  ---
17
10
 
18
- ## Why It’s Easier with HotMesh
11
+ ## Why Choose HotMesh
19
12
 
20
- | Problem You Usually Solve Manually | HotMesh Built‑In | Impact |
21
- | --------------------------------------------------- | ----------------------------------------- | ---------------------------------------------- |
22
- | Designing per‑agent persistence and caches | Unified JSONB entity + typed accessors | One memory model across agents/pipelines |
23
- | Preventing race conditions on shared state | Transactional hook writes | Safe parallel maintenance |
24
- | Coordinating multi-perspective / multi-step work | Hook spawning + signals | Decomposed work without orchestration glue |
25
- | Schema evolution / optional fields | Flexible JSONB + selective indexes | Add / adapt state incrementally |
26
- | Querying live pipeline / agent status | SQL over materialized surfaces | Operational observability using standard tools |
27
- | Avoiding duplicate side-effects during retry/replay | Deterministic re‑entry + idempotent hooks | Simplifies error handling |
28
- | Per‑tenant isolation | Schema (or prefix) scoping | Clean multi‑tenant boundary |
29
- | Background progression / fan‑out | `execHook` + signals | Natural concurrency without queue plumbing |
30
13
 
14
+ - **One memory model** across all your agents and pipelines. No more designing custom persistence for each workflow.
15
+ - **Automatic reliability** with transactional safety, replay protection, and crash recovery built-in.
16
+ - **Natural concurrency** through isolated hooks that can run in parallel without coordination overhead.
17
+ - **Operational transparency** using standard SQL to query live pipeline status and agent memory.
18
+ - **Multi-tenant ready** with clean schema isolation and flexible indexing.
31
19
  ---
32
20
 
33
21
  ## Core Abstractions
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hotmeshio/hotmesh",
3
- "version": "0.5.5",
3
+ "version": "0.5.6",
4
4
  "description": "Permanent-Memory Workflows & AI Agents",
5
5
  "main": "./build/index.js",
6
6
  "types": "./build/index.d.ts",
@@ -31,7 +31,8 @@
31
31
  "test:memflow:basic": "HMSH_LOGLEVEL=info NODE_ENV=test jest ./tests/memflow/basic/postgres.test.ts --detectOpenHandles --forceExit --verbose",
32
32
  "test:memflow:collision": "NODE_ENV=test jest ./tests/memflow/collision/*.test.ts --detectOpenHandles --forceExit --verbose",
33
33
  "test:memflow:fatal": "NODE_ENV=test jest ./tests/memflow/fatal/*.test.ts --detectOpenHandles --forceExit --verbose",
34
- "test:memflow:goodbye": "NODE_ENV=test jest ./tests/memflow/goodbye/*.test.ts --detectOpenHandles --forceExit --verbose",
34
+ "test:memflow:goodbye": "NODE_ENV=test HMSH_LOGLEVEL=debug jest ./tests/memflow/goodbye/postgres.test.ts --detectOpenHandles --forceExit --verbose",
35
+ "test:memflow:interceptor": "NODE_ENV=test HMSH_LOGLEVEL=debug jest ./tests/memflow/interceptor/postgres.test.ts --detectOpenHandles --forceExit --verbose",
35
36
  "test:memflow:entity": "NODE_ENV=test HMSH_LOGLEVEL=debug jest ./tests/memflow/entity/postgres.test.ts --detectOpenHandles --forceExit --verbose",
36
37
  "test:memflow:agent": "NODE_ENV=test HMSH_LOGLEVEL=debug jest ./tests/memflow/agent/postgres.test.ts --detectOpenHandles --forceExit --verbose",
37
38
  "test:memflow:hello": "HMSH_TELEMETRY=debug HMSH_LOGLEVEL=debug HMSH_IS_CLUSTER=true NODE_ENV=test jest ./tests/memflow/helloworld/postgres.test.ts --detectOpenHandles --forceExit --verbose",
@@ -8,8 +8,8 @@ const pipe_1 = require("../pipe");
8
8
  const reporter_1 = require("../reporter");
9
9
  const serializer_1 = require("../serializer");
10
10
  const telemetry_1 = require("../telemetry");
11
- const activity_1 = require("./activity");
12
11
  const mapper_1 = require("../mapper");
12
+ const activity_1 = require("./activity");
13
13
  class Trigger extends activity_1.Activity {
14
14
  constructor(config, data, metadata, hook, engine, context) {
15
15
  super(config, data, metadata, hook, engine, context);
@@ -90,7 +90,8 @@ class ConnectorService {
90
90
  //if connecting as a poolClient for subscription, auto connect the client
91
91
  const bAutoConnect = field === 'sub';
92
92
  // Use taskQueue-based connection pooling for PostgreSQL
93
- clientInstance = await postgres_1.PostgresConnection.getOrCreateTaskQueueConnection(id, taskQueue, providerClass, options, { connect: bAutoConnect, provider: providerName });
93
+ clientInstance =
94
+ await postgres_1.PostgresConnection.getOrCreateTaskQueueConnection(id, taskQueue, providerClass, options, { connect: bAutoConnect, provider: providerName });
94
95
  break;
95
96
  default:
96
97
  throw new Error(`Unknown provider type: ${providerType}`);
@@ -23,13 +23,13 @@ class PostgresConnection extends __1.AbstractConnection {
23
23
  const taskQueueDetails = Array.from(this.taskQueueConnections.entries()).map(([key, connection]) => ({
24
24
  key,
25
25
  connectionId: connection.getConnectionId() || 'unknown',
26
- reusedCount: connection.reusedCount || 0
26
+ reusedCount: connection.reusedCount || 0,
27
27
  }));
28
28
  return {
29
29
  totalPoolClients: this.poolClientInstances.size,
30
30
  totalConnections: this.connectionInstances.size,
31
31
  taskQueueConnections: this.taskQueueConnections.size,
32
- taskQueueDetails
32
+ taskQueueDetails,
33
33
  };
34
34
  }
35
35
  /**
@@ -41,7 +41,7 @@ class PostgresConnection extends __1.AbstractConnection {
41
41
  if (logger) {
42
42
  logger.info('postgres-connection-stats', {
43
43
  ...stats,
44
- message
44
+ message,
45
45
  });
46
46
  }
47
47
  else {
@@ -54,8 +54,12 @@ class PostgresConnection extends __1.AbstractConnection {
54
54
  static getPoolingEffectiveness() {
55
55
  const stats = this.getConnectionStats();
56
56
  const totalReuses = stats.taskQueueDetails.reduce((sum, detail) => sum + detail.reusedCount, 0);
57
- const averageReusesPerPool = stats.taskQueueConnections > 0 ? totalReuses / stats.taskQueueConnections : 0;
58
- const poolingEfficiency = stats.totalConnections > 0 ? (stats.taskQueueConnections / stats.totalConnections) * 100 : 0;
57
+ const averageReusesPerPool = stats.taskQueueConnections > 0
58
+ ? totalReuses / stats.taskQueueConnections
59
+ : 0;
60
+ const poolingEfficiency = stats.totalConnections > 0
61
+ ? stats.taskQueueConnections / stats.totalConnections * 100
62
+ : 0;
59
63
  return {
60
64
  totalConnections: stats.totalConnections,
61
65
  taskQueuePools: stats.taskQueueConnections,
@@ -164,7 +168,8 @@ class PostgresConnection extends __1.AbstractConnection {
164
168
  if (this.taskQueueConnections.has(connectionKey)) {
165
169
  const existingConnection = this.taskQueueConnections.get(connectionKey);
166
170
  // Track reuse count for monitoring
167
- existingConnection.reusedCount = (existingConnection.reusedCount || 0) + 1;
171
+ existingConnection.reusedCount =
172
+ (existingConnection.reusedCount || 0) + 1;
168
173
  this.logger.debug('postgres-connection-reused', {
169
174
  connectionKey,
170
175
  taskQueue,
@@ -69,7 +69,7 @@ class ClientService {
69
69
  }
70
70
  //init, but don't await
71
71
  const readonly = this.connection.readonly ?? undefined;
72
- let hotMeshClient = hotmesh_1.HotMesh.init({
72
+ const hotMeshClient = hotmesh_1.HotMesh.init({
73
73
  appId: targetNS,
74
74
  taskQueue,
75
75
  logLevel: enums_1.HMSH_LOGLEVEL,
@@ -126,7 +126,9 @@ class ClientService {
126
126
  */
127
127
  start: async (options) => {
128
128
  const taskQueueName = options.taskQueue ?? options.entity;
129
- const workflowName = options.taskQueue ? options.workflowName : (options.entity ?? options.workflowName);
129
+ const workflowName = options.taskQueue
130
+ ? options.workflowName
131
+ : options.entity ?? options.workflowName;
130
132
  const trc = options.workflowTrace;
131
133
  const spn = options.workflowSpan;
132
134
  //hotmesh `topic` is equivalent to `queue+workflowname` pattern in other systems
@@ -1,4 +1,4 @@
1
- import { ContextType } from '../../types/memflow';
1
+ import { ContextType, WorkflowInterceptor } from '../../types/memflow';
2
2
  import { ClientService } from './client';
3
3
  import { ConnectionService } from './connection';
4
4
  import { Search } from './search';
@@ -8,60 +8,170 @@ import { WorkflowService } from './workflow';
8
8
  import { WorkflowHandleService } from './handle';
9
9
  import { didInterrupt } from './workflow/interruption';
10
10
  /**
11
- * The MemFlow service is a collection of services that
12
- * emulate Temporal's capabilities, but instead are
13
- * backed by Postgres or Redis/ValKey. The following lifecycle example
14
- * demonstrates how to start a new workflow, subscribe
15
- * to the result, and shutdown the system.
11
+ * The MemFlow service provides a Temporal-compatible workflow framework backed by
12
+ * Postgres or Redis/ValKey. It offers durable execution, entity-based memory management,
13
+ * and composable workflows.
16
14
  *
17
- * @example
15
+ * ## Core Features
16
+ *
17
+ * ### 1. Entity-Based Memory Model
18
+ * Each workflow has a durable JSONB entity that serves as its memory:
19
+ * ```typescript
20
+ * export async function researchAgent(query: string) {
21
+ * const agent = await MemFlow.workflow.entity();
22
+ *
23
+ * // Initialize entity state
24
+ * await agent.set({
25
+ * query,
26
+ * findings: [],
27
+ * status: 'researching'
28
+ * });
29
+ *
30
+ * // Update state atomically
31
+ * await agent.merge({ status: 'analyzing' });
32
+ * await agent.append('findings', newFinding);
33
+ * }
34
+ * ```
35
+ *
36
+ * ### 2. Hook Functions & Workflow Coordination
37
+ * Spawn and coordinate multiple perspectives/phases:
18
38
  * ```typescript
19
- * import { Client, Worker, MemFlow, HotMesh } from '@hotmeshio/hotmesh';
20
- * import { Client as Postgres} from 'pg';
21
- * import * as workflows from './workflows';
39
+ * // Launch parallel research perspectives
40
+ * await MemFlow.workflow.execHook({
41
+ * taskQueue: 'research',
42
+ * workflowName: 'optimisticView',
43
+ * args: [query],
44
+ * signalId: 'optimistic-complete'
45
+ * });
46
+ *
47
+ * await MemFlow.workflow.execHook({
48
+ * taskQueue: 'research',
49
+ * workflowName: 'skepticalView',
50
+ * args: [query],
51
+ * signalId: 'skeptical-complete'
52
+ * });
22
53
  *
23
- * //1) Initialize the worker
54
+ * // Wait for both perspectives
55
+ * await Promise.all([
56
+ * MemFlow.workflow.waitFor('optimistic-complete'),
57
+ * MemFlow.workflow.waitFor('skeptical-complete')
58
+ * ]);
59
+ * ```
60
+ *
61
+ * ### 3. Durable Activities & Proxies
62
+ * Define and execute durable activities with automatic retry:
63
+ * ```typescript
64
+ * const activities = MemFlow.workflow.proxyActivities<{
65
+ * analyzeDocument: typeof analyzeDocument;
66
+ * validateFindings: typeof validateFindings;
67
+ * }>({
68
+ * activities: { analyzeDocument, validateFindings },
69
+ * retryPolicy: {
70
+ * maximumAttempts: 3,
71
+ * backoffCoefficient: 2
72
+ * }
73
+ * });
74
+ *
75
+ * // Activities are durable and automatically retried
76
+ * const analysis = await activities.analyzeDocument(data);
77
+ * const validation = await activities.validateFindings(analysis);
78
+ * ```
79
+ *
80
+ * ### 4. Workflow Composition
81
+ * Build complex workflows through composition:
82
+ * ```typescript
83
+ * // Start a child workflow
84
+ * const childResult = await MemFlow.workflow.execChild({
85
+ * taskQueue: 'analysis',
86
+ * workflowName: 'detailedAnalysis',
87
+ * args: [data],
88
+ * // Child workflow config
89
+ * config: {
90
+ * maximumAttempts: 5,
91
+ * backoffCoefficient: 2
92
+ * }
93
+ * });
94
+ *
95
+ * // Fire-and-forget child workflow
96
+ * await MemFlow.workflow.startChild({
97
+ * taskQueue: 'notifications',
98
+ * workflowName: 'sendUpdates',
99
+ * args: [updates]
100
+ * });
101
+ * ```
102
+ *
103
+ * ### 5. Workflow Interceptors
104
+ * Add cross-cutting concerns through interceptors that run as durable functions:
105
+ * ```typescript
106
+ * // Add audit interceptor that uses MemFlow functions
107
+ * MemFlow.registerInterceptor({
108
+ * async execute(ctx, next) {
109
+ * try {
110
+ * // Interceptors can use MemFlow functions and participate in replay
111
+ * const entity = await MemFlow.workflow.entity();
112
+ * await entity.append('auditLog', {
113
+ * action: 'started',
114
+ * timestamp: new Date().toISOString()
115
+ * });
116
+ *
117
+ * // Rate limiting with durable sleep
118
+ * await MemFlow.workflow.sleepFor('100 milliseconds');
119
+ *
120
+ * const result = await next();
121
+ *
122
+ * await entity.append('auditLog', {
123
+ * action: 'completed',
124
+ * timestamp: new Date().toISOString()
125
+ * });
126
+ *
127
+ * return result;
128
+ * } catch (err) {
129
+ * // CRITICAL: Always check for HotMesh interruptions
130
+ * if (MemFlow.didInterrupt(err)) {
131
+ * throw err; // Rethrow for replay system
132
+ * }
133
+ * throw err;
134
+ * }
135
+ * }
136
+ * });
137
+ * ```
138
+ *
139
+ * ## Basic Usage Example
140
+ *
141
+ * ```typescript
142
+ * import { Client, Worker, MemFlow } from '@hotmeshio/hotmesh';
143
+ * import { Client as Postgres } from 'pg';
144
+ *
145
+ * // Initialize worker
24
146
  * await Worker.create({
25
147
  * connection: {
26
148
  * class: Postgres,
27
- * options: {
28
- * connectionString: 'postgresql://usr:pwd@localhost:5432/db',
29
- * }
30
- * }
149
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
150
+ * },
31
151
  * taskQueue: 'default',
32
- * namespace: 'memflow',
33
- * workflow: workflows.example,
34
- * options: {
35
- * backoffCoefficient: 2,
36
- * maximumAttempts: 1_000,
37
- * maximumInterval: '5 seconds'
38
- * }
152
+ * workflow: workflows.example
39
153
  * });
40
154
  *
41
- * //2) initialize the client
155
+ * // Initialize client
42
156
  * const client = new Client({
43
157
  * connection: {
44
158
  * class: Postgres,
45
- * options: {
46
- * connectionString: 'postgresql://usr:pwd@localhost:5432/db',
47
- * }
159
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
48
160
  * }
49
161
  * });
50
162
  *
51
- * //3) start a new workflow
163
+ * // Start workflow
52
164
  * const handle = await client.workflow.start({
53
- * args: ['HotMesh', 'es'],
165
+ * args: ['input data'],
54
166
  * taskQueue: 'default',
55
167
  * workflowName: 'example',
56
- * workflowId: HotMesh.guid(),
57
- * namespace: 'memflow',
168
+ * workflowId: MemFlow.guid()
58
169
  * });
59
170
  *
60
- * //4) subscribe to the eventual result
61
- * console.log('\nRESPONSE', await handle.result(), '\n');
62
- * //logs '¡Hola, HotMesh!'
171
+ * // Get result
172
+ * const result = await handle.result();
63
173
  *
64
- * //5) Shutdown (typically on sigint)
174
+ * // Cleanup
65
175
  * await MemFlow.shutdown();
66
176
  * ```
67
177
  */
@@ -114,6 +224,16 @@ declare class MemFlowClass {
114
224
  * @see {@link utils/interruption.didInterrupt} for detailed documentation
115
225
  */
116
226
  static didInterrupt: typeof didInterrupt;
227
+ private static interceptorService;
228
+ /**
229
+ * Register a workflow interceptor
230
+ * @param interceptor The interceptor to register
231
+ */
232
+ static registerInterceptor(interceptor: WorkflowInterceptor): void;
233
+ /**
234
+ * Clear all registered workflow interceptors
235
+ */
236
+ static clearInterceptors(): void;
117
237
  /**
118
238
  * Shutdown everything. All connections, workers, and clients will be closed.
119
239
  * Include in your signal handlers to ensure a clean shutdown.
@@ -10,61 +10,172 @@ const worker_1 = require("./worker");
10
10
  const workflow_1 = require("./workflow");
11
11
  const handle_1 = require("./handle");
12
12
  const interruption_1 = require("./workflow/interruption");
13
+ const interceptor_1 = require("./interceptor");
13
14
  /**
14
- * The MemFlow service is a collection of services that
15
- * emulate Temporal's capabilities, but instead are
16
- * backed by Postgres or Redis/ValKey. The following lifecycle example
17
- * demonstrates how to start a new workflow, subscribe
18
- * to the result, and shutdown the system.
15
+ * The MemFlow service provides a Temporal-compatible workflow framework backed by
16
+ * Postgres or Redis/ValKey. It offers durable execution, entity-based memory management,
17
+ * and composable workflows.
19
18
  *
20
- * @example
19
+ * ## Core Features
20
+ *
21
+ * ### 1. Entity-Based Memory Model
22
+ * Each workflow has a durable JSONB entity that serves as its memory:
23
+ * ```typescript
24
+ * export async function researchAgent(query: string) {
25
+ * const agent = await MemFlow.workflow.entity();
26
+ *
27
+ * // Initialize entity state
28
+ * await agent.set({
29
+ * query,
30
+ * findings: [],
31
+ * status: 'researching'
32
+ * });
33
+ *
34
+ * // Update state atomically
35
+ * await agent.merge({ status: 'analyzing' });
36
+ * await agent.append('findings', newFinding);
37
+ * }
38
+ * ```
39
+ *
40
+ * ### 2. Hook Functions & Workflow Coordination
41
+ * Spawn and coordinate multiple perspectives/phases:
42
+ * ```typescript
43
+ * // Launch parallel research perspectives
44
+ * await MemFlow.workflow.execHook({
45
+ * taskQueue: 'research',
46
+ * workflowName: 'optimisticView',
47
+ * args: [query],
48
+ * signalId: 'optimistic-complete'
49
+ * });
50
+ *
51
+ * await MemFlow.workflow.execHook({
52
+ * taskQueue: 'research',
53
+ * workflowName: 'skepticalView',
54
+ * args: [query],
55
+ * signalId: 'skeptical-complete'
56
+ * });
57
+ *
58
+ * // Wait for both perspectives
59
+ * await Promise.all([
60
+ * MemFlow.workflow.waitFor('optimistic-complete'),
61
+ * MemFlow.workflow.waitFor('skeptical-complete')
62
+ * ]);
63
+ * ```
64
+ *
65
+ * ### 3. Durable Activities & Proxies
66
+ * Define and execute durable activities with automatic retry:
67
+ * ```typescript
68
+ * const activities = MemFlow.workflow.proxyActivities<{
69
+ * analyzeDocument: typeof analyzeDocument;
70
+ * validateFindings: typeof validateFindings;
71
+ * }>({
72
+ * activities: { analyzeDocument, validateFindings },
73
+ * retryPolicy: {
74
+ * maximumAttempts: 3,
75
+ * backoffCoefficient: 2
76
+ * }
77
+ * });
78
+ *
79
+ * // Activities are durable and automatically retried
80
+ * const analysis = await activities.analyzeDocument(data);
81
+ * const validation = await activities.validateFindings(analysis);
82
+ * ```
83
+ *
84
+ * ### 4. Workflow Composition
85
+ * Build complex workflows through composition:
86
+ * ```typescript
87
+ * // Start a child workflow
88
+ * const childResult = await MemFlow.workflow.execChild({
89
+ * taskQueue: 'analysis',
90
+ * workflowName: 'detailedAnalysis',
91
+ * args: [data],
92
+ * // Child workflow config
93
+ * config: {
94
+ * maximumAttempts: 5,
95
+ * backoffCoefficient: 2
96
+ * }
97
+ * });
98
+ *
99
+ * // Fire-and-forget child workflow
100
+ * await MemFlow.workflow.startChild({
101
+ * taskQueue: 'notifications',
102
+ * workflowName: 'sendUpdates',
103
+ * args: [updates]
104
+ * });
105
+ * ```
106
+ *
107
+ * ### 5. Workflow Interceptors
108
+ * Add cross-cutting concerns through interceptors that run as durable functions:
21
109
  * ```typescript
22
- * import { Client, Worker, MemFlow, HotMesh } from '@hotmeshio/hotmesh';
23
- * import { Client as Postgres} from 'pg';
24
- * import * as workflows from './workflows';
110
+ * // Add audit interceptor that uses MemFlow functions
111
+ * MemFlow.registerInterceptor({
112
+ * async execute(ctx, next) {
113
+ * try {
114
+ * // Interceptors can use MemFlow functions and participate in replay
115
+ * const entity = await MemFlow.workflow.entity();
116
+ * await entity.append('auditLog', {
117
+ * action: 'started',
118
+ * timestamp: new Date().toISOString()
119
+ * });
120
+ *
121
+ * // Rate limiting with durable sleep
122
+ * await MemFlow.workflow.sleepFor('100 milliseconds');
123
+ *
124
+ * const result = await next();
125
+ *
126
+ * await entity.append('auditLog', {
127
+ * action: 'completed',
128
+ * timestamp: new Date().toISOString()
129
+ * });
25
130
  *
26
- * //1) Initialize the worker
131
+ * return result;
132
+ * } catch (err) {
133
+ * // CRITICAL: Always check for HotMesh interruptions
134
+ * if (MemFlow.didInterrupt(err)) {
135
+ * throw err; // Rethrow for replay system
136
+ * }
137
+ * throw err;
138
+ * }
139
+ * }
140
+ * });
141
+ * ```
142
+ *
143
+ * ## Basic Usage Example
144
+ *
145
+ * ```typescript
146
+ * import { Client, Worker, MemFlow } from '@hotmeshio/hotmesh';
147
+ * import { Client as Postgres } from 'pg';
148
+ *
149
+ * // Initialize worker
27
150
  * await Worker.create({
28
151
  * connection: {
29
152
  * class: Postgres,
30
- * options: {
31
- * connectionString: 'postgresql://usr:pwd@localhost:5432/db',
32
- * }
33
- * }
153
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
154
+ * },
34
155
  * taskQueue: 'default',
35
- * namespace: 'memflow',
36
- * workflow: workflows.example,
37
- * options: {
38
- * backoffCoefficient: 2,
39
- * maximumAttempts: 1_000,
40
- * maximumInterval: '5 seconds'
41
- * }
156
+ * workflow: workflows.example
42
157
  * });
43
158
  *
44
- * //2) initialize the client
159
+ * // Initialize client
45
160
  * const client = new Client({
46
161
  * connection: {
47
162
  * class: Postgres,
48
- * options: {
49
- * connectionString: 'postgresql://usr:pwd@localhost:5432/db',
50
- * }
163
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
51
164
  * }
52
165
  * });
53
166
  *
54
- * //3) start a new workflow
167
+ * // Start workflow
55
168
  * const handle = await client.workflow.start({
56
- * args: ['HotMesh', 'es'],
169
+ * args: ['input data'],
57
170
  * taskQueue: 'default',
58
171
  * workflowName: 'example',
59
- * workflowId: HotMesh.guid(),
60
- * namespace: 'memflow',
172
+ * workflowId: MemFlow.guid()
61
173
  * });
62
174
  *
63
- * //4) subscribe to the eventual result
64
- * console.log('\nRESPONSE', await handle.result(), '\n');
65
- * //logs '¡Hola, HotMesh!'
175
+ * // Get result
176
+ * const result = await handle.result();
66
177
  *
67
- * //5) Shutdown (typically on sigint)
178
+ * // Cleanup
68
179
  * await MemFlow.shutdown();
69
180
  * ```
70
181
  */
@@ -73,6 +184,26 @@ class MemFlowClass {
73
184
  * @private
74
185
  */
75
186
  constructor() { }
187
+ /**
188
+ * Register a workflow interceptor
189
+ * @param interceptor The interceptor to register
190
+ */
191
+ static registerInterceptor(interceptor) {
192
+ MemFlowClass.interceptorService.register(interceptor);
193
+ }
194
+ /**
195
+ * Clear all registered workflow interceptors
196
+ */
197
+ static clearInterceptors() {
198
+ MemFlowClass.interceptorService.clear();
199
+ }
200
+ /**
201
+ * Get the interceptor service instance
202
+ * @internal
203
+ */
204
+ static getInterceptorService() {
205
+ return MemFlowClass.interceptorService;
206
+ }
76
207
  /**
77
208
  * Shutdown everything. All connections, workers, and clients will be closed.
78
209
  * Include in your signal handlers to ensure a clean shutdown.
@@ -128,3 +259,4 @@ MemFlowClass.workflow = workflow_1.WorkflowService;
128
259
  * @see {@link utils/interruption.didInterrupt} for detailed documentation
129
260
  */
130
261
  MemFlowClass.didInterrupt = interruption_1.didInterrupt;
262
+ MemFlowClass.interceptorService = new interceptor_1.InterceptorService();