@hotmeshio/hotmesh 0.6.1 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/README.md +179 -142
  2. package/build/modules/enums.d.ts +7 -0
  3. package/build/modules/enums.js +16 -1
  4. package/build/modules/utils.d.ts +27 -0
  5. package/build/modules/utils.js +52 -1
  6. package/build/package.json +10 -8
  7. package/build/services/connector/providers/postgres.js +3 -0
  8. package/build/services/hotmesh/index.d.ts +66 -15
  9. package/build/services/hotmesh/index.js +84 -15
  10. package/build/services/memflow/index.d.ts +100 -14
  11. package/build/services/memflow/index.js +100 -14
  12. package/build/services/memflow/worker.d.ts +97 -0
  13. package/build/services/memflow/worker.js +217 -0
  14. package/build/services/memflow/workflow/proxyActivities.d.ts +74 -3
  15. package/build/services/memflow/workflow/proxyActivities.js +81 -4
  16. package/build/services/router/consumption/index.d.ts +2 -1
  17. package/build/services/router/consumption/index.js +38 -2
  18. package/build/services/router/error-handling/index.d.ts +3 -3
  19. package/build/services/router/error-handling/index.js +48 -13
  20. package/build/services/router/index.d.ts +1 -0
  21. package/build/services/router/index.js +2 -1
  22. package/build/services/store/index.d.ts +3 -2
  23. package/build/services/store/providers/postgres/kvtypes/hash/basic.js +36 -6
  24. package/build/services/store/providers/postgres/kvtypes/hash/expire.js +12 -2
  25. package/build/services/store/providers/postgres/kvtypes/hash/scan.js +30 -10
  26. package/build/services/store/providers/postgres/kvtypes/list.js +68 -10
  27. package/build/services/store/providers/postgres/kvtypes/string.js +60 -10
  28. package/build/services/store/providers/postgres/kvtypes/zset.js +92 -22
  29. package/build/services/store/providers/postgres/postgres.d.ts +3 -3
  30. package/build/services/store/providers/redis/_base.d.ts +3 -3
  31. package/build/services/store/providers/redis/ioredis.js +17 -7
  32. package/build/services/stream/providers/postgres/kvtables.js +76 -23
  33. package/build/services/stream/providers/postgres/lifecycle.d.ts +19 -0
  34. package/build/services/stream/providers/postgres/lifecycle.js +54 -0
  35. package/build/services/stream/providers/postgres/messages.d.ts +56 -0
  36. package/build/services/stream/providers/postgres/messages.js +253 -0
  37. package/build/services/stream/providers/postgres/notifications.d.ts +59 -0
  38. package/build/services/stream/providers/postgres/notifications.js +357 -0
  39. package/build/services/stream/providers/postgres/postgres.d.ts +110 -11
  40. package/build/services/stream/providers/postgres/postgres.js +196 -488
  41. package/build/services/stream/providers/postgres/scout.d.ts +68 -0
  42. package/build/services/stream/providers/postgres/scout.js +233 -0
  43. package/build/services/stream/providers/postgres/stats.d.ts +49 -0
  44. package/build/services/stream/providers/postgres/stats.js +113 -0
  45. package/build/services/sub/providers/postgres/postgres.js +37 -5
  46. package/build/services/sub/providers/redis/ioredis.js +13 -2
  47. package/build/services/sub/providers/redis/redis.js +13 -2
  48. package/build/services/worker/index.d.ts +1 -0
  49. package/build/services/worker/index.js +2 -0
  50. package/build/types/hotmesh.d.ts +42 -2
  51. package/build/types/index.d.ts +3 -3
  52. package/build/types/memflow.d.ts +32 -0
  53. package/build/types/provider.d.ts +16 -0
  54. package/build/types/stream.d.ts +92 -1
  55. package/package.json +10 -8
@@ -10,30 +10,19 @@ import { StringAnyType, StringStringType } from '../../types/serializer';
10
10
  import { JobStatsInput, GetStatsOptions, IdsResponse, StatsResponse } from '../../types/stats';
11
11
  import { StreamCode, StreamData, StreamDataResponse, StreamStatus } from '../../types/stream';
12
12
  /**
13
- * HotMesh is a distributed, reentrant process orchestration engine that transforms
14
- * Postgres into a resilient service mesh capable of running
13
+ * HotMesh transforms Postgres into a durable workflow orchestration engine capable of running
15
14
  * fault-tolerant workflows across multiple services and systems.
16
15
  *
17
- * ## Core Concepts
18
- *
19
- * **Distributed Quorum Architecture**: HotMesh operates as a distributed quorum
20
- * where multiple engine and worker instances coordinate using CQRS principles.
21
- * Each member reads from assigned topic queues and writes results to other queues,
22
- * creating emergent workflow orchestration without a central controller.
23
- *
24
- * **Reentrant Process Engine**: Unlike traditional workflow engines, HotMesh
25
- * provides built-in retry logic, idempotency, and failure recovery. Your business
26
- * logic doesn't need to handle timeouts or retries - the engine manages all of that.
27
- *
28
16
  * ## Key Features
29
17
  *
30
- * - **Fault Tolerance**: Automatic retry, timeout, and failure recovery
18
+ * - **Fault Tolerance**: Automatic retry with exponential backoff and configurable policies
31
19
  * - **Distributed Execution**: No single point of failure
32
20
  * - **YAML-Driven**: Model-driven development with declarative workflow definitions
33
21
  * - **OpenTelemetry**: Built-in observability and tracing
34
22
  * - **Durable State**: Workflow state persists across system restarts
35
23
  * - **Pattern Matching**: Pub/sub with wildcard pattern support
36
24
  * - **Throttling**: Dynamic flow control and backpressure management
25
+ * - **Retry Policies**: PostgreSQL-native retry configuration with exponential backoff
37
26
  *
38
27
  * ## Architecture
39
28
  *
@@ -225,7 +214,12 @@ declare class HotMesh {
225
214
  * similarly to the engine, but as an array with
226
215
  * multiple worker objects.
227
216
  *
228
- * @example
217
+ * ## Retry Policy Configuration
218
+ *
219
+ * HotMesh supports robust retry policies with exponential backoff for PostgreSQL.
220
+ * Configure retry behavior at the stream level for automatic fault tolerance.
221
+ *
222
+ * @example Basic Configuration
229
223
  * ```typescript
230
224
  * const config: HotMeshConfig = {
231
225
  * appId: 'myapp',
@@ -241,6 +235,57 @@ declare class HotMesh {
241
235
  * };
242
236
  * const hotMesh = await HotMesh.init(config);
243
237
  * ```
238
+ *
239
+ * @example With Retry Policy (PostgreSQL)
240
+ * ```typescript
241
+ * import { HotMesh } from '@hotmeshio/hotmesh';
242
+ * import { Client as Postgres } from 'pg';
243
+ *
244
+ * const hotMesh = await HotMesh.init({
245
+ * appId: 'my-app',
246
+ * engine: {
247
+ * connection: {
248
+ * class: Postgres,
249
+ * options: { connectionString: 'postgresql://...' }
250
+ * },
251
+ * // Default retry policy for engine streams
252
+ * retryPolicy: {
253
+ * maximumAttempts: 5, // Retry up to 5 times
254
+ *       backoffCoefficient: 2,    // Exponential: 2^1, 2^2, 2^3... (2s, 4s, 8s...)
255
+ * maximumInterval: '300s' // Cap delay at 5 minutes
256
+ * }
257
+ * },
258
+ * workers: [{
259
+ * topic: 'order.process',
260
+ * connection: {
261
+ * class: Postgres,
262
+ * options: { connectionString: 'postgresql://...' }
263
+ * },
264
+ * // Worker-specific retry policy
265
+ * retryPolicy: {
266
+ * maximumAttempts: 10,
267
+ * backoffCoefficient: 1.5,
268
+ * maximumInterval: '600s'
269
+ * },
270
+ * callback: async (data) => {
271
+ * // Your business logic here
272
+ * // Failures will automatically retry with exponential backoff
273
+ * return { status: 'success', data: processedData };
274
+ * }
275
+ * }]
276
+ * });
277
+ * ```
278
+ *
279
+ * **Retry Policy Options**:
280
+ * - `maximumAttempts` - Maximum retry attempts before failure (default: 3)
281
+ * - `backoffCoefficient` - Base for exponential backoff calculation (default: 10)
282
+ * - `maximumInterval` - Maximum delay between retries in seconds or duration string (default: '120s')
283
+ *
284
+ * **Retry Delays**: For `backoffCoefficient: 2`, delays are: 2s, 4s, 8s, 16s, 32s...
285
+ * capped at `maximumInterval`.
286
+ *
287
+ * **Note**: Retry policies are stored in PostgreSQL columns for efficient querying and
288
+ * observability. Each retry creates a new message, preserving message immutability.
244
289
  */
245
290
  static init(config: HotMeshConfig): Promise<HotMesh>;
246
291
  /**
@@ -272,6 +317,12 @@ declare class HotMesh {
272
317
  * @private
273
318
  */
274
319
  private initTaskQueue;
320
+ /**
321
+ * Apply retry policy to the stream connection within a ProviderConfig or ProvidersConfig.
322
+ * Handles both short-form (ProviderConfig) and long-form (ProvidersConfig) connection configs.
323
+ * @private
324
+ */
325
+ private applyRetryPolicy;
275
326
  /**
276
327
  * Starts a workflow
277
328
  * @example
@@ -11,30 +11,19 @@ const router_1 = require("../router");
11
11
  const worker_1 = require("../worker");
12
12
  const enums_1 = require("../../modules/enums");
13
13
  /**
14
- * HotMesh is a distributed, reentrant process orchestration engine that transforms
15
- * Postgres into a resilient service mesh capable of running
14
+ * HotMesh transforms Postgres into a durable workflow orchestration engine capable of running
16
15
  * fault-tolerant workflows across multiple services and systems.
17
16
  *
18
- * ## Core Concepts
19
- *
20
- * **Distributed Quorum Architecture**: HotMesh operates as a distributed quorum
21
- * where multiple engine and worker instances coordinate using CQRS principles.
22
- * Each member reads from assigned topic queues and writes results to other queues,
23
- * creating emergent workflow orchestration without a central controller.
24
- *
25
- * **Reentrant Process Engine**: Unlike traditional workflow engines, HotMesh
26
- * provides built-in retry logic, idempotency, and failure recovery. Your business
27
- * logic doesn't need to handle timeouts or retries - the engine manages all of that.
28
- *
29
17
  * ## Key Features
30
18
  *
31
- * - **Fault Tolerance**: Automatic retry, timeout, and failure recovery
19
+ * - **Fault Tolerance**: Automatic retry with exponential backoff and configurable policies
32
20
  * - **Distributed Execution**: No single point of failure
33
21
  * - **YAML-Driven**: Model-driven development with declarative workflow definitions
34
22
  * - **OpenTelemetry**: Built-in observability and tracing
35
23
  * - **Durable State**: Workflow state persists across system restarts
36
24
  * - **Pattern Matching**: Pub/sub with wildcard pattern support
37
25
  * - **Throttling**: Dynamic flow control and backpressure management
26
+ * - **Retry Policies**: PostgreSQL-native retry configuration with exponential backoff
38
27
  *
39
28
  * ## Architecture
40
29
  *
@@ -229,7 +218,12 @@ class HotMesh {
229
218
  * similarly to the engine, but as an array with
230
219
  * multiple worker objects.
231
220
  *
232
- * @example
221
+ * ## Retry Policy Configuration
222
+ *
223
+ * HotMesh supports robust retry policies with exponential backoff for PostgreSQL.
224
+ * Configure retry behavior at the stream level for automatic fault tolerance.
225
+ *
226
+ * @example Basic Configuration
233
227
  * ```typescript
234
228
  * const config: HotMeshConfig = {
235
229
  * appId: 'myapp',
@@ -245,6 +239,57 @@ class HotMesh {
245
239
  * };
246
240
  * const hotMesh = await HotMesh.init(config);
247
241
  * ```
242
+ *
243
+ * @example With Retry Policy (PostgreSQL)
244
+ * ```typescript
245
+ * import { HotMesh } from '@hotmeshio/hotmesh';
246
+ * import { Client as Postgres } from 'pg';
247
+ *
248
+ * const hotMesh = await HotMesh.init({
249
+ * appId: 'my-app',
250
+ * engine: {
251
+ * connection: {
252
+ * class: Postgres,
253
+ * options: { connectionString: 'postgresql://...' }
254
+ * },
255
+ * // Default retry policy for engine streams
256
+ * retryPolicy: {
257
+ * maximumAttempts: 5, // Retry up to 5 times
258
+ *       backoffCoefficient: 2,    // Exponential: 2^1, 2^2, 2^3... (2s, 4s, 8s...)
259
+ * maximumInterval: '300s' // Cap delay at 5 minutes
260
+ * }
261
+ * },
262
+ * workers: [{
263
+ * topic: 'order.process',
264
+ * connection: {
265
+ * class: Postgres,
266
+ * options: { connectionString: 'postgresql://...' }
267
+ * },
268
+ * // Worker-specific retry policy
269
+ * retryPolicy: {
270
+ * maximumAttempts: 10,
271
+ * backoffCoefficient: 1.5,
272
+ * maximumInterval: '600s'
273
+ * },
274
+ * callback: async (data) => {
275
+ * // Your business logic here
276
+ * // Failures will automatically retry with exponential backoff
277
+ * return { status: 'success', data: processedData };
278
+ * }
279
+ * }]
280
+ * });
281
+ * ```
282
+ *
283
+ * **Retry Policy Options**:
284
+ * - `maximumAttempts` - Maximum retry attempts before failure (default: 3)
285
+ * - `backoffCoefficient` - Base for exponential backoff calculation (default: 10)
286
+ * - `maximumInterval` - Maximum delay between retries in seconds or duration string (default: '120s')
287
+ *
288
+ * **Retry Delays**: For `backoffCoefficient: 2`, delays are: 2s, 4s, 8s, 16s, 32s...
289
+ * capped at `maximumInterval`.
290
+ *
291
+ * **Note**: Retry policies are stored in PostgreSQL columns for efficient querying and
292
+ * observability. Each retry creates a new message, preserving message immutability.
248
293
  */
249
294
  static async init(config) {
250
295
  const instance = new HotMesh();
@@ -275,6 +320,10 @@ class HotMesh {
275
320
  if (config.engine.connection.readonly) {
276
321
  config.engine.readonly = true;
277
322
  }
323
+ // Apply retry policy to stream connection if provided
324
+ if (config.engine.retryPolicy) {
325
+ this.applyRetryPolicy(config.engine.connection, config.engine.retryPolicy);
326
+ }
278
327
  // Initialize task queue for engine
279
328
  config.engine.taskQueue = this.initTaskQueue(config.engine.taskQueue, config.taskQueue);
280
329
  await factory_1.ConnectorService.initClients(config.engine);
@@ -313,6 +362,10 @@ class HotMesh {
313
362
  // Initialize task queues for workers
314
363
  if (config.workers) {
315
364
  for (const worker of config.workers) {
365
+ // Apply retry policy to stream connection if provided
366
+ if (worker.retryPolicy) {
367
+ this.applyRetryPolicy(worker.connection, worker.retryPolicy);
368
+ }
316
369
  worker.taskQueue = this.initTaskQueue(worker.taskQueue, config.taskQueue);
317
370
  }
318
371
  }
@@ -337,6 +390,22 @@ class HotMesh {
337
390
  // Default queue as fallback
338
391
  return enums_1.DEFAULT_TASK_QUEUE;
339
392
  }
393
+ /**
394
+ * Apply retry policy to the stream connection within a ProviderConfig or ProvidersConfig.
395
+ * Handles both short-form (ProviderConfig) and long-form (ProvidersConfig) connection configs.
396
+ * @private
397
+ */
398
+ applyRetryPolicy(connection, retryPolicy) {
399
+ // Check if this is ProvidersConfig (has 'stream' property)
400
+ if ('stream' in connection && connection.stream) {
401
+ // Long-form: apply to the stream sub-config
402
+ connection.stream.retryPolicy = retryPolicy;
403
+ }
404
+ else {
405
+ // Short-form: apply directly to the connection
406
+ connection.retryPolicy = retryPolicy;
407
+ }
408
+ }
340
409
  // ************* PUB/SUB METHODS *************
341
410
  /**
342
411
  * Starts a workflow
@@ -1,4 +1,5 @@
1
1
  import { ContextType, WorkflowInterceptor } from '../../types/memflow';
2
+ import { guid } from '../../modules/utils';
2
3
  import { ClientService } from './client';
3
4
  import { ConnectionService } from './connection';
4
5
  import { Search } from './search';
@@ -61,6 +62,7 @@ import { didInterrupt } from './workflow/interruption';
61
62
  * ### 3. Durable Activities & Proxies
62
63
  * Define and execute durable activities with automatic retry:
63
64
  * ```typescript
65
+ * // Default: activities use workflow's task queue
64
66
  * const activities = MemFlow.workflow.proxyActivities<{
65
67
  * analyzeDocument: typeof analyzeDocument;
66
68
  * validateFindings: typeof validateFindings;
@@ -77,7 +79,29 @@ import { didInterrupt } from './workflow/interruption';
77
79
  * const validation = await activities.validateFindings(analysis);
78
80
  * ```
79
81
  *
80
- * ### 4. Workflow Composition
82
+ * ### 4. Explicit Activity Registration
83
+ * Register activity workers explicitly before workflows start:
84
+ * ```typescript
85
+ * // Register shared activity pool for interceptors
86
+ * await MemFlow.registerActivityWorker({
87
+ * connection: {
88
+ * class: Postgres,
89
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
90
+ * },
91
+ * taskQueue: 'shared-activities'
92
+ * }, sharedActivities, 'shared-activities');
93
+ *
94
+ * // Register custom activity pool for specific use cases
95
+ * await MemFlow.registerActivityWorker({
96
+ * connection: {
97
+ * class: Postgres,
98
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
99
+ * },
100
+ * taskQueue: 'priority-activities'
101
+ * }, priorityActivities, 'priority-activities');
102
+ * ```
103
+ *
104
+ * ### 5. Workflow Composition
81
105
  * Build complex workflows through composition:
82
106
  * ```typescript
83
107
  * // Start a child workflow
@@ -100,29 +124,34 @@ import { didInterrupt } from './workflow/interruption';
100
124
  * });
101
125
  * ```
102
126
  *
103
- * ### 5. Workflow Interceptors
127
+ * ### 6. Workflow Interceptors
104
128
  * Add cross-cutting concerns through interceptors that run as durable functions:
105
129
  * ```typescript
106
- * // Add audit interceptor that uses MemFlow functions
130
+ * // First register shared activity worker for interceptors
131
+ * await MemFlow.registerActivityWorker({
132
+ * connection: {
133
+ * class: Postgres,
134
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
135
+ * },
136
+ * taskQueue: 'interceptor-activities'
137
+ * }, { auditLog }, 'interceptor-activities');
138
+ *
139
+ * // Add audit interceptor that uses activities with explicit taskQueue
107
140
  * MemFlow.registerInterceptor({
108
141
  * async execute(ctx, next) {
109
142
  * try {
110
- * // Interceptors can use MemFlow functions and participate in replay
111
- * const entity = await MemFlow.workflow.entity();
112
- * await entity.append('auditLog', {
113
- * action: 'started',
114
- * timestamp: new Date().toISOString()
143
+ * // Interceptors use explicit taskQueue to prevent per-workflow queues
144
+ * const { auditLog } = MemFlow.workflow.proxyActivities<typeof activities>({
145
+ * activities: { auditLog },
146
+ * taskQueue: 'interceptor-activities', // Explicit shared queue
147
+ * retryPolicy: { maximumAttempts: 3 }
115
148
  * });
116
149
  *
117
- * // Rate limiting with durable sleep
118
- * await MemFlow.workflow.sleepFor('100 milliseconds');
150
+ * await auditLog(ctx.get('workflowId'), 'started');
119
151
  *
120
152
  * const result = await next();
121
153
  *
122
- * await entity.append('auditLog', {
123
- * action: 'completed',
124
- * timestamp: new Date().toISOString()
125
- * });
154
+ * await auditLog(ctx.get('workflowId'), 'completed');
126
155
  *
127
156
  * return result;
128
157
  * } catch (err) {
@@ -141,6 +170,16 @@ import { didInterrupt } from './workflow/interruption';
141
170
  * ```typescript
142
171
  * import { Client, Worker, MemFlow } from '@hotmeshio/hotmesh';
143
172
  * import { Client as Postgres } from 'pg';
173
+ * import * as activities from './activities';
174
+ *
175
+ * // (Optional) Register shared activity workers for interceptors
176
+ * await MemFlow.registerActivityWorker({
177
+ * connection: {
178
+ * class: Postgres,
179
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
180
+ * },
181
+ * taskQueue: 'shared-activities'
182
+ * }, sharedActivities, 'shared-activities');
144
183
  *
145
184
  * // Initialize worker
146
185
  * await Worker.create({
@@ -210,6 +249,49 @@ declare class MemFlowClass {
210
249
  * equivalent to the Temporal `Worker` service.
211
250
  */
212
251
  static Worker: typeof WorkerService;
252
+ /**
253
+ * Register activity workers for a task queue. Activities execute via message queue
254
+ * and can run on different servers from workflows.
255
+ *
256
+ * @example
257
+ * ```typescript
258
+ * // Activity worker
259
+ * const activities = {
260
+ * async processPayment(amount: number) { return `Processed $${amount}`; },
261
+ * async sendEmail(to: string, msg: string) { /* ... *\/ }
262
+ * };
263
+ *
264
+ * await MemFlow.registerActivityWorker({
265
+ * connection: {
266
+ * class: Postgres,
267
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
268
+ * },
269
+ * taskQueue: 'payment'
270
+ * }, activities, 'payment');
271
+ *
272
+ * // Workflow worker (can be on different server)
273
+ * async function orderWorkflow(amount: number) {
274
+ * const { processPayment, sendEmail } = MemFlow.workflow.proxyActivities<{
275
+ * processPayment: (amount: number) => Promise<string>;
276
+ * sendEmail: (to: string, msg: string) => Promise<void>;
277
+ * }>({
278
+ * taskQueue: 'payment',
279
+ * retryPolicy: { maximumAttempts: 3 }
280
+ * });
281
+ *
282
+ * const result = await processPayment(amount);
283
+ * await sendEmail('customer@example.com', result);
284
+ * return result;
285
+ * }
286
+ *
287
+ * await MemFlow.Worker.create({
288
+ * connection: { class: Postgres, options: { connectionString: '...' } },
289
+ * taskQueue: 'orders',
290
+ * workflow: orderWorkflow
291
+ * });
292
+ * ```
293
+ */
294
+ static registerActivityWorker: typeof WorkerService.registerActivityWorker;
213
295
  /**
214
296
  * The MemFlow `workflow` service is functionally
215
297
  * equivalent to the Temporal `Workflow` service
@@ -234,6 +316,10 @@ declare class MemFlowClass {
234
316
  * Clear all registered workflow interceptors
235
317
  */
236
318
  static clearInterceptors(): void;
319
+ /**
320
+ * Generate a unique identifier for workflow IDs
321
+ */
322
+ static guid: typeof guid;
237
323
  /**
238
324
  * Shutdown everything. All connections, workers, and clients will be closed.
239
325
  * Include in your signal handlers to ensure a clean shutdown.
@@ -2,6 +2,7 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.MemFlow = void 0;
4
4
  const hotmesh_1 = require("../hotmesh");
5
+ const utils_1 = require("../../modules/utils");
5
6
  const client_1 = require("./client");
6
7
  const connection_1 = require("./connection");
7
8
  const search_1 = require("./search");
@@ -65,6 +66,7 @@ const interceptor_1 = require("./interceptor");
65
66
  * ### 3. Durable Activities & Proxies
66
67
  * Define and execute durable activities with automatic retry:
67
68
  * ```typescript
69
+ * // Default: activities use workflow's task queue
68
70
  * const activities = MemFlow.workflow.proxyActivities<{
69
71
  * analyzeDocument: typeof analyzeDocument;
70
72
  * validateFindings: typeof validateFindings;
@@ -81,7 +83,29 @@ const interceptor_1 = require("./interceptor");
81
83
  * const validation = await activities.validateFindings(analysis);
82
84
  * ```
83
85
  *
84
- * ### 4. Workflow Composition
86
+ * ### 4. Explicit Activity Registration
87
+ * Register activity workers explicitly before workflows start:
88
+ * ```typescript
89
+ * // Register shared activity pool for interceptors
90
+ * await MemFlow.registerActivityWorker({
91
+ * connection: {
92
+ * class: Postgres,
93
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
94
+ * },
95
+ * taskQueue: 'shared-activities'
96
+ * }, sharedActivities, 'shared-activities');
97
+ *
98
+ * // Register custom activity pool for specific use cases
99
+ * await MemFlow.registerActivityWorker({
100
+ * connection: {
101
+ * class: Postgres,
102
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
103
+ * },
104
+ * taskQueue: 'priority-activities'
105
+ * }, priorityActivities, 'priority-activities');
106
+ * ```
107
+ *
108
+ * ### 5. Workflow Composition
85
109
  * Build complex workflows through composition:
86
110
  * ```typescript
87
111
  * // Start a child workflow
@@ -104,29 +128,34 @@ const interceptor_1 = require("./interceptor");
104
128
  * });
105
129
  * ```
106
130
  *
107
- * ### 5. Workflow Interceptors
131
+ * ### 6. Workflow Interceptors
108
132
  * Add cross-cutting concerns through interceptors that run as durable functions:
109
133
  * ```typescript
110
- * // Add audit interceptor that uses MemFlow functions
134
+ * // First register shared activity worker for interceptors
135
+ * await MemFlow.registerActivityWorker({
136
+ * connection: {
137
+ * class: Postgres,
138
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
139
+ * },
140
+ * taskQueue: 'interceptor-activities'
141
+ * }, { auditLog }, 'interceptor-activities');
142
+ *
143
+ * // Add audit interceptor that uses activities with explicit taskQueue
111
144
  * MemFlow.registerInterceptor({
112
145
  * async execute(ctx, next) {
113
146
  * try {
114
- * // Interceptors can use MemFlow functions and participate in replay
115
- * const entity = await MemFlow.workflow.entity();
116
- * await entity.append('auditLog', {
117
- * action: 'started',
118
- * timestamp: new Date().toISOString()
147
+ * // Interceptors use explicit taskQueue to prevent per-workflow queues
148
+ * const { auditLog } = MemFlow.workflow.proxyActivities<typeof activities>({
149
+ * activities: { auditLog },
150
+ * taskQueue: 'interceptor-activities', // Explicit shared queue
151
+ * retryPolicy: { maximumAttempts: 3 }
119
152
  * });
120
153
  *
121
- * // Rate limiting with durable sleep
122
- * await MemFlow.workflow.sleepFor('100 milliseconds');
154
+ * await auditLog(ctx.get('workflowId'), 'started');
123
155
  *
124
156
  * const result = await next();
125
157
  *
126
- * await entity.append('auditLog', {
127
- * action: 'completed',
128
- * timestamp: new Date().toISOString()
129
- * });
158
+ * await auditLog(ctx.get('workflowId'), 'completed');
130
159
  *
131
160
  * return result;
132
161
  * } catch (err) {
@@ -145,6 +174,16 @@ const interceptor_1 = require("./interceptor");
145
174
  * ```typescript
146
175
  * import { Client, Worker, MemFlow } from '@hotmeshio/hotmesh';
147
176
  * import { Client as Postgres } from 'pg';
177
+ * import * as activities from './activities';
178
+ *
179
+ * // (Optional) Register shared activity workers for interceptors
180
+ * await MemFlow.registerActivityWorker({
181
+ * connection: {
182
+ * class: Postgres,
183
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
184
+ * },
185
+ * taskQueue: 'shared-activities'
186
+ * }, sharedActivities, 'shared-activities');
148
187
  *
149
188
  * // Initialize worker
150
189
  * await Worker.create({
@@ -245,6 +284,49 @@ MemFlowClass.Handle = handle_1.WorkflowHandleService;
245
284
  * equivalent to the Temporal `Worker` service.
246
285
  */
247
286
  MemFlowClass.Worker = worker_1.WorkerService;
287
+ /**
288
+ * Register activity workers for a task queue. Activities execute via message queue
289
+ * and can run on different servers from workflows.
290
+ *
291
+ * @example
292
+ * ```typescript
293
+ * // Activity worker
294
+ * const activities = {
295
+ * async processPayment(amount: number) { return `Processed $${amount}`; },
296
+ * async sendEmail(to: string, msg: string) { /* ... *\/ }
297
+ * };
298
+ *
299
+ * await MemFlow.registerActivityWorker({
300
+ * connection: {
301
+ * class: Postgres,
302
+ * options: { connectionString: 'postgresql://usr:pwd@localhost:5432/db' }
303
+ * },
304
+ * taskQueue: 'payment'
305
+ * }, activities, 'payment');
306
+ *
307
+ * // Workflow worker (can be on different server)
308
+ * async function orderWorkflow(amount: number) {
309
+ * const { processPayment, sendEmail } = MemFlow.workflow.proxyActivities<{
310
+ * processPayment: (amount: number) => Promise<string>;
311
+ * sendEmail: (to: string, msg: string) => Promise<void>;
312
+ * }>({
313
+ * taskQueue: 'payment',
314
+ * retryPolicy: { maximumAttempts: 3 }
315
+ * });
316
+ *
317
+ * const result = await processPayment(amount);
318
+ * await sendEmail('customer@example.com', result);
319
+ * return result;
320
+ * }
321
+ *
322
+ * await MemFlow.Worker.create({
323
+ * connection: { class: Postgres, options: { connectionString: '...' } },
324
+ * taskQueue: 'orders',
325
+ * workflow: orderWorkflow
326
+ * });
327
+ * ```
328
+ */
329
+ MemFlowClass.registerActivityWorker = worker_1.WorkerService.registerActivityWorker;
248
330
  /**
249
331
  * The MemFlow `workflow` service is functionally
250
332
  * equivalent to the Temporal `Workflow` service
@@ -260,3 +342,7 @@ MemFlowClass.workflow = workflow_1.WorkflowService;
260
342
  */
261
343
  MemFlowClass.didInterrupt = interruption_1.didInterrupt;
262
344
  MemFlowClass.interceptorService = new interceptor_1.InterceptorService();
345
+ /**
346
+ * Generate a unique identifier for workflow IDs
347
+ */
348
+ MemFlowClass.guid = utils_1.guid;