@hotmeshio/hotmesh 0.11.0 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/build/modules/enums.d.ts +1 -0
  2. package/build/modules/enums.js +3 -1
  3. package/build/package.json +1 -1
  4. package/build/services/durable/exporter.js +41 -0
  5. package/build/services/engine/index.d.ts +2 -2
  6. package/build/services/engine/index.js +5 -2
  7. package/build/services/exporter/index.d.ts +16 -2
  8. package/build/services/exporter/index.js +76 -0
  9. package/build/services/hotmesh/index.d.ts +2 -2
  10. package/build/services/hotmesh/index.js +2 -2
  11. package/build/services/router/config/index.d.ts +2 -2
  12. package/build/services/router/config/index.js +2 -1
  13. package/build/services/router/consumption/index.js +80 -5
  14. package/build/services/store/index.d.ts +12 -0
  15. package/build/services/store/providers/postgres/exporter-sql.d.ts +17 -0
  16. package/build/services/store/providers/postgres/exporter-sql.js +41 -1
  17. package/build/services/store/providers/postgres/postgres.d.ts +8 -0
  18. package/build/services/store/providers/postgres/postgres.js +52 -0
  19. package/build/services/stream/index.d.ts +1 -0
  20. package/build/services/stream/providers/postgres/kvtables.js +60 -0
  21. package/build/services/stream/providers/postgres/messages.d.ts +5 -0
  22. package/build/services/stream/providers/postgres/messages.js +47 -16
  23. package/build/services/stream/providers/postgres/postgres.d.ts +1 -0
  24. package/build/services/stream/providers/postgres/postgres.js +4 -0
  25. package/build/services/stream/providers/postgres/scout.js +2 -2
  26. package/build/services/stream/providers/postgres/stats.js +3 -2
  27. package/build/types/exporter.d.ts +45 -0
  28. package/build/types/index.d.ts +1 -1
  29. package/package.json +1 -1
@@ -78,6 +78,7 @@ export declare const INITIAL_STREAM_BACKOFF: number;
78
78
  export declare const MAX_STREAM_RETRIES: number;
79
79
  export declare const MAX_DELAY = 2147483647;
80
80
  export declare const HMSH_MAX_RETRIES: number;
81
+ export declare const HMSH_POISON_MESSAGE_THRESHOLD: number;
81
82
  export declare const HMSH_MAX_TIMEOUT_MS: number;
82
83
  export declare const HMSH_GRADUATED_INTERVAL_MS: number;
83
84
  /**
@@ -1,6 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.HMSH_ROUTER_POLL_FALLBACK_INTERVAL = exports.HMSH_NOTIFY_PAYLOAD_LIMIT = exports.DEFAULT_TASK_QUEUE = exports.HMSH_GUID_SIZE = exports.HMSH_ROUTER_SCOUT_INTERVAL_MS = exports.HMSH_ROUTER_SCOUT_INTERVAL_SECONDS = exports.HMSH_SCOUT_INTERVAL_SECONDS = exports.HMSH_FIDELITY_SECONDS = exports.HMSH_EXPIRE_DURATION = exports.HMSH_XPENDING_COUNT = exports.HMSH_XCLAIM_COUNT = exports.HMSH_XCLAIM_DELAY_MS = exports.HMSH_BLOCK_TIME_MS = exports.HMSH_DURABLE_EXP_BACKOFF = exports.HMSH_DURABLE_MAX_INTERVAL = exports.HMSH_DURABLE_MAX_ATTEMPTS = exports.HMSH_GRADUATED_INTERVAL_MS = exports.HMSH_MAX_TIMEOUT_MS = exports.HMSH_MAX_RETRIES = exports.MAX_DELAY = exports.MAX_STREAM_RETRIES = exports.INITIAL_STREAM_BACKOFF = exports.MAX_STREAM_BACKOFF = exports.HMSH_EXPIRE_JOB_SECONDS = exports.HMSH_OTT_WAIT_TIME = exports.HMSH_DEPLOYMENT_PAUSE = exports.HMSH_DEPLOYMENT_DELAY = exports.HMSH_ACTIVATION_MAX_RETRY = exports.HMSH_QUORUM_DELAY_MS = exports.HMSH_QUORUM_ROLLCALL_CYCLES = exports.HMSH_STATUS_UNKNOWN = exports.HMSH_CODE_DURABLE_RETRYABLE = exports.HMSH_CODE_DURABLE_FATAL = exports.HMSH_CODE_DURABLE_MAXED = exports.HMSH_CODE_DURABLE_TIMEOUT = exports.HMSH_CODE_DURABLE_WAIT = exports.HMSH_CODE_DURABLE_PROXY = exports.HMSH_CODE_DURABLE_CHILD = exports.HMSH_CODE_DURABLE_ALL = exports.HMSH_CODE_DURABLE_SLEEP = exports.HMSH_CODE_UNACKED = exports.HMSH_CODE_TIMEOUT = exports.HMSH_CODE_UNKNOWN = exports.HMSH_CODE_INTERRUPT = exports.HMSH_CODE_NOTFOUND = exports.HMSH_CODE_PENDING = exports.HMSH_CODE_SUCCESS = exports.HMSH_SIGNAL_EXPIRE = exports.HMSH_TELEMETRY = exports.HMSH_LOGLEVEL = void 0;
3
+ exports.HMSH_NOTIFY_PAYLOAD_LIMIT = exports.DEFAULT_TASK_QUEUE = exports.HMSH_GUID_SIZE = exports.HMSH_ROUTER_SCOUT_INTERVAL_MS = exports.HMSH_ROUTER_SCOUT_INTERVAL_SECONDS = exports.HMSH_SCOUT_INTERVAL_SECONDS = exports.HMSH_FIDELITY_SECONDS = exports.HMSH_EXPIRE_DURATION = exports.HMSH_XPENDING_COUNT = exports.HMSH_XCLAIM_COUNT = exports.HMSH_XCLAIM_DELAY_MS = exports.HMSH_BLOCK_TIME_MS = exports.HMSH_DURABLE_EXP_BACKOFF = exports.HMSH_DURABLE_MAX_INTERVAL = exports.HMSH_DURABLE_MAX_ATTEMPTS = exports.HMSH_GRADUATED_INTERVAL_MS = exports.HMSH_MAX_TIMEOUT_MS = exports.HMSH_POISON_MESSAGE_THRESHOLD = exports.HMSH_MAX_RETRIES = exports.MAX_DELAY = exports.MAX_STREAM_RETRIES = exports.INITIAL_STREAM_BACKOFF = exports.MAX_STREAM_BACKOFF = exports.HMSH_EXPIRE_JOB_SECONDS = exports.HMSH_OTT_WAIT_TIME = exports.HMSH_DEPLOYMENT_PAUSE = exports.HMSH_DEPLOYMENT_DELAY = exports.HMSH_ACTIVATION_MAX_RETRY = exports.HMSH_QUORUM_DELAY_MS = exports.HMSH_QUORUM_ROLLCALL_CYCLES = exports.HMSH_STATUS_UNKNOWN = exports.HMSH_CODE_DURABLE_RETRYABLE = exports.HMSH_CODE_DURABLE_FATAL = exports.HMSH_CODE_DURABLE_MAXED = exports.HMSH_CODE_DURABLE_TIMEOUT = exports.HMSH_CODE_DURABLE_WAIT = exports.HMSH_CODE_DURABLE_PROXY = exports.HMSH_CODE_DURABLE_CHILD = exports.HMSH_CODE_DURABLE_ALL = exports.HMSH_CODE_DURABLE_SLEEP = exports.HMSH_CODE_UNACKED = exports.HMSH_CODE_TIMEOUT = exports.HMSH_CODE_UNKNOWN = exports.HMSH_CODE_INTERRUPT = exports.HMSH_CODE_NOTFOUND = exports.HMSH_CODE_PENDING = exports.HMSH_CODE_SUCCESS = exports.HMSH_SIGNAL_EXPIRE = exports.HMSH_TELEMETRY = exports.HMSH_LOGLEVEL = void 0;
4
+ exports.HMSH_ROUTER_POLL_FALLBACK_INTERVAL = void 0;
4
5
  /**
5
6
  * Determines the log level for the application. The default is 'info'.
6
7
  */
@@ -87,6 +88,7 @@ exports.INITIAL_STREAM_BACKOFF = parseInt(process.env.INITIAL_STREAM_BACKOFF, 10
87
88
  exports.MAX_STREAM_RETRIES = parseInt(process.env.MAX_STREAM_RETRIES, 10) || 2;
88
89
  exports.MAX_DELAY = 2147483647; // Maximum allowed delay in milliseconds for setTimeout
89
90
  exports.HMSH_MAX_RETRIES = parseInt(process.env.HMSH_MAX_RETRIES, 10) || 3;
91
+ exports.HMSH_POISON_MESSAGE_THRESHOLD = parseInt(process.env.HMSH_POISON_MESSAGE_THRESHOLD, 10) || 5;
90
92
  exports.HMSH_MAX_TIMEOUT_MS = parseInt(process.env.HMSH_MAX_TIMEOUT_MS, 10) || 60000;
91
93
  exports.HMSH_GRADUATED_INTERVAL_MS = parseInt(process.env.HMSH_GRADUATED_INTERVAL_MS, 10) || 5000;
92
94
  // DURABLE
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hotmeshio/hotmesh",
3
- "version": "0.11.0",
3
+ "version": "0.12.0",
4
4
  "description": "Permanent-Memory Workflows & AI Agents",
5
5
  "main": "./build/index.js",
6
6
  "types": "./build/index.d.ts",
@@ -510,6 +510,47 @@ class ExporterService {
510
510
  }
511
511
  }
512
512
  }
513
+ // ── 3. Stream-based fallback for unenriched activity events ──
514
+ // When job attributes have been pruned, recover inputs from worker_streams
515
+ if (this.store.getStreamHistory) {
516
+ const unenrichedEvents = execution.events.filter((e) => (e.event_type === 'activity_task_scheduled' ||
517
+ e.event_type === 'activity_task_completed' ||
518
+ e.event_type === 'activity_task_failed') &&
519
+ e.attributes.input === undefined);
520
+ if (unenrichedEvents.length > 0) {
521
+ const streamHistory = await this.store.getStreamHistory(workflowId, {
522
+ types: ['worker'],
523
+ });
524
+ // Build a map of aid -> stream message data (the worker invocation inputs)
525
+ const streamInputsByAid = new Map();
526
+ for (const entry of streamHistory) {
527
+ if (entry.msg_type === 'worker' && entry.data) {
528
+ const key = `${entry.aid}:${entry.dad || ''}`;
529
+ if (!streamInputsByAid.has(key)) {
530
+ streamInputsByAid.set(key, entry.data);
531
+ }
532
+ }
533
+ }
534
+ for (const evt of unenrichedEvents) {
535
+ const attrs = evt.attributes;
536
+ // Try matching by activity_type + dimensional address
537
+ const key = `${attrs.activity_type}:${attrs.timeline_key || ''}`;
538
+ let input = streamInputsByAid.get(key);
539
+ if (input === undefined) {
540
+ // Fallback: match by activity name alone (first occurrence)
541
+ for (const [k, v] of streamInputsByAid) {
542
+ if (k.startsWith(`${attrs.activity_type}:`)) {
543
+ input = v;
544
+ break;
545
+ }
546
+ }
547
+ }
548
+ if (input !== undefined) {
549
+ attrs.input = input;
550
+ }
551
+ }
552
+ }
553
+ }
513
554
  }
514
555
  /**
515
556
  * Resolve a symbol field from stable JSON path using the symbol registry.
@@ -16,7 +16,7 @@ import { TaskService } from '../task';
16
16
  import { AppVID } from '../../types/app';
17
17
  import { ActivityType } from '../../types/activity';
18
18
  import { CacheMode } from '../../types/cache';
19
- import { JobExport } from '../../types/exporter';
19
+ import { ExportOptions, JobExport } from '../../types/exporter';
20
20
  import { JobState, JobData, JobMetadata, JobOutput, JobStatus, JobInterruptOptions, JobCompletionOptions, ExtensionType } from '../../types/job';
21
21
  import { HotMeshApps, HotMeshConfig, HotMeshManifest, HotMeshSettings } from '../../types/hotmesh';
22
22
  import { ProviderClient, ProviderTransaction } from '../../types/provider';
@@ -254,7 +254,7 @@ declare class EngineService {
254
254
  /**
255
255
  * @private
256
256
  */
257
- export(jobId: string): Promise<JobExport>;
257
+ export(jobId: string, options?: ExportOptions): Promise<JobExport>;
258
258
  /**
259
259
  * @private
260
260
  */
@@ -210,6 +210,9 @@ class EngineService {
210
210
  */
211
211
  async initActivity(topic, data = {}, context) {
212
212
  const [activityId, schema] = await this.getSchema(topic);
213
+ if (!schema) {
214
+ throw new Error(`Activity schema not found for "${activityId}" (topic: ${topic}) in app ${this.appId}`);
215
+ }
213
216
  const ActivityHandler = activities_1.default[schema.type];
214
217
  if (ActivityHandler) {
215
218
  const utc = (0, utils_1.formatISODate)(new Date());
@@ -746,8 +749,8 @@ class EngineService {
746
749
  /**
747
750
  * @private
748
751
  */
749
- async export(jobId) {
750
- return await this.exporter.export(jobId);
752
+ async export(jobId, options = {}) {
753
+ return await this.exporter.export(jobId, options);
751
754
  }
752
755
  /**
753
756
  * @private
@@ -1,8 +1,8 @@
1
1
  import { ILogger } from '../logger';
2
2
  import { StoreService } from '../store';
3
- import { DependencyExport, ExportOptions, JobActionExport, JobExport } from '../../types/exporter';
3
+ import { ActivityDetail, DependencyExport, ExportOptions, JobActionExport, JobExport, StreamHistoryEntry } from '../../types/exporter';
4
4
  import { ProviderClient, ProviderTransaction } from '../../types/provider';
5
- import { StringStringType, Symbols } from '../../types/serializer';
5
+ import { StringAnyType, StringStringType, Symbols } from '../../types/serializer';
6
6
  /**
7
7
  * Downloads job data and expands process data and
8
8
  * includes dependency list
@@ -34,6 +34,20 @@ declare class ExporterService {
34
34
  * @returns - the inflated job data
35
35
  */
36
36
  inflate(jobHash: StringStringType, dependencyList: string[]): JobExport;
37
+ /**
38
+ * Build structured activity details by correlating stream messages
39
+ * (inputs, timing, retries) with the process hierarchy (outputs).
40
+ *
41
+ * Stream messages carry the raw data that flowed through each activity:
42
+ * - `data` contains the activity input arguments
43
+ * - `dad` (dimensional address) reveals cycle iterations (e.g., ,0,1,0 = 2nd cycle)
44
+ * - `created_at` / `expired_at` give precise timing
45
+ * - `retry_attempt` tracks retries
46
+ *
47
+ * The process hierarchy carries activity outputs organized by dimension.
48
+ * This method merges both into a flat, dashboard-friendly list.
49
+ */
50
+ buildActivities(process: StringAnyType, streamHistory: StreamHistoryEntry[]): ActivityDetail[];
37
51
  /**
38
52
  * Inflates the dependency data into a JobExport object by
39
53
  * organizing the dimensional isolate in such a way as to interleave
@@ -28,6 +28,10 @@ class ExporterService {
28
28
  const depData = []; // await this.store.getDependencies(jobId);
29
29
  const jobData = await this.store.getRaw(jobId);
30
30
  const jobExport = this.inflate(jobData, depData);
31
+ if (options.enrich_inputs && this.store.getStreamHistory) {
32
+ const streamHistory = await this.store.getStreamHistory(jobId);
33
+ jobExport.activities = this.buildActivities(jobExport.process, streamHistory);
34
+ }
31
35
  return jobExport;
32
36
  }
33
37
  /**
@@ -77,6 +81,78 @@ class ExporterService {
77
81
  status: jobHash[':'],
78
82
  };
79
83
  }
84
+ /**
85
+ * Build structured activity details by correlating stream messages
86
+ * (inputs, timing, retries) with the process hierarchy (outputs).
87
+ *
88
+ * Stream messages carry the raw data that flowed through each activity:
89
+ * - `data` contains the activity input arguments
90
+ * - `dad` (dimensional address) reveals cycle iterations (e.g., ,0,1,0 = 2nd cycle)
91
+ * - `created_at` / `expired_at` give precise timing
92
+ * - `retry_attempt` tracks retries
93
+ *
94
+ * The process hierarchy carries activity outputs organized by dimension.
95
+ * This method merges both into a flat, dashboard-friendly list.
96
+ */
97
+ buildActivities(process, streamHistory) {
98
+ const activities = [];
99
+ for (const entry of streamHistory) {
100
+ // Parse dimensional address: ",0,1,0,0" → ["0","1","0","0"]
101
+ const dimParts = (entry.dad || '').split(',').filter(Boolean);
102
+ const dimension = dimParts.join('/');
103
+ // Detect cycle iteration from dimensional address
104
+ // In a cycling workflow, the 2nd dimension component increments per cycle
105
+ const cycleIteration = dimParts.length > 1 ? parseInt(dimParts[1]) || 0 : 0;
106
+ // Look up the corresponding output from the process hierarchy
107
+ // Process keys are like: process[dimension][activityName].output.data
108
+ let output;
109
+ let activityName = entry.aid;
110
+ // Walk the process hierarchy using the dimension path
111
+ let node = process;
112
+ for (const part of dimParts) {
113
+ if (node && typeof node === 'object' && node[part]) {
114
+ node = node[part];
115
+ }
116
+ else {
117
+ node = undefined;
118
+ break;
119
+ }
120
+ }
121
+ if (node && typeof node === 'object') {
122
+ // node is now at the dimensional level, look for the activity
123
+ if (node[activityName]?.output?.data) {
124
+ output = node[activityName].output.data;
125
+ }
126
+ }
127
+ // Compute timing
128
+ const startedAt = entry.created_at;
129
+ const completedAt = entry.expired_at;
130
+ let durationMs;
131
+ if (startedAt && completedAt) {
132
+ durationMs = new Date(completedAt).getTime() - new Date(startedAt).getTime();
133
+ }
134
+ activities.push({
135
+ name: activityName,
136
+ type: entry.aid,
137
+ dimension,
138
+ input: entry.data,
139
+ output,
140
+ started_at: startedAt,
141
+ completed_at: completedAt,
142
+ duration_ms: durationMs,
143
+ retry_attempt: entry.code === undefined ? 0 : undefined,
144
+ cycle_iteration: cycleIteration > 0 ? cycleIteration : undefined,
145
+ error: null,
146
+ });
147
+ }
148
+ // Sort by time, then by dimension for cycle ordering
149
+ activities.sort((a, b) => {
150
+ const timeA = a.started_at || '';
151
+ const timeB = b.started_at || '';
152
+ return timeA.localeCompare(timeB);
153
+ });
154
+ return activities;
155
+ }
80
156
  /**
81
157
  * Inflates the dependency data into a JobExport object by
82
158
  * organizing the dimensional isolate in such a way as to interleave
@@ -4,7 +4,7 @@ import { QuorumService } from '../quorum';
4
4
  import { WorkerService } from '../worker';
5
5
  import { JobState, JobData, JobOutput, JobStatus, JobInterruptOptions, ExtensionType } from '../../types/job';
6
6
  import { HotMeshConfig, HotMeshManifest } from '../../types/hotmesh';
7
- import { JobExport } from '../../types/exporter';
7
+ import { ExportOptions, JobExport } from '../../types/exporter';
8
8
  import { JobMessageCallback, QuorumMessage, QuorumMessageCallback, QuorumProfile, ThrottleOptions } from '../../types/quorum';
9
9
  import { StringAnyType, StringStringType } from '../../types/serializer';
10
10
  import { JobStatsInput, GetStatsOptions, IdsResponse, StatsResponse } from '../../types/stats';
@@ -632,7 +632,7 @@ declare class HotMesh {
632
632
  * activity data, transitions, and dependency chains. Useful for
633
633
  * debugging, auditing, and visualizing workflow execution.
634
634
  */
635
- export(jobId: string): Promise<JobExport>;
635
+ export(jobId: string, options?: ExportOptions): Promise<JobExport>;
636
636
  /**
637
637
  * Returns all raw key-value pairs for a job's HASH record. This is
638
638
  * the lowest-level read — it returns internal engine fields alongside
@@ -769,8 +769,8 @@ class HotMesh {
769
769
  * activity data, transitions, and dependency chains. Useful for
770
770
  * debugging, auditing, and visualizing workflow execution.
771
771
  */
772
- async export(jobId) {
773
- return await this.engine?.export(jobId);
772
+ async export(jobId, options = {}) {
773
+ return await this.engine?.export(jobId, options);
774
774
  }
775
775
  /**
776
776
  * Returns all raw key-value pairs for a job's HASH record. This is
@@ -1,4 +1,4 @@
1
- import { HMSH_BLOCK_TIME_MS, HMSH_MAX_RETRIES, HMSH_MAX_TIMEOUT_MS, HMSH_GRADUATED_INTERVAL_MS, HMSH_CODE_UNACKED, HMSH_CODE_UNKNOWN, HMSH_STATUS_UNKNOWN, HMSH_XCLAIM_COUNT, HMSH_XCLAIM_DELAY_MS, HMSH_XPENDING_COUNT, MAX_DELAY, MAX_STREAM_BACKOFF, INITIAL_STREAM_BACKOFF, MAX_STREAM_RETRIES } from '../../../modules/enums';
1
+ import { HMSH_BLOCK_TIME_MS, HMSH_MAX_RETRIES, HMSH_MAX_TIMEOUT_MS, HMSH_GRADUATED_INTERVAL_MS, HMSH_CODE_UNACKED, HMSH_CODE_UNKNOWN, HMSH_STATUS_UNKNOWN, HMSH_XCLAIM_COUNT, HMSH_XCLAIM_DELAY_MS, HMSH_XPENDING_COUNT, MAX_DELAY, MAX_STREAM_BACKOFF, INITIAL_STREAM_BACKOFF, MAX_STREAM_RETRIES, HMSH_POISON_MESSAGE_THRESHOLD } from '../../../modules/enums';
2
2
  import { RouterConfig } from '../../../types/stream';
3
3
  export declare class RouterConfigManager {
4
4
  static validateThrottle(delayInMillis: number): void;
@@ -8,4 +8,4 @@ export declare class RouterConfigManager {
8
8
  readonly: boolean;
9
9
  };
10
10
  }
11
- export { HMSH_BLOCK_TIME_MS, HMSH_MAX_RETRIES, HMSH_MAX_TIMEOUT_MS, HMSH_GRADUATED_INTERVAL_MS, HMSH_CODE_UNACKED, HMSH_CODE_UNKNOWN, HMSH_STATUS_UNKNOWN, HMSH_XCLAIM_COUNT, HMSH_XCLAIM_DELAY_MS, HMSH_XPENDING_COUNT, MAX_DELAY, MAX_STREAM_BACKOFF, INITIAL_STREAM_BACKOFF, MAX_STREAM_RETRIES, };
11
+ export { HMSH_BLOCK_TIME_MS, HMSH_MAX_RETRIES, HMSH_MAX_TIMEOUT_MS, HMSH_GRADUATED_INTERVAL_MS, HMSH_CODE_UNACKED, HMSH_CODE_UNKNOWN, HMSH_STATUS_UNKNOWN, HMSH_XCLAIM_COUNT, HMSH_XCLAIM_DELAY_MS, HMSH_XPENDING_COUNT, MAX_DELAY, MAX_STREAM_BACKOFF, INITIAL_STREAM_BACKOFF, MAX_STREAM_RETRIES, HMSH_POISON_MESSAGE_THRESHOLD, };
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.MAX_STREAM_RETRIES = exports.INITIAL_STREAM_BACKOFF = exports.MAX_STREAM_BACKOFF = exports.MAX_DELAY = exports.HMSH_XPENDING_COUNT = exports.HMSH_XCLAIM_DELAY_MS = exports.HMSH_XCLAIM_COUNT = exports.HMSH_STATUS_UNKNOWN = exports.HMSH_CODE_UNKNOWN = exports.HMSH_CODE_UNACKED = exports.HMSH_GRADUATED_INTERVAL_MS = exports.HMSH_MAX_TIMEOUT_MS = exports.HMSH_MAX_RETRIES = exports.HMSH_BLOCK_TIME_MS = exports.RouterConfigManager = void 0;
3
+ exports.HMSH_POISON_MESSAGE_THRESHOLD = exports.MAX_STREAM_RETRIES = exports.INITIAL_STREAM_BACKOFF = exports.MAX_STREAM_BACKOFF = exports.MAX_DELAY = exports.HMSH_XPENDING_COUNT = exports.HMSH_XCLAIM_DELAY_MS = exports.HMSH_XCLAIM_COUNT = exports.HMSH_STATUS_UNKNOWN = exports.HMSH_CODE_UNKNOWN = exports.HMSH_CODE_UNACKED = exports.HMSH_GRADUATED_INTERVAL_MS = exports.HMSH_MAX_TIMEOUT_MS = exports.HMSH_MAX_RETRIES = exports.HMSH_BLOCK_TIME_MS = exports.RouterConfigManager = void 0;
4
4
  const enums_1 = require("../../../modules/enums");
5
5
  Object.defineProperty(exports, "HMSH_BLOCK_TIME_MS", { enumerable: true, get: function () { return enums_1.HMSH_BLOCK_TIME_MS; } });
6
6
  Object.defineProperty(exports, "HMSH_MAX_RETRIES", { enumerable: true, get: function () { return enums_1.HMSH_MAX_RETRIES; } });
@@ -16,6 +16,7 @@ Object.defineProperty(exports, "MAX_DELAY", { enumerable: true, get: function ()
16
16
  Object.defineProperty(exports, "MAX_STREAM_BACKOFF", { enumerable: true, get: function () { return enums_1.MAX_STREAM_BACKOFF; } });
17
17
  Object.defineProperty(exports, "INITIAL_STREAM_BACKOFF", { enumerable: true, get: function () { return enums_1.INITIAL_STREAM_BACKOFF; } });
18
18
  Object.defineProperty(exports, "MAX_STREAM_RETRIES", { enumerable: true, get: function () { return enums_1.MAX_STREAM_RETRIES; } });
19
+ Object.defineProperty(exports, "HMSH_POISON_MESSAGE_THRESHOLD", { enumerable: true, get: function () { return enums_1.HMSH_POISON_MESSAGE_THRESHOLD; } });
19
20
  class RouterConfigManager {
20
21
  static validateThrottle(delayInMillis) {
21
22
  if (!Number.isInteger(delayInMillis) ||
@@ -356,6 +356,68 @@ class ConsumptionManager {
356
356
  }
357
357
  async consumeOne(stream, group, id, input, callback) {
358
358
  this.logger.debug(`stream-read-one`, { group, stream, id });
359
+ // Poison message circuit breaker. This is a SAFETY NET that sits above
360
+ // the normal retry mechanism (ErrorHandler.handleRetry / shouldRetry).
361
+ //
362
+ // Normal retry flow: handleRetry() checks metadata.try against the
363
+ // configured retryPolicy.maximumAttempts (or _streamRetryConfig) and
364
+ // applies exponential backoff + visibility delays. That mechanism is
365
+ // the primary retry budget and is what developers configure via
366
+ // HotMesh.init({ workers: [{ retryPolicy: { maximumAttempts, ... } }] }).
367
+ //
368
+ // This check catches messages that have somehow exceeded the normal
369
+ // budget — e.g., when no retryPolicy is configured, when the retry
370
+ // logic is bypassed by an infrastructure error, or when a message
371
+ // re-enters the stream through a path that doesn't increment
372
+ // metadata.try. The threshold is the HIGHER of the configured retry
373
+ // budget and the system-wide HMSH_POISON_MESSAGE_THRESHOLD, so it
374
+ // never interferes with legitimate developer-configured retries.
375
+ const retryAttempt = input._retryAttempt || 0;
376
+ const configuredMax = input._streamRetryConfig?.max_retry_attempts
377
+ ?? this.retryPolicy?.maximumAttempts
378
+ ?? 0;
379
+ const poisonThreshold = Math.max(configuredMax, config_1.HMSH_POISON_MESSAGE_THRESHOLD);
380
+ if (retryAttempt >= poisonThreshold) {
381
+ this.logger.error(`stream-poison-message-detected`, {
382
+ group,
383
+ stream,
384
+ id,
385
+ retryAttempt,
386
+ poisonThreshold,
387
+ configuredMaxAttempts: configuredMax,
388
+ systemThreshold: config_1.HMSH_POISON_MESSAGE_THRESHOLD,
389
+ topic: input.metadata?.topic,
390
+ activityId: input.metadata?.aid,
391
+ jobId: input.metadata?.jid,
392
+ metadata: input.metadata,
393
+ });
394
+ const errorOutput = this.errorHandler.structureUnhandledError(input, new Error(`Poison message detected: retry attempt ${retryAttempt} reached ` +
395
+ `threshold ${poisonThreshold} (configured: ${configuredMax}, ` +
396
+ `system: ${config_1.HMSH_POISON_MESSAGE_THRESHOLD}). Discarding message ` +
397
+ `for activity "${input.metadata?.aid || 'unknown'}" ` +
398
+ `(topic: ${input.metadata?.topic || 'unknown'}, ` +
399
+ `job: ${input.metadata?.jid || 'unknown'}).`));
400
+ try {
401
+ await this.publishMessage(null, errorOutput);
402
+ }
403
+ catch (publishErr) {
404
+ this.logger.error(`stream-poison-message-publish-error`, {
405
+ error: publishErr,
406
+ stream,
407
+ id,
408
+ retryAttempt,
409
+ poisonThreshold,
410
+ });
411
+ }
412
+ // Mark as dead-lettered if the provider supports it; otherwise just ack
413
+ if (this.stream.deadLetterMessages) {
414
+ await this.stream.deadLetterMessages(stream, group, [id]);
415
+ }
416
+ else {
417
+ await this.ackAndDelete(stream, group, id);
418
+ }
419
+ return;
420
+ }
359
421
  let output;
360
422
  const telemetry = new telemetry_1.RouterTelemetry(this.appId);
361
423
  try {
@@ -367,12 +429,25 @@ class ConsumptionManager {
367
429
  catch (err) {
368
430
  this.logger.error(`stream-read-one-error`, { group, stream, id, err });
369
431
  telemetry.setStreamErrorFromException(err);
432
+ output = this.errorHandler.structureUnhandledError(input, err instanceof Error ? err : new Error(String(err)));
433
+ }
434
+ try {
435
+ const messageId = await this.publishResponse(input, output);
436
+ telemetry.setStreamAttributes({ 'app.worker.mid': messageId });
437
+ }
438
+ catch (publishErr) {
439
+ // If publishResponse fails, still ack the message to prevent
440
+ // infinite reprocessing. Log the error for debugging.
441
+ this.logger.error(`stream-publish-response-error`, {
442
+ group, stream, id, error: publishErr,
443
+ });
444
+ this.errorCount++;
445
+ }
446
+ finally {
447
+ await this.ackAndDelete(stream, group, id);
448
+ telemetry.endStreamSpan();
449
+ this.logger.debug(`stream-read-one-end`, { group, stream, id });
370
450
  }
371
- const messageId = await this.publishResponse(input, output);
372
- telemetry.setStreamAttributes({ 'app.worker.mid': messageId });
373
- await this.ackAndDelete(stream, group, id);
374
- telemetry.endStreamSpan();
375
- this.logger.debug(`stream-read-one-end`, { group, stream, id });
376
451
  }
377
452
  async execStreamLeg(input, stream, id, callback) {
378
453
  let output;
@@ -112,6 +112,18 @@ declare abstract class StoreService<Provider extends ProviderClient, Transaction
112
112
  * @returns Map of child_workflow_id -> parsed input arguments
113
113
  */
114
114
  getChildWorkflowInputs?(childJobKeys: string[], symbolField: string): Promise<Map<string, any>>;
115
+ /**
116
+ * Fetch stream message history for a job from worker_streams.
117
+ * Returns raw activity input/output data from soft-deleted messages.
118
+ *
119
+ * @param jobId - The job ID (metadata.jid in stream messages)
120
+ * @param options - Optional filters for activity or message types
121
+ * @returns Array of stream history entries ordered by creation time
122
+ */
123
+ getStreamHistory?(jobId: string, options?: {
124
+ activity?: string;
125
+ types?: string[];
126
+ }): Promise<import('../../types/exporter').StreamHistoryEntry[]>;
115
127
  /**
116
128
  * Fetch job record and attributes by key. Used by the exporter to
117
129
  * reconstruct execution history for expired jobs.
@@ -15,6 +15,23 @@ export declare const GET_JOB_ATTRIBUTES = "\n SELECT field, value\n FROM {sche
15
15
  * Matches all activity jobs for the given workflow and extracts their input arguments.
16
16
  */
17
17
  export declare const GET_ACTIVITY_INPUTS = "\n SELECT j.key, ja.value\n FROM {schema}.jobs j\n JOIN {schema}.jobs_attributes ja ON ja.job_id = j.id\n WHERE j.key LIKE $1\n AND ja.field = $2\n";
18
+ /**
19
+ * Fetch all worker stream messages for a job AND its child activities.
20
+ * Child activity jobs use the pattern: -{parentJobId}-$activityName-N
21
+ * Uses the partial index on (jid, created_at) WHERE jid != '' for efficiency.
22
+ * Includes both active and expired messages for full execution history.
23
+ */
24
+ export declare const GET_STREAM_HISTORY_BY_JID = "\n SELECT\n id, jid, aid, dad, msg_type, topic, workflow_name,\n message, created_at, expired_at\n FROM {schema}.worker_streams\n WHERE jid = $1 OR jid LIKE '-' || $1 || '-%'\n ORDER BY created_at, id\n";
25
+ /**
26
+ * Fetch worker stream messages for a job filtered by message type.
27
+ * Includes child activity messages.
28
+ */
29
+ export declare const GET_STREAM_HISTORY_BY_JID_AND_TYPE = "\n SELECT\n id, jid, aid, dad, msg_type, topic, workflow_name,\n message, created_at, expired_at\n FROM {schema}.worker_streams\n WHERE (jid = $1 OR jid LIKE '-' || $1 || '-%')\n AND msg_type = ANY($2::text[])\n ORDER BY created_at, id\n";
30
+ /**
31
+ * Fetch worker stream messages for a job filtered by activity ID.
32
+ * Includes child activity messages.
33
+ */
34
+ export declare const GET_STREAM_HISTORY_BY_JID_AND_AID = "\n SELECT\n id, jid, aid, dad, msg_type, topic, workflow_name,\n message, created_at, expired_at\n FROM {schema}.worker_streams\n WHERE (jid = $1 OR jid LIKE '-' || $1 || '-%')\n AND aid = $2\n ORDER BY created_at, id\n";
18
35
  /**
19
36
  * Fetch child workflow inputs in batch.
20
37
  * Uses parameterized IN clause for exact-match efficiency.
@@ -4,7 +4,7 @@
4
4
  * These queries support the exporter's input enrichment and direct query features.
5
5
  */
6
6
  Object.defineProperty(exports, "__esModule", { value: true });
7
- exports.buildChildWorkflowInputsQuery = exports.GET_ACTIVITY_INPUTS = exports.GET_JOB_ATTRIBUTES = exports.GET_JOB_BY_KEY = void 0;
7
+ exports.buildChildWorkflowInputsQuery = exports.GET_STREAM_HISTORY_BY_JID_AND_AID = exports.GET_STREAM_HISTORY_BY_JID_AND_TYPE = exports.GET_STREAM_HISTORY_BY_JID = exports.GET_ACTIVITY_INPUTS = exports.GET_JOB_ATTRIBUTES = exports.GET_JOB_BY_KEY = void 0;
8
8
  /**
9
9
  * Fetch job record by key.
10
10
  */
@@ -34,6 +34,46 @@ exports.GET_ACTIVITY_INPUTS = `
34
34
  WHERE j.key LIKE $1
35
35
  AND ja.field = $2
36
36
  `;
37
+ /**
38
+ * Fetch all worker stream messages for a job AND its child activities.
39
+ * Child activity jobs use the pattern: -{parentJobId}-$activityName-N
40
+ * Uses the partial index on (jid, created_at) WHERE jid != '' for efficiency.
41
+ * Includes both active and expired messages for full execution history.
42
+ */
43
+ exports.GET_STREAM_HISTORY_BY_JID = `
44
+ SELECT
45
+ id, jid, aid, dad, msg_type, topic, workflow_name,
46
+ message, created_at, expired_at
47
+ FROM {schema}.worker_streams
48
+ WHERE jid = $1 OR jid LIKE '-' || $1 || '-%'
49
+ ORDER BY created_at, id
50
+ `;
51
+ /**
52
+ * Fetch worker stream messages for a job filtered by message type.
53
+ * Includes child activity messages.
54
+ */
55
+ exports.GET_STREAM_HISTORY_BY_JID_AND_TYPE = `
56
+ SELECT
57
+ id, jid, aid, dad, msg_type, topic, workflow_name,
58
+ message, created_at, expired_at
59
+ FROM {schema}.worker_streams
60
+ WHERE (jid = $1 OR jid LIKE '-' || $1 || '-%')
61
+ AND msg_type = ANY($2::text[])
62
+ ORDER BY created_at, id
63
+ `;
64
+ /**
65
+ * Fetch worker stream messages for a job filtered by activity ID.
66
+ * Includes child activity messages.
67
+ */
68
+ exports.GET_STREAM_HISTORY_BY_JID_AND_AID = `
69
+ SELECT
70
+ id, jid, aid, dad, msg_type, topic, workflow_name,
71
+ message, created_at, expired_at
72
+ FROM {schema}.worker_streams
73
+ WHERE (jid = $1 OR jid LIKE '-' || $1 || '-%')
74
+ AND aid = $2
75
+ ORDER BY created_at, id
76
+ `;
37
77
  /**
38
78
  * Fetch child workflow inputs in batch.
39
79
  * Uses parameterized IN clause for exact-match efficiency.
@@ -223,6 +223,14 @@ declare class PostgresStoreService extends StoreService<ProviderClient, Provider
223
223
  };
224
224
  attributes: Record<string, string>;
225
225
  }>;
226
+ /**
227
+ * Fetch stream message history for a job from worker_streams.
228
+ * Returns raw activity input/output data from soft-deleted messages.
229
+ */
230
+ getStreamHistory(jobId: string, options?: {
231
+ activity?: string;
232
+ types?: string[];
233
+ }): Promise<import('../../../../types/exporter').StreamHistoryEntry[]>;
226
234
  /**
227
235
  * Parse a HotMesh-encoded value string.
228
236
  * Values may be prefixed with `/s` (JSON), `/d` (number), `/t` or `/f` (boolean), `/n` (null).
@@ -1365,6 +1365,58 @@ class PostgresStoreService extends __1.StoreService {
1365
1365
  }
1366
1366
  return { job, attributes };
1367
1367
  }
1368
+ /**
1369
+ * Fetch stream message history for a job from worker_streams.
1370
+ * Returns raw activity input/output data from soft-deleted messages.
1371
+ */
1372
+ async getStreamHistory(jobId, options) {
1373
+ const { GET_STREAM_HISTORY_BY_JID, GET_STREAM_HISTORY_BY_JID_AND_TYPE, GET_STREAM_HISTORY_BY_JID_AND_AID, } = await Promise.resolve().then(() => __importStar(require('./exporter-sql')));
1374
+ const schemaName = this.kvsql().safeName(this.appId);
1375
+ let sql;
1376
+ let params;
1377
+ if (options?.activity) {
1378
+ sql = GET_STREAM_HISTORY_BY_JID_AND_AID.replace(/{schema}/g, schemaName);
1379
+ params = [jobId, options.activity];
1380
+ }
1381
+ else if (options?.types?.length) {
1382
+ sql = GET_STREAM_HISTORY_BY_JID_AND_TYPE.replace(/{schema}/g, schemaName);
1383
+ params = [jobId, options.types];
1384
+ }
1385
+ else {
1386
+ sql = GET_STREAM_HISTORY_BY_JID.replace(/{schema}/g, schemaName);
1387
+ params = [jobId];
1388
+ }
1389
+ const result = await this.pgClient.query(sql, params);
1390
+ return result.rows.map((row) => {
1391
+ let parsed = {};
1392
+ try {
1393
+ parsed = JSON.parse(row.message);
1394
+ }
1395
+ catch {
1396
+ // message may not be valid JSON
1397
+ }
1398
+ return {
1399
+ id: parseInt(row.id),
1400
+ jid: row.jid,
1401
+ aid: row.aid,
1402
+ dad: row.dad,
1403
+ msg_type: row.msg_type,
1404
+ topic: row.topic,
1405
+ workflow_name: row.workflow_name,
1406
+ data: parsed.data || {},
1407
+ status: parsed.status,
1408
+ code: parsed.code,
1409
+ created_at: row.created_at instanceof Date
1410
+ ? row.created_at.toISOString()
1411
+ : String(row.created_at),
1412
+ expired_at: row.expired_at
1413
+ ? (row.expired_at instanceof Date
1414
+ ? row.expired_at.toISOString()
1415
+ : String(row.expired_at))
1416
+ : undefined,
1417
+ };
1418
+ });
1419
+ }
1368
1420
  /**
1369
1421
  * Parse a HotMesh-encoded value string.
1370
1422
  * Values may be prefixed with `/s` (JSON), `/d` (number), `/t` or `/f` (boolean), `/n` (null).
@@ -64,6 +64,7 @@ export declare abstract class StreamService<ClientProvider extends ProviderClien
64
64
  maxMessageSize: number;
65
65
  maxBatchSize: number;
66
66
  };
67
+ deadLetterMessages?(streamName: string, groupName: string, messageIds: string[]): Promise<number>;
67
68
  stopNotificationConsumer?(streamName: string, groupName: string): Promise<void>;
68
69
  cleanup?(): Promise<void>;
69
70
  }
@@ -129,6 +129,7 @@ async function createTables(client, schemaName) {
129
129
  reserved_at TIMESTAMPTZ,
130
130
  reserved_by TEXT,
131
131
  expired_at TIMESTAMPTZ,
132
+ dead_lettered_at TIMESTAMPTZ,
132
133
  max_retry_attempts INT DEFAULT 3,
133
134
  backoff_coefficient NUMERIC DEFAULT 10,
134
135
  maximum_interval_seconds INT DEFAULT 120,
@@ -167,6 +168,18 @@ async function createTables(client, schemaName) {
167
168
  CREATE INDEX IF NOT EXISTS idx_engine_streams_processed_volume
168
169
  ON ${engineTable} (expired_at, stream_name)
169
170
  WHERE expired_at IS NOT NULL;
171
+ `);
172
+ await client.query(`
173
+ CREATE INDEX IF NOT EXISTS idx_engine_streams_dead_lettered
174
+ ON ${engineTable} (dead_lettered_at, stream_name)
175
+ WHERE dead_lettered_at IS NOT NULL;
176
+ `);
177
+ // Migration: add dead_lettered_at column to existing tables
178
+ await client.query(`
179
+ DO $$ BEGIN
180
+ ALTER TABLE ${engineTable} ADD COLUMN IF NOT EXISTS dead_lettered_at TIMESTAMPTZ;
181
+ EXCEPTION WHEN duplicate_column THEN NULL;
182
+ END $$;
170
183
  `);
171
184
  // ---- WORKER_STREAMS table ----
172
185
  const workerTable = `${schemaName}.worker_streams`;
@@ -175,11 +188,17 @@ async function createTables(client, schemaName) {
175
188
  id BIGSERIAL,
176
189
  stream_name TEXT NOT NULL,
177
190
  workflow_name TEXT NOT NULL DEFAULT '',
191
+ jid TEXT NOT NULL DEFAULT '',
192
+ aid TEXT NOT NULL DEFAULT '',
193
+ dad TEXT NOT NULL DEFAULT '',
194
+ msg_type TEXT NOT NULL DEFAULT '',
195
+ topic TEXT NOT NULL DEFAULT '',
178
196
  message TEXT NOT NULL,
179
197
  created_at TIMESTAMPTZ DEFAULT NOW(),
180
198
  reserved_at TIMESTAMPTZ,
181
199
  reserved_by TEXT,
182
200
  expired_at TIMESTAMPTZ,
201
+ dead_lettered_at TIMESTAMPTZ,
183
202
  max_retry_attempts INT DEFAULT 3,
184
203
  backoff_coefficient NUMERIC DEFAULT 10,
185
204
  maximum_interval_seconds INT DEFAULT 120,
@@ -219,6 +238,47 @@ async function createTables(client, schemaName) {
219
238
  ON ${workerTable} (expired_at, stream_name)
220
239
  WHERE expired_at IS NOT NULL;
221
240
  `);
241
+ await client.query(`
242
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_dead_lettered
243
+ ON ${workerTable} (dead_lettered_at, stream_name)
244
+ WHERE dead_lettered_at IS NOT NULL;
245
+ `);
246
+ // Migration: add dead_lettered_at column to existing tables
247
+ await client.query(`
248
+ DO $$ BEGIN
249
+ ALTER TABLE ${workerTable} ADD COLUMN IF NOT EXISTS dead_lettered_at TIMESTAMPTZ;
250
+ EXCEPTION WHEN duplicate_column THEN NULL;
251
+ END $$;
252
+ `);
253
+ // ---- Export fidelity columns and indexes ----
254
+ // These columns surface stream message metadata for efficient job history queries.
255
+ // Migration: add columns to existing tables (no-op on fresh installs)
256
+ for (const col of ['jid', 'aid', 'dad', 'msg_type', 'topic']) {
257
+ await client.query(`
258
+ DO $$ BEGIN
259
+ ALTER TABLE ${workerTable} ADD COLUMN IF NOT EXISTS ${col} TEXT NOT NULL DEFAULT '';
260
+ EXCEPTION WHEN duplicate_column THEN NULL;
261
+ END $$;
262
+ `);
263
+ }
264
+ // All messages for a job, ordered by time
265
+ await client.query(`
266
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_jid_created
267
+ ON ${workerTable} (jid, created_at)
268
+ WHERE jid != '';
269
+ `);
270
+ // Activity-specific lookups within a job
271
+ await client.query(`
272
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_jid_aid
273
+ ON ${workerTable} (jid, aid, created_at)
274
+ WHERE jid != '';
275
+ `);
276
+ // Type-filtered queries (e.g., only worker invocations + responses)
277
+ await client.query(`
278
+ CREATE INDEX IF NOT EXISTS idx_worker_streams_jid_type
279
+ ON ${workerTable} (jid, msg_type, created_at)
280
+ WHERE jid != '';
281
+ `);
222
282
  }
223
283
  async function createNotificationTriggers(client, schemaName) {
224
284
  const engineTable = `${schemaName}.engine_streams`;
@@ -46,6 +46,11 @@ export declare function deleteMessages(client: PostgresClientType & ProviderClie
46
46
  * Acknowledge and delete messages in one operation.
47
47
  */
48
48
  export declare function ackAndDelete(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, messageIds: string[], logger: ILogger): Promise<number>;
49
+ /**
50
+ * Move messages to the dead-letter state by setting dead_lettered_at
51
+ * and expired_at. The message payload is preserved for inspection.
52
+ */
53
+ export declare function deadLetterMessages(client: PostgresClientType & ProviderClient, tableName: string, streamName: string, messageIds: string[], logger: ILogger): Promise<number>;
49
54
  /**
50
55
  * Retry messages (placeholder for future implementation).
51
56
  */
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.retryMessages = exports.ackAndDelete = exports.deleteMessages = exports.acknowledgeMessages = exports.fetchMessages = exports.buildPublishSQL = exports.publishMessages = void 0;
3
+ exports.retryMessages = exports.deadLetterMessages = exports.ackAndDelete = exports.deleteMessages = exports.acknowledgeMessages = exports.fetchMessages = exports.buildPublishSQL = exports.publishMessages = void 0;
4
4
  const utils_1 = require("../../../../modules/utils");
5
5
  /**
6
6
  * Publish messages to a stream. Can be used within a transaction.
@@ -49,8 +49,13 @@ function buildPublishSQL(tableName, streamName, isEngine, messages, options) {
49
49
  delete data._streamRetryConfig;
50
50
  delete data._visibilityDelayMs;
51
51
  delete data._retryAttempt;
52
- // Extract workflow name for worker streams
52
+ // Extract metadata for worker stream columns
53
53
  const workflowName = data.metadata?.wfn || '';
54
+ const jid = data.metadata?.jid || '';
55
+ const aid = data.metadata?.aid || '';
56
+ const dad = data.metadata?.dad || '';
57
+ const msgType = data.type || '';
58
+ const topic = data.metadata?.topic || '';
54
59
  // Determine if this message has explicit retry config
55
60
  const hasExplicitConfig = (retryConfig && 'max_retry_attempts' in retryConfig) || options?.retryPolicy;
56
61
  let normalizedPolicy = null;
@@ -71,6 +76,11 @@ function buildPublishSQL(tableName, streamName, isEngine, messages, options) {
71
76
  visibilityDelayMs: visibilityDelayMs || 0,
72
77
  retryAttempt: retryAttempt || 0,
73
78
  workflowName,
79
+ jid,
80
+ aid,
81
+ dad,
82
+ msgType,
83
+ topic,
74
84
  };
75
85
  });
76
86
  const params = [streamName];
@@ -124,45 +134,45 @@ function buildPublishSQL(tableName, streamName, isEngine, messages, options) {
124
134
  }
125
135
  }
126
136
  else {
127
- // Worker table: includes workflow_name, no group_name
137
+ // Worker table: includes workflow_name + export fidelity columns, no group_name
128
138
  if (noneHaveConfig && !hasVisibilityDelays) {
129
- insertColumns = '(stream_name, workflow_name, message)';
139
+ insertColumns = '(stream_name, workflow_name, jid, aid, dad, msg_type, topic, message)';
130
140
  parsedMessages.forEach((pm) => {
131
141
  const paramOffset = params.length + 1;
132
- valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1})`);
133
- params.push(pm.workflowName, pm.message);
142
+ valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, $${paramOffset + 4}, $${paramOffset + 5}, $${paramOffset + 6})`);
143
+ params.push(pm.workflowName, pm.jid, pm.aid, pm.dad, pm.msgType, pm.topic, pm.message);
134
144
  });
135
145
  }
136
146
  else if (noneHaveConfig && hasVisibilityDelays) {
137
- insertColumns = '(stream_name, workflow_name, message, visible_at, retry_attempt)';
147
+ insertColumns = '(stream_name, workflow_name, jid, aid, dad, msg_type, topic, message, visible_at, retry_attempt)';
138
148
  parsedMessages.forEach((pm) => {
139
149
  const paramOffset = params.length + 1;
140
150
  if (pm.visibilityDelayMs > 0) {
141
151
  const visibleAtSQL = `NOW() + INTERVAL '${pm.visibilityDelayMs} milliseconds'`;
142
- valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, ${visibleAtSQL}, $${paramOffset + 2})`);
143
- params.push(pm.workflowName, pm.message, pm.retryAttempt);
152
+ valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, $${paramOffset + 4}, $${paramOffset + 5}, $${paramOffset + 6}, ${visibleAtSQL}, $${paramOffset + 7})`);
153
+ params.push(pm.workflowName, pm.jid, pm.aid, pm.dad, pm.msgType, pm.topic, pm.message, pm.retryAttempt);
144
154
  }
145
155
  else {
146
- valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, DEFAULT, $${paramOffset + 2})`);
147
- params.push(pm.workflowName, pm.message, pm.retryAttempt);
156
+ valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, $${paramOffset + 4}, $${paramOffset + 5}, $${paramOffset + 6}, DEFAULT, $${paramOffset + 7})`);
157
+ params.push(pm.workflowName, pm.jid, pm.aid, pm.dad, pm.msgType, pm.topic, pm.message, pm.retryAttempt);
148
158
  }
149
159
  });
150
160
  }
151
161
  else {
152
- insertColumns = '(stream_name, workflow_name, message, max_retry_attempts, backoff_coefficient, maximum_interval_seconds, visible_at, retry_attempt)';
162
+ insertColumns = '(stream_name, workflow_name, jid, aid, dad, msg_type, topic, message, max_retry_attempts, backoff_coefficient, maximum_interval_seconds, visible_at, retry_attempt)';
153
163
  parsedMessages.forEach((pm) => {
154
164
  const visibleAtClause = pm.visibilityDelayMs > 0
155
165
  ? `NOW() + INTERVAL '${pm.visibilityDelayMs} milliseconds'`
156
166
  : 'DEFAULT';
157
167
  if (pm.hasExplicitConfig) {
158
168
  const paramOffset = params.length + 1;
159
- valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, $${paramOffset + 4}, ${visibleAtClause}, $${paramOffset + 5})`);
160
- params.push(pm.workflowName, pm.message, pm.retryPolicy.max_retry_attempts, pm.retryPolicy.backoff_coefficient, pm.retryPolicy.maximum_interval_seconds, pm.retryAttempt);
169
+ valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, $${paramOffset + 4}, $${paramOffset + 5}, $${paramOffset + 6}, $${paramOffset + 7}, $${paramOffset + 8}, $${paramOffset + 9}, ${visibleAtClause}, $${paramOffset + 10})`);
170
+ params.push(pm.workflowName, pm.jid, pm.aid, pm.dad, pm.msgType, pm.topic, pm.message, pm.retryPolicy.max_retry_attempts, pm.retryPolicy.backoff_coefficient, pm.retryPolicy.maximum_interval_seconds, pm.retryAttempt);
161
171
  }
162
172
  else {
163
173
  const paramOffset = params.length + 1;
164
- valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, DEFAULT, DEFAULT, DEFAULT, ${visibleAtClause}, $${paramOffset + 2})`);
165
- params.push(pm.workflowName, pm.message, pm.retryAttempt);
174
+ valuesClauses.push(`($1, $${paramOffset}, $${paramOffset + 1}, $${paramOffset + 2}, $${paramOffset + 3}, $${paramOffset + 4}, $${paramOffset + 5}, $${paramOffset + 6}, DEFAULT, DEFAULT, DEFAULT, ${visibleAtClause}, $${paramOffset + 7})`);
175
+ params.push(pm.workflowName, pm.jid, pm.aid, pm.dad, pm.msgType, pm.topic, pm.message, pm.retryAttempt);
166
176
  }
167
177
  });
168
178
  }
@@ -290,6 +300,27 @@ async function ackAndDelete(client, tableName, streamName, messageIds, logger) {
290
300
  return await deleteMessages(client, tableName, streamName, messageIds, logger);
291
301
  }
292
302
  exports.ackAndDelete = ackAndDelete;
303
+ /**
304
+ * Move messages to the dead-letter state by setting dead_lettered_at
305
+ * and expired_at. The message payload is preserved for inspection.
306
+ */
307
+ async function deadLetterMessages(client, tableName, streamName, messageIds, logger) {
308
+ try {
309
+ const ids = messageIds.map((id) => parseInt(id));
310
+ const res = await client.query(`UPDATE ${tableName}
311
+ SET dead_lettered_at = NOW(), expired_at = NOW()
312
+ WHERE stream_name = $1 AND id = ANY($2::bigint[])`, [streamName, ids]);
313
+ return res.rowCount;
314
+ }
315
+ catch (error) {
316
+ logger.error(`postgres-stream-dead-letter-error-${streamName}`, {
317
+ error,
318
+ messageIds,
319
+ });
320
+ throw error;
321
+ }
322
+ }
323
+ exports.deadLetterMessages = deadLetterMessages;
293
324
  /**
294
325
  * Retry messages (placeholder for future implementation).
295
326
  */
@@ -73,6 +73,7 @@ declare class PostgresStreamService extends StreamService<PostgresClientType & P
73
73
  stopNotificationConsumer(streamName: string, groupName: string): Promise<void>;
74
74
  private fetchMessages;
75
75
  ackAndDelete(streamName: string, groupName: string, messageIds: string[]): Promise<number>;
76
+ deadLetterMessages(streamName: string, groupName: string, messageIds: string[]): Promise<number>;
76
77
  acknowledgeMessages(streamName: string, groupName: string, messageIds: string[], options?: StringAnyType): Promise<number>;
77
78
  deleteMessages(streamName: string, groupName: string, messageIds: string[], options?: StringAnyType): Promise<number>;
78
79
  retryMessages(streamName: string, groupName: string, options?: {
@@ -220,6 +220,10 @@ class PostgresStreamService extends index_1.StreamService {
220
220
  const target = this.resolveStreamTarget(streamName);
221
221
  return Messages.ackAndDelete(this.streamClient, target.tableName, target.streamName, messageIds, this.logger);
222
222
  }
223
+ async deadLetterMessages(streamName, groupName, messageIds) {
224
+ const target = this.resolveStreamTarget(streamName);
225
+ return Messages.deadLetterMessages(this.streamClient, target.tableName, target.streamName, messageIds, this.logger);
226
+ }
223
227
  async acknowledgeMessages(streamName, groupName, messageIds, options) {
224
228
  return Messages.acknowledgeMessages(messageIds);
225
229
  }
@@ -59,7 +59,7 @@ class ScoutManager {
59
59
  if (!wasScout) {
60
60
  // First time becoming scout - set timeout to reset after interval and track start time
61
61
  this.scoutStartTime = Date.now();
62
- this.logger.info('postgres-stream-router-scout-role-acquired', {
62
+ this.logger.debug('postgres-stream-router-scout-role-acquired', {
63
63
  appId: this.appId,
64
64
  });
65
65
  setTimeout(() => {
@@ -216,7 +216,7 @@ class ScoutManager {
216
216
  const durationMinutes = durationMs / 1000 / 60;
217
217
  const qpm = durationMinutes > 0 ? this.pollCount / durationMinutes : 0;
218
218
  const qps = durationMs > 0 ? this.pollCount / (durationMs / 1000) : 0;
219
- this.logger.info('postgres-stream-router-scout-metrics', {
219
+ this.logger.debug('postgres-stream-router-scout-metrics', {
220
220
  appId: this.appId,
221
221
  totalPolls: this.pollCount,
222
222
  totalNotifications: this.totalNotifications,
@@ -61,7 +61,7 @@ async function trimStream(client, tableName, streamName, options, logger) {
61
61
  if (options.maxLen !== undefined) {
62
62
  const res = await client.query(`WITH to_expire AS (
63
63
  SELECT id FROM ${tableName}
64
- WHERE stream_name = $1
64
+ WHERE stream_name = $1 AND dead_lettered_at IS NULL
65
65
  ORDER BY id ASC
66
66
  OFFSET $2
67
67
  )
@@ -73,7 +73,8 @@ async function trimStream(client, tableName, streamName, options, logger) {
73
73
  if (options.maxAge !== undefined) {
74
74
  const res = await client.query(`UPDATE ${tableName}
75
75
  SET expired_at = NOW()
76
- WHERE stream_name = $1 AND created_at < NOW() - INTERVAL '${options.maxAge} milliseconds'`, [streamName]);
76
+ WHERE stream_name = $1 AND dead_lettered_at IS NULL
77
+ AND created_at < NOW() - INTERVAL '${options.maxAge} milliseconds'`, [streamName]);
77
78
  expiredCount += res.rowCount;
78
79
  }
79
80
  return expiredCount;
@@ -18,6 +18,13 @@ export interface ExportOptions {
18
18
  * @default true
19
19
  */
20
20
  values?: boolean;
21
+ /**
22
+ * When true, fetches stream message history and produces a structured
23
+ * `activities` array with input/output per activity, timing, dimensional
24
+ * cycle info, and retry attempts. This is the dashboard-friendly format.
25
+ * @default false
26
+ */
27
+ enrich_inputs?: boolean;
21
28
  }
22
29
  export type JobAction = {
23
30
  cursor: number;
@@ -74,10 +81,24 @@ export interface DurableJobExport {
74
81
  timeline?: TimelineType[];
75
82
  transitions?: TransitionType[];
76
83
  }
84
+ export interface ActivityDetail {
85
+ name: string;
86
+ type: string;
87
+ dimension: string;
88
+ input?: Record<string, any>;
89
+ output?: Record<string, any>;
90
+ started_at?: string;
91
+ completed_at?: string;
92
+ duration_ms?: number;
93
+ retry_attempt?: number;
94
+ cycle_iteration?: number;
95
+ error?: string | null;
96
+ }
77
97
  export interface JobExport {
78
98
  dependencies: DependencyExport[];
79
99
  process: StringAnyType;
80
100
  status: string;
101
+ activities?: ActivityDetail[];
81
102
  }
82
103
  export type ExportMode = 'sparse' | 'verbose';
83
104
  export type WorkflowEventType = 'workflow_execution_started' | 'workflow_execution_completed' | 'workflow_execution_failed' | 'activity_task_scheduled' | 'activity_task_completed' | 'activity_task_failed' | 'child_workflow_execution_started' | 'child_workflow_execution_completed' | 'child_workflow_execution_failed' | 'timer_started' | 'timer_fired' | 'workflow_execution_signaled';
@@ -203,6 +224,7 @@ export interface WorkflowExecution {
203
224
  events: WorkflowExecutionEvent[];
204
225
  summary: WorkflowExecutionSummary;
205
226
  children?: WorkflowExecution[];
227
+ stream_history?: StreamHistoryEntry[];
206
228
  }
207
229
  export interface ExecutionExportOptions {
208
230
  mode?: ExportMode;
@@ -225,6 +247,29 @@ export interface ExecutionExportOptions {
225
247
  * @default false
226
248
  */
227
249
  allow_direct_query?: boolean;
250
+ /**
251
+ * When true, fetches the full stream message history for this workflow
252
+ * from the worker_streams table and attaches it as `stream_history`.
253
+ * This provides raw activity input/output data from the original stream
254
+ * messages, enabling Temporal-grade export fidelity.
255
+ *
256
+ * @default false
257
+ */
258
+ include_stream_history?: boolean;
259
+ }
260
+ export interface StreamHistoryEntry {
261
+ id: number;
262
+ jid: string;
263
+ aid: string;
264
+ dad: string;
265
+ msg_type: string;
266
+ topic: string;
267
+ workflow_name: string;
268
+ data: Record<string, any>;
269
+ status?: string;
270
+ code?: number;
271
+ created_at: string;
272
+ expired_at?: string;
228
273
  }
229
274
  export interface JobAttributesRow {
230
275
  field: string;
@@ -6,7 +6,7 @@ export { CollationFaultType, CollationStage } from './collator';
6
6
  export { ActivityConfig, ActivityInterceptor, ActivityInterceptorContext, ActivityWorkflowDataType, ChildResponseType, ClientConfig, ClientWorkflow, ContextType, Connection, ProxyResponseType, ProxyType, Registry, SignalOptions, FindJobsOptions, FindOptions, FindWhereOptions, FindWhereQuery, HookOptions, SearchResults, WorkflowConfig, WorkerConfig, WorkerOptions, WorkflowContext, WorkflowSearchOptions, WorkflowSearchSchema, WorkflowDataType, WorkflowOptions, WorkflowInterceptor, InterceptorRegistry, } from './durable';
7
7
  export { PruneOptions, PruneResult, } from './dba';
8
8
  export { DurableChildErrorType, DurableProxyErrorType, DurableSleepErrorType, DurableWaitForAllErrorType, DurableWaitForErrorType, } from './error';
9
- export { ActivityAction, DependencyExport, DurableJobExport, ExecutionExportOptions, ExportCycles, ExportItem, ExportMode, ExportOptions, ExportTransitions, JobAction, JobExport, JobActionExport, JobTimeline, WorkflowEventAttributes, WorkflowEventCategory, WorkflowEventType, WorkflowExecution, WorkflowExecutionEvent, WorkflowExecutionStatus, WorkflowExecutionSummary, } from './exporter';
9
+ export { ActivityAction, ActivityDetail, ActivityInputMap, ActivityTaskCompletedAttributes, ActivityTaskFailedAttributes, ActivityTaskScheduledAttributes, ChildWorkflowExecutionCompletedAttributes, ChildWorkflowExecutionFailedAttributes, ChildWorkflowExecutionStartedAttributes, DependencyExport, DurableJobExport, ExecutionExportOptions, ExportCycles, ExportFields, ExportItem, ExportMode, ExportOptions, ExportTransitions, JobAction, JobActionExport, JobAttributesRow, JobExport, JobRow, JobTimeline, StreamHistoryEntry, TimelineType, TimerFiredAttributes, TimerStartedAttributes, TransitionType, WorkflowEventAttributes, WorkflowEventCategory, WorkflowEventType, WorkflowExecution, WorkflowExecutionCompletedAttributes, WorkflowExecutionEvent, WorkflowExecutionFailedAttributes, WorkflowExecutionSignaledAttributes, WorkflowExecutionStartedAttributes, WorkflowExecutionStatus, WorkflowExecutionSummary, } from './exporter';
10
10
  export { HookCondition, HookConditions, HookGate, HookInterface, HookRule, HookRules, HookSignal, } from './hook';
11
11
  export { HotMesh, HotMeshEngine, HotMeshWorker, HotMeshSettings, HotMeshApp, HotMeshApps, HotMeshConfig, HotMeshManifest, HotMeshGraph, KeyType, KeyStoreParams, ScoutType, } from './hotmesh';
12
12
  export { ILogger, LogLevel } from './logger';
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hotmeshio/hotmesh",
3
- "version": "0.11.0",
3
+ "version": "0.12.0",
4
4
  "description": "Permanent-Memory Workflows & AI Agents",
5
5
  "main": "./build/index.js",
6
6
  "types": "./build/index.d.ts",