@etohq/workflow-engine-redis 1.5.1-alpha.4 → 1.5.1-alpha.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/dist/index.d.ts +1 -0
  2. package/dist/index.d.ts.map +1 -1
  3. package/dist/index.js +1 -0
  4. package/dist/index.js.map +1 -1
  5. package/dist/loaders/redis.d.ts.map +1 -1
  6. package/dist/loaders/redis.js +61 -15
  7. package/dist/loaders/redis.js.map +1 -1
  8. package/dist/loaders/utils.js +1 -1
  9. package/dist/loaders/utils.js.map +1 -1
  10. package/dist/migrations/Migration20231228143900.d.ts +1 -1
  11. package/dist/migrations/Migration20231228143900.d.ts.map +1 -1
  12. package/dist/migrations/Migration20231228143900.js +1 -1
  13. package/dist/migrations/Migration20231228143900.js.map +1 -1
  14. package/dist/migrations/Migration20241206123341.d.ts +1 -1
  15. package/dist/migrations/Migration20241206123341.d.ts.map +1 -1
  16. package/dist/migrations/Migration20241206123341.js +1 -1
  17. package/dist/migrations/Migration20241206123341.js.map +1 -1
  18. package/dist/migrations/Migration20250120111059.d.ts +1 -1
  19. package/dist/migrations/Migration20250120111059.d.ts.map +1 -1
  20. package/dist/migrations/Migration20250120111059.js +1 -1
  21. package/dist/migrations/Migration20250120111059.js.map +1 -1
  22. package/dist/migrations/Migration20250128174354.d.ts +1 -1
  23. package/dist/migrations/Migration20250128174354.d.ts.map +1 -1
  24. package/dist/migrations/Migration20250128174354.js +1 -1
  25. package/dist/migrations/Migration20250128174354.js.map +1 -1
  26. package/dist/migrations/Migration20250505101505.d.ts +1 -1
  27. package/dist/migrations/Migration20250505101505.d.ts.map +1 -1
  28. package/dist/migrations/Migration20250505101505.js +1 -1
  29. package/dist/migrations/Migration20250505101505.js.map +1 -1
  30. package/dist/migrations/Migration20250819110923.d.ts +6 -0
  31. package/dist/migrations/Migration20250819110923.d.ts.map +1 -0
  32. package/dist/migrations/Migration20250819110923.js +14 -0
  33. package/dist/migrations/Migration20250819110923.js.map +1 -0
  34. package/dist/migrations/Migration20250819110924.d.ts +6 -0
  35. package/dist/migrations/Migration20250819110924.d.ts.map +1 -0
  36. package/dist/migrations/Migration20250819110924.js +16 -0
  37. package/dist/migrations/Migration20250819110924.js.map +1 -0
  38. package/dist/migrations/Migration20250908080326.d.ts +6 -0
  39. package/dist/migrations/Migration20250908080326.d.ts.map +1 -0
  40. package/dist/migrations/Migration20250908080326.js +20 -0
  41. package/dist/migrations/Migration20250908080326.js.map +1 -0
  42. package/dist/models/workflow-execution.d.ts.map +1 -1
  43. package/dist/models/workflow-execution.js +20 -0
  44. package/dist/models/workflow-execution.js.map +1 -1
  45. package/dist/services/workflow-orchestrator.d.ts +12 -3
  46. package/dist/services/workflow-orchestrator.d.ts.map +1 -1
  47. package/dist/services/workflow-orchestrator.js +199 -104
  48. package/dist/services/workflow-orchestrator.js.map +1 -1
  49. package/dist/services/workflows-module.d.ts +11 -5
  50. package/dist/services/workflows-module.d.ts.map +1 -1
  51. package/dist/services/workflows-module.js +13 -18
  52. package/dist/services/workflows-module.js.map +1 -1
  53. package/dist/tsconfig.tsbuildinfo +1 -1
  54. package/dist/types/index.d.ts +108 -4
  55. package/dist/types/index.d.ts.map +1 -1
  56. package/dist/utils/workflow-orchestrator-storage.d.ts +17 -2
  57. package/dist/utils/workflow-orchestrator-storage.d.ts.map +1 -1
  58. package/dist/utils/workflow-orchestrator-storage.js +301 -182
  59. package/dist/utils/workflow-orchestrator-storage.js.map +1 -1
  60. package/package.json +5 -4
@@ -10,12 +10,12 @@ var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (
10
10
  if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
11
11
  return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
12
12
  };
13
- var _RedisDistributedTransactionStorage_instances, _RedisDistributedTransactionStorage_isWorkerMode, _RedisDistributedTransactionStorage_preventRaceConditionExecutionIfNecessary;
13
+ var _RedisDistributedTransactionStorage_instances, _RedisDistributedTransactionStorage_isWorkerMode, _RedisDistributedTransactionStorage_getLockKey, _RedisDistributedTransactionStorage_acquireLock, _RedisDistributedTransactionStorage_releaseLock, _RedisDistributedTransactionStorage_preventRaceConditionExecutionIfNecessary;
14
14
  Object.defineProperty(exports, "__esModule", { value: true });
15
15
  exports.RedisDistributedTransactionStorage = void 0;
16
+ const core_1 = require("@etohq/framework/mikro-orm/core");
16
17
  const orchestration_1 = require("@etohq/framework/orchestration");
17
18
  const utils_1 = require("@etohq/framework/utils");
18
- const core_1 = require("@mikro-orm/core");
19
19
  const bullmq_1 = require("bullmq");
20
20
  var JobType;
21
21
  (function (JobType) {
@@ -24,10 +24,27 @@ var JobType;
24
24
  JobType["STEP_TIMEOUT"] = "step_timeout";
25
25
  JobType["TRANSACTION_TIMEOUT"] = "transaction_timeout";
26
26
  })(JobType || (JobType = {}));
27
- const ONE_HOUR_IN_MS = 1000 * 60 * 60;
27
+ const THIRTY_MINUTES_IN_MS = 1000 * 60 * 30;
28
28
  const REPEATABLE_CLEARER_JOB_ID = "clear-expired-executions";
29
+ const doneStates = new Set([
30
+ utils_1.TransactionStepState.DONE,
31
+ utils_1.TransactionStepState.REVERTED,
32
+ utils_1.TransactionStepState.FAILED,
33
+ utils_1.TransactionStepState.SKIPPED,
34
+ utils_1.TransactionStepState.SKIPPED_FAILURE,
35
+ utils_1.TransactionStepState.TIMEOUT,
36
+ ]);
37
+ const finishedStates = new Set([
38
+ utils_1.TransactionState.DONE,
39
+ utils_1.TransactionState.FAILED,
40
+ utils_1.TransactionState.REVERTED,
41
+ ]);
42
+ const failedStates = new Set([
43
+ utils_1.TransactionState.FAILED,
44
+ utils_1.TransactionState.REVERTED,
45
+ ]);
29
46
  class RedisDistributedTransactionStorage {
30
- constructor({ workflowExecutionService, redisConnection, redisWorkerConnection, redisQueueName, redisJobQueueName, logger, isWorkerMode, }) {
47
+ constructor({ workflowExecutionService, redisConnection, redisWorkerConnection, redisQueueName, redisJobQueueName, redisMainQueueOptions, redisMainWorkerOptions, redisJobQueueOptions, redisJobWorkerOptions, redisCleanerQueueOptions, redisCleanerWorkerOptions, logger, isWorkerMode, }) {
31
48
  _RedisDistributedTransactionStorage_instances.add(this);
32
49
  _RedisDistributedTransactionStorage_isWorkerMode.set(this, false);
33
50
  this.workflowExecutionService_ = workflowExecutionService;
@@ -37,14 +54,27 @@ class RedisDistributedTransactionStorage {
37
54
  this.cleanerQueueName = "workflows-cleaner";
38
55
  this.queueName = redisQueueName;
39
56
  this.jobQueueName = redisJobQueueName;
40
- this.queue = new bullmq_1.Queue(redisQueueName, { connection: this.redisClient });
57
+ // Store per-queue options
58
+ this.mainQueueOptions_ = redisMainQueueOptions ?? {};
59
+ this.mainWorkerOptions_ = redisMainWorkerOptions ?? {};
60
+ this.jobQueueOptions_ = redisJobQueueOptions ?? {};
61
+ this.jobWorkerOptions_ = redisJobWorkerOptions ?? {};
62
+ this.cleanerQueueOptions_ = redisCleanerQueueOptions ?? {};
63
+ this.cleanerWorkerOptions_ = redisCleanerWorkerOptions ?? {};
64
+ // Create queues with their respective options
65
+ this.queue = new bullmq_1.Queue(redisQueueName, {
66
+ ...this.mainQueueOptions_,
67
+ connection: this.redisClient,
68
+ });
41
69
  this.jobQueue = isWorkerMode
42
70
  ? new bullmq_1.Queue(redisJobQueueName, {
71
+ ...this.jobQueueOptions_,
43
72
  connection: this.redisClient,
44
73
  })
45
74
  : undefined;
46
75
  this.cleanerQueue_ = isWorkerMode
47
76
  ? new bullmq_1.Queue(this.cleanerQueueName, {
77
+ ...this.cleanerQueueOptions_,
48
78
  connection: this.redisClient,
49
79
  })
50
80
  : undefined;
@@ -54,12 +84,6 @@ class RedisDistributedTransactionStorage {
54
84
  // Close worker gracefully, i.e. wait for the current jobs to finish
55
85
  await this.worker?.close();
56
86
  await this.jobWorker?.close();
57
- const repeatableJobs = (await this.cleanerQueue_?.getRepeatableJobs()) ?? [];
58
- for (const job of repeatableJobs) {
59
- if (job.id === REPEATABLE_CLEARER_JOB_ID) {
60
- await this.cleanerQueue_?.removeRepeatableByKey(job.key);
61
- }
62
- }
63
87
  await this.cleanerWorker_?.close();
64
88
  }
65
89
  async onApplicationShutdown() {
@@ -68,38 +92,56 @@ class RedisDistributedTransactionStorage {
68
92
  await this.cleanerQueue_?.close();
69
93
  }
70
94
  async onApplicationStart() {
95
+ await this.ensureRedisConnection();
71
96
  const allowedJobs = [
72
97
  JobType.RETRY,
73
98
  JobType.STEP_TIMEOUT,
74
99
  JobType.TRANSACTION_TIMEOUT,
75
100
  ];
76
- const workerOptions = {
101
+ // Per-worker options with their respective configurations
102
+ const mainWorkerOptions = {
103
+ ...this.mainWorkerOptions_,
104
+ connection: this.redisWorkerConnection,
105
+ };
106
+ const jobWorkerOptions = {
107
+ ...this.jobWorkerOptions_,
108
+ connection: this.redisWorkerConnection,
109
+ };
110
+ const cleanerWorkerOptions = {
111
+ ...this.cleanerWorkerOptions_,
77
112
  connection: this.redisWorkerConnection,
78
113
  };
79
114
  // TODO: Remove this once we have released to all clients (Added: v2.6+)
80
115
  // Remove all repeatable jobs from the old queue since now we have a queue dedicated to scheduled jobs
81
116
  await this.removeAllRepeatableJobs(this.queue);
82
- this.worker = new bullmq_1.Worker(this.queueName, async (job) => {
83
- this.logger_.debug(`executing job ${job.name} from queue ${this.queueName} with the following data: ${JSON.stringify(job.data)}`);
84
- if (allowedJobs.includes(job.name)) {
85
- await this.executeTransaction(job.data.workflowId, job.data.transactionId, job.data.transactionMetadata);
86
- }
87
- if (job.name === JobType.SCHEDULE) {
88
- // Remove repeatable job from the old queue since now we have a queue dedicated to scheduled jobs
89
- await this.remove(job.data.jobId);
90
- }
91
- }, workerOptions);
92
117
  if (__classPrivateFieldGet(this, _RedisDistributedTransactionStorage_isWorkerMode, "f")) {
118
+ this.worker = new bullmq_1.Worker(this.queueName, async (job) => {
119
+ this.logger_.debug(`executing job ${job.name} from queue ${this.queueName} with the following data: ${JSON.stringify(job.data)}`);
120
+ if (allowedJobs.includes(job.name)) {
121
+ try {
122
+ await this.executeTransaction(job.data.workflowId, job.data.transactionId, job.data.transactionMetadata);
123
+ }
124
+ catch (error) {
125
+ if (!orchestration_1.SkipExecutionError.isSkipExecutionError(error)) {
126
+ throw error;
127
+ }
128
+ }
129
+ }
130
+ if (job.name === JobType.SCHEDULE) {
131
+ // Remove repeatable job from the old queue since now we have a queue dedicated to scheduled jobs
132
+ await this.remove(job.data.jobId);
133
+ }
134
+ }, mainWorkerOptions);
93
135
  this.jobWorker = new bullmq_1.Worker(this.jobQueueName, async (job) => {
94
136
  this.logger_.debug(`executing scheduled job ${job.data.jobId} from queue ${this.jobQueueName} with the following options: ${JSON.stringify(job.data.schedulerOptions)}`);
95
137
  return await this.executeScheduledJob(job.data.jobId, job.data.schedulerOptions);
96
- }, workerOptions);
138
+ }, jobWorkerOptions);
97
139
  this.cleanerWorker_ = new bullmq_1.Worker(this.cleanerQueueName, async () => {
98
140
  await this.clearExpiredExecutions();
99
- }, { connection: this.redisClient });
141
+ }, cleanerWorkerOptions);
100
142
  await this.cleanerQueue_?.add("cleaner", {}, {
101
143
  repeat: {
102
- every: ONE_HOUR_IN_MS,
144
+ every: THIRTY_MINUTES_IN_MS,
103
145
  },
104
146
  jobId: REPEATABLE_CLEARER_JOB_ID,
105
147
  removeOnComplete: true,
@@ -110,7 +152,80 @@ class RedisDistributedTransactionStorage {
110
152
  setWorkflowOrchestratorService(workflowOrchestratorService) {
111
153
  this.workflowOrchestratorService_ = workflowOrchestratorService;
112
154
  }
155
+ async ensureRedisConnection() {
156
+ const reconnectTasks = [];
157
+ if (this.redisClient.status !== "ready") {
158
+ this.logger_.warn(`[Workflow-engine-redis] Redis connection is not ready (status: ${this.redisClient.status}). Attempting to reconnect...`);
159
+ reconnectTasks.push(this.redisClient
160
+ .connect()
161
+ .then(() => {
162
+ this.logger_.info("[Workflow-engine-redis] Redis connection reestablished successfully");
163
+ })
164
+ .catch((error) => {
165
+ this.logger_.error("[Workflow-engine-redis] Failed to reconnect to Redis", error);
166
+ throw new utils_1.EtoError(utils_1.EtoError.Types.DB_ERROR, `Redis connection failed: ${error.message}`);
167
+ }));
168
+ }
169
+ if (this.redisWorkerConnection.status !== "ready") {
170
+ this.logger_.warn(`[Workflow-engine-redis] Redis worker connection is not ready (status: ${this.redisWorkerConnection.status}). Attempting to reconnect...`);
171
+ reconnectTasks.push(this.redisWorkerConnection
172
+ .connect()
173
+ .then(() => {
174
+ this.logger_.info("[Workflow-engine-redis] Redis worker connection reestablished successfully");
175
+ })
176
+ .catch((error) => {
177
+ this.logger_.error("[Workflow-engine-redis] Failed to reconnect to Redis worker connection", error);
178
+ throw new utils_1.EtoError(utils_1.EtoError.Types.DB_ERROR, `Redis worker connection failed: ${error.message}`);
179
+ }));
180
+ }
181
+ if (reconnectTasks.length > 0) {
182
+ await (0, utils_1.promiseAll)(reconnectTasks);
183
+ }
184
+ }
113
185
  async saveToDb(data, retentionTime) {
186
+ const isNotStarted = data.flow.state === utils_1.TransactionState.NOT_STARTED;
187
+ const asyncVersion = data.flow._v;
188
+ const isFinished = finishedStates.has(data.flow.state);
189
+ const isWaitingToCompensate = data.flow.state === utils_1.TransactionState.WAITING_TO_COMPENSATE;
190
+ const isFlowInvoking = data.flow.state === utils_1.TransactionState.INVOKING;
191
+ const stepsArray = Object.values(data.flow.steps);
192
+ let currentStep;
193
+ const targetStates = isFlowInvoking
194
+ ? new Set([
195
+ utils_1.TransactionStepState.INVOKING,
196
+ utils_1.TransactionStepState.DONE,
197
+ utils_1.TransactionStepState.FAILED,
198
+ ])
199
+ : new Set([utils_1.TransactionStepState.COMPENSATING]);
200
+ for (let i = stepsArray.length - 1; i >= 0; i--) {
201
+ const step = stepsArray[i];
202
+ if (step.id === "_root") {
203
+ break;
204
+ }
205
+ const isTargetState = targetStates.has(step.invoke?.state);
206
+ if (isTargetState && !currentStep) {
207
+ currentStep = step;
208
+ break;
209
+ }
210
+ }
211
+ let shouldStoreCurrentSteps = false;
212
+ if (currentStep) {
213
+ for (const step of stepsArray) {
214
+ if (step.id === "_root") {
215
+ continue;
216
+ }
217
+ if (step.depth === currentStep.depth &&
218
+ step?.definition?.store === true) {
219
+ shouldStoreCurrentSteps = true;
220
+ break;
221
+ }
222
+ }
223
+ }
224
+ if (!(isNotStarted || isFinished || isWaitingToCompensate) &&
225
+ !shouldStoreCurrentSteps &&
226
+ !asyncVersion) {
227
+ return;
228
+ }
114
229
  await this.workflowExecutionService_.upsert([
115
230
  {
116
231
  workflow_id: data.flow.modelId,
@@ -129,8 +244,6 @@ class RedisDistributedTransactionStorage {
129
244
  async deleteFromDb(data) {
130
245
  await this.workflowExecutionService_.delete([
131
246
  {
132
- workflow_id: data.flow.modelId,
133
- transaction_id: data.flow.transactionId,
134
247
  run_id: data.flow.runId,
135
248
  },
136
249
  ]);
@@ -166,21 +279,25 @@ class RedisDistributedTransactionStorage {
166
279
  }
167
280
  async get(key, options) {
168
281
  const [_, workflowId, transactionId] = key.split(":");
169
- const trx = await this.workflowExecutionService_
170
- .list({
171
- workflow_id: workflowId,
172
- transaction_id: transactionId,
173
- }, {
174
- select: ["execution", "context"],
175
- order: {
176
- id: "desc",
177
- },
178
- take: 1,
179
- })
180
- .then((trx) => trx[0])
181
- .catch(() => undefined);
282
+ const [trx, rawData] = await (0, utils_1.promiseAll)([
283
+ this.workflowExecutionService_
284
+ .list({
285
+ workflow_id: workflowId,
286
+ transaction_id: transactionId,
287
+ }, {
288
+ select: ["execution", "context"],
289
+ order: {
290
+ id: "desc",
291
+ },
292
+ take: 1,
293
+ })
294
+ .then((trx) => trx[0])
295
+ .catch(() => undefined),
296
+ options?._cachedRawData !== undefined
297
+ ? Promise.resolve(options._cachedRawData)
298
+ : this.redisClient.get(key),
299
+ ]);
182
300
  if (trx) {
183
- const rawData = await this.redisClient.get(key);
184
301
  let flow, errors;
185
302
  if (rawData) {
186
303
  const data = JSON.parse(rawData);
@@ -190,10 +307,7 @@ class RedisDistributedTransactionStorage {
190
307
  const { idempotent } = options ?? {};
191
308
  const execution = trx.execution;
192
309
  if (!idempotent) {
193
- const isFailedOrReverted = [
194
- utils_1.TransactionState.REVERTED,
195
- utils_1.TransactionState.FAILED,
196
- ].includes(execution.state);
310
+ const isFailedOrReverted = failedStates.has(execution.state);
197
311
  const isDone = execution.state === utils_1.TransactionState.DONE;
198
312
  const isCancellingAndFailedOrReverted = options?.isCancelling && isFailedOrReverted;
199
313
  const isNotCancellingAndDoneOrFailedOrReverted = !options?.isCancelling && (isDone || isFailedOrReverted);
@@ -202,86 +316,101 @@ class RedisDistributedTransactionStorage {
202
316
  return;
203
317
  }
204
318
  }
205
- return {
206
- flow: flow ?? trx.execution,
207
- context: trx.context?.data,
208
- errors: errors ?? trx.context?.errors,
209
- };
319
+ return new orchestration_1.TransactionCheckpoint(flow ?? trx.execution, trx.context?.data, errors ?? trx.context?.errors);
210
320
  }
211
321
  return;
212
322
  }
213
323
  async save(key, data, ttl, options) {
214
324
  /**
215
325
  * Store the retention time only if the transaction is done, failed or reverted.
216
- * From that moment, this tuple can be later on archived or deleted after the retention time.
217
326
  */
218
- const hasFinished = [
219
- utils_1.TransactionState.DONE,
220
- utils_1.TransactionState.FAILED,
221
- utils_1.TransactionState.REVERTED,
222
- ].includes(data.flow.state);
223
- const { retentionTime, idempotent } = options ?? {};
224
- await __classPrivateFieldGet(this, _RedisDistributedTransactionStorage_instances, "m", _RedisDistributedTransactionStorage_preventRaceConditionExecutionIfNecessary).call(this, {
225
- data,
226
- key,
227
- options,
228
- });
229
- if (hasFinished && retentionTime) {
230
- Object.assign(data, {
231
- retention_time: retentionTime,
327
+ const { retentionTime } = options ?? {};
328
+ let lockAcquired = false;
329
+ let storedData;
330
+ if (data.flow._v) {
331
+ lockAcquired = await __classPrivateFieldGet(this, _RedisDistributedTransactionStorage_instances, "m", _RedisDistributedTransactionStorage_acquireLock).call(this, key);
332
+ if (!lockAcquired) {
333
+ throw new Error("Lock not acquired");
334
+ }
335
+ storedData = await this.get(key, {
336
+ isCancelling: !!data.flow.cancelledAt,
232
337
  });
338
+ orchestration_1.TransactionCheckpoint.mergeCheckpoints(data, storedData);
233
339
  }
234
- const isNotStarted = data.flow.state === utils_1.TransactionState.NOT_STARTED;
235
- const isManualTransactionId = !data.flow.transactionId.startsWith("auto-");
236
- // Only set if not exists
237
- const shouldSetNX = isNotStarted && isManualTransactionId;
238
- // Prepare operations to be executed in batch or pipeline
239
- const data_ = {
240
- errors: data.errors,
241
- flow: data.flow,
242
- };
243
- const stringifiedData = JSON.stringify(data_);
244
- const pipeline = this.redisClient.pipeline();
245
- // Execute Redis operations
246
- if (!hasFinished) {
247
- if (ttl) {
248
- if (shouldSetNX) {
249
- pipeline.set(key, stringifiedData, "EX", ttl, "NX");
340
+ try {
341
+ const hasFinished = finishedStates.has(data.flow.state);
342
+ await __classPrivateFieldGet(this, _RedisDistributedTransactionStorage_instances, "m", _RedisDistributedTransactionStorage_preventRaceConditionExecutionIfNecessary).call(this, {
343
+ data: data,
344
+ key,
345
+ options,
346
+ storedData,
347
+ });
348
+ // Only set if not exists
349
+ const shouldSetNX = data.flow.state === utils_1.TransactionState.NOT_STARTED &&
350
+ !data.flow.transactionId.startsWith("auto-");
351
+ if (retentionTime) {
352
+ Object.assign(data, {
353
+ retention_time: retentionTime,
354
+ });
355
+ }
356
+ const execPipeline = () => {
357
+ const stringifiedData = JSON.stringify({
358
+ errors: data.errors,
359
+ flow: data.flow,
360
+ });
361
+ const pipeline = this.redisClient.pipeline();
362
+ if (!hasFinished) {
363
+ if (ttl) {
364
+ if (shouldSetNX) {
365
+ pipeline.set(key, stringifiedData, "EX", ttl, "NX");
366
+ }
367
+ else {
368
+ pipeline.set(key, stringifiedData, "EX", ttl);
369
+ }
370
+ }
371
+ else {
372
+ if (shouldSetNX) {
373
+ pipeline.set(key, stringifiedData, "NX");
374
+ }
375
+ else {
376
+ pipeline.set(key, stringifiedData);
377
+ }
378
+ }
250
379
  }
251
380
  else {
252
- pipeline.set(key, stringifiedData, "EX", ttl);
381
+ pipeline.unlink(key);
253
382
  }
254
- }
255
- else {
256
- if (shouldSetNX) {
257
- pipeline.set(key, stringifiedData, "NX");
383
+ return pipeline.exec().then((result) => {
384
+ if (!shouldSetNX) {
385
+ return result;
386
+ }
387
+ const actionResult = result?.pop();
388
+ const isOk = !!actionResult?.pop();
389
+ if (!isOk) {
390
+ throw new orchestration_1.SkipExecutionError("Transaction already started for transactionId: " +
391
+ data.flow.transactionId);
392
+ }
393
+ return result;
394
+ });
395
+ };
396
+ // Parallelize DB and Redis operations for better performance
397
+ if (hasFinished && !retentionTime) {
398
+ if (!data.flow.metadata?.parentStepIdempotencyKey) {
399
+ await (0, utils_1.promiseAll)([this.deleteFromDb(data), execPipeline()]);
258
400
  }
259
401
  else {
260
- pipeline.set(key, stringifiedData);
402
+ await (0, utils_1.promiseAll)([this.saveToDb(data, retentionTime), execPipeline()]);
261
403
  }
262
404
  }
263
- }
264
- else {
265
- pipeline.unlink(key);
266
- }
267
- const pipelinePromise = pipeline.exec().then((result) => {
268
- if (!shouldSetNX) {
269
- return result;
270
- }
271
- const actionResult = result?.pop();
272
- const isOk = !!actionResult?.pop();
273
- if (!isOk) {
274
- throw new utils_1.EtoError(utils_1.EtoError.Types.INVALID_ARGUMENT, "Transaction already started for transactionId: " +
275
- data.flow.transactionId);
405
+ else {
406
+ await (0, utils_1.promiseAll)([this.saveToDb(data, retentionTime), execPipeline()]);
276
407
  }
277
- return result;
278
- });
279
- // Database operations
280
- if (hasFinished && !retentionTime && !idempotent) {
281
- await (0, utils_1.promiseAll)([pipelinePromise, this.deleteFromDb(data)]);
408
+ return data;
282
409
  }
283
- else {
284
- await (0, utils_1.promiseAll)([pipelinePromise, this.saveToDb(data, retentionTime)]);
410
+ finally {
411
+ if (lockAcquired) {
412
+ await __classPrivateFieldGet(this, _RedisDistributedTransactionStorage_instances, "m", _RedisDistributedTransactionStorage_releaseLock).call(this, key);
413
+ }
285
414
  }
286
415
  }
287
416
  async scheduleRetry(transaction, step, timestamp, interval) {
@@ -292,12 +421,16 @@ class RedisDistributedTransactionStorage {
292
421
  stepId: step.id,
293
422
  }, {
294
423
  delay: interval > 0 ? interval * 1000 : undefined,
295
- jobId: this.getJobId(JobType.RETRY, transaction, step),
424
+ jobId: this.getJobId(JobType.RETRY, transaction, step, interval),
296
425
  removeOnComplete: true,
297
426
  });
298
427
  }
299
428
  async clearRetry(transaction, step) {
300
- await this.removeJob(JobType.RETRY, transaction, step);
429
+ // Pass retry interval to ensure we remove the correct job (with -retry suffix if interval > 0)
430
+ const interval = step.definition.retryInterval ||
431
+ step.definition.retryIntervalAwaiting ||
432
+ 0;
433
+ await this.removeJob(JobType.RETRY, transaction, step, interval);
301
434
  }
302
435
  async scheduleTransactionTimeout(transaction, _, interval) {
303
436
  await this.queue.add(JobType.TRANSACTION_TIMEOUT, {
@@ -328,18 +461,26 @@ class RedisDistributedTransactionStorage {
328
461
  async clearStepTimeout(transaction, step) {
329
462
  await this.removeJob(JobType.STEP_TIMEOUT, transaction, step);
330
463
  }
331
- getJobId(type, transaction, step) {
464
+ getJobId(type, transaction, step, interval) {
332
465
  const key = [type, transaction.modelId, transaction.transactionId];
333
466
  if (step) {
334
- key.push(step.id, step.attempts + "");
467
+ key.push(step.id);
468
+ // Step timeout has a single job per step
469
+ if (type !== JobType.STEP_TIMEOUT) {
470
+ key.push(step.attempts + "");
471
+ }
472
+ // Add suffix for retry scheduling (interval > 0) to avoid collision with async execution (interval = 0)
473
+ if (type === JobType.RETRY && (0, utils_1.isDefined)(interval) && interval > 0) {
474
+ key.push("retry");
475
+ }
335
476
  if (step.isCompensating()) {
336
477
  key.push("compensate");
337
478
  }
338
479
  }
339
480
  return key.join(":");
340
481
  }
341
- async removeJob(type, transaction, step) {
342
- const jobId = this.getJobId(type, transaction, step);
482
+ async removeJob(type, transaction, step, interval) {
483
+ const jobId = this.getJobId(type, transaction, step, interval);
343
484
  if (type === JobType.SCHEDULE) {
344
485
  const job = await this.jobQueue?.getJob(jobId);
345
486
  if (job) {
@@ -401,7 +542,7 @@ class RedisDistributedTransactionStorage {
401
542
  $ne: null,
402
543
  },
403
544
  updated_at: {
404
- $lte: (0, core_1.raw)((alias) => `CURRENT_TIMESTAMP - (INTERVAL '1 second' * "retention_time")`),
545
+ $lte: (0, core_1.raw)((_alias) => `CURRENT_TIMESTAMP - (INTERVAL '1 second' * "retention_time")`),
405
546
  },
406
547
  state: {
407
548
  $in: [
@@ -414,29 +555,59 @@ class RedisDistributedTransactionStorage {
414
555
  }
415
556
  }
416
557
  exports.RedisDistributedTransactionStorage = RedisDistributedTransactionStorage;
417
- _RedisDistributedTransactionStorage_isWorkerMode = new WeakMap(), _RedisDistributedTransactionStorage_instances = new WeakSet(), _RedisDistributedTransactionStorage_preventRaceConditionExecutionIfNecessary = async function _RedisDistributedTransactionStorage_preventRaceConditionExecutionIfNecessary({ data, key, options, }) {
558
+ _RedisDistributedTransactionStorage_isWorkerMode = new WeakMap(), _RedisDistributedTransactionStorage_instances = new WeakSet(), _RedisDistributedTransactionStorage_getLockKey = function _RedisDistributedTransactionStorage_getLockKey(key) {
559
+ return `${key}:lock`;
560
+ }, _RedisDistributedTransactionStorage_acquireLock = async function _RedisDistributedTransactionStorage_acquireLock(key, ttlSeconds = 2) {
561
+ const lockKey = __classPrivateFieldGet(this, _RedisDistributedTransactionStorage_instances, "m", _RedisDistributedTransactionStorage_getLockKey).call(this, key);
562
+ const result = await this.redisClient.set(lockKey, 1, "EX", ttlSeconds, "NX");
563
+ return result === "OK";
564
+ }, _RedisDistributedTransactionStorage_releaseLock = async function _RedisDistributedTransactionStorage_releaseLock(key) {
565
+ const lockKey = __classPrivateFieldGet(this, _RedisDistributedTransactionStorage_instances, "m", _RedisDistributedTransactionStorage_getLockKey).call(this, key);
566
+ await this.redisClient.del(lockKey);
567
+ }, _RedisDistributedTransactionStorage_preventRaceConditionExecutionIfNecessary = async function _RedisDistributedTransactionStorage_preventRaceConditionExecutionIfNecessary({ data, key, options, storedData, }) {
418
568
  const isInitialCheckpoint = [utils_1.TransactionState.NOT_STARTED].includes(data.flow.state);
419
569
  /**
420
570
  * In case many execution can succeed simultaneously, we need to ensure that the latest
421
571
  * execution does continue if a previous execution is considered finished
422
572
  */
423
573
  const currentFlow = data.flow;
424
- const rawData = await this.redisClient.get(key);
425
- let data_ = {};
426
- if (rawData) {
427
- data_ = JSON.parse(rawData);
428
- }
429
- else {
430
- const getOptions = {
431
- ...options,
432
- isCancelling: !!data.flow.cancelledAt,
433
- };
434
- data_ =
435
- (await this.get(key, getOptions)) ??
436
- { flow: {} };
574
+ let data_ = storedData ?? {};
575
+ if (!storedData) {
576
+ const rawData = await this.redisClient.get(key);
577
+ if (rawData) {
578
+ data_ = JSON.parse(rawData);
579
+ }
580
+ else {
581
+ // Pass cached raw data to avoid redundant Redis fetch
582
+ const getOptions = {
583
+ ...options,
584
+ isCancelling: !!data.flow.cancelledAt,
585
+ _cachedRawData: rawData,
586
+ };
587
+ data_ =
588
+ (await this.get(key, getOptions)) ??
589
+ { flow: {} };
590
+ }
437
591
  }
438
592
  const { flow: latestUpdatedFlow } = data_;
439
- if (!isInitialCheckpoint && !(0, utils_1.isPresent)(latestUpdatedFlow)) {
593
+ if (options?.stepId) {
594
+ const stepId = options.stepId;
595
+ const currentStep = data.flow.steps[stepId];
596
+ const latestStep = latestUpdatedFlow.steps?.[stepId];
597
+ if (latestStep && currentStep) {
598
+ const isCompensating = data.flow.state === utils_1.TransactionState.COMPENSATING;
599
+ const latestState = isCompensating
600
+ ? latestStep.compensate?.state
601
+ : latestStep.invoke?.state;
602
+ const shouldSkip = doneStates.has(latestState);
603
+ if (shouldSkip) {
604
+ throw new orchestration_1.SkipStepAlreadyFinishedError(`Step ${stepId} already finished by another execution`);
605
+ }
606
+ }
607
+ }
608
+ if (!isInitialCheckpoint &&
609
+ !(0, utils_1.isPresent)(latestUpdatedFlow) &&
610
+ !data.flow.metadata?.parentStepIdempotencyKey) {
440
611
  /**
441
612
  * the initial checkpoint expect no other checkpoint to have been stored.
442
613
  * In case it is not the initial one and another checkpoint is trying to
@@ -445,64 +616,12 @@ _RedisDistributedTransactionStorage_isWorkerMode = new WeakMap(), _RedisDistribu
445
616
  */
446
617
  throw new orchestration_1.SkipExecutionError("Already finished by another execution");
447
618
  }
448
- // First ensure that the latest execution was not cancelled, otherwise we skip the execution
619
+ // Ensure that the latest execution was not cancelled, otherwise we skip the execution
449
620
  const latestTransactionCancelledAt = latestUpdatedFlow.cancelledAt;
450
621
  const currentTransactionCancelledAt = currentFlow.cancelledAt;
451
622
  if (!!latestTransactionCancelledAt &&
452
623
  currentTransactionCancelledAt == null) {
453
624
  throw new orchestration_1.SkipCancelledExecutionError("Workflow execution has been cancelled during the execution");
454
625
  }
455
- const currentFlowSteps = Object.values(currentFlow.steps || {});
456
- const latestUpdatedFlowSteps = latestUpdatedFlow.steps
457
- ? Object.values(latestUpdatedFlow.steps)
458
- : [];
459
- // Predefined states for quick lookup
460
- const invokingStates = [
461
- utils_1.TransactionStepState.INVOKING,
462
- utils_1.TransactionStepState.NOT_STARTED,
463
- ];
464
- const compensatingStates = [
465
- utils_1.TransactionStepState.COMPENSATING,
466
- utils_1.TransactionStepState.NOT_STARTED,
467
- ];
468
- const isInvokingState = (step) => invokingStates.includes(step.invoke?.state);
469
- const isCompensatingState = (step) => compensatingStates.includes(step.compensate?.state);
470
- const currentFlowLastInvokingStepIndex = currentFlowSteps.findIndex(isInvokingState);
471
- const latestUpdatedFlowLastInvokingStepIndex = !latestUpdatedFlow.steps
472
- ? 1 // There is no other execution, so the current execution is the latest
473
- : latestUpdatedFlowSteps.findIndex(isInvokingState);
474
- const reversedCurrentFlowSteps = [...currentFlowSteps].reverse();
475
- const currentFlowLastCompensatingStepIndex = reversedCurrentFlowSteps.findIndex(isCompensatingState);
476
- const reversedLatestUpdatedFlowSteps = [...latestUpdatedFlowSteps].reverse();
477
- const latestUpdatedFlowLastCompensatingStepIndex = !latestUpdatedFlow.steps
478
- ? -1
479
- : reversedLatestUpdatedFlowSteps.findIndex(isCompensatingState);
480
- const isLatestExecutionFinishedIndex = -1;
481
- const invokeShouldBeSkipped = (latestUpdatedFlowLastInvokingStepIndex ===
482
- isLatestExecutionFinishedIndex ||
483
- currentFlowLastInvokingStepIndex <
484
- latestUpdatedFlowLastInvokingStepIndex) &&
485
- currentFlowLastInvokingStepIndex !== isLatestExecutionFinishedIndex;
486
- const compensateShouldBeSkipped = currentFlowLastCompensatingStepIndex <
487
- latestUpdatedFlowLastCompensatingStepIndex &&
488
- currentFlowLastCompensatingStepIndex !== isLatestExecutionFinishedIndex &&
489
- latestUpdatedFlowLastCompensatingStepIndex !==
490
- isLatestExecutionFinishedIndex;
491
- const isCompensatingMismatch = latestUpdatedFlow.state === utils_1.TransactionState.COMPENSATING &&
492
- ![utils_1.TransactionState.REVERTED, utils_1.TransactionState.FAILED].includes(currentFlow.state) &&
493
- currentFlow.state !== latestUpdatedFlow.state;
494
- const isRevertedMismatch = latestUpdatedFlow.state === utils_1.TransactionState.REVERTED &&
495
- currentFlow.state !== utils_1.TransactionState.REVERTED;
496
- const isFailedMismatch = latestUpdatedFlow.state === utils_1.TransactionState.FAILED &&
497
- currentFlow.state !== utils_1.TransactionState.FAILED;
498
- if ((data.flow.state !== utils_1.TransactionState.COMPENSATING &&
499
- invokeShouldBeSkipped) ||
500
- (data.flow.state === utils_1.TransactionState.COMPENSATING &&
501
- compensateShouldBeSkipped) ||
502
- isCompensatingMismatch ||
503
- isRevertedMismatch ||
504
- isFailedMismatch) {
505
- throw new orchestration_1.SkipExecutionError("Already finished by another execution");
506
- }
507
626
  };
508
627
  //# sourceMappingURL=workflow-orchestrator-storage.js.map