@openfn/ws-worker 1.19.3 → 1.19.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
1
1
  # ws-worker
2
2
 
3
+ ## 1.19.5
4
+
5
+ ### Patch Changes
6
+
7
+ - b42b0b0: Force disable message retries
8
+
9
+ ## 1.19.4
10
+
11
+ ### Patch Changes
12
+
13
+ - 3e0aac7: Retry events if they are timed out. Will retry WORKER_TIMEOUT_RETRY_COUNT times and wait for WORKER_TIMEOUT_RETRY_DELAY_MS or WORKER_MESSAGE_TIMEOUT_SECONDS between retries
14
+
3
15
  ## 1.19.3
4
16
 
5
17
  ### Patch Changes
package/dist/index.d.ts CHANGED
@@ -3,8 +3,8 @@ import Koa from 'koa';
3
3
  import { LogLevel, Logger } from '@openfn/logger';
4
4
  import * as l from '@openfn/lexicon/lightning';
5
5
  import { ClaimRun } from '@openfn/lexicon/lightning';
6
+ import { ExecutionPlan, Lazy, State, UUID } from '@openfn/lexicon';
6
7
  import { ExecuteOptions, RuntimeEngine } from '@openfn/engine-multi';
7
- import { ExecutionPlan, Lazy, State } from '@openfn/lexicon';
8
8
  import { Channel as Channel$1 } from 'phoenix';
9
9
  import { Server } from 'http';
10
10
 
@@ -50,10 +50,12 @@ type WorkerRunOptions = ExecuteOptions & {
50
50
  outputDataclips?: boolean;
51
51
  payloadLimitMb?: number;
52
52
  jobLogLevel?: LogLevel;
53
+ timeoutRetryCount?: number;
54
+ timeoutRetryDelay?: number;
53
55
  };
54
56
 
55
57
  type Context = {
56
- id: string;
58
+ id: UUID;
57
59
  channel: Channel;
58
60
  state: RunState;
59
61
  logger: Logger;
@@ -83,6 +85,8 @@ type ServerOptions = {
83
85
  collectionsVersion?: string;
84
86
  collectionsUrl?: string;
85
87
  monorepoDir?: string;
88
+ timeoutRetryCount?: number;
89
+ timeoutRetryDelayMs?: number;
86
90
  };
87
91
  interface ServerApp extends Koa {
88
92
  id: string;
package/dist/index.js CHANGED
@@ -525,7 +525,10 @@ var LightningTimeoutError = class extends Error {
525
525
  };
526
526
 
527
527
  // src/util/send-event.ts
528
- var sendEvent = (context, event, payload) => {
528
+ var allowRetryOntimeout = false;
529
+ var sendEvent = (context, event, payload, attempts) => {
530
+ const { timeoutRetryCount = 1, timeoutRetryDelay = 1 } = context.options ?? {};
531
+ const thisAttempt = attempts ?? 1;
529
532
  const { channel, logger, id: runId = "<unknown run>" } = context;
530
533
  return new Promise((resolve, reject) => {
531
534
  const report = (error) => {
@@ -549,7 +552,16 @@ var sendEvent = (context, event, payload) => {
549
552
  channel.push(event, payload).receive("error", (message) => {
550
553
  report(new LightningSocketError(event, message));
551
554
  }).receive("timeout", () => {
552
- report(new LightningTimeoutError(event));
555
+ if (!allowRetryOntimeout || thisAttempt >= timeoutRetryCount) {
556
+ report(new LightningTimeoutError(event));
557
+ } else {
558
+ logger.warn(
559
+ `${runId} event ${event} timed out, will retry in ${timeoutRetryDelay}ms (attempt ${thisAttempt + 1} of ${timeoutRetryCount})`
560
+ );
561
+ setTimeout(() => {
562
+ sendEvent(context, event, payload, thisAttempt + 1).then(resolve).catch(reject);
563
+ }, timeoutRetryDelay);
564
+ }
553
565
  }).receive("ok", resolve);
554
566
  });
555
567
  };
@@ -957,7 +969,6 @@ function execute(channel, engine, logger, plan, input, options = {}, onFinish =
957
969
  });
958
970
  engine.execute(plan, loadedInput, { resolvers, ...options });
959
971
  } catch (e) {
960
- process.exit(66);
961
972
  Sentry3.addBreadcrumb({
962
973
  category: "run",
963
974
  message: "exception in run",
@@ -996,7 +1007,7 @@ function onJobLog(context, event) {
996
1007
  message = JSON.parse(event.message);
997
1008
  }
998
1009
  const log = {
999
- run_id: state.plan.id,
1010
+ run_id: `${state.plan.id}`,
1000
1011
  message,
1001
1012
  source: event.name,
1002
1013
  level: event.level,
@@ -1038,7 +1049,7 @@ var joinRunChannel = (socket, token, runId, logger, timeout = 30) => {
1038
1049
  didReceiveOk = true;
1039
1050
  logger.success(`connected to ${channelName}`, e);
1040
1051
  const run = await send_event_default(
1041
- { channel, logger, id: runId },
1052
+ { channel, logger, id: runId, options: {} },
1042
1053
  GET_PLAN
1043
1054
  );
1044
1055
  resolve({ channel, run });
@@ -1336,6 +1347,8 @@ function createServer(engine, options = {}) {
1336
1347
  if (!("payloadLimitMb" in options2)) {
1337
1348
  options2.payloadLimitMb = app.options.payloadLimitMb;
1338
1349
  }
1350
+ options2.timeoutRetryCount = app.options.timeoutRetryCount;
1351
+ options2.timeoutRetryDelay = app.options.timeoutRetryDelayMs ?? app.options.socketTimeoutSeconds;
1339
1352
  const onFinish = () => {
1340
1353
  const duration = (Date.now() - start) / 1e3;
1341
1354
  logger.debug(
package/dist/start.js CHANGED
@@ -674,7 +674,10 @@ var LightningTimeoutError = class extends Error {
674
674
  };
675
675
 
676
676
  // src/util/send-event.ts
677
- var sendEvent = (context, event, payload) => {
677
+ var allowRetryOntimeout = false;
678
+ var sendEvent = (context, event, payload, attempts) => {
679
+ const { timeoutRetryCount = 1, timeoutRetryDelay = 1 } = context.options ?? {};
680
+ const thisAttempt = attempts ?? 1;
678
681
  const { channel, logger: logger2, id: runId = "<unknown run>" } = context;
679
682
  return new Promise((resolve5, reject) => {
680
683
  const report = (error) => {
@@ -698,7 +701,16 @@ var sendEvent = (context, event, payload) => {
698
701
  channel.push(event, payload).receive("error", (message) => {
699
702
  report(new LightningSocketError(event, message));
700
703
  }).receive("timeout", () => {
701
- report(new LightningTimeoutError(event));
704
+ if (!allowRetryOntimeout || thisAttempt >= timeoutRetryCount) {
705
+ report(new LightningTimeoutError(event));
706
+ } else {
707
+ logger2.warn(
708
+ `${runId} event ${event} timed out, will retry in ${timeoutRetryDelay}ms (attempt ${thisAttempt + 1} of ${timeoutRetryCount})`
709
+ );
710
+ setTimeout(() => {
711
+ sendEvent(context, event, payload, thisAttempt + 1).then(resolve5).catch(reject);
712
+ }, timeoutRetryDelay);
713
+ }
702
714
  }).receive("ok", resolve5);
703
715
  });
704
716
  };
@@ -1106,7 +1118,6 @@ function execute(channel, engine, logger2, plan, input, options = {}, onFinish =
1106
1118
  });
1107
1119
  engine.execute(plan, loadedInput, { resolvers, ...options });
1108
1120
  } catch (e) {
1109
- process.exit(66);
1110
1121
  Sentry3.addBreadcrumb({
1111
1122
  category: "run",
1112
1123
  message: "exception in run",
@@ -1145,7 +1156,7 @@ function onJobLog(context, event) {
1145
1156
  message = JSON.parse(event.message);
1146
1157
  }
1147
1158
  const log = {
1148
- run_id: state.plan.id,
1159
+ run_id: `${state.plan.id}`,
1149
1160
  message,
1150
1161
  source: event.name,
1151
1162
  level: event.level,
@@ -1187,7 +1198,7 @@ var joinRunChannel = (socket, token, runId, logger2, timeout = 30) => {
1187
1198
  didReceiveOk = true;
1188
1199
  logger2.success(`connected to ${channelName}`, e);
1189
1200
  const run2 = await send_event_default(
1190
- { channel, logger: logger2, id: runId },
1201
+ { channel, logger: logger2, id: runId, options: {} },
1191
1202
  GET_PLAN
1192
1203
  );
1193
1204
  resolve5({ channel, run: run2 });
@@ -1485,6 +1496,8 @@ function createServer(engine, options = {}) {
1485
1496
  if (!("payloadLimitMb" in options2)) {
1486
1497
  options2.payloadLimitMb = app.options.payloadLimitMb;
1487
1498
  }
1499
+ options2.timeoutRetryCount = app.options.timeoutRetryCount;
1500
+ options2.timeoutRetryDelay = app.options.timeoutRetryDelayMs ?? app.options.socketTimeoutSeconds;
1488
1501
  const onFinish = () => {
1489
1502
  const duration = (Date.now() - start) / 1e3;
1490
1503
  logger2.debug(
@@ -6449,14 +6462,16 @@ function parseArgs(argv) {
6449
6462
  WORKER_MAX_RUN_MEMORY_MB,
6450
6463
  WORKER_MESSAGE_TIMEOUT_SECONDS,
6451
6464
  WORKER_PORT,
6452
- WORKER_PROFILE,
6453
6465
  WORKER_PROFILE_POLL_INTERVAL_MS,
6466
+ WORKER_PROFILE,
6454
6467
  WORKER_REPO_DIR,
6455
6468
  WORKER_SECRET,
6456
6469
  WORKER_SENTRY_DSN,
6457
6470
  WORKER_SENTRY_ENV,
6458
6471
  WORKER_SOCKET_TIMEOUT_SECONDS,
6459
6472
  WORKER_STATE_PROPS_TO_REMOVE,
6473
+ WORKER_TIMEOUT_RETRY_COUNT,
6474
+ WORKER_TIMEOUT_RETRY_DELAY_MS,
6460
6475
  WORKER_VALIDATION_RETRIES,
6461
6476
  WORKER_VALIDATION_TIMEOUT_MS
6462
6477
  } = process.env;
@@ -6539,6 +6554,12 @@ function parseArgs(argv) {
6539
6554
  }).option("profile-poll-interval-ms", {
6540
6555
  description: "Interval for polling profile data, in milliseconds. Default 10. Env: WORKER_PROFILE_POLL_INTERVAL_MS",
6541
6556
  type: "number"
6557
+ }).option("timeout-retry-count", {
6558
+ description: "When a websocket event receives a timeout, this option sets how many times the worker should retry it. Default 10. Env: WORKER_TIMEOUT_RETRY_COUNT",
6559
+ type: "number"
6560
+ }).option("timeout-retry-delay", {
6561
+ description: "When a websocket event receives a timeout, this option sets how long to wait before retrying. Default 30000. Env: WORKER_TIMEOUT_RETRY_DELAY_MS",
6562
+ type: "number"
6542
6563
  });
6543
6564
  const args2 = parser2.parse();
6544
6565
  return {
@@ -6607,6 +6628,16 @@ function parseArgs(argv) {
6607
6628
  args2.profilePollIntervalMs,
6608
6629
  WORKER_PROFILE_POLL_INTERVAL_MS,
6609
6630
  10
6631
+ ),
6632
+ timeoutRetryCount: setArg(
6633
+ args2.timeoutRetryCount,
6634
+ WORKER_TIMEOUT_RETRY_COUNT,
6635
+ 10
6636
+ ),
6637
+ timeoutRetryDelayMs: setArg(
6638
+ args2.timeoutRetryDelayMs,
6639
+ WORKER_TIMEOUT_RETRY_DELAY_MS,
6640
+ 30 * 1e3
6610
6641
  )
6611
6642
  };
6612
6643
  }
@@ -6647,9 +6678,11 @@ function engineReady(engine) {
6647
6678
  messageTimeoutSeconds: args.messageTimeoutSeconds,
6648
6679
  claimTimeoutSeconds: args.claimTimeoutSeconds,
6649
6680
  // deprecated!
6650
- socketTimeoutSeconds: args.socketTimeoutSeconds
6681
+ socketTimeoutSeconds: args.socketTimeoutSeconds,
6682
+ timeoutRetryCount: args.timeoutRetryCount,
6683
+ timeoutRetryDelayMs: args.timeoutRetryDelayMs
6651
6684
  };
6652
- if ("socketTimeoutSeconds" in args) {
6685
+ if (args.socketTimeoutSeconds) {
6653
6686
  logger.warn(
6654
6687
  "WARNING: deprecated socketTimeoutSeconds value passed.\n\nThis will be respected as the default socket timeout value, but will be removed from future versions of the worker."
6655
6688
  );
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@openfn/ws-worker",
3
- "version": "1.19.3",
3
+ "version": "1.19.5",
4
4
  "description": "A Websocket Worker to connect Lightning to a Runtime Engine",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",
@@ -23,10 +23,10 @@
23
23
  "koa-logger": "^3.2.1",
24
24
  "phoenix": "1.7.10",
25
25
  "ws": "^8.18.3",
26
+ "@openfn/runtime": "1.7.5",
27
+ "@openfn/lexicon": "^1.2.6",
26
28
  "@openfn/engine-multi": "1.8.3",
27
- "@openfn/lexicon": "^1.2.5",
28
- "@openfn/logger": "1.0.6",
29
- "@openfn/runtime": "1.7.5"
29
+ "@openfn/logger": "1.0.6"
30
30
  },
31
31
  "devDependencies": {
32
32
  "@types/koa": "^2.15.0",