@openfn/ws-worker 1.20.1 → 1.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,28 @@
1
1
  # ws-worker
2
2
 
3
+ ## 1.21.0
4
+
5
+ ### Minor Changes
6
+
7
+ - 064933d: Measure the size of state objects at the end of each step, and throw if they exceed a limit
8
+
9
+ In the Worker, this limit is set to 25% of the available runtime memory.
10
+
11
+ ### Patch Changes
12
+
13
+ - f089f8d: Remove special edge condition mapping (this is now handled by the runtime)
14
+ - Updated dependencies [f089f8d]
15
+ - Updated dependencies [064933d]
16
+ - @openfn/runtime@1.8.0
17
+ - @openfn/engine-multi@1.10.0
18
+
19
+ ## 1.20.2
20
+
21
+ ### Patch Changes
22
+
23
+ - Add internal fallback timeout to events. Also adjust error handling to try to catch an uncaught error
24
+ - q
25
+
3
26
  ## 1.20.1
4
27
 
5
28
  ### Patch Changes
package/dist/index.d.ts CHANGED
@@ -56,6 +56,7 @@ type WorkerRunOptions = ExecuteOptions & {
56
56
  batchLogs?: boolean;
57
57
  batchInterval?: number;
58
58
  batchLimit?: number;
59
+ eventTimeoutSeconds?: number;
59
60
  };
60
61
 
61
62
  type Context = {
package/dist/index.js CHANGED
@@ -286,19 +286,6 @@ import {
286
286
  import crypto2 from "node:crypto";
287
287
  import path from "node:path";
288
288
  import { getNameAndVersion } from "@openfn/runtime";
289
- var conditions = {
290
- on_job_success: (upstreamId) => `Boolean(!state?.errors?.["${upstreamId}"] ?? true)`,
291
- on_job_failure: (upstreamId) => `Boolean(state?.errors && state.errors["${upstreamId}"])`,
292
- always: (_upstreamId) => null
293
- };
294
- var mapEdgeCondition = (edge) => {
295
- const { condition } = edge;
296
- if (condition && condition in conditions) {
297
- const upstream = edge.source_job_id || edge.source_trigger_id;
298
- return conditions[condition](upstream);
299
- }
300
- return condition;
301
- };
302
289
  var mapTriggerEdgeCondition = (edge) => {
303
290
  const { condition } = edge;
304
291
  if (condition === void 0 || condition === "always")
@@ -414,9 +401,8 @@ var convert_lightning_plan_default = (run, options = {}) => {
414
401
  }
415
402
  const next = edges.filter((e) => e.source_job_id === id).reduce((obj, edge) => {
416
403
  const newEdge = {};
417
- const condition = mapEdgeCondition(edge);
418
- if (condition) {
419
- newEdge.condition = condition;
404
+ if (edge.condition) {
405
+ newEdge.condition = edge.condition;
420
406
  }
421
407
  if (edge.enabled === false) {
422
408
  newEdge.disabled = true;
@@ -936,21 +922,37 @@ function eventProcessor(engine, context, callbacks, options = {}) {
936
922
  const { id: planId, logger } = context;
937
923
  const {
938
924
  batchLimit: limit = DEFAULT_BATCH_LIMIT,
939
- batchInterval: interval = DEFAULT_BATCH_INTERVAL
925
+ batchInterval: interval = DEFAULT_BATCH_INTERVAL,
926
+ timeout_ms
940
927
  } = options;
941
928
  const queue = [];
929
+ let activeBatch = null;
930
+ let batch = [];
931
+ let batchTimeout;
932
+ let didFinish = false;
933
+ let timeoutHandle;
942
934
  const next = async () => {
943
935
  const evt = queue[0];
944
936
  if (evt) {
937
+ didFinish = false;
938
+ const finish = () => {
939
+ clearTimeout(timeoutHandle);
940
+ if (!didFinish) {
941
+ didFinish = true;
942
+ queue.shift();
943
+ setImmediate(next);
944
+ }
945
+ };
946
+ if (timeout_ms) {
947
+ timeoutHandle = setTimeout(() => {
948
+ logger.error(`${planId} :: ${evt.name} :: timeout (fallback)`);
949
+ finish();
950
+ }, timeout_ms);
951
+ }
945
952
  await process2(evt.name, evt.event);
946
- queue.shift();
947
- next();
953
+ finish();
948
954
  }
949
955
  };
950
- let activeBatch = null;
951
- let batch = [];
952
- let start = -1;
953
- let batchTimeout;
954
956
  const sendBatch = async (name) => {
955
957
  clearTimeout(batchTimeout);
956
958
  activeBatch = null;
@@ -958,16 +960,24 @@ function eventProcessor(engine, context, callbacks, options = {}) {
958
960
  batch = [];
959
961
  };
960
962
  const send = async (name, payload, batchSize) => {
961
- const lightningEvent = eventMap[name] ?? name;
962
- await callbacks[name](context, payload);
963
- if (batchSize) {
964
- logger.info(
965
- `${planId} :: sent ${lightningEvent} (${batchSize}):: OK :: ${Date.now() - start}ms`
966
- );
967
- } else {
968
- logger.info(
969
- `${planId} :: sent ${lightningEvent} :: OK :: ${Date.now() - start}ms`
970
- );
963
+ try {
964
+ const start = Date.now();
965
+ const lightningEvent = eventMap[name] ?? name;
966
+ await callbacks[name](context, payload);
967
+ if (batchSize) {
968
+ logger.info(
969
+ `${planId} :: sent ${lightningEvent} (${batchSize}):: OK :: ${Date.now() - start}ms`
970
+ );
971
+ } else {
972
+ logger.info(
973
+ `${planId} :: sent ${lightningEvent} :: OK :: ${Date.now() - start}ms`
974
+ );
975
+ }
976
+ } catch (e2) {
977
+ if (!e2.reportedToSentry) {
978
+ Sentry3.captureException(e2);
979
+ logger.error(e2);
980
+ }
971
981
  }
972
982
  };
973
983
  const process2 = async (name, event) => {
@@ -988,32 +998,24 @@ function eventProcessor(engine, context, callbacks, options = {}) {
988
998
  await sendBatch(activeBatch);
989
999
  }
990
1000
  if (name in callbacks) {
991
- try {
992
- start = Date.now();
993
- if (options?.batch?.[name]) {
994
- activeBatch = name;
995
- batch.push(event);
996
- while (queue.length > 1 && queue[1].name === name) {
997
- const [nextBatchItem] = queue.splice(1, 1);
998
- batch.push(nextBatchItem.event);
999
- if (batch.length >= limit) {
1000
- return sendBatch(name);
1001
- }
1002
- }
1003
- if (!batchTimeout) {
1004
- const batchName = activeBatch;
1005
- batchTimeout = setTimeout(async () => {
1006
- sendBatch(batchName);
1007
- }, interval);
1001
+ if (options?.batch?.[name]) {
1002
+ activeBatch = name;
1003
+ batch.push(event);
1004
+ while (queue.length > 1 && queue[1].name === name) {
1005
+ const [nextBatchItem] = queue.splice(1, 1);
1006
+ batch.push(nextBatchItem.event);
1007
+ if (batch.length >= limit) {
1008
+ return sendBatch(name);
1008
1009
  }
1009
- return;
1010
1010
  }
1011
- await send(name, event);
1012
- } catch (e2) {
1013
- if (!e2.reportedToSentry) {
1014
- Sentry3.captureException(e2);
1015
- logger.error(e2);
1011
+ if (!batchTimeout) {
1012
+ const batchName = activeBatch;
1013
+ batchTimeout = setTimeout(async () => {
1014
+ sendBatch(batchName);
1015
+ }, interval);
1016
1016
  }
1017
+ } else {
1018
+ await send(name, event);
1017
1019
  }
1018
1020
  } else {
1019
1021
  logger.warn("no event bound for", name);
@@ -1056,6 +1058,10 @@ function execute(channel, engine, logger, plan, input, options = {}, onFinish =
1056
1058
  runId: plan.id
1057
1059
  }
1058
1060
  });
1061
+ const batch = {};
1062
+ if (options.batchLogs) {
1063
+ batch[WORKFLOW_LOG2] = true;
1064
+ }
1059
1065
  eventProcessor(
1060
1066
  engine,
1061
1067
  context,
@@ -1069,11 +1075,10 @@ function execute(channel, engine, logger, plan, input, options = {}, onFinish =
1069
1075
  [WORKFLOW_ERROR2]: onRunError
1070
1076
  },
1071
1077
  {
1072
- batch: options.batchLogs ? {
1073
- [WORKFLOW_LOG2]: true
1074
- } : {},
1078
+ batch,
1075
1079
  batchInterval: options.batchInterval,
1076
- batchLimit: options.batchLimit
1080
+ batchLimit: options.batchLimit,
1081
+ timeout_ms: (options.eventTimeoutSeconds ?? 0) * 1e3 * 1.1
1077
1082
  }
1078
1083
  );
1079
1084
  const resolvers = {
@@ -1468,6 +1473,7 @@ function createServer(engine, options = {}) {
1468
1473
  }
1469
1474
  options2.timeoutRetryCount = app.options.timeoutRetryCount;
1470
1475
  options2.timeoutRetryDelay = app.options.timeoutRetryDelayMs ?? app.options.socketTimeoutSeconds;
1476
+ options2.eventTimeoutSeconds = app.options.messageTimeoutSeconds;
1471
1477
  options2.batchLogs = app.options.batchLogs;
1472
1478
  options2.batchInterval = app.options.batchInterval;
1473
1479
  options2.batchLimit = app.options.batchLimit;
package/dist/start.js CHANGED
@@ -435,19 +435,6 @@ import {
435
435
  import crypto3 from "node:crypto";
436
436
  import path from "node:path";
437
437
  import { getNameAndVersion } from "@openfn/runtime";
438
- var conditions = {
439
- on_job_success: (upstreamId) => `Boolean(!state?.errors?.["${upstreamId}"] ?? true)`,
440
- on_job_failure: (upstreamId) => `Boolean(state?.errors && state.errors["${upstreamId}"])`,
441
- always: (_upstreamId) => null
442
- };
443
- var mapEdgeCondition = (edge) => {
444
- const { condition } = edge;
445
- if (condition && condition in conditions) {
446
- const upstream = edge.source_job_id || edge.source_trigger_id;
447
- return conditions[condition](upstream);
448
- }
449
- return condition;
450
- };
451
438
  var mapTriggerEdgeCondition = (edge) => {
452
439
  const { condition } = edge;
453
440
  if (condition === void 0 || condition === "always")
@@ -563,9 +550,8 @@ var convert_lightning_plan_default = (run2, options = {}) => {
563
550
  }
564
551
  const next = edges.filter((e) => e.source_job_id === id).reduce((obj, edge) => {
565
552
  const newEdge = {};
566
- const condition = mapEdgeCondition(edge);
567
- if (condition) {
568
- newEdge.condition = condition;
553
+ if (edge.condition) {
554
+ newEdge.condition = edge.condition;
569
555
  }
570
556
  if (edge.enabled === false) {
571
557
  newEdge.disabled = true;
@@ -1085,21 +1071,37 @@ function eventProcessor(engine, context, callbacks, options = {}) {
1085
1071
  const { id: planId, logger: logger2 } = context;
1086
1072
  const {
1087
1073
  batchLimit: limit = DEFAULT_BATCH_LIMIT,
1088
- batchInterval: interval = DEFAULT_BATCH_INTERVAL
1074
+ batchInterval: interval = DEFAULT_BATCH_INTERVAL,
1075
+ timeout_ms
1089
1076
  } = options;
1090
1077
  const queue = [];
1078
+ let activeBatch = null;
1079
+ let batch = [];
1080
+ let batchTimeout;
1081
+ let didFinish = false;
1082
+ let timeoutHandle;
1091
1083
  const next = async () => {
1092
1084
  const evt = queue[0];
1093
1085
  if (evt) {
1086
+ didFinish = false;
1087
+ const finish = () => {
1088
+ clearTimeout(timeoutHandle);
1089
+ if (!didFinish) {
1090
+ didFinish = true;
1091
+ queue.shift();
1092
+ setImmediate(next);
1093
+ }
1094
+ };
1095
+ if (timeout_ms) {
1096
+ timeoutHandle = setTimeout(() => {
1097
+ logger2.error(`${planId} :: ${evt.name} :: timeout (fallback)`);
1098
+ finish();
1099
+ }, timeout_ms);
1100
+ }
1094
1101
  await process2(evt.name, evt.event);
1095
- queue.shift();
1096
- next();
1102
+ finish();
1097
1103
  }
1098
1104
  };
1099
- let activeBatch = null;
1100
- let batch = [];
1101
- let start = -1;
1102
- let batchTimeout;
1103
1105
  const sendBatch = async (name) => {
1104
1106
  clearTimeout(batchTimeout);
1105
1107
  activeBatch = null;
@@ -1107,16 +1109,24 @@ function eventProcessor(engine, context, callbacks, options = {}) {
1107
1109
  batch = [];
1108
1110
  };
1109
1111
  const send = async (name, payload, batchSize) => {
1110
- const lightningEvent = eventMap[name] ?? name;
1111
- await callbacks[name](context, payload);
1112
- if (batchSize) {
1113
- logger2.info(
1114
- `${planId} :: sent ${lightningEvent} (${batchSize}):: OK :: ${Date.now() - start}ms`
1115
- );
1116
- } else {
1117
- logger2.info(
1118
- `${planId} :: sent ${lightningEvent} :: OK :: ${Date.now() - start}ms`
1119
- );
1112
+ try {
1113
+ const start = Date.now();
1114
+ const lightningEvent = eventMap[name] ?? name;
1115
+ await callbacks[name](context, payload);
1116
+ if (batchSize) {
1117
+ logger2.info(
1118
+ `${planId} :: sent ${lightningEvent} (${batchSize}):: OK :: ${Date.now() - start}ms`
1119
+ );
1120
+ } else {
1121
+ logger2.info(
1122
+ `${planId} :: sent ${lightningEvent} :: OK :: ${Date.now() - start}ms`
1123
+ );
1124
+ }
1125
+ } catch (e2) {
1126
+ if (!e2.reportedToSentry) {
1127
+ Sentry3.captureException(e2);
1128
+ logger2.error(e2);
1129
+ }
1120
1130
  }
1121
1131
  };
1122
1132
  const process2 = async (name, event) => {
@@ -1137,32 +1147,24 @@ function eventProcessor(engine, context, callbacks, options = {}) {
1137
1147
  await sendBatch(activeBatch);
1138
1148
  }
1139
1149
  if (name in callbacks) {
1140
- try {
1141
- start = Date.now();
1142
- if (options?.batch?.[name]) {
1143
- activeBatch = name;
1144
- batch.push(event);
1145
- while (queue.length > 1 && queue[1].name === name) {
1146
- const [nextBatchItem] = queue.splice(1, 1);
1147
- batch.push(nextBatchItem.event);
1148
- if (batch.length >= limit) {
1149
- return sendBatch(name);
1150
- }
1150
+ if (options?.batch?.[name]) {
1151
+ activeBatch = name;
1152
+ batch.push(event);
1153
+ while (queue.length > 1 && queue[1].name === name) {
1154
+ const [nextBatchItem] = queue.splice(1, 1);
1155
+ batch.push(nextBatchItem.event);
1156
+ if (batch.length >= limit) {
1157
+ return sendBatch(name);
1151
1158
  }
1152
- if (!batchTimeout) {
1153
- const batchName = activeBatch;
1154
- batchTimeout = setTimeout(async () => {
1155
- sendBatch(batchName);
1156
- }, interval);
1157
- }
1158
- return;
1159
1159
  }
1160
- await send(name, event);
1161
- } catch (e2) {
1162
- if (!e2.reportedToSentry) {
1163
- Sentry3.captureException(e2);
1164
- logger2.error(e2);
1160
+ if (!batchTimeout) {
1161
+ const batchName = activeBatch;
1162
+ batchTimeout = setTimeout(async () => {
1163
+ sendBatch(batchName);
1164
+ }, interval);
1165
1165
  }
1166
+ } else {
1167
+ await send(name, event);
1166
1168
  }
1167
1169
  } else {
1168
1170
  logger2.warn("no event bound for", name);
@@ -1205,6 +1207,10 @@ function execute(channel, engine, logger2, plan, input, options = {}, onFinish =
1205
1207
  runId: plan.id
1206
1208
  }
1207
1209
  });
1210
+ const batch = {};
1211
+ if (options.batchLogs) {
1212
+ batch[WORKFLOW_LOG2] = true;
1213
+ }
1208
1214
  eventProcessor(
1209
1215
  engine,
1210
1216
  context,
@@ -1218,11 +1224,10 @@ function execute(channel, engine, logger2, plan, input, options = {}, onFinish =
1218
1224
  [WORKFLOW_ERROR2]: onRunError
1219
1225
  },
1220
1226
  {
1221
- batch: options.batchLogs ? {
1222
- [WORKFLOW_LOG2]: true
1223
- } : {},
1227
+ batch,
1224
1228
  batchInterval: options.batchInterval,
1225
- batchLimit: options.batchLimit
1229
+ batchLimit: options.batchLimit,
1230
+ timeout_ms: (options.eventTimeoutSeconds ?? 0) * 1e3 * 1.1
1226
1231
  }
1227
1232
  );
1228
1233
  const resolvers = {
@@ -1617,6 +1622,7 @@ function createServer(engine, options = {}) {
1617
1622
  }
1618
1623
  options2.timeoutRetryCount = app.options.timeoutRetryCount;
1619
1624
  options2.timeoutRetryDelay = app.options.timeoutRetryDelayMs ?? app.options.socketTimeoutSeconds;
1625
+ options2.eventTimeoutSeconds = app.options.messageTimeoutSeconds;
1620
1626
  options2.batchLogs = app.options.batchLogs;
1621
1627
  options2.batchInterval = app.options.batchInterval;
1622
1628
  options2.batchLimit = app.options.batchLimit;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@openfn/ws-worker",
3
- "version": "1.20.1",
3
+ "version": "1.21.0",
4
4
  "description": "A Websocket Worker to connect Lightning to a Runtime Engine",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",
@@ -23,10 +23,10 @@
23
23
  "koa-logger": "^3.2.1",
24
24
  "phoenix": "1.7.10",
25
25
  "ws": "^8.18.3",
26
- "@openfn/engine-multi": "1.9.1",
27
26
  "@openfn/lexicon": "^1.3.0",
28
27
  "@openfn/logger": "1.1.1",
29
- "@openfn/runtime": "1.7.7"
28
+ "@openfn/engine-multi": "1.10.0",
29
+ "@openfn/runtime": "1.8.0"
30
30
  },
31
31
  "devDependencies": {
32
32
  "@types/koa": "^2.15.0",
@@ -43,7 +43,7 @@
43
43
  "tsup": "^6.7.0",
44
44
  "typescript": "^4.9.5",
45
45
  "yargs": "^17.7.2",
46
- "@openfn/lightning-mock": "2.3.10"
46
+ "@openfn/lightning-mock": "2.4.1"
47
47
  },
48
48
  "files": [
49
49
  "dist",