@openfn/ws-worker 1.21.4 → 1.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/start.js CHANGED
@@ -168,6 +168,7 @@ var GET_CREDENTIAL = "fetch:credential";
168
168
  var RUN_START = "run:start";
169
169
  var RUN_COMPLETE = "run:complete";
170
170
  var RUN_LOG = "run:log";
171
+ var RUN_LOG_BATCH = "run:batch_logs";
171
172
  var STEP_START = "step:start";
172
173
  var STEP_COMPLETE = "step:complete";
173
174
  var INTERNAL_RUN_COMPLETE = "server:run-complete";
@@ -181,7 +182,9 @@ var destroy = async (app, logger2) => {
181
182
  await Promise.all([
182
183
  new Promise((resolve5) => {
183
184
  app.destroyed = true;
184
- app.workloop?.stop("server closed");
185
+ for (const w of app.workloops) {
186
+ w.stop("server closed");
187
+ }
185
188
  app.server.close(async () => {
186
189
  resolve5();
187
190
  });
@@ -199,11 +202,11 @@ var destroy = async (app, logger2) => {
199
202
  var waitForRunsAndClaims = (app, logger2) => new Promise((resolve5) => {
200
203
  const log = () => {
201
204
  logger2.debug(
202
- `Waiting for ${Object.keys(app.workflows).length} runs and ${Object.keys(app.openClaims).length} claims to complete...`
205
+ `Waiting for ${Object.keys(app.workflows).length} runs and ${app.pendingClaims()} claims to complete...`
203
206
  );
204
207
  };
205
208
  const checkAllClear = () => {
206
- if (Object.keys(app.workflows).length + Object.keys(app.openClaims).length === 0) {
209
+ if (Object.keys(app.workflows).length + app.pendingClaims() === 0) {
207
210
  logger2.debug("All runs completed!");
208
211
  app.events.off(INTERNAL_RUN_COMPLETE, checkAllClear);
209
212
  app.events.off(INTERNAL_CLAIM_COMPLETE, checkAllClear);
@@ -212,7 +215,7 @@ var waitForRunsAndClaims = (app, logger2) => new Promise((resolve5) => {
212
215
  log();
213
216
  }
214
217
  };
215
- if (Object.keys(app.workflows).length || Object.keys(app.openClaims).length) {
218
+ if (Object.keys(app.workflows).length || app.pendingClaims()) {
216
219
  log();
217
220
  app.events.on(INTERNAL_RUN_COMPLETE, checkAllClear);
218
221
  app.events.on(INTERNAL_CLAIM_COMPLETE, checkAllClear);
@@ -223,51 +226,6 @@ var waitForRunsAndClaims = (app, logger2) => new Promise((resolve5) => {
223
226
  });
224
227
  var destroy_default = destroy;
225
228
 
226
- // src/util/try-with-backoff.ts
227
- var BACKOFF_MULTIPLIER = 1.15;
228
- var tryWithBackoff = (fn, opts = {}) => {
229
- const { min = 1e3, max = 1e4, maxRuns, runs = 1 } = opts;
230
- let cancelled = false;
231
- if (!opts.isCancelled) {
232
- opts.isCancelled = () => cancelled;
233
- }
234
- const promise = new Promise(async (resolve5, reject) => {
235
- try {
236
- await fn();
237
- resolve5();
238
- } catch (e) {
239
- if (e?.abort) {
240
- cancelled = true;
241
- return reject();
242
- }
243
- if (opts.isCancelled()) {
244
- return resolve5();
245
- }
246
- if (!isNaN(maxRuns) && runs >= maxRuns) {
247
- return reject(new Error("max runs exceeded"));
248
- }
249
- setTimeout(() => {
250
- if (opts.isCancelled()) {
251
- return resolve5();
252
- }
253
- const nextOpts = {
254
- maxRuns,
255
- runs: runs + 1,
256
- min: Math.min(max, min * BACKOFF_MULTIPLIER),
257
- max,
258
- isCancelled: opts.isCancelled
259
- };
260
- tryWithBackoff(fn, nextOpts).then(resolve5).catch(reject);
261
- }, min);
262
- }
263
- });
264
- promise.cancel = () => {
265
- cancelled = true;
266
- };
267
- return promise;
268
- };
269
- var try_with_backoff_default = tryWithBackoff;
270
-
271
229
  // src/api/claim.ts
272
230
  import v8 from "node:v8";
273
231
  import * as Sentry from "@sentry/node";
@@ -296,24 +254,26 @@ var ClaimError = class extends Error {
296
254
  }
297
255
  };
298
256
  var claimIdGen = 0;
299
- var claim = (app, logger2 = mockLogger, options = {}) => {
257
+ var claim = (app, workloop, logger2 = mockLogger, options = {}) => {
300
258
  return new Promise((resolve5, reject) => {
301
- app.openClaims ??= {};
302
- const { maxWorkers = 5, demand = 1 } = options;
259
+ const { demand = 1 } = options;
303
260
  const podName = NAME ? `[${NAME}] ` : "";
304
- const activeWorkers = Object.keys(app.workflows).length;
305
- const pendingClaims = Object.values(app.openClaims).reduce(
261
+ const activeInWorkloop = workloop.activeRuns.size;
262
+ const capacity = workloop.capacity;
263
+ const pendingWorkloopClaims = Object.values(workloop.openClaims).reduce(
306
264
  (a, b) => a + b,
307
265
  0
308
266
  );
309
- if (activeWorkers >= maxWorkers) {
310
- app.workloop?.stop(`server at capacity (${activeWorkers}/${maxWorkers})`);
311
- return reject(new ClaimError("Server at capacity"));
312
- } else if (activeWorkers + pendingClaims >= maxWorkers) {
313
- app.workloop?.stop(
314
- `server at capacity (${activeWorkers}/${maxWorkers}, ${pendingClaims} pending)`
267
+ if (activeInWorkloop >= capacity) {
268
+ workloop.stop(
269
+ `workloop ${workloop.id} at capacity (${activeInWorkloop}/${capacity})`
270
+ );
271
+ return reject(new ClaimError("Workloop at capacity"));
272
+ } else if (activeInWorkloop + pendingWorkloopClaims >= capacity) {
273
+ workloop.stop(
274
+ `workloop ${workloop.id} at capacity (${activeInWorkloop}/${capacity}, ${pendingWorkloopClaims} pending)`
315
275
  );
316
- return reject(new ClaimError("Server at capacity"));
276
+ return reject(new ClaimError("Workloop at capacity"));
317
277
  }
318
278
  if (!app.queueChannel) {
319
279
  logger2.warn("skipping claim attempt: websocket unavailable");
@@ -326,21 +286,22 @@ var claim = (app, logger2 = mockLogger, options = {}) => {
326
286
  return reject(e);
327
287
  }
328
288
  const claimId = ++claimIdGen;
329
- app.openClaims[claimId] = demand;
289
+ workloop.openClaims[claimId] = demand;
330
290
  const { used_heap_size, heap_size_limit } = v8.getHeapStatistics();
331
291
  const usedHeapMb = Math.round(used_heap_size / 1024 / 1024);
332
292
  const totalHeapMb = Math.round(heap_size_limit / 1024 / 1024);
333
293
  const memPercent = Math.round(usedHeapMb / totalHeapMb * 100);
334
294
  logger2.debug(
335
- `Claiming runs :: demand ${demand} | capacity ${activeWorkers}/${maxWorkers} | memory ${memPercent}% (${usedHeapMb}/${totalHeapMb}mb)`
295
+ `Claiming runs [${workloop.id}] :: demand ${demand} | capacity ${activeInWorkloop}/${capacity} | memory ${memPercent}% (${usedHeapMb}/${totalHeapMb}mb)`
336
296
  );
337
297
  app.events.emit(INTERNAL_CLAIM_START);
338
298
  const start = Date.now();
339
299
  app.queueChannel.push(CLAIM, {
340
300
  demand,
341
- worker_name: NAME || null
301
+ worker_name: NAME || null,
302
+ queues: workloop.queues
342
303
  }).receive("ok", async ({ runs }) => {
343
- delete app.openClaims[claimId];
304
+ delete workloop.openClaims[claimId];
344
305
  const duration = Date.now() - start;
345
306
  logger2.debug(
346
307
  `${podName}claimed ${runs.length} runs in ${duration}ms (${runs.length ? runs.map((r) => r.id).join(",") : "-"})`
@@ -364,17 +325,19 @@ var claim = (app, logger2 = mockLogger, options = {}) => {
364
325
  } else {
365
326
  logger2.debug("skipping run token validation for", run2.id);
366
327
  }
328
+ workloop.activeRuns.add(run2.id);
329
+ app.runWorkloopMap[run2.id] = workloop;
367
330
  logger2.debug(`${podName} starting run ${run2.id}`);
368
331
  app.execute(run2);
369
332
  }
370
333
  resolve5();
371
334
  app.events.emit(INTERNAL_CLAIM_COMPLETE, { runs });
372
335
  }).receive("error", (e) => {
373
- delete app.openClaims[claimId];
336
+ delete workloop.openClaims[claimId];
374
337
  logger2.error("Error on claim", e);
375
338
  reject(new Error("claim error"));
376
339
  }).receive("timeout", () => {
377
- delete app.openClaims[claimId];
340
+ delete workloop.openClaims[claimId];
378
341
  logger2.error("TIMEOUT on claim. Runs may be lost.");
379
342
  reject(new Error("timeout"));
380
343
  });
@@ -382,43 +345,6 @@ var claim = (app, logger2 = mockLogger, options = {}) => {
382
345
  };
383
346
  var claim_default = claim;
384
347
 
385
- // src/api/workloop.ts
386
- var startWorkloop = (app, logger2, minBackoff2, maxBackoff2, maxWorkers) => {
387
- let promise;
388
- let cancelled = false;
389
- const workLoop = () => {
390
- if (!cancelled) {
391
- promise = try_with_backoff_default(
392
- () => claim_default(app, logger2, {
393
- maxWorkers
394
- }),
395
- {
396
- min: minBackoff2,
397
- max: maxBackoff2
398
- }
399
- );
400
- promise.then(() => {
401
- if (!cancelled) {
402
- setTimeout(workLoop, minBackoff2);
403
- }
404
- }).catch(() => {
405
- });
406
- }
407
- };
408
- workLoop();
409
- return {
410
- stop: (reason = "reason unknown") => {
411
- if (!cancelled) {
412
- logger2.info(`cancelling workloop: ${reason}`);
413
- cancelled = true;
414
- promise.cancel();
415
- }
416
- },
417
- isStopped: () => cancelled
418
- };
419
- };
420
- var workloop_default = startWorkloop;
421
-
422
348
  // src/api/execute.ts
423
349
  import * as Sentry4 from "@sentry/node";
424
350
  import {
@@ -723,6 +649,51 @@ var stringify_default = (obj) => stringify(obj, (_key, value) => {
723
649
  return value;
724
650
  });
725
651
 
652
+ // src/util/try-with-backoff.ts
653
+ var BACKOFF_MULTIPLIER = 1.15;
654
+ var tryWithBackoff = (fn, opts = {}) => {
655
+ const { min = 1e3, max = 1e4, maxRuns, runs = 1 } = opts;
656
+ let cancelled = false;
657
+ if (!opts.isCancelled) {
658
+ opts.isCancelled = () => cancelled;
659
+ }
660
+ const promise = new Promise(async (resolve5, reject) => {
661
+ try {
662
+ await fn();
663
+ resolve5();
664
+ } catch (e) {
665
+ if (e?.abort) {
666
+ cancelled = true;
667
+ return reject();
668
+ }
669
+ if (opts.isCancelled()) {
670
+ return resolve5();
671
+ }
672
+ if (!isNaN(maxRuns) && runs >= maxRuns) {
673
+ return reject(new Error("max runs exceeded"));
674
+ }
675
+ setTimeout(() => {
676
+ if (opts.isCancelled()) {
677
+ return resolve5();
678
+ }
679
+ const nextOpts = {
680
+ maxRuns,
681
+ runs: runs + 1,
682
+ min: Math.min(max, min * BACKOFF_MULTIPLIER),
683
+ max,
684
+ isCancelled: opts.isCancelled
685
+ };
686
+ tryWithBackoff(fn, nextOpts).then(resolve5).catch(reject);
687
+ }, min);
688
+ }
689
+ });
690
+ promise.cancel = () => {
691
+ cancelled = true;
692
+ };
693
+ return promise;
694
+ };
695
+ var try_with_backoff_default = tryWithBackoff;
696
+
726
697
  // src/util/timestamp.ts
727
698
  var timeInMicroseconds = (time) => time && (BigInt(time) / BigInt(1e3)).toString();
728
699
 
@@ -761,7 +732,7 @@ async function onRunLog(context, events) {
761
732
  run_id: `${state.plan.id}`,
762
733
  logs
763
734
  };
764
- return sendEvent(context, RUN_LOG, payload);
735
+ return sendEvent(context, RUN_LOG_BATCH, payload);
765
736
  } else {
766
737
  return new Promise(async (resolve5) => {
767
738
  for (const log of logs) {
@@ -1383,6 +1354,7 @@ var connectToWorkerQueue = (endpoint, serverId, secret, logger2, options) => {
1383
1354
  messageTimeout = DEFAULT_MESSAGE_TIMEOUT_SECONDS,
1384
1355
  claimTimeout = DEFAULT_CLAIM_TIMEOUT_SECONDS,
1385
1356
  capacity,
1357
+ queues,
1386
1358
  SocketConstructor = PhxSocket
1387
1359
  } = options;
1388
1360
  const events = new EventEmitter2();
@@ -1419,6 +1391,9 @@ var connectToWorkerQueue = (endpoint, serverId, secret, logger2, options) => {
1419
1391
  didOpen = true;
1420
1392
  shouldReportConnectionError = true;
1421
1393
  const joinPayload = { capacity };
1394
+ if (queues) {
1395
+ joinPayload.queues = queues;
1396
+ }
1422
1397
  const channel = socket.channel("worker:queue", joinPayload);
1423
1398
  channel.onMessage = (ev, load) => {
1424
1399
  events.emit("message", ev, load);
@@ -1459,6 +1434,140 @@ var connectToWorkerQueue = (endpoint, serverId, secret, logger2, options) => {
1459
1434
  };
1460
1435
  var worker_queue_default = connectToWorkerQueue;
1461
1436
 
1437
+ // src/api/workloop.ts
1438
+ var Workloop = class {
1439
+ constructor({
1440
+ id,
1441
+ queues,
1442
+ capacity
1443
+ }) {
1444
+ this.activeRuns = /* @__PURE__ */ new Set();
1445
+ this.openClaims = {};
1446
+ this.cancelled = true;
1447
+ this.id = id;
1448
+ this.queues = queues;
1449
+ this.capacity = capacity;
1450
+ }
1451
+ hasCapacity() {
1452
+ const pendingClaims = Object.values(this.openClaims).reduce(
1453
+ (a, b) => a + b,
1454
+ 0
1455
+ );
1456
+ return this.activeRuns.size + pendingClaims < this.capacity;
1457
+ }
1458
+ start(app, logger2, minBackoff2, maxBackoff2) {
1459
+ this.logger = logger2;
1460
+ this.cancelled = false;
1461
+ const loop = () => {
1462
+ if (!this.cancelled) {
1463
+ this.promise = try_with_backoff_default(() => claim_default(app, this, logger2), {
1464
+ min: minBackoff2,
1465
+ max: maxBackoff2
1466
+ });
1467
+ this.promise.then(() => {
1468
+ if (!this.cancelled) {
1469
+ setTimeout(loop, minBackoff2);
1470
+ }
1471
+ }).catch(() => {
1472
+ });
1473
+ }
1474
+ };
1475
+ loop();
1476
+ }
1477
+ stop(reason = "reason unknown") {
1478
+ if (!this.cancelled) {
1479
+ this.logger?.info(`cancelling workloop: ${reason}`);
1480
+ this.cancelled = true;
1481
+ this.promise?.cancel();
1482
+ }
1483
+ }
1484
+ isStopped() {
1485
+ return this.cancelled;
1486
+ }
1487
+ };
1488
+
1489
+ // src/util/parse-workloops.ts
1490
+ var WorkloopValidationError = class extends Error {
1491
+ constructor(message) {
1492
+ super(message);
1493
+ this.name = "WorkloopValidationError";
1494
+ }
1495
+ };
1496
+ var VALID_NAME = /^[a-zA-Z0-9_]+$/;
1497
+ function parseWorkloops(input) {
1498
+ const trimmed = input.trim();
1499
+ if (!trimmed) {
1500
+ throw new WorkloopValidationError("Workloop configuration cannot be empty");
1501
+ }
1502
+ const tokens = trimmed.split(/\s+/);
1503
+ const configs = tokens.map(parseToken);
1504
+ const seenConfigs = /* @__PURE__ */ new Map();
1505
+ for (let i = 0; i < configs.length; i++) {
1506
+ const key = JSON.stringify(configs[i].queues);
1507
+ if (seenConfigs.has(key)) {
1508
+ const prevIndex = seenConfigs.get(key);
1509
+ console.warn(
1510
+ `Warning: workloops at positions ${prevIndex} and ${i} have identical queue configurations: ${tokens[prevIndex]} and ${tokens[i]}`
1511
+ );
1512
+ } else {
1513
+ seenConfigs.set(key, i);
1514
+ }
1515
+ }
1516
+ return configs;
1517
+ }
1518
+ function parseToken(token) {
1519
+ const lastColon = token.lastIndexOf(":");
1520
+ if (lastColon === -1) {
1521
+ throw new WorkloopValidationError(
1522
+ `Invalid token "${token}": missing :<count> suffix`
1523
+ );
1524
+ }
1525
+ const prefStr = token.slice(0, lastColon);
1526
+ const countStr = token.slice(lastColon + 1);
1527
+ const count = Number(countStr);
1528
+ if (!Number.isInteger(count) || countStr !== String(Math.floor(count))) {
1529
+ throw new WorkloopValidationError(
1530
+ `Invalid count "${countStr}" in token "${token}": must be a positive integer`
1531
+ );
1532
+ }
1533
+ if (count < 1) {
1534
+ throw new WorkloopValidationError(
1535
+ `Invalid count "${countStr}" in token "${token}": must be >= 1`
1536
+ );
1537
+ }
1538
+ const names = prefStr.split(">");
1539
+ for (const name of names) {
1540
+ if (name === "") {
1541
+ throw new WorkloopValidationError(`Empty queue name in token "${token}"`);
1542
+ }
1543
+ if (name !== "*" && !VALID_NAME.test(name)) {
1544
+ throw new WorkloopValidationError(
1545
+ `Invalid queue name "${name}" in token "${token}": must match /^[a-zA-Z0-9_]+$/ or be "*"`
1546
+ );
1547
+ }
1548
+ }
1549
+ const nonWildcardNames = names.filter((n) => n !== "*");
1550
+ const seen = /* @__PURE__ */ new Set();
1551
+ for (const name of nonWildcardNames) {
1552
+ if (seen.has(name)) {
1553
+ console.warn(
1554
+ `Warning: duplicate queue name "${name}" in token "${token}"`
1555
+ );
1556
+ }
1557
+ seen.add(name);
1558
+ }
1559
+ const wildcardIndex = names.indexOf("*");
1560
+ if (wildcardIndex !== -1 && wildcardIndex !== names.length - 1) {
1561
+ throw new WorkloopValidationError(
1562
+ `Wildcard "*" must be the last element in token "${token}"`
1563
+ );
1564
+ }
1565
+ return new Workloop({ id: token, queues: names, capacity: count });
1566
+ }
1567
+
1568
+ // src/util/get-default-workloop-config.ts
1569
+ var get_default_workloop_config_default = (capacity = 5) => `manual>*:${capacity}`;
1570
+
1462
1571
  // src/server.ts
1463
1572
  var exec = promisify(_exec);
1464
1573
  var DEFAULT_PORT = 2222;
@@ -1486,8 +1595,10 @@ function connect(app, logger2, options = {}) {
1486
1595
  app.resumeWorkloop();
1487
1596
  };
1488
1597
  const onDisconnect = () => {
1489
- if (!app.workloop?.isStopped()) {
1490
- app.workloop?.stop("Socket disconnected unexpectedly");
1598
+ for (const w of app.workloops) {
1599
+ if (!w.isStopped()) {
1600
+ w.stop("Socket disconnected unexpectedly");
1601
+ }
1491
1602
  }
1492
1603
  if (!app.destroyed) {
1493
1604
  logger2.info("Connection to lightning lost");
@@ -1509,17 +1620,26 @@ function connect(app, logger2, options = {}) {
1509
1620
  const onMessage = (event) => {
1510
1621
  if (event === WORK_AVAILABLE) {
1511
1622
  if (!app.destroyed) {
1512
- claim_default(app, logger2, { maxWorkers: options.maxWorkflows }).catch(() => {
1513
- });
1623
+ for (const w of app.workloops) {
1624
+ if (w.hasCapacity()) {
1625
+ claim_default(app, w, logger2).catch(() => {
1626
+ });
1627
+ }
1628
+ }
1514
1629
  }
1515
1630
  }
1516
1631
  };
1632
+ const queuesMap = {};
1633
+ for (const w of app.workloops) {
1634
+ queuesMap[w.queues.join(">")] = w.capacity;
1635
+ }
1517
1636
  worker_queue_default(options.lightning, app.id, options.secret, logger2, {
1518
1637
  // TODO: options.socketTimeoutSeconds wins because this is what USED to be used
1519
1638
  // But it's deprecated and should be removed soon
1520
1639
  messageTimeout: options.socketTimeoutSeconds ?? options.messageTimeoutSeconds,
1521
1640
  claimTimeout: options.claimTimeoutSeconds,
1522
- capacity: options.maxWorkflows
1641
+ capacity: options.maxWorkflows,
1642
+ queues: queuesMap
1523
1643
  }).on("connect", onConnect).on("disconnect", onDisconnect).on("error", onError).on("message", onMessage);
1524
1644
  }
1525
1645
  async function setupCollections(options, logger2) {
@@ -1568,27 +1688,36 @@ function createServer(engine, options = {}) {
1568
1688
  logger2.debug(str);
1569
1689
  })
1570
1690
  );
1571
- app.openClaims = {};
1572
1691
  app.workflows = {};
1573
1692
  app.destroyed = false;
1693
+ app.workloops = parseWorkloops(
1694
+ options.workloopConfigs ?? get_default_workloop_config_default(options.maxWorkflows)
1695
+ );
1696
+ app.runWorkloopMap = {};
1574
1697
  app.server = app.listen(port);
1575
1698
  logger2.success(`Worker ${app.id} listening on ${port}`);
1576
1699
  process.send?.("READY");
1577
1700
  router.get("/livez", healthcheck_default);
1578
1701
  router.get("/", healthcheck_default);
1579
1702
  app.options = options;
1580
- app.resumeWorkloop = () => {
1703
+ app.resumeWorkloop = (workloop) => {
1581
1704
  if (options.noLoop || app.destroyed) {
1582
1705
  return;
1583
1706
  }
1584
- if (!app.workloop || app.workloop?.isStopped()) {
1585
- logger2.info("Starting workloop");
1586
- app.workloop = workloop_default(
1707
+ const targets = workloop ? [workloop] : app.workloops;
1708
+ for (const w of targets) {
1709
+ if (!w.hasCapacity()) {
1710
+ continue;
1711
+ }
1712
+ if (!w.isStopped()) {
1713
+ w.stop("restarting");
1714
+ }
1715
+ logger2.info(`Starting workloop for ${w.id}`);
1716
+ w.start(
1587
1717
  app,
1588
1718
  logger2,
1589
1719
  options.backoff?.min || MIN_BACKOFF,
1590
- options.backoff?.max || MAX_BACKOFF,
1591
- options.maxWorkflows
1720
+ options.backoff?.max || MAX_BACKOFF
1592
1721
  );
1593
1722
  }
1594
1723
  };
@@ -1633,8 +1762,16 @@ function createServer(engine, options = {}) {
1633
1762
  );
1634
1763
  delete app.workflows[id];
1635
1764
  runChannel.leave();
1636
- app.events.emit(INTERNAL_RUN_COMPLETE);
1637
- app.resumeWorkloop();
1765
+ const owningWorkloop = app.runWorkloopMap[id];
1766
+ if (owningWorkloop) {
1767
+ owningWorkloop.activeRuns.delete(id);
1768
+ delete app.runWorkloopMap[id];
1769
+ app.events.emit(INTERNAL_RUN_COMPLETE);
1770
+ app.resumeWorkloop(owningWorkloop);
1771
+ } else {
1772
+ app.events.emit(INTERNAL_RUN_COMPLETE);
1773
+ app.resumeWorkloop();
1774
+ }
1638
1775
  };
1639
1776
  const context = execute(
1640
1777
  runChannel,
@@ -1648,7 +1785,14 @@ function createServer(engine, options = {}) {
1648
1785
  app.workflows[id] = context;
1649
1786
  } catch (e) {
1650
1787
  delete app.workflows[id];
1651
- app.resumeWorkloop();
1788
+ const owningWorkloop = app.runWorkloopMap[id];
1789
+ if (owningWorkloop) {
1790
+ owningWorkloop.activeRuns.delete(id);
1791
+ delete app.runWorkloopMap[id];
1792
+ app.resumeWorkloop(owningWorkloop);
1793
+ } else {
1794
+ app.resumeWorkloop();
1795
+ }
1652
1796
  logger2.error(`Unexpected error executing ${id}`);
1653
1797
  logger2.error(e);
1654
1798
  }
@@ -1658,9 +1802,13 @@ function createServer(engine, options = {}) {
1658
1802
  };
1659
1803
  router.post("/claim", async (ctx) => {
1660
1804
  logger2.info("triggering claim from POST request");
1661
- return claim_default(app, logger2, {
1662
- maxWorkers: options.maxWorkflows
1663
- }).then(() => {
1805
+ const promises = app.workloops.map((w) => {
1806
+ if (w.hasCapacity()) {
1807
+ return claim_default(app, w, logger2);
1808
+ }
1809
+ return Promise.reject(new Error("Workloop at capacity"));
1810
+ });
1811
+ return Promise.any(promises).then(() => {
1664
1812
  logger2.info("claim complete: 1 run claimed");
1665
1813
  ctx.body = "complete";
1666
1814
  ctx.status = 200;
@@ -1671,10 +1819,15 @@ function createServer(engine, options = {}) {
1671
1819
  });
1672
1820
  });
1673
1821
  app.claim = () => {
1674
- return claim_default(app, logger2, {
1675
- maxWorkers: options.maxWorkflows
1822
+ const promises = app.workloops.map((w) => {
1823
+ if (w.hasCapacity()) {
1824
+ return claim_default(app, w, logger2);
1825
+ }
1826
+ return Promise.reject(new Error("Workloop at capacity"));
1676
1827
  });
1828
+ return Promise.any(promises);
1677
1829
  };
1830
+ app.pendingClaims = () => app.workloops.reduce((sum, w) => sum + Object.keys(w.openClaims).length, 0);
1678
1831
  app.destroy = () => destroy_default(app, logger2);
1679
1832
  app.use(router.routes());
1680
1833
  if (options.lightning) {
@@ -6604,6 +6757,7 @@ function parseArgs(argv) {
6604
6757
  WORKER_STATE_PROPS_TO_REMOVE,
6605
6758
  WORKER_TIMEOUT_RETRY_COUNT,
6606
6759
  WORKER_TIMEOUT_RETRY_DELAY_MS,
6760
+ WORKER_WORKLOOPS,
6607
6761
  WORKER_VALIDATION_RETRIES,
6608
6762
  WORKER_VALIDATION_TIMEOUT_MS
6609
6763
  } = process.env;
@@ -6665,8 +6819,12 @@ function parseArgs(argv) {
6665
6819
  }).option("backoff", {
6666
6820
  description: "Claim backoff rules: min/max (in seconds). Env: WORKER_BACKOFF"
6667
6821
  }).option("capacity", {
6668
- description: `max concurrent workers. Default ${DEFAULT_WORKER_CAPACITY}. Env: WORKER_CAPACITY`,
6822
+ description: `Sets the maximum concurrent workers - but only if workloops is not set. Default ${DEFAULT_WORKER_CAPACITY}. Env: WORKER_CAPACITY`,
6669
6823
  type: "number"
6824
+ }).option("workloops", {
6825
+ description: 'Configure workloops with a prioritised queue list and a max capacity. Syntax: "<queues>:<capacity> ...". Mutually exclusive with --capacity. Env: WORKER_WORKLOOPS',
6826
+ type: "string",
6827
+ example: "fast_lane:1 manual>*:4"
6670
6828
  }).option("state-props-to-remove", {
6671
6829
  description: "A list of properties to remove from the final state returned by a job. Env: WORKER_STATE_PROPS_TO_REMOVE",
6672
6830
  type: "array"
@@ -6704,10 +6862,25 @@ function parseArgs(argv) {
6704
6862
  }).option("timeout-retry-delay", {
6705
6863
  description: "When a websocket event receives a timeout, this option sets how long to wait before retrying. Default 30000. Env: WORKER_TIMEOUT_RETRY_DELAY_MS",
6706
6864
  type: "number"
6707
- });
6865
+ }).example(
6866
+ "start --workloops *:5",
6867
+ "Default start configuration: a single workloop with capacity 5, claiming from all queues"
6868
+ ).example(
6869
+ "start --workloops manual>*:5",
6870
+ "A single workloop, capacity 5, which claims across two queues. Runs in the manual queue will be picked first, else any other queue will be picked."
6871
+ ).example(
6872
+ "start --workloops fast_lane:1 manual>*:4",
6873
+ "production start configuration with 1 fast lane workloop (capacity 1) and a second workloop with capacity 4"
6874
+ );
6708
6875
  const args2 = parser2.parse();
6876
+ const resolvedWorkloops = setArg(args2.workloops, WORKER_WORKLOOPS);
6877
+ const capacityExplicit = args2.capacity !== void 0 || WORKER_CAPACITY !== void 0;
6878
+ if (resolvedWorkloops !== void 0 && capacityExplicit) {
6879
+ throw new Error("--workloops and --capacity are mutually exclusive");
6880
+ }
6709
6881
  return {
6710
6882
  ...args2,
6883
+ workloops: resolvedWorkloops,
6711
6884
  port: setArg(args2.port, WORKER_PORT, DEFAULT_PORT2),
6712
6885
  lightning: setArg(
6713
6886
  args2.lightning,
@@ -6796,8 +6969,16 @@ function parseArgs(argv) {
6796
6969
 
6797
6970
  // src/start.ts
6798
6971
  var args = parseArgs(process.argv);
6972
+ var workloopConfigs = args.workloops ?? get_default_workloop_config_default(args.capacity);
6973
+ var effectiveCapacity = workloopConfigs.trim().split(/\s+/).reduce((sum, token) => sum + (parseInt(token.split(":").pop()) || 0), 0);
6799
6974
  var logger = createLogger("SRV", { level: args.log });
6800
- logger.info("Starting worker server...");
6975
+ logger.info("Starting worker...");
6976
+ logger.info(
6977
+ "Workloops:",
6978
+ workloopConfigs,
6979
+ "effective capacity:",
6980
+ effectiveCapacity
6981
+ );
6801
6982
  if (args.lightning === "mock") {
6802
6983
  args.lightning = "ws://localhost:8888/worker";
6803
6984
  if (!args.secret) {
@@ -6822,7 +7003,8 @@ function engineReady(engine) {
6822
7003
  min: minBackoff,
6823
7004
  max: maxBackoff
6824
7005
  },
6825
- maxWorkflows: args.capacity,
7006
+ maxWorkflows: effectiveCapacity,
7007
+ workloopConfigs,
6826
7008
  payloadLimitMb: args.payloadMemory,
6827
7009
  logPayloadLimitMb: args.logPayloadMemory ?? 1,
6828
7010
  // Default to 1MB
@@ -6872,7 +7054,7 @@ if (args.mock) {
6872
7054
  const engineOptions = {
6873
7055
  repoDir: args.repoDir,
6874
7056
  memoryLimitMb: args.runMemory,
6875
- maxWorkers: args.capacity,
7057
+ maxWorkers: effectiveCapacity,
6876
7058
  statePropsToRemove: args.statePropsToRemove,
6877
7059
  runTimeoutMs: args.maxRunDurationSeconds * 1e3,
6878
7060
  workerValidationTimeout: args.engineValidationTimeoutMs,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@openfn/ws-worker",
3
- "version": "1.21.4",
3
+ "version": "1.22.0",
4
4
  "description": "A Websocket Worker to connect Lightning to a Runtime Engine",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",