@openfn/ws-worker 1.21.5 → 1.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/start.js CHANGED
@@ -182,7 +182,9 @@ var destroy = async (app, logger2) => {
182
182
  await Promise.all([
183
183
  new Promise((resolve5) => {
184
184
  app.destroyed = true;
185
- app.workloop?.stop("server closed");
185
+ for (const w of app.workloops) {
186
+ w.stop("server closed");
187
+ }
186
188
  app.server.close(async () => {
187
189
  resolve5();
188
190
  });
@@ -200,11 +202,11 @@ var destroy = async (app, logger2) => {
200
202
  var waitForRunsAndClaims = (app, logger2) => new Promise((resolve5) => {
201
203
  const log = () => {
202
204
  logger2.debug(
203
- `Waiting for ${Object.keys(app.workflows).length} runs and ${Object.keys(app.openClaims).length} claims to complete...`
205
+ `Waiting for ${Object.keys(app.workflows).length} runs and ${app.pendingClaims()} claims to complete...`
204
206
  );
205
207
  };
206
208
  const checkAllClear = () => {
207
- if (Object.keys(app.workflows).length + Object.keys(app.openClaims).length === 0) {
209
+ if (Object.keys(app.workflows).length + app.pendingClaims() === 0) {
208
210
  logger2.debug("All runs completed!");
209
211
  app.events.off(INTERNAL_RUN_COMPLETE, checkAllClear);
210
212
  app.events.off(INTERNAL_CLAIM_COMPLETE, checkAllClear);
@@ -213,7 +215,7 @@ var waitForRunsAndClaims = (app, logger2) => new Promise((resolve5) => {
213
215
  log();
214
216
  }
215
217
  };
216
- if (Object.keys(app.workflows).length || Object.keys(app.openClaims).length) {
218
+ if (Object.keys(app.workflows).length || app.pendingClaims()) {
217
219
  log();
218
220
  app.events.on(INTERNAL_RUN_COMPLETE, checkAllClear);
219
221
  app.events.on(INTERNAL_CLAIM_COMPLETE, checkAllClear);
@@ -224,51 +226,6 @@ var waitForRunsAndClaims = (app, logger2) => new Promise((resolve5) => {
224
226
  });
225
227
  var destroy_default = destroy;
226
228
 
227
- // src/util/try-with-backoff.ts
228
- var BACKOFF_MULTIPLIER = 1.15;
229
- var tryWithBackoff = (fn, opts = {}) => {
230
- const { min = 1e3, max = 1e4, maxRuns, runs = 1 } = opts;
231
- let cancelled = false;
232
- if (!opts.isCancelled) {
233
- opts.isCancelled = () => cancelled;
234
- }
235
- const promise = new Promise(async (resolve5, reject) => {
236
- try {
237
- await fn();
238
- resolve5();
239
- } catch (e) {
240
- if (e?.abort) {
241
- cancelled = true;
242
- return reject();
243
- }
244
- if (opts.isCancelled()) {
245
- return resolve5();
246
- }
247
- if (!isNaN(maxRuns) && runs >= maxRuns) {
248
- return reject(new Error("max runs exceeded"));
249
- }
250
- setTimeout(() => {
251
- if (opts.isCancelled()) {
252
- return resolve5();
253
- }
254
- const nextOpts = {
255
- maxRuns,
256
- runs: runs + 1,
257
- min: Math.min(max, min * BACKOFF_MULTIPLIER),
258
- max,
259
- isCancelled: opts.isCancelled
260
- };
261
- tryWithBackoff(fn, nextOpts).then(resolve5).catch(reject);
262
- }, min);
263
- }
264
- });
265
- promise.cancel = () => {
266
- cancelled = true;
267
- };
268
- return promise;
269
- };
270
- var try_with_backoff_default = tryWithBackoff;
271
-
272
229
  // src/api/claim.ts
273
230
  import v8 from "node:v8";
274
231
  import * as Sentry from "@sentry/node";
@@ -297,24 +254,26 @@ var ClaimError = class extends Error {
297
254
  }
298
255
  };
299
256
  var claimIdGen = 0;
300
- var claim = (app, logger2 = mockLogger, options = {}) => {
257
+ var claim = (app, workloop, logger2 = mockLogger, options = {}) => {
301
258
  return new Promise((resolve5, reject) => {
302
- app.openClaims ??= {};
303
- const { maxWorkers = 5, demand = 1 } = options;
259
+ const { demand = 1 } = options;
304
260
  const podName = NAME ? `[${NAME}] ` : "";
305
- const activeWorkers = Object.keys(app.workflows).length;
306
- const pendingClaims = Object.values(app.openClaims).reduce(
261
+ const activeInWorkloop = workloop.activeRuns.size;
262
+ const capacity = workloop.capacity;
263
+ const pendingWorkloopClaims = Object.values(workloop.openClaims).reduce(
307
264
  (a, b) => a + b,
308
265
  0
309
266
  );
310
- if (activeWorkers >= maxWorkers) {
311
- app.workloop?.stop(`server at capacity (${activeWorkers}/${maxWorkers})`);
312
- return reject(new ClaimError("Server at capacity"));
313
- } else if (activeWorkers + pendingClaims >= maxWorkers) {
314
- app.workloop?.stop(
315
- `server at capacity (${activeWorkers}/${maxWorkers}, ${pendingClaims} pending)`
267
+ if (activeInWorkloop >= capacity) {
268
+ workloop.stop(
269
+ `workloop ${workloop.id} at capacity (${activeInWorkloop}/${capacity})`
270
+ );
271
+ return reject(new ClaimError("Workloop at capacity"));
272
+ } else if (activeInWorkloop + pendingWorkloopClaims >= capacity) {
273
+ workloop.stop(
274
+ `workloop ${workloop.id} at capacity (${activeInWorkloop}/${capacity}, ${pendingWorkloopClaims} pending)`
316
275
  );
317
- return reject(new ClaimError("Server at capacity"));
276
+ return reject(new ClaimError("Workloop at capacity"));
318
277
  }
319
278
  if (!app.queueChannel) {
320
279
  logger2.warn("skipping claim attempt: websocket unavailable");
@@ -327,21 +286,22 @@ var claim = (app, logger2 = mockLogger, options = {}) => {
327
286
  return reject(e);
328
287
  }
329
288
  const claimId = ++claimIdGen;
330
- app.openClaims[claimId] = demand;
289
+ workloop.openClaims[claimId] = demand;
331
290
  const { used_heap_size, heap_size_limit } = v8.getHeapStatistics();
332
291
  const usedHeapMb = Math.round(used_heap_size / 1024 / 1024);
333
292
  const totalHeapMb = Math.round(heap_size_limit / 1024 / 1024);
334
293
  const memPercent = Math.round(usedHeapMb / totalHeapMb * 100);
335
294
  logger2.debug(
336
- `Claiming runs :: demand ${demand} | capacity ${activeWorkers}/${maxWorkers} | memory ${memPercent}% (${usedHeapMb}/${totalHeapMb}mb)`
295
+ `Claiming runs [${workloop.id}] :: demand ${demand} | capacity ${activeInWorkloop}/${capacity} | memory ${memPercent}% (${usedHeapMb}/${totalHeapMb}mb)`
337
296
  );
338
297
  app.events.emit(INTERNAL_CLAIM_START);
339
298
  const start = Date.now();
340
299
  app.queueChannel.push(CLAIM, {
341
300
  demand,
342
- worker_name: NAME || null
301
+ worker_name: NAME || null,
302
+ queues: workloop.queues
343
303
  }).receive("ok", async ({ runs }) => {
344
- delete app.openClaims[claimId];
304
+ delete workloop.openClaims[claimId];
345
305
  const duration = Date.now() - start;
346
306
  logger2.debug(
347
307
  `${podName}claimed ${runs.length} runs in ${duration}ms (${runs.length ? runs.map((r) => r.id).join(",") : "-"})`
@@ -365,17 +325,19 @@ var claim = (app, logger2 = mockLogger, options = {}) => {
365
325
  } else {
366
326
  logger2.debug("skipping run token validation for", run2.id);
367
327
  }
328
+ workloop.activeRuns.add(run2.id);
329
+ app.runWorkloopMap[run2.id] = workloop;
368
330
  logger2.debug(`${podName} starting run ${run2.id}`);
369
331
  app.execute(run2);
370
332
  }
371
333
  resolve5();
372
334
  app.events.emit(INTERNAL_CLAIM_COMPLETE, { runs });
373
335
  }).receive("error", (e) => {
374
- delete app.openClaims[claimId];
336
+ delete workloop.openClaims[claimId];
375
337
  logger2.error("Error on claim", e);
376
338
  reject(new Error("claim error"));
377
339
  }).receive("timeout", () => {
378
- delete app.openClaims[claimId];
340
+ delete workloop.openClaims[claimId];
379
341
  logger2.error("TIMEOUT on claim. Runs may be lost.");
380
342
  reject(new Error("timeout"));
381
343
  });
@@ -383,43 +345,6 @@ var claim = (app, logger2 = mockLogger, options = {}) => {
383
345
  };
384
346
  var claim_default = claim;
385
347
 
386
- // src/api/workloop.ts
387
- var startWorkloop = (app, logger2, minBackoff2, maxBackoff2, maxWorkers) => {
388
- let promise;
389
- let cancelled = false;
390
- const workLoop = () => {
391
- if (!cancelled) {
392
- promise = try_with_backoff_default(
393
- () => claim_default(app, logger2, {
394
- maxWorkers
395
- }),
396
- {
397
- min: minBackoff2,
398
- max: maxBackoff2
399
- }
400
- );
401
- promise.then(() => {
402
- if (!cancelled) {
403
- setTimeout(workLoop, minBackoff2);
404
- }
405
- }).catch(() => {
406
- });
407
- }
408
- };
409
- workLoop();
410
- return {
411
- stop: (reason = "reason unknown") => {
412
- if (!cancelled) {
413
- logger2.info(`cancelling workloop: ${reason}`);
414
- cancelled = true;
415
- promise.cancel();
416
- }
417
- },
418
- isStopped: () => cancelled
419
- };
420
- };
421
- var workloop_default = startWorkloop;
422
-
423
348
  // src/api/execute.ts
424
349
  import * as Sentry4 from "@sentry/node";
425
350
  import {
@@ -724,6 +649,51 @@ var stringify_default = (obj) => stringify(obj, (_key, value) => {
724
649
  return value;
725
650
  });
726
651
 
652
+ // src/util/try-with-backoff.ts
653
+ var BACKOFF_MULTIPLIER = 1.15;
654
+ var tryWithBackoff = (fn, opts = {}) => {
655
+ const { min = 1e3, max = 1e4, maxRuns, runs = 1 } = opts;
656
+ let cancelled = false;
657
+ if (!opts.isCancelled) {
658
+ opts.isCancelled = () => cancelled;
659
+ }
660
+ const promise = new Promise(async (resolve5, reject) => {
661
+ try {
662
+ await fn();
663
+ resolve5();
664
+ } catch (e) {
665
+ if (e?.abort) {
666
+ cancelled = true;
667
+ return reject();
668
+ }
669
+ if (opts.isCancelled()) {
670
+ return resolve5();
671
+ }
672
+ if (!isNaN(maxRuns) && runs >= maxRuns) {
673
+ return reject(new Error("max runs exceeded"));
674
+ }
675
+ setTimeout(() => {
676
+ if (opts.isCancelled()) {
677
+ return resolve5();
678
+ }
679
+ const nextOpts = {
680
+ maxRuns,
681
+ runs: runs + 1,
682
+ min: Math.min(max, min * BACKOFF_MULTIPLIER),
683
+ max,
684
+ isCancelled: opts.isCancelled
685
+ };
686
+ tryWithBackoff(fn, nextOpts).then(resolve5).catch(reject);
687
+ }, min);
688
+ }
689
+ });
690
+ promise.cancel = () => {
691
+ cancelled = true;
692
+ };
693
+ return promise;
694
+ };
695
+ var try_with_backoff_default = tryWithBackoff;
696
+
727
697
  // src/util/timestamp.ts
728
698
  var timeInMicroseconds = (time) => time && (BigInt(time) / BigInt(1e3)).toString();
729
699
 
@@ -1384,6 +1354,7 @@ var connectToWorkerQueue = (endpoint, serverId, secret, logger2, options) => {
1384
1354
  messageTimeout = DEFAULT_MESSAGE_TIMEOUT_SECONDS,
1385
1355
  claimTimeout = DEFAULT_CLAIM_TIMEOUT_SECONDS,
1386
1356
  capacity,
1357
+ queues,
1387
1358
  SocketConstructor = PhxSocket
1388
1359
  } = options;
1389
1360
  const events = new EventEmitter2();
@@ -1420,6 +1391,9 @@ var connectToWorkerQueue = (endpoint, serverId, secret, logger2, options) => {
1420
1391
  didOpen = true;
1421
1392
  shouldReportConnectionError = true;
1422
1393
  const joinPayload = { capacity };
1394
+ if (queues) {
1395
+ joinPayload.queues = queues;
1396
+ }
1423
1397
  const channel = socket.channel("worker:queue", joinPayload);
1424
1398
  channel.onMessage = (ev, load) => {
1425
1399
  events.emit("message", ev, load);
@@ -1460,6 +1434,140 @@ var connectToWorkerQueue = (endpoint, serverId, secret, logger2, options) => {
1460
1434
  };
1461
1435
  var worker_queue_default = connectToWorkerQueue;
1462
1436
 
1437
+ // src/api/workloop.ts
1438
+ var Workloop = class {
1439
+ constructor({
1440
+ id,
1441
+ queues,
1442
+ capacity
1443
+ }) {
1444
+ this.activeRuns = /* @__PURE__ */ new Set();
1445
+ this.openClaims = {};
1446
+ this.cancelled = true;
1447
+ this.id = id;
1448
+ this.queues = queues;
1449
+ this.capacity = capacity;
1450
+ }
1451
+ hasCapacity() {
1452
+ const pendingClaims = Object.values(this.openClaims).reduce(
1453
+ (a, b) => a + b,
1454
+ 0
1455
+ );
1456
+ return this.activeRuns.size + pendingClaims < this.capacity;
1457
+ }
1458
+ start(app, logger2, minBackoff2, maxBackoff2) {
1459
+ this.logger = logger2;
1460
+ this.cancelled = false;
1461
+ const loop = () => {
1462
+ if (!this.cancelled) {
1463
+ this.promise = try_with_backoff_default(() => claim_default(app, this, logger2), {
1464
+ min: minBackoff2,
1465
+ max: maxBackoff2
1466
+ });
1467
+ this.promise.then(() => {
1468
+ if (!this.cancelled) {
1469
+ setTimeout(loop, minBackoff2);
1470
+ }
1471
+ }).catch(() => {
1472
+ });
1473
+ }
1474
+ };
1475
+ loop();
1476
+ }
1477
+ stop(reason = "reason unknown") {
1478
+ if (!this.cancelled) {
1479
+ this.logger?.info(`cancelling workloop: ${reason}`);
1480
+ this.cancelled = true;
1481
+ this.promise?.cancel();
1482
+ }
1483
+ }
1484
+ isStopped() {
1485
+ return this.cancelled;
1486
+ }
1487
+ };
1488
+
1489
+ // src/util/parse-workloops.ts
1490
+ var WorkloopValidationError = class extends Error {
1491
+ constructor(message) {
1492
+ super(message);
1493
+ this.name = "WorkloopValidationError";
1494
+ }
1495
+ };
1496
+ var VALID_NAME = /^[a-zA-Z0-9_]+$/;
1497
+ function parseWorkloops(input) {
1498
+ const trimmed = input.trim();
1499
+ if (!trimmed) {
1500
+ throw new WorkloopValidationError("Workloop configuration cannot be empty");
1501
+ }
1502
+ const tokens = trimmed.split(/\s+/);
1503
+ const configs = tokens.map(parseToken);
1504
+ const seenConfigs = /* @__PURE__ */ new Map();
1505
+ for (let i = 0; i < configs.length; i++) {
1506
+ const key = JSON.stringify(configs[i].queues);
1507
+ if (seenConfigs.has(key)) {
1508
+ const prevIndex = seenConfigs.get(key);
1509
+ console.warn(
1510
+ `Warning: workloops at positions ${prevIndex} and ${i} have identical queue configurations: ${tokens[prevIndex]} and ${tokens[i]}`
1511
+ );
1512
+ } else {
1513
+ seenConfigs.set(key, i);
1514
+ }
1515
+ }
1516
+ return configs;
1517
+ }
1518
+ function parseToken(token) {
1519
+ const lastColon = token.lastIndexOf(":");
1520
+ if (lastColon === -1) {
1521
+ throw new WorkloopValidationError(
1522
+ `Invalid token "${token}": missing :<count> suffix`
1523
+ );
1524
+ }
1525
+ const prefStr = token.slice(0, lastColon);
1526
+ const countStr = token.slice(lastColon + 1);
1527
+ const count = Number(countStr);
1528
+ if (!Number.isInteger(count) || countStr !== String(Math.floor(count))) {
1529
+ throw new WorkloopValidationError(
1530
+ `Invalid count "${countStr}" in token "${token}": must be a positive integer`
1531
+ );
1532
+ }
1533
+ if (count < 1) {
1534
+ throw new WorkloopValidationError(
1535
+ `Invalid count "${countStr}" in token "${token}": must be >= 1`
1536
+ );
1537
+ }
1538
+ const names = prefStr.split(">");
1539
+ for (const name of names) {
1540
+ if (name === "") {
1541
+ throw new WorkloopValidationError(`Empty queue name in token "${token}"`);
1542
+ }
1543
+ if (name !== "*" && !VALID_NAME.test(name)) {
1544
+ throw new WorkloopValidationError(
1545
+ `Invalid queue name "${name}" in token "${token}": must match /^[a-zA-Z0-9_]+$/ or be "*"`
1546
+ );
1547
+ }
1548
+ }
1549
+ const nonWildcardNames = names.filter((n) => n !== "*");
1550
+ const seen = /* @__PURE__ */ new Set();
1551
+ for (const name of nonWildcardNames) {
1552
+ if (seen.has(name)) {
1553
+ console.warn(
1554
+ `Warning: duplicate queue name "${name}" in token "${token}"`
1555
+ );
1556
+ }
1557
+ seen.add(name);
1558
+ }
1559
+ const wildcardIndex = names.indexOf("*");
1560
+ if (wildcardIndex !== -1 && wildcardIndex !== names.length - 1) {
1561
+ throw new WorkloopValidationError(
1562
+ `Wildcard "*" must be the last element in token "${token}"`
1563
+ );
1564
+ }
1565
+ return new Workloop({ id: token, queues: names, capacity: count });
1566
+ }
1567
+
1568
+ // src/util/get-default-workloop-config.ts
1569
+ var get_default_workloop_config_default = (capacity = 5) => `manual>*:${capacity}`;
1570
+
1463
1571
  // src/server.ts
1464
1572
  var exec = promisify(_exec);
1465
1573
  var DEFAULT_PORT = 2222;
@@ -1487,8 +1595,10 @@ function connect(app, logger2, options = {}) {
1487
1595
  app.resumeWorkloop();
1488
1596
  };
1489
1597
  const onDisconnect = () => {
1490
- if (!app.workloop?.isStopped()) {
1491
- app.workloop?.stop("Socket disconnected unexpectedly");
1598
+ for (const w of app.workloops) {
1599
+ if (!w.isStopped()) {
1600
+ w.stop("Socket disconnected unexpectedly");
1601
+ }
1492
1602
  }
1493
1603
  if (!app.destroyed) {
1494
1604
  logger2.info("Connection to lightning lost");
@@ -1510,17 +1620,26 @@ function connect(app, logger2, options = {}) {
1510
1620
  const onMessage = (event) => {
1511
1621
  if (event === WORK_AVAILABLE) {
1512
1622
  if (!app.destroyed) {
1513
- claim_default(app, logger2, { maxWorkers: options.maxWorkflows }).catch(() => {
1514
- });
1623
+ for (const w of app.workloops) {
1624
+ if (w.hasCapacity()) {
1625
+ claim_default(app, w, logger2).catch(() => {
1626
+ });
1627
+ }
1628
+ }
1515
1629
  }
1516
1630
  }
1517
1631
  };
1632
+ const queuesMap = {};
1633
+ for (const w of app.workloops) {
1634
+ queuesMap[w.queues.join(">")] = w.capacity;
1635
+ }
1518
1636
  worker_queue_default(options.lightning, app.id, options.secret, logger2, {
1519
1637
  // TODO: options.socketTimeoutSeconds wins because this is what USED to be used
1520
1638
  // But it's deprecated and should be removed soon
1521
1639
  messageTimeout: options.socketTimeoutSeconds ?? options.messageTimeoutSeconds,
1522
1640
  claimTimeout: options.claimTimeoutSeconds,
1523
- capacity: options.maxWorkflows
1641
+ capacity: options.maxWorkflows,
1642
+ queues: queuesMap
1524
1643
  }).on("connect", onConnect).on("disconnect", onDisconnect).on("error", onError).on("message", onMessage);
1525
1644
  }
1526
1645
  async function setupCollections(options, logger2) {
@@ -1569,27 +1688,36 @@ function createServer(engine, options = {}) {
1569
1688
  logger2.debug(str);
1570
1689
  })
1571
1690
  );
1572
- app.openClaims = {};
1573
1691
  app.workflows = {};
1574
1692
  app.destroyed = false;
1693
+ app.workloops = parseWorkloops(
1694
+ options.workloopConfigs ?? get_default_workloop_config_default(options.maxWorkflows)
1695
+ );
1696
+ app.runWorkloopMap = {};
1575
1697
  app.server = app.listen(port);
1576
1698
  logger2.success(`Worker ${app.id} listening on ${port}`);
1577
1699
  process.send?.("READY");
1578
1700
  router.get("/livez", healthcheck_default);
1579
1701
  router.get("/", healthcheck_default);
1580
1702
  app.options = options;
1581
- app.resumeWorkloop = () => {
1703
+ app.resumeWorkloop = (workloop) => {
1582
1704
  if (options.noLoop || app.destroyed) {
1583
1705
  return;
1584
1706
  }
1585
- if (!app.workloop || app.workloop?.isStopped()) {
1586
- logger2.info("Starting workloop");
1587
- app.workloop = workloop_default(
1707
+ const targets = workloop ? [workloop] : app.workloops;
1708
+ for (const w of targets) {
1709
+ if (!w.hasCapacity()) {
1710
+ continue;
1711
+ }
1712
+ if (!w.isStopped()) {
1713
+ w.stop("restarting");
1714
+ }
1715
+ logger2.info(`Starting workloop for ${w.id}`);
1716
+ w.start(
1588
1717
  app,
1589
1718
  logger2,
1590
1719
  options.backoff?.min || MIN_BACKOFF,
1591
- options.backoff?.max || MAX_BACKOFF,
1592
- options.maxWorkflows
1720
+ options.backoff?.max || MAX_BACKOFF
1593
1721
  );
1594
1722
  }
1595
1723
  };
@@ -1634,8 +1762,16 @@ function createServer(engine, options = {}) {
1634
1762
  );
1635
1763
  delete app.workflows[id];
1636
1764
  runChannel.leave();
1637
- app.events.emit(INTERNAL_RUN_COMPLETE);
1638
- app.resumeWorkloop();
1765
+ const owningWorkloop = app.runWorkloopMap[id];
1766
+ if (owningWorkloop) {
1767
+ owningWorkloop.activeRuns.delete(id);
1768
+ delete app.runWorkloopMap[id];
1769
+ app.events.emit(INTERNAL_RUN_COMPLETE);
1770
+ app.resumeWorkloop(owningWorkloop);
1771
+ } else {
1772
+ app.events.emit(INTERNAL_RUN_COMPLETE);
1773
+ app.resumeWorkloop();
1774
+ }
1639
1775
  };
1640
1776
  const context = execute(
1641
1777
  runChannel,
@@ -1649,7 +1785,14 @@ function createServer(engine, options = {}) {
1649
1785
  app.workflows[id] = context;
1650
1786
  } catch (e) {
1651
1787
  delete app.workflows[id];
1652
- app.resumeWorkloop();
1788
+ const owningWorkloop = app.runWorkloopMap[id];
1789
+ if (owningWorkloop) {
1790
+ owningWorkloop.activeRuns.delete(id);
1791
+ delete app.runWorkloopMap[id];
1792
+ app.resumeWorkloop(owningWorkloop);
1793
+ } else {
1794
+ app.resumeWorkloop();
1795
+ }
1653
1796
  logger2.error(`Unexpected error executing ${id}`);
1654
1797
  logger2.error(e);
1655
1798
  }
@@ -1659,9 +1802,13 @@ function createServer(engine, options = {}) {
1659
1802
  };
1660
1803
  router.post("/claim", async (ctx) => {
1661
1804
  logger2.info("triggering claim from POST request");
1662
- return claim_default(app, logger2, {
1663
- maxWorkers: options.maxWorkflows
1664
- }).then(() => {
1805
+ const promises = app.workloops.map((w) => {
1806
+ if (w.hasCapacity()) {
1807
+ return claim_default(app, w, logger2);
1808
+ }
1809
+ return Promise.reject(new Error("Workloop at capacity"));
1810
+ });
1811
+ return Promise.any(promises).then(() => {
1665
1812
  logger2.info("claim complete: 1 run claimed");
1666
1813
  ctx.body = "complete";
1667
1814
  ctx.status = 200;
@@ -1672,10 +1819,15 @@ function createServer(engine, options = {}) {
1672
1819
  });
1673
1820
  });
1674
1821
  app.claim = () => {
1675
- return claim_default(app, logger2, {
1676
- maxWorkers: options.maxWorkflows
1822
+ const promises = app.workloops.map((w) => {
1823
+ if (w.hasCapacity()) {
1824
+ return claim_default(app, w, logger2);
1825
+ }
1826
+ return Promise.reject(new Error("Workloop at capacity"));
1677
1827
  });
1828
+ return Promise.any(promises);
1678
1829
  };
1830
+ app.pendingClaims = () => app.workloops.reduce((sum, w) => sum + Object.keys(w.openClaims).length, 0);
1679
1831
  app.destroy = () => destroy_default(app, logger2);
1680
1832
  app.use(router.routes());
1681
1833
  if (options.lightning) {
@@ -6605,6 +6757,7 @@ function parseArgs(argv) {
6605
6757
  WORKER_STATE_PROPS_TO_REMOVE,
6606
6758
  WORKER_TIMEOUT_RETRY_COUNT,
6607
6759
  WORKER_TIMEOUT_RETRY_DELAY_MS,
6760
+ WORKER_WORKLOOPS,
6608
6761
  WORKER_VALIDATION_RETRIES,
6609
6762
  WORKER_VALIDATION_TIMEOUT_MS
6610
6763
  } = process.env;
@@ -6666,8 +6819,12 @@ function parseArgs(argv) {
6666
6819
  }).option("backoff", {
6667
6820
  description: "Claim backoff rules: min/max (in seconds). Env: WORKER_BACKOFF"
6668
6821
  }).option("capacity", {
6669
- description: `max concurrent workers. Default ${DEFAULT_WORKER_CAPACITY}. Env: WORKER_CAPACITY`,
6822
+ description: `Sets the maximum concurrent workers - but only if workloops is not set. Default ${DEFAULT_WORKER_CAPACITY}. Env: WORKER_CAPACITY`,
6670
6823
  type: "number"
6824
+ }).option("workloops", {
6825
+ description: 'Configure workloops with a prioritised queue list and a max capacity. Syntax: "<queues>:<capacity> ...". Mutually exclusive with --capacity. Env: WORKER_WORKLOOPS',
6826
+ type: "string",
6827
+ example: "fast_lane:1 manual>*:4"
6671
6828
  }).option("state-props-to-remove", {
6672
6829
  description: "A list of properties to remove from the final state returned by a job. Env: WORKER_STATE_PROPS_TO_REMOVE",
6673
6830
  type: "array"
@@ -6705,10 +6862,25 @@ function parseArgs(argv) {
6705
6862
  }).option("timeout-retry-delay", {
6706
6863
  description: "When a websocket event receives a timeout, this option sets how long to wait before retrying. Default 30000. Env: WORKER_TIMEOUT_RETRY_DELAY_MS",
6707
6864
  type: "number"
6708
- });
6865
+ }).example(
6866
+ "start --workloops *:5",
6867
+ "Default start configuration: a single workloop with capacity 5, claiming from all queues"
6868
+ ).example(
6869
+ "start --workloops manual>*:5",
6870
+ "A single workloop, capacity 5, which claims across two queues. Runs in the manual queue will be picked first, else any other queue will be picked."
6871
+ ).example(
6872
+ "start --workloops fast_lane:1 manual>*:4",
6873
+ "production start configuration with 1 fast lane workloop (capacity 1) and a second workloop with capacity 4"
6874
+ );
6709
6875
  const args2 = parser2.parse();
6876
+ const resolvedWorkloops = setArg(args2.workloops, WORKER_WORKLOOPS);
6877
+ const capacityExplicit = args2.capacity !== void 0 || WORKER_CAPACITY !== void 0;
6878
+ if (resolvedWorkloops !== void 0 && capacityExplicit) {
6879
+ throw new Error("--workloops and --capacity are mutually exclusive");
6880
+ }
6710
6881
  return {
6711
6882
  ...args2,
6883
+ workloops: resolvedWorkloops,
6712
6884
  port: setArg(args2.port, WORKER_PORT, DEFAULT_PORT2),
6713
6885
  lightning: setArg(
6714
6886
  args2.lightning,
@@ -6797,8 +6969,16 @@ function parseArgs(argv) {
6797
6969
 
6798
6970
  // src/start.ts
6799
6971
  var args = parseArgs(process.argv);
6972
+ var workloopConfigs = args.workloops ?? get_default_workloop_config_default(args.capacity);
6973
+ var effectiveCapacity = workloopConfigs.trim().split(/\s+/).reduce((sum, token) => sum + (parseInt(token.split(":").pop()) || 0), 0);
6800
6974
  var logger = createLogger("SRV", { level: args.log });
6801
- logger.info("Starting worker server...");
6975
+ logger.info("Starting worker...");
6976
+ logger.info(
6977
+ "Workloops:",
6978
+ workloopConfigs,
6979
+ "effective capacity:",
6980
+ effectiveCapacity
6981
+ );
6802
6982
  if (args.lightning === "mock") {
6803
6983
  args.lightning = "ws://localhost:8888/worker";
6804
6984
  if (!args.secret) {
@@ -6823,7 +7003,8 @@ function engineReady(engine) {
6823
7003
  min: minBackoff,
6824
7004
  max: maxBackoff
6825
7005
  },
6826
- maxWorkflows: args.capacity,
7006
+ maxWorkflows: effectiveCapacity,
7007
+ workloopConfigs,
6827
7008
  payloadLimitMb: args.payloadMemory,
6828
7009
  logPayloadLimitMb: args.logPayloadMemory ?? 1,
6829
7010
  // Default to 1MB
@@ -6873,7 +7054,7 @@ if (args.mock) {
6873
7054
  const engineOptions = {
6874
7055
  repoDir: args.repoDir,
6875
7056
  memoryLimitMb: args.runMemory,
6876
- maxWorkers: args.capacity,
7057
+ maxWorkers: effectiveCapacity,
6877
7058
  statePropsToRemove: args.statePropsToRemove,
6878
7059
  runTimeoutMs: args.maxRunDurationSeconds * 1e3,
6879
7060
  workerValidationTimeout: args.engineValidationTimeoutMs,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@openfn/ws-worker",
3
- "version": "1.21.5",
3
+ "version": "1.22.0",
4
4
  "description": "A Websocket Worker to connect Lightning to a Runtime Engine",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",
@@ -24,8 +24,8 @@
24
24
  "phoenix": "1.7.10",
25
25
  "ws": "^8.18.3",
26
26
  "@openfn/engine-multi": "1.10.4",
27
- "@openfn/lexicon": "^1.4.1",
28
27
  "@openfn/logger": "1.1.1",
28
+ "@openfn/lexicon": "^1.4.1",
29
29
  "@openfn/runtime": "1.8.4"
30
30
  },
31
31
  "devDependencies": {