@openfn/ws-worker 1.21.5 → 1.22.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,25 @@
1
1
  # ws-worker
2
2
 
3
+ ## 1.22.1
4
+
5
+ ### Patch Changes
6
+
7
+ - 6fd3942: Fix an issue where unhandled errors could trigger a worker crash after a compilation error.
8
+ - 32b43cb: When reporting compilation errors, prefer the step name to the id
9
+ - Updated dependencies [6fd3942]
10
+ - Updated dependencies [32b43cb]
11
+ - @openfn/engine-multi@1.10.5
12
+
13
+ ## 1.22.0
14
+
15
+ ### Minor Changes
16
+
17
+ - 07457d7: Fastlane support: multiple concurrent workloops, each with its own isolated capacity and backoff.
18
+
19
+ Claims also include a `queues` key, which specifies a prioritised list of Lightning work queues to claim from.
20
+
21
+ Configure workloops with the `--workloops` CLI option or env. By default the worker uses `manual>*:5`, which provides parity behaviour to prior production.
22
+
3
23
  ## 1.21.5
4
24
 
5
25
  ### Patch Changes
package/README.md CHANGED
@@ -80,6 +80,85 @@ To use the monorepo adaptor version:
80
80
  pnpm start --collections-version local --collections-url http://localhost:4000/collections
81
81
  ```
82
82
 
83
+ ## Workloops
84
+
85
+ By default, the worker runs a single workloop that claims runs from any
86
+ queue, preferring the `manual` queue (used for manually-triggered and
87
+ webhook runs). This is equivalent to `--workloops "manual>*:5"`.
88
+
89
+ The `--workloops` option lets you split the worker's capacity into
90
+ independent groups, each with its own queue preference chain and slot
91
+ count. This is useful for dedicating capacity to latency-sensitive
92
+ workloads (e.g., sync webhooks on a `fast_lane` queue) while letting
93
+ remaining capacity serve general work.
94
+
95
+ ```
96
+ --workloops "<queues>:<capacity> <queues>:<capacity> ..."
97
+ ```
98
+
99
+ ### Syntax
100
+
101
+ | Element | Meaning |
102
+ |---------|---------|
103
+ | `>` | Queue preference separator (left = highest priority) |
104
+ | `*` | Wildcard: accept runs from any queue (must be last) |
105
+ | `:N` | Number of slots for this group |
106
+ | ` ` (space) | Group separator |
107
+
108
+ ### Examples
109
+
110
+ ```bash
111
+ # 1 slot pinned to fast_lane (strict), 4 slots preferring manual then anything
112
+ --workloops "fast_lane:1 manual>*:4"
113
+
114
+ # 5 generic slots (pure FIFO across all queues) — equivalent to --capacity 5
115
+ --workloops "*:5"
116
+
117
+ # 2 fast lane (strict), 3 with manual preference
118
+ --workloops "fast_lane:2 manual>*:3"
119
+
120
+ # 1 slot preferring fast_lane > manual > rest, 4 generic
121
+ --workloops "fast_lane>manual>*:1 *:4"
122
+ ```
123
+
124
+ A group **without** `*` in its queue list is strict — it will only
125
+ claim runs from the named queues. A group **with** `*` will accept any
126
+ run, but prefers queues listed before the wildcard.
127
+
128
+ ### Environment variable
129
+
130
+ ```
131
+ WORKER_WORKLOOPS="fast_lane:1 manual>*:4"
132
+ ```
133
+
134
+ ### Relationship to --capacity
135
+
136
+ `--workloops` and `--capacity` are mutually exclusive. If neither is
137
+ provided, the default is `--capacity 5`, which internally creates a
138
+ single `manual>*:5` group. The total capacity of the worker is always
139
+ the sum of all group slot counts.
140
+
141
+ ### How it works
142
+
143
+ Each group runs its own independent claim loop with its own backoff
144
+ timer. When a run completes, only the owning group's workloop resumes.
145
+ A `work-available` push from Lightning triggers a claim attempt on
146
+ every group that has free slots.
147
+
148
+ ```
149
+ Main Process (ws-worker)
150
+ ├── Workloop 1 (manual>*:2) ─┐
151
+ ├── Workloop 2 (fast_lane:1) ─┼── all run in the main process as async loops
152
+ ├── Workloop 3 (*:5) ─┘
153
+
154
+ └── Engine (single instance, shared by all lanes)
155
+ └── Child Process Pool (capacity = sum of all lanes/slots)
156
+ ├── Child 1 (forked) → Worker Thread (per task)
157
+ ├── Child 2 (forked) → Worker Thread (per task)
158
+ ├── ...on demand, reused after each task
159
+ └── Child N
160
+ ```
161
+
83
162
  ## Architecture
84
163
 
85
164
  Lightning is expected to maintain a queue of runs. The Worker pulls those runs from the queue, via websocket, and sends them off to the Engine for execution.
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  import { EventEmitter } from 'node:events';
2
2
  import Koa from 'koa';
3
- import { LogLevel, Logger } from '@openfn/logger';
3
+ import { Logger, LogLevel } from '@openfn/logger';
4
4
  import * as l from '@openfn/lexicon/lightning';
5
5
  import { ClaimRun } from '@openfn/lexicon/lightning';
6
6
  import { ExecutionPlan, Lazy, State, UUID } from '@openfn/lexicon';
@@ -8,10 +8,25 @@ import { ExecuteOptions, RuntimeEngine } from '@openfn/engine-multi';
8
8
  import { Channel as Channel$1 } from 'phoenix';
9
9
  import { Server } from 'http';
10
10
 
11
- type Workloop = {
12
- stop: (reason?: string) => void;
13
- isStopped: () => boolean;
14
- };
11
+ declare class Workloop {
12
+ id: string;
13
+ queues: string[];
14
+ capacity: number;
15
+ activeRuns: Set<string>;
16
+ openClaims: Record<string, number>;
17
+ private cancelled;
18
+ private promise?;
19
+ private logger?;
20
+ constructor({ id, queues, capacity, }: {
21
+ id: string;
22
+ queues: string[];
23
+ capacity: number;
24
+ });
25
+ hasCapacity(): boolean;
26
+ start(app: ServerApp, logger: Logger, minBackoff: number, maxBackoff: number): void;
27
+ stop(reason?: string): void;
28
+ isStopped(): boolean;
29
+ }
15
30
 
16
31
  // Internal server state for each run
17
32
  type RunState = {
@@ -74,6 +89,7 @@ type ServerOptions = {
74
89
  batchInterval?: number;
75
90
  batchLimit?: number;
76
91
  maxWorkflows?: number;
92
+ workloopConfigs?: string;
77
93
  port?: number;
78
94
  lightning?: string;
79
95
  logger?: Logger;
@@ -102,16 +118,17 @@ interface ServerApp extends Koa {
102
118
  socket?: any;
103
119
  queueChannel?: Channel;
104
120
  workflows: Record<string, true | Context>;
105
- openClaims: Record<string, number>;
106
121
  destroyed: boolean;
107
122
  events: EventEmitter;
108
123
  server: Server;
109
124
  engine: RuntimeEngine;
110
125
  options: ServerOptions;
111
- workloop?: Workloop;
126
+ workloops: Workloop[];
127
+ runWorkloopMap: Record<string, Workloop>;
112
128
  execute: ({ id, token }: ClaimRun) => Promise<void>;
113
129
  destroy: () => void;
114
- resumeWorkloop: () => void;
130
+ resumeWorkloop: (workloop?: Workloop) => void;
131
+ pendingClaims: () => number;
115
132
  claim: () => Promise<any>;
116
133
  }
117
134
  declare function createServer(engine: RuntimeEngine, options?: ServerOptions): ServerApp;