@blokjs/helper 0.2.1 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/dist/components/AddElse.d.ts +15 -0
  2. package/dist/components/AddIf.d.ts +15 -0
  3. package/dist/components/StepNode.d.ts +6 -0
  4. package/dist/components/StepNode.js +8 -0
  5. package/dist/components/StepNode.js.map +1 -1
  6. package/dist/components/Trigger.d.ts +10 -2
  7. package/dist/components/Trigger.js +16 -5
  8. package/dist/components/Trigger.js.map +1 -1
  9. package/dist/components/branch.d.ts +44 -0
  10. package/dist/components/branch.js +57 -0
  11. package/dist/components/branch.js.map +1 -0
  12. package/dist/components/forEach.d.ts +53 -0
  13. package/dist/components/forEach.js +64 -0
  14. package/dist/components/forEach.js.map +1 -0
  15. package/dist/components/loop.d.ts +52 -0
  16. package/dist/components/loop.js +54 -0
  17. package/dist/components/loop.js.map +1 -0
  18. package/dist/components/switchOn.d.ts +68 -0
  19. package/dist/components/switchOn.js +76 -0
  20. package/dist/components/switchOn.js.map +1 -0
  21. package/dist/components/tryCatch.d.ts +63 -0
  22. package/dist/components/tryCatch.js +68 -0
  23. package/dist/components/tryCatch.js.map +1 -0
  24. package/dist/components/workflowV2.d.ts +83 -0
  25. package/dist/components/workflowV2.js +84 -0
  26. package/dist/components/workflowV2.js.map +1 -0
  27. package/dist/index.d.ts +12 -3
  28. package/dist/index.js +25 -4
  29. package/dist/index.js.map +1 -1
  30. package/dist/proxy/$.d.ts +102 -0
  31. package/dist/proxy/$.js +130 -0
  32. package/dist/proxy/$.js.map +1 -0
  33. package/dist/types/StepOpts.d.ts +723 -3
  34. package/dist/types/StepOpts.js +702 -3
  35. package/dist/types/StepOpts.js.map +1 -1
  36. package/dist/types/TriggerOpts.d.ts +1600 -35
  37. package/dist/types/TriggerOpts.js +601 -29
  38. package/dist/types/TriggerOpts.js.map +1 -1
  39. package/dist/types/WorkflowOpts.d.ts +478 -28
  40. package/dist/types/WorkflowOpts.js +66 -3
  41. package/dist/types/WorkflowOpts.js.map +1 -1
  42. package/dist/utils/parseDuration.d.ts +33 -0
  43. package/dist/utils/parseDuration.js +78 -0
  44. package/dist/utils/parseDuration.js.map +1 -0
  45. package/dist/workflow.schema.json +662 -0
  46. package/package.json +6 -6
@@ -1,15 +1,342 @@
1
1
  import { z } from "zod";
2
- // HTTP Trigger Options
3
- export const HttpTriggerOptsSchema = z.object({
4
- method: z.enum(["GET", "POST", "PUT", "DELETE", "PATCH", "ANY"]),
5
- path: z.string().optional(),
6
- accept: z.string().default("application/json"),
7
- headers: z.record(z.string(), z.any()).optional(),
2
+ // =============================================================================
3
+ // Concurrency keys (Tier 2 #6) — shared across HTTP & Worker triggers
4
+ // =============================================================================
5
+ /**
6
+ * Reusable Zod field bag for per-key concurrency gating.
7
+ *
8
+ * Spread into a trigger's `z.object({...})` and pair with the
9
+ * {@link concurrencyRefinement} cross-field check to add concurrency-key
10
+ * support to a trigger schema.
11
+ *
12
+ * Authors set `concurrencyKey` (literal or `$`-proxy expression) plus an
13
+ * optional `concurrencyLimit` (defaults to 1, matching Trigger.dev's
14
+ * "named mutex per key" pattern). When omitted, the trigger has no
15
+ * concurrency gate (zero-overhead default).
16
+ */
17
+ export const ConcurrencyOptsFields = {
18
+ concurrencyKey: z
19
+ .string()
20
+ .min(1)
21
+ .optional()
22
+ .describe("OPTIONAL. Per-key concurrency gating. Literal string or `$.<path>` proxy expression " +
23
+ "evaluated against the live ctx at run-entry time. When set, runs sharing the resolved " +
24
+ "key contend for at most `concurrencyLimit` concurrent slots. When unset, no gating applies."),
25
+ concurrencyLimit: z
26
+ .number()
27
+ .int()
28
+ .min(1)
29
+ .max(10000)
30
+ .optional()
31
+ .describe("OPTIONAL. Maximum concurrent runs for the resolved `concurrencyKey`. " +
32
+ "Defaults to 1 (matches Trigger.dev's named-mutex semantics). " +
33
+ "Ignored when `concurrencyKey` is unset. Bump for throughput-oriented use cases."),
34
+ concurrencyLeaseMs: z
35
+ .number()
36
+ .int()
37
+ .min(1000)
38
+ .optional()
39
+ .describe("OPTIONAL. Lease duration for the concurrency slot in milliseconds. " +
40
+ "Defaults to 3600000 (1h). Tunable per-trigger; process-wide override via " +
41
+ "`BLOK_CONCURRENCY_LEASE_MS`. Crash-safety upper bound on slot leaks."),
42
+ onLimit: z
43
+ .enum(["throw", "queue"])
44
+ .optional()
45
+ .describe("OPTIONAL. Behavior when the concurrency gate denies a run. " +
46
+ "`'throw'` (default): HTTP returns 429 + Retry-After / Worker NACKs with redelivery " +
47
+ "(transient resource state, doesn't count against the workflow's retry budget). " +
48
+ "`'queue'`: defer the run via the in-process scheduler and re-attempt acquisition " +
49
+ "after a 1s delay. Reuses the Tier 2 #5+#7 deferred-dispatch plumbing; HTTP returns " +
50
+ "202 Accepted + Location, Worker ACKs without retry. Requires `concurrencyKey` to be set."),
51
+ // PR 5 B2 — TTL on queued runs.
52
+ concurrencyQueueTimeoutMs: z
53
+ .number()
54
+ .int()
55
+ .min(1000)
56
+ .optional()
57
+ .describe('OPTIONAL. Time-to-live for queued runs in milliseconds. When set AND `onLimit: "queue"`, ' +
58
+ "queued runs that age past this timeout flip to `expired` instead of re-queueing. " +
59
+ 'Requires `onLimit: "queue"`. Without this, queued runs retry indefinitely (lease-bounded only).'),
60
+ // PR 5 B3 — capped exponential backoff for onLimit:queue re-defer.
61
+ concurrencyQueueRetry: z
62
+ .object({
63
+ minBackoffMs: z.number().int().min(0).optional().describe("Initial backoff (default 1000)."),
64
+ maxBackoffMs: z.number().int().min(0).optional().describe("Cap on the backoff between retries (default 30000)."),
65
+ factor: z.number().min(1).optional().describe("Exponential factor (default 2)."),
66
+ })
67
+ .optional()
68
+ .describe('OPTIONAL. Capped exponential backoff config for `onLimit: "queue"` re-defer. ' +
69
+ "Replaces the fixed 1s retry. delay = min(maxBackoffMs, minBackoffMs * factor^attempt). " +
70
+ 'Requires `onLimit: "queue"`.'),
71
+ };
72
+ /**
73
+ * Cross-field refinement: `concurrencyLimit` / `concurrencyLeaseMs` / `onLimit`
74
+ * set without `concurrencyKey` are meaningless and rejected at validation time.
75
+ */
76
+ export const concurrencyRefinement = (val, ctx) => {
77
+ if (val.concurrencyLimit !== undefined && val.concurrencyKey === undefined) {
78
+ ctx.addIssue({
79
+ code: z.ZodIssueCode.custom,
80
+ path: ["concurrencyLimit"],
81
+ message: "`concurrencyLimit` requires `concurrencyKey` to be set.",
82
+ });
83
+ }
84
+ if (val.concurrencyLeaseMs !== undefined && val.concurrencyKey === undefined) {
85
+ ctx.addIssue({
86
+ code: z.ZodIssueCode.custom,
87
+ path: ["concurrencyLeaseMs"],
88
+ message: "`concurrencyLeaseMs` requires `concurrencyKey` to be set.",
89
+ });
90
+ }
91
+ if (val.onLimit !== undefined && val.concurrencyKey === undefined) {
92
+ ctx.addIssue({
93
+ code: z.ZodIssueCode.custom,
94
+ path: ["onLimit"],
95
+ message: "`onLimit` requires `concurrencyKey` to be set.",
96
+ });
97
+ }
98
+ // PR 5 B2 — concurrencyQueueTimeoutMs requires onLimit: "queue".
99
+ if (val.concurrencyQueueTimeoutMs !== undefined && val.onLimit !== "queue") {
100
+ ctx.addIssue({
101
+ code: z.ZodIssueCode.custom,
102
+ path: ["concurrencyQueueTimeoutMs"],
103
+ message: '`concurrencyQueueTimeoutMs` requires `onLimit: "queue"`.',
104
+ });
105
+ }
106
+ // PR 5 B3 — concurrencyQueueRetry requires onLimit: "queue".
107
+ if (val.concurrencyQueueRetry !== undefined && val.onLimit !== "queue") {
108
+ ctx.addIssue({
109
+ code: z.ZodIssueCode.custom,
110
+ path: ["concurrencyQueueRetry"],
111
+ message: '`concurrencyQueueRetry` requires `onLimit: "queue"`.',
112
+ });
113
+ }
114
+ };
115
+ /**
116
+ * Standalone schema exposing just the concurrency fields. Useful for tests
117
+ * and tools that want to validate concurrency config in isolation. Real
118
+ * triggers spread {@link ConcurrencyOptsFields} into their own `z.object`
119
+ * and apply {@link concurrencyRefinement}.
120
+ */
121
+ export const ConcurrencyOptsSchema = z.object(ConcurrencyOptsFields).superRefine(concurrencyRefinement);
122
+ // =============================================================================
123
+ // Scheduling: delay / TTL / debounce (Tier 2 #5 + #7) — shared across HTTP & Worker
124
+ // =============================================================================
125
+ /**
126
+ * Duration value: a non-negative integer (interpreted as milliseconds) or a
127
+ * single-unit string (`"500ms"`, `"30s"`, `"5m"`, `"2h"`, `"1d"`). Validated
128
+ * at trigger-config parse time; converted to milliseconds at run-entry time
129
+ * by `parseDuration` (`@blokjs/helper`).
130
+ */
131
+ /**
132
+ * Duration value: a non-negative integer (ms) or a single-unit string
133
+ * (`"500ms"`, `"30s"`, `"5m"`, `"2h"`, `"1d"`). Reused by Tier 2 #5+#7
134
+ * scheduling fields and the Tier 2 quick-wins `maxDuration` step field.
135
+ */
136
+ export const DurationSchema = z.union([
137
+ z.number().int().min(0),
138
+ z
139
+ .string()
140
+ .min(1)
141
+ .regex(/^\d+(ms|s|m|h|d)$/, {
142
+ message: 'Duration must be a non-negative integer + unit (ms|s|m|h|d), e.g. "500ms", "30s", "5m", "2h", "1d".',
143
+ }),
144
+ ]);
145
+ /**
146
+ * Per-key debounce configuration. When set, repeated triggers sharing the
147
+ * resolved `key` collapse into one delayed run. Modes:
148
+ *
149
+ * - `"trailing"` (default): each ping resets a `delay` ms timer; the run
150
+ * fires after `delay` ms of silence. `maxDelay` (when set) bounds the
151
+ * tail latency — even with continuous pings, the run fires after
152
+ * `maxDelay` ms.
153
+ * - `"leading"`: the first ping in a window fires immediately; subsequent
154
+ * pings within `delay` ms are dropped with status `"debounced"`. Window
155
+ * resets when `delay` ms of silence pass.
156
+ *
157
+ * `key` is a literal string OR a `js/...` expression that evaluates to a
158
+ * string at run-entry time (typically derived from the request payload via
159
+ * a `$`-proxy expression like `$.req.body.userId`).
160
+ */
161
+ export const DebounceOptsSchema = z
162
+ .object({
163
+ key: z
164
+ .string()
165
+ .min(1)
166
+ .describe("Debounce key — literal string or `js/ctx.<path>` expression. Pings sharing the resolved key collapse."),
167
+ mode: z
168
+ .enum(["leading", "trailing"])
169
+ .default("trailing")
170
+ .describe("`trailing` (default) waits for `delay` ms of silence then fires once with the latest payload. " +
171
+ "`leading` fires immediately and suppresses follow-ups within `delay` ms."),
172
+ delay: DurationSchema.describe("Debounce window. Number (ms) or string (`500ms`, `5s`, `2m`, etc.)."),
173
+ maxDelay: DurationSchema.optional().describe("OPTIONAL. Force a fire after this many ms even if pings keep coming. Bounds tail latency. " +
174
+ "Must be >= `delay` when set. Ignored in leading mode."),
175
+ })
176
+ .refine((d) => {
177
+ if (d.maxDelay === undefined)
178
+ return true;
179
+ // Accept any combo at the schema layer — duration normalization happens at runtime.
180
+ // We DO check the easy numeric case to catch obvious typos early.
181
+ if (typeof d.delay === "number" && typeof d.maxDelay === "number") {
182
+ return d.maxDelay >= d.delay;
183
+ }
184
+ return true;
185
+ }, {
186
+ message: "`debounce.maxDelay` must be >= `debounce.delay`.",
187
+ path: ["maxDelay"],
188
+ });
189
+ /**
190
+ * Reusable Zod field bag for run-scheduling primitives. Spread into a
191
+ * trigger's `z.object({...})` and pair with {@link schedulingRefinement}
192
+ * for cross-field validation.
193
+ *
194
+ * - `delay`: defer the run by N (number = ms, or string = `"1h"`).
195
+ * - `ttl`: expire if not started within N. Auto-cancels with status
196
+ * `"expired"`.
197
+ * - `debounce`: coalesce rapid same-key triggers into one delayed run.
198
+ *
199
+ * Zero-overhead default: when none of these fields are set, the trigger
200
+ * behaves exactly as before.
201
+ */
202
+ export const SchedulingOptsFields = {
203
+ delay: DurationSchema.optional().describe("OPTIONAL. Defer the run by this many ms (number) or duration string (`'1h'`, `'30m'`, `'500ms'`). " +
204
+ "When set, the trigger schedules the run for later and returns immediately. " +
205
+ "HTTP returns 202 Accepted with `Location: /__blok/runs/:id`. Worker forwards to the adapter's native delay."),
206
+ ttl: DurationSchema.optional().describe("OPTIONAL. Expire if not started within this many ms (number) or duration string. " +
207
+ "At dispatch time, runs older than `ttl` are skipped with status `'expired'`. " +
208
+ "For HTTP, `ttl` requires `delay` to be set (otherwise immediate-dispatch makes TTL meaningless). " +
209
+ "For Worker, `ttl` is independent of `delay` (queue-time TTL applies regardless)."),
210
+ debounce: DebounceOptsSchema.optional().describe("OPTIONAL. Per-key trigger coalescing. See `DebounceOptsSchema` for modes and timing semantics."),
211
+ };
212
+ /**
213
+ * Cross-field refinement for scheduling fields. Per-trigger callers pass
214
+ * the trigger kind so HTTP and Worker can have different rules for
215
+ * `ttl`-without-`delay`.
216
+ */
217
+ export function makeSchedulingRefinement(triggerKind) {
218
+ return (val, ctx) => {
219
+ // HTTP: TTL without delay is meaningless (the request is dispatched
220
+ // immediately). Worker: TTL alone is the queue-time TTL — allowed.
221
+ if (triggerKind === "http" && val.ttl !== undefined && val.delay === undefined) {
222
+ ctx.addIssue({
223
+ code: z.ZodIssueCode.custom,
224
+ path: ["ttl"],
225
+ message: "HTTP `ttl` requires `delay` to be set (TTL is only meaningful for deferred runs).",
226
+ });
227
+ }
228
+ };
229
+ }
230
+ /**
231
+ * Standalone schema exposing just the scheduling fields. Useful for tests
232
+ * and tools. Real triggers spread {@link SchedulingOptsFields} into their
233
+ * own `z.object` and apply {@link makeSchedulingRefinement}.
234
+ */
235
+ export const SchedulingOptsSchema = z.object(SchedulingOptsFields);
236
+ // =============================================================================
237
+ // HTTP Trigger
238
+ // =============================================================================
239
+ /**
240
+ * Canonical HTTP method names.
241
+ *
242
+ * `"ANY"` is the wildcard. Legacy `"*"` (used by old JSON workflows) is
243
+ * accepted via {@link HttpMethodSchema} preprocessing and normalized to
244
+ * `"ANY"` with a one-time deprecation warning.
245
+ */
246
+ export const HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS", "ANY"];
247
+ let _wildcardWarned = false;
248
+ function warnWildcardOnce() {
249
+ if (_wildcardWarned)
250
+ return;
251
+ _wildcardWarned = true;
252
+ console.warn('[blok] trigger.http.method "*" is deprecated; use "ANY" instead. ' +
253
+ "Run `blokctl migrate workflows` to update your workflows.");
254
+ }
255
+ /**
256
+ * HTTP method schema — accepts the canonical names plus the legacy `"*"`
257
+ * (which is preprocessed to `"ANY"` with a one-time warning).
258
+ */
259
+ export const HttpMethodSchema = z.preprocess((val) => {
260
+ if (val === "*") {
261
+ warnWildcardOnce();
262
+ return "ANY";
263
+ }
264
+ return val;
265
+ }, z.enum(HTTP_METHODS));
266
+ /** Validation schema for the HTTP trigger configuration. */
267
+ export const HttpTriggerOptsSchema = z
268
+ .object({
269
+ method: HttpMethodSchema.describe("HTTP method this workflow responds to. " +
270
+ "Use 'ANY' to match all methods. The legacy '*' is accepted for back-compat but warns."),
271
+ path: z
272
+ .string()
273
+ .optional()
274
+ .describe("OPTIONAL. When set, this is the FULL URL path (e.g. '/api/users/:id'). " +
275
+ "When omitted, the URL is derived from the workflow file's location " +
276
+ "under the workflows root. Examples: " +
277
+ "workflows/users/list.ts → /users/list; " +
278
+ "workflows/users/[id].ts → /users/:id; " +
279
+ "workflows/users/index.ts → /users."),
280
+ accept: z
281
+ .string()
282
+ .default("application/json")
283
+ .describe("Default response Content-Type when the workflow doesn't set one explicitly."),
284
+ headers: z
285
+ .record(z.string(), z.any())
286
+ .optional()
287
+ .describe("Required headers for incoming requests (validated at trigger entry)."),
288
+ legacyKeyPrefix: z
289
+ .boolean()
290
+ .optional()
291
+ .describe("Opt-in back-compat for the v1 URL scheme `/<workflow-key>/<path>`. " +
292
+ "When true, the workflow is also reachable at the legacy filename-prefixed URL. " +
293
+ "Off (undefined / false) by default. Will be removed after one minor version."),
294
+ middleware: z
295
+ .array(z.string().min(1))
296
+ .optional()
297
+ .describe("v0.5 — ordered list of middleware workflow names. Each entry is the `name` of a " +
298
+ "workflow with `middleware: true` registered in the same WORKFLOWS_PATH. Middleware " +
299
+ "runs in declared order BEFORE the main workflow's steps, on the SAME ctx so " +
300
+ "state mutations (e.g. `ctx.state.identity` from auth-check) carry forward. " +
301
+ "A middleware can short-circuit by setting `ctx.response` and using a step with " +
302
+ "`stop: true` — the rest of the chain AND the main workflow are skipped, and the " +
303
+ "current ctx.response is returned to the caller. Errors thrown inside middleware " +
304
+ "propagate to the trigger's normal error handler."),
305
+ examples: z
306
+ .object({
307
+ body: z.unknown().describe("Sample request body that Studio uses in the empty-state curl example."),
308
+ })
309
+ .partial()
310
+ .optional()
311
+ .describe("OPTIONAL. Author-declared examples for Studio. Today only `body` is consumed " +
312
+ "(the empty-state curl snippet). Wins over recorded + inferred bodies. Mirror in " +
313
+ 'JSON workflows: `"trigger": { "http": { "examples": { "body": { ... } } } }`.'),
314
+ recordSample: z
315
+ .boolean()
316
+ .optional()
317
+ .describe("OPTIONAL. v0.6 — when `true`, the trigger captures the request body of the FIRST " +
318
+ "successful run and persists it as the sample body Studio shows in the empty-state " +
319
+ "curl. Subsequent runs are no-ops (one row per workflow). Off by default for " +
320
+ "privacy reasons — request bodies frequently contain PII or auth payloads. Author- " +
321
+ "declared `examples.body` always wins over a recorded sample."),
322
+ ...ConcurrencyOptsFields,
323
+ ...SchedulingOptsFields,
324
+ })
325
+ .superRefine((val, ctx) => {
326
+ concurrencyRefinement(val, ctx);
327
+ makeSchedulingRefinement("http")(val, ctx);
8
328
  });
9
- // Legacy alias for backward compatibility
329
+ /**
330
+ * Legacy alias for {@link HttpTriggerOptsSchema}. Prefer the explicit name.
331
+ *
332
+ * @deprecated Use {@link HttpTriggerOptsSchema} directly. Will be removed in
333
+ * the next minor version.
334
+ */
10
335
  export const TriggerOptsSchema = HttpTriggerOptsSchema;
11
- // Queue Trigger Options (Kafka, RabbitMQ, SQS, Redis)
12
- export const QueueProviderSchema = z.enum(["kafka", "rabbitmq", "sqs", "redis", "beanstalk"]);
336
+ // =============================================================================
337
+ // Queue Trigger (Kafka, RabbitMQ, SQS, Redis, NATS, Beanstalk)
338
+ // =============================================================================
339
+ export const QueueProviderSchema = z.enum(["kafka", "rabbitmq", "sqs", "redis", "beanstalk", "nats"]);
13
340
  export const QueueTriggerOptsSchema = z.object({
14
341
  provider: QueueProviderSchema,
15
342
  topic: z.string().describe("Topic or queue name to consume from"),
@@ -22,43 +349,235 @@ export const QueueTriggerOptsSchema = z.object({
22
349
  batchSize: z.number().default(1).describe("Number of messages to process in batch"),
23
350
  concurrency: z.number().default(1).describe("Number of concurrent consumers"),
24
351
  });
25
- // Pub/Sub Trigger Options (GCP Pub/Sub, AWS SNS, Azure Service Bus)
26
- export const PubSubProviderSchema = z.enum(["gcp", "aws", "azure"]);
352
+ // =============================================================================
353
+ // Pub/Sub Trigger (GCP Pub/Sub, AWS SNS/SQS, Azure Service Bus)
354
+ // =============================================================================
355
+ /**
356
+ * v0.7 PR 6 — supported pub/sub adapter providers.
357
+ *
358
+ * Pub/Sub is distinct from Worker: pub/sub is **1:N fan-out by default**
359
+ * (every subscriber sees every message). When `consumerGroup` is set,
360
+ * semantics shift to **competing-consumer** (1 of N within group). This
361
+ * single field disambiguates the two semantics — matching NestJS's
362
+ * `@MessagePattern` vs `@EventPattern` distinction.
363
+ *
364
+ * `nats`, `redis-streams`, and `kafka` appear in both Worker and
365
+ * Pub/Sub provider lists because the underlying brokers support both
366
+ * semantics — the same broker connection serves both adapter kinds
367
+ * via different code paths.
368
+ *
369
+ * `BLOK_PUBSUB_ADAPTER` env var sets the default when `provider` is
370
+ * omitted on the workflow.
371
+ */
372
+ export const PubSubProviderSchema = z.enum(["nats", "redis-streams", "kafka", "gcp", "aws", "azure"]);
27
373
  export const PubSubTriggerOptsSchema = z.object({
28
- provider: PubSubProviderSchema,
29
- topic: z.string().describe("Topic name to subscribe to"),
374
+ provider: PubSubProviderSchema.optional().describe("v0.7 — selects the broker adapter. Defaults to `BLOK_PUBSUB_ADAPTER` env var. Each adapter except in-memory requires its broker client as a peer dep."),
375
+ topic: z
376
+ .string()
377
+ .describe("Topic / subject / stream name. Supports broker-native wildcards (NATS subject hierarchy `orders.*.created`, Redis Streams pattern, etc.)."),
30
378
  subscription: z
31
379
  .string()
32
- .describe("Subscription name (GCP) or SQS queue URL (AWS) or Service Bus subscription (Azure)"),
380
+ .optional()
381
+ .describe("Subscription identifier. Required for GCP (subscription name), AWS (SQS queue URL bound to SNS topic), Azure (subscription name). Optional for NATS / Redis Streams / Kafka — derived from `consumerGroup` when absent."),
382
+ consumerGroup: z
383
+ .string()
384
+ .optional()
385
+ .describe("v0.7 — when set, the subscriber joins a competing-consumer group (1 of N gets each message). When unset, the subscriber fans out (every subscriber sees every message). Mirrors NestJS @MessagePattern (group) vs @EventPattern (fan-out)."),
386
+ durable: z
387
+ .boolean()
388
+ .optional()
389
+ .describe("v0.7 — when true, the subscription survives broker restarts. Maps to NATS JetStream durable, Kafka committed offsets, Redis Streams persisted groups. Default behavior is provider-specific (NATS Core: false; GCP/AWS/Azure: true)."),
390
+ startFrom: z
391
+ .union([
392
+ z.literal("earliest"),
393
+ z.literal("latest"),
394
+ z.object({ seq: z.number().int().nonnegative() }),
395
+ z.object({ timestamp: z.number().int().nonnegative() }),
396
+ ])
397
+ .optional()
398
+ .describe('v0.7 — replay position for the subscription. `"earliest"` replays the broker\'s entire retained history; `"latest"` (default) only delivers new messages; `{seq}` and `{timestamp}` resume from a specific cursor.'),
33
399
  ack: z.boolean().default(true).describe("Whether to acknowledge messages after processing"),
34
400
  maxMessages: z.number().default(10).describe("Maximum messages to receive at once"),
35
401
  ackDeadline: z.number().default(30).describe("Acknowledgment deadline in seconds"),
36
402
  deadLetterTopic: z.string().optional().describe("Dead letter topic for failed messages"),
37
- filter: z.string().optional().describe("Message filter expression"),
403
+ filter: z.string().optional().describe("Message filter expression (provider-specific)"),
38
404
  });
39
- // Worker Trigger Options (background jobs)
40
- export const WorkerTriggerOptsSchema = z.object({
41
- queue: z.string().describe("Worker queue name"),
42
- concurrency: z.number().default(1).describe("Number of concurrent workers"),
405
+ // =============================================================================
406
+ // Worker Trigger (background jobs)
407
+ // =============================================================================
408
+ /**
409
+ * v0.7 — supported worker adapter providers. `BLOK_WORKER_ADAPTER`
410
+ * env var sets the default when `provider` is omitted on the workflow
411
+ * (per Q7 resolution). `in-memory` is the dev/test fallback.
412
+ *
413
+ * `nats` and `bullmq` shipped pre-v0.7. The other five (kafka,
414
+ * rabbitmq, sqs, redis, pg-boss) are new in PR 5 — each ships behind a
415
+ * peer-dependency so workflows that don't use that provider don't
416
+ * pay the install cost.
417
+ */
418
+ export const WorkerProviderSchema = z.enum([
419
+ "in-memory",
420
+ "nats",
421
+ "bullmq",
422
+ "kafka",
423
+ "rabbitmq",
424
+ "sqs",
425
+ "redis",
426
+ "pg-boss",
427
+ ]);
428
+ export const WorkerTriggerOptsSchema = z
429
+ .object({
430
+ queue: z.string().describe("Worker queue / topic / stream name (provider-specific semantics)."),
431
+ provider: WorkerProviderSchema.optional().describe("v0.7 — selects the broker adapter at runtime. Defaults to `BLOK_WORKER_ADAPTER` env var or `in-memory` when neither is set. Each adapter except `in-memory` requires its broker client library as a peer dependency (bullmq / kafkajs / amqplib / @aws-sdk/client-sqs / ioredis / pg-boss / nats)."),
432
+ concurrency: z
433
+ .number()
434
+ .default(1)
435
+ .describe("Number of concurrent consumers (parallelism cap). Orthogonal to `concurrencyKey` — " +
436
+ "`concurrency` is the consumer count; `concurrencyKey` is per-key fairness within those consumers."),
43
437
  timeout: z.number().optional().describe("Job timeout in milliseconds"),
44
438
  retries: z.number().default(3).describe("Number of retry attempts"),
45
439
  priority: z.number().default(0).describe("Job priority (higher = more priority)"),
46
- delay: z.number().optional().describe("Delay before processing in milliseconds"),
440
+ consumerGroup: z
441
+ .string()
442
+ .optional()
443
+ .describe("Provider-specific consumer-group / subscriber identifier. Required for Kafka, optional but recommended for NATS JetStream / Redis Streams. Lets multiple processes share the work for a given queue/topic."),
444
+ ack: z
445
+ .boolean()
446
+ .optional()
447
+ .describe("v0.7 — when true (default per provider), the adapter ACKs messages only after successful workflow run. When false, messages are auto-ACKed on receive (at-most-once delivery)."),
448
+ deadLetterQueue: z
449
+ .string()
450
+ .optional()
451
+ .describe("v0.7 — destination queue/topic for messages that exhausted retries. Empty disables dead-letter routing."),
452
+ fromBeginning: z
453
+ .boolean()
454
+ .optional()
455
+ .describe("v0.7 — Kafka-specific: when true, consumer starts from earliest offset; false (default) starts from latest committed."),
456
+ // `delay` (and `ttl`, `debounce`) live in SchedulingOptsFields below.
457
+ // The legacy number-only `delay` (pre-Tier-2-#5+#7) is superseded by
458
+ // the duration-or-number SchedulingOptsFields.delay. Number values
459
+ // remain accepted for back-compat — only the type widens.
460
+ ...ConcurrencyOptsFields,
461
+ ...SchedulingOptsFields,
462
+ })
463
+ .superRefine((val, ctx) => {
464
+ concurrencyRefinement(val, ctx);
465
+ makeSchedulingRefinement("worker")(val, ctx);
47
466
  });
48
- // Cron Trigger Options (scheduled workflows)
467
+ // =============================================================================
468
+ // Cron Trigger (scheduled workflows)
469
+ // =============================================================================
49
470
  export const CronTriggerOptsSchema = z.object({
50
471
  schedule: z.string().describe("Cron expression (e.g., '0 * * * *' for hourly)"),
51
472
  timezone: z.string().default("UTC").describe("Timezone for schedule evaluation"),
52
473
  overlap: z.boolean().default(false).describe("Allow overlapping executions"),
53
474
  });
54
- // Webhook Trigger Options (external service events)
475
+ // =============================================================================
476
+ // Webhook Trigger (external service events)
477
+ // =============================================================================
478
+ /**
479
+ * v0.7 — custom signature scheme for unknown webhook providers.
480
+ *
481
+ * Authors who use a webhook source NOT in the built-in `provider`
482
+ * allowlist (github / stripe / slack / shopify / svix) supply this
483
+ * config; the trigger does HMAC verification using these fields.
484
+ *
485
+ * The `format` template names the layout of the signature payload —
486
+ * `{hex}` is substituted with the hex digest at verify time. Supports
487
+ * common shapes:
488
+ * - GitHub-style: `sha256={hex}`
489
+ * - Plain hex: `{hex}`
490
+ * - Base64: `{base64}` (encoded variant)
491
+ *
492
+ * When `timestampHeader` is set, the verifier mixes the header value
493
+ * into the signed string as `{timestamp}.{rawBody}` (Stripe pattern)
494
+ * and rejects deliveries whose timestamp drifted by more than
495
+ * `tolerance` seconds (default 300s / 5min).
496
+ */
497
+ export const WebhookCustomSignatureSchema = z.object({
498
+ scheme: z
499
+ .enum(["hmac-sha256", "hmac-sha1", "hmac-sha512"])
500
+ .default("hmac-sha256")
501
+ .describe("HMAC algorithm. SHA-256 is the de facto standard for webhooks."),
502
+ header: z.string().describe("Header name carrying the signature value (e.g. 'X-Acme-Signature')."),
503
+ format: z
504
+ .string()
505
+ .default("{hex}")
506
+ .describe("Layout of the signature payload. `{hex}` and `{base64}` placeholders are substituted with the digest."),
507
+ secretEnv: z.string().describe("Env-var name holding the shared secret. Never inline the secret value."),
508
+ tolerance: z
509
+ .number()
510
+ .int()
511
+ .positive()
512
+ .default(300)
513
+ .describe("Allowed clock skew between webhook origin and this server, in seconds. Used with timestampHeader."),
514
+ timestampHeader: z
515
+ .string()
516
+ .optional()
517
+ .describe("Header name carrying the request timestamp. When set, signature payload becomes `{timestamp}.{rawBody}` and the timestamp is range-checked against `tolerance`."),
518
+ });
519
+ /**
520
+ * v0.7 webhook trigger config. See the v0.7 additional-triggers plan
521
+ * for the full design.
522
+ *
523
+ * **`provider`** selects a built-in verifier (GitHub, Stripe, Slack,
524
+ * Shopify, Standard Webhooks via Svix). For unknown providers, omit
525
+ * `provider` and supply `signature` instead.
526
+ *
527
+ * **`namespace`** (combined with a polymorphic `subworkflow` step in
528
+ * the workflow body) lets one trigger workflow dispatch to many
529
+ * per-event handler workflows by name. Example: webhook fires with
530
+ * `body.type === "invoice.paid"` and namespace `"stripe"` resolves
531
+ * to sub-workflow `"stripe.invoice.paid"`.
532
+ *
533
+ * **`secretEnv`** is the env var name to read the shared secret from
534
+ * — the verifier never sees the secret value at config time, and
535
+ * workflow JSON files never contain it.
536
+ *
537
+ * **`events`** (allowlist) skips verifier-validated deliveries that
538
+ * are NOT in the list — returns 200 with `{status: "ignored"}` so
539
+ * the sender doesn't retry. Absent allowlist = accept all events.
540
+ */
55
541
  export const WebhookTriggerOptsSchema = z.object({
56
- source: z.string().describe("Source service (github, stripe, shopify, etc.)"),
57
- events: z.array(z.string()).describe("Event types to listen for"),
58
- secret: z.string().optional().describe("Webhook secret for verification"),
59
- path: z.string().optional().describe("Custom webhook path"),
542
+ provider: z
543
+ .enum(["github", "stripe", "slack", "shopify", "svix"])
544
+ .optional()
545
+ .describe("Built-in webhook provider. When set, the trigger uses the provider's signature scheme + event-id field automatically; `signature` is ignored. For unknown providers, omit this and supply `signature`."),
546
+ path: z
547
+ .string()
548
+ .optional()
549
+ .describe("HTTP path the trigger mounts the POST handler on. Defaults to `/webhooks/{provider}` when `provider` is set."),
550
+ events: z
551
+ .array(z.string())
552
+ .optional()
553
+ .describe("Optional allowlist of event types. Deliveries whose event type isn't in the list return 200 (no retry) but don't run the workflow. Provider-specific event identifiers — GitHub: X-GitHub-Event header; Stripe/Svix: body.type field."),
554
+ secretEnv: z
555
+ .string()
556
+ .optional()
557
+ .describe("Env-var name holding the shared secret. Required for `provider` (built-in) or when `signature.secretEnv` is unset. Never inline the secret value."),
558
+ signature: WebhookCustomSignatureSchema.optional().describe("Custom signature scheme for unknown providers. Ignored when `provider` is set."),
559
+ tolerance: z
560
+ .number()
561
+ .int()
562
+ .positive()
563
+ .optional()
564
+ .describe("Replay-tolerance window in seconds — Stripe/Svix/custom-with-timestampHeader only. Default 300s. Used to range-check the signed timestamp."),
565
+ idempotencyKey: z
566
+ .string()
567
+ .optional()
568
+ .describe("Replay-protection cache key — typically a mapper expression resolving to the provider's event id (Stripe's `body.id`, GitHub's `X-GitHub-Delivery`, Svix's `webhook-id`). When set, the verified event is recorded in the idempotency cache; subsequent deliveries with the same key return 200 with `{status: 'duplicate', eventId}` and don't run the workflow."),
569
+ namespace: z
570
+ .string()
571
+ .optional()
572
+ .describe("Optional prefix prepended to polymorphic sub-workflow names. Example: `namespace: 'stripe'` + `subworkflow: '$.req.body.type'` resolving to `'invoice.paid'` looks up `'stripe.invoice.paid'`."),
573
+ middleware: z
574
+ .array(z.string())
575
+ .optional()
576
+ .describe("Trigger-level middleware chain (runs after workflow-level middleware, before workflow body)."),
60
577
  });
61
- // WebSocket Trigger Options (real-time bidirectional)
578
+ // =============================================================================
579
+ // WebSocket Trigger (real-time bidirectional)
580
+ // =============================================================================
62
581
  export const WebSocketTriggerOptsSchema = z.object({
63
582
  events: z.array(z.string()).default(["*"]).describe("Event names to listen for (supports wildcards)"),
64
583
  rooms: z.array(z.string()).optional().describe("Room/channel filters"),
@@ -67,7 +586,9 @@ export const WebSocketTriggerOptsSchema = z.object({
67
586
  heartbeatInterval: z.number().default(30000).describe("Heartbeat interval in milliseconds"),
68
587
  messageRateLimit: z.number().default(100).describe("Max messages per second per client"),
69
588
  });
70
- // SSE Trigger Options (Server-Sent Events)
589
+ // =============================================================================
590
+ // SSE Trigger (Server-Sent Events)
591
+ // =============================================================================
71
592
  export const SSETriggerOptsSchema = z.object({
72
593
  events: z.array(z.string()).default(["*"]).describe("Event names to emit"),
73
594
  channels: z.array(z.string()).optional().describe("Channel filters"),
@@ -76,7 +597,10 @@ export const SSETriggerOptsSchema = z.object({
76
597
  heartbeatInterval: z.number().default(30000).describe("Heartbeat interval in milliseconds"),
77
598
  retryInterval: z.number().default(3000).describe("Client retry interval in milliseconds"),
78
599
  });
79
- // All trigger types
600
+ // =============================================================================
601
+ // Trigger registry (the dispatch table)
602
+ // =============================================================================
603
+ /** All trigger names supported by Blok. */
80
604
  export const TriggersSchema = z.enum([
81
605
  "http",
82
606
  "grpc",
@@ -89,4 +613,52 @@ export const TriggersSchema = z.enum([
89
613
  "sse",
90
614
  "websocket",
91
615
  ]);
616
+ /**
617
+ * Map of trigger name → validation schema. `null` means the trigger does not
618
+ * have a required configuration shape (currently `grpc` and `manual`).
619
+ *
620
+ * Single source of truth for runtime trigger-config validation. Used by
621
+ * {@link validateTriggerConfig}.
622
+ */
623
+ export const TRIGGER_SCHEMAS = {
624
+ http: HttpTriggerOptsSchema,
625
+ queue: QueueTriggerOptsSchema,
626
+ pubsub: PubSubTriggerOptsSchema,
627
+ worker: WorkerTriggerOptsSchema,
628
+ cron: CronTriggerOptsSchema,
629
+ webhook: WebhookTriggerOptsSchema,
630
+ sse: SSETriggerOptsSchema,
631
+ websocket: WebSocketTriggerOptsSchema,
632
+ grpc: null,
633
+ manual: null,
634
+ };
635
+ /**
636
+ * Validate a trigger configuration against the schema for the given trigger
637
+ * kind. When the trigger has a schema, returns the parsed config (with
638
+ * defaults applied). When the trigger has no schema (`grpc`, `manual`),
639
+ * returns the input config (or an empty object when `undefined`).
640
+ *
641
+ * Throws when the trigger requires a schema and the config is missing or
642
+ * invalid. The thrown error is either a `ZodError` (for shape violations) or
643
+ * a regular `Error` (when config is missing entirely).
644
+ *
645
+ * @example
646
+ * const cfg = validateTriggerConfig("cron", { schedule: "0 * * * *" });
647
+ * // cfg.timezone === "UTC" (default applied)
648
+ *
649
+ * @example
650
+ * validateTriggerConfig("cron", undefined);
651
+ * // throws: 'Trigger "cron" requires a configuration object.'
652
+ */
653
+ export function validateTriggerConfig(name, config) {
654
+ const schema = TRIGGER_SCHEMAS[name];
655
+ if (schema === null) {
656
+ // Triggers with no schema accept anything (including undefined).
657
+ return config ?? {};
658
+ }
659
+ if (config === undefined) {
660
+ throw new Error(`Trigger "${name}" requires a configuration object. See ${name.charAt(0).toUpperCase()}${name.slice(1)}TriggerOpts.`);
661
+ }
662
+ return schema.parse(config);
663
+ }
92
664
  //# sourceMappingURL=TriggerOpts.js.map