@uploadista/core 0.2.0 → 1.0.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/dist/{checksum-BjP9nb5b.mjs → checksum-BRjFmTRk.mjs} +2 -2
  2. package/dist/{checksum-BjP9nb5b.mjs.map → checksum-BRjFmTRk.mjs.map} +1 -1
  3. package/dist/{checksum-B7RDiO7V.cjs → checksum-BrjQ8GJL.cjs} +1 -1
  4. package/dist/errors/index.cjs +1 -1
  5. package/dist/errors/index.d.cts +1 -1
  6. package/dist/errors/index.d.mts +1 -1
  7. package/dist/errors/index.mjs +1 -1
  8. package/dist/flow/index.cjs +1 -1
  9. package/dist/flow/index.d.cts +3 -2
  10. package/dist/flow/index.d.mts +8 -5
  11. package/dist/flow/index.mjs +1 -1
  12. package/dist/generate-id-BAMRQzMr.d.cts +34 -0
  13. package/dist/generate-id-BAMRQzMr.d.cts.map +1 -0
  14. package/dist/generate-id-DuZwLm4m.d.mts +34 -0
  15. package/dist/generate-id-DuZwLm4m.d.mts.map +1 -0
  16. package/dist/index.cjs +1 -1
  17. package/dist/index.d.cts +8 -5
  18. package/dist/index.d.mts +8 -5
  19. package/dist/index.mjs +1 -1
  20. package/dist/middleware-BghazxzH.d.cts +4129 -0
  21. package/dist/middleware-BghazxzH.d.cts.map +1 -0
  22. package/dist/middleware-CYizzAhP.d.mts +4129 -0
  23. package/dist/middleware-CYizzAhP.d.mts.map +1 -0
  24. package/dist/resolve-upload-metadata-CYl2PHIs.d.mts +4542 -0
  25. package/dist/resolve-upload-metadata-CYl2PHIs.d.mts.map +1 -0
  26. package/dist/resolve-upload-metadata-D0qFuyWc.d.cts +4542 -0
  27. package/dist/resolve-upload-metadata-D0qFuyWc.d.cts.map +1 -0
  28. package/dist/run-args-CM14Vtzu.cjs +1 -0
  29. package/dist/run-args-DSKHoSWs.mjs +2 -0
  30. package/dist/run-args-DSKHoSWs.mjs.map +1 -0
  31. package/dist/{stream-limiter-BCFULdAM.d.cts → stream-limiter-7wkBVLWT.d.mts} +2 -2
  32. package/dist/{stream-limiter-BCFULdAM.d.cts.map → stream-limiter-7wkBVLWT.d.mts.map} +1 -1
  33. package/dist/{stream-limiter-DZ22uIqf.cjs → stream-limiter-B-Y0DTgA.cjs} +1 -1
  34. package/dist/{stream-limiter-CTJPEJqE.mjs → stream-limiter-CvDuNIyd.mjs} +2 -2
  35. package/dist/{stream-limiter-CTJPEJqE.mjs.map → stream-limiter-CvDuNIyd.mjs.map} +1 -1
  36. package/dist/{stream-limiter-Bi7OTbRp.d.mts → stream-limiter-D1KC-6pK.d.cts} +2 -2
  37. package/dist/{stream-limiter-Bi7OTbRp.d.mts.map → stream-limiter-D1KC-6pK.d.cts.map} +1 -1
  38. package/dist/streams/index.cjs +1 -1
  39. package/dist/streams/index.d.cts +1 -1
  40. package/dist/streams/index.d.mts +2 -2
  41. package/dist/streams/index.mjs +1 -1
  42. package/dist/testing/index.cjs +1 -1
  43. package/dist/testing/index.d.cts +2 -1
  44. package/dist/testing/index.d.cts.map +1 -1
  45. package/dist/testing/index.d.mts +7 -4
  46. package/dist/testing/index.d.mts.map +1 -1
  47. package/dist/testing/index.mjs +1 -1
  48. package/dist/{throttle-Da0OA8JT.d.cts → throttle-3FRcr7MU.d.mts} +4 -34
  49. package/dist/throttle-3FRcr7MU.d.mts.map +1 -0
  50. package/dist/{throttle-ibiT6E4U.d.mts → throttle-BlH27EGu.d.cts} +4 -34
  51. package/dist/throttle-BlH27EGu.d.cts.map +1 -0
  52. package/dist/{throttle-KnkRgZPi.cjs → throttle-Dp59f37i.cjs} +1 -1
  53. package/dist/{throttle-CnDa3v1k.mjs → throttle-TFY-V41R.mjs} +2 -2
  54. package/dist/{throttle-CnDa3v1k.mjs.map → throttle-TFY-V41R.mjs.map} +1 -1
  55. package/dist/types/index.cjs +1 -1
  56. package/dist/types/index.d.cts +2 -2
  57. package/dist/types/index.d.mts +3 -5
  58. package/dist/types/index.mjs +1 -1
  59. package/dist/upload/index.cjs +1 -1
  60. package/dist/upload/index.d.cts +1 -1
  61. package/dist/upload/index.d.mts +4 -4
  62. package/dist/upload/index.mjs +1 -1
  63. package/dist/upload-strategy-negotiator-0-dpNIce.d.cts +455 -0
  64. package/dist/upload-strategy-negotiator-0-dpNIce.d.cts.map +1 -0
  65. package/dist/upload-strategy-negotiator-BR_o1Ez8.cjs +1 -0
  66. package/dist/upload-strategy-negotiator-C9MeoOnW.mjs +2 -0
  67. package/dist/upload-strategy-negotiator-C9MeoOnW.mjs.map +1 -0
  68. package/dist/upload-strategy-negotiator-CEnlfVgJ.d.mts +455 -0
  69. package/dist/upload-strategy-negotiator-CEnlfVgJ.d.mts.map +1 -0
  70. package/dist/{uploadista-error-B-geDgi8.cjs → uploadista-error-CZx1JU_L.cjs} +3 -1
  71. package/dist/{uploadista-error-Fsfvr2Bb.mjs → uploadista-error-DQ7V1FlX.mjs} +3 -1
  72. package/dist/uploadista-error-DQ7V1FlX.mjs.map +1 -0
  73. package/dist/{uploadista-error-BragVhIs.d.mts → uploadista-error-LtiZn-R_.d.mts} +2 -2
  74. package/dist/{uploadista-error-BragVhIs.d.mts.map → uploadista-error-LtiZn-R_.d.mts.map} +1 -1
  75. package/dist/{uploadista-error-Cj_pAFck.d.cts → uploadista-error-eZtG4iyf.d.cts} +2 -2
  76. package/dist/{uploadista-error-Cj_pAFck.d.cts.map → uploadista-error-eZtG4iyf.d.cts.map} +1 -1
  77. package/dist/utils/index.cjs +1 -1
  78. package/dist/utils/index.d.cts +2 -1
  79. package/dist/utils/index.d.mts +3 -2
  80. package/dist/utils/index.mjs +1 -1
  81. package/dist/websocket-Br0ijEZA.cjs +1 -0
  82. package/dist/websocket-DftnHFfN.mjs +2 -0
  83. package/dist/websocket-DftnHFfN.mjs.map +1 -0
  84. package/package.json +3 -3
  85. package/src/errors/uploadista-error.ts +11 -1
  86. package/src/flow/README.md +115 -0
  87. package/src/flow/flow-engine.ts +36 -2
  88. package/src/flow/flow-queue-store.ts +155 -0
  89. package/src/flow/flow-queue.ts +640 -0
  90. package/src/flow/index.ts +4 -0
  91. package/src/flow/types/flow-queue-item.ts +154 -0
  92. package/src/types/data-store.ts +3 -3
  93. package/src/types/kv-store.ts +31 -1
  94. package/src/upload/write-to-store.ts +24 -29
  95. package/tests/flow-queue-store.test.ts +150 -0
  96. package/tests/flow-queue.test.ts +308 -0
  97. package/dist/resolve-upload-metadata-BUVl1LoS.d.cts +0 -8723
  98. package/dist/resolve-upload-metadata-BUVl1LoS.d.cts.map +0 -1
  99. package/dist/resolve-upload-metadata-MPDmDfOZ.d.mts +0 -8723
  100. package/dist/resolve-upload-metadata-MPDmDfOZ.d.mts.map +0 -1
  101. package/dist/run-args-WD1otVrz.mjs +0 -2
  102. package/dist/run-args-WD1otVrz.mjs.map +0 -1
  103. package/dist/run-args-g74p8pEZ.cjs +0 -1
  104. package/dist/throttle-Da0OA8JT.d.cts.map +0 -1
  105. package/dist/throttle-ibiT6E4U.d.mts.map +0 -1
  106. package/dist/upload-strategy-negotiator-BuxPf1sa.mjs +0 -2
  107. package/dist/upload-strategy-negotiator-BuxPf1sa.mjs.map +0 -1
  108. package/dist/upload-strategy-negotiator-DfiQ0Fy0.cjs +0 -1
  109. package/dist/uploadista-error-Fsfvr2Bb.mjs.map +0 -1
  110. package/dist/websocket-Avz4T8YB.cjs +0 -1
  111. package/dist/websocket-CdgVhVJs.mjs +0 -2
  112. package/dist/websocket-CdgVhVJs.mjs.map +0 -1
package/dist/middleware-CYizzAhP.d.mts
@@ -0,0 +1,4129 @@
+ import { n as UploadistaError } from "./uploadista-error-LtiZn-R_.mjs";
+ import { Context, Effect, Layer, Stream } from "effect";
+ import z$1, { z } from "zod";
+
+ //#region src/types/circuit-breaker-store.d.ts
+ /**
+  * Circuit breaker state values.
+  */
+ type CircuitBreakerStateValue = "closed" | "open" | "half-open";
+ /**
+  * Persisted circuit breaker state data.
+  *
+  * This represents the full state of a circuit breaker that needs to be
+  * stored and shared across instances.
+  */
+ interface CircuitBreakerStateData {
+   /** Current circuit state */
+   state: CircuitBreakerStateValue;
+   /** Number of failures in current window */
+   failureCount: number;
+   /** Timestamp of last state transition */
+   lastStateChange: number;
+   /** Number of successful requests in half-open state */
+   halfOpenSuccesses: number;
+   /** Timestamp when the current failure window started */
+   windowStart: number;
+   /** Configuration snapshot for consistency */
+   config: {
+     failureThreshold: number;
+     resetTimeout: number;
+     halfOpenRequests: number;
+     windowDuration: number;
+   };
+ }
+ /**
+  * Statistics about a circuit breaker.
+  */
+ interface CircuitBreakerStats {
+   nodeType: string;
+   state: CircuitBreakerStateValue;
+   failureCount: number;
+   halfOpenSuccesses: number;
+   timeSinceLastStateChange: number;
+   timeUntilHalfOpen?: number;
+ }
+ /**
+  * Interface for circuit breaker state storage.
+  *
+  * Implementations should handle distributed state for circuit breakers,
+  * allowing multiple instances to share circuit state. The interface is
+  * designed to work with eventually consistent stores - perfect consistency
+  * is not required for circuit breaker functionality.
+  *
+  * @example
+  * ```typescript
+  * // Using the store
+  * const store: CircuitBreakerStore = yield* CircuitBreakerStoreService;
+  *
+  * // Record a failure
+  * const newCount = yield* store.incrementFailures("describe-image", 60000);
+  * if (newCount >= 5) {
+  *   yield* store.setState("describe-image", {
+  *     state: "open",
+  *     failureCount: newCount,
+  *     lastStateChange: Date.now(),
+  *     // ...
+  *   });
+  * }
+  * ```
+  */
+ interface CircuitBreakerStore {
+   /**
+    * Gets the current state data for a circuit breaker.
+    *
+    * @param nodeType - The node type identifier
+    * @returns The state data or null if no state exists
+    */
+   readonly getState: (nodeType: string) => Effect.Effect<CircuitBreakerStateData | null, UploadistaError>;
+   /**
+    * Sets the complete state for a circuit breaker.
+    *
+    * @param nodeType - The node type identifier
+    * @param state - The new state data
+    */
+   readonly setState: (nodeType: string, state: CircuitBreakerStateData) => Effect.Effect<void, UploadistaError>;
+   /**
+    * Increments the failure count and returns the new count.
+    *
+    * This operation should be atomic where possible. For stores that don't
+    * support atomic increment, a read-modify-write is acceptable as circuit
+    * breakers tolerate eventual consistency.
+    *
+    * The implementation should also handle window expiry - if the window
+    * has expired, reset the count before incrementing.
+    *
+    * @param nodeType - The node type identifier
+    * @param windowDuration - Duration of the sliding window in milliseconds
+    * @returns The new failure count after incrementing
+    */
+   readonly incrementFailures: (nodeType: string, windowDuration: number) => Effect.Effect<number, UploadistaError>;
+   /**
+    * Resets the failure count to zero.
+    *
+    * Called when circuit closes or on successful requests.
+    *
+    * @param nodeType - The node type identifier
+    */
+   readonly resetFailures: (nodeType: string) => Effect.Effect<void, UploadistaError>;
+   /**
+    * Increments the half-open success count.
+    *
+    * @param nodeType - The node type identifier
+    * @returns The new half-open success count
+    */
+   readonly incrementHalfOpenSuccesses: (nodeType: string) => Effect.Effect<number, UploadistaError>;
+   /**
+    * Gets statistics for all tracked circuit breakers.
+    *
+    * @returns Map of node type to stats
+    */
+   readonly getAllStats: () => Effect.Effect<Map<string, CircuitBreakerStats>, UploadistaError>;
+   /**
+    * Deletes circuit breaker state for a node type.
+    *
+    * @param nodeType - The node type identifier
+    */
+   readonly delete: (nodeType: string) => Effect.Effect<void, UploadistaError>;
+ }
+ declare const CircuitBreakerStoreService_base: Context.TagClass<CircuitBreakerStoreService, "CircuitBreakerStoreService", CircuitBreakerStore>;
+ /**
+  * Effect-TS context tag for the CircuitBreakerStore service.
+  *
+  * Use this to inject a circuit breaker store into your Effect programs.
+  *
+  * @example
+  * ```typescript
+  * const program = Effect.gen(function* () {
+  *   const cbStore = yield* CircuitBreakerStoreService;
+  *   const state = yield* cbStore.getState("my-node-type");
+  *   // ...
+  * });
+  *
+  * // Provide the implementation
+  * const result = yield* program.pipe(
+  *   Effect.provide(kvCircuitBreakerStoreLayer)
+  * );
+  * ```
+  */
+ declare class CircuitBreakerStoreService extends CircuitBreakerStoreService_base {}
+ /**
+  * Creates a default initial state for a circuit breaker.
+  *
+  * @param config - Circuit breaker configuration
+  * @returns Initial state data with closed circuit
+  */
+ declare function createInitialCircuitBreakerState(config: {
+   failureThreshold: number;
+   resetTimeout: number;
+   halfOpenRequests: number;
+   windowDuration: number;
+ }): CircuitBreakerStateData;
+ //#endregion
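The store contract above leaves persistence to the implementer. As a rough illustration (not part of this package), a single-instance, in-memory implementation of `CircuitBreakerStore` could look like the sketch below; `makeInMemoryCircuitBreakerStore` is a hypothetical name, and a distributed deployment would back the same interface with a shared KV store instead.

```typescript
import { Effect } from "effect";

// Hypothetical in-memory CircuitBreakerStore; not exported by the package.
const makeInMemoryCircuitBreakerStore = (
  defaults: CircuitBreakerStateData["config"] = {
    failureThreshold: 5,
    resetTimeout: 30_000,
    halfOpenRequests: 3,
    windowDuration: 60_000,
  },
): CircuitBreakerStore => {
  const states = new Map<string, CircuitBreakerStateData>();
  // Fall back to a fresh closed circuit when no state exists yet.
  const stateFor = (nodeType: string): CircuitBreakerStateData =>
    states.get(nodeType) ?? createInitialCircuitBreakerState(defaults);

  return {
    getState: (nodeType) => Effect.sync(() => states.get(nodeType) ?? null),
    setState: (nodeType, state) =>
      Effect.sync(() => {
        states.set(nodeType, state);
      }),
    incrementFailures: (nodeType, windowDuration) =>
      Effect.sync(() => {
        const now = Date.now();
        let s = stateFor(nodeType);
        // Per the contract: restart the failure window if it has expired.
        if (now - s.windowStart > windowDuration) {
          s = { ...s, failureCount: 0, windowStart: now };
        }
        s = { ...s, failureCount: s.failureCount + 1 };
        states.set(nodeType, s);
        return s.failureCount;
      }),
    resetFailures: (nodeType) =>
      Effect.sync(() => {
        states.set(nodeType, { ...stateFor(nodeType), failureCount: 0 });
      }),
    incrementHalfOpenSuccesses: (nodeType) =>
      Effect.sync(() => {
        const s = stateFor(nodeType);
        const next = { ...s, halfOpenSuccesses: s.halfOpenSuccesses + 1 };
        states.set(nodeType, next);
        return next.halfOpenSuccesses;
      }),
    getAllStats: () =>
      Effect.sync(() => {
        const now = Date.now();
        const stats = new Map<string, CircuitBreakerStats>();
        for (const [nodeType, s] of states) {
          stats.set(nodeType, {
            nodeType,
            state: s.state,
            failureCount: s.failureCount,
            halfOpenSuccesses: s.halfOpenSuccesses,
            timeSinceLastStateChange: now - s.lastStateChange,
          });
        }
        return stats;
      }),
    delete: (nodeType) =>
      Effect.sync(() => {
        states.delete(nodeType);
      }),
  };
};
```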
+ //#region src/flow/types/dead-letter-item.d.ts
+ /**
+  * Dead Letter Queue item types and definitions.
+  *
+  * A DeadLetterItem represents a failed flow job that has been captured
+  * for later retry, debugging, or manual intervention.
+  *
+  * @module flow/types/dead-letter-item
+  * @see {@link DeadLetterQueueService} for DLQ operations
+  */
+ /**
+  * Status of a Dead Letter Queue item.
+  *
+  * Item lifecycle: pending → retrying → (pending | exhausted | resolved)
+  *
+  * - `pending`: Awaiting retry (either scheduled or manual)
+  * - `retrying`: Currently being retried
+  * - `exhausted`: Max retries reached, requires manual intervention
+  * - `resolved`: Successfully retried or manually resolved
+  */
+ type DeadLetterItemStatus = "pending" | "retrying" | "exhausted" | "resolved";
+ /**
+  * Error details captured when a flow job fails.
+  *
+  * Contains comprehensive error information for debugging and retry decisions.
+  *
+  * @property code - Error code (e.g., "FLOW_NODE_ERROR", "VALIDATION_ERROR")
+  * @property message - Human-readable error message
+  * @property nodeId - ID of the node that failed (if applicable)
+  * @property stack - Stack trace (included in development mode)
+  */
+ interface DeadLetterError {
+   /** Error code for categorization and retry filtering */
+   code: string;
+   /** Human-readable error message */
+   message: string;
+   /** Node that failed (if applicable) */
+   nodeId?: string;
+   /** Stack trace (in dev mode only) */
+   stack?: string;
+ }
+ /**
+  * Record of a single retry attempt.
+  *
+  * @property attemptedAt - When the retry was attempted
+  * @property error - Error message if the retry failed
+  * @property durationMs - How long the retry took
+  */
+ interface DeadLetterRetryAttempt {
+   /** When the retry was attempted */
+   attemptedAt: Date;
+   /** Error message if the retry failed */
+   error: string;
+   /** Duration of the retry attempt in milliseconds */
+   durationMs: number;
+ }
+ /**
+  * Represents a failed flow job captured in the Dead Letter Queue.
+  *
+  * Contains all information needed to debug, retry, or manually resolve
+  * a failed flow execution.
+  *
+  * @property id - Unique DLQ item identifier
+  * @property jobId - Original flow job ID that failed
+  * @property flowId - Flow definition that was being executed
+  * @property storageId - Target storage for the flow
+  * @property clientId - Client who initiated the job
+  * @property error - Comprehensive error details
+  * @property inputs - Original inputs passed to the flow
+  * @property nodeResults - Partial results from nodes that completed before failure
+  * @property failedAtNodeId - Node where execution failed (if applicable)
+  * @property retryCount - Number of retry attempts made
+  * @property maxRetries - Maximum retries allowed from retry policy
+  * @property nextRetryAt - Scheduled time for next automatic retry
+  * @property retryHistory - History of all retry attempts
+  * @property createdAt - When the item was added to DLQ
+  * @property updatedAt - When the item was last modified
+  * @property expiresAt - TTL for automatic cleanup
+  * @property status - Current status of the DLQ item
+  *
+  * @example
+  * ```typescript
+  * const dlqItem: DeadLetterItem = {
+  *   id: "dlq_abc123",
+  *   jobId: "job_xyz789",
+  *   flowId: "image-resize-pipeline",
+  *   storageId: "s3-production",
+  *   clientId: "client_456",
+  *   error: {
+  *     code: "FLOW_NODE_ERROR",
+  *     message: "External service timeout",
+  *     nodeId: "resize-node"
+  *   },
+  *   inputs: { input: { uploadId: "upload_123" } },
+  *   nodeResults: { "input-node": { file: {...} } },
+  *   failedAtNodeId: "resize-node",
+  *   retryCount: 2,
+  *   maxRetries: 3,
+  *   nextRetryAt: new Date("2024-01-15T10:35:00Z"),
+  *   retryHistory: [
+  *     { attemptedAt: new Date("2024-01-15T10:30:00Z"), error: "Timeout", durationMs: 5000 },
+  *     { attemptedAt: new Date("2024-01-15T10:32:00Z"), error: "Timeout", durationMs: 5000 }
+  *   ],
+  *   createdAt: new Date("2024-01-15T10:30:00Z"),
+  *   updatedAt: new Date("2024-01-15T10:32:00Z"),
+  *   expiresAt: new Date("2024-01-22T10:30:00Z"),
+  *   status: "pending"
+  * };
+  * ```
+  */
+ interface DeadLetterItem {
+   /** Unique DLQ item identifier */
+   id: string;
+   /** Original flow job ID that failed */
+   jobId: string;
+   /** Flow definition ID that was being executed */
+   flowId: string;
+   /** Target storage for the flow */
+   storageId: string;
+   /** Client who initiated the job (null for anonymous) */
+   clientId: string | null;
+   /** Comprehensive error details */
+   error: DeadLetterError;
+   /** Original inputs passed to the flow */
+   inputs: Record<string, unknown>;
+   /** Partial results from nodes that completed before failure */
+   nodeResults: Record<string, unknown>;
+   /** Node where execution failed (if applicable) */
+   failedAtNodeId?: string;
+   /** Number of retry attempts made */
+   retryCount: number;
+   /** Maximum retries allowed from retry policy */
+   maxRetries: number;
+   /** Scheduled time for next automatic retry */
+   nextRetryAt?: Date;
+   /** History of all retry attempts */
+   retryHistory: DeadLetterRetryAttempt[];
+   /** When the item was added to DLQ */
+   createdAt: Date;
+   /** When the item was last modified */
+   updatedAt: Date;
+   /** TTL for automatic cleanup */
+   expiresAt?: Date;
+   /** Current status of the DLQ item */
+   status: DeadLetterItemStatus;
+ }
+ /**
+  * Statistics about the Dead Letter Queue.
+  *
+  * Provides aggregate information for monitoring and alerting.
+  *
+  * @property totalItems - Total number of items in the DLQ
+  * @property byStatus - Count of items by status
+  * @property byFlow - Count of items by flow ID
+  * @property oldestItem - Timestamp of the oldest item
+  * @property averageRetryCount - Average number of retries across all items
+  */
+ interface DeadLetterQueueStats {
+   /** Total number of items in the DLQ */
+   totalItems: number;
+   /** Count of items by status */
+   byStatus: Record<DeadLetterItemStatus, number>;
+   /** Count of items by flow ID */
+   byFlow: Record<string, number>;
+   /** Timestamp of the oldest item */
+   oldestItem?: Date;
+   /** Average number of retries across all items */
+   averageRetryCount: number;
+ }
+ /**
+  * Options for listing DLQ items.
+  *
+  * @property status - Filter by status
+  * @property flowId - Filter by flow ID
+  * @property clientId - Filter by client ID
+  * @property limit - Maximum items to return (default: 50)
+  * @property offset - Number of items to skip (default: 0)
+  */
+ interface DeadLetterListOptions {
+   /** Filter by status */
+   status?: DeadLetterItemStatus;
+   /** Filter by flow ID */
+   flowId?: string;
+   /** Filter by client ID */
+   clientId?: string;
+   /** Maximum items to return (default: 50) */
+   limit?: number;
+   /** Number of items to skip for pagination (default: 0) */
+   offset?: number;
+ }
+ /**
+  * Result of a batch retry operation.
+  *
+  * @property retried - Number of items that were retried
+  * @property succeeded - Number of retries that succeeded
+  * @property failed - Number of retries that failed
+  */
+ interface DeadLetterRetryAllResult {
+   /** Number of items that were retried */
+   retried: number;
+   /** Number of retries that succeeded */
+   succeeded: number;
+   /** Number of retries that failed */
+   failed: number;
+ }
+ /**
+  * Result of a cleanup operation.
+  *
+  * @property deleted - Number of items that were deleted
+  */
+ interface DeadLetterCleanupResult {
+   /** Number of items that were deleted */
+   deleted: number;
+ }
+ /**
+  * Options for cleanup operation.
+  *
+  * @property olderThan - Delete items older than this date
+  * @property status - Only delete items with this status
+  */
+ interface DeadLetterCleanupOptions {
+   /** Delete items older than this date */
+   olderThan?: Date;
+   /** Only delete items with this status */
+   status?: "exhausted" | "resolved";
+ }
+ /**
+  * Result of processing scheduled retries.
+  *
+  * @property processed - Total items processed
+  * @property succeeded - Number that succeeded
+  * @property failed - Number that failed
+  */
+ interface DeadLetterProcessResult {
+   /** Total items processed */
+   processed: number;
+   /** Number of successful retries */
+   succeeded: number;
+   /** Number of failed retries */
+   failed: number;
+ }
+ //#endregion
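Since `DeadLetterQueueStats` is just an aggregate over items, a monitoring endpoint might derive it along these lines. This is a sketch, not package code; `computeDlqStats` is a made-up helper grounded only in the types declared above.

```typescript
// Aggregate a list of DLQ items into the DeadLetterQueueStats shape.
const computeDlqStats = (items: DeadLetterItem[]): DeadLetterQueueStats => {
  const byStatus: Record<DeadLetterItemStatus, number> = {
    pending: 0,
    retrying: 0,
    exhausted: 0,
    resolved: 0,
  };
  const byFlow: Record<string, number> = {};
  let oldestItem: Date | undefined;
  let totalRetries = 0;

  for (const item of items) {
    byStatus[item.status] += 1;
    byFlow[item.flowId] = (byFlow[item.flowId] ?? 0) + 1;
    totalRetries += item.retryCount;
    if (!oldestItem || item.createdAt < oldestItem) oldestItem = item.createdAt;
  }

  return {
    totalItems: items.length,
    byStatus,
    byFlow,
    oldestItem,
    averageRetryCount: items.length === 0 ? 0 : totalRetries / items.length,
  };
};
```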
+ //#region src/types/upload-file.d.ts
+ /**
+  * Zod schema for validating UploadFile objects.
+  *
+  * This schema defines the structure and validation rules for upload file metadata.
+  * Use this schema to parse and validate UploadFile data from external sources.
+  *
+  * @see {@link UploadFile} for the TypeScript type
+  */
+ /**
+  * Zod schema for trace context used in distributed tracing.
+  */
+ declare const traceContextSchema: z.ZodObject<{
+   traceId: z.ZodString;
+   spanId: z.ZodString;
+   traceFlags: z.ZodNumber;
+ }, z.core.$strip>;
+ /**
+  * JSON value type that allows any JSON-serializable data.
+  * Used for metadata values which can be primitives, arrays, or nested objects.
+  */
+ type JsonValue = string | number | boolean | null | JsonValue[] | {
+   [key: string]: JsonValue;
+ };
+ /**
+  * JSON value schema that allows any JSON-serializable data.
+  * This is used for metadata values which can be primitives, arrays, or objects.
+  */
+ declare const jsonValueSchema: z.ZodType<JsonValue>;
+ declare const uploadFileSchema: z.ZodObject<{
+   id: z.ZodString;
+   size: z.ZodOptional<z.ZodNumber>;
+   offset: z.ZodNumber;
+   metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodType<JsonValue, unknown, z.core.$ZodTypeInternals<JsonValue, unknown>>>>;
+   creationDate: z.ZodOptional<z.ZodString>;
+   url: z.ZodOptional<z.ZodString>;
+   sizeIsDeferred: z.ZodOptional<z.ZodBoolean>;
+   checksum: z.ZodOptional<z.ZodString>;
+   checksumAlgorithm: z.ZodOptional<z.ZodString>;
+   storage: z.ZodObject<{
+     id: z.ZodString;
+     type: z.ZodString;
+     path: z.ZodOptional<z.ZodString>;
+     uploadId: z.ZodOptional<z.ZodString>;
+     bucket: z.ZodOptional<z.ZodString>;
+     parts: z.ZodOptional<z.ZodArray<z.ZodObject<{
+       partNumber: z.ZodNumber;
+       etag: z.ZodString;
+       size: z.ZodNumber;
+     }, z.core.$strip>>>;
+   }, z.core.$strip>;
+   flow: z.ZodOptional<z.ZodObject<{
+     flowId: z.ZodString;
+     nodeId: z.ZodString;
+     jobId: z.ZodString;
+   }, z.core.$strip>>;
+   traceContext: z.ZodOptional<z.ZodObject<{
+     traceId: z.ZodString;
+     spanId: z.ZodString;
+     traceFlags: z.ZodNumber;
+   }, z.core.$strip>>;
+ }, z.core.$strip>;
+ /**
+  * Represents an uploaded file with its metadata and storage information.
+  *
+  * This is the core data structure that tracks file uploads throughout their lifecycle.
+  * It contains all metadata needed to resume uploads, track progress, and locate files
+  * in storage backends.
+  *
+  * @property id - Unique identifier for this upload
+  * @property offset - Current byte offset (how many bytes have been uploaded)
+  * @property storage - Storage backend information
+  * @property storage.id - Storage backend identifier (e.g., "s3-production")
+  * @property storage.type - Storage backend type (e.g., "s3", "azure", "gcs")
+  * @property storage.path - Optional path prefix within the storage backend
+  * @property storage.uploadId - Optional backend-specific upload ID (e.g., S3 multipart upload ID)
+  * @property storage.bucket - Optional bucket or container name
+  * @property storage.parts - Optional array of uploaded parts (used by data stores that need to track parts locally, like R2)
+  * @property flow - Optional flow processing information (when file is part of a flow)
+  * @property flow.flowId - ID of the flow processing this file
+  * @property flow.nodeId - ID of the flow node that created this file
+  * @property flow.jobId - ID of the flow job execution
+  * @property size - Total file size in bytes (undefined if deferred)
+  * @property metadata - Custom key-value metadata attached to the file
+  * @property creationDate - ISO 8601 timestamp when upload was created
+  * @property url - Optional public URL to access the file
+  * @property sizeIsDeferred - True if file size is not known at upload start
+  * @property checksum - Optional file checksum/hash value
+  * @property checksumAlgorithm - Algorithm used for checksum (e.g., "md5", "sha256")
+  *
+  * @example
+  * ```typescript
+  * // Create an UploadFile for a new upload
+  * const uploadFile: UploadFile = {
+  *   id: "upload_abc123",
+  *   offset: 0,
+  *   size: 1024000,
+  *   storage: {
+  *     id: "s3-production",
+  *     type: "s3",
+  *     bucket: "my-uploads",
+  *     path: "files/"
+  *   },
+  *   metadata: {
+  *     fileName: "image.jpg",
+  *     contentType: "image/jpeg",
+  *     userId: "user_123"
+  *   },
+  *   creationDate: new Date().toISOString(),
+  *   checksum: "5d41402abc4b2a76b9719d911017c592",
+  *   checksumAlgorithm: "md5"
+  * };
+  *
+  * // UploadFile with flow processing
+  * const flowFile: UploadFile = {
+  *   id: "upload_xyz789",
+  *   offset: 0,
+  *   size: 2048000,
+  *   storage: {
+  *     id: "s3-temp",
+  *     type: "s3",
+  *     bucket: "temp-processing"
+  *   },
+  *   flow: {
+  *     flowId: "flow_resize_optimize",
+  *     nodeId: "input_1",
+  *     jobId: "job_456"
+  *   }
+  * };
+  *
+  * // Resume an interrupted upload
+  * const resumingFile: UploadFile = {
+  *   id: "upload_resume",
+  *   offset: 524288, // Already uploaded 512KB
+  *   size: 1024000,
+  *   storage: {
+  *     id: "s3-production",
+  *     type: "s3",
+  *     uploadId: "multipart_xyz" // S3 multipart upload ID
+  *   }
+  * };
+  * ```
+  */
+ /**
+  * Trace context for distributed tracing.
+  * Allows upload operations to be linked under a single trace.
+  */
+ type UploadFileTraceContext = {
+   /** 128-bit trace identifier (32 hex characters) */
+   traceId: string;
+   /** 64-bit span identifier (16 hex characters) */
+   spanId: string;
+   /** Trace flags (1 = sampled) */
+   traceFlags: number;
+ };
+ type UploadFile = {
+   id: string;
+   offset: number;
+   storage: {
+     id: string;
+     type: string;
+     path?: string | undefined;
+     uploadId?: string | undefined;
+     bucket?: string | undefined;
+     parts?: Array<{
+       partNumber: number;
+       etag: string;
+       size: number;
+     }> | undefined;
+   };
+   flow?: {
+     flowId: string;
+     nodeId: string;
+     jobId: string;
+   };
+   size?: number | undefined;
+   metadata?: Record<string, JsonValue> | undefined;
+   creationDate?: string | undefined;
+   url?: string | undefined;
+   sizeIsDeferred?: boolean | undefined;
+   checksum?: string | undefined;
+   checksumAlgorithm?: string | undefined;
+   /**
+    * OpenTelemetry trace context for distributed tracing.
+    * When set, subsequent upload operations (chunks, validation) will be
+    * linked as children of this trace context.
+    */
+   traceContext?: UploadFileTraceContext | undefined;
+ };
+ //#endregion
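As the schema's doc comment suggests, `uploadFileSchema` can guard metadata arriving from external sources before it is treated as an `UploadFile`. A hypothetical usage sketch using Zod's standard `safeParse`:

```typescript
// Sketch: validate untrusted JSON before trusting it as upload metadata.
const raw: unknown = JSON.parse(
  '{"id":"upload_abc123","offset":0,"storage":{"id":"s3-production","type":"s3"}}',
);

const parsed = uploadFileSchema.safeParse(raw);
if (parsed.success) {
  // parsed.data now matches the UploadFile shape declared above.
  console.log(parsed.data.id, parsed.data.storage.type);
} else {
  console.error(parsed.error.issues);
}
```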
+ //#region src/flow/node.d.ts
+ /**
+  * Defines the type of node in a flow, determining its role in the processing pipeline.
+  */
+ declare enum NodeType {
+   /** Entry point for data into the flow */
+   input = "input",
+   /** Transforms data during flow execution */
+   process = "process",
+   /** Routes data based on conditions */
+   conditional = "conditional",
+   /** Splits data to multiple outputs */
+   multiplex = "multiplex",
+   /** Combines multiple inputs into one output */
+   merge = "merge"
+ }
+ /**
+  * Fields that can be evaluated in conditional node conditions.
+  * These fields are typically found in file metadata.
+  */
+ type ConditionField = "mimeType" | "size" | "width" | "height" | "extension";
+ /**
+  * Operators available for comparing values in conditional node conditions.
+  */
+ type ConditionOperator = "equals" | "notEquals" | "greaterThan" | "lessThan" | "contains" | "startsWith";
+ /**
+  * Value used in conditional node comparisons.
+  * Can be either a string or number depending on the field being evaluated.
+  */
+ type ConditionValue = string | number;
+ /**
+  * Creates a flow node with automatic input/output validation and retry logic.
+  *
+  * Flow nodes are the building blocks of processing pipelines. Each node:
+  * - Validates its input against a Zod schema
+  * - Executes its processing logic
+  * - Validates its output against a Zod schema
+  * - Can optionally retry on failure with exponential backoff
+  *
+  * @template Input - The expected input type for this node
+  * @template Output - The output type produced by this node
+  *
+  * @param config - Node configuration
+  * @param config.id - Unique identifier for this node in the flow
+  * @param config.name - Human-readable name for the node
+  * @param config.description - Description of what this node does
+  * @param config.type - The type of node (input, process, conditional, multiplex, merge)
+  * @param config.inputSchema - Zod schema for validating input data
+  * @param config.outputSchema - Zod schema for validating output data
+  * @param config.run - The processing function to execute for this node
+  * @param config.condition - Optional condition for conditional nodes to determine if they should execute
+  * @param config.multiInput - If true, node receives all inputs as a record instead of a single input
+  * @param config.multiOutput - If true, node can output to multiple targets
+  * @param config.pausable - If true, node can pause execution and wait for additional data
+  * @param config.retry - Optional retry configuration for handling transient failures
+  * @param config.retry.maxRetries - Maximum number of retry attempts (default: 0)
+  * @param config.retry.retryDelay - Base delay in milliseconds between retries (default: 1000)
+  * @param config.retry.exponentialBackoff - Whether to use exponential backoff for retries (default: true)
+  * @param config.inputTypeId - Optional input type ID from inputTypeRegistry (e.g., "streaming-input-v1"). Used for input nodes to describe external interface.
+  * @param config.outputTypeId - Optional output type ID from outputTypeRegistry (e.g., "storage-output-v1"). Used for result type tagging.
+  * @param config.keepOutput - If true, preserves this node's output even if it has outgoing edges (default: false). Useful for flows where intermediate results need to be kept (e.g., preserving the original file when also running OCR on it).
+  * @param config.circuitBreaker - Optional circuit breaker configuration for resilience. Overrides flow-level circuit breaker defaults for this node.
+  * @param config.nodeTypeId - Stable node type identifier for circuit breaker configuration. Used to share circuit breaker state across nodes of the same type and for nodeTypeOverrides. Example: "describe-image", "remove-background", "scan-virus"
+  *
+  * @returns An Effect that succeeds with the created FlowNode
+  *
+  * @example
+  * ```typescript
+  * const resizeNode = createFlowNode({
+  *   id: "resize-1",
+  *   name: "Resize Image",
+  *   description: "Resizes images to 800x600",
+  *   type: NodeType.process,
+  *   inputSchema: z.object({
+  *     stream: z.instanceof(Uint8Array),
+  *     metadata: z.object({ width: z.number(), height: z.number() })
+  *   }),
+  *   outputSchema: z.object({
+  *     stream: z.instanceof(Uint8Array),
+  *     metadata: z.object({ width: z.literal(800), height: z.literal(600) })
+  *   }),
+  *   run: ({ data }) => Effect.gen(function* () {
+  *     const resized = yield* resizeImage(data.stream, 800, 600);
+  *     return {
+  *       type: "complete",
+  *       data: { stream: resized, metadata: { width: 800, height: 600 } }
+  *     };
+  *   }),
+  *   retry: {
+  *     maxRetries: 3,
+  *     retryDelay: 1000,
+  *     exponentialBackoff: true
+  *   }
+  * });
+  * ```
+  */
+ declare function createFlowNode<Input, Output, TType extends NodeType = NodeType>({
+   id,
+   name,
+   description,
+   type,
+   inputSchema,
+   outputSchema,
+   run,
+   condition,
+   multiInput,
+   multiOutput,
+   pausable,
+   retry,
+   inputTypeId,
+   outputTypeId,
+   keepOutput,
+   circuitBreaker,
+   nodeTypeId
+ }: {
+   id: string;
+   name: string;
+   description: string;
+   type: TType;
+   inputSchema: z.ZodSchema<Input>;
+   outputSchema: z.ZodSchema<Output>;
+   run: (args: {
+     data: Input;
+     jobId: string;
+     storageId: string;
+     flowId: string;
+     clientId: string | null;
+   }) => Effect.Effect<NodeExecutionResult<Output>, UploadistaError>;
+   condition?: {
+     field: ConditionField;
+     operator: ConditionOperator;
+     value: ConditionValue;
+   };
+   multiInput?: boolean;
+   multiOutput?: boolean;
+   pausable?: boolean;
+   retry?: {
+     maxRetries?: number;
+     retryDelay?: number;
+     exponentialBackoff?: boolean;
+   };
+   /** Input type ID from inputTypeRegistry - for input nodes describing external interface */
+   inputTypeId?: string;
+   /** Output type ID from outputTypeRegistry - for result type tagging */
+   outputTypeId?: string;
+   keepOutput?: boolean;
+   /** Circuit breaker configuration for resilience (overrides flow defaults) */
+   circuitBreaker?: FlowCircuitBreakerConfig;
+   /**
+    * Stable node type identifier for circuit breaker configuration.
+    * Used to share circuit breaker state across nodes of the same type and for nodeTypeOverrides.
+    * Example: "describe-image", "remove-background", "scan-virus"
+    */
+   nodeTypeId?: string;
+ }): Effect.Effect<FlowNode<Input, Output, UploadistaError> & {
+   type: TType;
+ }, UploadistaError>;
+ /**
+  * Extracts serializable node metadata from a FlowNode instance.
+  *
+  * This function is useful for serializing flow configurations or
+  * transmitting node information over the network without including
+  * the executable run function or schemas.
+  *
+  * @param node - The flow node to extract data from
+  * @returns A plain object containing the node's metadata (id, name, description, type, inputTypeId, outputTypeId)
+  */
+ declare const getNodeData: (node: FlowNode<any, any, UploadistaError>) => FlowNodeData;
+ //#endregion
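The `condition` parameter is documented above but not exercised in the package's own example. A conditional node wired with the `ConditionField`/`ConditionOperator`/`ConditionValue` shapes might look like this sketch; the pass-through `run` body and the 5 MB threshold are invented for illustration.

```typescript
// Illustrative conditional node: only executes when the file's size
// metadata exceeds 5 MB, per the condition shape declared above.
const routeLargeFiles = createFlowNode({
  id: "route-large-1",
  name: "Route Large Files",
  description: "Forwards only files larger than 5 MB",
  type: NodeType.conditional,
  inputSchema: z.object({ stream: z.instanceof(Uint8Array) }),
  outputSchema: z.object({ stream: z.instanceof(Uint8Array) }),
  // Evaluated against file metadata before the node runs.
  condition: { field: "size", operator: "greaterThan", value: 5 * 1024 * 1024 },
  // Pass-through body; a real node would transform the data here.
  run: ({ data }) => Effect.succeed({ type: "complete" as const, data }),
});
```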
+ //#region src/flow/event.d.ts
+ /**
+  * Enumeration of all possible flow and node execution event types.
+  *
+  * Events follow a lifecycle pattern:
+  * - Job level: JobStart → ... → JobEnd
+  * - Flow level: FlowStart → ... → (FlowEnd | FlowError)
+  * - Node level: NodeStart → ... → (NodeEnd | NodePause | NodeError)
+  *
+  * @example
+  * ```typescript
+  * // Listen for flow completion
+  * if (event.eventType === EventType.FlowEnd) {
+  *   console.log("Flow completed:", event.result);
+  * }
+  * ```
+  */
+ declare enum EventType {
+   /** Emitted when a job starts execution */
+   JobStart = "job-start",
+   /** Emitted when a job completes (success or failure) */
+   JobEnd = "job-end",
+   /** Emitted when a flow begins execution */
+   FlowStart = "flow-start",
+   /** Emitted when a flow completes successfully */
+   FlowEnd = "flow-end",
+   /** Emitted when a flow encounters an error */
+   FlowError = "flow-error",
+   /** Emitted when a flow is paused by user request */
+   FlowPause = "flow-pause",
+   /** Emitted when a flow is cancelled by user request */
+   FlowCancel = "flow-cancel",
+   /** Emitted when a node starts processing */
+   NodeStart = "node-start",
+   /** Emitted when a node completes successfully */
+   NodeEnd = "node-end",
+   /** Emitted when a node pauses (waiting for additional data) */
+   NodePause = "node-pause",
+   /** Emitted when a paused node resumes execution */
+   NodeResume = "node-resume",
+   /** Emitted when a node encounters an error */
+   NodeError = "node-error",
+   /** Emitted for streaming node data (e.g., progress updates) */
+   NodeStream = "node-stream",
+   /** Emitted for node response data */
+   NodeResponse = "node-response",
+   /** Emitted when a job is added to the Dead Letter Queue */
+   DlqItemAdded = "dlq-item-added",
+   /** Emitted when a DLQ retry attempt starts */
+   DlqRetryStart = "dlq-retry-start",
+   /** Emitted when a DLQ retry succeeds */
+   DlqRetrySuccess = "dlq-retry-success",
+   /** Emitted when a DLQ retry fails */
+   DlqRetryFailed = "dlq-retry-failed",
+   /** Emitted when a DLQ item is exhausted (max retries reached) */
+   DlqItemExhausted = "dlq-item-exhausted",
+   /** Emitted when a DLQ item is resolved */
+   DlqItemResolved = "dlq-item-resolved"
+ }
+ /**
+  * Event emitted when a job starts execution.
+  */
+ type FlowEventJobStart = {
+   jobId: string;
+   eventType: EventType.JobStart;
+ };
+ /**
+  * Event emitted when a job completes (either successfully or with failure).
+  */
+ type FlowEventJobEnd = {
+   jobId: string;
+   eventType: EventType.JobEnd;
+ };
+ /**
+  * Event emitted when a flow begins execution.
+  * This is the first event after JobStart in the execution lifecycle.
+  */
+ type FlowEventFlowStart = {
+   jobId: string;
+   flowId: string;
+   eventType: EventType.FlowStart;
+ };
+ /**
+  * Event emitted when a flow completes successfully.
+  *
+  * @property outputs - Array of typed outputs from all output nodes in the flow
+  * @property result - Legacy field for backward compatibility (deprecated, use outputs instead)
+  *
+  * @remarks
+  * The `outputs` field contains an array of TypedOutput objects, each with:
+  * - nodeId: The specific node that produced the output
+  * - nodeType: The registered type ID (e.g., "storage-output-v1")
+  * - data: The actual output data
+  * - timestamp: When the output was produced
+  *
+  * @example
+  * ```typescript
+  * // Handle flow completion with typed outputs
+  * if (event.eventType === EventType.FlowEnd && event.outputs) {
+  *   for (const output of event.outputs) {
+  *     console.log(`${output.nodeId} (${output.nodeType}):`, output.data);
+  *   }
+  * }
+  * ```
+  */
+ type FlowEventFlowEnd = {
+   jobId: string;
+   flowId: string;
+   eventType: EventType.FlowEnd;
+   outputs?: TypedOutput[];
+   result?: unknown;
+ };
+ /**
+  * Event emitted when a flow encounters an unrecoverable error.
+  *
+  * @property error - Error message describing what went wrong
+  */
+ type FlowEventFlowError = {
+   jobId: string;
+   flowId: string;
+   eventType: EventType.FlowError;
+   error: string;
+ };
+ /**
+  * Event emitted when a flow is paused by user request.
+  *
+  * Unlike NodePause which occurs when a node needs more data,
+  * this event is triggered by an explicit user action to pause the flow.
+  */
+ type FlowEventFlowPause = {
+   jobId: string;
+   flowId: string;
+   eventType: EventType.FlowPause;
+   pausedAt?: string;
+ };
+ /**
+  * Event emitted when a flow is cancelled by user request.
+  *
+  * Cancelled flows will clean up intermediate files and stop execution.
+  */
+ type FlowEventFlowCancel = {
+   jobId: string;
+   flowId: string;
+   eventType: EventType.FlowCancel;
+ };
+ /**
+  * Event emitted when a node begins processing.
+  *
+  * @property nodeName - Human-readable node name
+  * @property nodeType - Type of node (input, transform, conditional, output, etc.)
+  */
+ type FlowEventNodeStart = {
+   jobId: string;
+   flowId: string;
+   nodeId: string;
+   eventType: EventType.NodeStart;
+   nodeName: string;
+   nodeType: NodeType;
+ };
+ /**
+  * Event emitted when a node fails after all retry attempts.
+  *
+  * @property error - Error message from the failed execution
+  * @property retryCount - Number of retry attempts made before giving up
+  */
+ type FlowEventNodeError = {
+   jobId: string;
+   flowId: string;
+   nodeId: string;
+   nodeName: string;
+   eventType: EventType.NodeError;
+   error: string;
+   retryCount?: number;
+ };
+ /**
+  * Event emitted when a node completes successfully.
+  *
+  * @property result - The typed output data produced by the node
+  *
+  * @remarks
+  * For output nodes, the result will be a TypedOutput containing type information.
+  * For other nodes, it may be untyped (nodeType will be undefined).
+  */
+ type FlowEventNodeEnd = {
+   jobId: string;
+   flowId: string;
+   nodeId: string;
+   eventType: EventType.NodeEnd;
+   nodeName: string;
+   result?: TypedOutput | unknown;
+ };
+ /**
+  * Event emitted when a node pauses execution, waiting for additional data.
+  *
+  * This typically occurs with input nodes that need more chunks or nodes
+  * waiting for external services.
+  *
+  * @property partialData - Any partial result available before pausing
+  */
+ type FlowEventNodePause = {
+   jobId: string;
+   flowId: string;
+   nodeId: string;
+   eventType: EventType.NodePause;
+   nodeName: string;
+   partialData?: unknown;
+ };
+ /**
+  * Event emitted when a paused node resumes execution.
+  *
+  * This occurs after providing the additional data needed by a paused node.
+  */
+ type FlowEventNodeResume = {
+   jobId: string;
+   flowId: string;
+   nodeId: string;
+   eventType: EventType.NodeResume;
+   nodeName: string;
+   nodeType: NodeType;
+ };
+ /**
+  * Event emitted for node-specific response data.
+  *
+  * Used for streaming intermediate results or progress updates.
+  */
+ type FlowEventNodeResponse = {
+   jobId: string;
+   flowId: string;
+   nodeId: string;
+   eventType: EventType.NodeResponse;
+   nodeName: string;
+   data: unknown;
+ };
+ /**
+  * Event emitted when a job is added to the Dead Letter Queue.
+  */
+ type FlowEventDlqItemAdded = {
+   eventType: EventType.DlqItemAdded;
+   dlqItemId: string;
+   jobId: string;
+   flowId: string;
+   errorCode: string;
+   errorMessage: string;
+   retryCount: number;
+   maxRetries: number;
+ };
+ /**
+  * Event emitted when a DLQ retry attempt starts.
+  */
+ type FlowEventDlqRetryStart = {
+   eventType: EventType.DlqRetryStart;
+   dlqItemId: string;
+   jobId: string;
+   flowId: string;
+   attemptNumber: number;
+ };
+ /**
+  * Event emitted when a DLQ retry succeeds.
+  */
+ type FlowEventDlqRetrySuccess = {
+   eventType: EventType.DlqRetrySuccess;
+   dlqItemId: string;
+   jobId: string;
+   flowId: string;
+   attemptNumber: number;
+   durationMs: number;
+ };
+ /**
+  * Event emitted when a DLQ retry fails.
+  */
+ type FlowEventDlqRetryFailed = {
+   eventType: EventType.DlqRetryFailed;
+   dlqItemId: string;
+   jobId: string;
+   flowId: string;
+   attemptNumber: number;
+   error: string;
+   durationMs: number;
+   nextRetryAt?: string;
+ };
+ /**
+  * Event emitted when a DLQ item is exhausted (max retries reached).
+  */
+ type FlowEventDlqItemExhausted = {
+   eventType: EventType.DlqItemExhausted;
+   dlqItemId: string;
+   jobId: string;
+   flowId: string;
+   totalAttempts: number;
+ };
+ /**
+  * Event emitted when a DLQ item is resolved.
+  */
+ type FlowEventDlqItemResolved = {
+   eventType: EventType.DlqItemResolved;
+   dlqItemId: string;
+   jobId: string;
+   flowId: string;
+   resolvedBy: "retry" | "manual";
+ };
+ /**
+  * Union of all DLQ-related events.
+  */
+ type DlqEvent = FlowEventDlqItemAdded | FlowEventDlqRetryStart | FlowEventDlqRetrySuccess | FlowEventDlqRetryFailed | FlowEventDlqItemExhausted | FlowEventDlqItemResolved;
+ /**
+  * Union of all possible flow execution events.
+  *
+  * This discriminated union allows type-safe event handling based on eventType.
+  *
+  * @example
+  * ```typescript
+  * function handleFlowEvent(event: FlowEvent) {
+  *   switch (event.eventType) {
+  *     case EventType.FlowStart:
+  *       console.log("Flow started:", event.flowId);
+  *       break;
+  *     case EventType.NodeEnd:
+  *       console.log("Node completed:", event.nodeName, event.result);
+  *       break;
+  *     case EventType.FlowError:
+  *       console.error("Flow failed:", event.error);
+  *       break;
+  *     case EventType.FlowCancel:
+  *       console.log("Flow cancelled:", event.flowId);
+  *       break;
+  *     case EventType.DlqItemAdded:
+  *       console.log("Job added to DLQ:", event.dlqItemId);
+  *       break;
+  *   }
+  * }
+  * ```
+  */
+ type FlowEvent = FlowEventJobStart | FlowEventJobEnd | FlowEventFlowStart | FlowEventFlowEnd | FlowEventFlowError | FlowEventFlowPause | FlowEventFlowCancel | FlowEventNodeStart | FlowEventNodeEnd | FlowEventNodePause | FlowEventNodeResume | FlowEventNodeError | DlqEvent;
+ //#endregion
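The new DLQ events slot into the same discriminated-union handling. A sketch of an alerting hook over `DlqEvent`, with `notifyOps` standing in for whatever notifier you actually use:

```typescript
// Placeholder notifier; swap for a real alerting integration.
const notifyOps = (message: string): void => console.warn(message);

const handleDlqEvent = (event: DlqEvent): void => {
  switch (event.eventType) {
    case EventType.DlqItemAdded:
      notifyOps(
        `Job ${event.jobId} dead-lettered: ${event.errorCode} (retry ${event.retryCount}/${event.maxRetries})`,
      );
      break;
    case EventType.DlqRetryFailed:
      notifyOps(
        `Retry ${event.attemptNumber} for ${event.dlqItemId} failed: ${event.error}`,
      );
      break;
    case EventType.DlqItemExhausted:
      notifyOps(
        `DLQ item ${event.dlqItemId} exhausted after ${event.totalAttempts} attempts; manual intervention needed`,
      );
      break;
    default:
      // DlqRetryStart, DlqRetrySuccess, DlqItemResolved: no alert needed.
      break;
  }
};
```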
+ //#region src/flow/types/retry-policy.d.ts
+ /**
+  * Retry policy types for the Dead Letter Queue.
+  *
+  * Defines configurable retry strategies including immediate, fixed delay,
+  * and exponential backoff with jitter.
+  *
+  * @module flow/types/retry-policy
+  * @see {@link DeadLetterQueueService} for DLQ operations
+  */
+ /**
+  * Immediate retry strategy - retry as soon as possible.
+  *
+  * Use for errors that are likely transient and may succeed on immediate retry.
+  */
+ interface ImmediateBackoff {
+   type: "immediate";
+ }
+ /**
+  * Fixed delay retry strategy - wait a fixed duration between retries.
+  *
+  * @property delayMs - Milliseconds to wait between retries
+  *
+  * @example
+  * ```typescript
+  * const fixedBackoff: FixedBackoff = {
+  *   type: "fixed",
+  *   delayMs: 5000 // Wait 5 seconds between retries
+  * };
+  * ```
+  */
+ interface FixedBackoff {
+   type: "fixed";
+   /** Milliseconds to wait between retries */
+   delayMs: number;
+ }
+ /**
+  * Exponential backoff retry strategy - progressively longer delays.
+  *
+  * Delay = min(initialDelayMs * (multiplier ^ retryCount), maxDelayMs)
+  * With optional jitter to prevent thundering herd.
+  *
+  * @property initialDelayMs - Starting delay in milliseconds (e.g., 1000)
+  * @property maxDelayMs - Maximum delay cap in milliseconds (e.g., 300000)
+  * @property multiplier - Multiplication factor per retry (e.g., 2)
+  * @property jitter - Add randomness to prevent thundering herd
+  *
+  * @example
+  * ```typescript
+  * const exponentialBackoff: ExponentialBackoff = {
+  *   type: "exponential",
+  *   initialDelayMs: 1000, // Start with 1 second
+  *   maxDelayMs: 300000, // Cap at 5 minutes
+  *   multiplier: 2, // Double each time
+  *   jitter: true // Add randomness
+  * };
+  * // Delays: ~1s, ~2s, ~4s, ~8s, ..., capped at ~5min
+  * ```
+  */
+ interface ExponentialBackoff {
+   type: "exponential";
+   /** Starting delay in milliseconds */
+   initialDelayMs: number;
+   /** Maximum delay cap in milliseconds */
+   maxDelayMs: number;
+   /** Multiplication factor per retry (e.g., 2 for doubling) */
+   multiplier: number;
+   /** Add randomness to prevent thundering herd */
+   jitter: boolean;
+ }
+ /**
+  * Union type for all backoff strategies.
+  */
+ type BackoffStrategy = ImmediateBackoff | FixedBackoff | ExponentialBackoff;
+ /**
+  * Configuration for automatic retry behavior.
+  *
+  * Defines how failed jobs should be retried, including backoff strategy,
+  * max attempts, and error filtering.
+  *
+  * @property enabled - Whether automatic retry is enabled (default: true)
+  * @property maxRetries - Maximum retry attempts (default: 3)
+  * @property backoff - Backoff strategy configuration
+  * @property retryableErrors - Only retry these error codes (default: all)
+  * @property nonRetryableErrors - Never retry these error codes
+  * @property ttlMs - Auto-delete items after this time (default: 7 days)
+  *
+  * @example
+  * ```typescript
+  * // Conservative retry policy for external APIs
+  * const apiRetryPolicy: RetryPolicy = {
+  *   enabled: true,
+  *   maxRetries: 5,
+  *   backoff: {
+  *     type: "exponential",
+  *     initialDelayMs: 1000,
+  *     maxDelayMs: 60000,
+  *     multiplier: 2,
+  *     jitter: true
+  *   },
+  *   nonRetryableErrors: ["VALIDATION_ERROR", "AUTH_ERROR"],
+  *   ttlMs: 604800000 // 7 days
+  * };
+  *
+  * // Aggressive retry for transient failures
+  * const transientRetryPolicy: RetryPolicy = {
+  *   enabled: true,
+  *   maxRetries: 3,
+  *   backoff: { type: "immediate" },
+  *   retryableErrors: ["NETWORK_ERROR", "TIMEOUT_ERROR"]
+  * };
+  *
+  * // No automatic retry, manual intervention only
+  * const manualPolicy: RetryPolicy = {
+  *   enabled: false,
+  *   maxRetries: 0,
+  *   backoff: { type: "immediate" }
+  * };
+  * ```
+  */
+ interface RetryPolicy {
+   /** Whether automatic retry is enabled (default: true) */
+   enabled: boolean;
+   /** Maximum retry attempts (default: 3) */
+   maxRetries: number;
+   /** Backoff strategy configuration */
+   backoff: BackoffStrategy;
+   /** Only retry these error codes. If undefined, retry all errors. */
+   retryableErrors?: string[];
+   /** Never retry these error codes. Takes precedence over retryableErrors. */
+   nonRetryableErrors?: string[];
+   /** Auto-delete items after this time in milliseconds (default: 7 days) */
+   ttlMs?: number;
+ }
+ /**
+  * Default retry policy values.
+  */
+ declare const DEFAULT_RETRY_POLICY: RetryPolicy;
+ /**
+  * Calculates the next retry delay based on the backoff strategy.
+  *
+  * @param backoff - The backoff strategy configuration
+  * @param retryCount - Current retry attempt number (0-based)
+  * @returns Delay in milliseconds before the next retry
+  *
+  * @example
+  * ```typescript
+  * const delay = calculateBackoffDelay(
+  *   { type: "exponential", initialDelayMs: 1000, maxDelayMs: 60000, multiplier: 2, jitter: true },
+  *   2 // Third attempt
+  * );
+  * // Returns approximately 4000ms (1000 * 2^2) with jitter
+  * ```
+  */
+ declare function calculateBackoffDelay(backoff: BackoffStrategy, retryCount: number): number;
+ /**
+  * Determines if an error should be retried based on the retry policy.
+  *
+  * @param errorCode - The error code to check
+  * @param policy - The retry policy configuration
+  * @returns true if the error should be retried
+  *
+  * @example
+  * ```typescript
+  * const policy: RetryPolicy = {
+  *   enabled: true,
+  *   maxRetries: 3,
+  *   backoff: { type: "immediate" },
+  *   nonRetryableErrors: ["VALIDATION_ERROR"]
+  * };
+  *
+  * isErrorRetryable("NETWORK_ERROR", policy); // true
+  * isErrorRetryable("VALIDATION_ERROR", policy); // false
+  * ```
+  */
+ declare function isErrorRetryable(errorCode: string, policy: RetryPolicy): boolean;
+ /**
+  * Calculates the expiration date for a DLQ item.
+  *
+  * @param createdAt - When the item was created
+  * @param ttlMs - Time to live in milliseconds
+  * @returns The expiration date, or undefined if no TTL
+  */
+ declare function calculateExpirationDate(createdAt: Date, ttlMs?: number): Date | undefined;
+ //#endregion
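As a worked check of the exponential formula documented above, here is a local sketch without jitter; the package's `calculateBackoffDelay` additionally randomizes the result when `jitter` is true.

```typescript
const backoff: ExponentialBackoff = {
  type: "exponential",
  initialDelayMs: 1000,
  maxDelayMs: 60000,
  multiplier: 2,
  jitter: false,
};

// delay = min(initialDelayMs * multiplier^retryCount, maxDelayMs)
const delay = (retryCount: number): number =>
  Math.min(backoff.initialDelayMs * backoff.multiplier ** retryCount, backoff.maxDelayMs);

console.log([0, 1, 2, 3, 4, 5, 6].map(delay));
// [1000, 2000, 4000, 8000, 16000, 32000, 60000]
// (the seventh step would be 64000 ms but is capped at maxDelayMs)
```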
1277
+ //#region src/flow/types/flow-types.d.ts
1278
+ /**
1279
+ * Type mapping for node input/output schemas.
1280
+ * Used for type-safe node connections in typed flows.
1281
+ */
1282
+ type NodeTypeMap = Record<string, {
1283
+ input: unknown;
1284
+ output: unknown;
1285
+ }>;
1286
+ /**
1287
+ * Minimal node data without execution logic.
1288
+ * Used for serialization and UI display.
1289
+ *
1290
+ * @property id - Unique node identifier
1291
+ * @property name - Human-readable node name
1292
+ * @property description - Explanation of what the node does
1293
+ * @property type - Node category (input, transform, conditional, output, etc.)
1294
+ * @property inputTypeId - Optional input type ID from inputTypeRegistry (for input nodes)
1295
+ * @property outputTypeId - Optional output type ID from outputTypeRegistry (for result typing)
1296
+ * @property keepOutput - If true, preserves this node's output even if it has outgoing edges (default: false)
1297
+ */
1298
+ type FlowNodeData = {
1299
+ id: string;
1300
+ name: string;
1301
+ description: string;
1302
+ type: NodeType; /** Input type ID from inputTypeRegistry - describes how external clients interact with this node */
1303
+ inputTypeId?: string; /** Output type ID from outputTypeRegistry - describes the data shape this node produces */
1304
+ outputTypeId?: string;
1305
+ keepOutput?: boolean;
1306
+ /**
1307
+ * Stable node type identifier for circuit breaker configuration.
1308
+ * Used to share circuit breaker state across nodes of the same type and for nodeTypeOverrides.
1309
+ * Example: "describe-image", "remove-background", "scan-virus"
1310
+ */
1311
+ nodeTypeId?: string;
1312
+ };
1313
+ /**
1314
+ * All built-in node type identifiers.
1315
+ * Used to exclude these from custom types for proper discriminated union narrowing.
1316
+ */
1317
+ type BuiltInNodeType = "storage-output-v1" | "streaming-input-v1";
1318
+ /**
1319
+ * Built-in typed outputs with automatic TypeScript narrowing.
1320
+ *
1321
+ * These outputs use discriminated unions to enable automatic type narrowing
1322
+ * in switch statements without requiring type guards.
1323
+ *
1324
+ * @remarks
1325
+ * Built-in types automatically narrow when using switch statements:
1326
+ * ```typescript
1327
+ * switch (output.nodeType) {
1328
+ * case 'storage-output-v1':
1329
+ * output.data.url // ✅ TypeScript knows data is UploadFile
1330
+ * break;
1331
+ * }
1332
+ * ```
1333
+ */
1334
+ type BuiltInTypedOutput = {
1335
+ nodeType: "storage-output-v1";
1336
+ data: UploadFile;
1337
+ nodeId: string;
1338
+ timestamp: string;
1339
+ } | {
1340
+ nodeType: "streaming-input-v1";
1341
+ data: UploadFile;
1342
+ nodeId: string;
1343
+ timestamp: string;
1344
+ };
1345
+ /**
1346
+ * Custom typed output for user-defined node types.
1347
+ *
1348
+ * Custom outputs require type guards for type narrowing:
1349
+ * ```typescript
1350
+ * if (isThumbnailOutput(output)) {
1351
+ * output.data.width // ✅ Type guard narrows data to ThumbnailOutput
1352
+ * }
1353
+ * ```
1354
+ *
1355
+ * @template T - The TypeScript type of the output data
1356
+ */
1357
+ type CustomTypedOutput<T = unknown> = {
1358
+ nodeType?: string;
1359
+ data: T;
1360
+ nodeId: string;
1361
+ timestamp: string;
1362
+ };
1363
+ /**
1364
+ * Typed output structure from a flow node.
1365
+ *
1366
+ * This is a discriminated union that provides automatic type narrowing for
1367
+ * built-in types while maintaining extensibility for custom types.
1368
+ *
1369
+ * @template T - The TypeScript type of the output data (for custom outputs)
1370
+ *
1371
+ * @property nodeId - Node instance ID that produced this output
1372
+ * @property nodeType - Type ID from the registry (e.g., "storage-output-v1")
1373
+ * @property data - The actual output data from the node
1374
+ * @property timestamp - ISO 8601 timestamp when the result was produced
1375
+ *
1376
+ * @remarks
1377
+ * **Built-in types (automatic narrowing):**
1378
+ * - `storage-output-v1` - Storage node output (UploadFile)
+ * - `streaming-input-v1` - Streaming input node output (UploadFile)
1379
+ *
1380
+ * Use switch statements for automatic narrowing:
1381
+ * ```typescript
1382
+ * for (const output of state.flowOutputs) {
1383
+ * switch (output.nodeType) {
1384
+ * case 'storage-output-v1':
1385
+ * // ✅ output.data is automatically UploadFile
1386
+ * console.log(output.data.url);
1387
+ * break;
1388
+ * }
1389
+ * }
1390
+ * ```
1391
+ *
1392
+ * **Custom types (require type guards):**
1393
+ * ```typescript
1394
+ * import { isThumbnailOutput } from './type-guards';
1395
+ *
1396
+ * if (isThumbnailOutput(output)) {
1397
+ * // ✅ Type guard narrows output.data to ThumbnailOutput
1398
+ * console.log(output.data.width);
1399
+ * }
1400
+ * ```
1401
+ *
1402
+ * **Untyped nodes (backward compatible):**
1403
+ * ```typescript
1404
+ * const untypedOutput: TypedOutput = {
1405
+ * nodeId: "custom-node-1",
1406
+ * data: { custom: "data" },
1407
+ * timestamp: "2024-01-15T10:30:00Z"
1408
+ * };
1409
+ * ```
1410
+ *
1411
+ * @example
1412
+ * ```typescript
1413
+ * // Storage output result (built-in, automatic narrowing)
1414
+ * const output: TypedOutput = {
1415
+ * nodeId: "storage-1",
1416
+ * nodeType: "storage-output-v1",
1417
+ * data: {
1418
+ * id: "file-123",
1419
+ * url: "https://cdn.example.com/file.jpg",
1420
+ * size: 1024000,
1421
+ * // ... rest of UploadFile
1422
+ * },
1423
+ * timestamp: "2024-01-15T10:30:00Z"
1424
+ * };
1425
+ *
1426
+ * // Custom output (requires type guard)
1427
+ * const thumbnailOutput: TypedOutput<ThumbnailOutput> = {
1428
+ * nodeId: "thumbnail-1",
1429
+ * nodeType: "thumbnail-output-v1",
1430
+ * data: {
1431
+ * url: "https://cdn.example.com/thumb.jpg",
1432
+ * width: 200,
1433
+ * height: 200,
1434
+ * format: "webp",
1435
+ * },
1436
+ * timestamp: "2024-01-15T10:30:00Z"
1437
+ * };
1438
+ * ```
1439
+ */
1440
+ type TypedOutput<T = unknown> = BuiltInTypedOutput | CustomTypedOutput<T>;
1441
+ /**
1442
+ * Helper type to extract a specific built-in output by nodeType.
1443
+ *
1444
+ * This enables type-safe narrowing when you know the nodeType:
1445
+ * ```typescript
1446
+ * if (output.nodeType === "storage-output-v1") {
1447
+ * const narrowed = output as NarrowTypedOutput<"storage-output-v1">;
1448
+ * narrowed.data.url; // ✅ TypeScript knows data is UploadFile
1449
+ * }
1450
+ * ```
1451
+ */
1452
+ type NarrowTypedOutput<NodeType extends BuiltInNodeType> = Extract<BuiltInTypedOutput, {
1453
+ nodeType: NodeType;
1454
+ }>;
1455
+ /**
1456
+ * Type guard function signature for narrowing TypedOutput.
1457
+ *
1458
+ * @template NodeType - The built-in node type to narrow to
1459
+ */
1460
+ type TypedOutputGuard<NodeType extends BuiltInNodeType> = (output: TypedOutput) => output is NarrowTypedOutput<NodeType>;
1461
+ /**
1462
+ * Creates a type-safe type guard for a built-in node type.
1463
+ *
1464
+ * @example
1465
+ * ```typescript
1466
+ * const isStorageV1 = createOutputGuard("storage-output-v1");
1467
+ *
1468
+ * for (const output of outputs) {
1469
+ * if (isStorageV1(output)) {
1470
+ * output.data.url; // ✅ TypeScript knows data is UploadFile
1471
+ * }
1472
+ * }
1473
+ * ```
1474
+ */
1475
+ declare const createOutputGuard: <NodeType extends BuiltInNodeType>(nodeType: NodeType) => TypedOutputGuard<NodeType>;
1476
+ /**
1477
+ * Pre-built type guard for storage-output-v1.
1478
+ */
1479
+ declare const isStorageOutputV1: TypedOutputGuard<"storage-output-v1">;
1480
+ /**
1481
+ * Pre-built type guard for streaming-input-v1.
1482
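+ *
+ * @example
+ * ```typescript
+ * // Sketch; `output` is a hypothetical TypedOutput value
+ * if (isStreamingInputV1(output)) {
+ *   output.data.id; // ✅ TypeScript knows data is UploadFile
+ * }
+ * ```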
+ */
1483
+ declare const isStreamingInputV1: TypedOutputGuard<"streaming-input-v1">;
1484
+ /**
1485
+ * Result of a node execution - either complete or waiting for more data.
1486
+ *
1487
+ * @template TOutput - Type of the node's output data
1488
+ *
1489
+ * @remarks
1490
+ * Nodes can return "waiting" to pause flow execution when they need additional
1491
+ * data (e.g., chunked uploads, external service responses). The flow can be
1492
+ * resumed later with the missing data.
1493
+ *
1494
+ * Results now include optional type information (`nodeType` and `nodeId`) to
1495
+ * enable type-safe result consumption. These fields are automatically added
1496
+ * by the node execution wrapper when a node is created with an `outputTypeId`.
1497
+ *
1498
+ * @example
1499
+ * ```typescript
1500
+ * // Node completes immediately with type information
1501
+ * return completeNodeExecution({ processedData });
1502
+ * // Result will be wrapped with: { type: "complete", data, nodeType, nodeId }
1503
+ *
1504
+ * // Node waits for more chunks
1505
+ * if (needsMoreData) {
1506
+ * return waitingNodeExecution({ receivedChunks: 3, totalChunks: 10 });
1507
+ * }
1508
+ * ```
1509
+ */
1510
+ type NodeExecutionResult<TOutput> = {
1511
+ type: "complete";
1512
+ data: TOutput;
1513
+ nodeType?: string;
1514
+ nodeId?: string;
1515
+ } | {
1516
+ type: "waiting";
1517
+ partialData?: unknown;
1518
+ nodeType?: string;
1519
+ nodeId?: string;
1520
+ };
1521
+ /**
1522
+ * Helper function to create a complete node execution result.
1523
+ *
1524
+ * @template TOutput - Type of the output data
1525
+ * @param data - The output data from the node
1526
+ * @returns A complete execution result
1527
+ *
1528
+ * @example
1529
+ * ```typescript
1530
+ * return completeNodeExecution({
1531
+ * url: uploadedFile.url,
1532
+ * size: uploadedFile.size
1533
+ * });
1534
+ * ```
1535
+ */
1536
+ declare const completeNodeExecution: <TOutput>(data: TOutput) => {
1537
+ type: "complete";
1538
+ data: TOutput;
1539
+ };
1540
+ /**
1541
+ * Helper function to create a waiting node execution result.
1542
+ *
1543
+ * @param partialData - Optional partial data available so far
1544
+ * @returns A waiting execution result that pauses the flow
1545
+ *
1546
+ * @example
1547
+ * ```typescript
1548
+ * // Wait for more upload chunks
1549
+ * return waitingNodeExecution({
1550
+ * receivedBytes: currentSize,
1551
+ * totalBytes: expectedSize
1552
+ * });
1553
+ * ```
1554
+ */
1555
+ declare const waitingNodeExecution: (partialData?: unknown) => {
1556
+ type: "waiting";
1557
+ partialData: unknown;
1558
+ };
1559
+ /**
1560
+ * A flow node represents a single processing step in the DAG.
1561
+ *
1562
+ * Nodes are the building blocks of flows. Each node has typed inputs/outputs,
1563
+ * execution logic, and optional features like conditions, retries, and pausing.
1564
+ *
1565
+ * @template TInput - Type of data the node accepts
1566
+ * @template TOutput - Type of data the node produces
1567
+ * @template TError - Type of errors the node can throw
1568
+ *
1569
+ * @property inputSchema - Zod schema for validating input data
1570
+ * @property outputSchema - Zod schema for validating output data
1571
+ * @property run - Effect-based execution function
1572
+ * @property condition - Optional conditional execution rule
1573
+ * @property multiInput - Whether node accepts multiple inputs (default: false)
1574
+ * @property multiOutput - Whether node produces multiple outputs (default: false)
1575
+ * @property pausable - Whether node can pause execution (default: false)
1576
+ * @property retry - Optional retry configuration
1577
+ *
1578
+ * @remarks
1579
+ * - Nodes use Effect for composable error handling and dependency injection
1580
+ * - Input/output schemas ensure type safety at runtime
1581
+ * - Conditions are evaluated before execution
1582
+ * - Retry logic supports exponential backoff
1583
+ * - Pausable nodes can halt flow execution and resume later
1584
+ *
1585
+ * @example
1586
+ * ```typescript
1587
+ * const resizeNode: FlowNode<InputFile, UploadFile> = {
1588
+ * id: "resize",
1589
+ * name: "Resize Image",
1590
+ * description: "Resize image to specified dimensions",
1591
+ * type: NodeType.transform,
1592
+ * inputSchema: inputFileSchema,
1593
+ * outputSchema: uploadFileSchema,
1594
+ * run: ({ data, storageId }) => Effect.gen(function* () {
1595
+ * const resized = yield* resizeImage(data, { width: 1920, height: 1080 });
1596
+ * return completeNodeExecution(resized);
1597
+ * }),
1598
+ * retry: {
1599
+ * maxRetries: 3,
1600
+ * retryDelay: 1000,
1601
+ * exponentialBackoff: true
1602
+ * }
1603
+ * };
1604
+ * ```
1605
+ *
1606
+ * @see {@link NodeExecutionResult} for return type options
1607
+ * @see {@link FlowCondition} for conditional execution
1608
+ */
1609
+ type FlowNode<TInput = unknown, TOutput = unknown, TError = UploadistaError> = FlowNodeData & {
1610
+ inputSchema: z.ZodSchema<TInput>;
1611
+ outputSchema: z.ZodSchema<TOutput>;
1612
+ run: (args: {
1613
+ data: TInput;
1614
+ jobId: string;
1615
+ storageId: string;
1616
+ flowId: string;
1617
+ inputs?: Record<string, unknown>;
1618
+ clientId: string | null;
1619
+ }) => Effect.Effect<NodeExecutionResult<TOutput>, TError>;
1620
+ condition?: {
1621
+ field: string;
1622
+ operator: string;
1623
+ value: unknown;
1624
+ };
1625
+ multiInput?: boolean;
1626
+ multiOutput?: boolean;
1627
+ pausable?: boolean;
1628
+ retry?: {
1629
+ maxRetries?: number;
1630
+ retryDelay?: number;
1631
+ exponentialBackoff?: boolean;
1632
+ };
+ /** Circuit breaker configuration for this node (overrides flow defaults) */
+ circuitBreaker?: FlowCircuitBreakerConfig;
1634
+ };
1635
+ /**
1636
+ * Represents a directed edge connecting two nodes in the flow graph.
1637
+ *
1638
+ * Edges define the data flow direction and can specify ports for multi-input/output nodes.
1639
+ *
1640
+ * @property source - ID of the source node
1641
+ * @property target - ID of the target node
1642
+ * @property sourcePort - Optional output port name for multi-output nodes
1643
+ * @property targetPort - Optional input port name for multi-input nodes
1644
+ *
1645
+ * @remarks
1646
+ * - Edges must not create cycles (DAG constraint)
1647
+ * - Source node's output type should be compatible with target node's input type
1648
+ * - Ports allow routing specific outputs to specific inputs
1649
+ *
1650
+ * @example
1651
+ * ```typescript
1652
+ * // Simple edge
1653
+ * const edge: FlowEdge = {
1654
+ * source: "resize-node",
1655
+ * target: "optimize-node"
1656
+ * };
1657
+ *
1658
+ * // Edge with ports (for multiplex nodes)
1659
+ * const multiplexEdge: FlowEdge = {
1660
+ * source: "multiplex-node",
1661
+ * target: "output-node",
1662
+ * sourcePort: "image",
1663
+ * targetPort: "primary"
1664
+ * };
1665
+ * ```
1666
+ */
1667
+ type FlowEdge = {
1668
+ source: string;
1669
+ target: string;
1670
+ sourcePort?: string;
1671
+ targetPort?: string;
1672
+ };
1673
+ /**
1674
+ * Function type for checking schema compatibility between nodes.
1675
+ *
1676
+ * @param from - Source node's output schema
1677
+ * @param to - Target node's input schema
1678
+ * @returns true if schemas are compatible
1679
+ *
1680
+ * @remarks
1681
+ * Custom type checkers can implement more sophisticated compatibility rules
1682
+ * than the default checker.
1683
+ *
1684
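+ * @example
+ * ```typescript
+ * // A permissive checker (illustrative sketch): accept every connection
+ * // and rely on runtime schema validation instead
+ * const permissiveChecker: TypeCompatibilityChecker = (_from, _to) => true;
+ * ```
+ *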
+ * @see {@link FlowTypeValidator} for the default implementation
1685
+ */
1686
+ type TypeCompatibilityChecker = (from: z.ZodSchema<any>, to: z.ZodSchema<any>) => boolean;
1687
+ /**
1688
+ * Interface for validating node connections and schema compatibility.
1689
+ *
1690
+ * @remarks
1691
+ * Validators ensure that connected nodes have compatible types,
1692
+ * preventing runtime type errors in flow execution.
1693
+ *
1694
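+ * @example
+ * ```typescript
+ * // Sketch; `validator`, `resizeNode`, and `optimizeNode` are hypothetical
+ * const ok = validator.validateConnection(resizeNode, optimizeNode, {
+ *   source: "resize",
+ *   target: "optimize"
+ * });
+ * ```
+ *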
+ * @see {@link FlowTypeValidator} for the implementation
1695
+ */
1696
+ type NodeConnectionValidator = {
1697
+ validateConnection: (sourceNode: FlowNode<any, any>, targetNode: FlowNode<any, any>, edge: FlowEdge) => boolean;
1698
+ getCompatibleTypes: (sourceSchema: z.ZodSchema<any>, targetSchema: z.ZodSchema<any>) => boolean;
1699
+ };
1700
+ /**
1701
+ * Fallback behavior when circuit is open.
1702
+ *
1703
+ * - `fail`: Fail immediately with CIRCUIT_BREAKER_OPEN error (default)
1704
+ * - `skip`: Skip node, pass input through as output
1705
+ * - `default`: Return a configured default value
1706
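+ *
+ * @example
+ * ```typescript
+ * // Sketch: skip the failing node and pass its input through unchanged
+ * const fallback: FlowCircuitBreakerFallback = { type: "skip", passThrough: true };
+ * ```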
+ */
1707
+ type FlowCircuitBreakerFallback = {
1708
+ type: "fail";
1709
+ } | {
1710
+ type: "skip";
1711
+ passThrough: true;
1712
+ } | {
1713
+ type: "default";
1714
+ value: unknown;
1715
+ };
1716
+ /**
1717
+ * Configuration for a circuit breaker on a flow or node.
1718
+ *
1719
+ * @property enabled - Whether circuit breaker is active (default: false for backward compatibility)
1720
+ * @property failureThreshold - Number of failures within window to trip circuit (default: 5)
1721
+ * @property resetTimeout - Milliseconds to wait in open state before half-open (default: 30000)
1722
+ * @property halfOpenRequests - Number of successful requests in half-open to close (default: 3)
1723
+ * @property windowDuration - Sliding window duration in milliseconds (default: 60000)
1724
+ * @property fallback - Behavior when circuit is open
1725
+ *
1726
+ * @example
1727
+ * ```typescript
1728
+ * const config: FlowCircuitBreakerConfig = {
1729
+ * enabled: true,
1730
+ * failureThreshold: 5,
1731
+ * resetTimeout: 30000,
1732
+ * halfOpenRequests: 3,
1733
+ * windowDuration: 60000,
1734
+ * fallback: { type: "fail" }
1735
+ * };
1736
+ * ```
1737
+ */
1738
+ interface FlowCircuitBreakerConfig {
1739
+ /** Whether circuit breaker is active (default: false) */
1740
+ enabled?: boolean;
1741
+ /** Number of failures within window to trip circuit (default: 5) */
1742
+ failureThreshold?: number;
1743
+ /** Milliseconds to wait in open state before half-open (default: 30000) */
1744
+ resetTimeout?: number;
1745
+ /** Number of successful requests in half-open to close (default: 3) */
1746
+ halfOpenRequests?: number;
1747
+ /** Sliding window duration in milliseconds (default: 60000) */
1748
+ windowDuration?: number;
1749
+ /** Behavior when circuit is open */
1750
+ fallback?: FlowCircuitBreakerFallback;
1751
+ }
1752
+ /**
1753
+ * Configuration for Dead Letter Queue on a flow.
1754
+ *
1755
+ * When enabled, failed flow jobs are captured in the DLQ for later retry,
1756
+ * debugging, or manual intervention.
1757
+ *
1758
+ * @property enabled - Whether DLQ is enabled for this flow (default: true when service is provided)
1759
+ * @property retryPolicy - Retry policy configuration for automatic retries
1760
+ *
1761
+ * @example
1762
+ * ```typescript
1763
+ * // Enable DLQ with custom retry policy
1764
+ * const flowConfig = {
1765
+ * flowId: "image-pipeline",
1766
+ * deadLetterQueue: {
1767
+ * enabled: true,
1768
+ * retryPolicy: {
1769
+ * enabled: true,
1770
+ * maxRetries: 5,
1771
+ * backoff: {
1772
+ * type: "exponential",
1773
+ * initialDelayMs: 1000,
1774
+ * maxDelayMs: 60000,
1775
+ * multiplier: 2,
1776
+ * jitter: true
1777
+ * },
1778
+ * nonRetryableErrors: ["VALIDATION_ERROR"]
1779
+ * }
1780
+ * }
1781
+ * };
1782
+ *
1783
+ * // Disable DLQ for best-effort flows
1784
+ * const bestEffortFlow = {
1785
+ * flowId: "analytics-pipeline",
1786
+ * deadLetterQueue: {
1787
+ * enabled: false
1788
+ * }
1789
+ * };
1790
+ * ```
1791
+ */
1792
+ interface FlowDeadLetterQueueConfig {
1793
+ /** Whether DLQ is enabled for this flow (default: true when service is provided) */
1794
+ enabled?: boolean;
1795
+ /** Retry policy configuration for automatic retries */
1796
+ retryPolicy?: RetryPolicy;
1797
+ }
1798
+ /**
1799
+ * Configuration object for creating a new flow.
1800
+ *
1801
+ * FlowConfig defines all aspects of a flow including its nodes, connections,
1802
+ * schemas, and optional features like event handlers and parallel execution.
1803
+ *
1804
+ * @template TFlowInputSchema - Zod schema for flow inputs
1805
+ * @template TFlowOutputSchema - Zod schema for flow outputs
1806
+ * @template TNodeError - Union of possible errors from node Effects
1807
+ * @template TNodeRequirements - Union of requirements from node Effects
1808
+ *
1809
+ * @property flowId - Unique identifier for the flow
1810
+ * @property name - Human-readable flow name
1811
+ * @property nodes - Array of nodes (can be plain nodes or Effects resolving to nodes)
1812
+ * @property edges - Array of edges connecting the nodes
1813
+ * @property inputSchema - Zod schema for validating flow inputs
1814
+ * @property outputSchema - Zod schema for validating flow outputs
1815
+ * @property typeChecker - Optional custom type compatibility checker
1816
+ * @property onEvent - Optional event handler for monitoring execution
1817
+ * @property parallelExecution - Optional parallel execution configuration
1818
+ *
1819
+ * @remarks
1820
+ * - Nodes can be provided as plain objects or Effect-wrapped for lazy initialization
1821
+ * - Event handlers receive all flow and node events
1822
+ * - Parallel execution is experimental and disabled by default
1823
+ * - Type checker allows custom schema compatibility rules
1824
+ *
1825
+ * @example
1826
+ * ```typescript
1827
+ * const config: FlowConfig<
1828
+ * z.ZodObject<{ file: z.ZodType<File> }>,
1829
+ * z.ZodType<UploadFile>,
1830
+ * never,
1831
+ * never
1832
+ * > = {
1833
+ * flowId: "image-upload",
1834
+ * name: "Image Upload Pipeline",
1835
+ * nodes: [inputNode, resizeNode, optimizeNode, storageNode],
1836
+ * edges: [
1837
+ * { source: "input", target: "resize" },
1838
+ * { source: "resize", target: "optimize" },
1839
+ * { source: "optimize", target: "storage" }
1840
+ * ],
1841
+ * inputSchema: z.object({ file: z.instanceof(File) }),
1842
+ * outputSchema: uploadFileSchema,
1843
+ * onEvent: (event) => Effect.gen(function* () {
1844
+ * yield* logEvent(event);
1845
+ * return { eventId: event.jobId };
1846
+ * })
1847
+ * };
1848
+ * ```
1849
+ *
1850
+ * @see {@link createFlowWithSchema} for creating flows from config
1851
+ * @see {@link FlowNode} for node specifications
1852
+ * @see {@link FlowEdge} for edge specifications
1853
+ */
1854
+ type FlowConfig<TFlowInputSchema extends z.ZodSchema<any>, TFlowOutputSchema extends z.ZodSchema<any>, TNodeError = never, TNodeRequirements = never> = {
1855
+ flowId: string;
1856
+ name: string;
1857
+ nodes: Array<FlowNode<any, any, UploadistaError> | Effect.Effect<FlowNode<any, any, UploadistaError>, TNodeError, TNodeRequirements>>;
1858
+ edges: FlowEdge[];
1859
+ inputSchema: TFlowInputSchema;
1860
+ outputSchema: TFlowOutputSchema;
1861
+ typeChecker?: TypeCompatibilityChecker;
1862
+ onEvent?: (event: FlowEvent) => Effect.Effect<{
1863
+ eventId: string | null;
1864
+ }, UploadistaError>;
1865
+ checkJobStatus?: (jobId: string) => Effect.Effect<"running" | "paused" | "cancelled", UploadistaError>;
1866
+ parallelExecution?: {
1867
+ enabled?: boolean;
1868
+ maxConcurrency?: number;
1869
+ };
1870
+ /**
1871
+ * Circuit breaker configuration for the flow.
1872
+ *
1873
+ * When enabled, the circuit breaker monitors node execution failures and
1874
+ * automatically prevents requests to failing services, protecting against
1875
+ * cascade failures.
1876
+ *
1877
+ * @example
1878
+ * ```typescript
1879
+ * circuitBreaker: {
1880
+ * defaults: {
1881
+ * enabled: true,
1882
+ * failureThreshold: 5,
1883
+ * resetTimeout: 30000
1884
+ * },
1885
+ * nodeTypeOverrides: {
1886
+ * "virus-scan": { failureThreshold: 3 }
1887
+ * }
1888
+ * }
1889
+ * ```
1890
+ */
1891
+ circuitBreaker?: {
1892
+ /** Default circuit breaker config for all nodes */
+ defaults?: FlowCircuitBreakerConfig;
+ /** Override circuit breaker config per node type */
+ nodeTypeOverrides?: Record<string, FlowCircuitBreakerConfig>;
1894
+ };
1895
+ /**
1896
+ * Dead Letter Queue configuration for the flow.
1897
+ *
1898
+ * When enabled, failed jobs are captured in the DLQ for later retry,
1899
+ * debugging, or manual intervention.
1900
+ *
1901
+ * @example
1902
+ * ```typescript
1903
+ * deadLetterQueue: {
1904
+ * enabled: true,
1905
+ * retryPolicy: {
1906
+ * enabled: true,
1907
+ * maxRetries: 5,
1908
+ * backoff: {
1909
+ * type: "exponential",
1910
+ * initialDelayMs: 1000,
1911
+ * maxDelayMs: 60000,
1912
+ * multiplier: 2,
1913
+ * jitter: true
1914
+ * }
1915
+ * }
1916
+ * }
1917
+ * ```
1918
+ */
1919
+ deadLetterQueue?: FlowDeadLetterQueueConfig;
1920
+ hooks?: {
1921
+ /**
1922
+ * Called when a sink node (terminal node with no outgoing edges) produces an output.
1923
+ * This hook runs after auto-persistence for UploadFile outputs.
1924
+ *
1925
+ * Use this hook to perform additional post-processing such as:
1926
+ * - Saving output metadata to a database
1927
+ * - Tracking outputs in external systems
1928
+ * - Adding custom metadata to outputs
1929
+ * - Triggering downstream workflows
1930
+ *
1931
+ * The hook receives the output and context, and can optionally modify
1932
+ * and return the output (e.g., adding metadata fields).
1933
+ *
1934
+ * **Important**: The hook must not have any service requirements (Effect requirements must be `never`).
1935
+ * All necessary services should be captured in the closure when defining the hook.
1936
+ *
1937
+ * @param context - Output context including the output data, node ID, flow ID, etc.
1938
+ * @returns Effect or Promise that resolves to the (optionally modified) output
1939
+ *
1940
+ * @example
1941
+ * ```typescript
1942
+ * // Using Effect
1943
+ * hooks: {
1944
+ * onNodeOutput: ({ output, nodeId, flowId }) =>
1945
+ * Effect.gen(function* () {
1946
+ * // Save to database
1947
+ * yield* Effect.promise(() => db.save(output));
1948
+ * // Return output with additional metadata
1949
+ * return { ...output, metadata: { ...output.metadata, tracked: true } };
1950
+ * })
1951
+ * }
1952
+ *
1953
+ * // Using Promise (simpler for most users)
1954
+ * hooks: {
1955
+ * onNodeOutput: async ({ output, nodeId, flowId }) => {
1956
+ * // Save to database
1957
+ * await db.save(output);
1958
+ * // Return output with additional metadata
1959
+ * return { ...output, metadata: { ...output.metadata, tracked: true } };
1960
+ * }
1961
+ * }
1962
+ * ```
1963
+ */
1964
+ onNodeOutput?: <TOutput>(context: {
1965
+ output: TOutput;
1966
+ nodeId: string;
1967
+ flowId: string;
1968
+ jobId: string;
1969
+ storageId: string;
1970
+ clientId: string | null;
1971
+ }) => Effect.Effect<TOutput, UploadistaError, never> | Promise<TOutput>;
1972
+ };
1973
+ };
1974
+ /**
1975
+ * Context provided to file naming functions and templates.
1976
+ *
1977
+ * Contains all relevant information about the current file, node, and flow
1978
+ * execution that can be used to generate dynamic file names.
1979
+ *
1980
+ * @property baseName - Filename without extension (e.g., "photo" from "photo.jpg")
1981
+ * @property extension - File extension without dot (e.g., "jpg")
1982
+ * @property fileName - Full original filename (e.g., "photo.jpg")
1983
+ * @property nodeType - Type of processing node (e.g., "resize", "optimize")
1984
+ * @property nodeId - Specific node instance ID
1985
+ * @property flowId - Flow identifier
1986
+ * @property jobId - Execution job ID
1987
+ * @property timestamp - ISO 8601 timestamp of processing
1988
+ * @property width - Output width (image/video nodes only)
1989
+ * @property height - Output height (image/video nodes only)
1990
+ * @property format - Output format (e.g., "webp", "mp4")
1991
+ * @property quality - Quality setting (e.g., 80)
1992
+ *
1993
+ * @example
1994
+ * ```typescript
1995
+ * // Available in templates as {{variable}}
1996
+ * const pattern = "{{baseName}}-{{width}}x{{height}}.{{extension}}";
1997
+ * // Result: "photo-800x600.jpg"
1998
+ * ```
1999
+ */
2000
+ type NamingContext = {
2001
+ /** Filename without extension */
+ baseName: string;
+ /** File extension without dot */
+ extension: string;
+ /** Full original filename */
+ fileName: string;
+ /** Type of processing node */
+ nodeType: string;
+ /** Specific node instance ID */
+ nodeId: string;
+ /** Flow identifier */
+ flowId: string;
+ /** Execution job ID */
+ jobId: string;
+ /** ISO 8601 timestamp of processing */
+ timestamp: string;
+ /** Output width (image/video nodes) */
+ width?: number;
+ /** Output height (image/video nodes) */
+ height?: number;
+ /** Output format */
+ format?: string;
+ /** Quality setting */
+ quality?: number;
+ /** Page number (document nodes) */
+ pageNumber?: number;
+ /** Additional custom variables */
+ [key: string]: string | number | undefined;
2015
+ };
2016
+ /**
2017
+ * Function type for custom file naming logic.
2018
+ *
2019
+ * @param file - The UploadFile being processed
2020
+ * @param context - Naming context with all available variables
2021
+ * @returns The new filename (including extension)
2022
+ *
2023
+ * @example
2024
+ * ```typescript
2025
+ * const customRename: FileNamingFunction = (file, ctx) =>
2026
+ * `${ctx.flowId}-${ctx.baseName}-${ctx.timestamp}.${ctx.extension}`;
2027
+ * ```
2028
+ */
2029
+ type FileNamingFunction = (file: UploadFile, context: NamingContext) => string;
2030
+ /**
2031
+ * Function type for generating auto-naming suffixes.
2032
+ *
2033
+ * Each node type can define its own auto suffix generator that creates
2034
+ * a descriptive suffix based on the processing parameters.
2035
+ *
2036
+ * @param context - Naming context with all available variables
2037
+ * @returns The suffix to append (without leading dash)
2038
+ *
2039
+ * @example
2040
+ * ```typescript
2041
+ * // Resize node auto suffix
2042
+ * const resizeAutoSuffix: AutoNamingSuffixGenerator = (ctx) =>
2043
+ * `${ctx.width}x${ctx.height}`;
2044
+ * // Result: "photo-800x600.jpg"
2045
+ *
2046
+ * // Optimize node auto suffix
2047
+ * const optimizeAutoSuffix: AutoNamingSuffixGenerator = (ctx) =>
2048
+ * ctx.format ?? 'optimized';
2049
+ * // Result: "photo-webp.webp"
2050
+ * ```
2051
+ */
2052
+ type AutoNamingSuffixGenerator = (context: NamingContext) => string;
2053
+ /**
2054
+ * Configuration for file naming behavior on a node.
2055
+ *
2056
+ * Supports three modes:
2057
+ * - `undefined` or no config: Preserve original filename (backward compatible)
2058
+ * - `mode: 'auto'`: Generate smart suffix based on node type
2059
+ * - `mode: 'custom'`: Use template pattern or rename function
2060
+ *
2061
+ * @property mode - Naming mode: 'auto' for smart suffixes, 'custom' for templates/functions
2062
+ * @property pattern - Mustache-style template string (for custom mode)
2063
+ * @property rename - Custom function for full control (for custom mode, SDK only)
2064
+ * @property autoSuffix - Generator function for auto mode suffix
2065
+ *
2066
+ * @example
2067
+ * ```typescript
2068
+ * // Auto mode with smart suffix
2069
+ * const autoNaming: FileNamingConfig = {
2070
+ * mode: 'auto',
2071
+ * autoSuffix: (ctx) => `${ctx.width}x${ctx.height}`
2072
+ * };
2073
+ *
2074
+ * // Custom mode with template
2075
+ * const templateNaming: FileNamingConfig = {
2076
+ * mode: 'custom',
2077
+ * pattern: '{{baseName}}-{{nodeType}}.{{extension}}'
2078
+ * };
2079
+ *
2080
+ * // Custom mode with function
2081
+ * const functionNaming: FileNamingConfig = {
2082
+ * mode: 'custom',
2083
+ * rename: (file, ctx) => `processed-${ctx.fileName}`
2084
+ * };
2085
+ * ```
2086
+ */
2087
+ type FileNamingConfig = {
2088
+ /** Naming mode: 'auto' for smart suffixes, 'custom' for templates/functions */
+ mode: "auto" | "custom";
+ /** Mustache-style template string (for custom mode) */
+ pattern?: string;
+ /** Custom function for full control (for custom mode, SDK only) */
+ rename?: FileNamingFunction;
+ /** Generator function for auto mode suffix */
+ autoSuffix?: AutoNamingSuffixGenerator;
2092
+ };
2093
+ //#endregion
2094
+ //#region src/flow/types/flow-job.d.ts
2095
+ /**
2096
+ * Status of an individual node within a flow job.
2097
+ *
2098
+ * Node tasks follow this lifecycle:
2099
+ * started → pending → running → (completed | paused | failed)
2100
+ */
2101
+ type FlowJobTaskStatus = "started" | "pending" | "running" | "completed" | "paused" | "failed";
2102
+ /**
2103
+ * Represents a single node's execution within a flow job.
2104
+ *
2105
+ * Tasks track individual node execution, storing results, errors, and retry information.
2106
+ * They allow monitoring of which nodes have completed and accessing intermediate results.
2107
+ *
2108
+ * @property nodeId - Unique identifier of the node this task represents
2109
+ * @property status - Current execution status of the node
2110
+ * @property result - Node execution result data (can be partial data if paused, or complete data if finished)
2111
+ * @property error - Error message if the node failed
2112
+ * @property retryCount - Number of retry attempts made before success or final failure
2113
+ * @property createdAt - When the task was created
2114
+ * @property updatedAt - When the task was last updated
2115
+ *
2116
+ * @remarks
2117
+ * The result field can contain:
2118
+ * - Partial/intermediate data when status is "paused" (unknown type)
2119
+ * - Complete data when status is "completed" (could be TypedOutput for output nodes)
2120
+ * - undefined when status is "pending", "running", "started", or "failed"
2121
+ *
2122
+ * For type-safe access to final outputs, use FlowJob.result instead, which contains
2123
+ * the array of TypedOutput from all output nodes.
2124
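+ *
+ * @example
+ * ```typescript
+ * // Sketch; `job` is a hypothetical FlowJob
+ * const failedTask = job.tasks.find((task) => task.status === "failed");
+ * if (failedTask) {
+ *   console.error(failedTask.nodeId, failedTask.error);
+ * }
+ * ```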
+ */
2125
+ type FlowJobTask = {
2126
+ nodeId: string;
2127
+ status: FlowJobTaskStatus;
2128
+ result?: unknown;
2129
+ error?: string;
2130
+ retryCount?: number;
2131
+ createdAt: Date;
2132
+ updatedAt: Date;
2133
+ };
2134
+ /**
2135
+ * Represents a flow execution job with full state tracking.
2136
+ *
2137
+ * Jobs are created when a flow is executed and track the entire execution lifecycle.
2138
+ * They store node results, handle paused states, and manage cleanup of intermediate files.
2139
+ *
2140
+ * @property id - Unique job identifier (UUID)
2141
+ * @property flowId - The flow being executed
2142
+ * @property storageId - Storage location for file outputs
2143
+ * @property clientId - Client that initiated the job (for authorization)
2144
+ * @property status - Overall job status
2145
+ * @property createdAt - When the job was created
2146
+ * @property updatedAt - When the job was last updated
2147
+ * @property tasks - Array of node execution tasks
2148
+ * @property error - Error message if the job failed
2149
+ * @property endedAt - When the job completed or failed
2150
+ * @property result - Array of typed outputs from all output nodes (only set when completed)
2151
+ * @property pausedAt - Node ID where execution is paused (if applicable)
2152
+ * @property executionState - State needed to resume a paused flow
2153
+ * @property intermediateFiles - File IDs to cleanup after completion
2154
+ *
2155
+ * @remarks
2156
+ * - Jobs can be paused at nodes that return `{ type: "waiting" }`
2157
+ * - Paused jobs store execution state and can be resumed with new data
2158
+ * - Intermediate files from non-output nodes are automatically cleaned up
2159
+ * - Tasks are updated as nodes progress through their lifecycle
2160
+ * - The result field now contains an array of TypedOutput for all output nodes
2161
+ *
2162
+ * @example
2163
+ * ```typescript
2164
+ * // Create and monitor a job
2165
+ * const job = yield* flowServer.runFlow({
2166
+ * flowId: "image-pipeline",
2167
+ * storageId: "storage-1",
2168
+ * inputs: { input: myFile }
2169
+ * });
2170
+ *
2171
+ * // Poll for status
2172
+ * const status = yield* flowServer.getJobStatus(job.id);
2173
+ * if (status.status === "completed") {
2174
+ * // Access typed outputs
2175
+ * console.log("Outputs:", status.result);
2176
+ * for (const output of status.result || []) {
2177
+ * console.log(`${output.nodeId} (${output.nodeType}):`, output.data);
2178
+ * }
2179
+ * } else if (status.status === "paused") {
2180
+ * // Resume with additional data
2181
+ * yield* flowServer.resumeFlow({
2182
+ * jobId: job.id,
2183
+ * nodeId: status.pausedAt,
2184
+ * newData: additionalChunk
2185
+ * });
2186
+ * }
2187
+ * ```
2188
+ */
2189
+ /**
2190
+ * Trace context for distributed tracing.
2191
+ * Allows flow operations to be linked under a single trace.
2192
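+ *
+ * @example
+ * ```typescript
+ * // Sketch using the example identifiers from the W3C Trace Context spec
+ * const traceContext: FlowJobTraceContext = {
+ *   traceId: "4bf92f3577b34da6a3ce929d0e0e4736",
+ *   spanId: "00f067aa0ba902b7",
+ *   traceFlags: 1 // sampled
+ * };
+ * ```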
+ */
2193
+ type FlowJobTraceContext = {
2194
+ /** 128-bit trace identifier (32 hex characters) */
+ traceId: string;
+ /** 64-bit span identifier (16 hex characters) */
+ spanId: string;
+ /** Trace flags (1 = sampled) */
+ traceFlags: number;
2197
+ };
2198
+ type FlowJob = {
2199
+ id: string;
2200
+ flowId: string;
2201
+ storageId: string;
2202
+ clientId: string | null;
2203
+ status: FlowJobStatus;
2204
+ createdAt: Date;
2205
+ updatedAt: Date;
2206
+ tasks: FlowJobTask[];
2207
+ error?: string;
2208
+ endedAt?: Date;
2209
+ result?: TypedOutput[];
2210
+ pausedAt?: string;
2211
+ executionState?: {
2212
+ executionOrder: string[];
2213
+ currentIndex: number;
2214
+ inputs: Record<string, unknown>;
2215
+ };
2216
+ intermediateFiles?: string[];
2217
+ /**
2218
+ * Active upload IDs for in-progress multipart uploads.
2219
+ * These are tracked separately from intermediateFiles because they may not
2220
+ * be complete yet. When a flow is cancelled during upload phase, these
2221
+ * uploads need to be aborted to clean up the multipart upload in storage.
2222
+ */
2223
+ activeUploads?: string[];
2224
+ /**
2225
+ * OpenTelemetry trace context for distributed tracing.
2226
+ * When set, all flow operations (node executions, uploads) will be
2227
+ * linked as children of this trace context. Enables end-to-end tracing
2228
+ * of flow executions in observability tools like Grafana Tempo.
2229
+ */
2230
+ traceContext?: FlowJobTraceContext;
2231
+ };
2232
+ /**
2233
+ * Overall status of a flow job.
2234
+ *
2235
+ * Job lifecycle: started → running → (completed | failed | cancelled)
2236
+ * Or with pauses: started → running → paused → running → (completed | failed | cancelled)
2237
+ * User actions: running → paused (via pauseFlow) or running → cancelled (via cancelFlow)
2238
+ */
2239
+ type FlowJobStatus = "pending" | "running" | "completed" | "failed" | "started" | "paused" | "cancelled";
2240
+ //#endregion
2241
+ //#region src/flow/types/flow-queue-item.d.ts
2242
+ /**
2243
+ * Flow Queue item types and configuration.
2244
+ *
2245
+ * A FlowQueueItem represents a queued flow execution request, tracking its
2246
+ * lifecycle from pending → running → completed | failed.
2247
+ *
2248
+ * @module flow/types/flow-queue-item
2249
+ * @see {@link FlowQueueService} for queue operations
2250
+ */
2251
+ /**
2252
+ * Status of a flow queue item.
2253
+ *
2254
+ * Item lifecycle: pending → running → completed | failed
2255
+ *
2256
+ * - `pending`: Waiting for a concurrency slot to become available
2257
+ * - `running`: Currently being executed by the flow engine
2258
+ * - `completed`: Flow execution finished successfully
2259
+ * - `failed`: Flow execution ended with an error
2260
+ */
2261
+ type FlowQueueItemStatus = "pending" | "running" | "completed" | "failed";
2262
+ /**
2263
+ * Represents a single queued flow execution request.
2264
+ *
2265
+ * FlowQueueItems are created when a caller enqueues a flow for execution.
2266
+ * The worker loop picks up pending items, transitions them to running, and
2267
+ * dispatches them to FlowEngine. On completion or failure the item is updated.
2268
+ *
2269
+ * @property id - Unique queue item identifier (UUID)
2270
+ * @property flowId - The flow definition to execute
2271
+ * @property storageId - Target storage for flow outputs
2272
+ * @property input - Original input payload passed to the flow
2273
+ * @property clientId - Client who initiated the request (null for anonymous)
2274
+ * @property status - Current lifecycle status of the queue item
2275
+ * @property dlqItemId - Set when this item is a DLQ retry; links back to the DLQ item
2276
+ * @property enqueuedAt - When the item was added to the queue
2277
+ * @property startedAt - When the worker started executing this item
2278
+ * @property completedAt - When execution finished (success or failure)
2279
+ * @property error - Error message recorded if status is "failed"
2280
+ *
2281
+ * @example
2282
+ * ```typescript
2283
+ * const item: FlowQueueItem = {
2284
+ * id: "q_abc123",
2285
+ * flowId: "image-resize-pipeline",
2286
+ * storageId: "s3-production",
2287
+ * input: { input: { uploadId: "upload_xyz" } },
2288
+ * clientId: "client_456",
2289
+ * status: "pending",
2290
+ * enqueuedAt: new Date(),
2291
+ * };
2292
+ * ```
2293
+ */
2294
+ interface FlowQueueItem {
2295
+ /** Unique queue item identifier (UUID) */
2296
+ id: string;
2297
+ /** The flow definition to execute */
2298
+ flowId: string;
2299
+ /** Target storage for flow outputs */
2300
+ storageId: string;
2301
+ /** Original input payload passed to the flow */
2302
+ input: unknown;
2303
+ /** Client who initiated the request (null for anonymous) */
2304
+ clientId: string | null;
2305
+ /** Current lifecycle status */
2306
+ status: FlowQueueItemStatus;
2307
+ /** Set when this is a DLQ retry; references the DLQ item for result correlation */
2308
+ dlqItemId?: string;
2309
+ /** When the item was added to the queue */
2310
+ enqueuedAt: Date;
2311
+ /** When the worker began executing this item */
2312
+ startedAt?: Date;
2313
+ /** When execution finished (success or failure) */
2314
+ completedAt?: Date;
2315
+ /** Error message if status is "failed" */
2316
+ error?: string;
2317
+ }
2318
+ /**
2319
+ * Aggregate statistics about the flow queue.
2320
+ *
2321
+ * Provides counts by status and concurrency information for monitoring.
2322
+ *
2323
+ * @property pending - Number of items waiting for a concurrency slot
2324
+ * @property running - Number of items currently being executed
2325
+ * @property completed - Number of items that finished successfully
2326
+ * @property failed - Number of items that ended with an error
2327
+ * @property maxConcurrency - Configured maximum simultaneous executions
2328
+ * @property currentConcurrency - Number of items currently running
2329
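+ *
+ * @example
+ * ```typescript
+ * // Sketch; `stats` is a hypothetical FlowQueueStats value
+ * const freeSlots = stats.maxConcurrency - stats.currentConcurrency;
+ * const backlog = stats.pending;
+ * ```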
+ */
2330
+ interface FlowQueueStats {
2331
+ /** Number of items waiting for a concurrency slot */
2332
+ pending: number;
2333
+ /** Number of items currently being executed */
2334
+ running: number;
2335
+ /** Number of items that finished successfully */
2336
+ completed: number;
2337
+ /** Number of items that ended with an error */
2338
+ failed: number;
2339
+ /** Configured maximum simultaneous executions */
2340
+ maxConcurrency: number;
2341
+ /** Number of items currently running */
2342
+ currentConcurrency: number;
2343
+ }
2344
+ /**
2345
+ * Configuration options for the FlowQueueService.
2346
+ *
2347
+ * All fields are optional; defaults are applied via DEFAULT_QUEUE_CONFIG.
2348
+ *
2349
+ * @property maxConcurrency - Maximum number of simultaneously running flows (default: 4)
2350
+ * @property dlqRetryIntervalMs - How often the DLQ retry loop fires in milliseconds (default: 30_000)
2351
+ * @property dlqRetryBatchSize - Maximum DLQ items processed per retry loop tick (default: 10)
2352
+ *
2353
+ * @example
2354
+ * ```typescript
2355
+ * const config: FlowQueueConfig = {
2356
+ * maxConcurrency: 8,
2357
+ * dlqRetryIntervalMs: 60_000, // check every 60 seconds
2358
+ * dlqRetryBatchSize: 5,
2359
+ * };
2360
+ * ```
2361
+ */
2362
+ interface FlowQueueConfig {
2363
+ /**
2364
+ * Maximum number of simultaneously running flows.
2365
+ * Flows beyond this limit remain pending until a slot opens.
2366
+ * @default 4
2367
+ */
2368
+ maxConcurrency?: number;
2369
+ /**
2370
+ * Interval in milliseconds between DLQ retry loop ticks.
2371
+ * Only relevant when DeadLetterQueueService is present.
2372
+ * @default 30_000
2373
+ */
2374
+ dlqRetryIntervalMs?: number;
2375
+ /**
2376
+ * Maximum number of DLQ items to re-enqueue per retry loop tick.
2377
+ * @default 10
2378
+ */
2379
+ dlqRetryBatchSize?: number;
2380
+ }
2381
+ /**
2382
+ * Default configuration values for FlowQueueService.
2383
+ *
2384
+ * Applied when specific config fields are omitted.
2385
+ */
2386
+ declare const DEFAULT_QUEUE_CONFIG: Required<FlowQueueConfig>;
2387
+ //#endregion
2388
+ //#region src/types/kv-store.d.ts
2389
+ /**
2390
+ * Base key-value store interface for raw string storage.
2391
+ *
2392
+ * This is the low-level interface that storage adapters implement.
2393
+ * It stores raw string values without type safety or serialization.
2394
+ *
2395
+ * @property get - Retrieves a value by key, returns null if not found
2396
+ * @property set - Stores a value with the given key
2397
+ * @property delete - Removes a value by key
2398
+ * @property list - Optional operation to list all keys with a given prefix
2399
+ *
2400
+ * @example
2401
+ * ```typescript
2402
+ * // Implement a BaseKvStore with Redis
2403
+ * const redisKvStore: BaseKvStore = {
2404
+ * get: (key) => Effect.tryPromise({
2405
+ * try: () => redis.get(key),
2406
+ * catch: (error) => UploadistaError.fromCode("UNKNOWN_ERROR", { cause: error })
2407
+ * }),
2408
+ *
2409
+ * set: (key, value) => Effect.tryPromise({
2410
+ * try: () => redis.set(key, value),
2411
+ * catch: (error) => UploadistaError.fromCode("UNKNOWN_ERROR", { cause: error })
2412
+ * }),
2413
+ *
2414
+ * delete: (key) => Effect.tryPromise({
2415
+ * try: () => redis.del(key),
2416
+ * catch: (error) => UploadistaError.fromCode("UNKNOWN_ERROR", { cause: error })
2417
+ * }),
2418
+ *
2419
+ * list: (prefix) => Effect.tryPromise({
2420
+ * try: () => redis.keys(`${prefix}*`),
2421
+ * catch: (error) => UploadistaError.fromCode("UNKNOWN_ERROR", { cause: error })
2422
+ * })
2423
+ * };
2424
+ * ```
2425
+ */
2426
+ interface BaseKvStore {
2427
+ readonly get: (key: string) => Effect.Effect<string | null, UploadistaError>;
2428
+ readonly set: (key: string, value: string) => Effect.Effect<void, UploadistaError>;
2429
+ readonly delete: (key: string) => Effect.Effect<void, UploadistaError>;
2430
+ readonly list?: (keyPrefix: string) => Effect.Effect<Array<string>, UploadistaError>;
2431
+ }
2432
+ /**
2433
+ * Type-safe key-value store interface with automatic serialization.
2434
+ *
2435
+ * This wraps a BaseKvStore and handles JSON serialization/deserialization
2436
+ * for a specific data type, providing type safety and eliminating the need
2437
+ * for manual JSON.stringify/parse calls.
2438
+ *
2439
+ * @template TData - The type of data stored in this KV store
2440
+ *
2441
+ * @property get - Retrieves and deserializes a value, fails if not found
2442
+ * @property set - Serializes and stores a value
2443
+ * @property delete - Removes a value by key
2444
+ * @property list - Optional operation to list all keys (without prefix)
2445
+ *
2446
+ * @example
2447
+ * ```typescript
2448
+ * // Use a typed KV store
2449
+ * const uploadStore: KvStore<UploadFile> = new TypedKvStore(
2450
+ * baseStore,
2451
+ * "uploads:",
2452
+ * jsonSerializer.serialize,
2453
+ * jsonSerializer.deserialize
2454
+ * );
2455
+ *
2456
+ * // Store and retrieve typed data
2457
+ * const program = Effect.gen(function* () {
2458
+ * const file: UploadFile = {
2459
+ * id: "file123",
2460
+ * offset: 0,
2461
+ * storage: { id: "s3", type: "s3" }
2462
+ * };
2463
+ *
2464
+ * // Automatic serialization
2465
+ * yield* uploadStore.set("file123", file);
2466
+ *
2467
+ * // Automatic deserialization with type safety
2468
+ * const retrieved = yield* uploadStore.get("file123");
2469
+ * console.log(retrieved.offset); // TypeScript knows this is a number
2470
+ * });
2471
+ * ```
2472
+ */
2473
+ type KvStore<TData> = {
2474
+ readonly get: (key: string) => Effect.Effect<TData, UploadistaError>;
2475
+ readonly set: (key: string, value: TData) => Effect.Effect<void, UploadistaError>;
2476
+ readonly delete: (key: string) => Effect.Effect<void, UploadistaError>;
2477
+ readonly list?: () => Effect.Effect<Array<string>, UploadistaError>;
2478
+ };
2479
+ /**
2480
+ * Typed wrapper class that adds serialization to a BaseKvStore.
2481
+ *
2482
+ * This class implements the KvStore interface by wrapping a BaseKvStore
2483
+ * and handling serialization/deserialization for a specific type. It also
2484
+ * adds a key prefix to isolate different data types in the same store.
2485
+ *
2486
+ * @template TData - The type of data to store
2487
+ *
2488
+ * @example
2489
+ * ```typescript
2490
+ * // Create a typed store for UploadFile
2491
+ * const uploadFileStore = new TypedKvStore<UploadFile>(
2492
+ * baseKvStore,
2493
+ * "uploadista:upload-file:", // All keys will be prefixed
2494
+ * (data) => JSON.stringify(data),
2495
+ * (str) => JSON.parse(str) as UploadFile
2496
+ * );
2497
+ *
2498
+ * // Use the store
2499
+ * const effect = Effect.gen(function* () {
2500
+ * const file: UploadFile = { ... };
2501
+ * yield* uploadFileStore.set("abc123", file);
2502
+ * // Internally stores at key "uploadista:upload-file:abc123"
2503
+ *
2504
+ * const retrieved = yield* uploadFileStore.get("abc123");
2505
+ * return retrieved;
2506
+ * });
2507
+ *
2508
+ * // Custom serialization for binary data
2509
+ * const binaryStore = new TypedKvStore<Uint8Array>(
2510
+ * baseKvStore,
2511
+ * "binary:",
2512
+ * (data) => btoa(String.fromCharCode(...data)), // Base64 encode
2513
+ * (str) => Uint8Array.from(atob(str), c => c.charCodeAt(0)) // Base64 decode
2514
+ * );
2515
+ * ```
2516
+ */
2517
+ declare class TypedKvStore<TData> implements KvStore<TData> {
2518
+ private baseStore;
2519
+ private keyPrefix;
2520
+ private serialize;
2521
+ private deserialize;
2522
+ constructor(baseStore: BaseKvStore, keyPrefix: string, serialize: (data: TData) => string, deserialize: (str: string) => TData);
2523
+ get: (key: string) => Effect.Effect<TData, UploadistaError>;
2524
+ set: (key: string, value: TData) => Effect.Effect<void, UploadistaError>;
2525
+ delete: (key: string) => Effect.Effect<void, UploadistaError>;
2526
+ list: () => Effect.Effect<Array<string>, UploadistaError>;
2527
+ }
2528
+ /**
2529
+ * Default JSON serialization helpers.
2530
+ *
2531
+ * These functions provide standard JSON serialization for use with TypedKvStore.
2532
+ * They work with any JSON-serializable type.
2533
+ *
2534
+ * @example
2535
+ * ```typescript
2536
+ * const store = new TypedKvStore<MyType>(
2537
+ * baseStore,
2538
+ * "mydata:",
2539
+ * jsonSerializer.serialize,
2540
+ * jsonSerializer.deserialize
2541
+ * );
2542
+ * ```
2543
+ */
2544
+ declare const jsonSerializer: {
2545
+ serialize: <T>(data: T) => string;
2546
+ deserialize: <T>(str: string) => T;
2547
+ };
2548
+ declare const BaseKvStoreService_base: Context.TagClass<BaseKvStoreService, "BaseKvStore", BaseKvStore>;
2549
+ /**
2550
+ * Effect-TS context tag for the base untyped KV store.
2551
+ *
2552
+ * This is the low-level store that storage adapter implementations provide.
2553
+ * Most application code should use typed stores like UploadFileKVStore instead.
2554
+ *
2555
+ * @example
2556
+ * ```typescript
2557
+ * // Provide a base store implementation
2558
+ * const baseStoreLayer = Layer.succeed(BaseKvStoreService, redisKvStore);
2559
+ *
2560
+ * // Use in an Effect
2561
+ * const effect = Effect.gen(function* () {
2562
+ * const baseStore = yield* BaseKvStoreService;
2563
+ * yield* baseStore.set("raw-key", "raw-value");
2564
+ * });
2565
+ * ```
2566
+ */
2567
+ declare class BaseKvStoreService extends BaseKvStoreService_base {}
2568
+ declare const UploadFileKVStore_base: Context.TagClass<UploadFileKVStore, "UploadFileKVStore", KvStore<UploadFile>>;
2569
+ /**
2570
+ * Effect-TS context tag for the UploadFile typed KV store.
2571
+ *
2572
+ * This provides type-safe storage for UploadFile metadata. It's the primary
2573
+ * way to store and retrieve upload metadata in the system.
2574
+ *
2575
+ * @example
2576
+ * ```typescript
2577
+ * const uploadEffect = Effect.gen(function* () {
2578
+ * const kvStore = yield* UploadFileKVStore;
2579
+ *
2580
+ * // Store upload metadata
2581
+ * const file: UploadFile = {
2582
+ * id: "upload123",
2583
+ * offset: 0,
2584
+ * storage: { id: "s3", type: "s3" }
2585
+ * };
2586
+ * yield* kvStore.set("upload123", file);
2587
+ *
2588
+ * // Retrieve with type safety
2589
+ * const retrieved = yield* kvStore.get("upload123");
2590
+ * return retrieved;
2591
+ * });
2592
+ * ```
2593
+ */
2594
+ declare class UploadFileKVStore extends UploadFileKVStore_base {}
2595
+ /**
2596
+ * Effect Layer that creates the UploadFileKVStore from a BaseKvStore.
2597
+ *
2598
+ * This layer automatically wires up JSON serialization for UploadFile objects
2599
+ * with the "uploadista:upload-file:" key prefix.
2600
+ *
2601
+ * @example
2602
+ * ```typescript
2603
+ * const program = Effect.gen(function* () {
2604
+ * const kvStore = yield* UploadFileKVStore;
2605
+ * // Use the store...
2606
+ * }).pipe(
2607
+ * Effect.provide(uploadFileKvStore),
2608
+ * Effect.provide(baseStoreLayer)
2609
+ * );
2610
+ * ```
2611
+ */
2612
+ declare const uploadFileKvStore: Layer.Layer<UploadFileKVStore, never, BaseKvStoreService>;
2613
+ declare const FlowJobKVStore_base: Context.TagClass<FlowJobKVStore, "FlowJobKVStore", KvStore<FlowJob>>;
2614
+ /**
2615
+ * Effect-TS context tag for the FlowJob typed KV store.
2616
+ *
2617
+ * This provides type-safe storage for FlowJob metadata, tracking the
2618
+ * execution state of flow processing jobs.
2619
+ *
2620
+ * @example
2621
+ * ```typescript
2622
+ * const flowEffect = Effect.gen(function* () {
2623
+ * const jobStore = yield* FlowJobKVStore;
2624
+ *
2625
+ * // Store job state
2626
+ * const job: FlowJob = {
2627
+ * id: "job123",
2628
+ * flowId: "flow_resize",
2629
+ * status: "running",
2630
+ * tasks: [],
2631
+ * createdAt: new Date(),
2632
+ * updatedAt: new Date()
2633
+ * };
2634
+ * yield* jobStore.set("job123", job);
2635
+ *
2636
+ * // Retrieve and check status
2637
+ * const retrieved = yield* jobStore.get("job123");
2638
+ * return retrieved.status;
2639
+ * });
2640
+ * ```
2641
+ */
2642
+ declare class FlowJobKVStore extends FlowJobKVStore_base {}
2643
+ /**
2644
+ * Effect Layer that creates the FlowJobKVStore from a BaseKvStore.
2645
+ *
2646
+ * This layer automatically wires up JSON serialization for FlowJob objects
2647
+ * with the "uploadista:flow-job:" key prefix.
2648
+ *
2649
+ * @example
2650
+ * ```typescript
2651
+ * const program = Effect.gen(function* () {
2652
+ * const jobStore = yield* FlowJobKVStore;
2653
+ * // Use the store...
2654
+ * }).pipe(
2655
+ * Effect.provide(flowJobKvStore),
2656
+ * Effect.provide(baseStoreLayer)
2657
+ * );
2658
+ * ```
2659
+ */
2660
+ declare const flowJobKvStore: Layer.Layer<FlowJobKVStore, never, BaseKvStoreService>;
2661
+ declare const DeadLetterQueueKVStore_base: Context.TagClass<DeadLetterQueueKVStore, "DeadLetterQueueKVStore", KvStore<DeadLetterItem>>;
2662
+ /**
2663
+ * Effect-TS context tag for the Dead Letter Queue typed KV store.
2664
+ *
2665
+ * This provides type-safe storage for DeadLetterItem objects, tracking
2666
+ * failed flow jobs for retry, debugging, and manual intervention.
2667
+ *
2668
+ * @example
2669
+ * ```typescript
2670
+ * const dlqEffect = Effect.gen(function* () {
2671
+ * const dlqStore = yield* DeadLetterQueueKVStore;
2672
+ *
2673
+ * // Store a DLQ item
2674
+ * const item: DeadLetterItem = {
2675
+ * id: "dlq_123",
2676
+ * jobId: "job_456",
2677
+ * flowId: "image-pipeline",
2678
+ * storageId: "s3",
2679
+ * clientId: "client_789",
2680
+ * error: { code: "FLOW_NODE_ERROR", message: "Timeout" },
2681
+ * inputs: { input: { uploadId: "upload_abc" } },
2682
+ * nodeResults: {},
2683
+ * retryCount: 0,
2684
+ * maxRetries: 3,
2685
+ * retryHistory: [],
2686
+ * createdAt: new Date(),
2687
+ * updatedAt: new Date(),
2688
+ * status: "pending"
2689
+ * };
2690
+ * yield* dlqStore.set("dlq_123", item);
2691
+ *
2692
+ * // Retrieve with type safety
2693
+ * const retrieved = yield* dlqStore.get("dlq_123");
2694
+ * return retrieved.status;
2695
+ * });
2696
+ * ```
2697
+ */
2698
+ declare class DeadLetterQueueKVStore extends DeadLetterQueueKVStore_base {}
2699
+ /**
2700
+ * Effect Layer that creates the DeadLetterQueueKVStore from a BaseKvStore.
2701
+ *
2702
+ * This layer automatically wires up JSON serialization for DeadLetterItem objects
2703
+ * with the "uploadista:dlq:" key prefix.
2704
+ *
2705
+ * @example
2706
+ * ```typescript
2707
+ * const program = Effect.gen(function* () {
2708
+ * const dlqStore = yield* DeadLetterQueueKVStore;
2709
+ * // Use the store...
2710
+ * }).pipe(
2711
+ * Effect.provide(deadLetterQueueKvStore),
2712
+ * Effect.provide(baseStoreLayer)
2713
+ * );
2714
+ * ```
2715
+ */
2716
+ declare const deadLetterQueueKvStore: Layer.Layer<DeadLetterQueueKVStore, never, BaseKvStoreService>;
2717
+ declare const FlowQueueKVStore_base: Context.TagClass<FlowQueueKVStore, "FlowQueueKVStore", KvStore<FlowQueueItem>>;
2718
+ /**
2719
+ * Effect-TS context tag for the FlowQueueItem typed KV store.
2720
+ */
2721
+ declare class FlowQueueKVStore extends FlowQueueKVStore_base {}
2722
+ /**
2723
+ * Effect Layer that creates the FlowQueueKVStore from a BaseKvStore.
2724
+ *
2725
+ * Stores queue items as JSON under the "uploadista:queue-item:" prefix.
2726
+ * Used by FlowQueueService.fromKvStore() so the queue can be backed by
2727
+ * any BaseKvStoreService (Redis, filesystem, Cloudflare KV, etc.).
2728
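+ *
+ * @example
+ * ```typescript
+ * // Sketch, mirroring the other KV store layers; `baseStoreLayer` is assumed
+ * const program = Effect.gen(function* () {
+ *   const queueStore = yield* FlowQueueKVStore;
+ *   // Use the store...
+ * }).pipe(
+ *   Effect.provide(flowQueueKvStore),
+ *   Effect.provide(baseStoreLayer)
+ * );
+ * ```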
+ */
2729
+ declare const flowQueueKvStore: Layer.Layer<FlowQueueKVStore, never, BaseKvStoreService>;
2730
+ //#endregion
2731
+ //#region src/types/data-store.d.ts
2732
+ /**
2733
+ * Options for writing data to a DataStore.
2734
+ *
2735
+ * @property file_id - Unique identifier for the file being written
2736
+ * @property stream - Stream of byte chunks to write to storage
2737
+ * @property offset - Byte offset where writing should begin (for resumable uploads)
2738
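+ *
+ * @example
+ * ```typescript
+ * // Sketch; `byteStream` is a hypothetical Stream.Stream<Uint8Array, UploadistaError>
+ * const options: DataStoreWriteOptions = {
+ *   file_id: "file-123",
+ *   stream: byteStream,
+ *   offset: 0
+ * };
+ * ```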
+ */
2739
+ type DataStoreWriteOptions = {
2740
+ file_id: string;
2741
+ stream: Stream.Stream<Uint8Array, UploadistaError>;
2742
+ offset: number;
2743
+ };
2744
+ /**
2745
+ * Upload strategy type indicating how chunks are uploaded.
2746
+ *
2747
+ * - `single`: Upload file in a single request (traditional upload)
2748
+ * - `parallel`: Upload file chunks in parallel (for large files)
2749
+ */
2750
+ type UploadStrategy = "single" | "parallel";
2751
+ /**
2752
+ * Configuration options for streaming file reads.
2753
+ *
2754
+ * Used to control streaming behavior in transform nodes and data stores.
2755
+ *
2756
+ * @property fileSizeThreshold - Files below this size use buffered mode (default: 1MB)
2757
+ * @property chunkSize - Chunk size for streaming reads in bytes (default: 64KB)
2758
+ *
2759
+ * @example
2760
+ * ```typescript
2761
+ * const config: StreamingConfig = {
2762
+ * fileSizeThreshold: 1_048_576, // 1MB - use buffered for smaller files
2763
+ * chunkSize: 65_536, // 64KB chunks
2764
+ * };
2765
+ * ```
2766
+ */
2767
+ type StreamingConfig = {
2768
+ /** Files below this size use buffered mode (default: 1MB = 1_048_576 bytes) */
+ fileSizeThreshold?: number;
+ /** Chunk size for streaming reads in bytes (default: 64KB = 65_536 bytes) */
+ chunkSize?: number;
2770
+ };
2771
+ /**
2772
+ * Default streaming configuration values.
2773
+ */
2774
+ declare const DEFAULT_STREAMING_CONFIG: Required<StreamingConfig>;
2775
+ /**
2776
+ * Default multipart part size for S3/R2 streaming writes.
2777
+ * S3 requires minimum 5MB parts (except for the last part).
2778
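+ *
+ * @example
+ * ```typescript
+ * // Sketch: estimate the part count for a 100MB object
+ * const partCount = Math.ceil((100 * 1024 * 1024) / DEFAULT_MULTIPART_PART_SIZE);
+ * ```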
+ */
2779
+ declare const DEFAULT_MULTIPART_PART_SIZE: number;
+ /**
+ * Options for streaming write operations.
+ *
+ * Used when writing file content from a stream with unknown final size.
+ * The store will finalize the upload when the stream completes.
+ *
+ * @property stream - Effect Stream of byte chunks to write
+ * @property contentType - Optional MIME type for the file
+ * @property metadata - Optional metadata to store with the file
+ * @property sizeHint - Optional estimated size for optimization (e.g., multipart part sizing)
+ *
+ * @example
+ * ```typescript
+ * const options: StreamWriteOptions = {
+ * stream: transformedStream,
+ * contentType: "image/webp",
+ * metadata: { originalName: "photo.jpg" },
+ * sizeHint: 5_000_000, // ~5MB expected
+ * };
+ * ```
+ */
+ type StreamWriteOptions = {
+ stream: Stream.Stream<Uint8Array, UploadistaError>;
+ contentType?: string;
+ metadata?: Record<string, string>;
+ /** Optional size hint for optimization (not required) */
+ sizeHint?: number;
+ };
+ /**
+ * Result of a streaming write operation.
+ *
+ * Contains the final size after the stream completes, along with
+ * storage location information.
+ *
+ * @property id - Unique identifier of the written file
+ * @property size - Final size in bytes after stream completed
+ * @property path - Storage path or key where file was written
+ * @property bucket - Optional bucket/container name (for cloud storage)
+ *
+ * @example
+ * ```typescript
+ * const result = yield* dataStore.writeStream(fileId, options);
+ * console.log(`Wrote ${result.size} bytes to ${result.path}`);
+ * ```
+ */
+ type StreamWriteResult = {
+ id: string;
+ size: number;
+ path: string;
+ bucket?: string;
+ /** Public URL for accessing the uploaded file (if available) */
+ url?: string;
+ };
+ /**
+ * Capabilities and constraints of a DataStore implementation.
+ *
+ * This type describes what features a storage backend supports and what
+ * limitations it has. Use this to determine the optimal upload strategy
+ * and validate client requests.
+ *
+ * @property supportsParallelUploads - Can upload chunks in parallel (e.g., S3 multipart)
+ * @property supportsConcatenation - Can concatenate multiple uploads into one file
+ * @property supportsDeferredLength - Can start upload without knowing final size
+ * @property supportsResumableUploads - Can resume interrupted uploads from last offset
+ * @property supportsTransactionalUploads - Guarantees atomic upload success/failure
+ * @property supportsStreamingRead - Can read file content as a stream instead of buffering
+ * @property maxConcurrentUploads - Maximum parallel upload parts (if parallel supported)
+ * @property minChunkSize - Minimum size in bytes for each chunk (except last)
+ * @property maxChunkSize - Maximum size in bytes for each chunk
+ * @property maxParts - Maximum number of parts in a multipart upload
+ * @property optimalChunkSize - Recommended chunk size for best performance
+ * @property requiresOrderedChunks - Must receive chunks in sequential order
+ * @property requiresMimeTypeValidation - Validates file MIME type matches declaration
+ * @property maxValidationSize - Maximum file size for MIME type validation
+ *
+ * @example
+ * ```typescript
+ * const capabilities = dataStore.getCapabilities();
+ *
+ * if (capabilities.supportsParallelUploads && fileSize > 10_000_000) {
+ * // Use parallel upload for large files
+ * const chunkSize = capabilities.optimalChunkSize || 5_242_880; // 5MB default
+ * uploadInParallel(file, chunkSize);
+ * } else {
+ * // Use single upload
+ * uploadAsSingleChunk(file);
+ * }
+ *
+ * // Check for streaming support
+ * if (capabilities.supportsStreamingRead) {
+ * // Use streaming for memory-efficient processing
+ * const stream = yield* dataStore.readStream(fileId);
+ * }
+ * ```
+ */
+ type DataStoreCapabilities = {
+ supportsParallelUploads: boolean;
+ supportsConcatenation: boolean;
+ supportsDeferredLength: boolean;
+ supportsResumableUploads: boolean;
+ supportsTransactionalUploads: boolean;
+ /** Whether the store supports streaming reads via readStream() */
+ supportsStreamingRead?: boolean;
+ /** Whether the store supports streaming writes via writeStream() with unknown final size */
+ supportsStreamingWrite?: boolean;
+ maxConcurrentUploads?: number;
+ minChunkSize?: number;
+ maxChunkSize?: number;
+ maxParts?: number;
+ optimalChunkSize?: number;
+ requiresOrderedChunks: boolean;
+ requiresMimeTypeValidation?: boolean;
+ maxValidationSize?: number;
+ };
+ /**
+ * Core interface for all storage backend implementations.
+ *
+ * DataStore abstracts file storage operations across different backends
+ * (S3, Azure Blob, GCS, local filesystem, etc.). All storage adapters
+ * must implement this interface.
+ *
+ * @template TData - The data type stored (typically UploadFile)
+ *
+ * @property bucket - Optional storage bucket or container name
+ * @property path - Optional base path prefix for all stored files
+ * @property create - Creates a new file record in storage
+ * @property remove - Deletes a file from storage
+ * @property read - Reads complete file contents as bytes
+ * @property write - Writes data stream to storage at specified offset
+ * @property deleteExpired - Optional cleanup of expired files
+ * @property getCapabilities - Returns storage backend capabilities
+ * @property validateUploadStrategy - Validates if strategy is supported
+ *
+ * @example
+ * ```typescript
+ * // Implement a custom DataStore
+ * const myDataStore: DataStore<UploadFile> = {
+ * bucket: "my-uploads",
+ * path: "files/",
+ *
+ * create: (file) => Effect.gen(function* () {
+ * // Store file metadata
+ * yield* saveMetadata(file);
+ * return file;
+ * }),
+ *
+ * write: ({ file_id, stream, offset }, { onProgress }) => Effect.gen(function* () {
+ * // Write chunks to storage
+ * let bytesWritten = offset;
+ * yield* Stream.runForEach(stream, (chunk) => Effect.gen(function* () {
+ * writeChunk(file_id, chunk, bytesWritten);
+ * bytesWritten += chunk.byteLength;
+ * yield* (onProgress?.(bytesWritten) ?? Effect.void);
+ * }));
+ * return bytesWritten;
+ * }),
+ *
+ * read: (file_id) => Effect.gen(function* () {
+ * // Read complete file
+ * const data = yield* readFromStorage(file_id);
+ * return data;
+ * }),
+ *
+ * remove: (file_id) => Effect.gen(function* () {
+ * yield* deleteFromStorage(file_id);
+ * }),
+ *
+ * getCapabilities: () => ({
+ * supportsParallelUploads: true,
+ * supportsConcatenation: false,
+ * supportsDeferredLength: true,
+ * supportsResumableUploads: true,
+ * supportsTransactionalUploads: false,
+ * maxConcurrentUploads: 10,
+ * optimalChunkSize: 5_242_880, // 5MB
+ * requiresOrderedChunks: false,
+ * }),
+ *
+ * validateUploadStrategy: (strategy) =>
+ * Effect.succeed(strategy === "parallel" || strategy === "single"),
+ * };
+ * ```
+ */
+ type DataStore<TData = unknown> = {
+ readonly bucket?: string;
+ readonly path?: string;
+ readonly create: (file: TData) => Effect.Effect<TData, UploadistaError>;
+ readonly remove: (file_id: string) => Effect.Effect<void, UploadistaError>;
+ /**
+ * Reads the complete file contents as bytes (buffered mode).
+ * For large files, consider using readStream() if available.
+ */
+ readonly read: (file_id: string) => Effect.Effect<Uint8Array, UploadistaError>;
+ /**
+ * Reads file content as a stream of chunks for memory-efficient processing.
+ * Optional - check getCapabilities().supportsStreamingRead before using.
+ *
+ * @param file_id - The unique identifier of the file to read
+ * @param config - Optional streaming configuration (chunk size)
+ * @returns An Effect that resolves to a Stream of byte chunks
+ *
+ * @example
+ * ```typescript
+ * const capabilities = dataStore.getCapabilities();
+ * if (capabilities.supportsStreamingRead && dataStore.readStream) {
+ * const stream = yield* dataStore.readStream(fileId, { chunkSize: 65536 });
+ * // Process stream chunk by chunk
+ * }
+ * ```
+ */
+ readonly readStream?: (file_id: string, config?: StreamingConfig) => Effect.Effect<Stream.Stream<Uint8Array, UploadistaError>, UploadistaError>;
+ readonly write: (options: DataStoreWriteOptions, dependencies: {
+ onProgress?: (offset: number) => Effect.Effect<void>;
+ }) => Effect.Effect<number, UploadistaError>;
+ /**
+ * Writes file content from a stream with unknown final size.
+ * Optional - check getCapabilities().supportsStreamingWrite before using.
+ *
+ * This method is optimized for end-to-end streaming where the output
+ * size isn't known until the stream completes. It uses store-specific
+ * mechanisms like multipart uploads (S3/R2), resumable uploads (GCS),
+ * or block staging (Azure) to efficiently handle streaming data.
+ *
+ * @param fileId - Unique identifier for the file being written
+ * @param options - Stream and optional metadata
+ * @returns StreamWriteResult containing final size after completion
+ *
+ * @example
+ * ```typescript
+ * const capabilities = dataStore.getCapabilities();
+ * if (capabilities.supportsStreamingWrite && dataStore.writeStream) {
+ * const result = yield* dataStore.writeStream(fileId, {
+ * stream: transformedStream,
+ * contentType: "image/webp",
+ * });
+ * console.log(`Wrote ${result.size} bytes`);
+ * }
+ * ```
+ */
+ readonly writeStream?: (fileId: string, options: StreamWriteOptions) => Effect.Effect<StreamWriteResult, UploadistaError>;
+ readonly deleteExpired?: () => Effect.Effect<number, UploadistaError>;
+ readonly getCapabilities: () => DataStoreCapabilities;
+ readonly validateUploadStrategy: (strategy: UploadStrategy) => Effect.Effect<boolean, never>;
+ };
+ declare const UploadFileDataStore_base: Context.TagClass<UploadFileDataStore, "UploadFileDataStore", DataStore<UploadFile>>;
+ /**
+ * Effect-TS context tag for UploadFile DataStore.
+ *
+ * Use this tag to access the primary DataStore in an Effect context.
+ * This is the standard storage backend for uploaded files.
+ *
+ * @example
+ * ```typescript
+ * const uploadEffect = Effect.gen(function* () {
+ * const dataStore = yield* UploadFileDataStore;
+ * const file = yield* dataStore.create(uploadFile);
+ * return file;
+ * });
+ * ```
+ */
+ declare class UploadFileDataStore extends UploadFileDataStore_base {}
+ declare const BufferedUploadFileDataStore_base: Context.TagClass<BufferedUploadFileDataStore, "BufferedUploadFileDataStore", DataStore<UploadFile>>;
+ /**
+ * Effect-TS context tag for buffered/temporary DataStore.
+ *
+ * This is an optional storage backend used for temporary or intermediate files
+ * during flow processing. Not all implementations provide a buffered store.
+ *
+ * @example
+ * ```typescript
+ * const processEffect = Effect.gen(function* () {
+ * const bufferedStore = yield* BufferedUploadFileDataStore;
+ * // Store intermediate processing results
+ * const tempFile = yield* bufferedStore.create(intermediateFile);
+ * return tempFile;
+ * });
+ * ```
+ */
+ declare class BufferedUploadFileDataStore extends BufferedUploadFileDataStore_base {}
+ /**
+ * Service interface for managing multiple DataStore instances.
+ *
+ * This allows routing files to different storage backends based on
+ * storageId (e.g., different S3 buckets, Azure containers, or storage tiers).
+ *
+ * @property getDataStore - Retrieves the appropriate DataStore for a given storage ID
+ * @property bufferedDataStore - Optional temporary storage for intermediate files
+ */
+ type UploadFileDataStoresShape = {
+ getDataStore: (storageId: string, clientId: string | null) => Effect.Effect<DataStore<UploadFile>, UploadistaError>;
+ bufferedDataStore: Effect.Effect<DataStore<UploadFile> | undefined, UploadistaError>;
+ };
+ declare const UploadFileDataStores_base: Context.TagClass<UploadFileDataStores, "UploadFileDataStores", UploadFileDataStoresShape>;
+ /**
+ * Effect-TS context tag for the DataStore routing service.
+ *
+ * Provides access to multiple DataStore instances with routing logic.
+ *
+ * @example
+ * ```typescript
+ * const uploadEffect = Effect.gen(function* () {
+ * const dataStores = yield* UploadFileDataStores;
+ * // Route to specific storage based on storageId
+ * const dataStore = yield* dataStores.getDataStore("s3-production", clientId);
+ * const file = yield* dataStore.create(uploadFile);
+ * return file;
+ * });
+ * ```
+ */
+ declare class UploadFileDataStores extends UploadFileDataStores_base {}
+ /**
+ * Simplified DataStore configuration for easy setup.
+ *
+ * This type allows flexible configuration:
+ * - Single DataStore instance
+ * - Multiple named stores with routing
+ * - Effect that resolves to a DataStore
+ * - Pre-built Effect Layer
+ *
+ * @example
+ * ```typescript
+ * // Single store
+ * const config: DataStoreConfig = s3DataStore;
+ *
+ * // Multiple stores with routing
+ * const config: DataStoreConfig = {
+ * stores: {
+ * "s3-prod": s3ProdStore,
+ * "s3-dev": s3DevStore,
+ * "local": localFileStore,
+ * },
+ * default: "s3-prod"
+ * };
+ *
+ * // Effect that creates a store
+ * const config: DataStoreConfig = Effect.gen(function* () {
+ * const kvStore = yield* UploadFileKVStore;
+ * return s3Store(kvStore);
+ * });
+ *
+ * // Pre-built Layer
+ * const config: DataStoreConfig = Layer.succeed(UploadFileDataStores, {...});
+ * ```
+ */
+ type DataStoreConfig = DataStore<UploadFile> | Effect.Effect<DataStore<UploadFile>, never, UploadFileKVStore> | {
+ stores: Record<string, DataStore<UploadFile> | Effect.Effect<DataStore<UploadFile>, never, UploadFileKVStore>>;
+ default?: string;
+ } | Layer.Layer<UploadFileDataStores, never, UploadFileKVStore>;
+ /**
+ * Type guard to check if a value is a DataStore instance.
+ *
+ * @param config - The value to check
+ * @returns True if the value is a DataStore
+ *
+ * @example
+ * ```typescript
+ * if (isDataStore(config)) {
+ * const capabilities = config.getCapabilities();
+ * }
+ * ```
+ */
+ declare const isDataStore: (config: DataStoreConfig) => config is DataStore<UploadFile>;
+ /**
+ * Creates an Effect Layer from simplified DataStoreConfig.
+ *
+ * This function converts any DataStoreConfig format into a proper Effect Layer
+ * that can be provided to the UploadFileDataStores context tag.
+ *
+ * It handles:
+ * - Single DataStore: Wraps in a Layer that always returns that store
+ * - Multiple stores: Creates routing logic with optional default
+ * - Effect<DataStore>: Executes the Effect and wraps the result
+ * - Layer: Returns as-is
+ *
+ * @param config - The DataStore configuration
+ * @returns A Layer that provides UploadFileDataStores service
+ *
+ * @example
+ * ```typescript
+ * // Create from single store
+ * const layer = await createDataStoreLayer(s3DataStore);
+ *
+ * // Create from multiple stores
+ * const layer = await createDataStoreLayer({
+ * stores: {
+ * "production": s3Store,
+ * "development": localStore,
+ * },
+ * default: "development"
+ * });
+ *
+ * // Use the layer
+ * const program = Effect.gen(function* () {
+ * const stores = yield* UploadFileDataStores;
+ * const store = yield* stores.getDataStore("production", null);
+ * return store;
+ * }).pipe(Effect.provide(layer));
+ * ```
+ */
+ declare const createDataStoreLayer: (config: DataStoreConfig) => Promise<Layer.Layer<UploadFileDataStores, never, UploadFileKVStore>>;
+ //#endregion
+ //#region src/types/event-broadcaster.d.ts
+ /**
+ * Event broadcaster interface for pub/sub messaging across distributed instances.
+ * Used by WebSocketManager to broadcast upload events to all connected instances.
+ */
+ interface EventBroadcaster {
+ /**
+ * Publish a message to a channel
+ */
+ readonly publish: (channel: string, message: string) => Effect.Effect<void, UploadistaError>;
+ /**
+ * Subscribe to messages on a channel
+ */
+ readonly subscribe: (channel: string, handler: (message: string) => void) => Effect.Effect<void, UploadistaError>;
+ /**
+ * Unsubscribe from a channel (optional; not all implementations support it)
+ */
+ readonly unsubscribe?: (channel: string) => Effect.Effect<void, UploadistaError>;
+ }
+ declare const EventBroadcasterService_base: Context.TagClass<EventBroadcasterService, "EventBroadcaster", EventBroadcaster>;
+ /**
+ * Context tag for the EventBroadcaster service.
+ */
+ declare class EventBroadcasterService extends EventBroadcasterService_base {}
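The EventBroadcaster contract maps onto any pub/sub transport. As a hedged illustration (not part of the package), a single-process, in-memory implementation might look like this; a real deployment would back it with Redis pub/sub or similar:

```typescript
import { Effect, Layer } from "effect";

// Minimal in-memory sketch of the EventBroadcaster interface above.
// Only suitable within one process; names below are illustrative.
const handlers = new Map<string, Array<(message: string) => void>>();

const inMemoryBroadcaster: EventBroadcaster = {
  publish: (channel, message) =>
    Effect.sync(() => {
      for (const handler of handlers.get(channel) ?? []) {
        handler(message);
      }
    }),
  subscribe: (channel, handler) =>
    Effect.sync(() => {
      handlers.set(channel, [...(handlers.get(channel) ?? []), handler]);
    }),
  unsubscribe: (channel) =>
    Effect.sync(() => {
      handlers.delete(channel);
    }),
};

// Provide it where an EventBroadcasterService is required.
const broadcasterLayer = Layer.succeed(EventBroadcasterService, inMemoryBroadcaster);
```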
+ //#endregion
+ //#region src/types/upload-event.d.ts
+ declare enum UploadEventType {
+ UPLOAD_STARTED = "upload-started",
+ UPLOAD_PROGRESS = "upload-progress",
+ UPLOAD_COMPLETE = "upload-complete",
+ UPLOAD_FAILED = "upload-failed",
+ UPLOAD_VALIDATION_SUCCESS = "upload-validation-success",
+ UPLOAD_VALIDATION_FAILED = "upload-validation-failed",
+ UPLOAD_VALIDATION_WARNING = "upload-validation-warning"
+ }
+ declare const uploadEventSchema: z.ZodUnion<readonly [z.ZodObject<{
+ type: z.ZodUnion<readonly [z.ZodLiteral<UploadEventType.UPLOAD_STARTED>, z.ZodLiteral<UploadEventType.UPLOAD_COMPLETE>]>;
+ data: z.ZodObject<{
+ id: z.ZodString;
+ size: z.ZodOptional<z.ZodNumber>;
+ offset: z.ZodNumber;
+ metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodType<JsonValue, unknown, z.core.$ZodTypeInternals<JsonValue, unknown>>>>;
+ creationDate: z.ZodOptional<z.ZodString>;
+ url: z.ZodOptional<z.ZodString>;
+ sizeIsDeferred: z.ZodOptional<z.ZodBoolean>;
+ checksum: z.ZodOptional<z.ZodString>;
+ checksumAlgorithm: z.ZodOptional<z.ZodString>;
+ storage: z.ZodObject<{
+ id: z.ZodString;
+ type: z.ZodString;
+ path: z.ZodOptional<z.ZodString>;
+ uploadId: z.ZodOptional<z.ZodString>;
+ bucket: z.ZodOptional<z.ZodString>;
+ parts: z.ZodOptional<z.ZodArray<z.ZodObject<{
+ partNumber: z.ZodNumber;
+ etag: z.ZodString;
+ size: z.ZodNumber;
+ }, z.core.$strip>>>;
+ }, z.core.$strip>;
+ flow: z.ZodOptional<z.ZodObject<{
+ flowId: z.ZodString;
+ nodeId: z.ZodString;
+ jobId: z.ZodString;
+ }, z.core.$strip>>;
+ traceContext: z.ZodOptional<z.ZodObject<{
+ traceId: z.ZodString;
+ spanId: z.ZodString;
+ traceFlags: z.ZodNumber;
+ }, z.core.$strip>>;
+ }, z.core.$strip>;
+ flow: z.ZodOptional<z.ZodObject<{
+ flowId: z.ZodString;
+ nodeId: z.ZodString;
+ jobId: z.ZodString;
+ }, z.core.$strip>>;
+ }, z.core.$strip>, z.ZodObject<{
+ type: z.ZodLiteral<UploadEventType.UPLOAD_PROGRESS>;
+ data: z.ZodObject<{
+ id: z.ZodString;
+ progress: z.ZodNumber;
+ total: z.ZodNumber;
+ }, z.core.$strip>;
+ flow: z.ZodOptional<z.ZodObject<{
+ flowId: z.ZodString;
+ nodeId: z.ZodString;
+ jobId: z.ZodString;
+ }, z.core.$strip>>;
+ }, z.core.$strip>, z.ZodObject<{
+ type: z.ZodLiteral<UploadEventType.UPLOAD_FAILED>;
+ data: z.ZodObject<{
+ id: z.ZodString;
+ error: z.ZodString;
+ }, z.core.$strip>;
+ flow: z.ZodOptional<z.ZodObject<{
+ flowId: z.ZodString;
+ nodeId: z.ZodString;
+ jobId: z.ZodString;
+ }, z.core.$strip>>;
+ }, z.core.$strip>, z.ZodObject<{
+ type: z.ZodLiteral<UploadEventType.UPLOAD_VALIDATION_SUCCESS>;
+ data: z.ZodObject<{
+ id: z.ZodString;
+ validationType: z.ZodEnum<{
+ checksum: "checksum";
+ mimetype: "mimetype";
+ }>;
+ algorithm: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>;
+ flow: z.ZodOptional<z.ZodObject<{
+ flowId: z.ZodString;
+ nodeId: z.ZodString;
+ jobId: z.ZodString;
+ }, z.core.$strip>>;
+ }, z.core.$strip>, z.ZodObject<{
+ type: z.ZodLiteral<UploadEventType.UPLOAD_VALIDATION_FAILED>;
+ data: z.ZodObject<{
+ id: z.ZodString;
+ reason: z.ZodString;
+ expected: z.ZodString;
+ actual: z.ZodString;
+ }, z.core.$strip>;
+ flow: z.ZodOptional<z.ZodObject<{
+ flowId: z.ZodString;
+ nodeId: z.ZodString;
+ jobId: z.ZodString;
+ }, z.core.$strip>>;
+ }, z.core.$strip>, z.ZodObject<{
+ type: z.ZodLiteral<UploadEventType.UPLOAD_VALIDATION_WARNING>;
+ data: z.ZodObject<{
+ id: z.ZodString;
+ message: z.ZodString;
+ }, z.core.$strip>;
+ flow: z.ZodOptional<z.ZodObject<{
+ flowId: z.ZodString;
+ nodeId: z.ZodString;
+ jobId: z.ZodString;
+ }, z.core.$strip>>;
+ }, z.core.$strip>]>;
+ type UploadEvent = z.infer<typeof uploadEventSchema>;
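Since `uploadEventSchema` is a plain Zod union, an untrusted payload (from a webhook, queue message, etc.) can be validated before use, and TypeScript narrows the union on the `type` literal. A minimal sketch, assuming the raw payload arrives as a JSON string:

```typescript
// Validate an unknown payload against the event union before trusting it.
const handlePayload = (incoming: string): void => {
  const parsed = uploadEventSchema.safeParse(JSON.parse(incoming));
  if (!parsed.success) {
    console.error("not a valid upload event", parsed.error);
    return;
  }
  const event: UploadEvent = parsed.data;
  if (event.type === UploadEventType.UPLOAD_PROGRESS) {
    // Narrowed: data has progress/total here.
    console.log(`progress: ${event.data.progress}/${event.data.total}`);
  }
};
```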
+ //#endregion
+ //#region src/types/websocket.d.ts
+ /**
+ * Platform-agnostic WebSocket connection interface
+ */
+ interface WebSocketConnection {
+ send(data: string): void;
+ close(code?: number, reason?: string): void;
+ readonly readyState: number;
+ readonly id: string;
+ }
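Because the interface is intentionally minimal, adapting a concrete socket is mostly a matter of attaching an id. A hedged sketch against the Node `ws` package (an assumed external dependency, not part of this package; check your runtime's socket API):

```typescript
import { randomUUID } from "node:crypto";
import type { WebSocket } from "ws"; // assumed dependency for illustration

// Wrap a ws socket in the platform-agnostic interface above.
const toConnection = (socket: WebSocket): WebSocketConnection => {
  const id = randomUUID();
  return {
    id,
    send: (data) => socket.send(data),
    close: (code, reason) => socket.close(code, reason),
    get readyState() {
      return socket.readyState;
    },
  };
};
```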
+ /**
+ * WebSocket message that can be sent/received
+ */
+ declare const webSocketMessageSchema: z$1.ZodUnion<readonly [z$1.ZodObject<{
+ type: z$1.ZodLiteral<"upload_event">;
+ payload: z$1.ZodUnion<readonly [z$1.ZodObject<{
+ type: z$1.ZodUnion<readonly [z$1.ZodLiteral<UploadEventType.UPLOAD_STARTED>, z$1.ZodLiteral<UploadEventType.UPLOAD_COMPLETE>]>;
+ data: z$1.ZodObject<{
+ id: z$1.ZodString;
+ size: z$1.ZodOptional<z$1.ZodNumber>;
+ offset: z$1.ZodNumber;
+ metadata: z$1.ZodOptional<z$1.ZodRecord<z$1.ZodString, z$1.ZodType<JsonValue, unknown, z$1.core.$ZodTypeInternals<JsonValue, unknown>>>>;
+ creationDate: z$1.ZodOptional<z$1.ZodString>;
+ url: z$1.ZodOptional<z$1.ZodString>;
+ sizeIsDeferred: z$1.ZodOptional<z$1.ZodBoolean>;
+ checksum: z$1.ZodOptional<z$1.ZodString>;
+ checksumAlgorithm: z$1.ZodOptional<z$1.ZodString>;
+ storage: z$1.ZodObject<{
+ id: z$1.ZodString;
+ type: z$1.ZodString;
+ path: z$1.ZodOptional<z$1.ZodString>;
+ uploadId: z$1.ZodOptional<z$1.ZodString>;
+ bucket: z$1.ZodOptional<z$1.ZodString>;
+ parts: z$1.ZodOptional<z$1.ZodArray<z$1.ZodObject<{
+ partNumber: z$1.ZodNumber;
+ etag: z$1.ZodString;
+ size: z$1.ZodNumber;
+ }, z$1.core.$strip>>>;
+ }, z$1.core.$strip>;
+ flow: z$1.ZodOptional<z$1.ZodObject<{
+ flowId: z$1.ZodString;
+ nodeId: z$1.ZodString;
+ jobId: z$1.ZodString;
+ }, z$1.core.$strip>>;
+ traceContext: z$1.ZodOptional<z$1.ZodObject<{
+ traceId: z$1.ZodString;
+ spanId: z$1.ZodString;
+ traceFlags: z$1.ZodNumber;
+ }, z$1.core.$strip>>;
+ }, z$1.core.$strip>;
+ flow: z$1.ZodOptional<z$1.ZodObject<{
+ flowId: z$1.ZodString;
+ nodeId: z$1.ZodString;
+ jobId: z$1.ZodString;
+ }, z$1.core.$strip>>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<UploadEventType.UPLOAD_PROGRESS>;
+ data: z$1.ZodObject<{
+ id: z$1.ZodString;
+ progress: z$1.ZodNumber;
+ total: z$1.ZodNumber;
+ }, z$1.core.$strip>;
+ flow: z$1.ZodOptional<z$1.ZodObject<{
+ flowId: z$1.ZodString;
+ nodeId: z$1.ZodString;
+ jobId: z$1.ZodString;
+ }, z$1.core.$strip>>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<UploadEventType.UPLOAD_FAILED>;
+ data: z$1.ZodObject<{
+ id: z$1.ZodString;
+ error: z$1.ZodString;
+ }, z$1.core.$strip>;
+ flow: z$1.ZodOptional<z$1.ZodObject<{
+ flowId: z$1.ZodString;
+ nodeId: z$1.ZodString;
+ jobId: z$1.ZodString;
+ }, z$1.core.$strip>>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<UploadEventType.UPLOAD_VALIDATION_SUCCESS>;
+ data: z$1.ZodObject<{
+ id: z$1.ZodString;
+ validationType: z$1.ZodEnum<{
+ checksum: "checksum";
+ mimetype: "mimetype";
+ }>;
+ algorithm: z$1.ZodOptional<z$1.ZodString>;
+ }, z$1.core.$strip>;
+ flow: z$1.ZodOptional<z$1.ZodObject<{
+ flowId: z$1.ZodString;
+ nodeId: z$1.ZodString;
+ jobId: z$1.ZodString;
+ }, z$1.core.$strip>>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<UploadEventType.UPLOAD_VALIDATION_FAILED>;
+ data: z$1.ZodObject<{
+ id: z$1.ZodString;
+ reason: z$1.ZodString;
+ expected: z$1.ZodString;
+ actual: z$1.ZodString;
+ }, z$1.core.$strip>;
+ flow: z$1.ZodOptional<z$1.ZodObject<{
+ flowId: z$1.ZodString;
+ nodeId: z$1.ZodString;
+ jobId: z$1.ZodString;
+ }, z$1.core.$strip>>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<UploadEventType.UPLOAD_VALIDATION_WARNING>;
+ data: z$1.ZodObject<{
+ id: z$1.ZodString;
+ message: z$1.ZodString;
+ }, z$1.core.$strip>;
+ flow: z$1.ZodOptional<z$1.ZodObject<{
+ flowId: z$1.ZodString;
+ nodeId: z$1.ZodString;
+ jobId: z$1.ZodString;
+ }, z$1.core.$strip>>;
+ }, z$1.core.$strip>]>;
+ timestamp: z$1.ZodOptional<z$1.ZodString>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<"flow_event">;
+ payload: z$1.ZodAny;
+ timestamp: z$1.ZodOptional<z$1.ZodString>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<"subscribed">;
+ payload: z$1.ZodObject<{
+ eventKey: z$1.ZodString;
+ }, z$1.core.$strip>;
+ timestamp: z$1.ZodOptional<z$1.ZodString>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<"error">;
+ message: z$1.ZodOptional<z$1.ZodString>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<"pong">;
+ timestamp: z$1.ZodOptional<z$1.ZodString>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<"ping">;
+ timestamp: z$1.ZodOptional<z$1.ZodString>;
+ }, z$1.core.$strip>, z$1.ZodObject<{
+ type: z$1.ZodLiteral<"connection">;
+ message: z$1.ZodOptional<z$1.ZodString>;
+ uploadId: z$1.ZodOptional<z$1.ZodString>;
+ timestamp: z$1.ZodOptional<z$1.ZodString>;
+ }, z$1.core.$strip>]>;
+ type WebSocketMessage<TEvent = unknown> = z$1.infer<typeof webSocketMessageSchema> | {
+ type: "upload_event";
+ payload: TEvent;
+ timestamp?: string;
+ } | {
+ type: "flow_event";
+ payload: TEvent;
+ timestamp?: string;
+ };
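On the receiving side, `webSocketMessageSchema` can drive a simple dispatch loop. A sketch of an inbound frame handler that answers pings (other message types elided; `connection` follows the `WebSocketConnection` interface above):

```typescript
// Parse an incoming frame and answer pings with pongs.
const handleFrame = (connection: WebSocketConnection, raw: string): void => {
  const message = webSocketMessageSchema.safeParse(JSON.parse(raw));
  if (!message.success) return; // ignore malformed frames
  if (message.data.type === "ping") {
    const pong: WebSocketMessage = {
      type: "pong",
      timestamp: new Date().toISOString(),
    };
    connection.send(JSON.stringify(pong));
  }
};
```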
+ //#endregion
+ //#region src/types/event-emitter.d.ts
+ /**
+ * Base event emitter interface for raw string message broadcasting.
+ *
+ * This is the low-level interface that event broadcasting implementations
+ * (WebSocket, Server-Sent Events, etc.) implement. It emits raw string messages
+ * without type safety or serialization.
+ *
+ * @property subscribe - Registers a WebSocket connection to receive events for a key
+ * @property unsubscribe - Removes subscription for a key
+ * @property emit - Broadcasts a string message to all subscribers of a key
+ *
+ * @example
+ * ```typescript
+ * // Implement BaseEventEmitter with WebSocket broadcast
+ * const websocketEmitter: BaseEventEmitter = {
+ * subscribe: (key, connection) => Effect.sync(() => {
+ * connections.set(key, [...(connections.get(key) || []), connection]);
+ * }),
+ *
+ * unsubscribe: (key) => Effect.sync(() => {
+ * connections.delete(key);
+ * }),
+ *
+ * emit: (key, event) => Effect.sync(() => {
+ * const subs = connections.get(key) || [];
+ * subs.forEach(conn => conn.send(event));
+ * })
+ * };
+ * ```
+ */
+ interface BaseEventEmitter {
+ readonly subscribe: (key: string, connection: WebSocketConnection) => Effect.Effect<void, UploadistaError>;
+ readonly unsubscribe: (key: string) => Effect.Effect<void, UploadistaError>;
+ readonly emit: (key: string, event: string) => Effect.Effect<void, UploadistaError>;
+ }
+ /**
+ * Type-safe event emitter interface with automatic serialization.
+ *
+ * This wraps a BaseEventEmitter and handles event serialization to JSON messages,
+ * providing type safety for events and ensuring consistent message format.
+ *
+ * @template TEvent - The type of events emitted by this emitter
+ *
+ * @property subscribe - Registers a WebSocket connection to receive typed events
+ * @property unsubscribe - Removes subscription
+ * @property emit - Serializes and broadcasts a typed event
+ *
+ * @example
+ * ```typescript
+ * // Use a typed event emitter
+ * const uploadEmitter: EventEmitter<UploadEvent> = new TypedEventEmitter(
+ * baseEmitter,
+ * (event) => JSON.stringify({ type: 'upload', payload: event })
+ * );
+ *
+ * // Emit type-safe events
+ * const program = Effect.gen(function* () {
+ * const event: UploadEvent = {
+ * type: UploadEventType.UPLOAD_PROGRESS,
+ * data: { id: "upload123", progress: 1024, total: 2048 }
+ * };
+ *
+ * // Automatic serialization
+ * yield* uploadEmitter.emit("upload123", event);
+ * });
+ * ```
+ */
+ type EventEmitter<TEvent> = {
+ readonly subscribe: (key: string, connection: WebSocketConnection) => Effect.Effect<void, UploadistaError>;
+ readonly unsubscribe: (key: string) => Effect.Effect<void, UploadistaError>;
+ readonly emit: (key: string, event: TEvent) => Effect.Effect<void, UploadistaError>;
+ };
+ /**
+ * Typed wrapper class that adds event serialization to a BaseEventEmitter.
+ *
+ * This class implements the EventEmitter interface by wrapping a BaseEventEmitter
+ * and handling serialization for a specific event type. It converts typed events
+ * to JSON message strings before broadcasting.
+ *
+ * @template TEvent - The type of events to emit
+ *
+ * @example
+ * ```typescript
+ * // Create a typed emitter for UploadEvent
+ * const uploadEmitter = new TypedEventEmitter<UploadEvent>(
+ * baseEmitter,
+ * (event) => JSON.stringify({
+ * type: "upload_event",
+ * payload: event,
+ * timestamp: new Date().toISOString()
+ * })
+ * );
+ *
+ * // Use the emitter
+ * const effect = Effect.gen(function* () {
+ * // Subscribe a WebSocket connection
+ * yield* uploadEmitter.subscribe("upload123", websocket);
+ *
+ * // Emit an event (automatically serialized)
+ * yield* uploadEmitter.emit("upload123", {
+ * type: UploadEventType.UPLOAD_COMPLETE,
+ * data: {
+ * id: "upload123",
+ * offset: 2048,
+ * size: 2048,
+ * storage: { id: "s3-production", type: "s3" }
+ * }
+ * });
+ *
+ * // Unsubscribe when done
+ * yield* uploadEmitter.unsubscribe("upload123");
+ * });
+ *
+ * // Custom message format
+ * const customEmitter = new TypedEventEmitter<MyEvent>(
+ * baseEmitter,
+ * (event) => `EVENT:${event.type}:${JSON.stringify(event.data)}`
+ * );
+ * ```
+ */
+ declare class TypedEventEmitter<TEvent> implements EventEmitter<TEvent> {
+ private baseEmitter;
+ private eventToMessage;
+ constructor(baseEmitter: BaseEventEmitter, eventToMessage: (event: TEvent) => string);
+ subscribe: (key: string, connection: WebSocketConnection) => Effect.Effect<void, UploadistaError>;
+ unsubscribe: (key: string) => Effect.Effect<void, UploadistaError>;
+ emit: (key: string, event: TEvent) => Effect.Effect<void, UploadistaError>;
+ }
+ /**
+ * Default event-to-message serialization helper.
+ *
+ * Creates a standardized JSON message format with type, payload, and timestamp.
+ * This is the recommended way to serialize events for WebSocket transmission.
+ *
+ * @param messageType - The message type identifier ("upload_event" or "flow_event")
+ * @returns An object with an eventToMessage function
+ *
+ * @example
+ * ```typescript
+ * // Create emitter with standard serialization
+ * const emitter = new TypedEventEmitter<UploadEvent>(
+ * baseEmitter,
+ * eventToMessageSerializer("upload_event").eventToMessage
+ * );
+ *
+ * // Messages will be formatted as:
+ * // {
+ * // "type": "upload_event",
+ * // "payload": { ...event data... },
+ * // "timestamp": "2024-01-15T10:30:00.000Z"
+ * // }
+ * ```
+ */
+ declare const eventToMessageSerializer: (messageType: "upload_event" | "flow_event") => {
+ eventToMessage: <T>(event: T) => string;
+ };
+ declare const BaseEventEmitterService_base: Context.TagClass<BaseEventEmitterService, "BaseEventEmitter", BaseEventEmitter>;
+ /**
+ * Effect-TS context tag for the base untyped event emitter.
+ *
+ * This is the low-level emitter that broadcasting implementations provide.
+ * Most application code should use typed emitters like UploadEventEmitter instead.
+ *
+ * @example
+ * ```typescript
+ * // Provide a base emitter implementation
+ * const baseEmitterLayer = Layer.succeed(BaseEventEmitterService, websocketEmitter);
+ *
+ * // Use in an Effect
+ * const effect = Effect.gen(function* () {
+ * const baseEmitter = yield* BaseEventEmitterService;
+ * yield* baseEmitter.emit("channel1", "raw message");
+ * });
+ * ```
+ */
+ declare class BaseEventEmitterService extends BaseEventEmitterService_base {}
+ declare const UploadEventEmitter_base: Context.TagClass<UploadEventEmitter, "UploadEventEmitter", EventEmitter<{
+ type: UploadEventType.UPLOAD_STARTED | UploadEventType.UPLOAD_COMPLETE;
+ data: {
+ id: string;
+ offset: number;
+ storage: {
+ id: string;
+ type: string;
+ path?: string | undefined;
+ uploadId?: string | undefined;
+ bucket?: string | undefined;
+ parts?: {
+ partNumber: number;
+ etag: string;
+ size: number;
+ }[] | undefined;
+ };
+ size?: number | undefined;
+ metadata?: Record<string, JsonValue> | undefined;
+ creationDate?: string | undefined;
+ url?: string | undefined;
+ sizeIsDeferred?: boolean | undefined;
+ checksum?: string | undefined;
+ checksumAlgorithm?: string | undefined;
+ flow?: {
+ flowId: string;
+ nodeId: string;
+ jobId: string;
+ } | undefined;
+ traceContext?: {
+ traceId: string;
+ spanId: string;
+ traceFlags: number;
+ } | undefined;
+ };
+ flow?: {
+ flowId: string;
+ nodeId: string;
+ jobId: string;
+ } | undefined;
+ } | {
+ type: UploadEventType.UPLOAD_PROGRESS;
+ data: {
+ id: string;
+ progress: number;
+ total: number;
+ };
+ flow?: {
+ flowId: string;
+ nodeId: string;
+ jobId: string;
+ } | undefined;
+ } | {
+ type: UploadEventType.UPLOAD_FAILED;
+ data: {
+ id: string;
+ error: string;
+ };
+ flow?: {
+ flowId: string;
+ nodeId: string;
+ jobId: string;
+ } | undefined;
+ } | {
+ type: UploadEventType.UPLOAD_VALIDATION_SUCCESS;
+ data: {
+ id: string;
+ validationType: "checksum" | "mimetype";
+ algorithm?: string | undefined;
+ };
+ flow?: {
+ flowId: string;
+ nodeId: string;
+ jobId: string;
+ } | undefined;
+ } | {
+ type: UploadEventType.UPLOAD_VALIDATION_FAILED;
+ data: {
+ id: string;
+ reason: string;
+ expected: string;
+ actual: string;
+ };
+ flow?: {
+ flowId: string;
+ nodeId: string;
+ jobId: string;
+ } | undefined;
+ } | {
+ type: UploadEventType.UPLOAD_VALIDATION_WARNING;
+ data: {
+ id: string;
+ message: string;
+ };
+ flow?: {
+ flowId: string;
+ nodeId: string;
+ jobId: string;
+ } | undefined;
+ }>>;
+ /**
+ * Effect-TS context tag for the UploadEvent typed emitter.
+ *
+ * This provides type-safe event emission for upload progress and lifecycle events.
+ * It's the primary way to broadcast upload events to connected clients.
+ *
+ * @example
+ * ```typescript
+ * const uploadEffect = Effect.gen(function* () {
+ * const emitter = yield* UploadEventEmitter;
+ *
+ * // Subscribe a client to upload events
+ * yield* emitter.subscribe("upload123", websocketConnection);
+ *
+ * // Emit progress event
+ * yield* emitter.emit("upload123", {
+ * type: UploadEventType.UPLOAD_PROGRESS,
+ * data: { id: "upload123", progress: 512000, total: 1024000 }
+ * });
+ *
+ * // Emit completion event
+ * yield* emitter.emit("upload123", {
+ * type: UploadEventType.UPLOAD_COMPLETE,
+ * data: {
+ * id: "upload123",
+ * offset: 1024000,
+ * size: 1024000,
+ * storage: { id: "s3-production", type: "s3" }
+ * }
+ * });
+ * });
+ * ```
+ */
+ declare class UploadEventEmitter extends UploadEventEmitter_base {}
+ /**
+ * Effect Layer that creates the UploadEventEmitter from a BaseEventEmitter.
+ *
+ * This layer automatically wires up JSON serialization for UploadEvent objects
+ * with the standard "upload_event" message format.
+ *
+ * @example
+ * ```typescript
+ * const program = Effect.gen(function* () {
+ * const emitter = yield* UploadEventEmitter;
+ * // Use the emitter...
+ * }).pipe(
+ * Effect.provide(uploadEventEmitter),
+ * Effect.provide(baseEmitterLayer)
+ * );
+ * ```
+ */
+ declare const uploadEventEmitter: Layer.Layer<UploadEventEmitter, never, BaseEventEmitterService>;
+ declare const FlowEventEmitter_base: Context.TagClass<FlowEventEmitter, "FlowEventEmitter", EventEmitter<FlowEvent>>;
+ /**
+ * Effect-TS context tag for the FlowEvent typed emitter.
+ *
+ * This provides type-safe event emission for flow processing lifecycle events.
+ * It's used to broadcast flow execution progress, node completion, and errors.
+ *
+ * @example
+ * ```typescript
+ * const flowEffect = Effect.gen(function* () {
+ * const emitter = yield* FlowEventEmitter;
+ *
+ * // Subscribe a client to flow job events
+ * yield* emitter.subscribe("job123", websocketConnection);
+ *
+ * // Emit node start event
+ * yield* emitter.emit("job123", {
+ * jobId: "job123",
+ * eventType: "NodeStart",
+ * flowId: "flow_resize",
+ * nodeId: "resize_1"
+ * });
+ *
+ * // Emit node completion event
+ * yield* emitter.emit("job123", {
+ * jobId: "job123",
+ * eventType: "NodeEnd",
+ * flowId: "flow_resize",
+ * nodeId: "resize_1",
+ * result: { width: 800, height: 600 }
+ * });
+ * });
+ * ```
+ */
+ declare class FlowEventEmitter extends FlowEventEmitter_base {}
+ /**
+ * Effect Layer that creates the FlowEventEmitter from a BaseEventEmitter.
+ *
+ * This layer automatically wires up JSON serialization for FlowEvent objects
+ * with the standard "flow_event" message format.
+ *
+ * @example
+ * ```typescript
+ * const program = Effect.gen(function* () {
+ * const emitter = yield* FlowEventEmitter;
+ * // Use the emitter...
+ * }).pipe(
+ * Effect.provide(flowEventEmitter),
+ * Effect.provide(baseEmitterLayer)
+ * );
+ * ```
+ */
+ declare const flowEventEmitter: Layer.Layer<FlowEventEmitter, never, BaseEventEmitterService>;
+ //#endregion
+ //#region src/types/health-check.d.ts
+ /**
+ * Health Check Types for Uploadista SDK.
+ *
+ * This module provides types for the health monitoring system including:
+ * - Liveness probes (`/health`)
+ * - Readiness probes (`/ready`)
+ * - Component health details (`/health/components`)
+ *
+ * @module types/health-check
+ */
+ /**
+ * Health status values for components and overall system health.
+ *
+ * - `healthy`: All checks passed, system is fully operational
+ * - `degraded`: Some non-critical issues detected, but system is functional
+ * - `unhealthy`: Critical components unavailable, system cannot serve requests
+ */
+ type HealthStatus = "healthy" | "degraded" | "unhealthy";
+ /**
+ * Health status for an individual component (storage, KV store, etc.).
+ */
+ interface ComponentHealth {
+ /** Current health status of the component */
+ status: HealthStatus;
+ /** Latency of the last health check in milliseconds */
+ latency?: number;
+ /** Human-readable status message */
+ message?: string;
+ /** ISO 8601 timestamp of the last health check */
+ lastCheck?: string;
+ }
+ /**
+ * Circuit breaker health summary aggregating all circuit states.
+ */
+ interface CircuitBreakerHealthSummary {
+ /** Overall circuit breaker system status */
+ status: HealthStatus;
+ /** Number of circuits currently in open state */
+ openCircuits: number;
+ /** Total number of tracked circuits */
+ totalCircuits: number;
+ /** Detailed stats for each circuit (optional, for debugging) */
+ circuits?: Array<{
+ nodeType: string;
+ state: "closed" | "open" | "half-open";
+ failureCount: number;
+ timeSinceLastStateChange: number;
+ }>;
+ }
+ /**
+ * Dead Letter Queue health summary.
+ */
+ interface DlqHealthSummary {
+ /** Overall DLQ status */
+ status: HealthStatus;
+ /** Number of items pending retry */
+ pendingItems: number;
+ /** Number of items that have exhausted all retries */
+ exhaustedItems: number;
+ /** ISO 8601 timestamp of the oldest item in the queue */
+ oldestItem?: string;
+ }
+ /**
+ * Components health map for detailed health responses.
+ */
+ interface HealthComponents {
+ /** Storage backend health */
+ storage?: ComponentHealth;
+ /** KV store health */
+ kvStore?: ComponentHealth;
+ /** Event broadcaster health */
+ eventBroadcaster?: ComponentHealth;
+ /** Circuit breaker summary (if enabled) */
+ circuitBreaker?: CircuitBreakerHealthSummary;
+ /** Dead letter queue summary (if enabled) */
+ deadLetterQueue?: DlqHealthSummary;
+ }
+ /**
+ * Standard health response structure.
+ *
+ * Used for all health endpoints with varying levels of detail.
+ */
+ interface HealthResponse {
+ /** Overall health status */
+ status: HealthStatus;
+ /** ISO 8601 timestamp of the response */
+ timestamp: string;
+ /** Optional version string for deployment identification */
+ version?: string;
+ /** Server uptime in milliseconds */
+ uptime?: number;
+ /** Component-level health details (for /health/components) */
+ components?: HealthComponents;
+ }
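For reference, a `degraded` response from `/health/components` might look like the following literal (all values are illustrative):

```typescript
const example: HealthResponse = {
  status: "degraded",
  timestamp: "2024-01-15T10:30:00.000Z",
  version: "1.0.0-beta.3",
  uptime: 86_400_000, // 24h in milliseconds
  components: {
    storage: { status: "healthy", latency: 12 },
    kvStore: { status: "degraded", latency: 480, message: "slow responses" },
    circuitBreaker: { status: "healthy", openCircuits: 0, totalCircuits: 4 },
  },
};
```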
+ /**
+ * Configuration options for health check behavior.
+ */
+ interface HealthCheckConfig {
+ /**
+ * Timeout for dependency health checks in milliseconds.
+ * @default 5000
+ */
+ timeout?: number;
+ /**
+ * Whether to check storage backend health.
+ * @default true
+ */
+ checkStorage?: boolean;
+ /**
+ * Whether to check KV store health.
+ * @default true
+ */
+ checkKvStore?: boolean;
+ /**
+ * Whether to check event broadcaster health.
+ * @default true
+ */
+ checkEventBroadcaster?: boolean;
+ /**
+ * Optional version string to include in health responses.
+ * Useful for identifying deployed versions.
+ */
+ version?: string;
+ }
+ /**
+ * Default health check configuration values.
+ */
+ declare const DEFAULT_HEALTH_CHECK_CONFIG: Required<Omit<HealthCheckConfig, "version">>;
+ /**
+ * Supported response formats for health endpoints.
+ */
+ type HealthResponseFormat = "json" | "text";
+ /**
+ * Determines the response format based on the Accept header.
+ *
+ * @param acceptHeader - The Accept header value from the request
+ * @returns The response format to use
+ */
+ declare function getHealthResponseFormat(acceptHeader?: string | null): HealthResponseFormat;
+ /**
+ * Formats a health response as plain text.
+ *
+ * @param status - The health status
+ * @returns Plain text representation
+ */
+ declare function formatHealthAsText(status: HealthStatus): string;
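Together, the two helpers support simple content negotiation in a handler. A hedged sketch; `buildHealthResponse` is a hypothetical helper standing in for however the application assembles its status:

```typescript
// Hypothetical helper: assembles a HealthResponse for the current process.
declare function buildHealthResponse(): HealthResponse;

const handleHealth = (request: Request): Response => {
  const health = buildHealthResponse();
  const format = getHealthResponseFormat(request.headers.get("accept"));
  return format === "text"
    ? new Response(formatHealthAsText(health.status), {
        headers: { "content-type": "text/plain" },
      })
    : Response.json(health);
};
```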
+ //#endregion
+ //#region src/types/input-file.d.ts
+ /**
+ * Zod schema for validating InputFile objects.
+ *
+ * This schema defines the structure and validation rules for file upload requests.
+ * Use this schema to parse and validate input data when creating new uploads.
+ *
+ * @see {@link InputFile} for the TypeScript type
+ */
+ declare const inputFileSchema: z.ZodObject<{
+ uploadLengthDeferred: z.ZodOptional<z.ZodBoolean>;
+ storageId: z.ZodString;
+ size: z.ZodOptional<z.ZodNumber>;
+ sizeHint: z.ZodOptional<z.ZodNumber>;
+ type: z.ZodString;
+ fileName: z.ZodOptional<z.ZodString>;
+ lastModified: z.ZodOptional<z.ZodNumber>;
+ metadata: z.ZodOptional<z.ZodString>;
+ checksum: z.ZodOptional<z.ZodString>;
+ checksumAlgorithm: z.ZodOptional<z.ZodString>;
+ flow: z.ZodOptional<z.ZodObject<{
+ flowId: z.ZodString;
+ nodeId: z.ZodString;
+ jobId: z.ZodString;
+ }, z.core.$strip>>;
+ }, z.core.$strip>;
+ /**
+ * Represents the input data for creating a new file upload.
+ *
+ * This type defines the information required to initiate an upload.
+ * It's used by clients to provide upload metadata before sending file data.
+ *
+ * @property storageId - Target storage backend identifier (e.g., "s3-production", "azure-blob")
+ * @property size - File size in bytes. Optional when uploadLengthDeferred is true.
+ * @property sizeHint - Optional size hint for optimization when exact size is unknown
+ * @property type - MIME type of the file (e.g., "image/jpeg", "application/pdf")
+ * @property uploadLengthDeferred - If true, file size is not known upfront (streaming upload)
+ * @property fileName - Original filename from the client
+ * @property lastModified - File's last modified timestamp in milliseconds since epoch
+ * @property metadata - Base64-encoded metadata string (as per tus protocol)
+ * @property checksum - Expected file checksum for validation
+ * @property checksumAlgorithm - Algorithm used for checksum (e.g., "md5", "sha256")
+ * @property flow - Optional flow processing configuration
+ * @property flow.flowId - ID of the flow to execute on this file
+ * @property flow.nodeId - Starting node ID in the flow
+ * @property flow.jobId - Flow job execution ID
+ *
+ * @example
+ * ```typescript
+ * // Basic file upload
+ * const inputFile: InputFile = {
+ * storageId: "s3-production",
+ * size: 1024000,
+ * type: "image/jpeg",
+ * fileName: "photo.jpg",
+ * lastModified: Date.now()
+ * };
+ *
+ * // Upload with metadata (base64 encoded as per tus protocol)
+ * const metadata = btoa(JSON.stringify({
+ * userId: "user_123",
+ * albumId: "album_456"
+ * }));
+ * const inputWithMetadata: InputFile = {
+ * storageId: "s3-production",
+ * size: 2048000,
+ * type: "image/png",
+ * fileName: "screenshot.png",
+ * metadata
+ * };
+ *
+ * // Upload with checksum validation
+ * const inputWithChecksum: InputFile = {
+ * storageId: "s3-production",
+ * size: 512000,
+ * type: "application/pdf",
+ * fileName: "document.pdf",
+ * checksum: "5d41402abc4b2a76b9719d911017c592",
+ * checksumAlgorithm: "md5"
+ * };
+ *
+ * // Upload that triggers a flow
+ * const inputWithFlow: InputFile = {
+ * storageId: "s3-temp",
+ * size: 4096000,
+ * type: "image/jpeg",
+ * fileName: "large-image.jpg",
+ * flow: {
+ * flowId: "resize-and-optimize",
+ * nodeId: "input_1",
+ * jobId: "job_789"
+ * }
+ * };
+ *
+ * // Streaming upload (size unknown) - size can be omitted
+ * const streamingInput: InputFile = {
+ * storageId: "s3-production",
+ * type: "video/mp4",
+ * uploadLengthDeferred: true,
+ * fileName: "live-stream.mp4"
+ * };
+ *
+ * // Streaming upload with size hint for optimization
+ * const streamingWithHint: InputFile = {
+ * storageId: "s3-production",
+ * type: "image/webp",
+ * uploadLengthDeferred: true,
+ * sizeHint: 5_000_000, // ~5MB expected
+ * fileName: "optimized-image.webp"
+ * };
+ * ```
+ */
+ type InputFile = z.infer<typeof inputFileSchema>;
+ //#endregion
+ //#region src/types/middleware.d.ts
+ type MiddlewareContext = {
+ request: Request;
+ uploadId?: string;
+ metadata?: Record<string, string>;
+ };
+ type MiddlewareNext = () => Promise<Response>;
+ type Middleware = (context: MiddlewareContext, next: MiddlewareNext) => Promise<Response>;
+ declare const MiddlewareService_base: Context.TagClass<MiddlewareService, "MiddlewareService", {
+ readonly execute: (middlewares: Middleware[], context: MiddlewareContext, handler: MiddlewareNext) => Effect.Effect<Response, UploadistaError>;
+ }>;
+ declare class MiddlewareService extends MiddlewareService_base {}
+ declare const MiddlewareServiceLive: Layer.Layer<MiddlewareService, never, never>;
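The middleware contract is the familiar `(context, next)` onion. As a hedged sketch, an auth check composed through `MiddlewareService.execute` might look like this; the header name and terminal handler are illustrative:

```typescript
import { Effect } from "effect";

// Illustrative middleware: reject requests without an API key header.
const requireApiKey: Middleware = async (context, next) => {
  if (!context.request.headers.get("x-api-key")) {
    return new Response("unauthorized", { status: 401 });
  }
  return next();
};

const handle = (request: Request) =>
  Effect.gen(function* () {
    const middleware = yield* MiddlewareService;
    return yield* middleware.execute(
      [requireApiKey],
      { request },
      async () => new Response("ok"), // terminal handler
    );
  }).pipe(Effect.provide(MiddlewareServiceLive));
```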
+ //#endregion
+ export { BaseKvStoreService as $, calculateBackoffDelay as $t, webSocketMessageSchema as A, JsonValue as An, FlowDeadLetterQueueConfig as At, DataStoreCapabilities as B, DeadLetterItemStatus as Bn, TypedOutput as Bt, TypedEventEmitter as C, FlowEventNodeStart as Cn, BuiltInTypedOutput as Ct, uploadEventEmitter as D, NodeType as Dn, FlowCircuitBreakerConfig as Dt, flowEventEmitter as E, ConditionValue as En, FileNamingFunction as Et, EventBroadcasterService as F, uploadFileSchema as Fn, NarrowTypedOutput as Ft, StreamingConfig as G, DeadLetterRetryAttempt as Gn, isStreamingInputV1 as Gt, DataStoreWriteOptions as H, DeadLetterProcessResult as Hn, completeNodeExecution as Ht, BufferedUploadFileDataStore as I, DeadLetterCleanupOptions as In, NodeConnectionValidator as It, UploadFileDataStoresShape as J, CircuitBreakerStats as Jn, DEFAULT_RETRY_POLICY as Jt, UploadFileDataStore as K, CircuitBreakerStateData as Kn, waitingNodeExecution as Kt, DEFAULT_MULTIPART_PART_SIZE as L, DeadLetterCleanupResult as Ln, NodeExecutionResult as Lt, UploadEventType as M, UploadFileTraceContext as Mn, FlowNode as Mt, uploadEventSchema as N, jsonValueSchema as Nn, FlowNodeData as Nt, WebSocketConnection as O, createFlowNode as On, FlowCircuitBreakerFallback as Ot, EventBroadcaster as P, traceContextSchema as Pn, NamingContext as Pt, BaseKvStore as Q, RetryPolicy as Qt, DEFAULT_STREAMING_CONFIG as R, DeadLetterError as Rn, NodeTypeMap as Rt, FlowEventEmitter as S, FlowEventNodeResume as Sn, BuiltInNodeType as St, eventToMessageSerializer as T, ConditionOperator as Tn, FileNamingConfig as Tt, StreamWriteOptions as U, DeadLetterQueueStats as Un, createOutputGuard as Ut, DataStoreConfig as V, DeadLetterListOptions as Vn, TypedOutputGuard as Vt, StreamWriteResult as W, DeadLetterRetryAllResult as Wn, isStorageOutputV1 as Wt, createDataStoreLayer as X, CircuitBreakerStoreService as Xn, FixedBackoff as Xt, UploadStrategy as Y, CircuitBreakerStore as Yn, ExponentialBackoff as Yt, isDataStore as Z, createInitialCircuitBreakerState as Zn, ImmediateBackoff as Zt, formatHealthAsText as _, FlowEventJobStart as _n, FlowJobStatus as _t, MiddlewareServiceLive as a, FlowEventDlqItemAdded as an, UploadFileKVStore as at, BaseEventEmitterService as b, FlowEventNodePause as bn, FlowJobTraceContext as bt, CircuitBreakerHealthSummary as c, FlowEventDlqRetryFailed as cn, flowQueueKvStore as ct, DlqHealthSummary as d, FlowEventFlowCancel as dn, DEFAULT_QUEUE_CONFIG as dt, calculateExpirationDate as en, DeadLetterQueueKVStore as et, HealthCheckConfig as f, FlowEventFlowEnd as fn, FlowQueueConfig as ft, HealthStatus as g, FlowEventJobEnd as gn, FlowJob as gt, HealthResponseFormat as h, FlowEventFlowStart as hn, FlowQueueStats as ht, MiddlewareService as i, FlowEvent as in, TypedKvStore as it, UploadEvent as j, UploadFile as jn, FlowEdge as jt, WebSocketMessage as k, getNodeData as kn, FlowConfig as kt, ComponentHealth as l, FlowEventDlqRetryStart as ln, jsonSerializer as lt, HealthResponse as m, FlowEventFlowPause as mn, FlowQueueItemStatus as mt, MiddlewareContext as n, DlqEvent as nn, FlowQueueKVStore as nt, InputFile as o, FlowEventDlqItemExhausted as on, deadLetterQueueKvStore as ot, HealthComponents as p, FlowEventFlowError as pn, FlowQueueItem as pt, UploadFileDataStores as q, CircuitBreakerStateValue as qn, BackoffStrategy as qt, MiddlewareNext as r, EventType as rn, KvStore as rt, inputFileSchema as s, FlowEventDlqItemResolved as sn, flowJobKvStore as st, Middleware as t, isErrorRetryable as tn, FlowJobKVStore as tt, DEFAULT_HEALTH_CHECK_CONFIG as u, FlowEventDlqRetrySuccess as un, uploadFileKvStore as ut, getHealthResponseFormat as v, FlowEventNodeEnd as vn, FlowJobTask as vt, UploadEventEmitter as w, ConditionField as wn, CustomTypedOutput as wt, EventEmitter as x, FlowEventNodeResponse as xn, AutoNamingSuffixGenerator as xt, BaseEventEmitter as y, FlowEventNodeError as yn, FlowJobTaskStatus as yt, DataStore as z, DeadLetterItem as zn, TypeCompatibilityChecker as zt };
+ //# sourceMappingURL=middleware-CYizzAhP.d.mts.map