@uploadista/core 0.2.0 → 1.0.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{checksum-BjP9nb5b.mjs → checksum-BRjFmTRk.mjs} +2 -2
- package/dist/{checksum-BjP9nb5b.mjs.map → checksum-BRjFmTRk.mjs.map} +1 -1
- package/dist/{checksum-B7RDiO7V.cjs → checksum-BrjQ8GJL.cjs} +1 -1
- package/dist/errors/index.cjs +1 -1
- package/dist/errors/index.d.cts +1 -1
- package/dist/errors/index.d.mts +1 -1
- package/dist/errors/index.mjs +1 -1
- package/dist/flow/index.cjs +1 -1
- package/dist/flow/index.d.cts +3 -2
- package/dist/flow/index.d.mts +8 -5
- package/dist/flow/index.mjs +1 -1
- package/dist/generate-id-BAMRQzMr.d.cts +34 -0
- package/dist/generate-id-BAMRQzMr.d.cts.map +1 -0
- package/dist/generate-id-DuZwLm4m.d.mts +34 -0
- package/dist/generate-id-DuZwLm4m.d.mts.map +1 -0
- package/dist/index.cjs +1 -1
- package/dist/index.d.cts +8 -5
- package/dist/index.d.mts +8 -5
- package/dist/index.mjs +1 -1
- package/dist/middleware-BlrOGKrp.d.cts +4129 -0
- package/dist/middleware-BlrOGKrp.d.cts.map +1 -0
- package/dist/middleware-BmRmwme_.d.mts +4129 -0
- package/dist/middleware-BmRmwme_.d.mts.map +1 -0
- package/dist/resolve-upload-metadata-B2C5e1y1.d.cts +4533 -0
- package/dist/resolve-upload-metadata-B2C5e1y1.d.cts.map +1 -0
- package/dist/resolve-upload-metadata-DbkBzxm8.d.mts +4533 -0
- package/dist/resolve-upload-metadata-DbkBzxm8.d.mts.map +1 -0
- package/dist/run-args-C4no7Ny4.cjs +1 -0
- package/dist/run-args-CIqI4Zc7.mjs +2 -0
- package/dist/run-args-CIqI4Zc7.mjs.map +1 -0
- package/dist/{stream-limiter-BCFULdAM.d.cts → stream-limiter-7wkBVLWT.d.mts} +2 -2
- package/dist/{stream-limiter-BCFULdAM.d.cts.map → stream-limiter-7wkBVLWT.d.mts.map} +1 -1
- package/dist/{stream-limiter-DZ22uIqf.cjs → stream-limiter-B-Y0DTgA.cjs} +1 -1
- package/dist/{stream-limiter-CTJPEJqE.mjs → stream-limiter-CvDuNIyd.mjs} +2 -2
- package/dist/{stream-limiter-CTJPEJqE.mjs.map → stream-limiter-CvDuNIyd.mjs.map} +1 -1
- package/dist/{stream-limiter-Bi7OTbRp.d.mts → stream-limiter-D1KC-6pK.d.cts} +2 -2
- package/dist/{stream-limiter-Bi7OTbRp.d.mts.map → stream-limiter-D1KC-6pK.d.cts.map} +1 -1
- package/dist/streams/index.cjs +1 -1
- package/dist/streams/index.d.cts +1 -1
- package/dist/streams/index.d.mts +2 -2
- package/dist/streams/index.mjs +1 -1
- package/dist/testing/index.cjs +1 -1
- package/dist/testing/index.d.cts +2 -1
- package/dist/testing/index.d.cts.map +1 -1
- package/dist/testing/index.d.mts +7 -4
- package/dist/testing/index.d.mts.map +1 -1
- package/dist/testing/index.mjs +1 -1
- package/dist/{throttle-Da0OA8JT.d.cts → throttle-3FRcr7MU.d.mts} +4 -34
- package/dist/throttle-3FRcr7MU.d.mts.map +1 -0
- package/dist/{throttle-ibiT6E4U.d.mts → throttle-BlH27EGu.d.cts} +4 -34
- package/dist/throttle-BlH27EGu.d.cts.map +1 -0
- package/dist/{throttle-KnkRgZPi.cjs → throttle-Dp59f37i.cjs} +1 -1
- package/dist/{throttle-CnDa3v1k.mjs → throttle-TFY-V41R.mjs} +2 -2
- package/dist/{throttle-CnDa3v1k.mjs.map → throttle-TFY-V41R.mjs.map} +1 -1
- package/dist/types/index.cjs +1 -1
- package/dist/types/index.d.cts +2 -2
- package/dist/types/index.d.mts +3 -5
- package/dist/types/index.mjs +1 -1
- package/dist/upload/index.cjs +1 -1
- package/dist/upload/index.d.cts +1 -1
- package/dist/upload/index.d.mts +4 -4
- package/dist/upload/index.mjs +1 -1
- package/dist/{upload-strategy-negotiator-DfiQ0Fy0.cjs → upload-strategy-negotiator-5da9ZySO.cjs} +1 -1
- package/dist/{upload-strategy-negotiator-BuxPf1sa.mjs → upload-strategy-negotiator-ChKvppnA.mjs} +2 -2
- package/dist/{upload-strategy-negotiator-BuxPf1sa.mjs.map → upload-strategy-negotiator-ChKvppnA.mjs.map} +1 -1
- package/dist/upload-strategy-negotiator-EmOrc2bn.d.cts +455 -0
- package/dist/upload-strategy-negotiator-EmOrc2bn.d.cts.map +1 -0
- package/dist/upload-strategy-negotiator-a2O28qPf.d.mts +455 -0
- package/dist/upload-strategy-negotiator-a2O28qPf.d.mts.map +1 -0
- package/dist/{uploadista-error-B-geDgi8.cjs → uploadista-error-CZx1JU_L.cjs} +3 -1
- package/dist/{uploadista-error-Fsfvr2Bb.mjs → uploadista-error-DQ7V1FlX.mjs} +3 -1
- package/dist/uploadista-error-DQ7V1FlX.mjs.map +1 -0
- package/dist/{uploadista-error-BragVhIs.d.mts → uploadista-error-LtiZn-R_.d.mts} +2 -2
- package/dist/{uploadista-error-BragVhIs.d.mts.map → uploadista-error-LtiZn-R_.d.mts.map} +1 -1
- package/dist/{uploadista-error-Cj_pAFck.d.cts → uploadista-error-eZtG4iyf.d.cts} +2 -2
- package/dist/{uploadista-error-Cj_pAFck.d.cts.map → uploadista-error-eZtG4iyf.d.cts.map} +1 -1
- package/dist/utils/index.cjs +1 -1
- package/dist/utils/index.d.cts +2 -1
- package/dist/utils/index.d.mts +3 -2
- package/dist/utils/index.mjs +1 -1
- package/dist/websocket-Br0ijEZA.cjs +1 -0
- package/dist/websocket-DftnHFfN.mjs +2 -0
- package/dist/websocket-DftnHFfN.mjs.map +1 -0
- package/package.json +3 -3
- package/src/errors/uploadista-error.ts +11 -1
- package/src/flow/README.md +115 -0
- package/src/flow/flow-engine.ts +34 -2
- package/src/flow/flow-queue-store.ts +155 -0
- package/src/flow/flow-queue.ts +640 -0
- package/src/flow/index.ts +4 -0
- package/src/flow/types/flow-queue-item.ts +154 -0
- package/src/types/kv-store.ts +31 -1
- package/tests/flow-queue-store.test.ts +150 -0
- package/tests/flow-queue.test.ts +308 -0
- package/dist/resolve-upload-metadata-BUVl1LoS.d.cts +0 -8723
- package/dist/resolve-upload-metadata-BUVl1LoS.d.cts.map +0 -1
- package/dist/resolve-upload-metadata-MPDmDfOZ.d.mts +0 -8723
- package/dist/resolve-upload-metadata-MPDmDfOZ.d.mts.map +0 -1
- package/dist/run-args-WD1otVrz.mjs +0 -2
- package/dist/run-args-WD1otVrz.mjs.map +0 -1
- package/dist/run-args-g74p8pEZ.cjs +0 -1
- package/dist/throttle-Da0OA8JT.d.cts.map +0 -1
- package/dist/throttle-ibiT6E4U.d.mts.map +0 -1
- package/dist/uploadista-error-Fsfvr2Bb.mjs.map +0 -1
- package/dist/websocket-Avz4T8YB.cjs +0 -1
- package/dist/websocket-CdgVhVJs.mjs +0 -2
- package/dist/websocket-CdgVhVJs.mjs.map +0 -1
|
@@ -0,0 +1,640 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Flow Queue Service — global flow-level concurrency control with DLQ retry loop.
|
|
3
|
+
*
|
|
4
|
+
* FlowQueueService sits above FlowEngine.runFlow() and manages a bounded pool of
|
|
5
|
+
* simultaneous flow executions. When the queue is present in the Effect layer,
|
|
6
|
+
* FlowEngine.runFlow() delegates to it instead of forking directly.
|
|
7
|
+
*
|
|
8
|
+
* Features:
|
|
9
|
+
* - Configurable maxConcurrency (default: 4)
|
|
10
|
+
* - Pluggable FlowQueueStore (default: in-memory)
|
|
11
|
+
* - Background DLQ retry loop (when DeadLetterQueueService is also present)
|
|
12
|
+
* - Optional service: existing deployments without it see unchanged behavior
|
|
13
|
+
*
|
|
14
|
+
* @module flow/flow-queue
|
|
15
|
+
* @see {@link FlowQueueStore} for persistence backends
|
|
16
|
+
* @see {@link DeadLetterQueueService} for the DLQ integration
|
|
17
|
+
*
|
|
18
|
+
* @example
|
|
19
|
+
* ```typescript
|
|
20
|
+
* // Minimal wiring — in-memory store, default config
|
|
21
|
+
* const program = myEffect.pipe(
|
|
22
|
+
* Effect.provide(FlowQueueService.Default()),
|
|
23
|
+
* Effect.provide(flowEngineLayer),
|
|
24
|
+
* );
|
|
25
|
+
*
|
|
26
|
+
* // With Redis store for persistence
|
|
27
|
+
* const redisStore = new RedisFlowQueueStore({ redis });
|
|
28
|
+
* const program = myEffect.pipe(
|
|
29
|
+
* Effect.provide(FlowQueueService.make({ maxConcurrency: 8 }, redisStore)),
|
|
30
|
+
* Effect.provide(flowEngineLayer),
|
|
31
|
+
* );
|
|
32
|
+
* ```
|
|
33
|
+
*/
|
|
34
|
+
|
|
35
|
+
import { Context, Duration, Effect, Layer, Option, Ref, Schedule } from "effect";
|
|
36
|
+
import { UploadistaError } from "../errors";
|
|
37
|
+
import { FlowQueueKVStore, flowQueueKvStore } from "../types/kv-store";
|
|
38
|
+
import { DeadLetterQueueService } from "./dead-letter-queue";
|
|
39
|
+
import type { DeadLetterItem } from "./types/dead-letter-item";
|
|
40
|
+
import { MemoryFlowQueueStore, type FlowQueueStore } from "./flow-queue-store";
|
|
41
|
+
import type { FlowEngineShape } from "./flow-engine";
|
|
42
|
+
import type { FlowJob } from "./types/flow-job";
|
|
43
|
+
import {
|
|
44
|
+
DEFAULT_QUEUE_CONFIG,
|
|
45
|
+
type FlowQueueConfig,
|
|
46
|
+
type FlowQueueItem,
|
|
47
|
+
type FlowQueueStats,
|
|
48
|
+
} from "./types/flow-queue-item";
|
|
49
|
+
|
|
50
|
+
// Import the FlowEngine tag lazily to avoid circular module graph issues.
|
|
51
|
+
// flow-engine.ts will import FlowQueueService for the optional check,
|
|
52
|
+
// and flow-queue.ts needs FlowEngine to dispatch items. We break the cycle
|
|
53
|
+
// by importing only the *class* (tag) from flow-engine at call time inside
|
|
54
|
+
// the Effect generator, where module evaluation is already complete.
|
|
55
|
+
import { FlowEngine } from "./flow-engine";
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Context marker that signals the current Effect is running inside the
|
|
59
|
+
* FlowQueueService worker dispatch loop.
|
|
60
|
+
*
|
|
61
|
+
* When this marker is present in the Effect context, FlowEngine.runFlow()
|
|
62
|
+
* skips the FlowQueueService delegation and executes directly via forkDaemon.
|
|
63
|
+
* This prevents infinite re-enqueue cycles when the worker calls runFlow.
|
|
64
|
+
*
|
|
65
|
+
* @internal
|
|
66
|
+
*/
|
|
67
|
+
export class FlowQueueDispatchMarker extends Context.Tag(
|
|
68
|
+
"FlowQueueDispatchMarker",
|
|
69
|
+
)<FlowQueueDispatchMarker, true>() {}
|
|
70
|
+
|
|
71
|
+
/**
|
|
72
|
+
* Shape of the FlowQueueService.
|
|
73
|
+
*
|
|
74
|
+
* All operations return Effect types for composable, type-safe error handling.
|
|
75
|
+
*/
|
|
76
|
+
export interface FlowQueueServiceShape {
|
|
77
|
+
/**
|
|
78
|
+
* Enqueue a flow for execution.
|
|
79
|
+
*
|
|
80
|
+
* Returns immediately with a FlowQueueItem in "pending" state.
|
|
81
|
+
* The worker loop will dispatch the flow when a concurrency slot is available.
|
|
82
|
+
*
|
|
83
|
+
* @param params - Flow execution parameters
|
|
84
|
+
* @returns The created queue item with status "pending"
|
|
85
|
+
*/
|
|
86
|
+
enqueue(params: {
|
|
87
|
+
flowId: string;
|
|
88
|
+
storageId: string;
|
|
89
|
+
input: unknown;
|
|
90
|
+
clientId: string | null;
|
|
91
|
+
dlqItemId?: string;
|
|
92
|
+
}): Effect.Effect<FlowQueueItem, UploadistaError>;
|
|
93
|
+
|
|
94
|
+
/**
|
|
95
|
+
* Retrieve the current status of a queue item by ID.
|
|
96
|
+
*
|
|
97
|
+
* @param itemId - The queue item ID
|
|
98
|
+
* @returns The queue item
|
|
99
|
+
* @throws QUEUE_ITEM_NOT_FOUND if the ID is unknown
|
|
100
|
+
*/
|
|
101
|
+
getStatus(itemId: string): Effect.Effect<FlowQueueItem, UploadistaError>;
|
|
102
|
+
|
|
103
|
+
/**
|
|
104
|
+
* Cancel a pending queue item before it starts executing.
|
|
105
|
+
*
|
|
106
|
+
* @param itemId - The queue item ID
|
|
107
|
+
* @throws QUEUE_ITEM_ALREADY_RUNNING if the item is already running
|
|
108
|
+
*/
|
|
109
|
+
cancel(itemId: string): Effect.Effect<void, UploadistaError>;
|
|
110
|
+
|
|
111
|
+
/**
|
|
112
|
+
* List queue items, optionally filtered by status.
|
|
113
|
+
*
|
|
114
|
+
* @param options - Optional filter options
|
|
115
|
+
* @returns Array of matching queue items
|
|
116
|
+
*/
|
|
117
|
+
list(options?: {
|
|
118
|
+
status?: FlowQueueItem["status"];
|
|
119
|
+
}): Effect.Effect<FlowQueueItem[], UploadistaError>;
|
|
120
|
+
|
|
121
|
+
/**
|
|
122
|
+
* Get aggregate queue statistics for monitoring.
|
|
123
|
+
*
|
|
124
|
+
* @returns Current queue stats including counts and concurrency info
|
|
125
|
+
*/
|
|
126
|
+
getStats(): Effect.Effect<FlowQueueStats, UploadistaError>;
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
/**
|
|
130
|
+
* Build a FlowQueueStore implementation backed by a KvStore<FlowQueueItem>.
|
|
131
|
+
*
|
|
132
|
+
* Items are stored as typed JSON values. listByStatus scans the full key list
|
|
133
|
+
* and filters in memory — acceptable for queue sizes up to a few thousand items.
|
|
134
|
+
*/
|
|
135
|
+
function makeKvStoreFlowQueueStore(
|
|
136
|
+
kv: import("../types/kv-store").KvStore<FlowQueueItem>,
|
|
137
|
+
): FlowQueueStore {
|
|
138
|
+
const parseDates = (item: FlowQueueItem): FlowQueueItem => ({
|
|
139
|
+
...item,
|
|
140
|
+
enqueuedAt: new Date(item.enqueuedAt),
|
|
141
|
+
startedAt: item.startedAt ? new Date(item.startedAt) : undefined,
|
|
142
|
+
completedAt: item.completedAt ? new Date(item.completedAt) : undefined,
|
|
143
|
+
});
|
|
144
|
+
|
|
145
|
+
return {
|
|
146
|
+
createItem: (item) =>
|
|
147
|
+
kv.set(item.id, item).pipe(Effect.map(() => item)),
|
|
148
|
+
|
|
149
|
+
getItem: (id) =>
|
|
150
|
+
kv.get(id).pipe(
|
|
151
|
+
Effect.map((item) => parseDates(item)),
|
|
152
|
+
Effect.catchAll(() => Effect.succeed(null as FlowQueueItem | null)),
|
|
153
|
+
),
|
|
154
|
+
|
|
155
|
+
updateItem: (id, updates) =>
|
|
156
|
+
Effect.gen(function* () {
|
|
157
|
+
const existing = yield* kv.get(id).pipe(
|
|
158
|
+
Effect.map(parseDates),
|
|
159
|
+
Effect.mapError(() =>
|
|
160
|
+
UploadistaError.fromCode("FLOW_JOB_NOT_FOUND", {
|
|
161
|
+
body: `Queue item ${id} not found`,
|
|
162
|
+
}),
|
|
163
|
+
),
|
|
164
|
+
);
|
|
165
|
+
const updated: FlowQueueItem = { ...existing, ...updates };
|
|
166
|
+
yield* kv.set(id, updated);
|
|
167
|
+
return updated;
|
|
168
|
+
}),
|
|
169
|
+
|
|
170
|
+
listByStatus: (status) =>
|
|
171
|
+
Effect.gen(function* () {
|
|
172
|
+
if (!kv.list) return [];
|
|
173
|
+
const keys = yield* kv.list();
|
|
174
|
+
const items: FlowQueueItem[] = [];
|
|
175
|
+
for (const key of keys) {
|
|
176
|
+
const item = yield* kv
|
|
177
|
+
.get(key)
|
|
178
|
+
.pipe(
|
|
179
|
+
Effect.map((i) => parseDates(i) as FlowQueueItem | null),
|
|
180
|
+
Effect.catchAll(() =>
|
|
181
|
+
Effect.succeed(null as FlowQueueItem | null),
|
|
182
|
+
),
|
|
183
|
+
);
|
|
184
|
+
if (item && item.status === status) items.push(item);
|
|
185
|
+
}
|
|
186
|
+
if (status === "pending") {
|
|
187
|
+
items.sort((a, b) => a.enqueuedAt.getTime() - b.enqueuedAt.getTime());
|
|
188
|
+
}
|
|
189
|
+
return items;
|
|
190
|
+
}),
|
|
191
|
+
|
|
192
|
+
deleteItem: (id) => kv.delete(id),
|
|
193
|
+
};
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
/**
|
|
197
|
+
* Effect-TS context tag for the FlowQueueService.
|
|
198
|
+
*
|
|
199
|
+
* Use `FlowQueueService.optional` to resolve it optionally — this is the
|
|
200
|
+
* pattern used in FlowEngine to preserve backward compatibility.
|
|
201
|
+
*
|
|
202
|
+
* @example
|
|
203
|
+
* ```typescript
|
|
204
|
+
* // In FlowEngine.runFlow()
|
|
205
|
+
* const queueOption = yield* FlowQueueService.optional;
|
|
206
|
+
* if (Option.isSome(queueOption)) {
|
|
207
|
+
* return yield* queueOption.value.enqueue({ flowId, storageId, input, clientId });
|
|
208
|
+
* }
|
|
209
|
+
* // ... existing fork path
|
|
210
|
+
*
|
|
211
|
+
* // From application code
|
|
212
|
+
* const queue = yield* FlowQueueService;
|
|
213
|
+
* const item = yield* queue.enqueue({ flowId: "my-flow", storageId: "s3", input: {}, clientId: null });
|
|
214
|
+
* ```
|
|
215
|
+
*/
|
|
216
|
+
export class FlowQueueService extends Context.Tag("FlowQueueService")<
|
|
217
|
+
FlowQueueService,
|
|
218
|
+
FlowQueueServiceShape
|
|
219
|
+
>() {
|
|
220
|
+
/**
|
|
221
|
+
* Access the FlowQueueService optionally.
|
|
222
|
+
* Returns Option.none() if the service is not present in the layer.
|
|
223
|
+
*
|
|
224
|
+
* Use this in FlowEngine to remain backward-compatible.
|
|
225
|
+
*/
|
|
226
|
+
static readonly optional = Effect.serviceOption(FlowQueueService);
|
|
227
|
+
|
|
228
|
+
/**
|
|
229
|
+
* Create a FlowQueueService Layer using the default in-memory store.
|
|
230
|
+
*
|
|
231
|
+
* @param config - Optional configuration overrides
|
|
232
|
+
* @returns A Layer providing FlowQueueService
|
|
233
|
+
*/
|
|
234
|
+
static Default(
|
|
235
|
+
config?: FlowQueueConfig,
|
|
236
|
+
): Layer.Layer<FlowQueueService, never, FlowEngine> {
|
|
237
|
+
return FlowQueueService.make(config ?? {}, new MemoryFlowQueueStore());
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
/**
|
|
241
|
+
* Create a FlowQueueService Layer with a custom store.
|
|
242
|
+
*
|
|
243
|
+
* @param config - Configuration (maxConcurrency, dlqRetryIntervalMs, dlqRetryBatchSize)
|
|
244
|
+
* @param store - The FlowQueueStore implementation to use
|
|
245
|
+
* @returns A Layer providing FlowQueueService
|
|
246
|
+
*/
|
|
247
|
+
static make(
|
|
248
|
+
config: FlowQueueConfig,
|
|
249
|
+
store: FlowQueueStore,
|
|
250
|
+
): Layer.Layer<FlowQueueService, never, FlowEngine> {
|
|
251
|
+
return Layer.effect(
|
|
252
|
+
FlowQueueService,
|
|
253
|
+
createFlowQueueService(config, store),
|
|
254
|
+
);
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
/**
|
|
258
|
+
* Create a FlowQueueService Layer backed by the application's BaseKvStoreService.
|
|
259
|
+
*
|
|
260
|
+
* Items are persisted under the "uploadista:queue-item:" key prefix, using the
|
|
261
|
+
* same KV store already configured for the server (Redis, Cloudflare KV, etc.).
|
|
262
|
+
* This is the recommended factory for most deployments — no separate store
|
|
263
|
+
* dependency is needed beyond the kvStore already wired at server level.
|
|
264
|
+
*
|
|
265
|
+
* @param config - Optional queue configuration (maxConcurrency, retry intervals…)
|
|
266
|
+
* @returns A Layer providing FlowQueueService, requiring FlowEngine and BaseKvStoreService
|
|
267
|
+
*
|
|
268
|
+
* @example
|
|
269
|
+
* ```typescript
|
|
270
|
+
* // In createUploadistaServer — flowQueue: true uses this automatically
|
|
271
|
+
* FlowQueueService.fromKvStore({ maxConcurrency: 8 })
|
|
272
|
+
* .pipe(Layer.provide(flowEngineLayer), Layer.provide(kvStore))
|
|
273
|
+
* ```
|
|
274
|
+
*/
|
|
275
|
+
static fromKvStore(
|
|
276
|
+
config: FlowQueueConfig = {},
|
|
277
|
+
): Layer.Layer<FlowQueueService, never, FlowEngine | FlowQueueKVStore> {
|
|
278
|
+
return Layer.effect(
|
|
279
|
+
FlowQueueService,
|
|
280
|
+
Effect.gen(function* () {
|
|
281
|
+
const kvStore = yield* FlowQueueKVStore;
|
|
282
|
+
const store = makeKvStoreFlowQueueStore(kvStore);
|
|
283
|
+
return yield* createFlowQueueService(config, store);
|
|
284
|
+
}),
|
|
285
|
+
);
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
/**
|
|
289
|
+
* Shorthand for fromKvStore — creates the full layer including the KV store
|
|
290
|
+
* sub-layer, requiring only FlowEngine and BaseKvStoreService.
|
|
291
|
+
*/
|
|
292
|
+
static fromBaseKvStore(
|
|
293
|
+
config: FlowQueueConfig = {},
|
|
294
|
+
): Layer.Layer<FlowQueueService, never, FlowEngine | import("../types/kv-store").BaseKvStoreService> {
|
|
295
|
+
return FlowQueueService.fromKvStore(config).pipe(
|
|
296
|
+
Layer.provide(flowQueueKvStore),
|
|
297
|
+
);
|
|
298
|
+
}
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
/**
 * Creates the FlowQueueService implementation.
 *
 * Internal factory used by FlowQueueService.Default and FlowQueueService.make.
 * Starts the worker loop and — when DeadLetterQueueService is present — the
 * DLQ retry loop as daemon fibers, then returns the service object. Both
 * loops outlive the constructing effect (forkDaemon).
 *
 * @param config - Partial queue configuration; merged over DEFAULT_QUEUE_CONFIG
 * @param store - Persistence backend for queue items
 * @returns Effect producing the service; requires FlowEngine in context
 */
function createFlowQueueService(
  config: FlowQueueConfig,
  store: FlowQueueStore,
): Effect.Effect<FlowQueueServiceShape, never, FlowEngine> {
  return Effect.gen(function* () {
    const resolved = { ...DEFAULT_QUEUE_CONFIG, ...config };
    const { maxConcurrency, dlqRetryIntervalMs, dlqRetryBatchSize } = resolved;

    // Ref tracking the number of currently running flows
    const concurrencyRef = yield* Ref.make(0);

    // Get the FlowEngine from the context (required dependency)
    const flowEngine: FlowEngineShape = yield* FlowEngine;

    // Get optional DLQ service
    const dlqOption = yield* DeadLetterQueueService.optional;

    /**
     * Generate a unique queue item ID.
     * NOTE(review): relies on the global `crypto` (Web Crypto) — presumably
     * Node 19+/browsers/workers; confirm minimum supported runtime.
     */
    const generateId = (): string => `q_${crypto.randomUUID()}`;

    /**
     * Dispatch a single pending item: transition it to "running" and invoke FlowEngine.
     * When the flow finishes (success or failure), update the item status.
     * If the item has a dlqItemId, correlate the result back to the DLQ.
     *
     * Store/DLQ bookkeeping failures are logged and swallowed so a broken
     * store never crashes the dispatch fiber; only runFlow's outcome decides
     * completed vs failed.
     */
    const dispatchItem = (item: FlowQueueItem): Effect.Effect<void, never> => {
      const startedAt = Date.now();

      // Mark as running and increment concurrency counter
      const setup = Effect.gen(function* () {
        yield* Effect.catchAll(
          store.updateItem(item.id, {
            status: "running",
            startedAt: new Date(),
          }),
          (err) =>
            Effect.logError("FlowQueue: failed to mark item running", err),
        );
        yield* Ref.update(concurrencyRef, (n) => n + 1);
      });

      // Execute the flow, catch all errors, update item status.
      // We provide FlowQueueDispatchMarker into the runFlow call so that
      // FlowEngine.runFlow skips the queue delegation and uses the direct
      // forkDaemon path, preventing infinite re-enqueue cycles.
      //
      // runFlow<TRequirements> infers TRequirements=unknown at this generic
      // call site; we assert never here because the FlowEngine service instance
      // already holds all its required context — no additional requirements are
      // propagated to the caller.
      const runFlowEffect = (
        flowEngine.runFlow({
          flowId: item.flowId,
          storageId: item.storageId,
          clientId: item.clientId,
          inputs: item.input,
          // Reuse the queue item ID as the flow job ID so clients polling
          // /jobs/{id}/status get results without a separate ID mapping.
          jobId: item.id,
        }) as Effect.Effect<FlowJob, UploadistaError, never>
      ).pipe(Effect.provideService(FlowQueueDispatchMarker, true));

      const execute = runFlowEffect.pipe(
        Effect.andThen(() =>
          Effect.gen(function* () {
            // Mark as completed
            yield* Effect.catchAll(
              store.updateItem(item.id, {
                status: "completed",
                completedAt: new Date(),
              }),
              (err) =>
                Effect.logError(
                  "FlowQueue: failed to mark item completed",
                  err,
                ),
            );

            // DLQ correlation — success
            if (item.dlqItemId && Option.isSome(dlqOption)) {
              yield* Effect.catchAll(
                dlqOption.value.markResolved(item.dlqItemId),
                (err) =>
                  Effect.logError(
                    "FlowQueue: failed to mark DLQ item resolved",
                    err,
                  ),
              );
            }
          }),
        ),
        Effect.catchAll((err) =>
          Effect.gen(function* () {
            const errorMsg =
              err instanceof UploadistaError ? err.body : String(err);

            // Mark as failed
            yield* Effect.catchAll(
              store.updateItem(item.id, {
                status: "failed",
                completedAt: new Date(),
                error: errorMsg,
              }),
              (storeErr) =>
                Effect.logError(
                  "FlowQueue: failed to mark item failed",
                  storeErr,
                ),
            );

            // DLQ correlation — failure
            if (item.dlqItemId && Option.isSome(dlqOption)) {
              const durationMs = Date.now() - startedAt;
              yield* Effect.catchAll(
                dlqOption.value.recordRetryFailure(
                  item.dlqItemId,
                  errorMsg,
                  durationMs,
                ),
                (dlqErr) =>
                  Effect.logError(
                    "FlowQueue: failed to record DLQ retry failure",
                    dlqErr,
                  ),
              );
            }
          }),
        ),
      );

      // Always decrement concurrency, even on unexpected failures
      const release = Ref.update(concurrencyRef, (n) => Math.max(0, n - 1));

      return setup.pipe(
        Effect.andThen(() => execute.pipe(Effect.ensuring(release))),
        Effect.catchAllCause((cause) =>
          Effect.logError("FlowQueue: unexpected error in dispatchItem", cause),
        ),
      );
    };

    /**
     * Worker tick: claim up to (maxConcurrency - current) pending items and
     * dispatch each as a daemon fiber so the tick returns immediately.
     *
     * NOTE(review): the "running" status write happens inside the forked
     * fiber, after this tick returns. This assumes the store update lands
     * well within the 500ms poll interval; if it is slower, the next tick
     * could list the same item as "pending" and dispatch it twice — confirm
     * against the slowest supported store backend.
     */
    const workerTick = Effect.gen(function* () {
      const current = yield* Ref.get(concurrencyRef);
      const available = maxConcurrency - current;

      if (available <= 0) {
        return;
      }

      // A store failure yields an empty batch; the next tick retries.
      const pending = yield* Effect.catchAll(
        store.listByStatus("pending"),
        () => Effect.succeed([] as FlowQueueItem[]),
      );

      const toDispatch = pending.slice(0, available);

      for (const item of toDispatch) {
        yield* Effect.forkDaemon(dispatchItem(item));
      }
    });

    /**
     * Background worker loop: poll every 500ms for pending items.
     *
     * NOTE(review): catchAllCause is applied AFTER repeat, so an unhandled
     * defect in a tick logs once and terminates the loop permanently rather
     * than resuming on the next interval — verify this is intentional.
     */
    yield* Effect.forkDaemon(
      workerTick.pipe(
        Effect.repeat(Schedule.spaced(Duration.millis(500))),
        Effect.catchAllCause((cause) =>
          Effect.logError("FlowQueue: worker loop crashed", cause),
        ),
      ),
    );

    /**
     * DLQ retry loop: when DeadLetterQueueService is present, poll on the
     * configured interval and re-enqueue items that are due for retry.
     */
    if (Option.isSome(dlqOption)) {
      const dlq = dlqOption.value;

      const dlqRetryTick = Effect.gen(function* () {
        // Fetch failures degrade to an empty batch; retried next interval.
        const items = yield* Effect.catchAll(
          dlq.getScheduledRetries(dlqRetryBatchSize),
          (err) =>
            Effect.logError(
              "FlowQueue: failed to fetch DLQ scheduled retries",
              err,
            ).pipe(Effect.as([] as DeadLetterItem[])),
        );

        for (const dlqItem of items) {
          // Mark as retrying before enqueuing to prevent duplicate dispatch
          yield* Effect.catchAll(
            dlq.markRetrying(dlqItem.id),
            (err) =>
              Effect.logError(
                "FlowQueue: failed to mark DLQ item retrying",
                err,
              ),
          );

          // Create queue item for the retry; dlqItemId links the outcome
          // back to the originating DLQ entry in dispatchItem.
          const queueItem: FlowQueueItem = {
            id: generateId(),
            flowId: dlqItem.flowId,
            storageId: dlqItem.storageId,
            input: dlqItem.inputs,
            clientId: dlqItem.clientId,
            status: "pending",
            dlqItemId: dlqItem.id,
            enqueuedAt: new Date(),
          };

          yield* Effect.catchAll(
            store.createItem(queueItem),
            (err) =>
              Effect.logError(
                "FlowQueue: failed to enqueue DLQ retry item",
                err,
              ),
          );
        }
      });

      yield* Effect.forkDaemon(
        // Wait one interval before the first tick
        Effect.sleep(Duration.millis(dlqRetryIntervalMs)).pipe(
          Effect.andThen(
            dlqRetryTick.pipe(
              Effect.repeat(
                Schedule.spaced(Duration.millis(dlqRetryIntervalMs)),
              ),
            ),
          ),
          Effect.catchAllCause((cause) =>
            Effect.logError("FlowQueue: DLQ retry loop crashed", cause),
          ),
        ),
      );
    }

    /**
     * Service implementation.
     */
    const service: FlowQueueServiceShape = {
      // Create the item in "pending" state; the worker loop picks it up.
      enqueue: ({ flowId, storageId, input, clientId, dlqItemId }) =>
        Effect.gen(function* () {
          const item: FlowQueueItem = {
            id: generateId(),
            flowId,
            storageId,
            input,
            clientId,
            status: "pending",
            dlqItemId,
            enqueuedAt: new Date(),
          };
          return yield* store.createItem(item);
        }),

      getStatus: (itemId) =>
        Effect.gen(function* () {
          const item = yield* store.getItem(itemId);
          if (!item) {
            return yield* Effect.fail(
              UploadistaError.fromCode("QUEUE_ITEM_NOT_FOUND"),
            );
          }
          return item;
        }),

      // Pending items are removed; running items are rejected;
      // completed/failed items are a no-op.
      cancel: (itemId) =>
        Effect.gen(function* () {
          const item = yield* store.getItem(itemId);
          if (!item) {
            return yield* Effect.fail(
              UploadistaError.fromCode("QUEUE_ITEM_NOT_FOUND"),
            );
          }
          if (item.status === "running") {
            return yield* Effect.fail(
              UploadistaError.fromCode("QUEUE_ITEM_ALREADY_RUNNING"),
            );
          }
          if (item.status === "pending") {
            yield* store.deleteItem(itemId);
          }
          // completed/failed items: no-op (already done)
        }),

      list: (options) =>
        Effect.gen(function* () {
          if (options?.status) {
            return yield* store.listByStatus(options.status);
          }
          // Return all statuses combined
          const [pending, running, completed, failed] = yield* Effect.all([
            store.listByStatus("pending"),
            store.listByStatus("running"),
            store.listByStatus("completed"),
            store.listByStatus("failed"),
          ]);
          return [...pending, ...running, ...completed, ...failed];
        }),

      getStats: () =>
        Effect.gen(function* () {
          const [pending, running, completed, failed] = yield* Effect.all([
            store.listByStatus("pending"),
            store.listByStatus("running"),
            store.listByStatus("completed"),
            store.listByStatus("failed"),
          ]);
          const currentConcurrency = yield* Ref.get(concurrencyRef);
          return {
            pending: pending.length,
            running: running.length,
            completed: completed.length,
            failed: failed.length,
            maxConcurrency,
            currentConcurrency,
          } satisfies FlowQueueStats;
        }),
    };

    return service;
  });
}
|
package/src/flow/index.ts
CHANGED
|
@@ -20,6 +20,8 @@ export type { StreamingConfig } from "../types/data-store";
|
|
|
20
20
|
export { DEFAULT_STREAMING_CONFIG } from "../types/data-store";
|
|
21
21
|
export * from "./dead-letter-queue";
|
|
22
22
|
export * from "./flow";
|
|
23
|
+
export * from "./flow-queue";
|
|
24
|
+
export * from "./flow-queue-store";
|
|
23
25
|
// Core flow engine
|
|
24
26
|
export { createFlowWithSchema } from "./flow";
|
|
25
27
|
export * from "./flow-engine";
|
|
@@ -55,6 +57,8 @@ export { createFlow } from "./typed-flow";
|
|
|
55
57
|
export * from "./types/dead-letter-item";
|
|
56
58
|
export * from "./types/flow-file";
|
|
57
59
|
export * from "./types/flow-job";
|
|
60
|
+
// Flow Queue types
|
|
61
|
+
export * from "./types/flow-queue-item";
|
|
58
62
|
export * from "./types/flow-types";
|
|
59
63
|
export * from "./types/retry-policy";
|
|
60
64
|
export * from "./types/run-args";
|