convex-batch-processor 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +395 -0
- package/dist/client/index.d.ts +212 -0
- package/dist/client/index.d.ts.map +1 -0
- package/dist/client/index.js +78 -0
- package/dist/client/index.js.map +1 -0
- package/dist/component/_generated/api.d.ts +34 -0
- package/dist/component/_generated/api.d.ts.map +1 -0
- package/dist/component/_generated/api.js +31 -0
- package/dist/component/_generated/api.js.map +1 -0
- package/dist/component/_generated/component.d.ts +80 -0
- package/dist/component/_generated/component.d.ts.map +1 -0
- package/dist/component/_generated/component.js +11 -0
- package/dist/component/_generated/component.js.map +1 -0
- package/dist/component/_generated/dataModel.d.ts +46 -0
- package/dist/component/_generated/dataModel.d.ts.map +1 -0
- package/dist/component/_generated/dataModel.js +11 -0
- package/dist/component/_generated/dataModel.js.map +1 -0
- package/dist/component/_generated/server.d.ts +121 -0
- package/dist/component/_generated/server.d.ts.map +1 -0
- package/dist/component/_generated/server.js +78 -0
- package/dist/component/_generated/server.js.map +1 -0
- package/dist/component/convex.config.d.ts +3 -0
- package/dist/component/convex.config.d.ts.map +1 -0
- package/dist/component/convex.config.js +4 -0
- package/dist/component/convex.config.js.map +1 -0
- package/dist/component/lib.d.ts +351 -0
- package/dist/component/lib.d.ts.map +1 -0
- package/dist/component/lib.js +879 -0
- package/dist/component/lib.js.map +1 -0
- package/dist/component/schema.d.ts +118 -0
- package/dist/component/schema.d.ts.map +1 -0
- package/dist/component/schema.js +61 -0
- package/dist/component/schema.js.map +1 -0
- package/package.json +63 -0
- package/src/client/index.test.ts +123 -0
- package/src/client/index.ts +336 -0
- package/src/component/_generated/api.ts +50 -0
- package/src/component/_generated/component.ts +140 -0
- package/src/component/_generated/dataModel.ts +60 -0
- package/src/component/_generated/server.ts +156 -0
- package/src/component/convex.config.ts +4 -0
- package/src/component/lib.ts +1082 -0
- package/src/component/schema.ts +70 -0
|
@@ -0,0 +1,879 @@
|
|
|
1
|
+
import { mutation, query, internalAction, internalMutation, internalQuery, } from "./_generated/server";
|
|
2
|
+
import { internal } from "./_generated/api";
|
|
3
|
+
import { v } from "convex/values";
|
|
4
|
+
// ============================================================================
|
|
5
|
+
// Batch Accumulator - Public API
|
|
6
|
+
// ============================================================================
|
|
7
|
+
/**
 * addItems - Public mutation that appends a group of items to the current
 * accumulating batch for `batchId`, creating the batch (and its interval
 * flush timer) on first use.
 *
 * Design: the hot path performs only reads plus INSERTs (never a patch of a
 * shared document), so concurrent addItems calls do not contend on the same
 * row. Counting is deferred to flush time.
 *
 * @param batchId - Client-chosen batch identifier; may already carry a
 *   "::sequence" suffix, which is stripped to recover the base ID.
 * @param items - Opaque payload items to accumulate.
 * @param config - Batch behavior: size threshold, flush interval, and the
 *   function handle invoked to process a flushed batch.
 * @returns Summary of THIS call only (itemCount is items.length, not the
 *   batch total).
 */
export const addItems = mutation({
    args: {
        batchId: v.string(),
        items: v.array(v.any()),
        config: v.object({
            maxBatchSize: v.number(),
            flushIntervalMs: v.number(),
            processBatchHandle: v.string(),
        }),
    },
    handler: async (ctx, { batchId, items, config }) => {
        const now = Date.now();
        // Parse base batch ID (strip sequence if present)
        const baseBatchId = batchId.includes("::")
            ? batchId.split("::")[0]
            : batchId;
        // 1. Find accumulating batch (READ only)
        let batch = await ctx.db
            .query("batches")
            .withIndex("by_baseBatchId_status", (q) => q.eq("baseBatchId", baseBatchId).eq("status", "accumulating"))
            .first();
        // 2. If no batch, create one WITH timer (one-time INSERT)
        if (!batch) {
            // Find highest sequence number for this base ID
            const latestBatch = await ctx.db
                .query("batches")
                .withIndex("by_baseBatchId_sequence", (q) => q.eq("baseBatchId", baseBatchId))
                .order("desc")
                .first();
            const nextSequence = latestBatch ? latestBatch.sequence + 1 : 0;
            const newBatchId = `${baseBatchId}::${nextSequence}`;
            const batchDocId = await ctx.db.insert("batches", {
                batchId: newBatchId,
                baseBatchId,
                sequence: nextSequence,
                createdAt: now,
                lastUpdatedAt: now,
                status: "accumulating",
                config,
            });
            // Re-read so `batch` has the full document shape (_id etc.).
            batch = (await ctx.db.get(batchDocId));
            // Schedule timer at creation (not on every add)
            if (config.flushIntervalMs > 0) {
                // force: true so the interval flush fires even below maxBatchSize.
                const scheduledFlushId = await ctx.scheduler.runAfter(config.flushIntervalMs, internal.lib.maybeFlush, { batchDocId: batch._id, force: true });
                await ctx.db.patch(batch._id, { scheduledFlushId });
            }
        }
        // 3. INSERT items (NEVER conflicts - always a new document)
        await ctx.db.insert("batchItems", {
            batchDocId: batch._id,
            items,
            itemCount: items.length,
            createdAt: now,
        });
        // 4. Schedule flush check ONLY if this single call could complete a batch
        // DO NOT query batchItems to count - that causes OCC conflicts when
        // multiple concurrent addItems all read the same index.
        //
        // Dual-trigger pattern:
        // - SIZE trigger: items.length >= maxBatchSize (handled here)
        // - TIME trigger: flushIntervalMs timer (scheduled at batch creation)
        //
        // For high-throughput small items, the interval timer handles flushing.
        // For large single calls, we trigger immediate flush check.
        if (items.length >= config.maxBatchSize) {
            // No force flag: doFlushTransition re-checks the real count.
            await ctx.scheduler.runAfter(0, internal.lib.maybeFlush, {
                batchDocId: batch._id,
            });
        }
        // 5. Return success - NO PATCH, NO COUNT QUERY!
        // We return the count of items added in THIS call only.
        // Total count can be obtained via getBatchStatus query if needed.
        return {
            batchId: baseBatchId,
            itemCount: items.length,
            flushed: false, // Flush happens via interval timer or large batch detection
            status: "accumulating",
        };
    },
});
/**
 * flushBatch - Public mutation that requests an immediate flush of a batch.
 *
 * Accepts either a full internal batch ID ("base::N") or a client-facing
 * base ID. The actual state transition is delegated to maybeFlush (force
 * mode) via the scheduler so this user-facing mutation stays OCC-friendly.
 *
 * @throws when no matching batch exists, when the batch is not currently
 *   accumulating, or when it has no processBatchHandle configured.
 */
export const flushBatch = mutation({
    args: { batchId: v.string() },
    handler: async (ctx, { batchId }) => {
        // Prefer an exact match on the full batch ID (e.g. "base::0") ...
        const exactMatch = await ctx.db
            .query("batches")
            .withIndex("by_batchId", (q) => q.eq("batchId", batchId))
            .first();
        // ... and otherwise treat the argument as a base ID, resolving to
        // its currently-accumulating batch.
        const batch = exactMatch ??
            (await ctx.db
                .query("batches")
                .withIndex("by_baseBatchId_status", (q) => q.eq("baseBatchId", batchId).eq("status", "accumulating"))
                .first());
        if (!batch) {
            throw new Error(`Batch ${batchId} not found`);
        }
        if (batch.status !== "accumulating") {
            throw new Error(`Batch ${batch.baseBatchId} is not in accumulating state (current: ${batch.status})`);
        }
        // Total pending items, derived from the batchItems documents.
        const pendingDocs = await ctx.db
            .query("batchItems")
            .withIndex("by_batchDocId", (q) => q.eq("batchDocId", batch._id))
            .collect();
        let itemCount = 0;
        for (const doc of pendingDocs) {
            itemCount += doc.itemCount;
        }
        if (itemCount === 0) {
            return { batchId, itemCount: 0, flushed: false, reason: "Batch is empty" };
        }
        if (!batch.config.processBatchHandle) {
            throw new Error(`Batch ${batchId} has no processBatchHandle configured`);
        }
        // Hand the transition off to maybeFlush with force=true so manual
        // flushes bypass the size threshold (and OCC stays out of this path).
        await ctx.scheduler.runAfter(0, internal.lib.maybeFlush, {
            batchDocId: batch._id,
            force: true,
        });
        return {
            batchId,
            itemCount,
            flushed: true, // Will be flushed by maybeFlush
            status: "flushing",
        };
    },
});
/**
 * getBatchStatus - Public query returning the live status of all active
 * (flushing or accumulating) batches for a base batch ID, plus the shared
 * config. Returns null when nothing is active.
 *
 * Item counts and lastUpdatedAt are derived from the batchItems documents
 * rather than stored counters, matching the insert-only write pattern.
 */
export const getBatchStatus = query({
    args: { batchId: v.string() },
    handler: async (ctx, { batchId }) => {
        // Accept either "base" or "base::N"; reduce to the base ID.
        const baseBatchId = batchId.includes("::")
            ? batchId.split("::")[0]
            : batchId;
        // Gather active batches: flushing first, then accumulating.
        const accumulatingBatches = await ctx.db
            .query("batches")
            .withIndex("by_baseBatchId_status", (q) => q.eq("baseBatchId", baseBatchId).eq("status", "accumulating"))
            .collect();
        const flushingBatches = await ctx.db
            .query("batches")
            .withIndex("by_baseBatchId_status", (q) => q.eq("baseBatchId", baseBatchId).eq("status", "flushing"))
            .collect();
        const activeBatches = [...flushingBatches, ...accumulatingBatches];
        if (activeBatches.length === 0) {
            return null;
        }
        // All batches for a base ID share the same config; take the first.
        const config = activeBatches[0].config;
        // Derive per-batch itemCount and lastUpdatedAt from batchItems.
        const batchesWithCounts = [];
        for (const batch of activeBatches) {
            const itemDocs = await ctx.db
                .query("batchItems")
                .withIndex("by_batchDocId", (q) => q.eq("batchDocId", batch._id))
                .collect();
            let itemCount = 0;
            let newestItemAt = batch.lastUpdatedAt;
            for (const doc of itemDocs) {
                itemCount += doc.itemCount;
                // lastUpdatedAt = newest batchItems.createdAt, falling back
                // to the batch document's own timestamp when there are none.
                if (itemDocs.length > 0 && doc.createdAt > newestItemAt) {
                    newestItemAt = doc.createdAt;
                }
            }
            const lastUpdatedAt = itemDocs.length > 0
                ? Math.max(...itemDocs.map((doc) => doc.createdAt))
                : batch.lastUpdatedAt;
            batchesWithCounts.push({
                status: batch.status,
                itemCount,
                createdAt: batch.createdAt,
                lastUpdatedAt,
            });
        }
        return {
            batchId: baseBatchId,
            batches: batchesWithCounts,
            config: {
                maxBatchSize: config.maxBatchSize,
                flushIntervalMs: config.flushIntervalMs,
            },
        };
    },
});
/**
 * getAllBatchesForBaseId - Public query listing every batch (any status)
 * recorded under a base batch ID, with item counts and freshness derived
 * from the associated batchItems documents.
 */
export const getAllBatchesForBaseId = query({
    args: { baseBatchId: v.string() },
    handler: async (ctx, { baseBatchId }) => {
        const allBatches = await ctx.db
            .query("batches")
            .withIndex("by_baseBatchId_status", (q) => q.eq("baseBatchId", baseBatchId))
            .collect();
        const results = [];
        for (const batch of allBatches) {
            // Derive itemCount from the insert-only batchItems table.
            const itemDocs = await ctx.db
                .query("batchItems")
                .withIndex("by_batchDocId", (q) => q.eq("batchDocId", batch._id))
                .collect();
            const itemCount = itemDocs.reduce((sum, doc) => sum + doc.itemCount, 0);
            // Freshness is the newest batchItems.createdAt, or the batch's
            // own lastUpdatedAt when no items are pending.
            const lastUpdatedAt = itemDocs.length > 0
                ? Math.max(...itemDocs.map((doc) => doc.createdAt))
                : batch.lastUpdatedAt;
            results.push({
                batchId: batch.batchId,
                baseBatchId: batch.baseBatchId,
                sequence: batch.sequence,
                itemCount,
                status: batch.status,
                createdAt: batch.createdAt,
                lastUpdatedAt,
            });
        }
        return results;
    },
});
/**
 * getFlushHistory - Public query returning flush records for a batch ID,
 * newest first, optionally capped at `limit` entries.
 *
 * Fix: the previous `if (limit)` truthiness check meant an explicit
 * `limit: 0` fell through and returned the ENTIRE history; we now test
 * for undefined so 0 yields an empty page as callers would expect.
 *
 * @param batchId - The client-facing (base) batch ID recorded in history.
 * @param limit - Optional maximum number of records to return.
 */
export const getFlushHistory = query({
    args: {
        batchId: v.string(),
        limit: v.optional(v.number()),
    },
    handler: async (ctx, { batchId, limit }) => {
        const historyQuery = ctx.db
            .query("flushHistory")
            .withIndex("by_batchId", (q) => q.eq("batchId", batchId))
            .order("desc");
        // Respect any explicitly-provided limit, including 0.
        if (limit !== undefined) {
            return await historyQuery.take(limit);
        }
        return await historyQuery.collect();
    },
});
/**
 * deleteBatch - Public mutation removing a batch (by full internal batch ID)
 * and its batchItems, but only when doing so cannot lose data: flushing
 * batches and accumulating batches with pending items are refused.
 *
 * @returns { deleted, reason? } - reason explains a refusal.
 */
export const deleteBatch = mutation({
    args: { batchId: v.string() },
    handler: async (ctx, { batchId }) => {
        const batch = await ctx.db
            .query("batches")
            .withIndex("by_batchId", (q) => q.eq("batchId", batchId))
            .first();
        // Guard clauses: unknown, mid-flush, or non-empty batches are refused.
        if (!batch) {
            return { deleted: false, reason: "Batch not found" };
        }
        if (batch.status === "flushing") {
            return { deleted: false, reason: "Cannot delete batch while flushing" };
        }
        const pendingDocs = await ctx.db
            .query("batchItems")
            .withIndex("by_batchDocId", (q) => q.eq("batchDocId", batch._id))
            .collect();
        let pendingCount = 0;
        for (const doc of pendingDocs) {
            pendingCount += doc.itemCount;
        }
        if (batch.status === "accumulating" && pendingCount > 0) {
            return { deleted: false, reason: "Cannot delete batch with pending items" };
        }
        // Cancel the interval flush timer, if one is outstanding.
        if (batch.scheduledFlushId) {
            await ctx.scheduler.cancel(batch.scheduledFlushId);
        }
        // Remove the item documents, then the batch itself.
        for (const doc of pendingDocs) {
            await ctx.db.delete(doc._id);
        }
        await ctx.db.delete(batch._id);
        return { deleted: true };
    },
});
// ============================================================================
|
|
264
|
+
// Batch Accumulator - Internal Functions
|
|
265
|
+
// ============================================================================
|
|
266
|
+
/**
 * getBatch - Internal query resolving a full internal batch ID ("base::N")
 * to its batch document, or null when absent.
 */
export const getBatch = internalQuery({
    args: { batchId: v.string() },
    handler: async (ctx, { batchId }) => {
        const match = await ctx.db
            .query("batches")
            .withIndex("by_batchId", (q) => q.eq("batchId", batchId))
            .first();
        return match;
    },
});
/**
 * collectBatchItems - Internal query flattening every item belonging to a
 * batch that was recorded at or before the batch's flush cutoff.
 *
 * Items created after flushStartedAt are deliberately excluded; they are
 * "stranded" items picked up by a later flush cycle.
 *
 * @returns { items, flushStartedAt } - flushStartedAt falls back to "now"
 *   if the batch document carries none (and is undefined if the batch is
 *   gone entirely).
 */
export const collectBatchItems = internalQuery({
    args: { batchDocId: v.id("batches") },
    handler: async (ctx, { batchDocId }) => {
        const batch = await ctx.db.get(batchDocId);
        if (!batch) {
            return { items: [], flushStartedAt: undefined };
        }
        const flushStartedAt = batch.flushStartedAt ?? Date.now();
        // lt(cutoff + 1) makes the upper bound inclusive of flushStartedAt.
        const recordedDocs = await ctx.db
            .query("batchItems")
            .withIndex("by_batchDocId_createdAt", (q) => q.eq("batchDocId", batchDocId).lt("createdAt", flushStartedAt + 1))
            .collect();
        // Flatten each document's item array into one list, in index order.
        const items = recordedDocs.flatMap((doc) => doc.items);
        return { items, flushStartedAt };
    },
});
/**
 * maybeFlush - Internal action that attempts to move a batch from
 * "accumulating" to "flushing".
 *
 * Scheduled by addItems (when a single call reaches the size threshold),
 * by flushBatch (manual flush, force=true), and as the interval timer
 * (force=true). It is a thin coordinator: the real transition lives in
 * doFlushTransition, a mutation, so Convex's automatic OCC retry covers
 * conflicting writers without external retry machinery.
 *
 * Races are benign by design: when several maybeFlush calls target the
 * same batch, the first doFlushTransition to commit wins; later ones see
 * status !== "accumulating" and return { flushed: false, reason:
 * "not_accumulating" }, which callers treat as success.
 *
 * The action layer exists because mutations cannot invoke actions
 * directly; the overall flow is:
 *   mutation (addItems/flushBatch) → schedules action (maybeFlush)
 *   action (maybeFlush)            → calls mutation (doFlushTransition)
 *   mutation (doFlushTransition)   → schedules action (executeFlush)
 * Callers never block on the flush completing.
 *
 * If maybeFlush itself fails, the batch simply stays "accumulating" and a
 * later threshold hit or interval timer retries the transition.
 *
 * @param batchDocId - Batch document to (maybe) flush.
 * @param force - Flush regardless of the size threshold (manual/interval).
 */
export const maybeFlush = internalAction({
    args: {
        batchDocId: v.id("batches"),
        force: v.optional(v.boolean()),
    },
    handler: async (ctx, args) => {
        // Delegate to the mutation; Convex retries it on OCC conflicts.
        // Losing a transition race is fine (see doc comment above).
        const force = args.force ?? false;
        await ctx.runMutation(internal.lib.doFlushTransition, {
            batchDocId: args.batchDocId,
            force,
        });
    },
});
/**
 * doFlushTransition - The actual state machine transition from "accumulating" to "flushing".
 *
 * This mutation is the source of truth for batch state transitions. It's designed to be
 * idempotent and race-condition safe:
 *
 * - Returns early if batch is already flushing/completed (another caller won the race)
 * - Returns early if batch is empty or below threshold (unless force=true)
 * - On success, atomically updates status and schedules executeFlush
 *
 * OCC (Optimistic Concurrency Control) note:
 * If two doFlushTransition calls race, Convex detects the conflict when both try to
 * patch the same batch document. One succeeds, the other is auto-retried by Convex.
 * On retry, it sees status="flushing" and returns { flushed: false, reason: "not_accumulating" }.
 *
 * NOTE: the order below (cancel timer → patch status/flushStartedAt →
 * schedule executeFlush) is load-bearing; executeFlush reads flushStartedAt
 * via collectBatchItems to bound which items belong to this flush.
 */
export const doFlushTransition = internalMutation({
    args: {
        batchDocId: v.id("batches"),
        force: v.optional(v.boolean()),
    },
    handler: async (ctx, { batchDocId, force }) => {
        const batch = await ctx.db.get(batchDocId);
        // Already flushing or completed? Nothing to do. This handles the race condition
        // where multiple maybeFlush calls are scheduled - the first one wins.
        if (!batch || batch.status !== "accumulating") {
            return { flushed: false, reason: "not_accumulating" };
        }
        // Check actual count from batchItems (counts are never cached on the
        // batch document - see addItems' insert-only design).
        const batchItemDocs = await ctx.db
            .query("batchItems")
            .withIndex("by_batchDocId", (q) => q.eq("batchDocId", batchDocId))
            .collect();
        const totalCount = batchItemDocs.reduce((sum, doc) => sum + doc.itemCount, 0);
        // Empty batch? Nothing to flush.
        if (totalCount === 0) {
            return { flushed: false, reason: "empty" };
        }
        // Not at threshold? Skip only if not forced (interval flush uses force=true).
        if (!force && totalCount < batch.config.maxBatchSize) {
            return { flushed: false, reason: "below_threshold" };
        }
        // Cancel scheduled timer if exists (the size-triggered path is
        // superseding the pending interval flush).
        if (batch.scheduledFlushId) {
            await ctx.scheduler.cancel(batch.scheduledFlushId);
        }
        // Transition to flushing. flushStartedAt becomes the cutoff used by
        // collectBatchItems / recordFlushResult to scope this flush's items.
        const now = Date.now();
        await ctx.db.patch(batchDocId, {
            status: "flushing",
            flushStartedAt: now,
            lastUpdatedAt: now,
            scheduledFlushId: undefined,
        });
        // Schedule the actual flush
        await ctx.scheduler.runAfter(0, internal.lib.executeFlush, {
            batchDocId,
            processBatchHandle: batch.config.processBatchHandle,
        });
        return { flushed: true, itemCount: totalCount };
    },
});
/**
 * executeFlush - Internal action that runs the user's batch processor over
 * the items captured for one flush, then records the outcome.
 *
 * Processor failures are caught (never rethrown): recordFlushResult is
 * handed success=false plus the error message and decides how to recover.
 *
 * @param batchDocId - Batch being flushed (must be in "flushing" state).
 * @param processBatchHandle - Function handle of the user's processor action.
 * @returns { success, errorMessage?, durationMs } describing the attempt.
 */
export const executeFlush = internalAction({
    args: {
        batchDocId: v.id("batches"),
        processBatchHandle: v.string(),
    },
    handler: async (ctx, { batchDocId, processBatchHandle }) => {
        const startTime = Date.now();
        // Gather every item recorded at or before the flush cutoff.
        const { items, flushStartedAt } = await ctx.runQuery(internal.lib.collectBatchItems, {
            batchDocId,
        });
        // Nothing captured: record a trivially-successful zero-item flush.
        if (items.length === 0) {
            await ctx.runMutation(internal.lib.recordFlushResult, {
                batchDocId,
                itemCount: 0,
                durationMs: 0,
                success: true,
                flushStartedAt,
            });
            return { success: true, durationMs: 0 };
        }
        let success = true;
        let errorMessage;
        try {
            // Invoke the user's processor via its serialized function handle.
            await ctx.runAction(processBatchHandle, { items });
        }
        catch (error) {
            success = false;
            errorMessage = error instanceof Error ? error.message : String(error);
        }
        const durationMs = Date.now() - startTime;
        // Persist the outcome; recordFlushResult handles cleanup/recovery.
        await ctx.runMutation(internal.lib.recordFlushResult, {
            batchDocId,
            itemCount: items.length,
            durationMs,
            success,
            errorMessage,
            flushStartedAt,
        });
        return { success, errorMessage, durationMs };
    },
});
/**
 * recordFlushResult - Internal mutation that finalizes one flush attempt.
 *
 * Responsibilities:
 * - Append a flushHistory row (keyed by the client-facing baseBatchId).
 * - On success: delete the batchItems covered by this flush, then either
 *   (a) revert to "accumulating" if stranded items arrived mid-flush, or
 *   (b) mark the batch "completed" and garbage-collect older completed
 *   batches for the same base ID.
 * - On failure: revert to "accumulating" (items are retained) and re-arm
 *   the interval timer so the flush is retried.
 *
 * @param batchDocId - Batch the flush ran against.
 * @param itemCount - Number of items handed to the processor.
 * @param durationMs - Wall-clock processor duration.
 * @param success - Whether the processor action completed without throwing.
 * @param errorMessage - Processor error text when success=false.
 * @param flushStartedAt - Cutoff timestamp used to scope this flush's items.
 */
export const recordFlushResult = internalMutation({
    args: {
        batchDocId: v.id("batches"),
        itemCount: v.number(),
        durationMs: v.number(),
        success: v.boolean(),
        errorMessage: v.optional(v.string()),
        flushStartedAt: v.optional(v.number()),
    },
    handler: async (ctx, { batchDocId, itemCount, durationMs, success, errorMessage, flushStartedAt }) => {
        const batch = await ctx.db.get(batchDocId);
        // Batch deleted out from under us: nothing to record against.
        if (!batch)
            return;
        await ctx.db.insert("flushHistory", {
            batchId: batch.baseBatchId, // Store client's original ID, not internal sequence
            itemCount,
            flushedAt: Date.now(),
            durationMs,
            success,
            errorMessage,
        });
        if (success) {
            // Delete all batchItems that were included in this flush (created before flushStartedAt)
            const cutoffTime = flushStartedAt ?? batch.flushStartedAt ?? Date.now();
            // lt(cutoff + 1) makes the upper bound inclusive, matching the
            // query collectBatchItems used to gather the flushed items.
            const batchItemsToDelete = await ctx.db
                .query("batchItems")
                .withIndex("by_batchDocId_createdAt", (q) => q.eq("batchDocId", batchDocId).lt("createdAt", cutoffTime + 1))
                .collect();
            for (const item of batchItemsToDelete) {
                await ctx.db.delete(item._id);
            }
            // Check for stranded items (added after flushStartedAt)
            const remainingItems = await ctx.db
                .query("batchItems")
                .withIndex("by_batchDocId", (q) => q.eq("batchDocId", batchDocId))
                .collect();
            const remainingCount = remainingItems.reduce((sum, item) => sum + item.itemCount, 0);
            if (remainingCount > 0) {
                // Don't complete - keep accumulating for stranded items
                await ctx.db.patch(batchDocId, {
                    status: "accumulating",
                    flushStartedAt: undefined,
                    lastUpdatedAt: Date.now(),
                });
                // Schedule another maybeFlush if at threshold
                if (remainingCount >= batch.config.maxBatchSize) {
                    await ctx.scheduler.runAfter(0, internal.lib.maybeFlush, { batchDocId });
                }
                else if (batch.config.flushIntervalMs > 0) {
                    // Re-schedule interval timer
                    const scheduledFlushId = await ctx.scheduler.runAfter(batch.config.flushIntervalMs, internal.lib.maybeFlush, { batchDocId, force: true });
                    await ctx.db.patch(batchDocId, { scheduledFlushId });
                }
            }
            else {
                // No stranded items - mark completed
                await ctx.db.patch(batchDocId, {
                    status: "completed",
                    flushStartedAt: undefined,
                    lastUpdatedAt: Date.now(),
                });
                // Clean up old completed batches for the same base ID
                // Keep only the most recent completed batch to reduce clutter
                const completedBatches = await ctx.db
                    .query("batches")
                    .withIndex("by_baseBatchId_status", (q) => q.eq("baseBatchId", batch.baseBatchId).eq("status", "completed"))
                    .collect();
                // Sort by sequence number descending and delete all but the most recent
                const sortedCompleted = completedBatches.sort((a, b) => b.sequence - a.sequence);
                for (let i = 1; i < sortedCompleted.length; i++) {
                    // Also delete batchItems for old completed batches
                    const oldBatchItems = await ctx.db
                        .query("batchItems")
                        .withIndex("by_batchDocId", (q) => q.eq("batchDocId", sortedCompleted[i]._id))
                        .collect();
                    for (const item of oldBatchItems) {
                        await ctx.db.delete(item._id);
                    }
                    await ctx.db.delete(sortedCompleted[i]._id);
                }
            }
        }
        else {
            // Failure case - revert to accumulating so the items survive and
            // a later flush retries them.
            let scheduledFlushId = undefined;
            if (batch.config.flushIntervalMs > 0 && batch.config.processBatchHandle) {
                scheduledFlushId = await ctx.scheduler.runAfter(batch.config.flushIntervalMs, internal.lib.maybeFlush, { batchDocId, force: true });
            }
            await ctx.db.patch(batchDocId, {
                status: "accumulating",
                flushStartedAt: undefined,
                scheduledFlushId,
            });
        }
    },
});
/**
 * scheduledIntervalFlush - Timer-driven flush entry point, fired
 * flushIntervalMs after batch creation. Always forces the transition so a
 * batch that never reaches maxBatchSize still drains instead of sitting
 * indefinitely.
 */
export const scheduledIntervalFlush = internalAction({
    args: { batchDocId: v.id("batches") },
    handler: async (ctx, args) => {
        // force=true: interval flushes ignore the size threshold entirely.
        await ctx.runMutation(internal.lib.doFlushTransition, {
            batchDocId: args.batchDocId,
            force: true,
        });
    },
});
// ============================================================================
|
|
567
|
+
// Table Iterator - Public API
|
|
568
|
+
// ============================================================================
|
|
569
|
+
/**
 * startIteratorJob - Public mutation creating a table-iterator job and
 * kicking off its first processNextBatch step.
 *
 * The job walks a table via the user's getNextBatchHandle/processBatchHandle
 * function handles, persisting its cursor between steps.
 *
 * @throws when a job with the same jobId already exists.
 * @returns { jobId, status: "running" }
 */
export const startIteratorJob = mutation({
    args: {
        jobId: v.string(),
        config: v.object({
            batchSize: v.number(),
            delayBetweenBatchesMs: v.optional(v.number()),
            getNextBatchHandle: v.string(),
            processBatchHandle: v.string(),
            onCompleteHandle: v.optional(v.string()),
            maxRetries: v.optional(v.number()),
        }),
    },
    handler: async (ctx, { jobId, config }) => {
        // Job IDs are unique; refuse duplicates up front.
        const duplicate = await ctx.db
            .query("iteratorJobs")
            .withIndex("by_jobId", (q) => q.eq("jobId", jobId))
            .first();
        if (duplicate) {
            throw new Error(`Job ${jobId} already exists`);
        }
        const now = Date.now();
        // Normalize config: the inter-batch delay defaults to 100ms.
        const storedConfig = {
            batchSize: config.batchSize,
            delayBetweenBatchesMs: config.delayBetweenBatchesMs ?? 100,
            getNextBatchHandle: config.getNextBatchHandle,
            processBatchHandle: config.processBatchHandle,
            onCompleteHandle: config.onCompleteHandle,
            maxRetries: config.maxRetries,
        };
        const jobDocId = await ctx.db.insert("iteratorJobs", {
            jobId,
            cursor: undefined, // start from the beginning of the table
            processedCount: 0,
            status: "running",
            config: storedConfig,
            retryCount: 0,
            createdAt: now,
            lastRunAt: now,
        });
        // Begin iterating immediately.
        await ctx.scheduler.runAfter(0, internal.lib.processNextBatch, { jobDocId });
        return { jobId, status: "running" };
    },
});
/**
 * pauseIteratorJob - Public mutation pausing a running iterator job.
 * The next scheduled processNextBatch step will observe the "paused"
 * status; the cursor is preserved for a later resume.
 *
 * @throws when the job is missing or not currently running.
 */
export const pauseIteratorJob = mutation({
    args: { jobId: v.string() },
    handler: async (ctx, { jobId }) => {
        const jobDoc = await ctx.db
            .query("iteratorJobs")
            .withIndex("by_jobId", (q) => q.eq("jobId", jobId))
            .first();
        if (!jobDoc) {
            throw new Error(`Job ${jobId} not found`);
        }
        if (jobDoc.status !== "running") {
            throw new Error(`Job ${jobId} is not running (current: ${jobDoc.status})`);
        }
        await ctx.db.patch(jobDoc._id, {
            status: "paused",
        });
        return { jobId, status: "paused" };
    },
});
/**
 * resumeIteratorJob - Public mutation restarting a paused iterator job.
 * Resets retryCount and schedules the next processNextBatch step; the job
 * continues from its persisted cursor.
 *
 * @throws when the job is missing or not currently paused.
 */
export const resumeIteratorJob = mutation({
    args: { jobId: v.string() },
    handler: async (ctx, { jobId }) => {
        const jobDoc = await ctx.db
            .query("iteratorJobs")
            .withIndex("by_jobId", (q) => q.eq("jobId", jobId))
            .first();
        if (!jobDoc) {
            throw new Error(`Job ${jobId} not found`);
        }
        if (jobDoc.status !== "paused") {
            throw new Error(`Job ${jobId} is not paused (current: ${jobDoc.status})`);
        }
        // Fresh retry budget for the resumed run.
        await ctx.db.patch(jobDoc._id, {
            status: "running",
            retryCount: 0,
        });
        await ctx.scheduler.runAfter(0, internal.lib.processNextBatch, { jobDocId: jobDoc._id });
        return { jobId, status: "running" };
    },
});
/**
 * Cancels an active iterator job by marking it "failed" with a cancellation
 * message. Jobs that already finished (completed or failed) are left as-is.
 *
 * @throws if no job has the given jobId.
 */
export const cancelIteratorJob = mutation({
  args: { jobId: v.string() },
  handler: async (ctx, { jobId }) => {
    const doc = await ctx.db
      .query("iteratorJobs")
      .withIndex("by_jobId", (q) => q.eq("jobId", jobId))
      .first();
    if (!doc) {
      throw new Error(`Job ${jobId} not found`);
    }
    const alreadyFinished = doc.status === "completed" || doc.status === "failed";
    if (alreadyFinished) {
      return { jobId, status: doc.status, reason: "Job already finished" };
    }
    // Any pending processNextBatch run checks status === "running" and exits.
    await ctx.db.patch(doc._id, { status: "failed", errorMessage: "Cancelled by user" });
    return { jobId, status: "failed" };
  },
});
|
|
671
|
+
/**
 * Public status view of an iterator job, or null when the jobId is unknown.
 * Returns progress/diagnostic fields plus a trimmed config — the stored
 * function handles are deliberately not exposed.
 */
export const getIteratorJobStatus = query({
  args: { jobId: v.string() },
  handler: async (ctx, { jobId }) => {
    const doc = await ctx.db
      .query("iteratorJobs")
      .withIndex("by_jobId", (q) => q.eq("jobId", jobId))
      .first();
    if (!doc) {
      return null;
    }
    const { batchSize, delayBetweenBatchesMs } = doc.config;
    return {
      jobId: doc.jobId,
      status: doc.status,
      processedCount: doc.processedCount,
      cursor: doc.cursor,
      retryCount: doc.retryCount,
      errorMessage: doc.errorMessage,
      createdAt: doc.createdAt,
      lastRunAt: doc.lastRunAt,
      config: { batchSize, delayBetweenBatchesMs },
    };
  },
});
|
|
697
|
+
/**
 * Lists iterator jobs as lightweight summaries, optionally filtered by
 * status (via the by_status index) and capped at `limit` results.
 *
 * @returns array of { jobId, status, processedCount, createdAt, lastRunAt, errorMessage }
 */
export const listIteratorJobs = query({
  args: {
    status: v.optional(v.union(v.literal("pending"), v.literal("running"), v.literal("paused"), v.literal("completed"), v.literal("failed"))),
    limit: v.optional(v.number()),
  },
  handler: async (ctx, { status, limit }) => {
    // Use the by_status index when a filter is supplied; otherwise scan the table.
    const queryBuilder = status
      ? ctx.db.query("iteratorJobs").withIndex("by_status", (q) => q.eq("status", status))
      : ctx.db.query("iteratorJobs");
    // Bug fix: check for undefined explicitly. The previous truthiness check
    // (`limit ? ... : ...`) treated limit: 0 as "no limit" and returned the
    // whole table; take(0) now correctly yields an empty list.
    const jobs = limit !== undefined ? await queryBuilder.take(limit) : await queryBuilder.collect();
    // Project a summary view; the full config (incl. function handles) is omitted.
    return jobs.map((job) => ({
      jobId: job.jobId,
      status: job.status,
      processedCount: job.processedCount,
      createdAt: job.createdAt,
      lastRunAt: job.lastRunAt,
      errorMessage: job.errorMessage,
    }));
  },
});
|
|
723
|
+
/**
 * Permanently removes a finished iterator job record.
 * Refuses to delete jobs that are still running or paused, and reports
 * the reason instead of throwing.
 */
export const deleteIteratorJob = mutation({
  args: { jobId: v.string() },
  handler: async (ctx, { jobId }) => {
    const doc = await ctx.db
      .query("iteratorJobs")
      .withIndex("by_jobId", (q) => q.eq("jobId", jobId))
      .first();
    if (!doc) {
      return { deleted: false, reason: "Job not found" };
    }
    const isActive = doc.status === "running" || doc.status === "paused";
    if (isActive) {
      return { deleted: false, reason: "Cannot delete active job" };
    }
    await ctx.db.delete(doc._id);
    return { deleted: true };
  },
});
|
|
740
|
+
// ============================================================================
|
|
741
|
+
// Table Iterator - Internal Functions
|
|
742
|
+
// ============================================================================
|
|
743
|
+
/**
 * Internal lookup of an iterator job document by its external jobId string.
 * Returns the raw document, or null when no job matches.
 */
export const getIteratorJob = internalQuery({
  args: { jobId: v.string() },
  handler: async (ctx, args) => {
    const doc = await ctx.db
      .query("iteratorJobs")
      .withIndex("by_jobId", (q) => q.eq("jobId", args.jobId))
      .first();
    return doc;
  },
});
|
|
752
|
+
/**
 * Internal lookup of an iterator job document by its Convex document id.
 * Returns the document, or null if it has been deleted.
 */
export const getIteratorJobById = internalQuery({
  args: { jobDocId: v.id("iteratorJobs") },
  handler: async (ctx, args) => {
    const doc = await ctx.db.get(args.jobDocId);
    return doc;
  },
});
|
|
758
|
+
// Core worker loop for an iterator job. Each invocation: fetches one batch via
// the user-provided getNextBatch query handle, processes it via the
// processBatch action handle, checkpoints progress, and schedules itself again.
// Failures retry with exponential backoff up to config.maxRetries (default 5).
//
// NOTE(review): the batch is processed BEFORE progress is checkpointed, so if
// updateJobProgress (or the reschedule) throws after a successful runAction,
// the retry path re-runs the same batch from the old cursor — at-least-once
// semantics. processBatch handlers are presumably expected to be idempotent;
// confirm against the package docs.
export const processNextBatch = internalAction({
args: { jobDocId: v.id("iteratorJobs") },
handler: async (ctx, { jobDocId }) => {
// Re-read the job each run: a pause/cancel between runs flips status away
// from "running" and this scheduled invocation becomes a no-op.
const job = await ctx.runQuery(internal.lib.getIteratorJobById, { jobDocId });
if (!job || job.status !== "running") {
return { processed: false, reason: "Job not found or not running" };
}
// Default retry budget when the job config does not specify one.
const maxRetries = job.config.maxRetries ?? 5;
try {
// Fetch the next page of items from the user-supplied query handle.
// cursor is normalized to undefined on the first run (no cursor stored).
const getNextBatchHandle = job.config.getNextBatchHandle;
const batchResult = await ctx.runQuery(getNextBatchHandle, {
cursor: job.cursor ?? undefined,
batchSize: job.config.batchSize,
});
const { items, cursor: nextCursor, done } = batchResult;
// Skip the action call entirely for an empty page (e.g. final drained page).
if (items.length > 0) {
const processBatchHandle = job.config.processBatchHandle;
await ctx.runAction(processBatchHandle, { items });
}
const newProcessedCount = job.processedCount + items.length;
if (done) {
// Terminal path: persist the final count, then fire the optional
// user-supplied completion callback with the external jobId.
await ctx.runMutation(internal.lib.markJobCompleted, {
jobDocId,
processedCount: newProcessedCount,
});
if (job.config.onCompleteHandle) {
const onCompleteHandle = job.config.onCompleteHandle;
await ctx.runMutation(onCompleteHandle, {
jobId: job.jobId,
processedCount: newProcessedCount,
});
}
return { processed: true, done: true, processedCount: newProcessedCount };
}
// Checkpoint: advance cursor/count and reset retryCount (done inside
// updateJobProgress), then schedule the next batch after the configured delay.
await ctx.runMutation(internal.lib.updateJobProgress, {
jobDocId,
cursor: nextCursor,
processedCount: newProcessedCount,
});
await ctx.scheduler.runAfter(job.config.delayBetweenBatchesMs, internal.lib.processNextBatch, { jobDocId });
return { processed: true, done: false, processedCount: newProcessedCount };
}
catch (error) {
// Any failure in fetch/process/checkpoint lands here. Non-Error throws
// are stringified so errorMessage is always a plain string.
const errorMessage = error instanceof Error ? error.message : String(error);
const newRetryCount = job.retryCount + 1;
if (newRetryCount >= maxRetries) {
// Retry budget exhausted: mark the job failed and stop rescheduling.
await ctx.runMutation(internal.lib.markJobFailed, {
jobDocId,
errorMessage,
retryCount: newRetryCount,
});
return { processed: false, reason: "Max retries exceeded", error: errorMessage };
}
// Exponential backoff: 2s, 4s, 8s, ... capped at 30s.
const backoffMs = Math.min(1000 * Math.pow(2, newRetryCount), 30000);
await ctx.runMutation(internal.lib.incrementRetryCount, {
jobDocId,
retryCount: newRetryCount,
errorMessage,
});
// Re-run from the last checkpointed cursor after the backoff delay.
await ctx.scheduler.runAfter(backoffMs, internal.lib.processNextBatch, { jobDocId });
return { processed: false, reason: "Retrying", error: errorMessage, retryCount: newRetryCount };
}
},
});
|
|
822
|
+
/**
 * Internal checkpoint after a successful batch: advances the cursor, stores
 * the cumulative processed count, stamps lastRunAt, and resets the retry
 * counter so backoff starts fresh for the next batch.
 */
export const updateJobProgress = internalMutation({
  args: {
    jobDocId: v.id("iteratorJobs"),
    cursor: v.optional(v.string()),
    processedCount: v.number(),
  },
  handler: async (ctx, args) => {
    const { jobDocId, cursor, processedCount } = args;
    const patch = {
      cursor,
      processedCount,
      lastRunAt: Date.now(),
      retryCount: 0,
    };
    await ctx.db.patch(jobDocId, patch);
  },
});
|
|
837
|
+
/**
 * Internal terminal transition: marks a job "completed" with its final
 * processed count and stamps lastRunAt.
 */
export const markJobCompleted = internalMutation({
  args: {
    jobDocId: v.id("iteratorJobs"),
    processedCount: v.number(),
  },
  handler: async (ctx, args) => {
    const patch = {
      status: "completed",
      processedCount: args.processedCount,
      lastRunAt: Date.now(),
    };
    await ctx.db.patch(args.jobDocId, patch);
  },
});
|
|
850
|
+
/**
 * Internal terminal transition: marks a job "failed", recording the error
 * message and the retry count at which it gave up.
 */
export const markJobFailed = internalMutation({
  args: {
    jobDocId: v.id("iteratorJobs"),
    errorMessage: v.string(),
    retryCount: v.number(),
  },
  handler: async (ctx, args) => {
    const { jobDocId, errorMessage, retryCount } = args;
    const patch = {
      status: "failed",
      errorMessage,
      retryCount,
      lastRunAt: Date.now(),
    };
    await ctx.db.patch(jobDocId, patch);
  },
});
|
|
865
|
+
/**
 * Internal: records a failed attempt — stores the new retry count and the
 * latest error message, stamps lastRunAt. Does not modify the job's status.
 */
export const incrementRetryCount = internalMutation({
  args: {
    jobDocId: v.id("iteratorJobs"),
    retryCount: v.number(),
    errorMessage: v.string(),
  },
  handler: async (ctx, args) => {
    const { jobDocId, retryCount, errorMessage } = args;
    const patch = {
      retryCount,
      errorMessage,
      lastRunAt: Date.now(),
    };
    await ctx.db.patch(jobDocId, patch);
  },
});
|
|
879
|
+
//# sourceMappingURL=lib.js.map
|