@convex-dev/workpool 0.3.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/dist/client/index.d.ts.map +1 -1
  2. package/dist/client/index.js +33 -7
  3. package/dist/client/index.js.map +1 -1
  4. package/dist/component/_generated/server.d.ts.map +1 -1
  5. package/dist/component/complete.d.ts.map +1 -1
  6. package/dist/component/complete.js +61 -5
  7. package/dist/component/complete.js.map +1 -1
  8. package/dist/component/config.js.map +1 -1
  9. package/dist/component/crons.d.ts +1 -1
  10. package/dist/component/crons.d.ts.map +1 -1
  11. package/dist/component/crons.js +2 -2
  12. package/dist/component/crons.js.map +1 -1
  13. package/dist/component/danger.d.ts +0 -2
  14. package/dist/component/danger.d.ts.map +1 -1
  15. package/dist/component/danger.js +63 -35
  16. package/dist/component/danger.js.map +1 -1
  17. package/dist/component/lib.d.ts +1 -1
  18. package/dist/component/lib.d.ts.map +1 -1
  19. package/dist/component/lib.js +43 -3
  20. package/dist/component/lib.js.map +1 -1
  21. package/dist/component/loop.js +2 -2
  22. package/dist/component/loop.js.map +1 -1
  23. package/dist/component/recovery.js +7 -7
  24. package/dist/component/recovery.js.map +1 -1
  25. package/dist/component/schema.d.ts +14 -3
  26. package/dist/component/schema.d.ts.map +1 -1
  27. package/dist/component/schema.js +9 -1
  28. package/dist/component/schema.js.map +1 -1
  29. package/dist/component/stats.d.ts +1 -1
  30. package/dist/component/stats.js +1 -1
  31. package/dist/component/stats.js.map +1 -1
  32. package/dist/component/worker.d.ts +7 -2
  33. package/dist/component/worker.d.ts.map +1 -1
  34. package/dist/component/worker.js +37 -7
  35. package/dist/component/worker.js.map +1 -1
  36. package/package.json +27 -24
  37. package/src/client/index.ts +42 -6
  38. package/src/component/_generated/server.ts +0 -5
  39. package/src/component/complete.ts +88 -12
  40. package/src/component/config.ts +1 -4
  41. package/src/component/crons.ts +2 -2
  42. package/src/component/danger.ts +95 -67
  43. package/src/component/lib.ts +54 -4
  44. package/src/component/loop.ts +2 -2
  45. package/src/component/recovery.ts +7 -7
  46. package/src/component/schema.ts +10 -1
  47. package/src/component/stats.test.ts +1 -1
  48. package/src/component/stats.ts +1 -1
  49. package/src/component/worker.ts +46 -7
package/src/client/index.ts

@@ -12,9 +12,11 @@ import {
   type RegisteredMutation,
 } from "convex/server";
 import {
+  getConvexSize,
   type Infer,
   v,
   type Validator,
+  type Value,
   type VAny,
   type VString,
 } from "convex/values";

@@ -559,15 +561,49 @@ export async function enqueueBatch<
   },
 ): Promise<WorkId[]> {
   const { config, ...defaults } = await enqueueArgs(fn, options);
-  const ids = await ctx.runMutation(component.lib.enqueueBatch, {
-    items: fnArgsArray.map((fnArgs) => ({
+  const batches = [];
+  const MAX_BATCH_SIZE = 8_000_000;
+  let currentBatch = [];
+  let currentBatchSize = 0;
+
+  for (const fnArgs of fnArgsArray) {
+    const item = {
       ...defaults,
       fnArgs,
       fnType,
-    })),
-    config,
-  });
-  return ids as WorkId[];
+    };
+    const itemSize = getConvexSize(item as Value);
+
+    // If adding this item would exceed the limit, start a new batch
+    if (
+      currentBatch.length > 0 &&
+      currentBatchSize + itemSize > MAX_BATCH_SIZE
+    ) {
+      batches.push({ items: currentBatch });
+      currentBatch = [];
+      currentBatchSize = 0;
+    }
+
+    currentBatch.push(item);
+    currentBatchSize += itemSize;
+  }
+
+  // Add the last batch if it has items
+  if (currentBatch.length > 0) {
+    batches.push({ items: currentBatch });
+  }
+
+  // Process all batches and collect IDs
+  const allIds: WorkId[] = [];
+  for (const batch of batches) {
+    const ids = await ctx.runMutation(component.lib.enqueueBatch, {
+      items: batch.items,
+      config,
+    });
+    allIds.push(...(ids as WorkId[]));
+  }
+
+  return allIds;
 }
 
 export async function enqueue<
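
The rewritten `enqueueBatch` above no longer sends all items in one mutation call; it greedily packs items into sub-batches capped at 8 MB of serialized size and runs one `enqueueBatch` mutation per sub-batch. The packing step is an ordinary greedy chunking pass; a minimal standalone sketch in TypeScript (the `chunkBySize` helper and `sizeOf` callback are illustrative, not part of the package):

// Greedy size-based chunking: items stay in order, and a new chunk starts
// whenever adding the next item would push the running total over maxBytes.
// A single oversized item still gets a chunk of its own.
function chunkBySize<T>(
  items: T[],
  sizeOf: (item: T) => number,
  maxBytes: number,
): T[][] {
  const chunks: T[][] = [];
  let current: T[] = [];
  let currentSize = 0;
  for (const item of items) {
    const itemSize = sizeOf(item);
    if (current.length > 0 && currentSize + itemSize > maxBytes) {
      chunks.push(current);
      current = [];
      currentSize = 0;
    }
    current.push(item);
    currentSize += itemSize;
  }
  if (current.length > 0) chunks.push(current);
  return chunks;
}

// Example: chunkBySize(["aa", "bbb", "c"], (s) => s.length, 4)
// yields [["aa"], ["bbb", "c"]].

As in the diff, an item is never split across batches: one oversized item simply becomes its own chunk (per-item size limits are enforced separately in lib.ts, below).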
package/src/component/_generated/server.ts

@@ -107,11 +107,6 @@ export const internalAction: ActionBuilder<DataModel, "internal"> =
  */
 export const httpAction: HttpActionBuilder = httpActionGeneric;
 
-type GenericCtx =
-  | GenericActionCtx<DataModel>
-  | GenericMutationCtx<DataModel>
-  | GenericQueryCtx<DataModel>;
-
 /**
  * A set of services for use within Convex query functions.
  *
package/src/component/complete.ts

@@ -1,11 +1,13 @@
 import type { FunctionHandle } from "convex/server";
-import { type Infer, v } from "convex/values";
+import { getConvexSize, type Infer, v } from "convex/values";
 import type { Id } from "./_generated/dataModel.js";
+import { internal } from "./_generated/api.js";
 import { internalMutation, type MutationCtx } from "./_generated/server.js";
 import { kickMainLoop } from "./kick.js";
 import { createLogger } from "./logging.js";
 import { type OnCompleteArgs, type RunResult, vResult } from "./shared.js";
 import { recordCompleted } from "./stats.js";
+import { assert } from "convex-helpers";
 
 export type CompleteJob = Infer<typeof completeArgs.fields.jobs.element>;
 

@@ -24,22 +26,80 @@ export async function completeHandler(
 ) {
   const globals = await ctx.db.query("globals").unique();
   const console = createLogger(globals?.logLevel);
+  if (args.jobs.length === 0) {
+    console.warn("Trying to complete 0 jobs");
+    return;
+  }
   const pendingCompletions: {
     runResult: RunResult;
     workId: Id<"work">;
     retry: boolean;
   }[] = [];
+  const jobAndWorks = (
+    await Promise.all(
+      args.jobs.map(async (job) => {
+        const work = await ctx.db.get(job.workId);
+        if (!work) {
+          console.warn(
+            `[complete] ${job.workId} is done, but its work is gone`,
+          );
+          return null;
+        }
+        if (work.attempts !== job.attempt) {
+          console.warn(`[complete] ${job.workId} mismatched attempt number`);
+          return null;
+        }
+        return { job, work };
+      }),
+    )
+  ).filter((a) => a !== null);
+  if (jobAndWorks.length === 0) {
+    return;
+  }
+  const MAX_BATCH_SIZE = 2_000_000; // combined job / work / payload size
+
+  // Create batches based on size
+  const batches: (typeof jobAndWorks)[] = [];
+  let currentBatch: typeof jobAndWorks = [];
+  let currentBatchSize = 0;
+
+  for (const item of jobAndWorks) {
+    const itemSize =
+      getConvexSize(item.job) +
+      getConvexSize(item.work) +
+      (item.work.payloadSize ?? 0);
+
+    // If adding this item would exceed the limit, start a new batch
+    if (
+      currentBatch.length > 0 &&
+      currentBatchSize + itemSize > MAX_BATCH_SIZE
+    ) {
+      batches.push(currentBatch);
+      currentBatch = [];
+      currentBatchSize = 0;
+    }
+
+    currentBatch.push(item);
+    currentBatchSize += itemSize;
+  }
+
+  // Add the last batch if it has items
+  if (currentBatch.length > 0) {
+    batches.push(currentBatch);
+  }
+
+  // Schedule all batches after the first one
+  for (let i = 1; i < batches.length; i++) {
+    await ctx.scheduler.runAfter(0, internal.complete.complete, {
+      jobs: batches[i]!.map(({ job }) => job),
+    });
+  }
+
+  const ourBatch = batches[0];
+  assert(ourBatch);
+
   await Promise.all(
-    args.jobs.map(async (job) => {
-      const work = await ctx.db.get(job.workId);
-      if (!work) {
-        console.warn(`[complete] ${job.workId} is done, but its work is gone`);
-        return;
-      }
-      if (work.attempts !== job.attempt) {
-        console.warn(`[complete] ${job.workId} mismatched attempt number`);
-        return;
-      }
+    ourBatch.map(async ({ work, job }) => {
       work.attempts++;
       await ctx.db.patch(work._id, { attempts: work.attempts });
       const pendingCompletion = await ctx.db

@@ -58,6 +118,15 @@ export async function completeHandler(
       if (!retry) {
         if (work.onComplete) {
          try {
+            // Retrieve large context if stored separately
+            let context = work.onComplete.context;
+            if (context === undefined && work.payloadId) {
+              const payload = await ctx.db.get(work.payloadId);
+              if (payload) {
+                context = payload.context;
+              }
+            }
+
             const handle = work.onComplete.fnHandle as FunctionHandle<
               "mutation",
               OnCompleteArgs,

@@ -65,7 +134,7 @@ export async function completeHandler(
             >;
             await ctx.runMutation(handle, {
               workId: work._id,
-              context: work.onComplete.context,
+              context,
               result: job.runResult,
             });
             console.debug(`[complete] onComplete for ${job.workId} completed`);

@@ -78,6 +147,13 @@ export async function completeHandler(
           }
         }
         recordCompleted(console, work, job.runResult.kind);
+
+        // Clean up any large data that was stored separately.
+        // TODO: consider async deletion in the future to avoid bandwidth limits.
+        if (work.payloadId) {
+          await ctx.db.delete(work.payloadId);
+        }
+
         // This is the terminating state for work.
         await ctx.db.delete(job.workId);
       }
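
The `completeHandler` changes bound how much data one transaction touches: jobs whose `work` rows (plus any spilled payload) would exceed roughly 2 MB combined are split off and re-enqueued onto the same mutation via `ctx.scheduler.runAfter(0, internal.complete.complete, ...)`, so only the first batch is processed inline. A minimal sketch of that self-rescheduling shape, assuming a hypothetical `tasks` table and `internal.tasks.process` reference (not the package's actual function):

import { v } from "convex/values";
import { internal } from "./_generated/api";
import { internalMutation } from "./_generated/server";

export const process = internalMutation({
  args: { ids: v.array(v.id("tasks")) }, // "tasks" is a hypothetical table
  handler: async (ctx, { ids }) => {
    const BATCH = 100; // bound how much one transaction touches
    const now = ids.slice(0, BATCH);
    const rest = ids.slice(BATCH);
    // Handle the first slice in this transaction...
    for (const id of now) {
      await ctx.db.patch(id, { done: true });
    }
    // ...and hand the remainder to a fresh invocation of this same mutation.
    if (rest.length > 0) {
      await ctx.scheduler.runAfter(0, internal.tasks.process, { ids: rest });
    }
  },
});

Each rescheduled slice runs in its own transaction, so no single invocation risks Convex's per-transaction read and write limits.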
package/src/component/config.ts

@@ -36,10 +36,7 @@ export async function getOrUpdateGlobals(
   const { globals } = await _getOrUpdateGlobals(ctx, config);
   return globals;
 }
-async function _getOrUpdateGlobals(
-  ctx: MutationCtx,
-  config?: Partial<Config>,
-) {
+async function _getOrUpdateGlobals(ctx: MutationCtx, config?: Partial<Config>) {
   if (config) {
     validateConfig(config);
   }
package/src/component/crons.ts

@@ -6,7 +6,7 @@ import { RECOVERY_PERIOD_SEGMENTS } from "./loop.js";
 
 const crons = cronJobs();
 
-export const recover = internalMutation({
+export const healthcheck = internalMutation({
   args: {},
   handler: async (ctx) => {
     const internalState = await ctx.db.query("internalState").first();

@@ -61,6 +61,6 @@ export const recover = internalMutation({
   },
 });
 
-crons.interval("recover", { minutes: 30 }, internal.crons.recover);
+crons.interval("healthcheck", { minutes: 30 }, internal.crons.healthcheck);
 
 export default crons;
package/src/component/danger.ts

@@ -1,41 +1,53 @@
-import { v } from "convex/values";
+import { v, getConvexSize } from "convex/values";
 import { internal } from "./_generated/api.js";
 import { internalMutation } from "./_generated/server.js";
-import { paginator } from "convex-helpers/server/pagination";
-import schema from "./schema.js";
 
 const DEFAULT_OLDER_THAN = 1000 * 60 * 60 * 24;
+const MAX_ROWS_READ = 100;
+const MAX_BYTES_READ = 4_000_000;
 
 export const clearPending = internalMutation({
   args: {
     olderThan: v.optional(v.number()),
     before: v.optional(v.number()),
-    cursor: v.optional(v.string()),
   },
   handler: async (ctx, args) => {
     const time =
       args.before ?? Date.now() - (args.olderThan ?? DEFAULT_OLDER_THAN);
-    const entries = await paginator(ctx.db, schema)
+    let i = 0,
+      totalBytes = 0,
+      hasMore = false,
+      nextTime;
+    console.log("Clearing pending before", new Date(time).toUTCString());
+    for await (const entry of ctx.db
       .query("pendingStart")
-      .withIndex("by_creation_time", (q) => q.lt("_creationTime", time))
-      .paginate({
-        cursor: args.cursor ?? null,
-        numItems: 100,
-      });
-    await Promise.all(
-      entries.page.map(async (entry) => {
-        await ctx.db.delete(entry._id);
-        const work = await ctx.db.get(entry.workId);
-        if (work) {
-          await ctx.db.delete(work._id);
+      .withIndex("by_creation_time", (q) => q.lte("_creationTime", time))
+      .order("desc")) {
+      i++;
+      const work = await ctx.db.get(entry.workId);
+      totalBytes +=
+        getConvexSize(entry) + getConvexSize(work) + (work?.payloadSize ?? 0);
+      if (i > MAX_ROWS_READ || totalBytes > MAX_BYTES_READ) {
+        hasMore = true;
+        nextTime = entry._creationTime;
+        console.log(`Continuing after ${i} entries, ${totalBytes} bytes`);
+        break;
+      }
+      await ctx.db.delete(entry._id);
+      if (work) {
+        // Clean up any large data stored separately
+        if (work.payloadId) {
+          await ctx.db.delete("payload", work.payloadId);
         }
-      }),
-    );
-    if (!entries.isDone) {
+        await ctx.db.delete("work", work._id);
+      }
+    }
+    if (hasMore) {
       await ctx.scheduler.runAfter(0, internal.danger.clearPending, {
-        before: time,
-        cursor: entries.continueCursor,
+        before: nextTime,
       });
+    } else {
+      console.log(`Done clearing pending entries. ${i} in the last batch.`);
     }
   },
 });

@@ -44,59 +56,75 @@ export const clearOldWork = internalMutation({
   args: {
     olderThan: v.optional(v.number()),
     before: v.optional(v.number()),
-    cursor: v.optional(v.string()),
   },
   handler: async (ctx, args) => {
     const time =
       args.before ?? Date.now() - (args.olderThan ?? DEFAULT_OLDER_THAN);
-    const entries = await paginator(ctx.db, schema)
+    let i = 0,
+      totalBytes = 0,
+      hasMore = false,
+      nextTime;
+    console.log("Clearing old work before", new Date(time).toUTCString());
+    for await (const entry of ctx.db
       .query("work")
-      .withIndex("by_creation_time", (q) => q.lt("_creationTime", time))
-      .paginate({
-        cursor: args.cursor ?? null,
-        numItems: 100,
-      });
-    await Promise.all(
-      entries.page.map(async (entry) => {
-        const pendingStart = await ctx.db
-          .query("pendingStart")
-          .withIndex("workId", (q) => q.eq("workId", entry._id))
-          .unique();
-        if (pendingStart) {
-          await ctx.db.delete(pendingStart._id);
-        }
-        const pendingCompletion = await ctx.db
-          .query("pendingCompletion")
-          .withIndex("workId", (q) => q.eq("workId", entry._id))
-          .unique();
-        if (pendingCompletion) {
-          await ctx.db.delete(pendingCompletion._id);
-        }
-        const pendingCancelation = await ctx.db
-          .query("pendingCancelation")
-          .withIndex("workId", (q) => q.eq("workId", entry._id))
-          .unique();
-        if (pendingCancelation) {
-          await ctx.db.delete(pendingCancelation._id);
-        }
-        console.debug(
-          `cleared ${entry.fnName}: ${entry.fnArgs} (${Object.entries({
-            pendingStart,
-            pendingCompletion,
-            pendingCancelation,
-          })
-            .filter(([_, v]) => v !== null)
-            .map(([name]) => name)
-            .join(", ")})`,
-        );
-        await ctx.db.delete(entry._id);
-      }),
-    );
-    if (!entries.isDone) {
+      .withIndex("by_creation_time", (q) => q.lte("_creationTime", time))
+      .order("desc")) {
+      i++;
+      const pendingStart = await ctx.db
+        .query("pendingStart")
+        .withIndex("workId", (q) => q.eq("workId", entry._id))
+        .unique();
+      const pendingCompletion = await ctx.db
+        .query("pendingCompletion")
+        .withIndex("workId", (q) => q.eq("workId", entry._id))
+        .unique();
+      const pendingCancelation = await ctx.db
+        .query("pendingCancelation")
+        .withIndex("workId", (q) => q.eq("workId", entry._id))
+        .unique();
+      totalBytes +=
+        getConvexSize(entry) +
+        getConvexSize(pendingStart) +
+        getConvexSize(pendingCompletion) +
+        getConvexSize(pendingCancelation) +
+        (entry.payloadSize ?? 0);
+      if (i > MAX_ROWS_READ || totalBytes > MAX_BYTES_READ) {
+        hasMore = true;
+        nextTime = entry._creationTime;
+        console.log(`Continuing after ${i} entries, ${totalBytes} bytes`);
+        break;
+      }
+      if (pendingStart) {
+        await ctx.db.delete(pendingStart._id);
+      }
+      if (pendingCompletion) {
+        await ctx.db.delete(pendingCompletion._id);
+      }
+      if (pendingCancelation) {
+        await ctx.db.delete(pendingCancelation._id);
+      }
+      // Clean up any large data stored separately
+      if (entry.payloadId) {
+        await ctx.db.delete(entry.payloadId);
+      }
+      console.debug(
+        `cleared ${entry.fnName}: ${entry.fnArgs} (${Object.entries({
+          pendingStart,
+          pendingCompletion,
+          pendingCancelation,
+        })
+          .filter(([_, v]) => v !== null)
+          .map(([name]) => name)
+          .join(", ")})`,
      );
+      await ctx.db.delete(entry._id);
+    }
+    if (hasMore) {
       await ctx.scheduler.runAfter(0, internal.danger.clearOldWork, {
-        before: time,
-        cursor: entries.continueCursor,
+        before: nextTime,
       });
+    } else {
+      console.log(`Done clearing old work. ${i} in the last batch.`);
     }
   },
 });
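
Both clearing functions now replace `paginator` cursors with a `_creationTime` watermark: scan newest-first at or below `before`, stop when a row or byte budget is exceeded, and reschedule with the boundary row's `_creationTime` as the new `before`. Because the boundary row is not deleted before breaking, resuming with `lte` picks it up again, so nothing is skipped. A sketch of the same watermark pattern, assuming a hypothetical `items` table and `internal.items.sweep` reference:

import { v } from "convex/values";
import { internal } from "./_generated/api";
import { internalMutation } from "./_generated/server";

export const sweep = internalMutation({
  args: { before: v.number() },
  handler: async (ctx, { before }) => {
    const MAX_ROWS = 100; // per-invocation row budget
    let seen = 0;
    let resumeAt: number | undefined;
    // Newest-first scan at or below the watermark, using the built-in
    // by_creation_time index.
    for await (const doc of ctx.db
      .query("items") // hypothetical table
      .withIndex("by_creation_time", (q) => q.lte("_creationTime", before))
      .order("desc")) {
      if (++seen > MAX_ROWS) {
        // Don't delete the boundary row; resuming with lte re-reads it.
        resumeAt = doc._creationTime;
        break;
      }
      await ctx.db.delete(doc._id);
    }
    if (resumeAt !== undefined) {
      await ctx.scheduler.runAfter(0, internal.items.sweep, {
        before: resumeAt,
      });
    }
  },
});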
package/src/component/lib.ts

@@ -1,6 +1,7 @@
-import { type ObjectType, v } from "convex/values";
+import { type ObjectType, v, getConvexSize } from "convex/values";
+import type { WithoutSystemFields } from "convex/server";
 import { api } from "./_generated/api.js";
-import type { Id } from "./_generated/dataModel.js";
+import { type Doc, type Id } from "./_generated/dataModel.js";
 import {
   mutation,
   type MutationCtx,

@@ -28,6 +29,10 @@ import {
 import { recordEnqueued } from "./stats.js";
 import { getOrUpdateGlobals } from "./config.js";
 
+const INLINE_METADATA_THRESHOLD = 8_000; // 8KB threshold
+const MAX_DOC_SIZE = 1_000_000; // Some buffer for 1MiB actual limit
+const PAYLOAD_DOC_OVERHEAD = 78; // Size of { args: null, context: null }
+
 const itemArgs = {
   fnHandle: v.string(),
   fnName: v.string(),

@@ -59,10 +64,55 @@ async function enqueueHandler(
   { runAt, ...workArgs }: ObjectType<typeof itemArgs>,
 ) {
   runAt = boundScheduledTime(runAt, console);
-  const workId = await ctx.db.insert("work", {
+
+  const fnArgsSize = getConvexSize(workArgs.fnArgs);
+  if (fnArgsSize > MAX_DOC_SIZE) {
+    throw new Error(
+      `Function arguments for function ${workArgs.fnName} too large: ${fnArgsSize} bytes (max: ${MAX_DOC_SIZE} bytes)`,
+    );
+  }
+
+  let contextSize = 0;
+  const context = workArgs.onComplete?.context;
+  if (context !== undefined) {
+    contextSize = getConvexSize(context);
+    if (contextSize > MAX_DOC_SIZE) {
+      throw new Error(
+        `OnComplete context for function ${workArgs.fnName} too large: ${contextSize} bytes (max: ${MAX_DOC_SIZE} bytes)`,
+      );
+    }
+  }
+
+  const workItem: WithoutSystemFields<Doc<"work">> = {
     ...workArgs,
     attempts: 0,
-  });
+  };
+
+  if (fnArgsSize >= INLINE_METADATA_THRESHOLD) {
+    // Args are large, store separately
+    const payloadDoc: { args: Record<string, any>; context?: unknown } = {
+      args: workArgs.fnArgs,
+    };
+    workItem.payloadSize = fnArgsSize + PAYLOAD_DOC_OVERHEAD;
+    delete workItem.fnArgs;
+    if (contextSize >= INLINE_METADATA_THRESHOLD) {
+      // Context is also too big to inline
+      payloadDoc.context = context;
+      workItem.payloadSize += contextSize;
+      delete workItem.onComplete!.context;
+    }
+    workItem.payloadId = await ctx.db.insert("payload", payloadDoc);
+  } else if (fnArgsSize + contextSize >= INLINE_METADATA_THRESHOLD) {
+    // Args are small enough, but combined with context it's too big.
+    // Store just context in this case.
+    workItem.payloadId = await ctx.db.insert("payload", { context });
+    delete workItem.onComplete!.context;
+    workItem.payloadSize = contextSize + PAYLOAD_DOC_OVERHEAD;
+  }
+
+  // Store the work item
+  const workId = await ctx.db.insert("work", workItem);
+
   await ctx.db.insert("pendingStart", {
     workId,
     segment: max(toSegment(runAt), kickSegment),
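
The enqueue path now has three storage outcomes for a work item's `fnArgs` and onComplete `context`: both inline, only the context spilled to a `payload` row, or the args spilled (carrying a large context along). The branching above reduces to a small decision function; an illustrative TypeScript restatement (names are hypothetical, the threshold mirrors INLINE_METADATA_THRESHOLD):

type Placement =
  | { kind: "inline" } // both fit on the work row
  | { kind: "spillContext" } // args stay inline, context moves to payload
  | { kind: "spillArgs"; alsoContext: boolean }; // args move, maybe context too

function placePayload(
  argsSize: number,
  contextSize: number,
  threshold = 8_000,
): Placement {
  if (argsSize >= threshold) {
    // Large args always spill; a large context rides along in the same doc.
    return { kind: "spillArgs", alsoContext: contextSize >= threshold };
  }
  if (argsSize + contextSize >= threshold) {
    // Individually small, jointly too big: spill only the context.
    return { kind: "spillContext" };
  }
  return { kind: "inline" };
}

Note that spilling args while leaving a large context inline is never an outcome: once args spill, a context over the threshold goes into the same payload document.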
package/src/component/loop.ts

@@ -561,8 +561,8 @@ async function beginWork(
     throw new Error("work not found");
   }
   recordStarted(console, work, lagMs);
-  const { attempts: attempt, fnHandle, fnArgs } = work;
-  const args = { workId, fnHandle, fnArgs, logLevel, attempt };
+  const { attempts: attempt, fnHandle, fnArgs, payloadId } = work;
+  const args = { workId, fnHandle, fnArgs, payloadId, logLevel, attempt };
   if (work.fnType === "action") {
     return ctx.scheduler.runAfter(0, internal.worker.runActionWrapper, args);
   } else if (work.fnType === "mutation" || work.fnType === "query") {
package/src/component/recovery.ts

@@ -1,7 +1,7 @@
 import { type Infer, v } from "convex/values";
 import { internalMutation, type MutationCtx } from "./_generated/server.js";
-import { completeArgs, completeHandler } from "./complete.js";
 import { createLogger } from "./logging.js";
+import { type CompleteJob, completeHandler } from "./complete.js";
 
 const recoveryArgs = v.object({
   jobs: v.array(

@@ -41,7 +41,7 @@ export async function recoveryHandler(
 ) {
   const globals = await ctx.db.query("globals").unique();
   const console = createLogger(globals?.logLevel);
-  const toComplete: Infer<typeof completeArgs.fields.jobs> = [];
+  const completionJobs: CompleteJob[] = [];
   for (let i = 0; i < jobs.length; i++) {
     const job = jobs[i];
     const preamble = `[recovery] Scheduled job ${job.scheduledId} for work ${job.workId}`;

@@ -68,7 +68,7 @@ export async function recoveryHandler(
     const scheduled = await ctx.db.system.get(job.scheduledId);
     if (scheduled === null) {
       console.warn(`${preamble} not found in _scheduled_functions`);
-      toComplete.push({
+      completionJobs.push({
         workId: job.workId,
         runResult: { kind: "failed", error: `Scheduled job not found` },
         attempt: job.attempt,

@@ -80,7 +80,7 @@ export async function recoveryHandler(
     switch (scheduled.state.kind) {
       case "failed": {
         console.debug(`${preamble} failed and detected in recovery`);
-        toComplete.push({
+        completionJobs.push({
          workId: job.workId,
          runResult: scheduled.state,
          attempt: job.attempt,

@@ -89,7 +89,7 @@ export async function recoveryHandler(
       }
       case "canceled": {
         console.debug(`${preamble} was canceled and detected in recovery`);
-        toComplete.push({
+        completionJobs.push({
          workId: job.workId,
          runResult: { kind: "failed", error: "Canceled via scheduler" },
          attempt: job.attempt,

@@ -98,7 +98,7 @@ export async function recoveryHandler(
       }
     }
   }
-  if (toComplete.length > 0) {
-    await completeHandler(ctx, { jobs: toComplete });
+  if (completionJobs.length > 0) {
+    await completeHandler(ctx, { jobs: completionJobs });
   }
 }
package/src/component/schema.ts

@@ -62,7 +62,10 @@ export default defineSchema({
     fnType,
     fnHandle: v.string(),
     fnName: v.string(),
-    fnArgs: v.any(),
+    fnArgs: v.optional(v.any()),
+    // Reference to large args/onComplete context if stored separately
+    payloadId: v.optional(v.id("payload")),
+    payloadSize: v.optional(v.number()),
     attempts: v.number(), // number of completed attempts
     onComplete: v.optional(vOnCompleteFnContext),
     retryBehavior: v.optional(retryBehavior),

@@ -94,4 +97,10 @@ export default defineSchema({
   })
     .index("workId", ["workId"])
     .index("segment", ["segment"]),
+
+  // Store large data separately to avoid document size limits
+  payload: defineTable({
+    args: v.optional(v.record(v.string(), v.any())),
+    context: v.optional(v.any()),
+  }),
 });
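
With the new `payload` table, a `work` row either carries `fnArgs` and `onComplete.context` inline or points at a spilled copy via `payloadId`; readers must tolerate both shapes, as `complete.ts` does above when it falls back to the payload's `context`. A sketch of that read-side fallback as a helper (hypothetical name, same fallback order as the diff):

import type { Doc } from "./_generated/dataModel";
import type { QueryCtx } from "./_generated/server";

// Resolve a work row's args and onComplete context, whether stored
// inline or spilled to the payload table. Hypothetical helper, not
// part of the package.
async function resolveWorkPayload(ctx: QueryCtx, work: Doc<"work">) {
  let fnArgs = work.fnArgs;
  let context = work.onComplete?.context;
  if (work.payloadId && (fnArgs === undefined || context === undefined)) {
    const payload = await ctx.db.get(work.payloadId);
    fnArgs = fnArgs ?? payload?.args;
    context = context ?? payload?.context;
  }
  return { fnArgs, context };
}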
package/src/component/stats.test.ts

@@ -190,7 +190,7 @@ describe("stats", () => {
    });
 
    // Create more pending start items than maxParallelism
-    const maxParallelism = 5;
+    const maxParallelism = 50;
 
    // Create maxParallelism + 1 work items to trigger pagination
    for (let i = 0; i < maxParallelism + 1; i++) {
package/src/component/stats.ts

@@ -81,7 +81,7 @@ export async function generateReport(
         .lt("segment", currentSegment),
     )
     .paginate({
-      numItems: maxParallelism,
+      numItems: Math.max(maxParallelism, 10),
       cursor: null,
     });
   if (pendingStart.isDone) {