@nicnocquee/dataqueue 1.33.0 → 1.35.0-beta.20260224075710
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai/build-docs-content.ts +96 -0
- package/ai/build-llms-full.ts +42 -0
- package/ai/docs-content.json +290 -0
- package/ai/rules/advanced.md +170 -0
- package/ai/rules/basic.md +159 -0
- package/ai/rules/react-dashboard.md +87 -0
- package/ai/skills/dataqueue-advanced/SKILL.md +370 -0
- package/ai/skills/dataqueue-core/SKILL.md +235 -0
- package/ai/skills/dataqueue-react/SKILL.md +201 -0
- package/dist/cli.cjs +577 -32
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.d.cts +52 -2
- package/dist/cli.d.ts +52 -2
- package/dist/cli.js +575 -32
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +937 -108
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +358 -11
- package/dist/index.d.ts +358 -11
- package/dist/index.js +937 -108
- package/dist/index.js.map +1 -1
- package/dist/mcp-server.cjs +186 -0
- package/dist/mcp-server.cjs.map +1 -0
- package/dist/mcp-server.d.cts +32 -0
- package/dist/mcp-server.d.ts +32 -0
- package/dist/mcp-server.js +175 -0
- package/dist/mcp-server.js.map +1 -0
- package/migrations/1781200000005_add_retry_config_to_job_queue.sql +17 -0
- package/migrations/1781200000006_add_output_to_job_queue.sql +3 -0
- package/package.json +10 -4
- package/src/backend.ts +36 -3
- package/src/backends/postgres.ts +344 -42
- package/src/backends/redis-scripts.ts +173 -8
- package/src/backends/redis.test.ts +668 -0
- package/src/backends/redis.ts +244 -15
- package/src/cli.test.ts +65 -0
- package/src/cli.ts +56 -19
- package/src/db-util.ts +1 -1
- package/src/index.test.ts +811 -12
- package/src/index.ts +106 -14
- package/src/install-mcp-command.test.ts +216 -0
- package/src/install-mcp-command.ts +185 -0
- package/src/install-rules-command.test.ts +218 -0
- package/src/install-rules-command.ts +233 -0
- package/src/install-skills-command.test.ts +176 -0
- package/src/install-skills-command.ts +124 -0
- package/src/mcp-server.test.ts +162 -0
- package/src/mcp-server.ts +231 -0
- package/src/processor.ts +133 -49
- package/src/queue.test.ts +477 -0
- package/src/queue.ts +20 -3
- package/src/supervisor.test.ts +340 -0
- package/src/supervisor.ts +177 -0
- package/src/types.ts +318 -3
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
# DataQueue — Advanced Rules
|
|
2
|
+
|
|
3
|
+
## Step Memoization (ctx.run)
|
|
4
|
+
|
|
5
|
+
Wrap side-effectful work in `ctx.run(stepName, fn)` for durability. Cached results replay on re-invocation after a wait.
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
const data = await ctx.run('fetch', async () => fetchFromAPI(url));
|
|
9
|
+
await ctx.waitFor({ hours: 1 });
|
|
10
|
+
await ctx.run('notify', async () => sendNotification(data));
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
Step names must be unique within a handler and stable across deployments.
|
|
14
|
+
|
|
15
|
+
## Waits
|
|
16
|
+
|
|
17
|
+
- `ctx.waitFor({ hours: 24 })` — pause for a duration (seconds, minutes, hours, days, weeks, months, years).
|
|
18
|
+
- `ctx.waitUntil(date)` — pause until a specific date.
|
|
19
|
+
- `ctx.waitForToken(tokenId)` — pause until an external actor completes the token.
|
|
20
|
+
|
|
21
|
+
Waiting jobs release their worker lock and concurrency slot. They consume no resources.
|
|
22
|
+
|
|
23
|
+
Wait calls use a positional counter internally. Do not add/remove waits conditionally between re-invocations.
|
|
24
|
+
|
|
25
|
+
## Token System
|
|
26
|
+
|
|
27
|
+
```typescript
|
|
28
|
+
const token = await ctx.createToken({ timeout: '48h', tags: ['approval'] });
|
|
29
|
+
const result = await ctx.waitForToken<{ approved: boolean }>(token.id);
|
|
30
|
+
if (result.ok) {
|
|
31
|
+
/* result.output.approved */
|
|
32
|
+
}
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
Complete externally: `await queue.completeToken(tokenId, { approved: true })`.
|
|
36
|
+
Expire timed-out tokens: `await queue.expireTimedOutTokens()`.
|
|
37
|
+
|
|
38
|
+
## Cron Scheduling
|
|
39
|
+
|
|
40
|
+
```typescript
|
|
41
|
+
await queue.addCronJob({
|
|
42
|
+
scheduleName: 'daily-cleanup',
|
|
43
|
+
cronExpression: '0 2 * * *',
|
|
44
|
+
jobType: 'cleanup',
|
|
45
|
+
payload: { days: 30 },
|
|
46
|
+
timezone: 'UTC',
|
|
47
|
+
allowOverlap: false,
|
|
48
|
+
});
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
The processor auto-enqueues due cron jobs before each batch. Manage with `pauseCronJob`, `resumeCronJob`, `editCronJob`, `removeCronJob`, `listCronJobs`.
|
|
52
|
+
|
|
53
|
+
## Timeout Management
|
|
54
|
+
|
|
55
|
+
- `ctx.prolong(ms)` — proactively reset deadline. `ctx.prolong()` resets to original `timeoutMs`.
|
|
56
|
+
- `ctx.onTimeout(() => ms)` — reactive; return ms to extend, or nothing to let timeout proceed.
|
|
57
|
+
- `forceKillOnTimeout: true` — terminates handler via Worker Thread. Requires Node.js, serializable handler, and disables `ctx.run`/waits/`prolong`/`onTimeout`.
|
|
58
|
+
|
|
59
|
+
## Tags and Filtering
|
|
60
|
+
|
|
61
|
+
```typescript
|
|
62
|
+
await queue.addJob({ jobType: 'email', payload, tags: ['welcome', 'user'] });
|
|
63
|
+
const jobs = await queue.getJobsByTags(['welcome'], 'any');
|
|
64
|
+
await queue.cancelAllUpcomingJobs({ tags: { values: ['user'], mode: 'all' } });
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
Modes: `exact` (exact set), `all` (superset), `any` (intersection), `none` (exclusion).
|
|
68
|
+
|
|
69
|
+
## Idempotency
|
|
70
|
+
|
|
71
|
+
```typescript
|
|
72
|
+
await queue.addJob({
|
|
73
|
+
jobType: 'email',
|
|
74
|
+
payload,
|
|
75
|
+
idempotencyKey: `welcome-${userId}`,
|
|
76
|
+
});
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
Returns existing job ID if key already exists. Key persists until `cleanupOldJobs` removes the job.
|
|
80
|
+
|
|
81
|
+
## Transactional Job Creation (PostgreSQL Only)
|
|
82
|
+
|
|
83
|
+
Pass a `pg.PoolClient` inside a transaction via the `{ db }` option to enqueue a job atomically with other writes:
|
|
84
|
+
|
|
85
|
+
```typescript
|
|
86
|
+
const client = await pool.connect();
|
|
87
|
+
await client.query('BEGIN');
|
|
88
|
+
await client.query('INSERT INTO users (email) VALUES ($1)', [email]);
|
|
89
|
+
await queue.addJob(
|
|
90
|
+
{
|
|
91
|
+
jobType: 'send_email',
|
|
92
|
+
payload: { to: email, subject: 'Welcome!', body: '...' },
|
|
93
|
+
},
|
|
94
|
+
{ db: client },
|
|
95
|
+
);
|
|
96
|
+
await client.query('COMMIT');
|
|
97
|
+
client.release();
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
If the transaction rolls back, the job and its event are never persisted. The `db` option accepts any object with a `.query(text, values)` method matching `pg`'s signature. Using `{ db }` with the Redis backend throws an error.
|
|
101
|
+
|
|
102
|
+
## Retry Strategy
|
|
103
|
+
|
|
104
|
+
```typescript
|
|
105
|
+
await queue.addJob({
|
|
106
|
+
jobType: 'email',
|
|
107
|
+
payload,
|
|
108
|
+
retryDelay: 10, // base 10s
|
|
109
|
+
retryBackoff: true, // exponential (default)
|
|
110
|
+
retryDelayMax: 300, // cap at 5 min
|
|
111
|
+
});
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
- `retryBackoff: false` — fixed delay of `retryDelay` seconds.
|
|
115
|
+
- `retryBackoff: true` (default) — `retryDelay * 2^attempts` with jitter, capped by `retryDelayMax`.
|
|
116
|
+
- No config — legacy `2^attempts * 60s` formula (backward compatible).
|
|
117
|
+
- Cron schedules propagate retry config to enqueued jobs.
|
|
118
|
+
|
|
119
|
+
## Event Hooks
|
|
120
|
+
|
|
121
|
+
Subscribe to real-time lifecycle events via `on`, `once`, `off`, `removeAllListeners`. Works with both Postgres and Redis.
|
|
122
|
+
|
|
123
|
+
```typescript
|
|
124
|
+
queue.on('job:completed', ({ jobId, jobType }) => {
|
|
125
|
+
metrics.increment('job.completed', { jobType });
|
|
126
|
+
});
|
|
127
|
+
queue.on('job:failed', ({ jobId, jobType, error, willRetry }) => {
|
|
128
|
+
if (!willRetry) alertOps(`Permanent failure: ${jobId}`);
|
|
129
|
+
});
|
|
130
|
+
queue.on('error', (error) => Sentry.captureException(error));
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
Events: `job:added`, `job:processing`, `job:completed`, `job:failed` (with `willRetry`), `job:cancelled`, `job:retried`, `job:waiting`, `job:progress`, `job:output`, `error`.
|
|
134
|
+
|
|
135
|
+
`error` events fire alongside `onError` callbacks in `ProcessorOptions` / `SupervisorOptions` — both mechanisms work independently.
|
|
136
|
+
|
|
137
|
+
## Scaling
|
|
138
|
+
|
|
139
|
+
- Increase `batchSize` and `concurrency` for higher throughput.
|
|
140
|
+
- Run multiple processor instances with unique `workerId` values — `FOR UPDATE SKIP LOCKED` (PostgreSQL) or Lua scripts (Redis) prevent double-claiming.
|
|
141
|
+
- Use `jobType` filter for specialized workers.
|
|
142
|
+
- Use `createSupervisor()` to automate maintenance (reclaim stuck jobs, cleanup, token expiry). Safe to run across multiple instances.
|
|
143
|
+
|
|
144
|
+
## Progress Tracking
|
|
145
|
+
|
|
146
|
+
```typescript
|
|
147
|
+
await ctx.setProgress(50); // 0–100, persisted to DB
|
|
148
|
+
```
|
|
149
|
+
|
|
150
|
+
Read via `queue.getJob(id)` (`progress` field) or React SDK's `useJob` hook.
|
|
151
|
+
|
|
152
|
+
## Job Output
|
|
153
|
+
|
|
154
|
+
Store results via `ctx.setOutput(data)` or by returning a value from the handler:
|
|
155
|
+
|
|
156
|
+
```typescript
|
|
157
|
+
// Option 1: return a value
|
|
158
|
+
const handler = async (payload, signal, ctx) => {
|
|
159
|
+
const result = await doWork(payload);
|
|
160
|
+
return { url: result.downloadUrl };
|
|
161
|
+
};
|
|
162
|
+
|
|
163
|
+
// Option 2: ctx.setOutput (takes precedence over return value)
|
|
164
|
+
const handler = async (payload, signal, ctx) => {
|
|
165
|
+
const result = await doWork(payload);
|
|
166
|
+
await ctx.setOutput({ url: result.downloadUrl });
|
|
167
|
+
};
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
Read via `queue.getJob(id)` (`output` field) or React SDK's `useJob` hook (`output` property).
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
# DataQueue — Basic Rules
|
|
2
|
+
|
|
3
|
+
## Imports
|
|
4
|
+
|
|
5
|
+
Always import from `@nicnocquee/dataqueue`. There is no subpath like `/v2` or `/v3`.
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
import { initJobQueue, JobHandlers } from '@nicnocquee/dataqueue';
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## PayloadMap Pattern
|
|
12
|
+
|
|
13
|
+
Define an object type where keys are job type strings and values are payload shapes. This powers type-safe `addJob`, `createProcessor`, and handler completeness checking.
|
|
14
|
+
|
|
15
|
+
```typescript
|
|
16
|
+
type JobPayloadMap = {
|
|
17
|
+
send_email: { to: string; subject: string; body: string };
|
|
18
|
+
generate_report: { reportId: string; userId: string };
|
|
19
|
+
};
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## Initialization (Singleton)
|
|
23
|
+
|
|
24
|
+
Never call `initJobQueue` per request — each call creates a new database connection pool. Use a module-level singleton:
|
|
25
|
+
|
|
26
|
+
```typescript
|
|
27
|
+
import { initJobQueue } from '@nicnocquee/dataqueue';
|
|
28
|
+
|
|
29
|
+
let jobQueue: ReturnType<typeof initJobQueue<JobPayloadMap>> | null = null;
|
|
30
|
+
|
|
31
|
+
export const getJobQueue = () => {
|
|
32
|
+
if (!jobQueue) {
|
|
33
|
+
jobQueue = initJobQueue<JobPayloadMap>({
|
|
34
|
+
databaseConfig: { connectionString: process.env.PG_DATAQUEUE_DATABASE },
|
|
35
|
+
});
|
|
36
|
+
}
|
|
37
|
+
return jobQueue;
|
|
38
|
+
};
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
For Redis, set `backend: 'redis'` and use `redisConfig` with `url` or `host`/`port`/`password`. Install `ioredis` as a peer dependency.
|
|
42
|
+
|
|
43
|
+
### Bring Your Own Pool / Client
|
|
44
|
+
|
|
45
|
+
Pass an existing `pg.Pool` or `ioredis` client instead of connection config:
|
|
46
|
+
|
|
47
|
+
```typescript
|
|
48
|
+
import { Pool } from 'pg';
|
|
49
|
+
const pool = new Pool({ connectionString: process.env.DATABASE_URL });
|
|
50
|
+
jobQueue = initJobQueue<JobPayloadMap>({ pool });
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
```typescript
|
|
54
|
+
import IORedis from 'ioredis';
|
|
55
|
+
const redis = new IORedis(process.env.REDIS_URL);
|
|
56
|
+
jobQueue = initJobQueue<JobPayloadMap>({
|
|
57
|
+
backend: 'redis',
|
|
58
|
+
client: redis,
|
|
59
|
+
keyPrefix: 'myapp:',
|
|
60
|
+
});
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
The library will **not** close externally provided connections on shutdown.
|
|
64
|
+
|
|
65
|
+
## Adding Jobs
|
|
66
|
+
|
|
67
|
+
Use `addJob` for a single job, `addJobs` for bulk inserts (single DB round-trip).
|
|
68
|
+
|
|
69
|
+
```typescript
|
|
70
|
+
const id = await queue.addJob({
|
|
71
|
+
jobType: 'send_email',
|
|
72
|
+
payload: { to: 'a@x.com', subject: 'Hi', body: '...' },
|
|
73
|
+
});
|
|
74
|
+
|
|
75
|
+
const ids = await queue.addJobs([
|
|
76
|
+
{
|
|
77
|
+
jobType: 'send_email',
|
|
78
|
+
payload: { to: 'a@x.com', subject: 'Hi', body: '...' },
|
|
79
|
+
},
|
|
80
|
+
{
|
|
81
|
+
jobType: 'send_email',
|
|
82
|
+
payload: { to: 'b@x.com', subject: 'Hi', body: '...' },
|
|
83
|
+
priority: 10,
|
|
84
|
+
},
|
|
85
|
+
]);
|
|
86
|
+
// ids[i] corresponds to the i-th input job
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
Both support `idempotencyKey`, `priority`, `runAt`, `tags`, and `{ db }` for transactional inserts (PostgreSQL only).
|
|
90
|
+
|
|
91
|
+
## Handlers
|
|
92
|
+
|
|
93
|
+
Type handlers as `JobHandlers<PayloadMap>` so TypeScript enforces a handler for every job type.
|
|
94
|
+
|
|
95
|
+
```typescript
|
|
96
|
+
export const jobHandlers: JobHandlers<JobPayloadMap> = {
|
|
97
|
+
send_email: async (payload, signal, ctx) => {
|
|
98
|
+
await sendEmail(payload.to, payload.subject, payload.body);
|
|
99
|
+
},
|
|
100
|
+
generate_report: async (payload) => {
|
|
101
|
+
await generateReport(payload.reportId, payload.userId);
|
|
102
|
+
},
|
|
103
|
+
};
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
Handler signature: `(payload: T, signal: AbortSignal, ctx: JobContext) => Promise<void>`. You can omit arguments you don't need. A value returned from the handler, if any, is stored as the job's output.
|
|
107
|
+
|
|
108
|
+
## Processing
|
|
109
|
+
|
|
110
|
+
**Serverless** — call `processor.start()` which processes one batch and stops:
|
|
111
|
+
|
|
112
|
+
```typescript
|
|
113
|
+
const processor = queue.createProcessor(handlers, {
|
|
114
|
+
batchSize: 10,
|
|
115
|
+
concurrency: 3,
|
|
116
|
+
});
|
|
117
|
+
await processor.start();
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
**Long-running** — call `processor.startInBackground()` which polls continuously, and `createSupervisor()` to automate maintenance:
|
|
121
|
+
|
|
122
|
+
```typescript
|
|
123
|
+
processor.startInBackground();
|
|
124
|
+
|
|
125
|
+
const supervisor = queue.createSupervisor({
|
|
126
|
+
intervalMs: 60_000,
|
|
127
|
+
stuckJobsTimeoutMinutes: 10,
|
|
128
|
+
cleanupJobsDaysToKeep: 30,
|
|
129
|
+
});
|
|
130
|
+
supervisor.startInBackground();
|
|
131
|
+
|
|
132
|
+
process.on('SIGTERM', async () => {
|
|
133
|
+
await Promise.all([
|
|
134
|
+
processor.stopAndDrain(30000),
|
|
135
|
+
supervisor.stopAndDrain(30000),
|
|
136
|
+
]);
|
|
137
|
+
  await queue.getPool().end(); // or await queue.getRedisClient().quit() for Redis
|
|
138
|
+
process.exit(0);
|
|
139
|
+
});
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
## Retry Configuration
|
|
143
|
+
|
|
144
|
+
Control retry behavior per-job with optional fields on `addJob`:
|
|
145
|
+
|
|
146
|
+
- `retryDelay` (seconds, default 60) — base delay between retries.
|
|
147
|
+
- `retryBackoff` (boolean, default true) — enable exponential backoff with jitter.
|
|
148
|
+
- `retryDelayMax` (seconds, optional) — cap the maximum delay.
|
|
149
|
+
|
|
150
|
+
When none are set, the legacy `2^attempts * 60s` formula is used.
|
|
151
|
+
|
|
152
|
+
## Common Mistakes
|
|
153
|
+
|
|
154
|
+
1. Creating `initJobQueue` per request — use a singleton.
|
|
155
|
+
2. Missing handler for a job type — fails with `NoHandler`. Type as `JobHandlers<PayloadMap>`.
|
|
156
|
+
3. Not checking `signal.aborted` in long handlers — timed-out jobs keep running.
|
|
157
|
+
4. Skipping maintenance — use `createSupervisor()` to automate reclaim, cleanup, and token expiry. Without it, stuck jobs and old data accumulate.
|
|
158
|
+
5. Skipping migrations (PostgreSQL) — run `dataqueue-cli migrate` first. Redis needs none.
|
|
159
|
+
6. Using `stop()` instead of `stopAndDrain()` — leaves in-flight jobs stuck.
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
# DataQueue — React & Dashboard Rules
|
|
2
|
+
|
|
3
|
+
## React SDK (@nicnocquee/dataqueue-react)
|
|
4
|
+
|
|
5
|
+
Install: `npm install @nicnocquee/dataqueue-react` (requires React 18+).
|
|
6
|
+
|
|
7
|
+
### useJob Hook
|
|
8
|
+
|
|
9
|
+
```tsx
|
|
10
|
+
'use client';
|
|
11
|
+
import { useJob } from '@nicnocquee/dataqueue-react';
|
|
12
|
+
|
|
13
|
+
const { status, progress, output, data, isLoading, error } = useJob(jobId, {
|
|
14
|
+
fetcher: (id) =>
|
|
15
|
+
fetch(`/api/jobs/${id}`)
|
|
16
|
+
.then((r) => r.json())
|
|
17
|
+
.then((d) => d.job),
|
|
18
|
+
pollingInterval: 1000,
|
|
19
|
+
onComplete: (job) => {
|
|
20
|
+
/* job completed */
|
|
21
|
+
},
|
|
22
|
+
onFailed: (job) => {
|
|
23
|
+
/* job failed */
|
|
24
|
+
},
|
|
25
|
+
});
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
Polling auto-stops on terminal statuses (`completed`, `failed`, `cancelled`).
|
|
29
|
+
|
|
30
|
+
### DataqueueProvider
|
|
31
|
+
|
|
32
|
+
Wrap app in `DataqueueProvider` to share `fetcher` and `pollingInterval`:
|
|
33
|
+
|
|
34
|
+
```tsx
|
|
35
|
+
<DataqueueProvider fetcher={fetcher} pollingInterval={2000}>
|
|
36
|
+
{children}
|
|
37
|
+
</DataqueueProvider>
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
### API Route (Next.js)
|
|
41
|
+
|
|
42
|
+
```typescript
|
|
43
|
+
// app/api/jobs/[id]/route.ts
|
|
44
|
+
export async function GET(
|
|
45
|
+
_req: Request,
|
|
46
|
+
{ params }: { params: Promise<{ id: string }> },
|
|
47
|
+
) {
|
|
48
|
+
const { id } = await params;
|
|
49
|
+
const job = await getJobQueue().getJob(Number(id));
|
|
50
|
+
if (!job) return NextResponse.json({ error: 'Not found' }, { status: 404 });
|
|
51
|
+
return NextResponse.json({ job });
|
|
52
|
+
}
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
## Dashboard (@nicnocquee/dataqueue-dashboard)
|
|
56
|
+
|
|
57
|
+
Install: `npm install @nicnocquee/dataqueue-dashboard`.
|
|
58
|
+
|
|
59
|
+
### Setup (Next.js App Router)
|
|
60
|
+
|
|
61
|
+
```typescript
|
|
62
|
+
// app/admin/dataqueue/[[...path]]/route.ts
|
|
63
|
+
import { createDataqueueDashboard } from '@nicnocquee/dataqueue-dashboard/next';
|
|
64
|
+
import { getJobQueue, jobHandlers } from '@/lib/queue';
|
|
65
|
+
|
|
66
|
+
const { GET, POST } = createDataqueueDashboard({
|
|
67
|
+
jobQueue: getJobQueue(),
|
|
68
|
+
jobHandlers,
|
|
69
|
+
basePath: '/admin/dataqueue',
|
|
70
|
+
});
|
|
71
|
+
|
|
72
|
+
export { GET, POST };
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
`basePath` must match the route directory path.
|
|
76
|
+
|
|
77
|
+
### Protection
|
|
78
|
+
|
|
79
|
+
Wrap handlers with your auth middleware before exporting GET/POST.
|
|
80
|
+
|
|
81
|
+
## Progress Tracking
|
|
82
|
+
|
|
83
|
+
Use `ctx.setProgress(percent)` in handlers (0–100). The value appears in `useJob`'s `progress` field and the dashboard detail view.
|
|
84
|
+
|
|
85
|
+
## Job Output
|
|
86
|
+
|
|
87
|
+
Store results via `ctx.setOutput(data)` or by returning a value from the handler. The value appears in `useJob`'s `output` field and the dashboard detail view. If both are used, `ctx.setOutput()` takes precedence.
|