@clipboard-health/mongo-jobs 0.3.1 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +551 -0
- package/package.json +1 -1
package/README.md
CHANGED
@@ -107,6 +107,557 @@ await backgroundJobs.start(["emails"], {

## Usage

### Creating a job

Jobs are defined as classes that implement the `HandlerInterface`:

<embedex source="packages/mongo-jobs/examples/usage/myJob.ts">

```ts
import type { BackgroundJobType, HandlerInterface } from "@clipboard-health/mongo-jobs";

export interface MyJobData {
  userId: string;
  action: string;
}

export class MyJob implements HandlerInterface<MyJobData> {
  // Required: unique name for this job type
  public name = "MyJob";

  // Optional: max retry attempts (default: 10)
  public maxAttempts = 5;

  // Required: the actual job logic
  async perform(data: MyJobData, job?: BackgroundJobType<MyJobData>) {
    // Job implementation
    console.log(`Processing ${data.action} for user ${data.userId}`);

    // Optional: access job metadata
    if (job) {
      console.log(`Job ID: ${job._id.toString()}`);
      console.log(`Attempt: ${job.attemptsCount}`);
    }
  }
}
```

</embedex>

#### Job handler options

- **`name`** (required): Unique identifier for the job type
- **`maxAttempts`** (optional): Maximum number of retry attempts before marking the job as failed. Default is 10. Uses exponential backoff: 2^attempt seconds between retries
- **`perform`** (required): Async function that executes the job logic
  - `data`: The job payload passed when enqueueing
  - `job`: Optional metadata about the job execution (id, attempts, timestamps, etc.)
### Registering jobs

Register job handlers with the `BackgroundJobs` instance and assign them to processing groups:

<embedex source="packages/mongo-jobs/examples/usage/registerJobs.ts">

```ts
import { BackgroundJobs } from "@clipboard-health/mongo-jobs";

import { CleanupJob } from "./jobs/cleanupJob";
import { EmailJob } from "./jobs/emailJob";
import { ReportJob } from "./jobs/reportJob";
import { SmsJob } from "./jobs/smsJob";

const backgroundJobs = new BackgroundJobs();

// Register jobs to groups
backgroundJobs.register(EmailJob, "notifications");
backgroundJobs.register(ReportJob, "reports");
backgroundJobs.register(CleanupJob, "maintenance");

// You can register multiple jobs to the same group
backgroundJobs.register(SmsJob, "notifications");
```

</embedex>

Groups allow you to:

- Organize related jobs together
- Run dedicated workers for specific job types
- Control concurrency per group
- Scale different job types independently (see the sketch below)
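
For example, you might run dedicated worker processes per group so each can scale on its own. A minimal sketch, reusing `start` from the examples above; the group names and concurrency values are illustrative:

```ts
import { backgroundJobs } from "./jobsRegistry";

// Process A: user-facing notifications, higher concurrency
await backgroundJobs.start(["notifications"], { maxConcurrency: 20 });

// Process B: heavy report generation, lower concurrency (run in a separate process)
// await backgroundJobs.start(["reports"], { maxConcurrency: 2 });
```
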
#### Jobs with dependencies

If your job requires dependencies (like services, database connections, etc.) passed through the constructor, you must register an instance instead of the class:

<embedex source="packages/mongo-jobs/examples/usage/registerJobsWithDependencies.ts">

```ts
import { BackgroundJobs } from "@clipboard-health/mongo-jobs";

import { EmailServiceJob } from "./jobs/emailServiceJob";

const backgroundJobs = new BackgroundJobs();

// For jobs with constructor dependencies, register an instance
const emailService = {
  async send(to: string, subject: string, body: string) {
    console.log(`Sending email to ${to}: ${subject}`);
  },
};

backgroundJobs.register(new EmailServiceJob(emailService), "notifications");
```

</embedex>

Example job with dependencies:

<embedex source="packages/mongo-jobs/examples/usage/jobs/emailServiceJob.ts">

```ts
import type { HandlerInterface } from "@clipboard-health/mongo-jobs";

interface EmailService {
  send(to: string, subject: string, body: string): Promise<void>;
}

export interface EmailServiceJobData {
  to: string;
  subject: string;
  body: string;
}

export class EmailServiceJob implements HandlerInterface<EmailServiceJobData> {
  public name = "EmailServiceJob";
  public maxAttempts = 3;

  constructor(private readonly emailService: EmailService) {}

  async perform({ to, subject, body }: EmailServiceJobData) {
    await this.emailService.send(to, subject, body);
  }
}
```

</embedex>

**Important**: When registering job instances, the library will use the instance directly rather than instantiating the class. This means:

- The same instance is used for all job executions in this process
- Dependencies are shared across all executions
- Your job class should be stateless (all state should come from the `data` parameter)

**Note**: Even when registering an instance, you can still enqueue jobs using the class, instance, or handler name:

```ts
// All of these work, regardless of whether you registered a class or instance
await backgroundJobs.enqueue(EmailServiceJob, data); // By class
await backgroundJobs.enqueue(emailServiceJobInstance, data); // By instance
await backgroundJobs.enqueue("EmailServiceJob", data); // By name
```

The enqueued class/instance/name is only used to look up the registered handler. The **registered** instance is always used for execution, not the instance passed to `enqueue()`.
### Enqueuing jobs

Add jobs to the queue for processing:

<embedex source="packages/mongo-jobs/examples/usage/enqueueBasic.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";
import { MyJob } from "./myJob";

// Basic enqueue
await backgroundJobs.enqueue(MyJob, {
  userId: "123",
  action: "process",
});
```

</embedex>

<embedex source="packages/mongo-jobs/examples/usage/enqueueWithOptions.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";
import { MyJob } from "./myJob";

// Enqueue with options
await backgroundJobs.enqueue(
  MyJob,
  { userId: "123", action: "process" },
  {
    // Schedule for later
    startAt: new Date("2024-12-31T23:59:59Z"),

    // Ensure uniqueness (see uniqueness section below)
    unique: "user-123-process",

    // Use within a MongoDB transaction
    session: mongoSession,
  },
);
```

</embedex>

<embedex source="packages/mongo-jobs/examples/usage/enqueueByName.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";

// Enqueue by job name (when handler is already registered)
await backgroundJobs.enqueue("MyJob", { userId: "123", action: "process" });
```

</embedex>

#### Enqueue options

- **`startAt`**: Schedule the job to run at a specific time. Default is immediate
- **`unique`**: Ensure only one instance of the job exists (see Job uniqueness section)
- **`session`**: MongoDB session for transactional job creation (see the sketch below)
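
For example, passing a session lets the job be created only if the surrounding database writes commit. A sketch assuming the official MongoDB Node.js driver; the collection, filter, and data are illustrative:

```ts
import { MongoClient } from "mongodb";

import { backgroundJobs } from "./jobsRegistry";
import { MyJob } from "./myJob";

const client = new MongoClient("mongodb://localhost:27017");
await client.connect();
const session = client.startSession();

try {
  await session.withTransaction(async () => {
    // The write and the job are committed or rolled back together
    await client
      .db("app")
      .collection("users")
      .updateOne({ _id: "123" }, { $set: { status: "processing" } }, { session });

    await backgroundJobs.enqueue(MyJob, { userId: "123", action: "process" }, { session });
  });
} finally {
  await session.endSession();
}
```
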
### Starting a worker

Start processing jobs from one or more groups:

<embedex source="packages/mongo-jobs/examples/usage/startWorkerBasic.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";

// Start a worker for specific groups
await backgroundJobs.start(["notifications", "reports"], {
  maxConcurrency: 20,
});
```

</embedex>

<embedex source="packages/mongo-jobs/examples/usage/startWorkerWithOptions.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";

// Start with all available options
await backgroundJobs.start(["notifications"], {
  // Maximum concurrent jobs (default: 10)
  maxConcurrency: 10,

  // Time to wait when no jobs available, in ms (default: 10000)
  newJobCheckWaitMS: 5000,

  // Use MongoDB change streams for instant job detection (default: true)
  useChangeStream: true,

  // Lock timeout for stuck jobs, in ms (default: 600000 = 10 minutes)
  lockTimeoutMS: 300000,

  // Interval to check for stuck jobs, in ms (default: 60000 = 1 minute)
  unlockJobsIntervalMS: 30000,

  // Interval to refresh queue list, in ms (default: 30000 = 30 seconds)
  refreshQueuesIntervalMS: 60000,

  // Exclude specific queues from processing
  exclude: ["low-priority-queue"],
});
```

</embedex>

<embedex source="packages/mongo-jobs/examples/usage/stopWorker.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";

// Graceful shutdown
await backgroundJobs.stop(30000); // Wait up to 30 seconds for jobs to complete
```

</embedex>

#### Worker options

- **`maxConcurrency`**: Number of jobs to process simultaneously
- **`useChangeStream`**: Enable instant job detection using MongoDB change streams. When `true`, workers are notified immediately when new jobs are added
- **`newJobCheckWaitMS`**: Fallback polling interval when no jobs are available
- **`lockTimeoutMS`**: Maximum time a job can be locked before being considered stuck
- **`unlockJobsIntervalMS`**: How often to check for and unlock stuck jobs
- **`refreshQueuesIntervalMS`**: How often to refresh the list of queues to consume
- **`exclude`**: Array of queue names to skip processing
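
In a long-running worker process you would typically tie `stop` to process signals so deploys drain in-flight jobs. A sketch using the `start` and `stop` calls shown above; the timeout is illustrative:

```ts
import { backgroundJobs } from "./jobsRegistry";

await backgroundJobs.start(["notifications"], { maxConcurrency: 10 });

// Drain in-flight jobs (up to 30 seconds) before exiting, e.g. on a rolling deploy
process.on("SIGTERM", async () => {
  await backgroundJobs.stop(30000);
  process.exit(0);
});
```
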
### Cron jobs

Schedule recurring jobs using cron expressions:

<embedex source="packages/mongo-jobs/examples/usage/cronRegister.ts">

```ts
import { BackgroundJobs } from "@clipboard-health/mongo-jobs";

import { DailyReportJob } from "./jobs/dailyReportJob";

const backgroundJobs = new BackgroundJobs();

// Register a cron job
await backgroundJobs.registerCron(DailyReportJob, {
  // Group assignment (same as regular registration)
  group: "reports",

  // Unique name for this schedule
  scheduleName: "daily-report",

  // Cron expression (standard 5-field format)
  cronExpression: "0 9 * * *", // Every day at 9 AM

  // Optional: timezone for cron evaluation (default: "utc")
  timeZone: "America/New_York",

  // Data to pass to each job execution
  data: { reportType: "daily" },
});
```

</embedex>
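
The `DailyReportJob` imported above is not shown in this README; a minimal sketch of what such a handler could look like, following the `HandlerInterface` shape from the earlier examples (the field names are illustrative):

```ts
import type { HandlerInterface } from "@clipboard-health/mongo-jobs";

export interface DailyReportJobData {
  reportType: string;
}

export class DailyReportJob implements HandlerInterface<DailyReportJobData> {
  public name = "DailyReportJob";

  async perform(data: DailyReportJobData) {
    // Generate and deliver the report here
    console.log(`Generating ${data.reportType} report`);
  }
}
```
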
<embedex source="packages/mongo-jobs/examples/usage/cronRemove.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";

// Remove a cron schedule and its pending jobs
await backgroundJobs.removeCron("daily-report");
```

</embedex>

#### Cron scheduling details

- Uses standard 5-field cron expressions: `minute hour day month weekday`
- Automatically enqueues the next job after the current one completes
- Updates to cron schedules automatically cancel pending jobs and reschedule
- Failed cron jobs are retried according to `maxAttempts`, but the next scheduled job will still be enqueued
- Each scheduled execution is a unique job instance

#### Removing cron schedules

**Important**: When you register a cron schedule, it is persisted in the database. Even if you remove the schedule registration from your code, it will continue executing. To stop a cron schedule, you must explicitly remove it using the `removeCron` API:

```ts
await backgroundJobs.removeCron("daily-report");
```

This will:

- Delete the schedule from the database
- Cancel all pending jobs that were created by this schedule
- Prevent future jobs from being scheduled

### Job uniqueness

Prevent duplicate jobs from being enqueued or running simultaneously:

<embedex source="packages/mongo-jobs/examples/usage/uniqueSimple.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";
import { ProcessUserJob } from "./jobs/processUserJob";

// Simple uniqueness - single unique key for both enqueued and running
await backgroundJobs.enqueue(
  ProcessUserJob,
  { userId: "123" },
  {
    unique: "process-user-123",
  },
);
```

</embedex>
#### Advanced uniqueness

You can use separate keys for the enqueued and running states. While a job waits in the queue, its `enqueuedKey` prevents another copy from being enqueued. Once the job starts running, it switches to its `runningKey`, so a new job with the same `enqueuedKey` can be enqueued; it simply waits to execute until the running one finishes.

This is useful for things like cache recalculation. You don't want more than one non-running recalculation job in the queue, since duplicates would just pile up. But if another trigger arrives while a recalculation is already running, you do want to enqueue one more, because the in-flight recalculation may not include the newest change.
<embedex source="packages/mongo-jobs/examples/usage/uniqueAdvanced.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";
import { ProcessUserJob } from "./jobs/processUserJob";

// Advanced uniqueness - separate keys for enqueued vs running states
await backgroundJobs.enqueue(
  ProcessUserJob,
  { userId: "123" },
  {
    unique: {
      // Only one enqueued job per user
      enqueuedKey: "process-user-123",

      // Only one running job per user
      runningKey: "process-user-123-running",
    },
  },
);
```

</embedex>

<embedex source="packages/mongo-jobs/examples/usage/uniqueMultipleEnqueued.ts">

```ts
import { backgroundJobs } from "./jobsRegistry";
import { SendEmailJob } from "./jobs/sendEmailJob";

// Example: Allow multiple enqueued but only one running
await backgroundJobs.enqueue(
  SendEmailJob,
  { userId: "123", emailType: "welcome" },
  {
    unique: {
      enqueuedKey: undefined, // Allow multiple enqueued emails
      runningKey: "send-email-123", // But only one sending at a time
    },
  },
);
```

</embedex>

#### Uniqueness behavior

- **Enqueued uniqueness**: Prevents duplicate jobs from being added to the queue. If a job with the same `enqueuedKey` already exists and hasn't started, the new enqueue returns `undefined`
- **Running uniqueness**: When a job starts, its unique key transitions from `enqueuedKey` to `runningKey`. This prevents multiple instances from running simultaneously
- If a duplicate unique key is detected, the operation silently fails and returns `undefined` (see the sketch below)
- Uniqueness is enforced via MongoDB unique index on the `uniqueKey` field
- Cron jobs automatically use unique keys based on schedule name and timestamp
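
Because a duplicate enqueue resolves to `undefined` rather than throwing, callers that care can check the return value. A sketch based on the behavior described above:

```ts
import { backgroundJobs } from "./jobsRegistry";
import { ProcessUserJob } from "./jobs/processUserJob";

const job = await backgroundJobs.enqueue(
  ProcessUserJob,
  { userId: "123" },
  { unique: "process-user-123" },
);

if (job === undefined) {
  // A job with the same unique key is already enqueued; nothing new was created
  console.log("Duplicate suppressed");
}
```
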
## Observability

### Metrics

The library automatically reports metrics using StatsD by default. Metrics are reported every 60 seconds for each queue and include:

- **`background_jobs.queue.scheduled`** - Number of jobs scheduled for future execution
- **`background_jobs.queue.pending`** - Number of jobs ready to be processed
- **`background_jobs.queue.created`** - Total jobs (scheduled + pending)
- **`background_jobs.queue.failed`** - Number of jobs that exhausted all retry attempts
- **`background_jobs.queue.retry`** - Counter incremented when a job is retried
- **`background_jobs.queue.expired`** - Counter incremented when a job lock expires (stuck jobs)
- **`background_jobs.queue.delay`** - Timing metric for execution delay (time between `nextRunAt` and actual execution)

All metrics are tagged with `queue` to identify which queue the metric belongs to.

#### Custom metrics reporter

You can provide a custom metrics reporter by implementing the `MetricsReporter` interface:

```ts
import { BackgroundJobs, type MetricsReporter } from "@clipboard-health/mongo-jobs";

class CustomMetricsReporter implements MetricsReporter {
  gauge(name: string, value: number, tags: Record<string, string>): void {
    // Report gauge metric
    console.log(`Gauge: ${name} = ${value}`, tags);
  }

  increment(name: string, tags: Record<string, string>): void {
    // Report counter increment
    console.log(`Increment: ${name}`, tags);
  }

  timing(name: string, value: number | Date, tags: Record<string, string>): void {
    // Report timing metric
    console.log(`Timing: ${name} = ${value}`, tags);
  }
}

const backgroundJobs = new BackgroundJobs({
  metricsReporter: new CustomMetricsReporter(),
});
```

#### StatsD configuration

The default metrics reporter uses the `hot-shots` StatsD client. You can configure it by passing options:

```ts
import { BackgroundJobs, defaultMetricsReporter } from "@clipboard-health/mongo-jobs";

const backgroundJobs = new BackgroundJobs({
  metricsReporter: defaultMetricsReporter({
    host: "localhost",
    port: 8125,
    globalTags: { env: "production" },
  }),
});
```

### OpenTelemetry tracing

The library provides built-in OpenTelemetry distributed tracing support. Traces are automatically created for job enqueueing (producer) and execution (consumer), allowing you to track jobs across your distributed system.

#### Trace spans

Three types of spans are created:

1. **Producer spans** (`background-jobs.producer`) - Created when a job is enqueued
   - Kind: `PRODUCER`
   - Attributes include: messaging system, operation, destination (handler name), queue name

2. **Consumer spans** (`background-jobs.consumer`) - Created when a job is executed
   - Kind: `CONSUMER`
   - Linked to the producer span for distributed tracing
   - Attributes include: message ID, handler name, queue, attempt count, timestamps

3. **Internal spans** (`background-jobs.internals`) - Created for internal operations
   - Kind: `INTERNAL`
   - Used for operations like fetching jobs, reporting metrics, etc.
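
A handler can also attach its own attributes to whatever span is active while `perform` runs, using the standard OpenTelemetry API. Whether the consumer span is active inside the handler is an assumption here; the call below is safe either way because it is a no-op when no span is active:

```ts
import { trace } from "@opentelemetry/api";
import type { HandlerInterface } from "@clipboard-health/mongo-jobs";

export class TracedJob implements HandlerInterface<{ userId: string }> {
  public name = "TracedJob";

  async perform(data: { userId: string }) {
    // Add a custom attribute to the active span, if any (undefined-safe)
    trace.getActiveSpan()?.setAttribute("app.user_id", data.userId);
  }
}
```
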
#### Setting up OpenTelemetry

To enable tracing, configure the OpenTelemetry SDK in your application:

```ts
import { NodeSDK } from "@opentelemetry/sdk-node";
import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";

const sdk = new NodeSDK({
  traceExporter: new OTLPTraceExporter({
    url: "http://localhost:4318/v1/traces",
  }),
  instrumentations: [getNodeAutoInstrumentations()],
});

sdk.start();
```

#### Distributed tracing

When a job is enqueued, trace context is automatically injected into the job data via the `_traceHeaders` field. When the job is executed, this context is extracted to link the consumer span to the producer span, enabling end-to-end trace visibility.

```text
HTTP Request → Enqueue Job (Producer Span)
                     ↓
              [Job in Queue]
                     ↓
       Execute Job (Consumer Span) → Your Handler
```
## License

MIT
package/package.json
CHANGED