@gravito/stream 1.0.1 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +50 -18
- package/dist/index.d.cts +71 -83
- package/dist/index.d.ts +71 -83
- package/dist/index.js +50 -18
- package/package.json +3 -3
package/dist/index.cjs
CHANGED
|
@@ -2198,6 +2198,7 @@ var OrbitStream = class _OrbitStream {
|
|
|
2198
2198
|
*/
|
|
2199
2199
|
install(core) {
|
|
2200
2200
|
this.queueManager = new QueueManager(this.options);
|
|
2201
|
+
core.container.instance("queue", this.queueManager);
|
|
2201
2202
|
core.adapter.use("*", async (c, next) => {
|
|
2202
2203
|
if (this.queueManager && this.options.connections) {
|
|
2203
2204
|
for (const [name, config] of Object.entries(this.options.connections)) {
|
|
@@ -2220,8 +2221,7 @@ var OrbitStream = class _OrbitStream {
|
|
|
2220
2221
|
}
|
|
2221
2222
|
}
|
|
2222
2223
|
c.set("queue", this.queueManager);
|
|
2223
|
-
await next();
|
|
2224
|
-
return void 0;
|
|
2224
|
+
return await next();
|
|
2225
2225
|
});
|
|
2226
2226
|
core.logger.info("[OrbitStream] Installed");
|
|
2227
2227
|
if (this.options.autoStartWorker && process.env.NODE_ENV === "development" && this.options.workerOptions) {
|
|
@@ -2372,9 +2372,15 @@ var MySQLPersistence = class {
|
|
|
2372
2372
|
*/
|
|
2373
2373
|
async listLogs(options = {}) {
|
|
2374
2374
|
let query = this.db.table(this.logsTable);
|
|
2375
|
-
if (options.level)
|
|
2376
|
-
|
|
2377
|
-
|
|
2375
|
+
if (options.level) {
|
|
2376
|
+
query = query.where("level", options.level);
|
|
2377
|
+
}
|
|
2378
|
+
if (options.workerId) {
|
|
2379
|
+
query = query.where("worker_id", options.workerId);
|
|
2380
|
+
}
|
|
2381
|
+
if (options.queue) {
|
|
2382
|
+
query = query.where("queue", options.queue);
|
|
2383
|
+
}
|
|
2378
2384
|
if (options.search) {
|
|
2379
2385
|
query = query.where("message", "like", `%${options.search}%`);
|
|
2380
2386
|
}
|
|
@@ -2391,9 +2397,15 @@ var MySQLPersistence = class {
|
|
|
2391
2397
|
*/
|
|
2392
2398
|
async countLogs(options = {}) {
|
|
2393
2399
|
let query = this.db.table(this.logsTable);
|
|
2394
|
-
if (options.level)
|
|
2395
|
-
|
|
2396
|
-
|
|
2400
|
+
if (options.level) {
|
|
2401
|
+
query = query.where("level", options.level);
|
|
2402
|
+
}
|
|
2403
|
+
if (options.workerId) {
|
|
2404
|
+
query = query.where("worker_id", options.workerId);
|
|
2405
|
+
}
|
|
2406
|
+
if (options.queue) {
|
|
2407
|
+
query = query.where("queue", options.queue);
|
|
2408
|
+
}
|
|
2397
2409
|
if (options.search) {
|
|
2398
2410
|
query = query.where("message", "like", `%${options.search}%`);
|
|
2399
2411
|
}
|
|
@@ -2446,7 +2458,9 @@ var MySQLPersistence = class {
|
|
|
2446
2458
|
}
|
|
2447
2459
|
async setupJobsTable() {
|
|
2448
2460
|
const exists = await import_atlas.Schema.hasTable(this.table);
|
|
2449
|
-
if (exists)
|
|
2461
|
+
if (exists) {
|
|
2462
|
+
return;
|
|
2463
|
+
}
|
|
2450
2464
|
await import_atlas.Schema.create(this.table, (table) => {
|
|
2451
2465
|
table.id();
|
|
2452
2466
|
table.string("job_id", 64);
|
|
@@ -2465,7 +2479,9 @@ var MySQLPersistence = class {
|
|
|
2465
2479
|
}
|
|
2466
2480
|
async setupLogsTable() {
|
|
2467
2481
|
const exists = await import_atlas.Schema.hasTable(this.logsTable);
|
|
2468
|
-
if (exists)
|
|
2482
|
+
if (exists) {
|
|
2483
|
+
return;
|
|
2484
|
+
}
|
|
2469
2485
|
await import_atlas.Schema.create(this.logsTable, (table) => {
|
|
2470
2486
|
table.id();
|
|
2471
2487
|
table.string("level", 20);
|
|
@@ -2595,9 +2611,15 @@ var SQLitePersistence = class {
|
|
|
2595
2611
|
*/
|
|
2596
2612
|
async listLogs(options = {}) {
|
|
2597
2613
|
let query = this.db.table(this.logsTable);
|
|
2598
|
-
if (options.level)
|
|
2599
|
-
|
|
2600
|
-
|
|
2614
|
+
if (options.level) {
|
|
2615
|
+
query = query.where("level", options.level);
|
|
2616
|
+
}
|
|
2617
|
+
if (options.workerId) {
|
|
2618
|
+
query = query.where("worker_id", options.workerId);
|
|
2619
|
+
}
|
|
2620
|
+
if (options.queue) {
|
|
2621
|
+
query = query.where("queue", options.queue);
|
|
2622
|
+
}
|
|
2601
2623
|
if (options.search) {
|
|
2602
2624
|
query = query.where("message", "like", `%${options.search}%`);
|
|
2603
2625
|
}
|
|
@@ -2614,9 +2636,15 @@ var SQLitePersistence = class {
|
|
|
2614
2636
|
*/
|
|
2615
2637
|
async countLogs(options = {}) {
|
|
2616
2638
|
let query = this.db.table(this.logsTable);
|
|
2617
|
-
if (options.level)
|
|
2618
|
-
|
|
2619
|
-
|
|
2639
|
+
if (options.level) {
|
|
2640
|
+
query = query.where("level", options.level);
|
|
2641
|
+
}
|
|
2642
|
+
if (options.workerId) {
|
|
2643
|
+
query = query.where("worker_id", options.workerId);
|
|
2644
|
+
}
|
|
2645
|
+
if (options.queue) {
|
|
2646
|
+
query = query.where("queue", options.queue);
|
|
2647
|
+
}
|
|
2620
2648
|
if (options.search) {
|
|
2621
2649
|
query = query.where("message", "like", `%${options.search}%`);
|
|
2622
2650
|
}
|
|
@@ -2669,7 +2697,9 @@ var SQLitePersistence = class {
|
|
|
2669
2697
|
}
|
|
2670
2698
|
async setupJobsTable() {
|
|
2671
2699
|
const exists = await import_atlas2.Schema.hasTable(this.table);
|
|
2672
|
-
if (exists)
|
|
2700
|
+
if (exists) {
|
|
2701
|
+
return;
|
|
2702
|
+
}
|
|
2673
2703
|
await import_atlas2.Schema.create(this.table, (table) => {
|
|
2674
2704
|
table.id();
|
|
2675
2705
|
table.string("job_id", 64);
|
|
@@ -2686,7 +2716,9 @@ var SQLitePersistence = class {
|
|
|
2686
2716
|
}
|
|
2687
2717
|
async setupLogsTable() {
|
|
2688
2718
|
const exists = await import_atlas2.Schema.hasTable(this.logsTable);
|
|
2689
|
-
if (exists)
|
|
2719
|
+
if (exists) {
|
|
2720
|
+
return;
|
|
2721
|
+
}
|
|
2690
2722
|
await import_atlas2.Schema.create(this.logsTable, (table) => {
|
|
2691
2723
|
table.id();
|
|
2692
2724
|
table.string("level", 20);
|
package/dist/index.d.cts
CHANGED
|
@@ -1,94 +1,59 @@
|
|
|
1
1
|
import { GravitoOrbit, PlanetCore } from '@gravito/core';
|
|
2
2
|
|
|
3
3
|
/**
|
|
4
|
-
*
|
|
4
|
+
* Represents a job that has been serialized for storage in a queue.
|
|
5
|
+
* @public
|
|
5
6
|
*/
|
|
6
7
|
interface SerializedJob {
|
|
7
|
-
/**
|
|
8
|
-
* Unique job identifier.
|
|
9
|
-
*/
|
|
8
|
+
/** Unique job identifier */
|
|
10
9
|
id: string;
|
|
11
|
-
/**
|
|
12
|
-
* Serializer type: `'json'` or `'class'`.
|
|
13
|
-
*/
|
|
10
|
+
/** Serializer type: 'json' for plain objects or 'class' for instances */
|
|
14
11
|
type: 'json' | 'class';
|
|
15
|
-
/**
|
|
16
|
-
* Serialized data.
|
|
17
|
-
*/
|
|
12
|
+
/** Serialized data string */
|
|
18
13
|
data: string;
|
|
19
|
-
/**
|
|
20
|
-
* Class name (only for `type === 'class'`).
|
|
21
|
-
*/
|
|
14
|
+
/** Fully qualified class name (only used for 'class' type) */
|
|
22
15
|
className?: string;
|
|
23
|
-
/**
|
|
24
|
-
* Created timestamp.
|
|
25
|
-
*/
|
|
16
|
+
/** Timestamp when the job was created */
|
|
26
17
|
createdAt: number;
|
|
27
|
-
/**
|
|
28
|
-
* Delay before execution (seconds).
|
|
29
|
-
*/
|
|
18
|
+
/** Optional delay in seconds before the job becomes available for processing */
|
|
30
19
|
delaySeconds?: number;
|
|
31
|
-
/**
|
|
32
|
-
* Current attempt number.
|
|
33
|
-
*/
|
|
20
|
+
/** Number of times the job has been attempted */
|
|
34
21
|
attempts?: number;
|
|
35
|
-
/**
|
|
36
|
-
* Maximum attempts.
|
|
37
|
-
*/
|
|
22
|
+
/** Maximum number of retry attempts before the job is marked as failed */
|
|
38
23
|
maxAttempts?: number;
|
|
39
|
-
/**
|
|
40
|
-
* Group ID for FIFO ordering.
|
|
41
|
-
*/
|
|
24
|
+
/** Group ID for FIFO (strictly sequential) processing */
|
|
42
25
|
groupId?: string;
|
|
43
|
-
/**
|
|
44
|
-
* Initial retry delay (seconds).
|
|
45
|
-
*/
|
|
26
|
+
/** Initial delay in seconds before first retry attempt */
|
|
46
27
|
retryAfterSeconds?: number;
|
|
47
|
-
/**
|
|
48
|
-
* Retry delay multiplier.
|
|
49
|
-
*/
|
|
28
|
+
/** Multiplier for exponential backoff on retries */
|
|
50
29
|
retryMultiplier?: number;
|
|
51
|
-
/**
|
|
52
|
-
* Last error message.
|
|
53
|
-
*/
|
|
30
|
+
/** Last error message if the job failed */
|
|
54
31
|
error?: string;
|
|
55
|
-
/**
|
|
56
|
-
* Timestamp when the job failed permanently.
|
|
57
|
-
*/
|
|
32
|
+
/** Timestamp when the job finally failed after max attempts */
|
|
58
33
|
failedAt?: number;
|
|
59
|
-
/**
|
|
60
|
-
* Job priority.
|
|
61
|
-
*/
|
|
34
|
+
/** Optional priority for the job (string or numeric) */
|
|
62
35
|
priority?: string | number;
|
|
63
36
|
}
|
|
64
37
|
/**
|
|
65
|
-
*
|
|
38
|
+
* Advanced topic options for distributed queues (e.g., Kafka).
|
|
39
|
+
* @public
|
|
66
40
|
*/
|
|
67
41
|
interface TopicOptions {
|
|
68
|
-
/**
|
|
69
|
-
* Number of partitions.
|
|
70
|
-
*/
|
|
42
|
+
/** Number of partitions for the topic */
|
|
71
43
|
partitions?: number;
|
|
72
|
-
/**
|
|
73
|
-
* Replication factor.
|
|
74
|
-
*/
|
|
44
|
+
/** Number of replicas for each partition */
|
|
75
45
|
replicationFactor?: number;
|
|
76
|
-
/**
|
|
77
|
-
* Additional config.
|
|
78
|
-
*/
|
|
46
|
+
/** Additional driver-specific configurations */
|
|
79
47
|
config?: Record<string, string>;
|
|
80
48
|
}
|
|
81
49
|
/**
|
|
82
|
-
*
|
|
50
|
+
* Configuration for a specific queue connection.
|
|
51
|
+
* @public
|
|
83
52
|
*/
|
|
84
53
|
interface QueueConnectionConfig {
|
|
85
|
-
/**
|
|
86
|
-
* Driver type.
|
|
87
|
-
*/
|
|
54
|
+
/** The driver type to use for this connection */
|
|
88
55
|
driver: 'memory' | 'database' | 'redis' | 'kafka' | 'sqs' | 'rabbitmq' | 'nats';
|
|
89
|
-
/**
|
|
90
|
-
* Driver-specific config.
|
|
91
|
-
*/
|
|
56
|
+
/** Driver-specific settings (e.g., connection string, table name) */
|
|
92
57
|
[key: string]: unknown;
|
|
93
58
|
}
|
|
94
59
|
/**
|
|
@@ -1434,41 +1399,36 @@ declare class SQSDriver implements QueueDriver {
|
|
|
1434
1399
|
}
|
|
1435
1400
|
|
|
1436
1401
|
/**
|
|
1437
|
-
*
|
|
1402
|
+
* Options for configuring OrbitStream (Queue Orbit).
|
|
1403
|
+
* @public
|
|
1438
1404
|
*/
|
|
1439
1405
|
interface OrbitStreamOptions extends QueueConfig {
|
|
1440
1406
|
/**
|
|
1441
|
-
* Whether to
|
|
1407
|
+
* Whether to automatically start an embedded worker in development mode.
|
|
1408
|
+
* Useful for simple local testing without running a separate worker process.
|
|
1442
1409
|
*/
|
|
1443
1410
|
autoStartWorker?: boolean;
|
|
1444
1411
|
/**
|
|
1445
|
-
*
|
|
1412
|
+
* Configuration for the embedded worker/consumer.
|
|
1446
1413
|
*/
|
|
1447
1414
|
workerOptions?: ConsumerOptions;
|
|
1448
1415
|
}
|
|
1449
1416
|
/**
|
|
1450
|
-
*
|
|
1451
|
-
*
|
|
1452
|
-
*
|
|
1453
|
-
* Integrates with PlanetCore and injects a `queue` service into the Photon Context.
|
|
1417
|
+
* OrbitStream provides a powerful, multi-driver queue system for Gravito.
|
|
1418
|
+
* It integrates with various backends (Redis, Database, SQS, RabbitMQ)
|
|
1419
|
+
* and supports job serialization, delayed jobs, and FIFO processing.
|
|
1454
1420
|
*
|
|
1455
1421
|
* @example
|
|
1456
1422
|
* ```typescript
|
|
1457
|
-
* const
|
|
1458
|
-
*
|
|
1459
|
-
*
|
|
1460
|
-
*
|
|
1461
|
-
*
|
|
1462
|
-
*
|
|
1463
|
-
*
|
|
1464
|
-
* })
|
|
1465
|
-
* ]
|
|
1466
|
-
* })
|
|
1467
|
-
*
|
|
1468
|
-
* // Use in a controller/handler
|
|
1469
|
-
* const queue = c.get('queue')
|
|
1470
|
-
* await queue.push(new SendEmail('user@example.com'))
|
|
1423
|
+
* const stream = new OrbitStream({
|
|
1424
|
+
* default: 'redis',
|
|
1425
|
+
* connections: {
|
|
1426
|
+
* redis: { driver: 'redis', host: 'localhost' }
|
|
1427
|
+
* }
|
|
1428
|
+
* });
|
|
1429
|
+
* core.addOrbit(stream);
|
|
1471
1430
|
* ```
|
|
1431
|
+
* @public
|
|
1472
1432
|
*/
|
|
1473
1433
|
declare class OrbitStream implements GravitoOrbit {
|
|
1474
1434
|
private options;
|
|
@@ -1695,19 +1655,47 @@ declare class SQLitePersistence implements PersistenceAdapter {
|
|
|
1695
1655
|
private setupLogsTable;
|
|
1696
1656
|
}
|
|
1697
1657
|
|
|
1658
|
+
/**
|
|
1659
|
+
* Configuration for a recurring scheduled job.
|
|
1660
|
+
*
|
|
1661
|
+
* @public
|
|
1662
|
+
* @since 3.0.0
|
|
1663
|
+
*/
|
|
1698
1664
|
interface ScheduledJobConfig {
|
|
1665
|
+
/** Unique identifier for the scheduled task. */
|
|
1699
1666
|
id: string;
|
|
1667
|
+
/** Cron expression defining the schedule (e.g., '* * * * *'). */
|
|
1700
1668
|
cron: string;
|
|
1669
|
+
/** The target queue name where the job should be pushed. */
|
|
1701
1670
|
queue: string;
|
|
1671
|
+
/** The serialized job data. */
|
|
1702
1672
|
job: SerializedJob;
|
|
1673
|
+
/** Timestamp of the last successful execution in milliseconds. */
|
|
1703
1674
|
lastRun?: number;
|
|
1675
|
+
/** Timestamp of the next scheduled execution in milliseconds. */
|
|
1704
1676
|
nextRun?: number;
|
|
1677
|
+
/** Whether the scheduled job is active. */
|
|
1705
1678
|
enabled: boolean;
|
|
1706
1679
|
}
|
|
1707
1680
|
/**
|
|
1708
|
-
* Scheduler
|
|
1681
|
+
* Scheduler manages recurring (cron) jobs in Gravito.
|
|
1682
|
+
*
|
|
1683
|
+
* It uses Redis to store schedule metadata and coordinates distributed
|
|
1684
|
+
* execution using locks to ensure jobs are triggered exactly once per interval.
|
|
1685
|
+
*
|
|
1686
|
+
* @example
|
|
1687
|
+
* ```typescript
|
|
1688
|
+
* const scheduler = new Scheduler(queueManager);
|
|
1689
|
+
* await scheduler.register({
|
|
1690
|
+
* id: 'daily-cleanup',
|
|
1691
|
+
* cron: '0 0 * * *',
|
|
1692
|
+
* queue: 'default',
|
|
1693
|
+
* job: myJob.serialize()
|
|
1694
|
+
* });
|
|
1695
|
+
* ```
|
|
1709
1696
|
*
|
|
1710
|
-
*
|
|
1697
|
+
* @public
|
|
1698
|
+
* @since 3.0.0
|
|
1711
1699
|
*/
|
|
1712
1700
|
declare class Scheduler {
|
|
1713
1701
|
private manager;
|
package/dist/index.d.ts
CHANGED
|
@@ -1,94 +1,59 @@
|
|
|
1
1
|
import { GravitoOrbit, PlanetCore } from '@gravito/core';
|
|
2
2
|
|
|
3
3
|
/**
|
|
4
|
-
*
|
|
4
|
+
* Represents a job that has been serialized for storage in a queue.
|
|
5
|
+
* @public
|
|
5
6
|
*/
|
|
6
7
|
interface SerializedJob {
|
|
7
|
-
/**
|
|
8
|
-
* Unique job identifier.
|
|
9
|
-
*/
|
|
8
|
+
/** Unique job identifier */
|
|
10
9
|
id: string;
|
|
11
|
-
/**
|
|
12
|
-
* Serializer type: `'json'` or `'class'`.
|
|
13
|
-
*/
|
|
10
|
+
/** Serializer type: 'json' for plain objects or 'class' for instances */
|
|
14
11
|
type: 'json' | 'class';
|
|
15
|
-
/**
|
|
16
|
-
* Serialized data.
|
|
17
|
-
*/
|
|
12
|
+
/** Serialized data string */
|
|
18
13
|
data: string;
|
|
19
|
-
/**
|
|
20
|
-
* Class name (only for `type === 'class'`).
|
|
21
|
-
*/
|
|
14
|
+
/** Fully qualified class name (only used for 'class' type) */
|
|
22
15
|
className?: string;
|
|
23
|
-
/**
|
|
24
|
-
* Created timestamp.
|
|
25
|
-
*/
|
|
16
|
+
/** Timestamp when the job was created */
|
|
26
17
|
createdAt: number;
|
|
27
|
-
/**
|
|
28
|
-
* Delay before execution (seconds).
|
|
29
|
-
*/
|
|
18
|
+
/** Optional delay in seconds before the job becomes available for processing */
|
|
30
19
|
delaySeconds?: number;
|
|
31
|
-
/**
|
|
32
|
-
* Current attempt number.
|
|
33
|
-
*/
|
|
20
|
+
/** Number of times the job has been attempted */
|
|
34
21
|
attempts?: number;
|
|
35
|
-
/**
|
|
36
|
-
* Maximum attempts.
|
|
37
|
-
*/
|
|
22
|
+
/** Maximum number of retry attempts before the job is marked as failed */
|
|
38
23
|
maxAttempts?: number;
|
|
39
|
-
/**
|
|
40
|
-
* Group ID for FIFO ordering.
|
|
41
|
-
*/
|
|
24
|
+
/** Group ID for FIFO (strictly sequential) processing */
|
|
42
25
|
groupId?: string;
|
|
43
|
-
/**
|
|
44
|
-
* Initial retry delay (seconds).
|
|
45
|
-
*/
|
|
26
|
+
/** Initial delay in seconds before first retry attempt */
|
|
46
27
|
retryAfterSeconds?: number;
|
|
47
|
-
/**
|
|
48
|
-
* Retry delay multiplier.
|
|
49
|
-
*/
|
|
28
|
+
/** Multiplier for exponential backoff on retries */
|
|
50
29
|
retryMultiplier?: number;
|
|
51
|
-
/**
|
|
52
|
-
* Last error message.
|
|
53
|
-
*/
|
|
30
|
+
/** Last error message if the job failed */
|
|
54
31
|
error?: string;
|
|
55
|
-
/**
|
|
56
|
-
* Timestamp when the job failed permanently.
|
|
57
|
-
*/
|
|
32
|
+
/** Timestamp when the job finally failed after max attempts */
|
|
58
33
|
failedAt?: number;
|
|
59
|
-
/**
|
|
60
|
-
* Job priority.
|
|
61
|
-
*/
|
|
34
|
+
/** Optional priority for the job (string or numeric) */
|
|
62
35
|
priority?: string | number;
|
|
63
36
|
}
|
|
64
37
|
/**
|
|
65
|
-
*
|
|
38
|
+
* Advanced topic options for distributed queues (e.g., Kafka).
|
|
39
|
+
* @public
|
|
66
40
|
*/
|
|
67
41
|
interface TopicOptions {
|
|
68
|
-
/**
|
|
69
|
-
* Number of partitions.
|
|
70
|
-
*/
|
|
42
|
+
/** Number of partitions for the topic */
|
|
71
43
|
partitions?: number;
|
|
72
|
-
/**
|
|
73
|
-
* Replication factor.
|
|
74
|
-
*/
|
|
44
|
+
/** Number of replicas for each partition */
|
|
75
45
|
replicationFactor?: number;
|
|
76
|
-
/**
|
|
77
|
-
* Additional config.
|
|
78
|
-
*/
|
|
46
|
+
/** Additional driver-specific configurations */
|
|
79
47
|
config?: Record<string, string>;
|
|
80
48
|
}
|
|
81
49
|
/**
|
|
82
|
-
*
|
|
50
|
+
* Configuration for a specific queue connection.
|
|
51
|
+
* @public
|
|
83
52
|
*/
|
|
84
53
|
interface QueueConnectionConfig {
|
|
85
|
-
/**
|
|
86
|
-
* Driver type.
|
|
87
|
-
*/
|
|
54
|
+
/** The driver type to use for this connection */
|
|
88
55
|
driver: 'memory' | 'database' | 'redis' | 'kafka' | 'sqs' | 'rabbitmq' | 'nats';
|
|
89
|
-
/**
|
|
90
|
-
* Driver-specific config.
|
|
91
|
-
*/
|
|
56
|
+
/** Driver-specific settings (e.g., connection string, table name) */
|
|
92
57
|
[key: string]: unknown;
|
|
93
58
|
}
|
|
94
59
|
/**
|
|
@@ -1434,41 +1399,36 @@ declare class SQSDriver implements QueueDriver {
|
|
|
1434
1399
|
}
|
|
1435
1400
|
|
|
1436
1401
|
/**
|
|
1437
|
-
*
|
|
1402
|
+
* Options for configuring OrbitStream (Queue Orbit).
|
|
1403
|
+
* @public
|
|
1438
1404
|
*/
|
|
1439
1405
|
interface OrbitStreamOptions extends QueueConfig {
|
|
1440
1406
|
/**
|
|
1441
|
-
* Whether to
|
|
1407
|
+
* Whether to automatically start an embedded worker in development mode.
|
|
1408
|
+
* Useful for simple local testing without running a separate worker process.
|
|
1442
1409
|
*/
|
|
1443
1410
|
autoStartWorker?: boolean;
|
|
1444
1411
|
/**
|
|
1445
|
-
*
|
|
1412
|
+
* Configuration for the embedded worker/consumer.
|
|
1446
1413
|
*/
|
|
1447
1414
|
workerOptions?: ConsumerOptions;
|
|
1448
1415
|
}
|
|
1449
1416
|
/**
|
|
1450
|
-
*
|
|
1451
|
-
*
|
|
1452
|
-
*
|
|
1453
|
-
* Integrates with PlanetCore and injects a `queue` service into the Photon Context.
|
|
1417
|
+
* OrbitStream provides a powerful, multi-driver queue system for Gravito.
|
|
1418
|
+
* It integrates with various backends (Redis, Database, SQS, RabbitMQ)
|
|
1419
|
+
* and supports job serialization, delayed jobs, and FIFO processing.
|
|
1454
1420
|
*
|
|
1455
1421
|
* @example
|
|
1456
1422
|
* ```typescript
|
|
1457
|
-
* const
|
|
1458
|
-
*
|
|
1459
|
-
*
|
|
1460
|
-
*
|
|
1461
|
-
*
|
|
1462
|
-
*
|
|
1463
|
-
*
|
|
1464
|
-
* })
|
|
1465
|
-
* ]
|
|
1466
|
-
* })
|
|
1467
|
-
*
|
|
1468
|
-
* // Use in a controller/handler
|
|
1469
|
-
* const queue = c.get('queue')
|
|
1470
|
-
* await queue.push(new SendEmail('user@example.com'))
|
|
1423
|
+
* const stream = new OrbitStream({
|
|
1424
|
+
* default: 'redis',
|
|
1425
|
+
* connections: {
|
|
1426
|
+
* redis: { driver: 'redis', host: 'localhost' }
|
|
1427
|
+
* }
|
|
1428
|
+
* });
|
|
1429
|
+
* core.addOrbit(stream);
|
|
1471
1430
|
* ```
|
|
1431
|
+
* @public
|
|
1472
1432
|
*/
|
|
1473
1433
|
declare class OrbitStream implements GravitoOrbit {
|
|
1474
1434
|
private options;
|
|
@@ -1695,19 +1655,47 @@ declare class SQLitePersistence implements PersistenceAdapter {
|
|
|
1695
1655
|
private setupLogsTable;
|
|
1696
1656
|
}
|
|
1697
1657
|
|
|
1658
|
+
/**
|
|
1659
|
+
* Configuration for a recurring scheduled job.
|
|
1660
|
+
*
|
|
1661
|
+
* @public
|
|
1662
|
+
* @since 3.0.0
|
|
1663
|
+
*/
|
|
1698
1664
|
interface ScheduledJobConfig {
|
|
1665
|
+
/** Unique identifier for the scheduled task. */
|
|
1699
1666
|
id: string;
|
|
1667
|
+
/** Cron expression defining the schedule (e.g., '* * * * *'). */
|
|
1700
1668
|
cron: string;
|
|
1669
|
+
/** The target queue name where the job should be pushed. */
|
|
1701
1670
|
queue: string;
|
|
1671
|
+
/** The serialized job data. */
|
|
1702
1672
|
job: SerializedJob;
|
|
1673
|
+
/** Timestamp of the last successful execution in milliseconds. */
|
|
1703
1674
|
lastRun?: number;
|
|
1675
|
+
/** Timestamp of the next scheduled execution in milliseconds. */
|
|
1704
1676
|
nextRun?: number;
|
|
1677
|
+
/** Whether the scheduled job is active. */
|
|
1705
1678
|
enabled: boolean;
|
|
1706
1679
|
}
|
|
1707
1680
|
/**
|
|
1708
|
-
* Scheduler
|
|
1681
|
+
* Scheduler manages recurring (cron) jobs in Gravito.
|
|
1682
|
+
*
|
|
1683
|
+
* It uses Redis to store schedule metadata and coordinates distributed
|
|
1684
|
+
* execution using locks to ensure jobs are triggered exactly once per interval.
|
|
1685
|
+
*
|
|
1686
|
+
* @example
|
|
1687
|
+
* ```typescript
|
|
1688
|
+
* const scheduler = new Scheduler(queueManager);
|
|
1689
|
+
* await scheduler.register({
|
|
1690
|
+
* id: 'daily-cleanup',
|
|
1691
|
+
* cron: '0 0 * * *',
|
|
1692
|
+
* queue: 'default',
|
|
1693
|
+
* job: myJob.serialize()
|
|
1694
|
+
* });
|
|
1695
|
+
* ```
|
|
1709
1696
|
*
|
|
1710
|
-
*
|
|
1697
|
+
* @public
|
|
1698
|
+
* @since 3.0.0
|
|
1711
1699
|
*/
|
|
1712
1700
|
declare class Scheduler {
|
|
1713
1701
|
private manager;
|
package/dist/index.js
CHANGED
|
@@ -2171,6 +2171,7 @@ var OrbitStream = class _OrbitStream {
|
|
|
2171
2171
|
*/
|
|
2172
2172
|
install(core) {
|
|
2173
2173
|
this.queueManager = new QueueManager(this.options);
|
|
2174
|
+
core.container.instance("queue", this.queueManager);
|
|
2174
2175
|
core.adapter.use("*", async (c, next) => {
|
|
2175
2176
|
if (this.queueManager && this.options.connections) {
|
|
2176
2177
|
for (const [name, config] of Object.entries(this.options.connections)) {
|
|
@@ -2193,8 +2194,7 @@ var OrbitStream = class _OrbitStream {
|
|
|
2193
2194
|
}
|
|
2194
2195
|
}
|
|
2195
2196
|
c.set("queue", this.queueManager);
|
|
2196
|
-
await next();
|
|
2197
|
-
return void 0;
|
|
2197
|
+
return await next();
|
|
2198
2198
|
});
|
|
2199
2199
|
core.logger.info("[OrbitStream] Installed");
|
|
2200
2200
|
if (this.options.autoStartWorker && process.env.NODE_ENV === "development" && this.options.workerOptions) {
|
|
@@ -2345,9 +2345,15 @@ var MySQLPersistence = class {
|
|
|
2345
2345
|
*/
|
|
2346
2346
|
async listLogs(options = {}) {
|
|
2347
2347
|
let query = this.db.table(this.logsTable);
|
|
2348
|
-
if (options.level)
|
|
2349
|
-
|
|
2350
|
-
|
|
2348
|
+
if (options.level) {
|
|
2349
|
+
query = query.where("level", options.level);
|
|
2350
|
+
}
|
|
2351
|
+
if (options.workerId) {
|
|
2352
|
+
query = query.where("worker_id", options.workerId);
|
|
2353
|
+
}
|
|
2354
|
+
if (options.queue) {
|
|
2355
|
+
query = query.where("queue", options.queue);
|
|
2356
|
+
}
|
|
2351
2357
|
if (options.search) {
|
|
2352
2358
|
query = query.where("message", "like", `%${options.search}%`);
|
|
2353
2359
|
}
|
|
@@ -2364,9 +2370,15 @@ var MySQLPersistence = class {
|
|
|
2364
2370
|
*/
|
|
2365
2371
|
async countLogs(options = {}) {
|
|
2366
2372
|
let query = this.db.table(this.logsTable);
|
|
2367
|
-
if (options.level)
|
|
2368
|
-
|
|
2369
|
-
|
|
2373
|
+
if (options.level) {
|
|
2374
|
+
query = query.where("level", options.level);
|
|
2375
|
+
}
|
|
2376
|
+
if (options.workerId) {
|
|
2377
|
+
query = query.where("worker_id", options.workerId);
|
|
2378
|
+
}
|
|
2379
|
+
if (options.queue) {
|
|
2380
|
+
query = query.where("queue", options.queue);
|
|
2381
|
+
}
|
|
2370
2382
|
if (options.search) {
|
|
2371
2383
|
query = query.where("message", "like", `%${options.search}%`);
|
|
2372
2384
|
}
|
|
@@ -2419,7 +2431,9 @@ var MySQLPersistence = class {
|
|
|
2419
2431
|
}
|
|
2420
2432
|
async setupJobsTable() {
|
|
2421
2433
|
const exists = await Schema.hasTable(this.table);
|
|
2422
|
-
if (exists)
|
|
2434
|
+
if (exists) {
|
|
2435
|
+
return;
|
|
2436
|
+
}
|
|
2423
2437
|
await Schema.create(this.table, (table) => {
|
|
2424
2438
|
table.id();
|
|
2425
2439
|
table.string("job_id", 64);
|
|
@@ -2438,7 +2452,9 @@ var MySQLPersistence = class {
|
|
|
2438
2452
|
}
|
|
2439
2453
|
async setupLogsTable() {
|
|
2440
2454
|
const exists = await Schema.hasTable(this.logsTable);
|
|
2441
|
-
if (exists)
|
|
2455
|
+
if (exists) {
|
|
2456
|
+
return;
|
|
2457
|
+
}
|
|
2442
2458
|
await Schema.create(this.logsTable, (table) => {
|
|
2443
2459
|
table.id();
|
|
2444
2460
|
table.string("level", 20);
|
|
@@ -2568,9 +2584,15 @@ var SQLitePersistence = class {
|
|
|
2568
2584
|
*/
|
|
2569
2585
|
async listLogs(options = {}) {
|
|
2570
2586
|
let query = this.db.table(this.logsTable);
|
|
2571
|
-
if (options.level)
|
|
2572
|
-
|
|
2573
|
-
|
|
2587
|
+
if (options.level) {
|
|
2588
|
+
query = query.where("level", options.level);
|
|
2589
|
+
}
|
|
2590
|
+
if (options.workerId) {
|
|
2591
|
+
query = query.where("worker_id", options.workerId);
|
|
2592
|
+
}
|
|
2593
|
+
if (options.queue) {
|
|
2594
|
+
query = query.where("queue", options.queue);
|
|
2595
|
+
}
|
|
2574
2596
|
if (options.search) {
|
|
2575
2597
|
query = query.where("message", "like", `%${options.search}%`);
|
|
2576
2598
|
}
|
|
@@ -2587,9 +2609,15 @@ var SQLitePersistence = class {
|
|
|
2587
2609
|
*/
|
|
2588
2610
|
async countLogs(options = {}) {
|
|
2589
2611
|
let query = this.db.table(this.logsTable);
|
|
2590
|
-
if (options.level)
|
|
2591
|
-
|
|
2592
|
-
|
|
2612
|
+
if (options.level) {
|
|
2613
|
+
query = query.where("level", options.level);
|
|
2614
|
+
}
|
|
2615
|
+
if (options.workerId) {
|
|
2616
|
+
query = query.where("worker_id", options.workerId);
|
|
2617
|
+
}
|
|
2618
|
+
if (options.queue) {
|
|
2619
|
+
query = query.where("queue", options.queue);
|
|
2620
|
+
}
|
|
2593
2621
|
if (options.search) {
|
|
2594
2622
|
query = query.where("message", "like", `%${options.search}%`);
|
|
2595
2623
|
}
|
|
@@ -2642,7 +2670,9 @@ var SQLitePersistence = class {
|
|
|
2642
2670
|
}
|
|
2643
2671
|
async setupJobsTable() {
|
|
2644
2672
|
const exists = await Schema2.hasTable(this.table);
|
|
2645
|
-
if (exists)
|
|
2673
|
+
if (exists) {
|
|
2674
|
+
return;
|
|
2675
|
+
}
|
|
2646
2676
|
await Schema2.create(this.table, (table) => {
|
|
2647
2677
|
table.id();
|
|
2648
2678
|
table.string("job_id", 64);
|
|
@@ -2659,7 +2689,9 @@ var SQLitePersistence = class {
|
|
|
2659
2689
|
}
|
|
2660
2690
|
async setupLogsTable() {
|
|
2661
2691
|
const exists = await Schema2.hasTable(this.logsTable);
|
|
2662
|
-
if (exists)
|
|
2692
|
+
if (exists) {
|
|
2693
|
+
return;
|
|
2694
|
+
}
|
|
2663
2695
|
await Schema2.create(this.logsTable, (table) => {
|
|
2664
2696
|
table.id();
|
|
2665
2697
|
table.string("level", 20);
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@gravito/stream",
|
|
3
|
-
"version": "1.0.1",
|
|
3
|
+
"version": "1.0.3",
|
|
4
4
|
"publishConfig": {
|
|
5
5
|
"access": "public"
|
|
6
6
|
},
|
|
@@ -25,8 +25,8 @@
|
|
|
25
25
|
"build": "bun run build.ts",
|
|
26
26
|
"test": "bun test",
|
|
27
27
|
"typecheck": "bun tsc -p tsconfig.json --noEmit --skipLibCheck",
|
|
28
|
-
"test:coverage": "bun test --coverage --coverage-threshold=
|
|
29
|
-
"test:ci": "bun test --coverage --coverage-threshold=
|
|
28
|
+
"test:coverage": "bun test --coverage --coverage-threshold=70",
|
|
29
|
+
"test:ci": "bun test --coverage --coverage-threshold=70"
|
|
30
30
|
},
|
|
31
31
|
"keywords": [
|
|
32
32
|
"gravito",
|