@gravito/stream 1.0.2 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +49 -16
- package/dist/index.d.cts +71 -83
- package/dist/index.d.ts +71 -83
- package/dist/index.js +49 -16
- package/package.json +3 -3
package/dist/index.cjs
CHANGED

@@ -2198,6 +2198,7 @@ var OrbitStream = class _OrbitStream {
    */
   install(core) {
     this.queueManager = new QueueManager(this.options);
+    core.container.instance("queue", this.queueManager);
     core.adapter.use("*", async (c, next) => {
       if (this.queueManager && this.options.connections) {
         for (const [name, config] of Object.entries(this.options.connections)) {
@@ -2371,9 +2372,15 @@ var MySQLPersistence = class {
    */
   async listLogs(options = {}) {
     let query = this.db.table(this.logsTable);
-    if (options.level)
-
-
+    if (options.level) {
+      query = query.where("level", options.level);
+    }
+    if (options.workerId) {
+      query = query.where("worker_id", options.workerId);
+    }
+    if (options.queue) {
+      query = query.where("queue", options.queue);
+    }
     if (options.search) {
       query = query.where("message", "like", `%${options.search}%`);
     }
@@ -2390,9 +2397,15 @@ var MySQLPersistence = class {
    */
   async countLogs(options = {}) {
     let query = this.db.table(this.logsTable);
-    if (options.level)
-
-
+    if (options.level) {
+      query = query.where("level", options.level);
+    }
+    if (options.workerId) {
+      query = query.where("worker_id", options.workerId);
+    }
+    if (options.queue) {
+      query = query.where("queue", options.queue);
+    }
     if (options.search) {
       query = query.where("message", "like", `%${options.search}%`);
     }
@@ -2445,7 +2458,9 @@ var MySQLPersistence = class {
   }
   async setupJobsTable() {
     const exists = await import_atlas.Schema.hasTable(this.table);
-    if (exists)
+    if (exists) {
+      return;
+    }
     await import_atlas.Schema.create(this.table, (table) => {
       table.id();
       table.string("job_id", 64);
@@ -2464,7 +2479,9 @@ var MySQLPersistence = class {
   }
   async setupLogsTable() {
     const exists = await import_atlas.Schema.hasTable(this.logsTable);
-    if (exists)
+    if (exists) {
+      return;
+    }
     await import_atlas.Schema.create(this.logsTable, (table) => {
       table.id();
       table.string("level", 20);
@@ -2594,9 +2611,15 @@ var SQLitePersistence = class {
    */
   async listLogs(options = {}) {
     let query = this.db.table(this.logsTable);
-    if (options.level)
-
-
+    if (options.level) {
+      query = query.where("level", options.level);
+    }
+    if (options.workerId) {
+      query = query.where("worker_id", options.workerId);
+    }
+    if (options.queue) {
+      query = query.where("queue", options.queue);
+    }
     if (options.search) {
       query = query.where("message", "like", `%${options.search}%`);
     }
@@ -2613,9 +2636,15 @@ var SQLitePersistence = class {
    */
   async countLogs(options = {}) {
     let query = this.db.table(this.logsTable);
-    if (options.level)
-
-
+    if (options.level) {
+      query = query.where("level", options.level);
+    }
+    if (options.workerId) {
+      query = query.where("worker_id", options.workerId);
+    }
+    if (options.queue) {
+      query = query.where("queue", options.queue);
+    }
     if (options.search) {
       query = query.where("message", "like", `%${options.search}%`);
     }
@@ -2668,7 +2697,9 @@ var SQLitePersistence = class {
   }
   async setupJobsTable() {
     const exists = await import_atlas2.Schema.hasTable(this.table);
-    if (exists)
+    if (exists) {
+      return;
+    }
     await import_atlas2.Schema.create(this.table, (table) => {
       table.id();
       table.string("job_id", 64);
@@ -2685,7 +2716,9 @@ var SQLitePersistence = class {
   }
   async setupLogsTable() {
     const exists = await import_atlas2.Schema.hasTable(this.logsTable);
-    if (exists)
+    if (exists) {
+      return;
+    }
     await import_atlas2.Schema.create(this.logsTable, (table) => {
       table.id();
       table.string("level", 20);
package/dist/index.d.cts
CHANGED

@@ -1,94 +1,59 @@
 import { GravitoOrbit, PlanetCore } from '@gravito/core';
 
 /**
- *
+ * Represents a job that has been serialized for storage in a queue.
+ * @public
  */
 interface SerializedJob {
-    /**
-     * Unique job identifier.
-     */
+    /** Unique job identifier */
     id: string;
-    /**
-     * Serializer type: `'json'` or `'class'`.
-     */
+    /** Serializer type: 'json' for plain objects or 'class' for instances */
     type: 'json' | 'class';
-    /**
-     * Serialized data.
-     */
+    /** Serialized data string */
     data: string;
-    /**
-     * Class name (only for `type === 'class'`).
-     */
+    /** Fully qualified class name (only used for 'class' type) */
     className?: string;
-    /**
-     * Created timestamp.
-     */
+    /** Timestamp when the job was created */
     createdAt: number;
-    /**
-     * Delay before execution (seconds).
-     */
+    /** Optional delay in seconds before the job becomes available for processing */
     delaySeconds?: number;
-    /**
-     * Current attempt number.
-     */
+    /** Number of times the job has been attempted */
     attempts?: number;
-    /**
-     * Maximum attempts.
-     */
+    /** Maximum number of retry attempts before the job is marked as failed */
     maxAttempts?: number;
-    /**
-     * Group ID for FIFO ordering.
-     */
+    /** Group ID for FIFO (strictly sequential) processing */
     groupId?: string;
-    /**
-     * Initial retry delay (seconds).
-     */
+    /** Initial delay in seconds before first retry attempt */
     retryAfterSeconds?: number;
-    /**
-     * Retry delay multiplier.
-     */
+    /** Multiplier for exponential backoff on retries */
     retryMultiplier?: number;
-    /**
-     * Last error message.
-     */
+    /** Last error message if the job failed */
     error?: string;
-    /**
-     * Timestamp when the job failed permanently.
-     */
+    /** Timestamp when the job finally failed after max attempts */
     failedAt?: number;
-    /**
-     * Job priority.
-     */
+    /** Optional priority for the job (string or numeric) */
     priority?: string | number;
 }
 /**
- *
+ * Advanced topic options for distributed queues (e.g., Kafka).
+ * @public
  */
 interface TopicOptions {
-    /**
-     * Number of partitions.
-     */
+    /** Number of partitions for the topic */
    partitions?: number;
-    /**
-     * Replication factor.
-     */
+    /** Number of replicas for each partition */
    replicationFactor?: number;
-    /**
-     * Additional config.
-     */
+    /** Additional driver-specific configurations */
    config?: Record<string, string>;
 }
 /**
- *
+ * Configuration for a specific queue connection.
+ * @public
  */
 interface QueueConnectionConfig {
-    /**
-     * Driver type.
-     */
+    /** The driver type to use for this connection */
    driver: 'memory' | 'database' | 'redis' | 'kafka' | 'sqs' | 'rabbitmq' | 'nats';
-    /**
-     * Driver-specific config.
-     */
+    /** Driver-specific settings (e.g., connection string, table name) */
    [key: string]: unknown;
 }
 /**
@@ -1434,41 +1399,36 @@ declare class SQSDriver implements QueueDriver {
 }
 
 /**
- *
+ * Options for configuring OrbitStream (Queue Orbit).
+ * @public
  */
 interface OrbitStreamOptions extends QueueConfig {
     /**
-     * Whether to
+     * Whether to automatically start an embedded worker in development mode.
+     * Useful for simple local testing without running a separate worker process.
     */
     autoStartWorker?: boolean;
     /**
-     *
+     * Configuration for the embedded worker/consumer.
     */
     workerOptions?: ConsumerOptions;
 }
 /**
- *
- *
- *
- * Integrates with PlanetCore and injects a `queue` service into the Photon Context.
+ * OrbitStream provides a powerful, multi-driver queue system for Gravito.
+ * It integrates with various backends (Redis, Database, SQS, RabbitMQ)
+ * and supports job serialization, delayed jobs, and FIFO processing.
 *
 * @example
 * ```typescript
- * const
- *
- *
- *
- *
- *
- *
- * })
- * ]
- * })
- *
- * // Use in a controller/handler
- * const queue = c.get('queue')
- * await queue.push(new SendEmail('user@example.com'))
+ * const stream = new OrbitStream({
+ *   default: 'redis',
+ *   connections: {
+ *     redis: { driver: 'redis', host: 'localhost' }
+ *   }
+ * });
+ * core.addOrbit(stream);
 * ```
+ * @public
 */
 declare class OrbitStream implements GravitoOrbit {
     private options;
@@ -1695,19 +1655,47 @@ declare class SQLitePersistence implements PersistenceAdapter {
     private setupLogsTable;
 }
 
+/**
+ * Configuration for a recurring scheduled job.
+ *
+ * @public
+ * @since 3.0.0
+ */
 interface ScheduledJobConfig {
+    /** Unique identifier for the scheduled task. */
     id: string;
+    /** Cron expression defining the schedule (e.g., '* * * * *'). */
     cron: string;
+    /** The target queue name where the job should be pushed. */
     queue: string;
+    /** The serialized job data. */
     job: SerializedJob;
+    /** Timestamp of the last successful execution in milliseconds. */
     lastRun?: number;
+    /** Timestamp of the next scheduled execution in milliseconds. */
     nextRun?: number;
+    /** Whether the scheduled job is active. */
     enabled: boolean;
 }
 /**
- * Scheduler
+ * Scheduler manages recurring (cron) jobs in Gravito.
+ *
+ * It uses Redis to store schedule metadata and coordinates distributed
+ * execution using locks to ensure jobs are triggered exactly once per interval.
+ *
+ * @example
+ * ```typescript
+ * const scheduler = new Scheduler(queueManager);
+ * await scheduler.register({
+ *   id: 'daily-cleanup',
+ *   cron: '0 0 * * *',
+ *   queue: 'default',
+ *   job: myJob.serialize()
+ * });
+ * ```
 *
- *
+ * @public
+ * @since 3.0.0
 */
 declare class Scheduler {
     private manager;
package/dist/index.d.ts
CHANGED

@@ -1,94 +1,59 @@
 import { GravitoOrbit, PlanetCore } from '@gravito/core';
 
 /**
- *
+ * Represents a job that has been serialized for storage in a queue.
+ * @public
  */
 interface SerializedJob {
-    /**
-     * Unique job identifier.
-     */
+    /** Unique job identifier */
     id: string;
-    /**
-     * Serializer type: `'json'` or `'class'`.
-     */
+    /** Serializer type: 'json' for plain objects or 'class' for instances */
     type: 'json' | 'class';
-    /**
-     * Serialized data.
-     */
+    /** Serialized data string */
     data: string;
-    /**
-     * Class name (only for `type === 'class'`).
-     */
+    /** Fully qualified class name (only used for 'class' type) */
     className?: string;
-    /**
-     * Created timestamp.
-     */
+    /** Timestamp when the job was created */
     createdAt: number;
-    /**
-     * Delay before execution (seconds).
-     */
+    /** Optional delay in seconds before the job becomes available for processing */
     delaySeconds?: number;
-    /**
-     * Current attempt number.
-     */
+    /** Number of times the job has been attempted */
     attempts?: number;
-    /**
-     * Maximum attempts.
-     */
+    /** Maximum number of retry attempts before the job is marked as failed */
     maxAttempts?: number;
-    /**
-     * Group ID for FIFO ordering.
-     */
+    /** Group ID for FIFO (strictly sequential) processing */
     groupId?: string;
-    /**
-     * Initial retry delay (seconds).
-     */
+    /** Initial delay in seconds before first retry attempt */
     retryAfterSeconds?: number;
-    /**
-     * Retry delay multiplier.
-     */
+    /** Multiplier for exponential backoff on retries */
     retryMultiplier?: number;
-    /**
-     * Last error message.
-     */
+    /** Last error message if the job failed */
     error?: string;
-    /**
-     * Timestamp when the job failed permanently.
-     */
+    /** Timestamp when the job finally failed after max attempts */
     failedAt?: number;
-    /**
-     * Job priority.
-     */
+    /** Optional priority for the job (string or numeric) */
     priority?: string | number;
 }
 /**
- *
+ * Advanced topic options for distributed queues (e.g., Kafka).
+ * @public
  */
 interface TopicOptions {
-    /**
-     * Number of partitions.
-     */
+    /** Number of partitions for the topic */
    partitions?: number;
-    /**
-     * Replication factor.
-     */
+    /** Number of replicas for each partition */
    replicationFactor?: number;
-    /**
-     * Additional config.
-     */
+    /** Additional driver-specific configurations */
    config?: Record<string, string>;
 }
 /**
- *
+ * Configuration for a specific queue connection.
+ * @public
  */
 interface QueueConnectionConfig {
-    /**
-     * Driver type.
-     */
+    /** The driver type to use for this connection */
    driver: 'memory' | 'database' | 'redis' | 'kafka' | 'sqs' | 'rabbitmq' | 'nats';
-    /**
-     * Driver-specific config.
-     */
+    /** Driver-specific settings (e.g., connection string, table name) */
    [key: string]: unknown;
 }
 /**
@@ -1434,41 +1399,36 @@ declare class SQSDriver implements QueueDriver {
 }
 
 /**
- *
+ * Options for configuring OrbitStream (Queue Orbit).
+ * @public
  */
 interface OrbitStreamOptions extends QueueConfig {
     /**
-     * Whether to
+     * Whether to automatically start an embedded worker in development mode.
+     * Useful for simple local testing without running a separate worker process.
     */
     autoStartWorker?: boolean;
     /**
-     *
+     * Configuration for the embedded worker/consumer.
    */
     workerOptions?: ConsumerOptions;
 }
 /**
- *
- *
- *
- * Integrates with PlanetCore and injects a `queue` service into the Photon Context.
+ * OrbitStream provides a powerful, multi-driver queue system for Gravito.
+ * It integrates with various backends (Redis, Database, SQS, RabbitMQ)
+ * and supports job serialization, delayed jobs, and FIFO processing.
 *
 * @example
 * ```typescript
- * const
- *
- *
- *
- *
- *
- *
- * })
- * ]
- * })
- *
- * // Use in a controller/handler
- * const queue = c.get('queue')
- * await queue.push(new SendEmail('user@example.com'))
+ * const stream = new OrbitStream({
+ *   default: 'redis',
+ *   connections: {
+ *     redis: { driver: 'redis', host: 'localhost' }
+ *   }
+ * });
+ * core.addOrbit(stream);
 * ```
+ * @public
 */
 declare class OrbitStream implements GravitoOrbit {
     private options;
@@ -1695,19 +1655,47 @@ declare class SQLitePersistence implements PersistenceAdapter {
     private setupLogsTable;
 }
 
+/**
+ * Configuration for a recurring scheduled job.
+ *
+ * @public
+ * @since 3.0.0
+ */
 interface ScheduledJobConfig {
+    /** Unique identifier for the scheduled task. */
     id: string;
+    /** Cron expression defining the schedule (e.g., '* * * * *'). */
     cron: string;
+    /** The target queue name where the job should be pushed. */
     queue: string;
+    /** The serialized job data. */
     job: SerializedJob;
+    /** Timestamp of the last successful execution in milliseconds. */
     lastRun?: number;
+    /** Timestamp of the next scheduled execution in milliseconds. */
     nextRun?: number;
+    /** Whether the scheduled job is active. */
     enabled: boolean;
 }
 /**
- * Scheduler
+ * Scheduler manages recurring (cron) jobs in Gravito.
+ *
+ * It uses Redis to store schedule metadata and coordinates distributed
+ * execution using locks to ensure jobs are triggered exactly once per interval.
+ *
+ * @example
+ * ```typescript
+ * const scheduler = new Scheduler(queueManager);
+ * await scheduler.register({
+ *   id: 'daily-cleanup',
+ *   cron: '0 0 * * *',
+ *   queue: 'default',
+ *   job: myJob.serialize()
+ * });
+ * ```
 *
- *
+ * @public
+ * @since 3.0.0
 */
 declare class Scheduler {
     private manager;
package/dist/index.js
CHANGED

@@ -2171,6 +2171,7 @@ var OrbitStream = class _OrbitStream {
    */
   install(core) {
     this.queueManager = new QueueManager(this.options);
+    core.container.instance("queue", this.queueManager);
     core.adapter.use("*", async (c, next) => {
       if (this.queueManager && this.options.connections) {
         for (const [name, config] of Object.entries(this.options.connections)) {
@@ -2344,9 +2345,15 @@ var MySQLPersistence = class {
    */
   async listLogs(options = {}) {
     let query = this.db.table(this.logsTable);
-    if (options.level)
-
-
+    if (options.level) {
+      query = query.where("level", options.level);
+    }
+    if (options.workerId) {
+      query = query.where("worker_id", options.workerId);
+    }
+    if (options.queue) {
+      query = query.where("queue", options.queue);
+    }
     if (options.search) {
       query = query.where("message", "like", `%${options.search}%`);
     }
@@ -2363,9 +2370,15 @@ var MySQLPersistence = class {
    */
   async countLogs(options = {}) {
     let query = this.db.table(this.logsTable);
-    if (options.level)
-
-
+    if (options.level) {
+      query = query.where("level", options.level);
+    }
+    if (options.workerId) {
+      query = query.where("worker_id", options.workerId);
+    }
+    if (options.queue) {
+      query = query.where("queue", options.queue);
+    }
     if (options.search) {
       query = query.where("message", "like", `%${options.search}%`);
     }
@@ -2418,7 +2431,9 @@ var MySQLPersistence = class {
   }
   async setupJobsTable() {
     const exists = await Schema.hasTable(this.table);
-    if (exists)
+    if (exists) {
+      return;
+    }
     await Schema.create(this.table, (table) => {
       table.id();
       table.string("job_id", 64);
@@ -2437,7 +2452,9 @@ var MySQLPersistence = class {
   }
   async setupLogsTable() {
     const exists = await Schema.hasTable(this.logsTable);
-    if (exists)
+    if (exists) {
+      return;
+    }
     await Schema.create(this.logsTable, (table) => {
       table.id();
       table.string("level", 20);
@@ -2567,9 +2584,15 @@ var SQLitePersistence = class {
    */
   async listLogs(options = {}) {
     let query = this.db.table(this.logsTable);
-    if (options.level)
-
-
+    if (options.level) {
+      query = query.where("level", options.level);
+    }
+    if (options.workerId) {
+      query = query.where("worker_id", options.workerId);
+    }
+    if (options.queue) {
+      query = query.where("queue", options.queue);
+    }
     if (options.search) {
       query = query.where("message", "like", `%${options.search}%`);
     }
@@ -2586,9 +2609,15 @@ var SQLitePersistence = class {
    */
   async countLogs(options = {}) {
     let query = this.db.table(this.logsTable);
-    if (options.level)
-
-
+    if (options.level) {
+      query = query.where("level", options.level);
+    }
+    if (options.workerId) {
+      query = query.where("worker_id", options.workerId);
+    }
+    if (options.queue) {
+      query = query.where("queue", options.queue);
+    }
     if (options.search) {
       query = query.where("message", "like", `%${options.search}%`);
     }
@@ -2641,7 +2670,9 @@ var SQLitePersistence = class {
   }
   async setupJobsTable() {
     const exists = await Schema2.hasTable(this.table);
-    if (exists)
+    if (exists) {
+      return;
+    }
     await Schema2.create(this.table, (table) => {
       table.id();
       table.string("job_id", 64);
@@ -2658,7 +2689,9 @@ var SQLitePersistence = class {
   }
   async setupLogsTable() {
     const exists = await Schema2.hasTable(this.logsTable);
-    if (exists)
+    if (exists) {
+      return;
+    }
     await Schema2.create(this.logsTable, (table) => {
       table.id();
       table.string("level", 20);
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@gravito/stream",
-  "version": "1.0.2",
+  "version": "1.0.3",
   "publishConfig": {
     "access": "public"
   },
@@ -25,8 +25,8 @@
     "build": "bun run build.ts",
     "test": "bun test",
     "typecheck": "bun tsc -p tsconfig.json --noEmit --skipLibCheck",
-    "test:coverage": "bun test --coverage --coverage-threshold=
-    "test:ci": "bun test --coverage --coverage-threshold=
+    "test:coverage": "bun test --coverage --coverage-threshold=70",
+    "test:ci": "bun test --coverage --coverage-threshold=70"
   },
   "keywords": [
     "gravito",