@cascade-flow/backend-postgres 0.2.8 → 0.2.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/db.d.ts +55 -1
- package/dist/db.d.ts.map +1 -1
- package/dist/index.d.ts +19 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +434 -128
- package/dist/index.js.map +5 -5
- package/dist/migrations.d.ts.map +1 -1
- package/package.json +3 -3
package/dist/index.js (CHANGED)
```diff
@@ -5080,6 +5080,31 @@ class DatabaseClient {
       client.release();
     }
   }
+  async loadStepEventsForProjection(workflowSlug, runId) {
+    const client = await this.pool.connect();
+    try {
+      const query = `
+        SELECT event_data, event_id, step_id
+        FROM ${this.schema}.step_events
+        WHERE workflow_slug = $1 AND run_id = $2
+          AND type NOT IN ('LogEntry', 'StepHeartbeat', 'StepRetrying')
+        ORDER BY timestamp_us ASC, event_id ASC
+      `;
+      const result = await client.query(query, [workflowSlug, runId]);
+      const eventsByStep = new Map;
+      for (const row of result.rows) {
+        const stepId = row.step_id;
+        const event = { ...row.event_data, eventId: row.event_id };
+        if (!eventsByStep.has(stepId)) {
+          eventsByStep.set(stepId, []);
+        }
+        eventsByStep.get(stepId).push(event);
+      }
+      return eventsByStep;
+    } finally {
+      client.release();
+    }
+  }
   async claimScheduledStep(workflowSlug, runId, stepId, workerId, eventToWrite) {
     const client = await this.pool.connect();
     try {
```
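The new `DatabaseClient.loadStepEventsForProjection` fetches every state-relevant step event for a run in one query, already grouped by `step_id`, with the noisy types (`LogEntry`, `StepHeartbeat`, `StepRetrying`) filtered out in SQL rather than in JS. A minimal usage sketch; `projectStepState` is a helper name that appears later in this diff, and the surrounding wiring is an assumption:

```js
// Sketch: project per-step state from one batched query instead of
// loading the full event log step by step. `db` is the package's
// internal DatabaseClient (construction omitted).
const eventsByStep = await db.loadStepEventsForProjection("billing", "run_123");
for (const [stepId, events] of eventsByStep) {
  const state = projectStepState(events, "billing"); // helper seen elsewhere in this diff
  console.log(stepId, state.status);
}
```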
```diff
@@ -5451,6 +5476,253 @@ class DatabaseClient {
       client.release();
     }
   }
+  async getQueueDepthAggregation(workflowSlug) {
+    const client = await this.pool.connect();
+    try {
+      const result = await client.query(`WITH
+        -- Get the latest workflow event per run to determine run status
+        run_status AS (
+          SELECT DISTINCT ON (workflow_slug, run_id)
+            workflow_slug,
+            run_id,
+            type,
+            timestamp_us AS created_at
+          FROM ${this.schema}.workflow_events
+          WHERE ($1::text IS NULL OR workflow_slug = $1)
+          ORDER BY workflow_slug, run_id, timestamp_us DESC, event_id DESC
+        ),
+        -- Filter to only active (pending/running) runs
+        active_runs AS (
+          SELECT
+            workflow_slug,
+            run_id,
+            CASE WHEN type IN ('RunSubmitted', 'WorkflowRetryStarted') THEN 'pending' ELSE 'running' END AS status,
+            created_at
+          FROM run_status
+          WHERE type IN ('RunSubmitted', 'WorkflowRetryStarted', 'WorkflowStarted', 'WorkflowResumed')
+        ),
+        -- Get latest step event per step (excluding LogEntry which doesn't change state)
+        latest_step_events AS (
+          SELECT DISTINCT ON (se.workflow_slug, se.run_id, se.step_id)
+            se.type,
+            se.available_at_us
+          FROM ${this.schema}.step_events se
+          INNER JOIN active_runs ar
+            ON ar.workflow_slug = se.workflow_slug
+            AND ar.run_id = se.run_id
+            AND ar.status = 'running'
+          WHERE se.type != 'LogEntry'
+          ORDER BY se.workflow_slug, se.run_id, se.step_id, se.timestamp_us DESC, se.event_id DESC
+        ),
+        -- Aggregate step counts
+        step_counts AS (
+          SELECT
+            COUNT(*) FILTER (WHERE type IN ('StepScheduled', 'StepReclaimed')) AS scheduled_steps,
+            COUNT(*) FILTER (WHERE type IN ('StepStarted', 'StepHeartbeat')) AS running_steps,
+            MIN(available_at_us) FILTER (WHERE type IN ('StepScheduled', 'StepReclaimed')) AS oldest_scheduled_step_us
+          FROM latest_step_events
+        ),
+        -- Aggregate run counts
+        run_counts AS (
+          SELECT
+            COUNT(*) FILTER (WHERE status = 'pending') AS pending_runs,
+            COUNT(*) FILTER (WHERE status = 'running') AS running_runs,
+            MIN(created_at) FILTER (WHERE status = 'pending') AS oldest_pending_run_us
+          FROM active_runs
+        )
+        SELECT
+          COALESCE(rc.pending_runs, 0) AS pending_runs,
+          COALESCE(rc.running_runs, 0) AS running_runs,
+          COALESCE(sc.scheduled_steps, 0) AS scheduled_steps,
+          COALESCE(sc.running_steps, 0) AS running_steps,
+          sc.oldest_scheduled_step_us,
+          rc.oldest_pending_run_us
+        FROM run_counts rc, step_counts sc`, [workflowSlug ?? null]);
+      const row = result.rows[0];
+      return {
+        pendingRuns: parseInt(row?.pending_runs ?? "0", 10),
+        runningRuns: parseInt(row?.running_runs ?? "0", 10),
+        scheduledSteps: parseInt(row?.scheduled_steps ?? "0", 10),
+        runningSteps: parseInt(row?.running_steps ?? "0", 10),
+        oldestScheduledStepUs: row?.oldest_scheduled_step_us ? parseInt(row.oldest_scheduled_step_us, 10) : null,
+        oldestPendingRunUs: row?.oldest_pending_run_us ? parseInt(row.oldest_pending_run_us, 10) : null
+      };
+    } finally {
+      client.release();
+    }
+  }
+  async listRunsFiltered(options) {
+    const client = await this.pool.connect();
+    try {
+      const statusToEvents = {
+        pending: ["RunSubmitted", "WorkflowRetryStarted"],
+        running: ["WorkflowStarted", "WorkflowResumed"],
+        completed: ["WorkflowCompleted"],
+        failed: ["WorkflowFailed"],
+        cancelled: ["WorkflowCancelled"]
+      };
+      let eventTypeFilter = null;
+      if (options?.status && options.status.length > 0) {
+        eventTypeFilter = options.status.flatMap((s) => statusToEvents[s] || []);
+      }
+      const result = await client.query(`WITH
+        -- Get first event (RunSubmitted) for each run to get createdAt and tags
+        run_submitted AS (
+          SELECT DISTINCT ON (workflow_slug, run_id)
+            workflow_slug,
+            run_id,
+            timestamp_us AS created_at,
+            event_data->'tags' AS tags
+          FROM ${this.schema}.workflow_events
+          WHERE type = 'RunSubmitted'
+            AND ($1::text IS NULL OR workflow_slug = $1)
+          ORDER BY workflow_slug, run_id, timestamp_us ASC
+        ),
+        -- Get latest status-determining event for each run
+        latest_status AS (
+          SELECT DISTINCT ON (workflow_slug, run_id)
+            workflow_slug,
+            run_id,
+            CASE
+              WHEN type IN ('RunSubmitted', 'WorkflowRetryStarted') THEN 'pending'
+              WHEN type IN ('WorkflowStarted', 'WorkflowResumed') THEN 'running'
+              WHEN type = 'WorkflowCompleted' THEN 'completed'
+              WHEN type = 'WorkflowFailed' THEN 'failed'
+              WHEN type = 'WorkflowCancelled' THEN 'cancelled'
+            END AS status,
+            type
+          FROM ${this.schema}.workflow_events
+          WHERE ($1::text IS NULL OR workflow_slug = $1)
+          ORDER BY workflow_slug, run_id, timestamp_us DESC, event_id DESC
+        )
+        SELECT
+          ls.workflow_slug,
+          ls.run_id,
+          ls.status,
+          rs.created_at,
+          rs.tags
+        FROM latest_status ls
+        JOIN run_submitted rs ON ls.workflow_slug = rs.workflow_slug AND ls.run_id = rs.run_id
+        WHERE ($2::text[] IS NULL OR ls.type = ANY($2))
+        ORDER BY rs.created_at DESC
+        LIMIT $3`, [
+        options?.workflowSlug ?? null,
+        eventTypeFilter,
+        options?.limit ?? null
+      ]);
+      return result.rows.map((row) => ({
+        workflowSlug: row.workflow_slug,
+        runId: row.run_id,
+        status: row.status,
+        createdAt: parseInt(row.created_at, 10),
+        tags: row.tags
+      }));
+    } finally {
+      client.release();
+    }
+  }
+  async loadWorkflowEventsForRuns(runs) {
+    if (runs.length === 0) {
+      return new Map;
+    }
+    const client = await this.pool.connect();
+    try {
+      const values = [];
+      const valuePlaceholders = [];
+      runs.forEach((run, i) => {
+        values.push(run.workflowSlug, run.runId);
+        valuePlaceholders.push(`($${i * 2 + 1}, $${i * 2 + 2})`);
+      });
+      const result = await client.query(`SELECT workflow_slug, run_id, event_data, event_id
+        FROM ${this.schema}.workflow_events
+        WHERE (workflow_slug, run_id) IN (VALUES ${valuePlaceholders.join(", ")})
+        ORDER BY workflow_slug, run_id, timestamp_us ASC, event_id ASC`, values);
+      const eventsByRun = new Map;
+      for (const row of result.rows) {
+        const key = `${row.workflow_slug}:${row.run_id}`;
+        const events = eventsByRun.get(key) || [];
+        events.push({ ...row.event_data, eventId: row.event_id });
+        eventsByRun.set(key, events);
+      }
+      return eventsByRun;
+    } finally {
+      client.release();
+    }
+  }
+  async getQueueDepthByWorkflowAggregation() {
+    const client = await this.pool.connect();
+    try {
+      const result = await client.query(`WITH
+        -- Get the latest workflow event per run to determine run status
+        run_status AS (
+          SELECT DISTINCT ON (workflow_slug, run_id)
+            workflow_slug,
+            run_id,
+            type,
+            timestamp_us AS created_at
+          FROM ${this.schema}.workflow_events
+          ORDER BY workflow_slug, run_id, timestamp_us DESC, event_id DESC
+        ),
+        -- Filter to only active (pending/running) runs
+        active_runs AS (
+          SELECT
+            workflow_slug,
+            run_id,
+            CASE WHEN type IN ('RunSubmitted', 'WorkflowRetryStarted') THEN 'pending' ELSE 'running' END AS status,
+            created_at
+          FROM run_status
+          WHERE type IN ('RunSubmitted', 'WorkflowRetryStarted', 'WorkflowStarted', 'WorkflowResumed')
+        ),
+        -- Get latest step event per step (excluding LogEntry)
+        latest_step_events AS (
+          SELECT DISTINCT ON (se.workflow_slug, se.run_id, se.step_id)
+            se.workflow_slug,
+            se.type,
+            se.available_at_us
+          FROM ${this.schema}.step_events se
+          INNER JOIN active_runs ar
+            ON ar.workflow_slug = se.workflow_slug
+            AND ar.run_id = se.run_id
+            AND ar.status = 'running'
+          WHERE se.type != 'LogEntry'
+          ORDER BY se.workflow_slug, se.run_id, se.step_id, se.timestamp_us DESC, se.event_id DESC
+        ),
+        -- Aggregate step counts by workflow
+        step_counts_by_workflow AS (
+          SELECT
+            workflow_slug,
+            COUNT(*) FILTER (WHERE type IN ('StepScheduled', 'StepReclaimed')) AS scheduled_steps,
+            MIN(available_at_us) FILTER (WHERE type IN ('StepScheduled', 'StepReclaimed')) AS oldest_scheduled_us
+          FROM latest_step_events
+          GROUP BY workflow_slug
+        ),
+        -- Aggregate run counts by workflow
+        run_counts_by_workflow AS (
+          SELECT
+            workflow_slug,
+            COUNT(*) FILTER (WHERE status = 'pending') AS pending_runs,
+            MIN(created_at) FILTER (WHERE status = 'pending') AS oldest_pending_run_us
+          FROM active_runs
+          GROUP BY workflow_slug
+        )
+        SELECT
+          COALESCE(r.workflow_slug, s.workflow_slug) AS workflow_slug,
+          COALESCE(r.pending_runs, 0) AS pending_runs,
+          COALESCE(s.scheduled_steps, 0) AS scheduled_steps,
+          LEAST(r.oldest_pending_run_us, s.oldest_scheduled_us) AS oldest_pending_item_us
+        FROM run_counts_by_workflow r
+        FULL OUTER JOIN step_counts_by_workflow s ON r.workflow_slug = s.workflow_slug
+        WHERE COALESCE(r.pending_runs, 0) > 0 OR COALESCE(s.scheduled_steps, 0) > 0`);
+      return result.rows.map((row) => ({
+        workflowSlug: row.workflow_slug,
+        pendingRuns: parseInt(row.pending_runs, 10),
+        scheduledSteps: parseInt(row.scheduled_steps, 10),
+        oldestPendingItemUs: row.oldest_pending_item_us ? parseInt(row.oldest_pending_item_us, 10) : null
+      }));
+    } finally {
+      client.release();
+    }
+  }
 }
 function createPool(connectionString) {
   return new Pool2({ connectionString });
```
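All four new `DatabaseClient` methods lean on the Postgres `SELECT DISTINCT ON (...) ... ORDER BY ... DESC` idiom to pick the latest event per run (or per step) inside the database, so queue depth and run listings no longer replay event logs in JS. A hedged sketch of calling the two aggregation helpers directly; instance setup is assumed:

```js
// Queue depth across all workflows (passing null disables the $1 slug filter).
const depth = await db.getQueueDepthAggregation(null);
console.log(depth.pendingRuns, depth.runningRuns, depth.oldestPendingRunUs);

// Per-workflow backlog; rows are returned only for workflows with pending work.
for (const row of await db.getQueueDepthByWorkflowAggregation()) {
  console.log(row.workflowSlug, row.pendingRuns + row.scheduledSteps);
}
```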
```diff
@@ -5729,6 +6001,60 @@ async function migration005_addWorkflowVersioning(pool, schema) {
     client.release();
   }
 }
+async function migration006_addDescIndexes(pool, schema) {
+  const client = await pool.connect();
+  try {
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_workflow_events_latest
+      ON ${schema}.workflow_events (workflow_slug, run_id, timestamp_us DESC, event_id DESC)
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_step_events_latest
+      ON ${schema}.step_events (workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC)
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_workflow_events_by_type
+      ON ${schema}.workflow_events (type, workflow_slug, run_id, timestamp_us ASC)
+    `);
+    console.log("[Migration 006] Optimized query indexes added successfully");
+  } catch (error) {
+    console.error("[Migration 006] Error adding DESC indexes:", error);
+    throw error;
+  } finally {
+    client.release();
+  }
+}
+async function migration007_addWorkerIndexes(pool, schema) {
+  const client = await pool.connect();
+  try {
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_step_events_scheduled_status
+      ON ${schema}.step_events (type, workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC)
+      WHERE type IN ('StepScheduled', 'StepReclaimed', 'StepRetrying')
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_step_events_heartbeat_status
+      ON ${schema}.step_events (type, workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC)
+      WHERE type IN ('StepStarted', 'StepHeartbeat')
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_workflow_events_active_types
+      ON ${schema}.workflow_events (type)
+      WHERE type IN ('RunSubmitted', 'WorkflowStarted', 'WorkflowResumed')
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_step_events_active_types
+      ON ${schema}.step_events (type)
+      WHERE type IN ('StepScheduled', 'StepStarted', 'StepReclaimed', 'StepRetrying')
+    `);
+    console.log("[Migration 007] Worker loop indexes added successfully");
+  } catch (error) {
+    console.error("[Migration 007] Error adding worker indexes:", error);
+    throw error;
+  } finally {
+    client.release();
+  }
+}
 async function runMigrations(pool, schema = "cascadeflow") {
   console.log(`[Migrations] Starting database migrations in schema '${schema}'...`);
   try {
```
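Migration 006's composite indexes mirror the sort keys of the `DISTINCT ON` queries above (`workflow_slug, run_id, timestamp_us DESC, event_id DESC`), and migration 007 adds partial indexes restricted to the event types the worker loop polls. A sketch of how they get applied; `runMigrations` is module-internal and normally invoked during backend initialization, so the direct call below is illustrative only:

```js
import { Pool } from "pg";

// Illustrative: PostgresBackend runs this itself via
// runMigrations(this.db.getPool(), this.db.getSchema()) during init.
const pool = new Pool({ connectionString: process.env.DATABASE_URL });
await runMigrations(pool, "cascadeflow"); // applies migrations 001 through 007 in order
```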
```diff
@@ -5738,6 +6064,8 @@ async function runMigrations(pool, schema = "cascadeflow") {
     await migration003_createIndexes(pool, schema);
     await migration004_addErrorFingerprints(pool, schema);
     await migration005_addWorkflowVersioning(pool, schema);
+    await migration006_addDescIndexes(pool, schema);
+    await migration007_addWorkerIndexes(pool, schema);
     console.log("[Migrations] All migrations completed successfully");
   } catch (error) {
     console.error("[Migrations] Migration failed:", error);
```
```diff
@@ -5761,6 +6089,20 @@ class PostgresBackend extends Backend {
     await runMigrations(this.db.getPool(), this.db.getSchema());
     this.initialized = true;
   }
+  async backendReady() {
+    try {
+      const client = await this.db.getPool().connect();
+      try {
+        await client.query("SELECT 1");
+        return true;
+      } finally {
+        client.release();
+      }
+    } catch (error) {
+      console.error("Backend health check failed:", error);
+      return false;
+    }
+  }
   generateRunId() {
     return `run_${Date.now()}_${Math.random().toString(36).substring(2, 15)}`;
   }
```
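`backendReady` is a liveness probe: it checks out a pooled connection, runs `SELECT 1`, and resolves `true`/`false` instead of throwing. One way to expose it, sketched with Express; the HTTP wiring is an assumption, not part of this package:

```js
import express from "express";

// `backend` is an initialized PostgresBackend instance (construction omitted).
const app = express();
app.get("/healthz", async (_req, res) => {
  const ok = await backend.backendReady(); // resolves false on failure, never throws
  res.status(ok ? 200 : 503).json({ ok });
});
```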
```diff
@@ -5824,6 +6166,9 @@ class PostgresBackend extends Backend {
       return this.db.loadAllRunEvents(workflowSlug, runId);
     }
   }
+  async loadStepEventsForProjection(workflowSlug, runId) {
+    return this.db.loadStepEventsForProjection(workflowSlug, runId);
+  }
   async copyEntireRun(workflowSlug, sourceRunId, targetRunId) {
     await this.copyEntireRunWithClient(workflowSlug, sourceRunId, targetRunId, this.db.getPool());
   }
```
```diff
@@ -6054,8 +6399,7 @@ class PostgresBackend extends Backend {
     await this.db.appendEvent("step_events", event);
   }
   async saveStepComplete(workflowSlug, runId, stepId, output, metadata, exportOutput = false) {
-    const
-    const attemptNumber = getCurrentAttemptNumber(events);
+    const attemptNumber = metadata.attemptNumber;
     if (attemptNumber === 0) {
       throw new Error(`Cannot complete step that hasn't started: ${stepId}`);
     }
```
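`saveStepComplete` now reads the attempt number from the `metadata` argument instead of re-deriving it from a fresh event load (`getCurrentAttemptNumber(events)` in 0.2.8), saving one read per completion and making the caller responsible for supplying it. A hedged call-site sketch; the `metadata` shape beyond `attemptNumber` is an assumption:

```js
// attemptNumber must be >= 1; a value of 0 is rejected as "not started".
await backend.saveStepComplete(
  "billing",            // workflowSlug
  "run_123",            // runId
  "step_fetch",         // stepId
  { total: 42 },        // output
  { attemptNumber: 1 }  // metadata (only attemptNumber is read here)
);
```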
```diff
@@ -6194,6 +6538,44 @@ class PostgresBackend extends Backend {
     };
     await this.db.appendEvent("step_events", event);
   }
+  async saveStepCheckpoint(workflowSlug, runId, stepId, checkpoint) {
+    const now = getMicrosecondTimestamp();
+    const event = {
+      category: "step",
+      type: "StepCheckpoint",
+      eventId: this.generateEventId(now),
+      timestampUs: now,
+      workflowSlug,
+      runId,
+      stepId,
+      name: checkpoint.name,
+      sequenceNumber: checkpoint.sequenceNumber,
+      attemptNumber: checkpoint.attemptNumber,
+      data: checkpoint.data,
+      ...checkpoint.label && { label: checkpoint.label },
+      ...checkpoint.parentCheckpoint && {
+        parentCheckpoint: checkpoint.parentCheckpoint
+      }
+    };
+    await this.db.appendEvent("step_events", event);
+  }
+  async saveStepCheckpointFailed(workflowSlug, runId, stepId, checkpoint) {
+    const now = getMicrosecondTimestamp();
+    const event = {
+      category: "step",
+      type: "StepCheckpointFailed",
+      eventId: this.generateEventId(now),
+      timestampUs: now,
+      workflowSlug,
+      runId,
+      stepId,
+      name: checkpoint.name,
+      sequenceNumber: checkpoint.sequenceNumber,
+      attemptNumber: checkpoint.attemptNumber,
+      error: checkpoint.error
+    };
+    await this.db.appendEvent("step_events", event);
+  }
   async saveStepHeartbeat(workflowSlug, runId, stepId, workerId, attemptNumber) {
     const now = getMicrosecondTimestamp();
     const event = {
```
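The two new writers append `StepCheckpoint` and `StepCheckpointFailed` events; `label` and `parentCheckpoint` are spread in only when set. The checkpoint shape below is inferred from the property reads above, not taken from the package's published types:

```js
// Inferred checkpoint shape; field names come from the event construction above.
const checkpoint = {
  name: "fetch-invoices",
  sequenceNumber: 3,
  attemptNumber: 1,
  data: { cursor: "2024-06-01" },
  label: "page 3" // optional; parentCheckpoint is likewise optional
};
await backend.saveStepCheckpoint("billing", "run_123", "step_fetch", checkpoint);
```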
```diff
@@ -6403,35 +6785,39 @@ class PostgresBackend extends Backend {
     return { runId, isNew: true };
   }
   async listRuns(options) {
+    const filteredRuns = await this.db.listRunsFiltered({
+      workflowSlug: options?.workflowSlug,
+      status: options?.status,
+      limit: options?.tags?.length ? undefined : options?.limit
+    });
+    let runsToLoad = filteredRuns;
+    if (options?.tags && options.tags.length > 0) {
+      runsToLoad = filteredRuns.filter((run) => {
+        const runTags = run.tags || [];
+        return options.tags.every((tag) => runTags.includes(tag));
+      });
+      if (options?.limit) {
+        runsToLoad = runsToLoad.slice(0, options.limit);
+      }
+    }
+    if (runsToLoad.length === 0) {
+      return [];
+    }
+    const eventsByRun = await this.db.loadWorkflowEventsForRuns(runsToLoad.map((r) => ({ workflowSlug: r.workflowSlug, runId: r.runId })));
     const allRuns = [];
-    const
-
-    const
-
-
-
-
-
-
-      if (options?.status && options.status.length > 0) {
-        if (!options.status.includes(state.status))
-          continue;
-      }
-      if (options?.tags && options.tags.length > 0) {
-        const stateTags = state.tags || [];
-        const hasAllTags = options.tags.every((tag) => stateTags.includes(tag));
-        if (!hasAllTags)
-          continue;
-      }
-      allRuns.push(state);
-    } catch {
-      continue;
-    }
+    for (const run of runsToLoad) {
+      const key = `${run.workflowSlug}:${run.runId}`;
+      const events = eventsByRun.get(key);
+      if (!events || events.length === 0)
+        continue;
+      try {
+        const state = projectRunStateFromEvents(events, run.workflowSlug);
+        allRuns.push(state);
+      } catch {
+        continue;
      }
    }
-    allRuns
-    return options?.limit ? allRuns.slice(0, options.limit) : allRuns;
+    return allRuns;
   }
   async cancelRun(runId, reason) {
     const allWorkflows = await this.db.listActiveWorkflows();
```
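Note the limit handling in the rewritten `listRuns` (the registry's diff view truncates several of the removed lines above, which are reproduced as shown): when tag filters are present, the SQL `LIMIT` is deliberately skipped, since tags live in JSON and are matched in JS, and the limit is re-applied only after tag filtering, so a page can't be truncated before matching runs are found. For example:

```js
// Status filtering and ordering happen in SQL; the tag filter and the
// limit are applied afterwards in JS, per the code above.
const runs = await backend.listRuns({
  workflowSlug: "billing",
  status: ["running"],
  tags: ["customer:acme"],
  limit: 20
});
```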
```diff
@@ -6461,8 +6847,7 @@ class PostgresBackend extends Backend {
     for (const workflowSlug of allWorkflows) {
       const runIds = await this.db.listRunIds(workflowSlug);
       if (runIds.includes(runId)) {
-        const
-        const workflowEvents = events.filter((e) => e.category === "workflow");
+        const workflowEvents = await this.loadEvents(workflowSlug, runId, { category: "workflow" });
         return projectRunStateFromEvents(workflowEvents, workflowSlug);
       }
     }
```
```diff
@@ -6907,113 +7292,34 @@ class PostgresBackend extends Backend {
     return computeThroughput(stepEvents, workflowEvents, timeRangeUs, options?.workflowSlug);
   }
   async getQueueDepth(options) {
-    const
-      workflowSlug: options?.workflowSlug,
-      status: ["pending", "running"]
-    });
-    let pendingRuns = 0;
-    let runningRuns = 0;
-    let scheduledSteps = 0;
-    let runningSteps = 0;
-    let oldestScheduledStepUs;
-    let oldestPendingRunUs;
-    for (const run of runs) {
-      if (run.status === "pending") {
-        pendingRuns++;
-        if (!oldestPendingRunUs || run.createdAt < oldestPendingRunUs) {
-          oldestPendingRunUs = run.createdAt;
-        }
-      } else if (run.status === "running") {
-        runningRuns++;
-        try {
-          const stepEvents = await this.loadEvents(run.workflowSlug, run.runId, {
-            category: "step"
-          });
-          const eventsByStep = new Map;
-          for (const event of stepEvents) {
-            const events = eventsByStep.get(event.stepId) || [];
-            events.push(event);
-            eventsByStep.set(event.stepId, events);
-          }
-          for (const [stepId, events] of eventsByStep.entries()) {
-            const state = projectStepState(events, run.workflowSlug);
-            if (state.status === "scheduled") {
-              scheduledSteps++;
-              if (state.availableAt && (!oldestScheduledStepUs || state.availableAt < oldestScheduledStepUs)) {
-                oldestScheduledStepUs = state.availableAt;
-              }
-            } else if (state.status === "running") {
-              runningSteps++;
-            }
-          }
-        } catch (error) {
-          continue;
-        }
-      }
-    }
+    const result = await this.db.getQueueDepthAggregation(options?.workflowSlug);
     return {
       workflowSlug: options?.workflowSlug,
-      pendingRuns,
-      runningRuns,
-      scheduledSteps,
-      runningSteps,
-      oldestScheduledStepUs,
-      oldestPendingRunUs
+      pendingRuns: result.pendingRuns,
+      runningRuns: result.runningRuns,
+      scheduledSteps: result.scheduledSteps,
+      runningSteps: result.runningSteps,
+      oldestScheduledStepUs: result.oldestScheduledStepUs ?? undefined,
+      oldestPendingRunUs: result.oldestPendingRunUs ?? undefined
     };
   }
   async getQueueDepthByWorkflow() {
-    const
-
-
-
-      pendingRuns: 0,
-      scheduledSteps: 0
-    };
-    if (run.status === "pending") {
-      existing.pendingRuns++;
-      if (!existing.oldestPendingItemUs || run.createdAt < existing.oldestPendingItemUs) {
-        existing.oldestPendingItemUs = run.createdAt;
-      }
-    } else if (run.status === "running") {
-      try {
-        const stepEvents = await this.loadEvents(run.workflowSlug, run.runId, {
-          category: "step"
-        });
-        const eventsByStep = new Map;
-        for (const event of stepEvents) {
-          const events = eventsByStep.get(event.stepId) || [];
-          events.push(event);
-          eventsByStep.set(event.stepId, events);
-        }
-        for (const [stepId, events] of eventsByStep.entries()) {
-          const state = projectStepState(events, run.workflowSlug);
-          if (state.status === "scheduled") {
-            existing.scheduledSteps++;
-            if (state.availableAt && (!existing.oldestPendingItemUs || state.availableAt < existing.oldestPendingItemUs)) {
-              existing.oldestPendingItemUs = state.availableAt;
-            }
-          }
-        }
-      } catch (error) {
-        continue;
-      }
-    }
-    workflowMap.set(run.workflowSlug, existing);
-  }
-    const allMetadata = await this.listWorkflowMetadata();
+    const [aggregation, allMetadata] = await Promise.all([
+      this.db.getQueueDepthByWorkflowAggregation(),
+      this.listWorkflowMetadata()
+    ]);
     const metadataMap = new Map(allMetadata.map((m) => [m.slug, m]));
-
-      workflowSlug,
-      workflowName: metadataMap.get(workflowSlug)?.name,
-      pendingRuns:
-      scheduledSteps:
-      oldestPendingItemUs:
-    })).
+    return aggregation.map((item) => ({
+      workflowSlug: item.workflowSlug,
+      workflowName: metadataMap.get(item.workflowSlug)?.name,
+      pendingRuns: item.pendingRuns,
+      scheduledSteps: item.scheduledSteps,
+      oldestPendingItemUs: item.oldestPendingItemUs ?? undefined
+    })).sort((a, b) => {
       const aTotal = a.pendingRuns + a.scheduledSteps;
       const bTotal = b.pendingRuns + b.scheduledSteps;
       return bTotal - aTotal;
     });
-    return result;
   }
   async getSuccessRate(options) {
     const { stepEvents, workflowEvents } = await this.loadEventsForAnalytics(options);
```
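The net effect of this hunk: `getQueueDepth` and `getQueueDepthByWorkflow` replace per-run event loads and `projectStepState` loops with a single SQL round trip each (plus one concurrent metadata fetch), while the public return shapes stay the same. A call-site sketch, unchanged from the caller's perspective:

```js
// Same API as 0.2.8; the counting now happens inside Postgres.
const depth = await backend.getQueueDepth({ workflowSlug: "billing" });
console.log(`${depth.pendingRuns} pending runs, ${depth.scheduledSteps} scheduled steps`);

const byWorkflow = await backend.getQueueDepthByWorkflow();
// Sorted by pendingRuns + scheduledSteps, descending, per the sort above.
```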
```diff
@@ -7066,4 +7372,4 @@ export {
   PostgresBackend
 };
 
-//# debugId=
+//# debugId=30740426469D1F9364756E2164756E21
```