@cascade-flow/backend-postgres 0.2.7 → 0.2.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/db.d.ts +100 -1
- package/dist/db.d.ts.map +1 -1
- package/dist/index.d.ts +31 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +708 -165
- package/dist/index.js.map +5 -5
- package/dist/migrations.d.ts.map +1 -1
- package/package.json +3 -3
package/dist/index.js
CHANGED
@@ -4744,6 +4744,7 @@ import {
   projectRunStateFromEvents,
   extractLogsFromEvents,
   getCurrentAttemptNumber,
+  getVersionIdFromEvents,
   getMicrosecondTimestamp,
   computeErrorAnalysis,
   computeRetryAnalysis,
@@ -4773,6 +4774,10 @@ var esm_default = import_lib.default;
 
 // src/db.ts
 var { Pool: Pool2 } = esm_default;
+function stripEventIdFromJson(event) {
+  const { eventId, ...eventWithoutId } = event;
+  return eventWithoutId;
+}
 
 class DatabaseClient {
   pool;
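
Note: this helper and the read-side changes in the hunks below form a round trip: an event's id is persisted only in the dedicated event_id column, no longer duplicated inside the event_data JSON, and every reader spreads the column value back in. A minimal TypeScript sketch of that round trip (the StoredEvent shape is assumed for illustration; the real event types live in the package's core library):

// Assumed event shape, for illustration only.
type StoredEvent = { eventId: string; type: string; [key: string]: unknown };

// Write side: drop eventId from the JSON payload; it lives in the event_id column.
function stripEventIdFromJson(event: StoredEvent): Omit<StoredEvent, "eventId"> {
  const { eventId: _eventId, ...eventWithoutId } = event;
  return eventWithoutId;
}

// Read side: re-attach the column value so callers see the original shape.
function rehydrate(row: { event_data: Record<string, unknown>; event_id: string }): StoredEvent {
  return { ...row.event_data, eventId: row.event_id } as StoredEvent;
}
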
@@ -4807,7 +4812,7 @@ class DatabaseClient {
       } else if ("workflowAttemptNumber" in we) {
         workflowAttemptNumber = we.workflowAttemptNumber;
       }
-      if (we.type === "WorkflowStarted") {
+      if (we.type === "WorkflowStarted" || we.type === "WorkflowResumed") {
         versionId = we.versionId;
       }
       if (versionId === null) {
@@ -4830,7 +4835,7 @@ class DatabaseClient {
         we.timestampUs,
         we.category,
         we.type,
-        JSON.stringify(event),
+        JSON.stringify(stripEventIdFromJson(event)),
         workflowAttemptNumber,
         availableAtUs,
         priority,
@@ -4889,7 +4894,7 @@ class DatabaseClient {
         se.timestampUs,
         se.category,
         se.type,
-        JSON.stringify(event),
+        JSON.stringify(stripEventIdFromJson(event)),
         workerId,
         attemptNumber,
         availableAtUs,
@@ -4947,7 +4952,7 @@ class DatabaseClient {
         we.timestampUs,
         we.category,
         we.type,
-        JSON.stringify(event),
+        JSON.stringify(stripEventIdFromJson(event)),
         workflowAttemptNumber,
         availableAtUs,
         priority,
@@ -5006,7 +5011,7 @@ class DatabaseClient {
         se.timestampUs,
         se.category,
         se.type,
-        JSON.stringify(event),
+        JSON.stringify(stripEventIdFromJson(event)),
         workerId,
         attemptNumber,
         availableAtUs,
@@ -5048,12 +5053,12 @@ class DatabaseClient {
       }
       const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
       const query = `
-        SELECT event_data FROM ${this.schema}.${table}
+        SELECT event_data, event_id FROM ${this.schema}.${table}
         ${whereClause}
         ORDER BY timestamp_us ASC, event_id ASC
       `;
       const result = await client.query(query, values);
-      return result.rows.map((row) => row.event_data);
+      return result.rows.map((row) => ({ ...row.event_data, eventId: row.event_id }));
     } finally {
       client.release();
     }
@@ -5070,7 +5075,32 @@ class DatabaseClient {
         ORDER BY timestamp_us ASC, event_id ASC
       `;
       const result = await client.query(query, [workflowSlug, runId]);
-      return result.rows.map((row) => row.event_data);
+      return result.rows.map((row) => ({ ...row.event_data, eventId: row.event_id }));
+    } finally {
+      client.release();
+    }
+  }
+  async loadStepEventsForProjection(workflowSlug, runId) {
+    const client = await this.pool.connect();
+    try {
+      const query = `
+        SELECT event_data, event_id, step_id
+        FROM ${this.schema}.step_events
+        WHERE workflow_slug = $1 AND run_id = $2
+          AND type NOT IN ('LogEntry', 'StepHeartbeat', 'StepRetrying')
+        ORDER BY timestamp_us ASC, event_id ASC
+      `;
+      const result = await client.query(query, [workflowSlug, runId]);
+      const eventsByStep = new Map;
+      for (const row of result.rows) {
+        const stepId = row.step_id;
+        const event = { ...row.event_data, eventId: row.event_id };
+        if (!eventsByStep.has(stepId)) {
+          eventsByStep.set(stepId, []);
+        }
+        eventsByStep.get(stepId).push(event);
+      }
+      return eventsByStep;
     } finally {
       client.release();
     }
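
Note: the new loadStepEventsForProjection returns a Map keyed by step_id, with chatty event types (LogEntry, StepHeartbeat, StepRetrying) filtered out in SQL rather than in process. A hypothetical consumer, assuming a connected DatabaseClient named db and example slug/run ids:

// Rows arrive ordered by (timestamp_us, event_id) and pre-grouped per step,
// so each step's history can be projected independently.
const eventsByStep = await db.loadStepEventsForProjection("send-email", "run_123");
for (const [stepId, events] of eventsByStep) {
  console.log(`${stepId}: ${events.length} state-changing events`);
}
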
@@ -5135,7 +5165,7 @@ class DatabaseClient {
         eventToWrite.timestampUs,
         eventToWrite.category,
         eventToWrite.type,
-        JSON.stringify(eventToWrite),
+        JSON.stringify(stripEventIdFromJson(eventToWrite)),
         workerId2,
         attemptNumber,
         null,
@@ -5341,6 +5371,59 @@ class DatabaseClient {
       client.release();
     }
   }
+  async getWorkflowVersion(workflowSlug, versionId) {
+    const client = await this.pool.connect();
+    try {
+      const result = await client.query(`WITH numbered_versions AS (
+        SELECT *,
+          ROW_NUMBER() OVER (PARTITION BY workflow_slug ORDER BY created_at ASC) as version_number
+        FROM ${this.schema}.workflow_versions
+        WHERE workflow_slug = $1
+      )
+      SELECT * FROM numbered_versions WHERE version_id = $2`, [workflowSlug, versionId]);
+      if (result.rows.length === 0)
+        return null;
+      return result.rows[0];
+    } finally {
+      client.release();
+    }
+  }
+  async getCurrentWorkflowVersion(workflowSlug) {
+    const client = await this.pool.connect();
+    try {
+      const result = await client.query(`WITH numbered_versions AS (
+        SELECT *,
+          ROW_NUMBER() OVER (PARTITION BY workflow_slug ORDER BY created_at ASC) as version_number
+        FROM ${this.schema}.workflow_versions
+        WHERE workflow_slug = $1
+      )
+      SELECT * FROM numbered_versions
+      ORDER BY created_at DESC
+      LIMIT 1`, [workflowSlug]);
+      if (result.rows.length === 0)
+        return null;
+      return result.rows[0];
+    } finally {
+      client.release();
+    }
+  }
+  async listWorkflowVersions(workflowSlug, limit) {
+    const client = await this.pool.connect();
+    try {
+      const result = await client.query(`WITH numbered_versions AS (
+        SELECT *,
+          ROW_NUMBER() OVER (PARTITION BY workflow_slug ORDER BY created_at ASC) as version_number
+        FROM ${this.schema}.workflow_versions
+        WHERE workflow_slug = $1
+      )
+      SELECT * FROM numbered_versions
+      ORDER BY created_at DESC
+      LIMIT $2`, [workflowSlug, limit]);
+      return result.rows;
+    } finally {
+      client.release();
+    }
+  }
   async saveIdempotencyKey(hash, runId) {
     const client = await this.pool.connect();
     try {
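
Note: version_number is never stored; all three methods derive it on read with a ROW_NUMBER() window ordered by created_at ascending, so the first registered version of a workflow is number 1 and existing numbers stay stable as newer versions are appended. A worked example under that assumption:

// Hypothetical data: versions v-a, v-b, v-c registered at t1 < t2 < t3 get
// version_number 1, 2, 3. getCurrentWorkflowVersion orders by created_at DESC,
// so it returns v-c while keeping its stable number.
const current = await db.getCurrentWorkflowVersion("send-email"); // assumes a DatabaseClient `db`
console.log(current?.version_number); // 3 in this example
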
@@ -5393,6 +5476,253 @@ class DatabaseClient {
       client.release();
     }
   }
+  async getQueueDepthAggregation(workflowSlug) {
+    const client = await this.pool.connect();
+    try {
+      const result = await client.query(`WITH
+      -- Get the latest workflow event per run to determine run status
+      run_status AS (
+        SELECT DISTINCT ON (workflow_slug, run_id)
+          workflow_slug,
+          run_id,
+          type,
+          timestamp_us AS created_at
+        FROM ${this.schema}.workflow_events
+        WHERE ($1::text IS NULL OR workflow_slug = $1)
+        ORDER BY workflow_slug, run_id, timestamp_us DESC, event_id DESC
+      ),
+      -- Filter to only active (pending/running) runs
+      active_runs AS (
+        SELECT
+          workflow_slug,
+          run_id,
+          CASE WHEN type IN ('RunSubmitted', 'WorkflowRetryStarted') THEN 'pending' ELSE 'running' END AS status,
+          created_at
+        FROM run_status
+        WHERE type IN ('RunSubmitted', 'WorkflowRetryStarted', 'WorkflowStarted', 'WorkflowResumed')
+      ),
+      -- Get latest step event per step (excluding LogEntry which doesn't change state)
+      latest_step_events AS (
+        SELECT DISTINCT ON (se.workflow_slug, se.run_id, se.step_id)
+          se.type,
+          se.available_at_us
+        FROM ${this.schema}.step_events se
+        INNER JOIN active_runs ar
+          ON ar.workflow_slug = se.workflow_slug
+          AND ar.run_id = se.run_id
+          AND ar.status = 'running'
+        WHERE se.type != 'LogEntry'
+        ORDER BY se.workflow_slug, se.run_id, se.step_id, se.timestamp_us DESC, se.event_id DESC
+      ),
+      -- Aggregate step counts
+      step_counts AS (
+        SELECT
+          COUNT(*) FILTER (WHERE type IN ('StepScheduled', 'StepReclaimed')) AS scheduled_steps,
+          COUNT(*) FILTER (WHERE type IN ('StepStarted', 'StepHeartbeat')) AS running_steps,
+          MIN(available_at_us) FILTER (WHERE type IN ('StepScheduled', 'StepReclaimed')) AS oldest_scheduled_step_us
+        FROM latest_step_events
+      ),
+      -- Aggregate run counts
+      run_counts AS (
+        SELECT
+          COUNT(*) FILTER (WHERE status = 'pending') AS pending_runs,
+          COUNT(*) FILTER (WHERE status = 'running') AS running_runs,
+          MIN(created_at) FILTER (WHERE status = 'pending') AS oldest_pending_run_us
+        FROM active_runs
+      )
+      SELECT
+        COALESCE(rc.pending_runs, 0) AS pending_runs,
+        COALESCE(rc.running_runs, 0) AS running_runs,
+        COALESCE(sc.scheduled_steps, 0) AS scheduled_steps,
+        COALESCE(sc.running_steps, 0) AS running_steps,
+        sc.oldest_scheduled_step_us,
+        rc.oldest_pending_run_us
+      FROM run_counts rc, step_counts sc`, [workflowSlug ?? null]);
+      const row = result.rows[0];
+      return {
+        pendingRuns: parseInt(row?.pending_runs ?? "0", 10),
+        runningRuns: parseInt(row?.running_runs ?? "0", 10),
+        scheduledSteps: parseInt(row?.scheduled_steps ?? "0", 10),
+        runningSteps: parseInt(row?.running_steps ?? "0", 10),
+        oldestScheduledStepUs: row?.oldest_scheduled_step_us ? parseInt(row.oldest_scheduled_step_us, 10) : null,
+        oldestPendingRunUs: row?.oldest_pending_run_us ? parseInt(row.oldest_pending_run_us, 10) : null
+      };
+    } finally {
+      client.release();
+    }
+  }
+  async listRunsFiltered(options) {
+    const client = await this.pool.connect();
+    try {
+      const statusToEvents = {
+        pending: ["RunSubmitted", "WorkflowRetryStarted"],
+        running: ["WorkflowStarted", "WorkflowResumed"],
+        completed: ["WorkflowCompleted"],
+        failed: ["WorkflowFailed"],
+        cancelled: ["WorkflowCancelled"]
+      };
+      let eventTypeFilter = null;
+      if (options?.status && options.status.length > 0) {
+        eventTypeFilter = options.status.flatMap((s) => statusToEvents[s] || []);
+      }
+      const result = await client.query(`WITH
+      -- Get first event (RunSubmitted) for each run to get createdAt and tags
+      run_submitted AS (
+        SELECT DISTINCT ON (workflow_slug, run_id)
+          workflow_slug,
+          run_id,
+          timestamp_us AS created_at,
+          event_data->'tags' AS tags
+        FROM ${this.schema}.workflow_events
+        WHERE type = 'RunSubmitted'
+          AND ($1::text IS NULL OR workflow_slug = $1)
+        ORDER BY workflow_slug, run_id, timestamp_us ASC
+      ),
+      -- Get latest status-determining event for each run
+      latest_status AS (
+        SELECT DISTINCT ON (workflow_slug, run_id)
+          workflow_slug,
+          run_id,
+          CASE
+            WHEN type IN ('RunSubmitted', 'WorkflowRetryStarted') THEN 'pending'
+            WHEN type IN ('WorkflowStarted', 'WorkflowResumed') THEN 'running'
+            WHEN type = 'WorkflowCompleted' THEN 'completed'
+            WHEN type = 'WorkflowFailed' THEN 'failed'
+            WHEN type = 'WorkflowCancelled' THEN 'cancelled'
+          END AS status,
+          type
+        FROM ${this.schema}.workflow_events
+        WHERE ($1::text IS NULL OR workflow_slug = $1)
+        ORDER BY workflow_slug, run_id, timestamp_us DESC, event_id DESC
+      )
+      SELECT
+        ls.workflow_slug,
+        ls.run_id,
+        ls.status,
+        rs.created_at,
+        rs.tags
+      FROM latest_status ls
+      JOIN run_submitted rs ON ls.workflow_slug = rs.workflow_slug AND ls.run_id = rs.run_id
+      WHERE ($2::text[] IS NULL OR ls.type = ANY($2))
+      ORDER BY rs.created_at DESC
+      LIMIT $3`, [
+        options?.workflowSlug ?? null,
+        eventTypeFilter,
+        options?.limit ?? null
+      ]);
+      return result.rows.map((row) => ({
+        workflowSlug: row.workflow_slug,
+        runId: row.run_id,
+        status: row.status,
+        createdAt: parseInt(row.created_at, 10),
+        tags: row.tags
+      }));
+    } finally {
+      client.release();
+    }
+  }
+  async loadWorkflowEventsForRuns(runs) {
+    if (runs.length === 0) {
+      return new Map;
+    }
+    const client = await this.pool.connect();
+    try {
+      const values = [];
+      const valuePlaceholders = [];
+      runs.forEach((run, i) => {
+        values.push(run.workflowSlug, run.runId);
+        valuePlaceholders.push(`($${i * 2 + 1}, $${i * 2 + 2})`);
+      });
+      const result = await client.query(`SELECT workflow_slug, run_id, event_data, event_id
+      FROM ${this.schema}.workflow_events
+      WHERE (workflow_slug, run_id) IN (VALUES ${valuePlaceholders.join(", ")})
+      ORDER BY workflow_slug, run_id, timestamp_us ASC, event_id ASC`, values);
+      const eventsByRun = new Map;
+      for (const row of result.rows) {
+        const key = `${row.workflow_slug}:${row.run_id}`;
+        const events = eventsByRun.get(key) || [];
+        events.push({ ...row.event_data, eventId: row.event_id });
+        eventsByRun.set(key, events);
+      }
+      return eventsByRun;
+    } finally {
+      client.release();
+    }
+  }
+  async getQueueDepthByWorkflowAggregation() {
+    const client = await this.pool.connect();
+    try {
+      const result = await client.query(`WITH
+      -- Get the latest workflow event per run to determine run status
+      run_status AS (
+        SELECT DISTINCT ON (workflow_slug, run_id)
+          workflow_slug,
+          run_id,
+          type,
+          timestamp_us AS created_at
+        FROM ${this.schema}.workflow_events
+        ORDER BY workflow_slug, run_id, timestamp_us DESC, event_id DESC
+      ),
+      -- Filter to only active (pending/running) runs
+      active_runs AS (
+        SELECT
+          workflow_slug,
+          run_id,
+          CASE WHEN type IN ('RunSubmitted', 'WorkflowRetryStarted') THEN 'pending' ELSE 'running' END AS status,
+          created_at
+        FROM run_status
+        WHERE type IN ('RunSubmitted', 'WorkflowRetryStarted', 'WorkflowStarted', 'WorkflowResumed')
+      ),
+      -- Get latest step event per step (excluding LogEntry)
+      latest_step_events AS (
+        SELECT DISTINCT ON (se.workflow_slug, se.run_id, se.step_id)
+          se.workflow_slug,
+          se.type,
+          se.available_at_us
+        FROM ${this.schema}.step_events se
+        INNER JOIN active_runs ar
+          ON ar.workflow_slug = se.workflow_slug
+          AND ar.run_id = se.run_id
+          AND ar.status = 'running'
+        WHERE se.type != 'LogEntry'
+        ORDER BY se.workflow_slug, se.run_id, se.step_id, se.timestamp_us DESC, se.event_id DESC
+      ),
+      -- Aggregate step counts by workflow
+      step_counts_by_workflow AS (
+        SELECT
+          workflow_slug,
+          COUNT(*) FILTER (WHERE type IN ('StepScheduled', 'StepReclaimed')) AS scheduled_steps,
+          MIN(available_at_us) FILTER (WHERE type IN ('StepScheduled', 'StepReclaimed')) AS oldest_scheduled_us
+        FROM latest_step_events
+        GROUP BY workflow_slug
+      ),
+      -- Aggregate run counts by workflow
+      run_counts_by_workflow AS (
+        SELECT
+          workflow_slug,
+          COUNT(*) FILTER (WHERE status = 'pending') AS pending_runs,
+          MIN(created_at) FILTER (WHERE status = 'pending') AS oldest_pending_run_us
+        FROM active_runs
+        GROUP BY workflow_slug
+      )
+      SELECT
+        COALESCE(r.workflow_slug, s.workflow_slug) AS workflow_slug,
+        COALESCE(r.pending_runs, 0) AS pending_runs,
+        COALESCE(s.scheduled_steps, 0) AS scheduled_steps,
+        LEAST(r.oldest_pending_run_us, s.oldest_scheduled_us) AS oldest_pending_item_us
+      FROM run_counts_by_workflow r
+      FULL OUTER JOIN step_counts_by_workflow s ON r.workflow_slug = s.workflow_slug
+      WHERE COALESCE(r.pending_runs, 0) > 0 OR COALESCE(s.scheduled_steps, 0) > 0`);
+      return result.rows.map((row) => ({
+        workflowSlug: row.workflow_slug,
+        pendingRuns: parseInt(row.pending_runs, 10),
+        scheduledSteps: parseInt(row.scheduled_steps, 10),
+        oldestPendingItemUs: row.oldest_pending_item_us ? parseInt(row.oldest_pending_item_us, 10) : null
+      }));
+    } finally {
+      client.release();
+    }
+  }
 }
 function createPool(connectionString) {
   return new Pool2({ connectionString });
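
Note: these four new DatabaseClient methods move work that 0.2.7 did in application code into single SQL round trips. The recurring pattern is DISTINCT ON (...) paired with ORDER BY ... timestamp_us DESC, event_id DESC, which picks the latest event per run (or per step), letting FILTER aggregates count states without replaying events in process. A hypothetical call, assuming a connected DatabaseClient named db:

// Postgres returns COUNT(*) as a bigint string, hence the parseInt calls in
// the method itself; callers receive plain numbers (or null timestamps).
const depth = await db.getQueueDepthAggregation("send-email");
console.log(depth.pendingRuns, depth.scheduledSteps, depth.oldestPendingRunUs);
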
@@ -5671,6 +6001,60 @@ async function migration005_addWorkflowVersioning(pool, schema) {
     client.release();
   }
 }
+async function migration006_addDescIndexes(pool, schema) {
+  const client = await pool.connect();
+  try {
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_workflow_events_latest
+      ON ${schema}.workflow_events (workflow_slug, run_id, timestamp_us DESC, event_id DESC)
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_step_events_latest
+      ON ${schema}.step_events (workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC)
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_workflow_events_by_type
+      ON ${schema}.workflow_events (type, workflow_slug, run_id, timestamp_us ASC)
+    `);
+    console.log("[Migration 006] Optimized query indexes added successfully");
+  } catch (error) {
+    console.error("[Migration 006] Error adding DESC indexes:", error);
+    throw error;
+  } finally {
+    client.release();
+  }
+}
+async function migration007_addWorkerIndexes(pool, schema) {
+  const client = await pool.connect();
+  try {
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_step_events_scheduled_status
+      ON ${schema}.step_events (type, workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC)
+      WHERE type IN ('StepScheduled', 'StepReclaimed', 'StepRetrying')
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_step_events_heartbeat_status
+      ON ${schema}.step_events (type, workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC)
+      WHERE type IN ('StepStarted', 'StepHeartbeat')
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_workflow_events_active_types
+      ON ${schema}.workflow_events (type)
+      WHERE type IN ('RunSubmitted', 'WorkflowStarted', 'WorkflowResumed')
+    `);
+    await client.query(`
+      CREATE INDEX IF NOT EXISTS idx_step_events_active_types
+      ON ${schema}.step_events (type)
+      WHERE type IN ('StepScheduled', 'StepStarted', 'StepReclaimed', 'StepRetrying')
+    `);
+    console.log("[Migration 007] Worker loop indexes added successfully");
+  } catch (error) {
+    console.error("[Migration 007] Error adding worker indexes:", error);
+    throw error;
+  } finally {
+    client.release();
+  }
+}
 async function runMigrations(pool, schema = "cascadeflow") {
   console.log(`[Migrations] Starting database migrations in schema '${schema}'...`);
   try {
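
Note: migration 006's composite DESC indexes match the ORDER BY ... timestamp_us DESC, event_id DESC ordering used by the DISTINCT ON queries above, and migration 007's partial indexes cover only hot event types, which keeps them small. A partial index is only usable when a query's WHERE clause implies the index predicate; a hypothetical probe against the scheduled-status index (assumes the pg driver, an ESM context, and the default cascadeflow schema):

import { Pool } from "pg";

const pool = new Pool({ connectionString: process.env.DATABASE_URL });
// This predicate is a subset of idx_step_events_scheduled_status's WHERE
// clause, so the planner is free to use the partial index.
const { rows } = await pool.query(
  `SELECT DISTINCT ON (workflow_slug, run_id, step_id) step_id, type
     FROM cascadeflow.step_events
    WHERE type IN ('StepScheduled', 'StepReclaimed')
    ORDER BY workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC`
);
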
@@ -5680,6 +6064,8 @@ async function runMigrations(pool, schema = "cascadeflow") {
     await migration003_createIndexes(pool, schema);
     await migration004_addErrorFingerprints(pool, schema);
     await migration005_addWorkflowVersioning(pool, schema);
+    await migration006_addDescIndexes(pool, schema);
+    await migration007_addWorkerIndexes(pool, schema);
     console.log("[Migrations] All migrations completed successfully");
   } catch (error) {
     console.error("[Migrations] Migration failed:", error);
@@ -5703,6 +6089,20 @@ class PostgresBackend extends Backend {
     await runMigrations(this.db.getPool(), this.db.getSchema());
     this.initialized = true;
   }
+  async backendReady() {
+    try {
+      const client = await this.db.getPool().connect();
+      try {
+        await client.query("SELECT 1");
+        return true;
+      } finally {
+        client.release();
+      }
+    } catch (error) {
+      console.error("Backend health check failed:", error);
+      return false;
+    }
+  }
   generateRunId() {
     return `run_${Date.now()}_${Math.random().toString(36).substring(2, 15)}`;
   }
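
Note: backendReady() borrows a pooled connection, runs SELECT 1, and reports false rather than throwing, which makes it suitable for polling. A hypothetical readiness probe built on it (the HTTP framework and route are assumptions, not part of this package):

import express from "express";

// Stands in for a constructed, initialized PostgresBackend instance.
declare const backend: { backendReady(): Promise<boolean> };

const app = express();
app.get("/readyz", async (_req, res) => {
  const ok = await backend.backendReady();
  res.status(ok ? 200 : 503).json({ ok });
});
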
@@ -5766,6 +6166,203 @@ class PostgresBackend extends Backend {
       return this.db.loadAllRunEvents(workflowSlug, runId);
     }
   }
+  async loadStepEventsForProjection(workflowSlug, runId) {
+    return this.db.loadStepEventsForProjection(workflowSlug, runId);
+  }
+  async copyEntireRun(workflowSlug, sourceRunId, targetRunId) {
+    await this.copyEntireRunWithClient(workflowSlug, sourceRunId, targetRunId, this.db.getPool());
+  }
+  async copyEntireRunWithClient(workflowSlug, sourceRunId, targetRunId, client) {
+    const schema = this.db.getSchema();
+    await client.query(`INSERT INTO ${schema}.workflow_events (
+      event_id, workflow_slug, run_id, timestamp_us, category, type, event_data, created_at,
+      workflow_attempt_number, available_at_us, priority, timeout_us, idempotency_key, version_id
+    )
+    SELECT
+      gen_random_uuid()::text,
+      workflow_slug,
+      $2,
+      timestamp_us,
+      category,
+      type,
+      jsonb_set(event_data, '{runId}', to_jsonb($2::text)),
+      created_at,
+      workflow_attempt_number,
+      available_at_us,
+      priority,
+      timeout_us,
+      idempotency_key,
+      version_id
+    FROM ${schema}.workflow_events
+    WHERE workflow_slug = $1 AND run_id = $3`, [workflowSlug, targetRunId, sourceRunId]);
+    await client.query(`INSERT INTO ${schema}.step_events (
+      event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data, created_at,
+      worker_id, attempt_number, available_at_us, export_output,
+      error_name_hash, error_message_hash, error_stack_exact_hash, error_stack_normalized_hash, error_stack_portable_hash,
+      version_id
+    )
+    SELECT
+      gen_random_uuid()::text,
+      workflow_slug,
+      $2,
+      step_id,
+      timestamp_us,
+      category,
+      type,
+      jsonb_set(event_data, '{runId}', to_jsonb($2::text)),
+      created_at,
+      worker_id,
+      attempt_number,
+      available_at_us,
+      export_output,
+      error_name_hash,
+      error_message_hash,
+      error_stack_exact_hash,
+      error_stack_normalized_hash,
+      error_stack_portable_hash,
+      version_id
+    FROM ${schema}.step_events
+    WHERE workflow_slug = $1 AND run_id = $3`, [workflowSlug, targetRunId, sourceRunId]);
+  }
+  async deleteStepEvents(workflowSlug, runId, stepIds) {
+    await this.deleteStepEventsWithClient(workflowSlug, runId, stepIds, this.db.getPool());
+  }
+  async deleteStepEventsWithClient(workflowSlug, runId, stepIds, client) {
+    if (stepIds.size === 0)
+      return;
+    const schema = this.db.getSchema();
+    const stepIdsArray = Array.from(stepIds);
+    await client.query(`DELETE FROM ${schema}.step_events
+    WHERE workflow_slug = $1 AND run_id = $2 AND step_id = ANY($3)`, [workflowSlug, runId, stepIdsArray]);
+  }
+  async deleteWorkflowTerminalEvents(workflowSlug, runId) {
+    await this.deleteWorkflowTerminalEventsWithClient(workflowSlug, runId, this.db.getPool());
+  }
+  async deleteWorkflowTerminalEventsWithClient(workflowSlug, runId, client) {
+    const schema = this.db.getSchema();
+    await client.query(`DELETE FROM ${schema}.workflow_events
+    WHERE workflow_slug = $1
+      AND run_id = $2
+      AND type IN ('WorkflowCompleted', 'WorkflowFailed', 'WorkflowCancelled')`, [workflowSlug, runId]);
+  }
+  async copyWorkflowEvents(workflowSlug, sourceRunId, targetRunId, excludeTerminal) {
+    const pool = this.db.getPool();
+    const schema = this.db.getSchema();
+    const terminalFilter = excludeTerminal ? `AND type NOT IN ('WorkflowCompleted', 'WorkflowFailed', 'WorkflowCancelled')` : "";
+    await pool.query(`INSERT INTO ${schema}.workflow_events (
+      event_id, workflow_slug, run_id, timestamp_us, category, type, event_data, created_at,
+      workflow_attempt_number, available_at_us, priority, timeout_us, idempotency_key, version_id
+    )
+    SELECT
+      gen_random_uuid()::text,
+      workflow_slug,
+      $3,
+      timestamp_us,
+      category,
+      type,
+      jsonb_set(event_data, '{runId}', to_jsonb($3::text)),
+      created_at,
+      workflow_attempt_number,
+      available_at_us,
+      priority,
+      timeout_us,
+      idempotency_key,
+      version_id
+    FROM ${schema}.workflow_events
+    WHERE workflow_slug = $1
+      AND run_id = $2
+      ${terminalFilter}`, [workflowSlug, sourceRunId, targetRunId]);
+  }
+  async copyStepEvents(workflowSlug, sourceRunId, targetRunId, includeStepIds) {
+    const pool = this.db.getPool();
+    const schema = this.db.getSchema();
+    const stepIdsArray = Array.from(includeStepIds);
+    if (stepIdsArray.length === 0)
+      return;
+    await pool.query(`INSERT INTO ${schema}.step_events (
+      event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data, created_at,
+      worker_id, attempt_number, available_at_us, export_output,
+      error_name_hash, error_message_hash, error_stack_exact_hash, error_stack_normalized_hash, error_stack_portable_hash,
+      version_id
+    )
+    SELECT
+      gen_random_uuid()::text,
+      workflow_slug,
+      $3,
+      step_id,
+      timestamp_us,
+      category,
+      type,
+      jsonb_set(event_data, '{runId}', to_jsonb($3::text)),
+      created_at,
+      worker_id,
+      attempt_number,
+      available_at_us,
+      export_output,
+      error_name_hash,
+      error_message_hash,
+      error_stack_exact_hash,
+      error_stack_normalized_hash,
+      error_stack_portable_hash,
+      version_id
+    FROM ${schema}.step_events
+    WHERE workflow_slug = $1
+      AND run_id = $2
+      AND step_id = ANY($4)`, [workflowSlug, sourceRunId, targetRunId, stepIdsArray]);
+  }
+  async rerunFrom(params) {
+    const parentRun = await this.getRun(params.parentRunId);
+    if (!parentRun) {
+      throw new Error(`Parent run "${params.parentRunId}" not found`);
+    }
+    const dependents = await this.calculateDependents(parentRun.workflowSlug, params.fromStepId);
+    const rerunStepIds = new Set([params.fromStepId, ...dependents]);
+    const newRunId = getMicrosecondTimestamp().toString();
+    const pool = this.db.getPool();
+    const client = await pool.connect();
+    try {
+      await client.query("BEGIN");
+      await this.copyEntireRunWithClient(parentRun.workflowSlug, params.parentRunId, newRunId, client);
+      await this.deleteStepEventsWithClient(parentRun.workflowSlug, newRunId, rerunStepIds, client);
+      await this.deleteWorkflowTerminalEventsWithClient(parentRun.workflowSlug, newRunId, client);
+      await client.query("COMMIT");
+    } catch (error) {
+      await client.query("ROLLBACK");
+      throw error;
+    } finally {
+      client.release();
+    }
+    const currentVersion = await this.getCurrentWorkflowVersion(parentRun.workflowSlug);
+    if (!currentVersion) {
+      throw new Error(`Workflow ${parentRun.workflowSlug} not registered. Please ensure the worker has started and registered workflows.`);
+    }
+    const parentWorkflowEvents = await this.loadEvents(parentRun.workflowSlug, params.parentRunId, {
+      category: "workflow"
+    });
+    const parentVersionId = getVersionIdFromEvents(parentWorkflowEvents);
+    const timestamp = getMicrosecondTimestamp();
+    await this.appendEvent(parentRun.workflowSlug, newRunId, {
+      category: "workflow",
+      type: "WorkflowRerunFromStep",
+      eventId: this.generateEventId(timestamp),
+      timestampUs: timestamp,
+      workflowSlug: parentRun.workflowSlug,
+      runId: newRunId,
+      parentRunId: params.parentRunId,
+      rerunFromStepId: params.fromStepId,
+      rerunStepIds: Array.from(rerunStepIds),
+      versionId: currentVersion.versionId,
+      parentVersionId
+    });
+    await this.submitRun({
+      workflowSlug: parentRun.workflowSlug,
+      runId: newRunId,
+      input: params.input
+    });
+    return {
+      runId: newRunId
+    };
+  }
   async saveStepScheduled(workflowSlug, runId, stepId, metadata) {
     const now = getMicrosecondTimestamp();
     const event = {
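
Note: rerunFrom copies the parent run wholesale inside a BEGIN/COMMIT transaction, deletes the target step plus its dependents and any terminal workflow events from the copy, records a WorkflowRerunFromStep event, and resubmits. A hypothetical call, assuming a PostgresBackend named backend and step ids from a registered workflow:

const { runId: rerunId } = await backend.rerunFrom({
  parentRunId: "run_1700000000000_abc123", // hypothetical existing run
  fromStepId: "transform",                 // this step and its dependents re-execute
  input: { userId: 42 },                   // forwarded to submitRun
});
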
@@ -5802,8 +6399,7 @@ class PostgresBackend extends Backend {
     await this.db.appendEvent("step_events", event);
   }
   async saveStepComplete(workflowSlug, runId, stepId, output, metadata, exportOutput = false) {
-    const
-    const attemptNumber = getCurrentAttemptNumber(events);
+    const attemptNumber = metadata.attemptNumber;
     if (attemptNumber === 0) {
       throw new Error(`Cannot complete step that hasn't started: ${stepId}`);
     }
@@ -5942,6 +6538,40 @@ class PostgresBackend extends Backend {
     };
     await this.db.appendEvent("step_events", event);
   }
+  async saveStepCheckpoint(workflowSlug, runId, stepId, checkpoint) {
+    const now = getMicrosecondTimestamp();
+    const event = {
+      category: "step",
+      type: "StepCheckpoint",
+      eventId: this.generateEventId(now),
+      timestampUs: now,
+      workflowSlug,
+      runId,
+      stepId,
+      name: checkpoint.name,
+      sequenceNumber: checkpoint.sequenceNumber,
+      attemptNumber: checkpoint.attemptNumber,
+      data: checkpoint.data
+    };
+    await this.db.appendEvent("step_events", event);
+  }
+  async saveStepCheckpointFailed(workflowSlug, runId, stepId, checkpoint) {
+    const now = getMicrosecondTimestamp();
+    const event = {
+      category: "step",
+      type: "StepCheckpointFailed",
+      eventId: this.generateEventId(now),
+      timestampUs: now,
+      workflowSlug,
+      runId,
+      stepId,
+      name: checkpoint.name,
+      sequenceNumber: checkpoint.sequenceNumber,
+      attemptNumber: checkpoint.attemptNumber,
+      error: checkpoint.error
+    };
+    await this.db.appendEvent("step_events", event);
+  }
   async saveStepHeartbeat(workflowSlug, runId, stepId, workerId, attemptNumber) {
     const now = getMicrosecondTimestamp();
     const event = {
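
Note: the two checkpoint writers persist mirror-image events, StepCheckpoint carrying data and StepCheckpointFailed carrying error. A hypothetical success-path call, assuming a PostgresBackend named backend; the comments are a reading of the field names, not documented semantics:

await backend.saveStepCheckpoint("send-email", "run_123", "render-template", {
  name: "html-rendered",  // checkpoint label within the step
  sequenceNumber: 1,      // ordering among the step's checkpoints
  attemptNumber: 1,       // attempt the checkpoint belongs to
  data: { bytes: 20480 }, // checkpoint payload
});
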
@@ -6057,6 +6687,7 @@ class PostgresBackend extends Backend {
       timestampUs: timestamp,
       workflowSlug,
       runId,
+      versionId: metadata.versionId,
       originalRunId: metadata.originalRunId,
       resumedSteps: metadata.resumedSteps,
       pendingSteps: metadata.pendingSteps
@@ -6150,35 +6781,39 @@ class PostgresBackend extends Backend {
     return { runId, isNew: true };
   }
   async listRuns(options) {
+    const filteredRuns = await this.db.listRunsFiltered({
+      workflowSlug: options?.workflowSlug,
+      status: options?.status,
+      limit: options?.tags?.length ? undefined : options?.limit
+    });
+    let runsToLoad = filteredRuns;
+    if (options?.tags && options.tags.length > 0) {
+      runsToLoad = filteredRuns.filter((run) => {
+        const runTags = run.tags || [];
+        return options.tags.every((tag) => runTags.includes(tag));
+      });
+      if (options?.limit) {
+        runsToLoad = runsToLoad.slice(0, options.limit);
+      }
+    }
+    if (runsToLoad.length === 0) {
+      return [];
+    }
+    const eventsByRun = await this.db.loadWorkflowEventsForRuns(runsToLoad.map((r) => ({ workflowSlug: r.workflowSlug, runId: r.runId })));
     const allRuns = [];
-    const
-
-    const
-
-
-
-
-
-
-
-        if (options?.status && options.status.length > 0) {
-          if (!options.status.includes(state.status))
-            continue;
-        }
-        if (options?.tags && options.tags.length > 0) {
-          const stateTags = state.tags || [];
-          const hasAllTags = options.tags.every((tag) => stateTags.includes(tag));
-          if (!hasAllTags)
-            continue;
-        }
-        allRuns.push(state);
-      } catch {
-        continue;
-      }
+    for (const run of runsToLoad) {
+      const key = `${run.workflowSlug}:${run.runId}`;
+      const events = eventsByRun.get(key);
+      if (!events || events.length === 0)
+        continue;
+      try {
+        const state = projectRunStateFromEvents(events, run.workflowSlug);
+        allRuns.push(state);
+      } catch {
+        continue;
       }
     }
-    allRuns
-    return options?.limit ? allRuns.slice(0, options.limit) : allRuns;
+    return allRuns;
   }
   async cancelRun(runId, reason) {
     const allWorkflows = await this.db.listActiveWorkflows();
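
Note: in the rewritten listRuns, status filtering now happens in SQL (via listRunsFiltered) while tag filtering stays in process; when tags are given, the SQL LIMIT is deliberately skipped and re-applied only after the tag filter, so matching runs are not cut off by a premature limit. A hypothetical call, assuming a PostgresBackend named backend:

const runs = await backend.listRuns({
  workflowSlug: "send-email",
  status: ["pending", "running"],
  tags: ["customer:acme"], // every listed tag must be present on the run
  limit: 50,               // applied after the in-memory tag filter
});
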
@@ -6208,8 +6843,7 @@ class PostgresBackend extends Backend {
     for (const workflowSlug of allWorkflows) {
       const runIds = await this.db.listRunIds(workflowSlug);
       if (runIds.includes(runId)) {
-        const
-        const workflowEvents = events.filter((e) => e.category === "workflow");
+        const workflowEvents = await this.loadEvents(workflowSlug, runId, { category: "workflow" });
         return projectRunStateFromEvents(workflowEvents, workflowSlug);
       }
     }
@@ -6359,20 +6993,17 @@ class PostgresBackend extends Backend {
     ]);
   }
   async getWorkflowVersion(workflowSlug, versionId) {
-    const
-
-      WHERE workflow_slug = $1 AND version_id = $2
-    `, [workflowSlug, versionId]);
-    if (result.rows.length === 0)
+    const row = await this.db.getWorkflowVersion(workflowSlug, versionId);
+    if (!row)
       return null;
-    const row = result.rows[0];
     return {
       workflowSlug: row.workflow_slug,
       versionId: row.version_id,
+      versionNumber: parseInt(row.version_number.toString(), 10),
       createdAt: Math.floor(new Date(row.created_at).getTime() * 1000),
       stepManifest: row.step_manifest,
       totalSteps: row.total_steps,
-      git: row.git_commit ? {
+      git: row.git_commit && row.git_dirty !== null && row.git_branch !== null ? {
         commit: row.git_commit,
         dirty: row.git_dirty,
         branch: row.git_branch
@@ -6380,22 +7011,17 @@ class PostgresBackend extends Backend {
     };
   }
   async getCurrentWorkflowVersion(workflowSlug) {
-    const
-
-      WHERE workflow_slug = $1
-      ORDER BY created_at DESC
-      LIMIT 1
-    `, [workflowSlug]);
-    if (result.rows.length === 0)
+    const row = await this.db.getCurrentWorkflowVersion(workflowSlug);
+    if (!row)
       return null;
-    const row = result.rows[0];
     return {
       workflowSlug: row.workflow_slug,
       versionId: row.version_id,
+      versionNumber: parseInt(row.version_number.toString(), 10),
       createdAt: Math.floor(new Date(row.created_at).getTime() * 1000),
       stepManifest: row.step_manifest,
       totalSteps: row.total_steps,
-      git: row.git_commit ? {
+      git: row.git_commit && row.git_dirty !== null && row.git_branch !== null ? {
         commit: row.git_commit,
         dirty: row.git_dirty,
         branch: row.git_branch
@@ -6404,19 +7030,15 @@ class PostgresBackend extends Backend {
   }
   async listWorkflowVersions(workflowSlug, options) {
     const limit = options?.limit ?? 100;
-    const
-
-      WHERE workflow_slug = $1
-      ORDER BY created_at DESC
-      LIMIT $2
-    `, [workflowSlug, limit]);
-    return result.rows.map((row) => ({
+    const rows = await this.db.listWorkflowVersions(workflowSlug, limit);
+    return rows.map((row) => ({
      workflowSlug: row.workflow_slug,
      versionId: row.version_id,
+      versionNumber: parseInt(row.version_number.toString(), 10),
      createdAt: Math.floor(new Date(row.created_at).getTime() * 1000),
      stepManifest: row.step_manifest,
      totalSteps: row.total_steps,
-      git: row.git_commit ? {
+      git: row.git_commit && row.git_dirty !== null && row.git_branch !== null ? {
        commit: row.git_commit,
        dirty: row.git_dirty,
        branch: row.git_branch
@@ -6431,7 +7053,7 @@ class PostgresBackend extends Backend {
     const startUs = options?.startUs ?? now - 24 * 60 * 60 * 1000 * 1000;
     const endUs = options?.endUs ?? now;
     let stepQuery = `
-      SELECT event_data
+      SELECT event_data, event_id
       FROM ${this.db.getSchema()}.step_events
       WHERE timestamp_us >= $1 AND timestamp_us <= $2
     `;
@@ -6455,7 +7077,7 @@ class PostgresBackend extends Backend {
     }
     stepQuery += ` ORDER BY timestamp_us ASC`;
     let workflowQuery = `
-      SELECT event_data
+      SELECT event_data, event_id
      FROM ${this.db.getSchema()}.workflow_events
      WHERE timestamp_us >= $1 AND timestamp_us <= $2
     `;
@@ -6477,8 +7099,8 @@ class PostgresBackend extends Backend {
       this.db.getPool().query(stepQuery, stepParams),
       options?.stepId ? Promise.resolve({ rows: [] }) : this.db.getPool().query(workflowQuery, workflowParams)
     ]);
-    const stepEvents = stepResult.rows.map((row) => row.event_data);
-    const workflowEvents = workflowResult.rows.map((row) => row.event_data);
+    const stepEvents = stepResult.rows.map((row) => ({ ...row.event_data, eventId: row.event_id }));
+    const workflowEvents = workflowResult.rows.map((row) => ({ ...row.event_data, eventId: row.event_id }));
     return { stepEvents, workflowEvents };
   }
   async getErrorAnalysis(options) {
@@ -6666,113 +7288,34 @@ class PostgresBackend extends Backend {
     return computeThroughput(stepEvents, workflowEvents, timeRangeUs, options?.workflowSlug);
   }
   async getQueueDepth(options) {
-    const
-      workflowSlug: options?.workflowSlug,
-      status: ["pending", "running"]
-    });
-    let pendingRuns = 0;
-    let runningRuns = 0;
-    let scheduledSteps = 0;
-    let runningSteps = 0;
-    let oldestScheduledStepUs;
-    let oldestPendingRunUs;
-    for (const run of runs) {
-      if (run.status === "pending") {
-        pendingRuns++;
-        if (!oldestPendingRunUs || run.createdAt < oldestPendingRunUs) {
-          oldestPendingRunUs = run.createdAt;
-        }
-      } else if (run.status === "running") {
-        runningRuns++;
-        try {
-          const stepEvents = await this.loadEvents(run.workflowSlug, run.runId, {
-            category: "step"
-          });
-          const eventsByStep = new Map;
-          for (const event of stepEvents) {
-            const events = eventsByStep.get(event.stepId) || [];
-            events.push(event);
-            eventsByStep.set(event.stepId, events);
-          }
-          for (const [stepId, events] of eventsByStep.entries()) {
-            const state = projectStepState(events, run.workflowSlug);
-            if (state.status === "scheduled") {
-              scheduledSteps++;
-              if (state.availableAt && (!oldestScheduledStepUs || state.availableAt < oldestScheduledStepUs)) {
-                oldestScheduledStepUs = state.availableAt;
-              }
-            } else if (state.status === "running") {
-              runningSteps++;
-            }
-          }
-        } catch (error) {
-          continue;
-        }
-      }
-    }
+    const result = await this.db.getQueueDepthAggregation(options?.workflowSlug);
     return {
       workflowSlug: options?.workflowSlug,
-      pendingRuns,
-      runningRuns,
-      scheduledSteps,
-      runningSteps,
-      oldestScheduledStepUs,
-      oldestPendingRunUs
+      pendingRuns: result.pendingRuns,
+      runningRuns: result.runningRuns,
+      scheduledSteps: result.scheduledSteps,
+      runningSteps: result.runningSteps,
+      oldestScheduledStepUs: result.oldestScheduledStepUs ?? undefined,
+      oldestPendingRunUs: result.oldestPendingRunUs ?? undefined
     };
   }
   async getQueueDepthByWorkflow() {
-    const
-
-
-
-        pendingRuns: 0,
-        scheduledSteps: 0
-      };
-      if (run.status === "pending") {
-        existing.pendingRuns++;
-        if (!existing.oldestPendingItemUs || run.createdAt < existing.oldestPendingItemUs) {
-          existing.oldestPendingItemUs = run.createdAt;
-        }
-      } else if (run.status === "running") {
-        try {
-          const stepEvents = await this.loadEvents(run.workflowSlug, run.runId, {
-            category: "step"
-          });
-          const eventsByStep = new Map;
-          for (const event of stepEvents) {
-            const events = eventsByStep.get(event.stepId) || [];
-            events.push(event);
-            eventsByStep.set(event.stepId, events);
-          }
-          for (const [stepId, events] of eventsByStep.entries()) {
-            const state = projectStepState(events, run.workflowSlug);
-            if (state.status === "scheduled") {
-              existing.scheduledSteps++;
-              if (state.availableAt && (!existing.oldestPendingItemUs || state.availableAt < existing.oldestPendingItemUs)) {
-                existing.oldestPendingItemUs = state.availableAt;
-              }
-            }
-          }
-        } catch (error) {
-          continue;
-        }
-      }
-      workflowMap.set(run.workflowSlug, existing);
-    }
-    const allMetadata = await this.listWorkflowMetadata();
+    const [aggregation, allMetadata] = await Promise.all([
+      this.db.getQueueDepthByWorkflowAggregation(),
+      this.listWorkflowMetadata()
+    ]);
     const metadataMap = new Map(allMetadata.map((m) => [m.slug, m]));
-
-      workflowSlug,
-      workflowName: metadataMap.get(workflowSlug)?.name,
-      pendingRuns:
-      scheduledSteps:
-      oldestPendingItemUs:
-    })).
+    return aggregation.map((item) => ({
+      workflowSlug: item.workflowSlug,
+      workflowName: metadataMap.get(item.workflowSlug)?.name,
+      pendingRuns: item.pendingRuns,
+      scheduledSteps: item.scheduledSteps,
+      oldestPendingItemUs: item.oldestPendingItemUs ?? undefined
+    })).sort((a, b) => {
       const aTotal = a.pendingRuns + a.scheduledSteps;
       const bTotal = b.pendingRuns + b.scheduledSteps;
       return bTotal - aTotal;
     });
-    return result;
   }
   async getSuccessRate(options) {
     const { stepEvents, workflowEvents } = await this.loadEventsForAnalytics(options);
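
Note: getQueueDepth and getQueueDepthByWorkflow previously loaded and projected step events per running run in a loop (an N+1 pattern); each now resolves from a single aggregation query, with getQueueDepthByWorkflow fetching the aggregation and workflow metadata concurrently. Hypothetical usage, assuming a PostgresBackend named backend:

const depth = await backend.getQueueDepth({ workflowSlug: "send-email" });
// { workflowSlug, pendingRuns, runningRuns, scheduledSteps, runningSteps,
//   oldestScheduledStepUs?, oldestPendingRunUs? }

const byWorkflow = await backend.getQueueDepthByWorkflow();
// entries sorted by pendingRuns + scheduledSteps, descending
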
@@ -6825,4 +7368,4 @@ export {
   PostgresBackend
 };
 
-//# debugId=
+//# debugId=E1D15906F1E05F4664756E2164756E21