@cascade-flow/backend-postgres 0.1.0 → 0.1.1
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/db.d.ts +112 -96
- package/dist/db.d.ts.map +1 -1
- package/dist/index.d.ts +3 -2
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +510 -460
- package/dist/index.js.map +5 -5
- package/dist/migrations.d.ts +1 -1
- package/dist/migrations.d.ts.map +1 -1
- package/package.json +1 -1
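At a glance, 0.1.1 replaces the module-level helpers in `dist/index.js` (`appendEvent`, `loadEvents`, `loadAllRunEvents`, `claimScheduledStep`, ...) with a `DatabaseClient` class that holds the pg `Pool` together with a schema name and qualifies every table reference as `${schema}.table`; the migrations gain a matching `schema` parameter (default `"cascadeflow"`) plus a new `migration000_createSchema`, and `PostgresBackend` now takes the schema as a second constructor argument. A minimal usage sketch follows, assuming the package exports `PostgresBackend` from its root — only the bundled `dist/index.js` appears in this diff, so the import path is an assumption:

```ts
import { PostgresBackend } from "@cascade-flow/backend-postgres"; // assumed export path

// The diff shows: constructor(connectionString, schema = "cascadeflow")
const backend = new PostgresBackend(
  "postgres://user:pass@localhost:5432/app", // passed to pg's Pool via createPool()
  "cascadeflow"                              // optional; all tables are now schema-qualified
);

// initialize() runs migrations 000-004, which create the schema and its tables/indexes.
await backend.initialize();
```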
package/dist/index.js
CHANGED
@@ -4773,12 +4773,117 @@ var esm_default = import_lib.default;

 // src/db.ts
 var { Pool: Pool2 } = esm_default;
-
-
-
-
-
-
+
+class DatabaseClient {
+pool;
+schema;
+constructor(pool, schema) {
+this.pool = pool;
+this.schema = schema;
+}
+getPool() {
+return this.pool;
+}
+getSchema() {
+return this.schema;
+}
+async appendEvent(table, event) {
+const client = await this.pool.connect();
+try {
+if (table === "workflow_events") {
+const we = event;
+let workflowAttemptNumber = null;
+let availableAtUs = null;
+let priority = null;
+let timeoutUs = null;
+let idempotencyKey = null;
+if (we.type === "RunSubmitted") {
+availableAtUs = we.availableAtUs;
+priority = we.priority;
+timeoutUs = we.timeoutUs ?? null;
+idempotencyKey = we.idempotencyKey ?? null;
+} else if ("workflowAttemptNumber" in we) {
+workflowAttemptNumber = we.workflowAttemptNumber;
+}
+await client.query(`INSERT INTO ${this.schema}.workflow_events (
+event_id, workflow_slug, run_id, timestamp_us, category, type, event_data,
+workflow_attempt_number, available_at_us, priority, timeout_us, idempotency_key
+)
+VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, [
+we.eventId,
+we.workflowSlug,
+we.runId,
+we.timestampUs,
+we.category,
+we.type,
+JSON.stringify(event),
+workflowAttemptNumber,
+availableAtUs,
+priority,
+timeoutUs,
+idempotencyKey
+]);
+} else {
+const se = event;
+let workerId = null;
+let attemptNumber = null;
+let availableAtUs = null;
+let exportOutput = null;
+let errorNameHash = "";
+let errorMessageHash = "";
+let errorStackExactHash = "";
+let errorStackNormalizedHash = "";
+let errorStackPortableHash = "";
+if (se.type === "StepStarted" || se.type === "StepHeartbeat") {
+workerId = se.workerId;
+}
+if ("attemptNumber" in se) {
+attemptNumber = se.attemptNumber;
+}
+if (se.type === "StepScheduled") {
+availableAtUs = se.availableAtUs;
+}
+if (se.type === "StepCompleted") {
+exportOutput = se.exportOutput;
+}
+if (se.type === "StepFailed") {
+errorNameHash = se.errorFingerprints.nameHash;
+errorMessageHash = se.errorFingerprints.messageHash;
+errorStackExactHash = se.errorFingerprints.stackExactHash;
+errorStackNormalizedHash = se.errorFingerprints.stackNormalizedHash;
+errorStackPortableHash = se.errorFingerprints.stackPortableHash;
+}
+await client.query(`INSERT INTO ${this.schema}.step_events (
+event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
+worker_id, attempt_number, available_at_us, export_output,
+error_name_hash, error_message_hash, error_stack_exact_hash,
+error_stack_normalized_hash, error_stack_portable_hash
+)
+VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, [
+se.eventId,
+se.workflowSlug,
+se.runId,
+se.stepId,
+se.timestampUs,
+se.category,
+se.type,
+JSON.stringify(event),
+workerId,
+attemptNumber,
+availableAtUs,
+exportOutput,
+errorNameHash,
+errorMessageHash,
+errorStackExactHash,
+errorStackNormalizedHash,
+errorStackPortableHash
+]);
+}
+} finally {
+client.release();
+}
+}
+async appendEventWithClient(client, table, event) {
 if (table === "workflow_events") {
 const we = event;
 let workflowAttemptNumber = null;
@@ -4794,7 +4899,7 @@ async function appendEvent(pool, table, event) {
 } else if ("workflowAttemptNumber" in we) {
 workflowAttemptNumber = we.workflowAttemptNumber;
 }
-await client.query(`INSERT INTO workflow_events (
+await client.query(`INSERT INTO ${this.schema}.workflow_events (
 event_id, workflow_slug, run_id, timestamp_us, category, type, event_data,
 workflow_attempt_number, available_at_us, priority, timeout_us, idempotency_key
 )
@@ -4842,7 +4947,7 @@ async function appendEvent(pool, table, event) {
 errorStackNormalizedHash = se.errorFingerprints.stackNormalizedHash;
 errorStackPortableHash = se.errorFingerprints.stackPortableHash;
 }
-await client.query(`INSERT INTO step_events (
+await client.query(`INSERT INTO ${this.schema}.step_events (
 event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
 worker_id, attempt_number, available_at_us, export_output,
 error_name_hash, error_message_hash, error_stack_exact_hash,
@@ -4868,239 +4973,145 @@ async function appendEvent(pool, table, event) {
 errorStackPortableHash
 ]);
 }
-} finally {
-client.release();
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-workflowAttemptNumber,
-availableAtUs,
-priority,
-timeoutUs,
-idempotencyKey
-]);
-} else {
-const se = event;
-let workerId = null;
-let attemptNumber = null;
-let availableAtUs = null;
-let exportOutput = null;
-let errorNameHash = "";
-let errorMessageHash = "";
-let errorStackExactHash = "";
-let errorStackNormalizedHash = "";
-let errorStackPortableHash = "";
-if (se.type === "StepStarted" || se.type === "StepHeartbeat") {
-workerId = se.workerId;
-}
-if ("attemptNumber" in se) {
-attemptNumber = se.attemptNumber;
-}
-if (se.type === "StepScheduled") {
-availableAtUs = se.availableAtUs;
-}
-if (se.type === "StepCompleted") {
-exportOutput = se.exportOutput;
-}
-if (se.type === "StepFailed") {
-errorNameHash = se.errorFingerprints.nameHash;
-errorMessageHash = se.errorFingerprints.messageHash;
-errorStackExactHash = se.errorFingerprints.stackExactHash;
-errorStackNormalizedHash = se.errorFingerprints.stackNormalizedHash;
-errorStackPortableHash = se.errorFingerprints.stackPortableHash;
-}
-await client.query(`INSERT INTO step_events (
-event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
-worker_id, attempt_number, available_at_us, export_output,
-error_name_hash, error_message_hash, error_stack_exact_hash,
-error_stack_normalized_hash, error_stack_portable_hash
-)
-VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, [
-se.eventId,
-se.workflowSlug,
-se.runId,
-se.stepId,
-se.timestampUs,
-se.category,
-se.type,
-JSON.stringify(event),
-workerId,
-attemptNumber,
-availableAtUs,
-exportOutput,
-errorNameHash,
-errorMessageHash,
-errorStackExactHash,
-errorStackNormalizedHash,
-errorStackPortableHash
-]);
-}
-}
-async function loadEvents(pool, table, filters) {
-const client = await pool.connect();
-try {
-const conditions = [];
-const values = [];
-let paramIndex = 1;
-if (filters.workflowSlug) {
-conditions.push(`workflow_slug = $${paramIndex++}`);
-values.push(filters.workflowSlug);
-}
-if (filters.runId) {
-conditions.push(`run_id = $${paramIndex++}`);
-values.push(filters.runId);
-}
-if (filters.stepId && table === "step_events") {
-conditions.push(`step_id = $${paramIndex++}`);
-values.push(filters.stepId);
-}
-if (filters.category) {
-conditions.push(`category = $${paramIndex++}`);
-values.push(filters.category);
-}
-if (filters.types && filters.types.length > 0) {
-conditions.push(`type = ANY($${paramIndex++})`);
-values.push(filters.types);
-}
-const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
-const query = `
-SELECT event_data FROM ${table}
+async loadEvents(table, filters) {
+const client = await this.pool.connect();
+try {
+const conditions = [];
+const values = [];
+let paramIndex = 1;
+if (filters.workflowSlug) {
+conditions.push(`workflow_slug = $${paramIndex++}`);
+values.push(filters.workflowSlug);
+}
+if (filters.runId) {
+conditions.push(`run_id = $${paramIndex++}`);
+values.push(filters.runId);
+}
+if (filters.stepId && table === "step_events") {
+conditions.push(`step_id = $${paramIndex++}`);
+values.push(filters.stepId);
+}
+if (filters.category) {
+conditions.push(`category = $${paramIndex++}`);
+values.push(filters.category);
+}
+if (filters.types && filters.types.length > 0) {
+conditions.push(`type = ANY($${paramIndex++})`);
+values.push(filters.types);
+}
+const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
+const query = `
+SELECT event_data FROM ${this.schema}.${table}
 ${whereClause}
 ORDER BY timestamp_us ASC, event_id ASC
 `;
-
-
-
-
+const result = await client.query(query, values);
+return result.rows.map((row) => row.event_data);
+} finally {
+client.release();
+}
 }
-
-
-
-
-
-SELECT event_data, timestamp_us, event_id FROM workflow_events
+async loadAllRunEvents(workflowSlug, runId) {
+const client = await this.pool.connect();
+try {
+const query = `
+SELECT event_data, timestamp_us, event_id FROM ${this.schema}.workflow_events
 WHERE workflow_slug = $1 AND run_id = $2
 UNION ALL
-SELECT event_data, timestamp_us, event_id FROM step_events
+SELECT event_data, timestamp_us, event_id FROM ${this.schema}.step_events
 WHERE workflow_slug = $1 AND run_id = $2
 ORDER BY timestamp_us ASC, event_id ASC
 `;
-
-
-
-
+const result = await client.query(query, [workflowSlug, runId]);
+return result.rows.map((row) => row.event_data);
+} finally {
+client.release();
+}
 }
-
-
-
-
-
-
-SELECT event_data FROM step_events
+async claimScheduledStep(workflowSlug, runId, stepId, workerId, eventToWrite) {
+const client = await this.pool.connect();
+try {
+await client.query("BEGIN");
+const checkQuery = `
+SELECT event_data FROM ${this.schema}.step_events
 WHERE workflow_slug = $1 AND run_id = $2 AND step_id = $3
 ORDER BY timestamp_us DESC, event_id DESC
 LIMIT 1
 FOR UPDATE SKIP LOCKED
 `;
-
-
-
-
-
-
-
+const checkResult = await client.query(checkQuery, [workflowSlug, runId, stepId]);
+if (checkResult.rows.length === 0) {
+await client.query("ROLLBACK");
+return false;
+}
+const latestEvent = checkResult.rows[0].event_data;
+if (latestEvent.type !== "StepScheduled" && latestEvent.type !== "StepReclaimed" && latestEvent.type !== "StepRetrying") {
+await client.query("ROLLBACK");
+return false;
+}
+let workerId2 = null;
+let attemptNumber = null;
+let errorNameHash = "";
+let errorMessageHash = "";
+let errorStackExactHash = "";
+let errorStackNormalizedHash = "";
+let errorStackPortableHash = "";
+if (eventToWrite.type === "StepStarted") {
+workerId2 = eventToWrite.workerId;
+attemptNumber = eventToWrite.attemptNumber;
+}
+if (eventToWrite.type === "StepFailed") {
+errorNameHash = eventToWrite.errorFingerprints.nameHash;
+errorMessageHash = eventToWrite.errorFingerprints.messageHash;
+errorStackExactHash = eventToWrite.errorFingerprints.stackExactHash;
+errorStackNormalizedHash = eventToWrite.errorFingerprints.stackNormalizedHash;
+errorStackPortableHash = eventToWrite.errorFingerprints.stackPortableHash;
+}
+await client.query(`INSERT INTO ${this.schema}.step_events (
+event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
+worker_id, attempt_number, available_at_us, export_output,
+error_name_hash, error_message_hash, error_stack_exact_hash,
+error_stack_normalized_hash, error_stack_portable_hash
+)
+VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, [
+eventToWrite.eventId,
+eventToWrite.workflowSlug,
+eventToWrite.runId,
+eventToWrite.stepId,
+eventToWrite.timestampUs,
+eventToWrite.category,
+eventToWrite.type,
+JSON.stringify(eventToWrite),
+workerId2,
+attemptNumber,
+null,
+null,
+errorNameHash,
+errorMessageHash,
+errorStackExactHash,
+errorStackNormalizedHash,
+errorStackPortableHash
+]);
+await client.query("COMMIT");
+return true;
+} catch (error) {
 await client.query("ROLLBACK");
-
+throw error;
+} finally {
+client.release();
 }
-let workerId2 = null;
-let attemptNumber = null;
-let errorNameHash = "";
-let errorMessageHash = "";
-let errorStackExactHash = "";
-let errorStackNormalizedHash = "";
-let errorStackPortableHash = "";
-if (eventToWrite.type === "StepStarted") {
-workerId2 = eventToWrite.workerId;
-attemptNumber = eventToWrite.attemptNumber;
-}
-if (eventToWrite.type === "StepFailed") {
-errorNameHash = eventToWrite.errorFingerprints.nameHash;
-errorMessageHash = eventToWrite.errorFingerprints.messageHash;
-errorStackExactHash = eventToWrite.errorFingerprints.stackExactHash;
-errorStackNormalizedHash = eventToWrite.errorFingerprints.stackNormalizedHash;
-errorStackPortableHash = eventToWrite.errorFingerprints.stackPortableHash;
-}
-await client.query(`INSERT INTO step_events (
-event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
-worker_id, attempt_number, available_at_us, export_output,
-error_name_hash, error_message_hash, error_stack_exact_hash,
-error_stack_normalized_hash, error_stack_portable_hash
-)
-VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, [
-eventToWrite.eventId,
-eventToWrite.workflowSlug,
-eventToWrite.runId,
-eventToWrite.stepId,
-eventToWrite.timestampUs,
-eventToWrite.category,
-eventToWrite.type,
-JSON.stringify(eventToWrite),
-workerId2,
-attemptNumber,
-null,
-null,
-errorNameHash,
-errorMessageHash,
-errorStackExactHash,
-errorStackNormalizedHash,
-errorStackPortableHash
-]);
-await client.query("COMMIT");
-return true;
-} catch (error) {
-await client.query("ROLLBACK");
-throw error;
-} finally {
-client.release();
 }
-
-
-
-
-
-
-let query = `
+async listScheduledSteps(options) {
+const client = await this.pool.connect();
+try {
+const currentTimeUs = Date.now() * 1000;
+const scheduledTypes = ["StepScheduled", "StepReclaimed", "StepRetrying"];
+let query = `
 WITH latest_step_events AS (
 SELECT DISTINCT ON (workflow_slug, run_id, step_id)
 workflow_slug, run_id, step_id, type, available_at_us
-FROM step_events
+FROM ${this.schema}.step_events
 ${options?.workflowSlugs ? "WHERE workflow_slug = ANY($1)" : ""}
 ORDER BY workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC
 )
@@ -5110,34 +5121,34 @@ async function listScheduledSteps(pool, options) {
 AND (available_at_us IS NULL OR available_at_us <= $${options?.workflowSlugs ? "3" : "2"})
 ${options?.limit ? `LIMIT $${options?.workflowSlugs ? "4" : "3"}` : ""}
 `;
-
-
-
-
-
-
-
-
+const params = [];
+if (options?.workflowSlugs) {
+params.push(options.workflowSlugs);
+}
+params.push(scheduledTypes);
+params.push(currentTimeUs);
+if (options?.limit) {
+params.push(options.limit);
+}
+const result = await client.query(query, params);
+return result.rows.map((row) => ({
+workflowSlug: row.workflow_slug,
+runId: row.run_id,
+stepId: row.step_id
+}));
+} finally {
+client.release();
 }
-const result = await client.query(query, params);
-return result.rows.map((row) => ({
-workflowSlug: row.workflow_slug,
-runId: row.run_id,
-stepId: row.step_id
-}));
-} finally {
-client.release();
 }
-
-
-
-
-
-const query = `
+async findStaleSteps(staleThresholdUs) {
+const client = await this.pool.connect();
+try {
+const currentTimeUs = Date.now() * 1000;
+const query = `
 WITH latest_step_events AS (
 SELECT DISTINCT ON (workflow_slug, run_id, step_id)
 workflow_slug, run_id, step_id, type, timestamp_us, worker_id
-FROM step_events
+FROM ${this.schema}.step_events
 WHERE type IN ('StepStarted', 'StepHeartbeat')
 ORDER BY workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC
 )
@@ -5145,21 +5156,42 @@ async function findStaleSteps(pool, staleThresholdUs) {
 FROM latest_step_events
 WHERE timestamp_us < $1 AND worker_id IS NOT NULL
 `;
-
-
-
-
-
-
-
-
-
+const result = await client.query(query, [currentTimeUs - staleThresholdUs]);
+return result.rows.map((row) => ({
+workflowSlug: row.workflow_slug,
+runId: row.run_id,
+stepId: row.step_id,
+workerId: row.worker_id
+}));
+} finally {
+client.release();
+}
 }
-
-
-
-
-
+async saveStepOutput(workflowSlug, runId, stepId, attemptNumber, output) {
+const client = await this.pool.connect();
+try {
+await client.query(`INSERT INTO ${this.schema}.step_outputs (workflow_slug, run_id, step_id, attempt_number, output)
+VALUES ($1, $2, $3, $4, $5)
+ON CONFLICT (workflow_slug, run_id, step_id, attempt_number)
+DO UPDATE SET output = EXCLUDED.output`, [workflowSlug, runId, stepId, attemptNumber, JSON.stringify(output)]);
+} finally {
+client.release();
+}
+}
+async loadStepOutput(workflowSlug, runId, stepId, attemptNumber) {
+const client = await this.pool.connect();
+try {
+const result = await client.query(`SELECT output FROM ${this.schema}.step_outputs
+WHERE workflow_slug = $1 AND run_id = $2 AND step_id = $3 AND attempt_number = $4`, [workflowSlug, runId, stepId, attemptNumber]);
+return result.rows.length > 0 ? result.rows[0].output : null;
+} finally {
+client.release();
+}
+}
+async upsertWorkflowMetadata(slug, name, location, inputSchemaJSON) {
+const client = await this.pool.connect();
+try {
+await client.query(`INSERT INTO ${this.schema}.workflow_metadata (slug, name, description, input_schema_json, tags, updated_at)
 VALUES ($1, $2, $3, $4, $5, NOW())
 ON CONFLICT (slug)
 DO UPDATE SET
@@ -5167,153 +5199,169 @@ async function upsertWorkflowMetadata(pool, slug, name, location, inputSchemaJSON) {
 description = EXCLUDED.description,
 input_schema_json = EXCLUDED.input_schema_json,
 updated_at = NOW()`, [
-
-
-
-
-
-
-
-
+slug,
+name,
+location || null,
+inputSchemaJSON ? JSON.stringify(inputSchemaJSON) : null,
+[]
+]);
+} finally {
+client.release();
+}
 }
-
-
-
-
-
-
-
-)
+async upsertStepDefinition(workflowSlug, step) {
+const client = await this.pool.connect();
+try {
+await client.query(`INSERT INTO ${this.schema}.step_definitions (
+workflow_slug, id, dependencies, export_output, input_schema_json,
+timeout_ms, max_retries, retry_delay_ms
+)
 VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
 ON CONFLICT (workflow_slug, id)
 DO UPDATE SET
 dependencies = EXCLUDED.dependencies,
 export_output = EXCLUDED.export_output`, [
-
-
-
-
-
-
-
-
-
-
-
+workflowSlug,
+step.id,
+JSON.stringify(step.dependencies),
+step.exportOutput,
+null,
+null,
+null,
+null
+]);
+} finally {
+client.release();
+}
 }
-
-
-
-
-
-FROM workflow_metadata
+async getWorkflowMetadata(slug) {
+const client = await this.pool.connect();
+try {
+const result = await client.query(`SELECT slug, name, description, input_schema_json
+FROM ${this.schema}.workflow_metadata
 WHERE slug = $1`, [slug]);
-
-
+if (result.rows.length === 0) {
+return null;
+}
+const row = result.rows[0];
+return {
+slug: row.slug,
+name: row.name,
+location: row.description,
+inputSchemaJSON: row.input_schema_json
+};
+} finally {
+client.release();
 }
-const row = result.rows[0];
-return {
-slug: row.slug,
-name: row.name,
-location: row.description,
-inputSchemaJSON: row.input_schema_json
-};
-} finally {
-client.release();
 }
-
-
-
-
-
-FROM workflow_metadata
+async listWorkflowMetadata() {
+const client = await this.pool.connect();
+try {
+const result = await client.query(`SELECT slug, name, description, input_schema_json
+FROM ${this.schema}.workflow_metadata
 ORDER BY name ASC`);
-
-
-
-
-
-
-
-
+return result.rows.map((row) => ({
+slug: row.slug,
+name: row.name,
+location: row.description,
+inputSchemaJSON: row.input_schema_json
+}));
+} finally {
+client.release();
+}
 }
-
-
-
-
-
-FROM step_definitions
+async getWorkflowSteps(workflowSlug) {
+const client = await this.pool.connect();
+try {
+const result = await client.query(`SELECT id, dependencies, export_output
+FROM ${this.schema}.step_definitions
 WHERE workflow_slug = $1
 ORDER BY id ASC`, [workflowSlug]);
-
-
-
-
-
-
-
-
+return result.rows.map((row) => ({
+id: row.id,
+name: row.id,
+dependencies: row.dependencies,
+exportOutput: row.export_output
+}));
+} finally {
+client.release();
+}
 }
-
-
-
-
-const result = await client.query(`INSERT INTO idempotency_keys (hash, run_id)
+async saveIdempotencyKey(hash, runId) {
+const client = await this.pool.connect();
+try {
+const result = await client.query(`INSERT INTO ${this.schema}.idempotency_keys (hash, run_id)
 VALUES ($1, $2)
 ON CONFLICT (hash)
 DO UPDATE SET hash = EXCLUDED.hash
 RETURNING run_id`, [hash, runId]);
-
-
-
+return result.rows[0].run_id;
+} finally {
+client.release();
+}
 }
-
-
-
-
-const result = await client.query(`SELECT DISTINCT run_id FROM workflow_events WHERE workflow_slug = $1
+async listRunIds(workflowSlug) {
+const client = await this.pool.connect();
+try {
+const result = await client.query(`SELECT DISTINCT run_id FROM ${this.schema}.workflow_events WHERE workflow_slug = $1
 UNION
-SELECT DISTINCT run_id FROM step_events WHERE workflow_slug = $1
+SELECT DISTINCT run_id FROM ${this.schema}.step_events WHERE workflow_slug = $1
 ORDER BY run_id DESC`, [workflowSlug]);
-
-
-
+return result.rows.map((row) => row.run_id);
+} finally {
+client.release();
+}
 }
-
-
-
-
-const result = await client.query(`
+async listActiveWorkflows() {
+const client = await this.pool.connect();
+try {
+const result = await client.query(`
 SELECT DISTINCT workflow_slug FROM (
-SELECT DISTINCT workflow_slug FROM workflow_events
+SELECT DISTINCT workflow_slug FROM ${this.schema}.workflow_events
 WHERE type IN ('RunSubmitted', 'WorkflowStarted', 'WorkflowResumed')
 UNION
-SELECT DISTINCT workflow_slug FROM step_events
+SELECT DISTINCT workflow_slug FROM ${this.schema}.step_events
 WHERE type IN ('StepScheduled', 'StepStarted', 'StepReclaimed', 'StepRetrying')
 ) AS active
 ORDER BY workflow_slug ASC
 `);
-
-
-
+return result.rows.map((row) => row.workflow_slug);
+} finally {
+client.release();
+}
 }
+async runExists(workflowSlug, runId) {
+const client = await this.pool.connect();
+try {
+const result = await client.query(`SELECT 1 FROM ${this.schema}.workflow_events WHERE workflow_slug = $1 AND run_id = $2 LIMIT 1`, [workflowSlug, runId]);
+return result.rows.length > 0;
+} finally {
+client.release();
+}
+}
+}
+function createPool(connectionString) {
+return new Pool2({ connectionString });
 }
-
+
+// src/migrations.ts
+async function migration000_createSchema(pool, schema) {
 const client = await pool.connect();
 try {
-
-
+await client.query(`CREATE SCHEMA IF NOT EXISTS ${schema}`);
+console.log(`[Migration 000] Schema '${schema}' created successfully`);
+} catch (error) {
+console.error(`[Migration 000] Error creating schema '${schema}':`, error);
+throw error;
 } finally {
 client.release();
 }
 }
-
-// src/migrations.ts
-async function migration001_createTables(pool) {
+async function migration001_createTables(pool, schema) {
 const client = await pool.connect();
 try {
 await client.query(`
-CREATE TABLE IF NOT EXISTS workflow_events (
+CREATE TABLE IF NOT EXISTS ${schema}.workflow_events (
 id SERIAL PRIMARY KEY,
 event_id TEXT NOT NULL,
 workflow_slug TEXT NOT NULL,
@@ -5326,7 +5374,7 @@ async function migration001_createTables(pool) {
 )
 `);
 await client.query(`
-CREATE TABLE IF NOT EXISTS step_events (
+CREATE TABLE IF NOT EXISTS ${schema}.step_events (
 id SERIAL PRIMARY KEY,
 event_id TEXT NOT NULL,
 workflow_slug TEXT NOT NULL,
@@ -5340,7 +5388,7 @@ async function migration001_createTables(pool) {
 )
 `);
 await client.query(`
-CREATE TABLE IF NOT EXISTS workflow_metadata (
+CREATE TABLE IF NOT EXISTS ${schema}.workflow_metadata (
 slug TEXT PRIMARY KEY,
 name TEXT NOT NULL,
 description TEXT,
@@ -5351,7 +5399,7 @@ async function migration001_createTables(pool) {
 )
 `);
 await client.query(`
-CREATE TABLE IF NOT EXISTS step_definitions (
+CREATE TABLE IF NOT EXISTS ${schema}.step_definitions (
 workflow_slug TEXT NOT NULL,
 id TEXT NOT NULL,
 dependencies JSONB NOT NULL DEFAULT '{}',
@@ -5362,11 +5410,11 @@ async function migration001_createTables(pool) {
 retry_delay_ms INTEGER,
 created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
 PRIMARY KEY (workflow_slug, id),
-FOREIGN KEY (workflow_slug) REFERENCES workflow_metadata(slug) ON DELETE CASCADE
+FOREIGN KEY (workflow_slug) REFERENCES ${schema}.workflow_metadata(slug) ON DELETE CASCADE
 )
 `);
 await client.query(`
-CREATE TABLE IF NOT EXISTS step_outputs (
+CREATE TABLE IF NOT EXISTS ${schema}.step_outputs (
 workflow_slug TEXT NOT NULL,
 run_id TEXT NOT NULL,
 step_id TEXT NOT NULL,
@@ -5377,7 +5425,7 @@ async function migration001_createTables(pool) {
 )
 `);
 await client.query(`
-CREATE TABLE IF NOT EXISTS idempotency_keys (
+CREATE TABLE IF NOT EXISTS ${schema}.idempotency_keys (
 hash TEXT PRIMARY KEY,
 run_id TEXT NOT NULL,
 created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
@@ -5391,18 +5439,18 @@ async function migration001_createTables(pool) {
 client.release();
 }
 }
-async function migration002_addNormalizedColumns(pool) {
+async function migration002_addNormalizedColumns(pool, schema) {
 const client = await pool.connect();
 try {
 await client.query(`
-ALTER TABLE step_events
+ALTER TABLE ${schema}.step_events
 ADD COLUMN IF NOT EXISTS worker_id TEXT,
 ADD COLUMN IF NOT EXISTS attempt_number INTEGER,
 ADD COLUMN IF NOT EXISTS available_at_us BIGINT,
 ADD COLUMN IF NOT EXISTS export_output BOOLEAN
 `);
 await client.query(`
-ALTER TABLE workflow_events
+ALTER TABLE ${schema}.workflow_events
 ADD COLUMN IF NOT EXISTS workflow_attempt_number INTEGER,
 ADD COLUMN IF NOT EXISTS available_at_us BIGINT,
 ADD COLUMN IF NOT EXISTS priority INTEGER,
@@ -5417,65 +5465,65 @@ async function migration002_addNormalizedColumns(pool) {
 client.release();
 }
 }
-async function migration003_createIndexes(pool) {
+async function migration003_createIndexes(pool, schema) {
 const client = await pool.connect();
 try {
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_workflow_events_lookup
-ON workflow_events (workflow_slug, run_id, timestamp_us)
+ON ${schema}.workflow_events (workflow_slug, run_id, timestamp_us)
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_workflow_events_type
-ON workflow_events (workflow_slug, run_id, type)
+ON ${schema}.workflow_events (workflow_slug, run_id, type)
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_step_events_lookup
-ON step_events (workflow_slug, run_id, step_id, timestamp_us)
+ON ${schema}.step_events (workflow_slug, run_id, step_id, timestamp_us)
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_step_events_run
-ON step_events (workflow_slug, run_id, timestamp_us)
+ON ${schema}.step_events (workflow_slug, run_id, timestamp_us)
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_step_events_type
-ON step_events (workflow_slug, run_id, type)
+ON ${schema}.step_events (workflow_slug, run_id, type)
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_step_events_scheduled
-ON step_events (workflow_slug, run_id, step_id, type, timestamp_us)
+ON ${schema}.step_events (workflow_slug, run_id, step_id, type, timestamp_us)
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_step_definitions_workflow
-ON step_definitions (workflow_slug)
+ON ${schema}.step_definitions (workflow_slug)
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_step_events_worker_timestamp
-ON step_events (worker_id, timestamp_us)
+ON ${schema}.step_events (worker_id, timestamp_us)
 WHERE worker_id IS NOT NULL
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_step_events_available_at
-ON step_events (type, available_at_us)
+ON ${schema}.step_events (type, available_at_us)
 WHERE available_at_us IS NOT NULL
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_step_events_attempt
-ON step_events (workflow_slug, run_id, step_id, attempt_number)
+ON ${schema}.step_events (workflow_slug, run_id, step_id, attempt_number)
 WHERE attempt_number IS NOT NULL
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_workflow_events_priority
-ON workflow_events (priority DESC, available_at_us)
+ON ${schema}.workflow_events (priority DESC, available_at_us)
 WHERE priority IS NOT NULL
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_workflow_events_idempotency
-ON workflow_events (idempotency_key)
+ON ${schema}.workflow_events (idempotency_key)
 WHERE idempotency_key IS NOT NULL
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_workflow_events_timeout
-ON workflow_events (workflow_slug, run_id, timeout_us)
+ON ${schema}.workflow_events (workflow_slug, run_id, timeout_us)
 WHERE timeout_us IS NOT NULL
 `);
 console.log("[Migration 003] Indexes created successfully");
@@ -5486,11 +5534,11 @@ async function migration003_createIndexes(pool) {
 client.release();
 }
 }
-async function migration004_addErrorFingerprints(pool) {
+async function migration004_addErrorFingerprints(pool, schema) {
 const client = await pool.connect();
 try {
 await client.query(`
-ALTER TABLE step_events
+ALTER TABLE ${schema}.step_events
 ADD COLUMN IF NOT EXISTS error_name_hash TEXT NOT NULL DEFAULT '',
 ADD COLUMN IF NOT EXISTS error_message_hash TEXT NOT NULL DEFAULT '',
 ADD COLUMN IF NOT EXISTS error_stack_exact_hash TEXT NOT NULL DEFAULT '',
@@ -5499,17 +5547,17 @@ async function migration004_addErrorFingerprints(pool) {
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_error_fp_exact
-ON step_events(error_name_hash, error_message_hash, error_stack_exact_hash)
+ON ${schema}.step_events(error_name_hash, error_message_hash, error_stack_exact_hash)
 WHERE type = 'StepFailed'
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_error_fp_normalized
-ON step_events(error_name_hash, error_message_hash, error_stack_normalized_hash)
+ON ${schema}.step_events(error_name_hash, error_message_hash, error_stack_normalized_hash)
 WHERE type = 'StepFailed'
 `);
 await client.query(`
 CREATE INDEX IF NOT EXISTS idx_error_fp_portable
-ON step_events(error_name_hash, error_message_hash, error_stack_portable_hash)
+ON ${schema}.step_events(error_name_hash, error_message_hash, error_stack_portable_hash)
 WHERE type = 'StepFailed'
 `);
 console.log("[Migration 004] Error fingerprint columns and indexes added successfully");
@@ -5520,13 +5568,14 @@ async function migration004_addErrorFingerprints(pool) {
 client.release();
 }
 }
-async function runMigrations(pool) {
-console.log(
+async function runMigrations(pool, schema = "cascadeflow") {
+console.log(`[Migrations] Starting database migrations in schema '${schema}'...`);
 try {
-await
-await
-await
-await
+await migration000_createSchema(pool, schema);
+await migration001_createTables(pool, schema);
+await migration002_addNormalizedColumns(pool, schema);
+await migration003_createIndexes(pool, schema);
+await migration004_addErrorFingerprints(pool, schema);
 console.log("[Migrations] All migrations completed successfully");
 } catch (error) {
 console.error("[Migrations] Migration failed:", error);
@@ -5536,17 +5585,18 @@ async function runMigrations(pool) {
 
 // src/index.ts
 class PostgresBackend extends Backend {
-
+db;
 initialized = false;
-constructor(connectionString) {
+constructor(connectionString, schema = "cascadeflow") {
 super();
-
+const pool = createPool(connectionString);
+this.db = new DatabaseClient(pool, schema);
 }
 async initialize() {
 if (this.initialized) {
 return;
 }
-await runMigrations(this.
+await runMigrations(this.db.getPool(), this.db.getSchema());
 this.initialized = true;
 }
 generateRunId() {
@@ -5561,10 +5611,10 @@ class PostgresBackend extends Backend {
 }
 async initializeRun(workflowSlug, runId) {}
 async runExists(workflowSlug, runId) {
-return runExists(
+return this.db.runExists(workflowSlug, runId);
 }
 async loadRun(workflowSlug, runId) {
-const events = await loadAllRunEvents(
+const events = await this.db.loadAllRunEvents(workflowSlug, runId);
 const stepEvents = new Map;
 for (const event of events) {
 if (event.category === "step") {
@@ -5594,22 +5644,22 @@ class PostgresBackend extends Backend {
|
|
|
5594
5644
|
}
|
|
5595
5645
|
eventSchema.parse(event);
|
|
5596
5646
|
const table = event.category === "workflow" ? "workflow_events" : "step_events";
|
|
5597
|
-
await appendEvent(
|
|
5647
|
+
await this.db.appendEvent(table, event);
|
|
5598
5648
|
}
|
|
5599
5649
|
async loadEvents(workflowSlug, runId, options) {
|
|
5600
5650
|
if (options?.category === "workflow") {
|
|
5601
|
-
return loadEvents(
|
|
5651
|
+
return this.db.loadEvents("workflow_events", {
|
|
5602
5652
|
workflowSlug,
|
|
5603
5653
|
runId
|
|
5604
5654
|
});
|
|
5605
5655
|
} else if (options?.category === "step") {
|
|
5606
|
-
return loadEvents(
|
|
5656
|
+
return this.db.loadEvents("step_events", {
|
|
5607
5657
|
workflowSlug,
|
|
5608
5658
|
runId,
|
|
5609
5659
|
stepId: options.stepId
|
|
5610
5660
|
});
|
|
5611
5661
|
} else {
|
|
5612
|
-
return loadAllRunEvents(
|
|
5662
|
+
return this.db.loadAllRunEvents(workflowSlug, runId);
|
|
5613
5663
|
}
|
|
5614
5664
|
}
|
|
5615
5665
|
async saveStepScheduled(workflowSlug, runId, stepId, metadata) {
|
|
@@ -5627,7 +5677,7 @@ class PostgresBackend extends Backend {
|
|
|
5627
5677
|
attemptNumber: metadata.attemptNumber,
|
|
5628
5678
|
retryDelayMs: metadata.retryDelayMs
|
|
5629
5679
|
};
|
|
5630
|
-
await appendEvent(
|
|
5680
|
+
await this.db.appendEvent("step_events", event);
|
|
5631
5681
|
}
|
|
5632
5682
|
async saveStepStart(workflowSlug, runId, stepId, workerId, metadata) {
|
|
5633
5683
|
const events = await this.loadEvents(workflowSlug, runId, { category: "step", stepId });
|
|
@@ -5645,7 +5695,7 @@ class PostgresBackend extends Backend {
|
|
|
5645
5695
|
workerId,
|
|
5646
5696
|
dependencies: metadata.dependencies
|
|
5647
5697
|
};
|
|
5648
|
-
await appendEvent(
|
|
5698
|
+
await this.db.appendEvent("step_events", event);
|
|
5649
5699
|
}
|
|
5650
5700
|
async saveStepComplete(workflowSlug, runId, stepId, output, metadata, exportOutput = false) {
|
|
5651
5701
|
const events = await this.loadEvents(workflowSlug, runId, { category: "step", stepId });
|
|
@@ -5668,7 +5718,7 @@ class PostgresBackend extends Backend {
|
|
|
5668
5718
|
message: log.message,
|
|
5669
5719
|
attemptNumber
|
|
5670
5720
|
};
|
|
5671
|
-
await appendEvent(
|
|
5721
|
+
await this.db.appendEvent("step_events", logEvent);
|
|
5672
5722
|
}
|
|
5673
5723
|
}
|
|
5674
5724
|
const serialized = safeSerialize(output);
|
|
@@ -5687,7 +5737,7 @@ class PostgresBackend extends Backend {
|
|
|
5687
5737
|
attemptNumber,
|
|
5688
5738
|
exportOutput
|
|
5689
5739
|
};
|
|
5690
|
-
await appendEvent(
|
|
5740
|
+
await this.db.appendEvent("step_events", event);
|
|
5691
5741
|
}
|
|
5692
5742
|
async saveStepFailed(workflowSlug, runId, stepId, error, metadata) {
|
|
5693
5743
|
const now = getMicrosecondTimestamp();
|
|
@@ -5707,10 +5757,10 @@ class PostgresBackend extends Backend {
|
|
|
5707
5757
|
nextRetryAtUs: metadata.nextRetryAt,
|
|
5708
5758
|
failureReason: metadata.failureReason
|
|
5709
5759
|
};
|
|
5710
|
-
await appendEvent(
|
|
5760
|
+
await this.db.appendEvent("step_events", event);
|
|
5711
5761
|
}
|
|
5712
5762
|
async saveStepFailedAndScheduleRetry(workflowSlug, runId, stepId, error, failureMetadata, scheduleMetadata) {
|
|
5713
|
-
const client = await this.
|
|
5763
|
+
const client = await this.db.getPool().connect();
|
|
5714
5764
|
try {
|
|
5715
5765
|
await client.query("BEGIN");
|
|
5716
5766
|
const failedTimestamp = getMicrosecondTimestamp();
|
|
@@ -5732,7 +5782,7 @@ class PostgresBackend extends Backend {
|
|
|
5732
5782
|
nextRetryAtUs: failureMetadata.nextRetryAt,
|
|
5733
5783
|
failureReason: failureMetadata.failureReason
|
|
5734
5784
|
};
|
|
5735
|
-
await appendEventWithClient(client, "step_events", failedEvent);
|
|
5785
|
+
await this.db.appendEventWithClient(client, "step_events", failedEvent);
|
|
5736
5786
|
const retryingEvent = {
|
|
5737
5787
|
category: "step",
|
|
5738
5788
|
type: "StepRetrying",
|
|
@@ -5746,7 +5796,7 @@ class PostgresBackend extends Backend {
|
|
|
5746
5796
|
maxRetries: scheduleMetadata.maxRetries,
|
|
5747
5797
|
error
|
|
5748
5798
|
};
|
|
5749
|
-
await appendEventWithClient(client, "step_events", retryingEvent);
|
|
5799
|
+
await this.db.appendEventWithClient(client, "step_events", retryingEvent);
|
|
5750
5800
|
const scheduledEvent = {
|
|
5751
5801
|
category: "step",
|
|
5752
5802
|
type: "StepScheduled",
|
|
@@ -5760,7 +5810,7 @@ class PostgresBackend extends Backend {
|
|
|
5760
5810
|
attemptNumber: scheduleMetadata.nextAttemptNumber,
|
|
5761
5811
|
retryDelayMs: scheduleMetadata.retryDelayMs
|
|
5762
5812
|
};
|
|
5763
|
-
await appendEventWithClient(client, "step_events", scheduledEvent);
|
|
5813
|
+
await this.db.appendEventWithClient(client, "step_events", scheduledEvent);
|
|
5764
5814
|
await client.query("COMMIT");
|
|
5765
5815
|
} catch (error2) {
|
|
5766
5816
|
await client.query("ROLLBACK");
|
|
@@ -5786,7 +5836,7 @@ class PostgresBackend extends Backend {
|
|
|
5786
5836
|
attemptNumber: metadata.attemptNumber,
|
|
5787
5837
|
cascadedFrom: metadata.cascadedFrom
|
|
5788
5838
|
};
|
|
5789
|
-
await appendEvent(
|
|
5839
|
+
await this.db.appendEvent("step_events", event);
|
|
5790
5840
|
}
|
|
5791
5841
|
async saveStepHeartbeat(workflowSlug, runId, stepId, workerId, attemptNumber) {
|
|
5792
5842
|
const now = getMicrosecondTimestamp();
|
|
@@ -5801,7 +5851,7 @@ class PostgresBackend extends Backend {
|
|
|
5801
5851
|
workerId,
|
|
5802
5852
|
attemptNumber
|
|
5803
5853
|
};
|
|
5804
|
-
await appendEvent(
|
|
5854
|
+
await this.db.appendEvent("step_events", event);
|
|
5805
5855
|
}
|
|
5806
5856
|
async saveStepReclaimed(workflowSlug, runId, stepId, metadata) {
|
|
5807
5857
|
const now = getMicrosecondTimestamp();
|
|
@@ -5820,7 +5870,7 @@ class PostgresBackend extends Backend {
|
|
|
5820
5870
|
staleDurationUs: metadata.staleDuration,
|
|
5821
5871
|
attemptNumber: metadata.attemptNumber
|
|
5822
5872
|
};
|
|
5823
|
-
await appendEvent(
|
|
5873
|
+
await this.db.appendEvent("step_events", event);
|
|
5824
5874
|
}
|
|
5825
5875
|
async saveStepLogs(workflowSlug, runId, stepId, logs) {}
|
|
5826
5876
|
async loadStepLogs(workflowSlug, runId, stepId, attemptNumber) {
|
|
@@ -5844,7 +5894,7 @@ class PostgresBackend extends Backend {
|
|
|
5844
5894
|
hasInputSchema: metadata.hasInputSchema,
|
|
5845
5895
|
hasInput: metadata.hasInput
|
|
5846
5896
|
};
|
|
5847
|
-
await appendEvent(
|
|
5897
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5848
5898
|
}
|
|
5849
5899
|
async saveWorkflowInputValidation(workflowSlug, runId, result) {
|
|
5850
5900
|
const timestamp = getMicrosecondTimestamp();
|
|
@@ -5857,7 +5907,7 @@ class PostgresBackend extends Backend {
|
|
|
5857
5907
|
runId,
|
|
5858
5908
|
...result
|
|
5859
5909
|
};
|
|
5860
|
-
await appendEvent(
|
|
5910
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5861
5911
|
}
|
|
5862
5912
|
async saveWorkflowComplete(workflowSlug, runId, output, metadata) {
|
|
5863
5913
|
const timestamp = getMicrosecondTimestamp();
|
|
@@ -5873,7 +5923,7 @@ class PostgresBackend extends Backend {
|
|
|
5873
5923
|
durationUs: metadata.duration,
|
|
5874
5924
|
totalSteps: metadata.totalSteps
|
|
5875
5925
|
};
|
|
5876
|
-
await appendEvent(
|
|
5926
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5877
5927
|
}
|
|
5878
5928
|
async saveWorkflowFailed(workflowSlug, runId, error, metadata, failureReason) {
|
|
5879
5929
|
const timestamp = getMicrosecondTimestamp();
|
|
@@ -5891,7 +5941,7 @@ class PostgresBackend extends Backend {
|
|
|
5891
5941
|
failedStep: metadata.failedStep,
|
|
5892
5942
|
failureReason
|
|
5893
5943
|
};
|
|
5894
|
-
await appendEvent(
|
|
5944
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5895
5945
|
}
|
|
5896
5946
|
async saveWorkflowResumed(workflowSlug, runId, metadata) {
|
|
5897
5947
|
const timestamp = getMicrosecondTimestamp();
|
|
@@ -5906,7 +5956,7 @@ class PostgresBackend extends Backend {
|
|
|
5906
5956
|
resumedSteps: metadata.resumedSteps,
|
|
5907
5957
|
pendingSteps: metadata.pendingSteps
|
|
5908
5958
|
};
|
|
5909
|
-
await appendEvent(
|
|
5959
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5910
5960
|
}
|
|
5911
5961
|
async saveWorkflowCancelled(workflowSlug, runId, metadata) {
|
|
5912
5962
|
const timestamp = getMicrosecondTimestamp();
|
|
@@ -5922,7 +5972,7 @@ class PostgresBackend extends Backend {
|
|
|
5922
5972
|
durationUs: metadata.duration,
|
|
5923
5973
|
completedSteps: metadata.completedSteps
|
|
5924
5974
|
};
|
|
5925
|
-
await appendEvent(
|
|
5975
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5926
5976
|
}
|
|
5927
5977
|
async saveWorkflowRetryStarted(workflowSlug, runId, metadata) {
|
|
5928
5978
|
const timestamp = getMicrosecondTimestamp();
|
|
@@ -5938,7 +5988,7 @@ class PostgresBackend extends Backend {
|
|
|
5938
5988
|
retriedSteps: metadata.retriedSteps,
|
|
5939
5989
|
reason: metadata.reason
|
|
5940
5990
|
};
|
|
5941
|
-
await appendEvent(
|
|
5991
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5942
5992
|
}
|
|
5943
5993
|
async saveRunSubmitted(workflowSlug, runId, metadata) {
|
|
5944
5994
|
const timestamp = getMicrosecondTimestamp();
|
|
@@ -5958,13 +6008,13 @@ class PostgresBackend extends Backend {
|
|
|
5958
6008
|
metadata: metadata.metadata,
|
|
5959
6009
|
tags: metadata.tags
|
|
5960
6010
|
};
|
|
5961
|
-
await appendEvent(
|
|
6011
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5962
6012
|
}
|
|
5963
6013
|
async submitRun(submission) {
|
|
5964
6014
|
if (submission.idempotencyKey) {
|
|
5965
6015
|
const hash = this.hashIdempotencyKey(submission.idempotencyKey);
|
|
5966
6016
|
const proposedRunId = submission.runId || this.generateRunId();
|
|
5967
|
-
const existingRunId = await saveIdempotencyKey(
|
|
6017
|
+
const existingRunId = await this.db.saveIdempotencyKey(hash, proposedRunId);
|
|
5968
6018
|
if (existingRunId !== proposedRunId) {
|
|
5969
6019
|
return { runId: existingRunId, isNew: false };
|
|
5970
6020
|
}
|
|
@@ -5988,12 +6038,12 @@ class PostgresBackend extends Backend {
|
|
|
5988
6038
|
}
|
|
5989
6039
|
async listRuns(options) {
|
|
5990
6040
|
const allRuns = [];
|
|
5991
|
-
const workflows = options?.workflowSlug ? [options.workflowSlug] : await listActiveWorkflows(
|
|
6041
|
+
const workflows = options?.workflowSlug ? [options.workflowSlug] : await this.db.listActiveWorkflows();
|
|
5992
6042
|
for (const workflowSlug of workflows) {
|
|
5993
|
-
const runIds = await listRunIds(
|
|
6043
|
+
const runIds = await this.db.listRunIds(workflowSlug);
|
|
5994
6044
|
for (const runId of runIds) {
|
|
5995
6045
|
try {
|
|
5996
|
-
const events = await loadAllRunEvents(
|
|
6046
|
+
const events = await this.db.loadAllRunEvents(workflowSlug, runId);
|
|
5997
6047
|
if (events.length === 0)
|
|
5998
6048
|
continue;
|
|
5999
6049
|
const workflowEvents = events.filter((e) => e.category === "workflow");
|
|
@@ -6018,9 +6068,9 @@ class PostgresBackend extends Backend {
     return options?.limit ? allRuns.slice(0, options.limit) : allRuns;
   }
   async cancelRun(runId, reason) {
-    const allWorkflows = await listActiveWorkflows(
+    const allWorkflows = await this.db.listActiveWorkflows();
     for (const workflowSlug of allWorkflows) {
-      const runIds = await listRunIds(
+      const runIds = await this.db.listRunIds(workflowSlug);
       if (runIds.includes(runId)) {
         const events = await this.loadEvents(workflowSlug, runId, { category: "workflow" });
         if (events.length === 0)
@@ -6041,11 +6091,11 @@ class PostgresBackend extends Backend {
     throw new Error(`Run ${runId} not found`);
   }
   async getRun(runId) {
-    const allWorkflows = await listActiveWorkflows(
+    const allWorkflows = await this.db.listActiveWorkflows();
    for (const workflowSlug of allWorkflows) {
-      const runIds = await listRunIds(
+      const runIds = await this.db.listRunIds(workflowSlug);
       if (runIds.includes(runId)) {
-        const events = await loadAllRunEvents(
+        const events = await this.db.loadAllRunEvents(workflowSlug, runId);
         const workflowEvents = events.filter((e) => e.category === "workflow");
         return projectRunStateFromEvents(workflowEvents, workflowSlug);
       }
@@ -6079,17 +6129,17 @@ class PostgresBackend extends Backend {
     return failedSteps;
   }
   async listActiveWorkflows() {
-    return listActiveWorkflows(
+    return this.db.listActiveWorkflows();
   }
   async listScheduledSteps(options) {
     const dbOptions = {
       workflowSlugs: options?.workflowSlug ? [options.workflowSlug] : undefined,
       limit: options?.limit
     };
-    return listScheduledSteps(
+    return this.db.listScheduledSteps(dbOptions);
   }
   async isStepClaimable(workflowSlug, runId, stepId) {
-    const events = await loadEvents(
+    const events = await this.db.loadEvents("step_events", {
       workflowSlug,
       runId,
       stepId
@@ -6124,13 +6174,13 @@ class PostgresBackend extends Backend {
       dependencies: metadata.dependencies,
       attemptNumber
     };
-    const claimed = await claimScheduledStep(
+    const claimed = await this.db.claimScheduledStep(workflowSlug, runId, stepId, workerId, event);
     return claimed ? { attemptNumber } : null;
   }
   async reclaimStaleSteps(staleThreshold, reclaimedBy) {
     const reclaimed = [];
     const now = getMicrosecondTimestamp();
-    const staleSteps = await findStaleSteps(
+    const staleSteps = await this.db.findStaleSteps(staleThreshold);
     for (const step of staleSteps) {
       const events = await this.loadEvents(step.workflowSlug, step.runId, { category: "step", stepId: step.stepId });
       if (events.length === 0)
@@ -6161,25 +6211,25 @@ class PostgresBackend extends Backend {
     return reclaimed;
   }
   async registerWorkflow(registration) {
-    await upsertWorkflowMetadata(
+    await this.db.upsertWorkflowMetadata(registration.slug, registration.name, registration.location, registration.inputSchemaJSON);
     for (const step of registration.steps) {
-      await upsertStepDefinition(
+      await this.db.upsertStepDefinition(registration.slug, step);
     }
   }
   async getWorkflowMetadata(slug) {
-    return getWorkflowMetadata(
+    return this.db.getWorkflowMetadata(slug);
   }
   async listWorkflowMetadata() {
-    return listWorkflowMetadata(
+    return this.db.listWorkflowMetadata();
   }
   async getWorkflowSteps(slug) {
-    return getWorkflowSteps(
+    return this.db.getWorkflowSteps(slug);
   }
   async listRunIds(workflowSlug) {
-    return listRunIds(
+    return this.db.listRunIds(workflowSlug);
   }
   async close() {
-    await this.
+    await this.db.getPool().end();
   }
   async loadEventsForAnalytics(options) {
     const now = getMicrosecondTimestamp();
@@ -6187,7 +6237,7 @@ class PostgresBackend extends Backend {
     const endUs = options?.endUs ?? now;
     let stepQuery = `
       SELECT event_data
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       WHERE timestamp_us >= $1 AND timestamp_us <= $2
     `;
     const stepParams = [startUs, endUs];
@@ -6211,7 +6261,7 @@ class PostgresBackend extends Backend {
     stepQuery += ` ORDER BY timestamp_us ASC`;
     let workflowQuery = `
       SELECT event_data
-      FROM workflow_events
+      FROM ${this.db.getSchema()}.workflow_events
       WHERE timestamp_us >= $1 AND timestamp_us <= $2
     `;
     const workflowParams = [startUs, endUs];
@@ -6229,8 +6279,8 @@ class PostgresBackend extends Backend {
     }
     workflowQuery += ` ORDER BY timestamp_us ASC`;
     const [stepResult, workflowResult] = await Promise.all([
-      this.
-      options?.stepId ? Promise.resolve({ rows: [] }) : this.
+      this.db.getPool().query(stepQuery, stepParams),
+      options?.stepId ? Promise.resolve({ rows: [] }) : this.db.getPool().query(workflowQuery, workflowParams)
     ]);
     const stepEvents = stepResult.rows.map((row) => row.event_data);
     const workflowEvents = workflowResult.rows.map((row) => row.event_data);
@@ -6264,10 +6314,10 @@ class PostgresBackend extends Backend {
     const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
     const countQuery = `
       SELECT COUNT(DISTINCT CONCAT(error_name_hash, ':', error_message_hash, ':', ${stackHashColumn})) as total
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       ${whereClause}
     `;
-    const countResult = await this.
+    const countResult = await this.db.getPool().query(countQuery, params);
     const total = parseInt(countResult.rows[0]?.total || "0", 10);
     const query = `
       SELECT
@@ -6279,7 +6329,7 @@ class PostgresBackend extends Backend {
       COUNT(DISTINCT run_id) as affected_runs,
       MIN(timestamp_us) as first_seen,
       MAX(timestamp_us) as last_seen
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       ${whereClause}
       GROUP BY
       error_name_hash,
@@ -6288,7 +6338,7 @@ class PostgresBackend extends Backend {
       ORDER BY count DESC
       LIMIT $${paramIndex} OFFSET $${paramIndex + 1}
     `;
-    const result = await this.
+    const result = await this.db.getPool().query(query, [...params, limit, offset]);
     const errors = result.rows.map((row) => ({
       fingerprint: row.fingerprint,
       errorMessage: row.error_message || "",
@@ -6336,10 +6386,10 @@ class PostgresBackend extends Backend {
       COUNT(DISTINCT run_id) as affected_runs,
       MIN(timestamp_us) as first_seen,
       MAX(timestamp_us) as last_seen
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       ${whereClause}
     `;
-    const statsResult = await this.
+    const statsResult = await this.db.getPool().query(statsQuery, params.slice(0, paramIndex - 1));
     if (statsResult.rows.length === 0) {
       return {
         fingerprint,
@@ -6362,12 +6412,12 @@ class PostgresBackend extends Backend {
       step_id,
       (event_data->>'attemptNumber')::int as attempt_number,
       timestamp_us
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       ${whereClause}
       ORDER BY timestamp_us DESC
       LIMIT $${paramIndex} OFFSET $${paramIndex + 1}
     `;
-    const occurrencesResult = await this.
+    const occurrencesResult = await this.db.getPool().query(occurrencesQuery, [
       ...params.slice(0, paramIndex - 1),
       limit,
       offset
@@ -6580,4 +6630,4 @@ export {
   PostgresBackend
 };
 
-//# debugId=
+//# debugId=FFCFF67786D53C3B64756E2164756E21