@cascade-flow/backend-postgres 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/db.d.ts +112 -96
- package/dist/db.d.ts.map +1 -1
- package/dist/index.d.ts +12 -3
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +654 -463
- package/dist/index.js.map +5 -5
- package/dist/migrations.d.ts +1 -1
- package/dist/migrations.d.ts.map +1 -1
- package/package.json +1 -1
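
In 0.2.0 all queries move behind a new schema-aware `DatabaseClient`, the migration chain gains a `CREATE SCHEMA` step (000) and a workflow-versioning step (005), and `PostgresBackend` accepts an optional schema name. A minimal usage sketch inferred from the diff below — the named export and the workflow slug are assumptions, not confirmed by this diff:

  // Sketch only: assumes PostgresBackend is exported from the package entry point.
  import { PostgresBackend } from "@cascade-flow/backend-postgres";

  // 0.1.0 took only a connection string; 0.2.0 adds an optional Postgres schema
  // (default "cascadeflow") that every table reference is qualified with.
  const backend = new PostgresBackend(process.env.DATABASE_URL, "cascadeflow");
  await backend.initialize(); // runs migrations 000-005 in the chosen schema

  // submitRun now stamps RunSubmitted with the registered workflow's versionId and
  // throws if no worker has registered the workflow. "my-workflow" is hypothetical;
  // the { runId, isNew } shape appears on the idempotency path in the diff.
  const { runId, isNew } = await backend.submitRun({
    workflowSlug: "my-workflow",
    input: { userId: 42 },
    idempotencyKey: "my-workflow:42"
  });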
package/dist/index.js
CHANGED
|
@@ -4773,12 +4773,117 @@ var esm_default = import_lib.default;
|
|
|
4773
4773
|
|
|
4774
4774
|
// src/db.ts
|
|
4775
4775
|
var { Pool: Pool2 } = esm_default;
|
|
4776
|
-
|
|
4777
|
-
|
|
4778
|
-
|
|
4779
|
-
|
|
4780
|
-
|
|
4781
|
-
|
|
4776
|
+
|
|
4777
|
+
class DatabaseClient {
|
|
4778
|
+
pool;
|
|
4779
|
+
schema;
|
|
4780
|
+
constructor(pool, schema) {
|
|
4781
|
+
this.pool = pool;
|
|
4782
|
+
this.schema = schema;
|
|
4783
|
+
}
|
|
4784
|
+
getPool() {
|
|
4785
|
+
return this.pool;
|
|
4786
|
+
}
|
|
4787
|
+
getSchema() {
|
|
4788
|
+
return this.schema;
|
|
4789
|
+
}
|
|
4790
|
+
async appendEvent(table, event) {
|
|
4791
|
+
const client = await this.pool.connect();
|
|
4792
|
+
try {
|
|
4793
|
+
if (table === "workflow_events") {
|
|
4794
|
+
const we = event;
|
|
4795
|
+
let workflowAttemptNumber = null;
|
|
4796
|
+
let availableAtUs = null;
|
|
4797
|
+
let priority = null;
|
|
4798
|
+
let timeoutUs = null;
|
|
4799
|
+
let idempotencyKey = null;
|
|
4800
|
+
if (we.type === "RunSubmitted") {
|
|
4801
|
+
availableAtUs = we.availableAtUs;
|
|
4802
|
+
priority = we.priority;
|
|
4803
|
+
timeoutUs = we.timeoutUs ?? null;
|
|
4804
|
+
idempotencyKey = we.idempotencyKey ?? null;
|
|
4805
|
+
} else if ("workflowAttemptNumber" in we) {
|
|
4806
|
+
workflowAttemptNumber = we.workflowAttemptNumber;
|
|
4807
|
+
}
|
|
4808
|
+
await client.query(`INSERT INTO ${this.schema}.workflow_events (
|
|
4809
|
+
event_id, workflow_slug, run_id, timestamp_us, category, type, event_data,
|
|
4810
|
+
workflow_attempt_number, available_at_us, priority, timeout_us, idempotency_key
|
|
4811
|
+
)
|
|
4812
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, [
|
|
4813
|
+
we.eventId,
|
|
4814
|
+
we.workflowSlug,
|
|
4815
|
+
we.runId,
|
|
4816
|
+
we.timestampUs,
|
|
4817
|
+
we.category,
|
|
4818
|
+
we.type,
|
|
4819
|
+
JSON.stringify(event),
|
|
4820
|
+
workflowAttemptNumber,
|
|
4821
|
+
availableAtUs,
|
|
4822
|
+
priority,
|
|
4823
|
+
timeoutUs,
|
|
4824
|
+
idempotencyKey
|
|
4825
|
+
]);
|
|
4826
|
+
} else {
|
|
4827
|
+
const se = event;
|
|
4828
|
+
let workerId = null;
|
|
4829
|
+
let attemptNumber = null;
|
|
4830
|
+
let availableAtUs = null;
|
|
4831
|
+
let exportOutput = null;
|
|
4832
|
+
let errorNameHash = "";
|
|
4833
|
+
let errorMessageHash = "";
|
|
4834
|
+
let errorStackExactHash = "";
|
|
4835
|
+
let errorStackNormalizedHash = "";
|
|
4836
|
+
let errorStackPortableHash = "";
|
|
4837
|
+
if (se.type === "StepStarted" || se.type === "StepHeartbeat") {
|
|
4838
|
+
workerId = se.workerId;
|
|
4839
|
+
}
|
|
4840
|
+
if ("attemptNumber" in se) {
|
|
4841
|
+
attemptNumber = se.attemptNumber;
|
|
4842
|
+
}
|
|
4843
|
+
if (se.type === "StepScheduled") {
|
|
4844
|
+
availableAtUs = se.availableAtUs;
|
|
4845
|
+
}
|
|
4846
|
+
if (se.type === "StepCompleted") {
|
|
4847
|
+
exportOutput = se.exportOutput;
|
|
4848
|
+
}
|
|
4849
|
+
if (se.type === "StepFailed") {
|
|
4850
|
+
errorNameHash = se.errorFingerprints.nameHash;
|
|
4851
|
+
errorMessageHash = se.errorFingerprints.messageHash;
|
|
4852
|
+
errorStackExactHash = se.errorFingerprints.stackExactHash;
|
|
4853
|
+
errorStackNormalizedHash = se.errorFingerprints.stackNormalizedHash;
|
|
4854
|
+
errorStackPortableHash = se.errorFingerprints.stackPortableHash;
|
|
4855
|
+
}
|
|
4856
|
+
await client.query(`INSERT INTO ${this.schema}.step_events (
|
|
4857
|
+
event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
|
|
4858
|
+
worker_id, attempt_number, available_at_us, export_output,
|
|
4859
|
+
error_name_hash, error_message_hash, error_stack_exact_hash,
|
|
4860
|
+
error_stack_normalized_hash, error_stack_portable_hash
|
|
4861
|
+
)
|
|
4862
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, [
|
|
4863
|
+
se.eventId,
|
|
4864
|
+
se.workflowSlug,
|
|
4865
|
+
se.runId,
|
|
4866
|
+
se.stepId,
|
|
4867
|
+
se.timestampUs,
|
|
4868
|
+
se.category,
|
|
4869
|
+
se.type,
|
|
4870
|
+
JSON.stringify(event),
|
|
4871
|
+
workerId,
|
|
4872
|
+
attemptNumber,
|
|
4873
|
+
availableAtUs,
|
|
4874
|
+
exportOutput,
|
|
4875
|
+
errorNameHash,
|
|
4876
|
+
errorMessageHash,
|
|
4877
|
+
errorStackExactHash,
|
|
4878
|
+
errorStackNormalizedHash,
|
|
4879
|
+
errorStackPortableHash
|
|
4880
|
+
]);
|
|
4881
|
+
}
|
|
4882
|
+
} finally {
|
|
4883
|
+
client.release();
|
|
4884
|
+
}
|
|
4885
|
+
}
|
|
4886
|
+
async appendEventWithClient(client, table, event) {
|
|
4782
4887
|
if (table === "workflow_events") {
|
|
4783
4888
|
const we = event;
|
|
4784
4889
|
let workflowAttemptNumber = null;
|
|
@@ -4794,7 +4899,7 @@ async function appendEvent(pool, table, event) {
|
|
|
4794
4899
|
} else if ("workflowAttemptNumber" in we) {
|
|
4795
4900
|
workflowAttemptNumber = we.workflowAttemptNumber;
|
|
4796
4901
|
}
|
|
4797
|
-
await client.query(`INSERT INTO workflow_events (
|
|
4902
|
+
await client.query(`INSERT INTO ${this.schema}.workflow_events (
|
|
4798
4903
|
event_id, workflow_slug, run_id, timestamp_us, category, type, event_data,
|
|
4799
4904
|
workflow_attempt_number, available_at_us, priority, timeout_us, idempotency_key
|
|
4800
4905
|
)
|
|
@@ -4842,7 +4947,7 @@ async function appendEvent(pool, table, event) {
|
|
|
4842
4947
|
errorStackNormalizedHash = se.errorFingerprints.stackNormalizedHash;
|
|
4843
4948
|
errorStackPortableHash = se.errorFingerprints.stackPortableHash;
|
|
4844
4949
|
}
|
|
4845
|
-
await client.query(`INSERT INTO step_events (
|
|
4950
|
+
await client.query(`INSERT INTO ${this.schema}.step_events (
|
|
4846
4951
|
event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
|
|
4847
4952
|
worker_id, attempt_number, available_at_us, export_output,
|
|
4848
4953
|
error_name_hash, error_message_hash, error_stack_exact_hash,
|
|
@@ -4868,239 +4973,145 @@ async function appendEvent(pool, table, event) {
|
|
|
4868
4973
|
errorStackPortableHash
|
|
4869
4974
|
]);
|
|
4870
4975
|
}
|
|
4871
|
-
} finally {
|
|
4872
|
-
client.release();
|
|
4873
4976
|
}
|
|
4874
|
-
|
|
4875
|
-
|
|
4876
|
-
|
|
4877
|
-
|
|
4878
|
-
|
|
4879
|
-
|
|
4880
|
-
|
|
4881
|
-
|
|
4882
|
-
|
|
4883
|
-
|
|
4884
|
-
|
|
4885
|
-
|
|
4886
|
-
|
|
4887
|
-
|
|
4888
|
-
|
|
4889
|
-
|
|
4890
|
-
|
|
4891
|
-
|
|
4892
|
-
|
|
4893
|
-
|
|
4894
|
-
|
|
4895
|
-
|
|
4896
|
-
|
|
4897
|
-
|
|
4898
|
-
|
|
4899
|
-
|
|
4900
|
-
|
|
4901
|
-
|
|
4902
|
-
|
|
4903
|
-
workflowAttemptNumber,
|
|
4904
|
-
availableAtUs,
|
|
4905
|
-
priority,
|
|
4906
|
-
timeoutUs,
|
|
4907
|
-
idempotencyKey
|
|
4908
|
-
]);
|
|
4909
|
-
} else {
|
|
4910
|
-
const se = event;
|
|
4911
|
-
let workerId = null;
|
|
4912
|
-
let attemptNumber = null;
|
|
4913
|
-
let availableAtUs = null;
|
|
4914
|
-
let exportOutput = null;
|
|
4915
|
-
let errorNameHash = "";
|
|
4916
|
-
let errorMessageHash = "";
|
|
4917
|
-
let errorStackExactHash = "";
|
|
4918
|
-
let errorStackNormalizedHash = "";
|
|
4919
|
-
let errorStackPortableHash = "";
|
|
4920
|
-
if (se.type === "StepStarted" || se.type === "StepHeartbeat") {
|
|
4921
|
-
workerId = se.workerId;
|
|
4922
|
-
}
|
|
4923
|
-
if ("attemptNumber" in se) {
|
|
4924
|
-
attemptNumber = se.attemptNumber;
|
|
4925
|
-
}
|
|
4926
|
-
if (se.type === "StepScheduled") {
|
|
4927
|
-
availableAtUs = se.availableAtUs;
|
|
4928
|
-
}
|
|
4929
|
-
if (se.type === "StepCompleted") {
|
|
4930
|
-
exportOutput = se.exportOutput;
|
|
4931
|
-
}
|
|
4932
|
-
if (se.type === "StepFailed") {
|
|
4933
|
-
errorNameHash = se.errorFingerprints.nameHash;
|
|
4934
|
-
errorMessageHash = se.errorFingerprints.messageHash;
|
|
4935
|
-
errorStackExactHash = se.errorFingerprints.stackExactHash;
|
|
4936
|
-
errorStackNormalizedHash = se.errorFingerprints.stackNormalizedHash;
|
|
4937
|
-
errorStackPortableHash = se.errorFingerprints.stackPortableHash;
|
|
4938
|
-
}
|
|
4939
|
-
await client.query(`INSERT INTO step_events (
|
|
4940
|
-
event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
|
|
4941
|
-
worker_id, attempt_number, available_at_us, export_output,
|
|
4942
|
-
error_name_hash, error_message_hash, error_stack_exact_hash,
|
|
4943
|
-
error_stack_normalized_hash, error_stack_portable_hash
|
|
4944
|
-
)
|
|
4945
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, [
|
|
4946
|
-
se.eventId,
|
|
4947
|
-
se.workflowSlug,
|
|
4948
|
-
se.runId,
|
|
4949
|
-
se.stepId,
|
|
4950
|
-
se.timestampUs,
|
|
4951
|
-
se.category,
|
|
4952
|
-
se.type,
|
|
4953
|
-
JSON.stringify(event),
|
|
4954
|
-
workerId,
|
|
4955
|
-
attemptNumber,
|
|
4956
|
-
availableAtUs,
|
|
4957
|
-
exportOutput,
|
|
4958
|
-
errorNameHash,
|
|
4959
|
-
errorMessageHash,
|
|
4960
|
-
errorStackExactHash,
|
|
4961
|
-
errorStackNormalizedHash,
|
|
4962
|
-
errorStackPortableHash
|
|
4963
|
-
]);
|
|
4964
|
-
}
|
|
4965
|
-
}
|
|
4966
|
-
async function loadEvents(pool, table, filters) {
|
|
4967
|
-
const client = await pool.connect();
|
|
4968
|
-
try {
|
|
4969
|
-
const conditions = [];
|
|
4970
|
-
const values = [];
|
|
4971
|
-
let paramIndex = 1;
|
|
4972
|
-
if (filters.workflowSlug) {
|
|
4973
|
-
conditions.push(`workflow_slug = $${paramIndex++}`);
|
|
4974
|
-
values.push(filters.workflowSlug);
|
|
4975
|
-
}
|
|
4976
|
-
if (filters.runId) {
|
|
4977
|
-
conditions.push(`run_id = $${paramIndex++}`);
|
|
4978
|
-
values.push(filters.runId);
|
|
4979
|
-
}
|
|
4980
|
-
if (filters.stepId && table === "step_events") {
|
|
4981
|
-
conditions.push(`step_id = $${paramIndex++}`);
|
|
4982
|
-
values.push(filters.stepId);
|
|
4983
|
-
}
|
|
4984
|
-
if (filters.category) {
|
|
4985
|
-
conditions.push(`category = $${paramIndex++}`);
|
|
4986
|
-
values.push(filters.category);
|
|
4987
|
-
}
|
|
4988
|
-
if (filters.types && filters.types.length > 0) {
|
|
4989
|
-
conditions.push(`type = ANY($${paramIndex++})`);
|
|
4990
|
-
values.push(filters.types);
|
|
4991
|
-
}
|
|
4992
|
-
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
|
|
4993
|
-
const query = `
|
|
4994
|
-
SELECT event_data FROM ${table}
|
|
4977
|
+
async loadEvents(table, filters) {
|
|
4978
|
+
const client = await this.pool.connect();
|
|
4979
|
+
try {
|
|
4980
|
+
const conditions = [];
|
|
4981
|
+
const values = [];
|
|
4982
|
+
let paramIndex = 1;
|
|
4983
|
+
if (filters.workflowSlug) {
|
|
4984
|
+
conditions.push(`workflow_slug = $${paramIndex++}`);
|
|
4985
|
+
values.push(filters.workflowSlug);
|
|
4986
|
+
}
|
|
4987
|
+
if (filters.runId) {
|
|
4988
|
+
conditions.push(`run_id = $${paramIndex++}`);
|
|
4989
|
+
values.push(filters.runId);
|
|
4990
|
+
}
|
|
4991
|
+
if (filters.stepId && table === "step_events") {
|
|
4992
|
+
conditions.push(`step_id = $${paramIndex++}`);
|
|
4993
|
+
values.push(filters.stepId);
|
|
4994
|
+
}
|
|
4995
|
+
if (filters.category) {
|
|
4996
|
+
conditions.push(`category = $${paramIndex++}`);
|
|
4997
|
+
values.push(filters.category);
|
|
4998
|
+
}
|
|
4999
|
+
if (filters.types && filters.types.length > 0) {
|
|
5000
|
+
conditions.push(`type = ANY($${paramIndex++})`);
|
|
5001
|
+
values.push(filters.types);
|
|
5002
|
+
}
|
|
5003
|
+
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
|
|
5004
|
+
const query = `
|
|
5005
|
+
SELECT event_data FROM ${this.schema}.${table}
|
|
4995
5006
|
${whereClause}
|
|
4996
5007
|
ORDER BY timestamp_us ASC, event_id ASC
|
|
4997
5008
|
`;
|
|
4998
|
-
|
|
4999
|
-
|
|
5000
|
-
|
|
5001
|
-
|
|
5009
|
+
const result = await client.query(query, values);
|
|
5010
|
+
return result.rows.map((row) => row.event_data);
|
|
5011
|
+
} finally {
|
|
5012
|
+
client.release();
|
|
5013
|
+
}
|
|
5002
5014
|
}
|
|
5003
|
-
|
|
5004
|
-
|
|
5005
|
-
|
|
5006
|
-
|
|
5007
|
-
|
|
5008
|
-
SELECT event_data, timestamp_us, event_id FROM workflow_events
|
|
5015
|
+
async loadAllRunEvents(workflowSlug, runId) {
|
|
5016
|
+
const client = await this.pool.connect();
|
|
5017
|
+
try {
|
|
5018
|
+
const query = `
|
|
5019
|
+
SELECT event_data, timestamp_us, event_id FROM ${this.schema}.workflow_events
|
|
5009
5020
|
WHERE workflow_slug = $1 AND run_id = $2
|
|
5010
5021
|
UNION ALL
|
|
5011
|
-
SELECT event_data, timestamp_us, event_id FROM step_events
|
|
5022
|
+
SELECT event_data, timestamp_us, event_id FROM ${this.schema}.step_events
|
|
5012
5023
|
WHERE workflow_slug = $1 AND run_id = $2
|
|
5013
5024
|
ORDER BY timestamp_us ASC, event_id ASC
|
|
5014
5025
|
`;
|
|
5015
|
-
|
|
5016
|
-
|
|
5017
|
-
|
|
5018
|
-
|
|
5026
|
+
const result = await client.query(query, [workflowSlug, runId]);
|
|
5027
|
+
return result.rows.map((row) => row.event_data);
|
|
5028
|
+
} finally {
|
|
5029
|
+
client.release();
|
|
5030
|
+
}
|
|
5019
5031
|
}
|
|
5020
|
-
|
|
5021
|
-
|
|
5022
|
-
|
|
5023
|
-
|
|
5024
|
-
|
|
5025
|
-
|
|
5026
|
-
SELECT event_data FROM step_events
|
|
5032
|
+
async claimScheduledStep(workflowSlug, runId, stepId, workerId, eventToWrite) {
|
|
5033
|
+
const client = await this.pool.connect();
|
|
5034
|
+
try {
|
|
5035
|
+
await client.query("BEGIN");
|
|
5036
|
+
const checkQuery = `
|
|
5037
|
+
SELECT event_data FROM ${this.schema}.step_events
|
|
5027
5038
|
WHERE workflow_slug = $1 AND run_id = $2 AND step_id = $3
|
|
5028
5039
|
ORDER BY timestamp_us DESC, event_id DESC
|
|
5029
5040
|
LIMIT 1
|
|
5030
5041
|
FOR UPDATE SKIP LOCKED
|
|
5031
5042
|
`;
|
|
5032
|
-
|
|
5033
|
-
|
|
5034
|
-
|
|
5035
|
-
|
|
5036
|
-
|
|
5037
|
-
|
|
5038
|
-
|
|
5043
|
+
const checkResult = await client.query(checkQuery, [workflowSlug, runId, stepId]);
|
|
5044
|
+
if (checkResult.rows.length === 0) {
|
|
5045
|
+
await client.query("ROLLBACK");
|
|
5046
|
+
return false;
|
|
5047
|
+
}
|
|
5048
|
+
const latestEvent = checkResult.rows[0].event_data;
|
|
5049
|
+
if (latestEvent.type !== "StepScheduled" && latestEvent.type !== "StepReclaimed" && latestEvent.type !== "StepRetrying") {
|
|
5050
|
+
await client.query("ROLLBACK");
|
|
5051
|
+
return false;
|
|
5052
|
+
}
|
|
5053
|
+
let workerId2 = null;
|
|
5054
|
+
let attemptNumber = null;
|
|
5055
|
+
let errorNameHash = "";
|
|
5056
|
+
let errorMessageHash = "";
|
|
5057
|
+
let errorStackExactHash = "";
|
|
5058
|
+
let errorStackNormalizedHash = "";
|
|
5059
|
+
let errorStackPortableHash = "";
|
|
5060
|
+
if (eventToWrite.type === "StepStarted") {
|
|
5061
|
+
workerId2 = eventToWrite.workerId;
|
|
5062
|
+
attemptNumber = eventToWrite.attemptNumber;
|
|
5063
|
+
}
|
|
5064
|
+
if (eventToWrite.type === "StepFailed") {
|
|
5065
|
+
errorNameHash = eventToWrite.errorFingerprints.nameHash;
|
|
5066
|
+
errorMessageHash = eventToWrite.errorFingerprints.messageHash;
|
|
5067
|
+
errorStackExactHash = eventToWrite.errorFingerprints.stackExactHash;
|
|
5068
|
+
errorStackNormalizedHash = eventToWrite.errorFingerprints.stackNormalizedHash;
|
|
5069
|
+
errorStackPortableHash = eventToWrite.errorFingerprints.stackPortableHash;
|
|
5070
|
+
}
|
|
5071
|
+
await client.query(`INSERT INTO ${this.schema}.step_events (
|
|
5072
|
+
event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
|
|
5073
|
+
worker_id, attempt_number, available_at_us, export_output,
|
|
5074
|
+
error_name_hash, error_message_hash, error_stack_exact_hash,
|
|
5075
|
+
error_stack_normalized_hash, error_stack_portable_hash
|
|
5076
|
+
)
|
|
5077
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, [
|
|
5078
|
+
eventToWrite.eventId,
|
|
5079
|
+
eventToWrite.workflowSlug,
|
|
5080
|
+
eventToWrite.runId,
|
|
5081
|
+
eventToWrite.stepId,
|
|
5082
|
+
eventToWrite.timestampUs,
|
|
5083
|
+
eventToWrite.category,
|
|
5084
|
+
eventToWrite.type,
|
|
5085
|
+
JSON.stringify(eventToWrite),
|
|
5086
|
+
workerId2,
|
|
5087
|
+
attemptNumber,
|
|
5088
|
+
null,
|
|
5089
|
+
null,
|
|
5090
|
+
errorNameHash,
|
|
5091
|
+
errorMessageHash,
|
|
5092
|
+
errorStackExactHash,
|
|
5093
|
+
errorStackNormalizedHash,
|
|
5094
|
+
errorStackPortableHash
|
|
5095
|
+
]);
|
|
5096
|
+
await client.query("COMMIT");
|
|
5097
|
+
return true;
|
|
5098
|
+
} catch (error) {
|
|
5039
5099
|
await client.query("ROLLBACK");
|
|
5040
|
-
|
|
5100
|
+
throw error;
|
|
5101
|
+
} finally {
|
|
5102
|
+
client.release();
|
|
5041
5103
|
}
|
|
5042
|
-
let workerId2 = null;
|
|
5043
|
-
let attemptNumber = null;
|
|
5044
|
-
let errorNameHash = "";
|
|
5045
|
-
let errorMessageHash = "";
|
|
5046
|
-
let errorStackExactHash = "";
|
|
5047
|
-
let errorStackNormalizedHash = "";
|
|
5048
|
-
let errorStackPortableHash = "";
|
|
5049
|
-
if (eventToWrite.type === "StepStarted") {
|
|
5050
|
-
workerId2 = eventToWrite.workerId;
|
|
5051
|
-
attemptNumber = eventToWrite.attemptNumber;
|
|
5052
|
-
}
|
|
5053
|
-
if (eventToWrite.type === "StepFailed") {
|
|
5054
|
-
errorNameHash = eventToWrite.errorFingerprints.nameHash;
|
|
5055
|
-
errorMessageHash = eventToWrite.errorFingerprints.messageHash;
|
|
5056
|
-
errorStackExactHash = eventToWrite.errorFingerprints.stackExactHash;
|
|
5057
|
-
errorStackNormalizedHash = eventToWrite.errorFingerprints.stackNormalizedHash;
|
|
5058
|
-
errorStackPortableHash = eventToWrite.errorFingerprints.stackPortableHash;
|
|
5059
|
-
}
|
|
5060
|
-
await client.query(`INSERT INTO step_events (
|
|
5061
|
-
event_id, workflow_slug, run_id, step_id, timestamp_us, category, type, event_data,
|
|
5062
|
-
worker_id, attempt_number, available_at_us, export_output,
|
|
5063
|
-
error_name_hash, error_message_hash, error_stack_exact_hash,
|
|
5064
|
-
error_stack_normalized_hash, error_stack_portable_hash
|
|
5065
|
-
)
|
|
5066
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)`, [
|
|
5067
|
-
eventToWrite.eventId,
|
|
5068
|
-
eventToWrite.workflowSlug,
|
|
5069
|
-
eventToWrite.runId,
|
|
5070
|
-
eventToWrite.stepId,
|
|
5071
|
-
eventToWrite.timestampUs,
|
|
5072
|
-
eventToWrite.category,
|
|
5073
|
-
eventToWrite.type,
|
|
5074
|
-
JSON.stringify(eventToWrite),
|
|
5075
|
-
workerId2,
|
|
5076
|
-
attemptNumber,
|
|
5077
|
-
null,
|
|
5078
|
-
null,
|
|
5079
|
-
errorNameHash,
|
|
5080
|
-
errorMessageHash,
|
|
5081
|
-
errorStackExactHash,
|
|
5082
|
-
errorStackNormalizedHash,
|
|
5083
|
-
errorStackPortableHash
|
|
5084
|
-
]);
|
|
5085
|
-
await client.query("COMMIT");
|
|
5086
|
-
return true;
|
|
5087
|
-
} catch (error) {
|
|
5088
|
-
await client.query("ROLLBACK");
|
|
5089
|
-
throw error;
|
|
5090
|
-
} finally {
|
|
5091
|
-
client.release();
|
|
5092
5104
|
}
|
|
5093
|
-
|
|
5094
|
-
|
|
5095
|
-
|
|
5096
|
-
|
|
5097
|
-
|
|
5098
|
-
|
|
5099
|
-
let query = `
|
|
5105
|
+
async listScheduledSteps(options) {
|
|
5106
|
+
const client = await this.pool.connect();
|
|
5107
|
+
try {
|
|
5108
|
+
const currentTimeUs = Date.now() * 1000;
|
|
5109
|
+
const scheduledTypes = ["StepScheduled", "StepReclaimed", "StepRetrying"];
|
|
5110
|
+
let query = `
|
|
5100
5111
|
WITH latest_step_events AS (
|
|
5101
5112
|
SELECT DISTINCT ON (workflow_slug, run_id, step_id)
|
|
5102
5113
|
workflow_slug, run_id, step_id, type, available_at_us
|
|
5103
|
-
FROM step_events
|
|
5114
|
+
FROM ${this.schema}.step_events
|
|
5104
5115
|
${options?.workflowSlugs ? "WHERE workflow_slug = ANY($1)" : ""}
|
|
5105
5116
|
ORDER BY workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC
|
|
5106
5117
|
)
|
|
@@ -5110,34 +5121,34 @@ async function listScheduledSteps(pool, options) {
|
|
|
5110
5121
|
AND (available_at_us IS NULL OR available_at_us <= $${options?.workflowSlugs ? "3" : "2"})
|
|
5111
5122
|
${options?.limit ? `LIMIT $${options?.workflowSlugs ? "4" : "3"}` : ""}
|
|
5112
5123
|
`;
|
|
5113
|
-
|
|
5114
|
-
|
|
5115
|
-
|
|
5116
|
-
|
|
5117
|
-
|
|
5118
|
-
|
|
5119
|
-
|
|
5120
|
-
|
|
5124
|
+
const params = [];
|
|
5125
|
+
if (options?.workflowSlugs) {
|
|
5126
|
+
params.push(options.workflowSlugs);
|
|
5127
|
+
}
|
|
5128
|
+
params.push(scheduledTypes);
|
|
5129
|
+
params.push(currentTimeUs);
|
|
5130
|
+
if (options?.limit) {
|
|
5131
|
+
params.push(options.limit);
|
|
5132
|
+
}
|
|
5133
|
+
const result = await client.query(query, params);
|
|
5134
|
+
return result.rows.map((row) => ({
|
|
5135
|
+
workflowSlug: row.workflow_slug,
|
|
5136
|
+
runId: row.run_id,
|
|
5137
|
+
stepId: row.step_id
|
|
5138
|
+
}));
|
|
5139
|
+
} finally {
|
|
5140
|
+
client.release();
|
|
5121
5141
|
}
|
|
5122
|
-
const result = await client.query(query, params);
|
|
5123
|
-
return result.rows.map((row) => ({
|
|
5124
|
-
workflowSlug: row.workflow_slug,
|
|
5125
|
-
runId: row.run_id,
|
|
5126
|
-
stepId: row.step_id
|
|
5127
|
-
}));
|
|
5128
|
-
} finally {
|
|
5129
|
-
client.release();
|
|
5130
5142
|
}
|
|
5131
|
-
|
|
5132
|
-
|
|
5133
|
-
|
|
5134
|
-
|
|
5135
|
-
|
|
5136
|
-
const query = `
|
|
5143
|
+
async findStaleSteps(staleThresholdUs) {
|
|
5144
|
+
const client = await this.pool.connect();
|
|
5145
|
+
try {
|
|
5146
|
+
const currentTimeUs = Date.now() * 1000;
|
|
5147
|
+
const query = `
|
|
5137
5148
|
WITH latest_step_events AS (
|
|
5138
5149
|
SELECT DISTINCT ON (workflow_slug, run_id, step_id)
|
|
5139
5150
|
workflow_slug, run_id, step_id, type, timestamp_us, worker_id
|
|
5140
|
-
FROM step_events
|
|
5151
|
+
FROM ${this.schema}.step_events
|
|
5141
5152
|
WHERE type IN ('StepStarted', 'StepHeartbeat')
|
|
5142
5153
|
ORDER BY workflow_slug, run_id, step_id, timestamp_us DESC, event_id DESC
|
|
5143
5154
|
)
|
|
@@ -5145,21 +5156,42 @@ async function findStaleSteps(pool, staleThresholdUs) {
|
|
|
5145
5156
|
FROM latest_step_events
|
|
5146
5157
|
WHERE timestamp_us < $1 AND worker_id IS NOT NULL
|
|
5147
5158
|
`;
|
|
5148
|
-
|
|
5149
|
-
|
|
5150
|
-
|
|
5151
|
-
|
|
5152
|
-
|
|
5153
|
-
|
|
5154
|
-
|
|
5155
|
-
|
|
5156
|
-
|
|
5159
|
+
const result = await client.query(query, [currentTimeUs - staleThresholdUs]);
|
|
5160
|
+
return result.rows.map((row) => ({
|
|
5161
|
+
workflowSlug: row.workflow_slug,
|
|
5162
|
+
runId: row.run_id,
|
|
5163
|
+
stepId: row.step_id,
|
|
5164
|
+
workerId: row.worker_id
|
|
5165
|
+
}));
|
|
5166
|
+
} finally {
|
|
5167
|
+
client.release();
|
|
5168
|
+
}
|
|
5157
5169
|
}
|
|
5158
|
-
|
|
5159
|
-
|
|
5160
|
-
|
|
5161
|
-
|
|
5162
|
-
|
|
5170
|
+
async saveStepOutput(workflowSlug, runId, stepId, attemptNumber, output) {
|
|
5171
|
+
const client = await this.pool.connect();
|
|
5172
|
+
try {
|
|
5173
|
+
await client.query(`INSERT INTO ${this.schema}.step_outputs (workflow_slug, run_id, step_id, attempt_number, output)
|
|
5174
|
+
VALUES ($1, $2, $3, $4, $5)
|
|
5175
|
+
ON CONFLICT (workflow_slug, run_id, step_id, attempt_number)
|
|
5176
|
+
DO UPDATE SET output = EXCLUDED.output`, [workflowSlug, runId, stepId, attemptNumber, JSON.stringify(output)]);
|
|
5177
|
+
} finally {
|
|
5178
|
+
client.release();
|
|
5179
|
+
}
|
|
5180
|
+
}
|
|
5181
|
+
async loadStepOutput(workflowSlug, runId, stepId, attemptNumber) {
|
|
5182
|
+
const client = await this.pool.connect();
|
|
5183
|
+
try {
|
|
5184
|
+
const result = await client.query(`SELECT output FROM ${this.schema}.step_outputs
|
|
5185
|
+
WHERE workflow_slug = $1 AND run_id = $2 AND step_id = $3 AND attempt_number = $4`, [workflowSlug, runId, stepId, attemptNumber]);
|
|
5186
|
+
return result.rows.length > 0 ? result.rows[0].output : null;
|
|
5187
|
+
} finally {
|
|
5188
|
+
client.release();
|
|
5189
|
+
}
|
|
5190
|
+
}
|
|
5191
|
+
async upsertWorkflowMetadata(slug, name, location, inputSchemaJSON) {
|
|
5192
|
+
const client = await this.pool.connect();
|
|
5193
|
+
try {
|
|
5194
|
+
await client.query(`INSERT INTO ${this.schema}.workflow_metadata (slug, name, description, input_schema_json, tags, updated_at)
|
|
5163
5195
|
VALUES ($1, $2, $3, $4, $5, NOW())
|
|
5164
5196
|
ON CONFLICT (slug)
|
|
5165
5197
|
DO UPDATE SET
|
|
@@ -5167,153 +5199,169 @@ async function upsertWorkflowMetadata(pool, slug, name, location, inputSchemaJSO
|
|
|
5167
5199
|
description = EXCLUDED.description,
|
|
5168
5200
|
input_schema_json = EXCLUDED.input_schema_json,
|
|
5169
5201
|
updated_at = NOW()`, [
|
|
5170
|
-
|
|
5171
|
-
|
|
5172
|
-
|
|
5173
|
-
|
|
5174
|
-
|
|
5175
|
-
|
|
5176
|
-
|
|
5177
|
-
|
|
5202
|
+
slug,
|
|
5203
|
+
name,
|
|
5204
|
+
location || null,
|
|
5205
|
+
inputSchemaJSON ? JSON.stringify(inputSchemaJSON) : null,
|
|
5206
|
+
[]
|
|
5207
|
+
]);
|
|
5208
|
+
} finally {
|
|
5209
|
+
client.release();
|
|
5210
|
+
}
|
|
5178
5211
|
}
|
|
5179
|
-
|
|
5180
|
-
|
|
5181
|
-
|
|
5182
|
-
|
|
5183
|
-
|
|
5184
|
-
|
|
5185
|
-
|
|
5186
|
-
)
|
|
5212
|
+
async upsertStepDefinition(workflowSlug, step) {
|
|
5213
|
+
const client = await this.pool.connect();
|
|
5214
|
+
try {
|
|
5215
|
+
await client.query(`INSERT INTO ${this.schema}.step_definitions (
|
|
5216
|
+
workflow_slug, id, dependencies, export_output, input_schema_json,
|
|
5217
|
+
timeout_ms, max_retries, retry_delay_ms
|
|
5218
|
+
)
|
|
5187
5219
|
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
5188
5220
|
ON CONFLICT (workflow_slug, id)
|
|
5189
5221
|
DO UPDATE SET
|
|
5190
5222
|
dependencies = EXCLUDED.dependencies,
|
|
5191
5223
|
export_output = EXCLUDED.export_output`, [
|
|
5192
|
-
|
|
5193
|
-
|
|
5194
|
-
|
|
5195
|
-
|
|
5196
|
-
|
|
5197
|
-
|
|
5198
|
-
|
|
5199
|
-
|
|
5200
|
-
|
|
5201
|
-
|
|
5202
|
-
|
|
5224
|
+
workflowSlug,
|
|
5225
|
+
step.id,
|
|
5226
|
+
JSON.stringify(step.dependencies),
|
|
5227
|
+
step.exportOutput,
|
|
5228
|
+
null,
|
|
5229
|
+
null,
|
|
5230
|
+
null,
|
|
5231
|
+
null
|
|
5232
|
+
]);
|
|
5233
|
+
} finally {
|
|
5234
|
+
client.release();
|
|
5235
|
+
}
|
|
5203
5236
|
}
|
|
5204
|
-
|
|
5205
|
-
|
|
5206
|
-
|
|
5207
|
-
|
|
5208
|
-
|
|
5209
|
-
FROM workflow_metadata
|
|
5237
|
+
async getWorkflowMetadata(slug) {
|
|
5238
|
+
const client = await this.pool.connect();
|
|
5239
|
+
try {
|
|
5240
|
+
const result = await client.query(`SELECT slug, name, description, input_schema_json
|
|
5241
|
+
FROM ${this.schema}.workflow_metadata
|
|
5210
5242
|
WHERE slug = $1`, [slug]);
|
|
5211
|
-
|
|
5212
|
-
|
|
5243
|
+
if (result.rows.length === 0) {
|
|
5244
|
+
return null;
|
|
5245
|
+
}
|
|
5246
|
+
const row = result.rows[0];
|
|
5247
|
+
return {
|
|
5248
|
+
slug: row.slug,
|
|
5249
|
+
name: row.name,
|
|
5250
|
+
location: row.description,
|
|
5251
|
+
inputSchemaJSON: row.input_schema_json
|
|
5252
|
+
};
|
|
5253
|
+
} finally {
|
|
5254
|
+
client.release();
|
|
5213
5255
|
}
|
|
5214
|
-
const row = result.rows[0];
|
|
5215
|
-
return {
|
|
5216
|
-
slug: row.slug,
|
|
5217
|
-
name: row.name,
|
|
5218
|
-
location: row.description,
|
|
5219
|
-
inputSchemaJSON: row.input_schema_json
|
|
5220
|
-
};
|
|
5221
|
-
} finally {
|
|
5222
|
-
client.release();
|
|
5223
5256
|
}
|
|
5224
|
-
|
|
5225
|
-
|
|
5226
|
-
|
|
5227
|
-
|
|
5228
|
-
|
|
5229
|
-
FROM workflow_metadata
|
|
5257
|
+
async listWorkflowMetadata() {
|
|
5258
|
+
const client = await this.pool.connect();
|
|
5259
|
+
try {
|
|
5260
|
+
const result = await client.query(`SELECT slug, name, description, input_schema_json
|
|
5261
|
+
FROM ${this.schema}.workflow_metadata
|
|
5230
5262
|
ORDER BY name ASC`);
|
|
5231
|
-
|
|
5232
|
-
|
|
5233
|
-
|
|
5234
|
-
|
|
5235
|
-
|
|
5236
|
-
|
|
5237
|
-
|
|
5238
|
-
|
|
5263
|
+
return result.rows.map((row) => ({
|
|
5264
|
+
slug: row.slug,
|
|
5265
|
+
name: row.name,
|
|
5266
|
+
location: row.description,
|
|
5267
|
+
inputSchemaJSON: row.input_schema_json
|
|
5268
|
+
}));
|
|
5269
|
+
} finally {
|
|
5270
|
+
client.release();
|
|
5271
|
+
}
|
|
5239
5272
|
}
|
|
5240
|
-
|
|
5241
|
-
|
|
5242
|
-
|
|
5243
|
-
|
|
5244
|
-
|
|
5245
|
-
FROM step_definitions
|
|
5273
|
+
async getWorkflowSteps(workflowSlug) {
|
|
5274
|
+
const client = await this.pool.connect();
|
|
5275
|
+
try {
|
|
5276
|
+
const result = await client.query(`SELECT id, dependencies, export_output
|
|
5277
|
+
FROM ${this.schema}.step_definitions
|
|
5246
5278
|
WHERE workflow_slug = $1
|
|
5247
5279
|
ORDER BY id ASC`, [workflowSlug]);
|
|
5248
|
-
|
|
5249
|
-
|
|
5250
|
-
|
|
5251
|
-
|
|
5252
|
-
|
|
5253
|
-
|
|
5254
|
-
|
|
5255
|
-
|
|
5280
|
+
return result.rows.map((row) => ({
|
|
5281
|
+
id: row.id,
|
|
5282
|
+
name: row.id,
|
|
5283
|
+
dependencies: row.dependencies,
|
|
5284
|
+
exportOutput: row.export_output
|
|
5285
|
+
}));
|
|
5286
|
+
} finally {
|
|
5287
|
+
client.release();
|
|
5288
|
+
}
|
|
5256
5289
|
}
|
|
5257
|
-
|
|
5258
|
-
|
|
5259
|
-
|
|
5260
|
-
|
|
5261
|
-
const result = await client.query(`INSERT INTO idempotency_keys (hash, run_id)
|
|
5290
|
+
async saveIdempotencyKey(hash, runId) {
|
|
5291
|
+
const client = await this.pool.connect();
|
|
5292
|
+
try {
|
|
5293
|
+
const result = await client.query(`INSERT INTO ${this.schema}.idempotency_keys (hash, run_id)
|
|
5262
5294
|
VALUES ($1, $2)
|
|
5263
5295
|
ON CONFLICT (hash)
|
|
5264
5296
|
DO UPDATE SET hash = EXCLUDED.hash
|
|
5265
5297
|
RETURNING run_id`, [hash, runId]);
|
|
5266
|
-
|
|
5267
|
-
|
|
5268
|
-
|
|
5298
|
+
return result.rows[0].run_id;
|
|
5299
|
+
} finally {
|
|
5300
|
+
client.release();
|
|
5301
|
+
}
|
|
5269
5302
|
}
|
|
5270
|
-
|
|
5271
|
-
|
|
5272
|
-
|
|
5273
|
-
|
|
5274
|
-
const result = await client.query(`SELECT DISTINCT run_id FROM workflow_events WHERE workflow_slug = $1
|
|
5303
|
+
async listRunIds(workflowSlug) {
|
|
5304
|
+
const client = await this.pool.connect();
|
|
5305
|
+
try {
|
|
5306
|
+
const result = await client.query(`SELECT DISTINCT run_id FROM ${this.schema}.workflow_events WHERE workflow_slug = $1
|
|
5275
5307
|
UNION
|
|
5276
|
-
SELECT DISTINCT run_id FROM step_events WHERE workflow_slug = $1
|
|
5308
|
+
SELECT DISTINCT run_id FROM ${this.schema}.step_events WHERE workflow_slug = $1
|
|
5277
5309
|
ORDER BY run_id DESC`, [workflowSlug]);
|
|
5278
|
-
|
|
5279
|
-
|
|
5280
|
-
|
|
5310
|
+
return result.rows.map((row) => row.run_id);
|
|
5311
|
+
} finally {
|
|
5312
|
+
client.release();
|
|
5313
|
+
}
|
|
5281
5314
|
}
|
|
5282
|
-
|
|
5283
|
-
|
|
5284
|
-
|
|
5285
|
-
|
|
5286
|
-
const result = await client.query(`
|
|
5315
|
+
async listActiveWorkflows() {
|
|
5316
|
+
const client = await this.pool.connect();
|
|
5317
|
+
try {
|
|
5318
|
+
const result = await client.query(`
|
|
5287
5319
|
SELECT DISTINCT workflow_slug FROM (
|
|
5288
|
-
SELECT DISTINCT workflow_slug FROM workflow_events
|
|
5320
|
+
SELECT DISTINCT workflow_slug FROM ${this.schema}.workflow_events
|
|
5289
5321
|
WHERE type IN ('RunSubmitted', 'WorkflowStarted', 'WorkflowResumed')
|
|
5290
5322
|
UNION
|
|
5291
|
-
SELECT DISTINCT workflow_slug FROM step_events
|
|
5323
|
+
SELECT DISTINCT workflow_slug FROM ${this.schema}.step_events
|
|
5292
5324
|
WHERE type IN ('StepScheduled', 'StepStarted', 'StepReclaimed', 'StepRetrying')
|
|
5293
5325
|
) AS active
|
|
5294
5326
|
ORDER BY workflow_slug ASC
|
|
5295
5327
|
`);
|
|
5296
|
-
|
|
5297
|
-
|
|
5298
|
-
|
|
5328
|
+
return result.rows.map((row) => row.workflow_slug);
|
|
5329
|
+
} finally {
|
|
5330
|
+
client.release();
|
|
5331
|
+
}
|
|
5332
|
+
}
|
|
5333
|
+
async runExists(workflowSlug, runId) {
|
|
5334
|
+
const client = await this.pool.connect();
|
|
5335
|
+
try {
|
|
5336
|
+
const result = await client.query(`SELECT 1 FROM ${this.schema}.workflow_events WHERE workflow_slug = $1 AND run_id = $2 LIMIT 1`, [workflowSlug, runId]);
|
|
5337
|
+
return result.rows.length > 0;
|
|
5338
|
+
} finally {
|
|
5339
|
+
client.release();
|
|
5340
|
+
}
|
|
5299
5341
|
}
|
|
5300
5342
|
}
|
|
5301
|
-
|
|
5343
|
+
function createPool(connectionString) {
|
|
5344
|
+
return new Pool2({ connectionString });
|
|
5345
|
+
}
|
|
5346
|
+
|
|
5347
|
+
// src/migrations.ts
|
|
5348
|
+
async function migration000_createSchema(pool, schema) {
|
|
5302
5349
|
const client = await pool.connect();
|
|
5303
5350
|
try {
|
|
5304
|
-
|
|
5305
|
-
|
|
5351
|
+
await client.query(`CREATE SCHEMA IF NOT EXISTS ${schema}`);
|
|
5352
|
+
console.log(`[Migration 000] Schema '${schema}' created successfully`);
|
|
5353
|
+
} catch (error) {
|
|
5354
|
+
console.error(`[Migration 000] Error creating schema '${schema}':`, error);
|
|
5355
|
+
throw error;
|
|
5306
5356
|
} finally {
|
|
5307
5357
|
client.release();
|
|
5308
5358
|
}
|
|
5309
5359
|
}
|
|
5310
|
-
|
|
5311
|
-
// src/migrations.ts
|
|
5312
|
-
async function migration001_createTables(pool) {
|
|
5360
|
+
async function migration001_createTables(pool, schema) {
|
|
5313
5361
|
const client = await pool.connect();
|
|
5314
5362
|
try {
|
|
5315
5363
|
await client.query(`
|
|
5316
|
-
CREATE TABLE IF NOT EXISTS workflow_events (
|
|
5364
|
+
CREATE TABLE IF NOT EXISTS ${schema}.workflow_events (
|
|
5317
5365
|
id SERIAL PRIMARY KEY,
|
|
5318
5366
|
event_id TEXT NOT NULL,
|
|
5319
5367
|
workflow_slug TEXT NOT NULL,
|
|
@@ -5326,7 +5374,7 @@ async function migration001_createTables(pool) {
|
|
|
5326
5374
|
)
|
|
5327
5375
|
`);
|
|
5328
5376
|
await client.query(`
|
|
5329
|
-
CREATE TABLE IF NOT EXISTS step_events (
|
|
5377
|
+
CREATE TABLE IF NOT EXISTS ${schema}.step_events (
|
|
5330
5378
|
id SERIAL PRIMARY KEY,
|
|
5331
5379
|
event_id TEXT NOT NULL,
|
|
5332
5380
|
workflow_slug TEXT NOT NULL,
|
|
@@ -5340,7 +5388,7 @@ async function migration001_createTables(pool) {
|
|
|
5340
5388
|
)
|
|
5341
5389
|
`);
|
|
5342
5390
|
await client.query(`
|
|
5343
|
-
CREATE TABLE IF NOT EXISTS workflow_metadata (
|
|
5391
|
+
CREATE TABLE IF NOT EXISTS ${schema}.workflow_metadata (
|
|
5344
5392
|
slug TEXT PRIMARY KEY,
|
|
5345
5393
|
name TEXT NOT NULL,
|
|
5346
5394
|
description TEXT,
|
|
@@ -5351,7 +5399,7 @@ async function migration001_createTables(pool) {
|
|
|
5351
5399
|
)
|
|
5352
5400
|
`);
|
|
5353
5401
|
await client.query(`
|
|
5354
|
-
CREATE TABLE IF NOT EXISTS step_definitions (
|
|
5402
|
+
CREATE TABLE IF NOT EXISTS ${schema}.step_definitions (
|
|
5355
5403
|
workflow_slug TEXT NOT NULL,
|
|
5356
5404
|
id TEXT NOT NULL,
|
|
5357
5405
|
dependencies JSONB NOT NULL DEFAULT '{}',
|
|
@@ -5362,11 +5410,11 @@ async function migration001_createTables(pool) {
|
|
|
5362
5410
|
retry_delay_ms INTEGER,
|
|
5363
5411
|
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
|
|
5364
5412
|
PRIMARY KEY (workflow_slug, id),
|
|
5365
|
-
FOREIGN KEY (workflow_slug) REFERENCES workflow_metadata(slug) ON DELETE CASCADE
|
|
5413
|
+
FOREIGN KEY (workflow_slug) REFERENCES ${schema}.workflow_metadata(slug) ON DELETE CASCADE
|
|
5366
5414
|
)
|
|
5367
5415
|
`);
|
|
5368
5416
|
await client.query(`
|
|
5369
|
-
CREATE TABLE IF NOT EXISTS step_outputs (
|
|
5417
|
+
CREATE TABLE IF NOT EXISTS ${schema}.step_outputs (
|
|
5370
5418
|
workflow_slug TEXT NOT NULL,
|
|
5371
5419
|
run_id TEXT NOT NULL,
|
|
5372
5420
|
step_id TEXT NOT NULL,
|
|
@@ -5377,7 +5425,7 @@ async function migration001_createTables(pool) {
|
|
|
5377
5425
|
)
|
|
5378
5426
|
`);
|
|
5379
5427
|
await client.query(`
|
|
5380
|
-
CREATE TABLE IF NOT EXISTS idempotency_keys (
|
|
5428
|
+
CREATE TABLE IF NOT EXISTS ${schema}.idempotency_keys (
|
|
5381
5429
|
hash TEXT PRIMARY KEY,
|
|
5382
5430
|
run_id TEXT NOT NULL,
|
|
5383
5431
|
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
|
|
@@ -5391,18 +5439,18 @@ async function migration001_createTables(pool) {
|
|
|
5391
5439
|
client.release();
|
|
5392
5440
|
}
|
|
5393
5441
|
}
|
|
5394
|
-
async function migration002_addNormalizedColumns(pool) {
|
|
5442
|
+
async function migration002_addNormalizedColumns(pool, schema) {
|
|
5395
5443
|
const client = await pool.connect();
|
|
5396
5444
|
try {
|
|
5397
5445
|
await client.query(`
|
|
5398
|
-
ALTER TABLE step_events
|
|
5446
|
+
ALTER TABLE ${schema}.step_events
|
|
5399
5447
|
ADD COLUMN IF NOT EXISTS worker_id TEXT,
|
|
5400
5448
|
ADD COLUMN IF NOT EXISTS attempt_number INTEGER,
|
|
5401
5449
|
ADD COLUMN IF NOT EXISTS available_at_us BIGINT,
|
|
5402
5450
|
ADD COLUMN IF NOT EXISTS export_output BOOLEAN
|
|
5403
5451
|
`);
|
|
5404
5452
|
await client.query(`
|
|
5405
|
-
ALTER TABLE workflow_events
|
|
5453
|
+
ALTER TABLE ${schema}.workflow_events
|
|
5406
5454
|
ADD COLUMN IF NOT EXISTS workflow_attempt_number INTEGER,
|
|
5407
5455
|
ADD COLUMN IF NOT EXISTS available_at_us BIGINT,
|
|
5408
5456
|
ADD COLUMN IF NOT EXISTS priority INTEGER,
|
|
@@ -5417,65 +5465,65 @@ async function migration002_addNormalizedColumns(pool) {
|
|
|
5417
5465
|
client.release();
|
|
5418
5466
|
}
|
|
5419
5467
|
}
|
|
5420
|
-
async function migration003_createIndexes(pool) {
|
|
5468
|
+
async function migration003_createIndexes(pool, schema) {
|
|
5421
5469
|
const client = await pool.connect();
|
|
5422
5470
|
try {
|
|
5423
5471
|
await client.query(`
|
|
5424
5472
|
CREATE INDEX IF NOT EXISTS idx_workflow_events_lookup
|
|
5425
|
-
ON workflow_events (workflow_slug, run_id, timestamp_us)
|
|
5473
|
+
ON ${schema}.workflow_events (workflow_slug, run_id, timestamp_us)
|
|
5426
5474
|
`);
|
|
5427
5475
|
await client.query(`
|
|
5428
5476
|
CREATE INDEX IF NOT EXISTS idx_workflow_events_type
|
|
5429
|
-
ON workflow_events (workflow_slug, run_id, type)
|
|
5477
|
+
ON ${schema}.workflow_events (workflow_slug, run_id, type)
|
|
5430
5478
|
`);
|
|
5431
5479
|
await client.query(`
|
|
5432
5480
|
CREATE INDEX IF NOT EXISTS idx_step_events_lookup
|
|
5433
|
-
ON step_events (workflow_slug, run_id, step_id, timestamp_us)
|
|
5481
|
+
ON ${schema}.step_events (workflow_slug, run_id, step_id, timestamp_us)
|
|
5434
5482
|
`);
|
|
5435
5483
|
await client.query(`
|
|
5436
5484
|
CREATE INDEX IF NOT EXISTS idx_step_events_run
|
|
5437
|
-
ON step_events (workflow_slug, run_id, timestamp_us)
|
|
5485
|
+
ON ${schema}.step_events (workflow_slug, run_id, timestamp_us)
|
|
5438
5486
|
`);
|
|
5439
5487
|
await client.query(`
|
|
5440
5488
|
CREATE INDEX IF NOT EXISTS idx_step_events_type
|
|
5441
|
-
ON step_events (workflow_slug, run_id, type)
|
|
5489
|
+
ON ${schema}.step_events (workflow_slug, run_id, type)
|
|
5442
5490
|
`);
|
|
5443
5491
|
await client.query(`
|
|
5444
5492
|
CREATE INDEX IF NOT EXISTS idx_step_events_scheduled
|
|
5445
|
-
ON step_events (workflow_slug, run_id, step_id, type, timestamp_us)
|
|
5493
|
+
ON ${schema}.step_events (workflow_slug, run_id, step_id, type, timestamp_us)
|
|
5446
5494
|
`);
|
|
5447
5495
|
await client.query(`
|
|
5448
5496
|
CREATE INDEX IF NOT EXISTS idx_step_definitions_workflow
|
|
5449
|
-
ON step_definitions (workflow_slug)
|
|
5497
|
+
ON ${schema}.step_definitions (workflow_slug)
|
|
5450
5498
|
`);
|
|
5451
5499
|
await client.query(`
|
|
5452
5500
|
CREATE INDEX IF NOT EXISTS idx_step_events_worker_timestamp
|
|
5453
|
-
ON step_events (worker_id, timestamp_us)
|
|
5501
|
+
ON ${schema}.step_events (worker_id, timestamp_us)
|
|
5454
5502
|
WHERE worker_id IS NOT NULL
|
|
5455
5503
|
`);
|
|
5456
5504
|
await client.query(`
|
|
5457
5505
|
CREATE INDEX IF NOT EXISTS idx_step_events_available_at
|
|
5458
|
-
ON step_events (type, available_at_us)
|
|
5506
|
+
ON ${schema}.step_events (type, available_at_us)
|
|
5459
5507
|
WHERE available_at_us IS NOT NULL
|
|
5460
5508
|
`);
|
|
5461
5509
|
await client.query(`
|
|
5462
5510
|
CREATE INDEX IF NOT EXISTS idx_step_events_attempt
|
|
5463
|
-
ON step_events (workflow_slug, run_id, step_id, attempt_number)
|
|
5511
|
+
ON ${schema}.step_events (workflow_slug, run_id, step_id, attempt_number)
|
|
5464
5512
|
WHERE attempt_number IS NOT NULL
|
|
5465
5513
|
`);
|
|
5466
5514
|
await client.query(`
|
|
5467
5515
|
CREATE INDEX IF NOT EXISTS idx_workflow_events_priority
|
|
5468
|
-
ON workflow_events (priority DESC, available_at_us)
|
|
5516
|
+
ON ${schema}.workflow_events (priority DESC, available_at_us)
|
|
5469
5517
|
WHERE priority IS NOT NULL
|
|
5470
5518
|
`);
|
|
5471
5519
|
await client.query(`
|
|
5472
5520
|
CREATE INDEX IF NOT EXISTS idx_workflow_events_idempotency
|
|
5473
|
-
ON workflow_events (idempotency_key)
|
|
5521
|
+
ON ${schema}.workflow_events (idempotency_key)
|
|
5474
5522
|
WHERE idempotency_key IS NOT NULL
|
|
5475
5523
|
`);
|
|
5476
5524
|
await client.query(`
|
|
5477
5525
|
CREATE INDEX IF NOT EXISTS idx_workflow_events_timeout
|
|
5478
|
-
ON workflow_events (workflow_slug, run_id, timeout_us)
|
|
5526
|
+
ON ${schema}.workflow_events (workflow_slug, run_id, timeout_us)
|
|
5479
5527
|
WHERE timeout_us IS NOT NULL
|
|
5480
5528
|
`);
|
|
5481
5529
|
console.log("[Migration 003] Indexes created successfully");
|
|
@@ -5486,11 +5534,11 @@ async function migration003_createIndexes(pool) {
|
|
|
5486
5534
|
client.release();
|
|
5487
5535
|
}
|
|
5488
5536
|
}
|
|
5489
|
-
async function migration004_addErrorFingerprints(pool) {
|
|
5537
|
+
async function migration004_addErrorFingerprints(pool, schema) {
|
|
5490
5538
|
const client = await pool.connect();
|
|
5491
5539
|
try {
|
|
5492
5540
|
await client.query(`
|
|
5493
|
-
ALTER TABLE step_events
|
|
5541
|
+
ALTER TABLE ${schema}.step_events
|
|
5494
5542
|
ADD COLUMN IF NOT EXISTS error_name_hash TEXT NOT NULL DEFAULT '',
|
|
5495
5543
|
ADD COLUMN IF NOT EXISTS error_message_hash TEXT NOT NULL DEFAULT '',
|
|
5496
5544
|
ADD COLUMN IF NOT EXISTS error_stack_exact_hash TEXT NOT NULL DEFAULT '',
|
|
@@ -5499,17 +5547,17 @@ async function migration004_addErrorFingerprints(pool) {
|
|
|
5499
5547
|
`);
|
|
5500
5548
|
await client.query(`
|
|
5501
5549
|
CREATE INDEX IF NOT EXISTS idx_error_fp_exact
|
|
5502
|
-
ON step_events(error_name_hash, error_message_hash, error_stack_exact_hash)
|
|
5550
|
+
ON ${schema}.step_events(error_name_hash, error_message_hash, error_stack_exact_hash)
|
|
5503
5551
|
WHERE type = 'StepFailed'
|
|
5504
5552
|
`);
|
|
5505
5553
|
await client.query(`
|
|
5506
5554
|
CREATE INDEX IF NOT EXISTS idx_error_fp_normalized
|
|
5507
|
-
ON step_events(error_name_hash, error_message_hash, error_stack_normalized_hash)
|
|
5555
|
+
ON ${schema}.step_events(error_name_hash, error_message_hash, error_stack_normalized_hash)
|
|
5508
5556
|
WHERE type = 'StepFailed'
|
|
5509
5557
|
`);
|
|
5510
5558
|
await client.query(`
|
|
5511
5559
|
CREATE INDEX IF NOT EXISTS idx_error_fp_portable
|
|
5512
|
-
ON step_events(error_name_hash, error_message_hash, error_stack_portable_hash)
|
|
5560
|
+
ON ${schema}.step_events(error_name_hash, error_message_hash, error_stack_portable_hash)
|
|
5513
5561
|
WHERE type = 'StepFailed'
|
|
5514
5562
|
`);
|
|
5515
5563
|
console.log("[Migration 004] Error fingerprint columns and indexes added successfully");
|
|
@@ -5520,13 +5568,64 @@ async function migration004_addErrorFingerprints(pool) {
|
|
|
5520
5568
|
client.release();
|
|
5521
5569
|
}
|
|
5522
5570
|
}
|
|
5523
|
-
async function
|
|
5524
|
-
|
|
5571
|
+
async function migration005_addWorkflowVersioning(pool, schema) {
|
|
5572
|
+
const client = await pool.connect();
|
|
5573
|
+
try {
|
|
5574
|
+
await client.query(`
|
|
5575
|
+
CREATE TABLE IF NOT EXISTS ${schema}.workflow_versions (
|
|
5576
|
+
workflow_slug TEXT NOT NULL,
|
|
5577
|
+
version_id TEXT NOT NULL,
|
|
5578
|
+
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
|
|
5579
|
+
step_manifest TEXT[] NOT NULL,
|
|
5580
|
+
total_steps INTEGER NOT NULL,
|
|
5581
|
+
git_commit TEXT,
|
|
5582
|
+
git_dirty BOOLEAN,
|
|
5583
|
+
git_branch TEXT,
|
|
5584
|
+
PRIMARY KEY (workflow_slug, version_id),
|
|
5585
|
+
FOREIGN KEY (workflow_slug) REFERENCES ${schema}.workflow_metadata(slug) ON DELETE CASCADE
|
|
5586
|
+
)
|
|
5587
|
+
`);
|
|
5588
|
+
await client.query(`
|
|
5589
|
+
CREATE INDEX IF NOT EXISTS idx_workflow_versions_slug
|
|
5590
|
+
ON ${schema}.workflow_versions(workflow_slug)
|
|
5591
|
+
`);
|
|
5592
|
+
await client.query(`
|
|
5593
|
+
CREATE INDEX IF NOT EXISTS idx_workflow_versions_created
|
|
5594
|
+
ON ${schema}.workflow_versions(workflow_slug, created_at DESC)
|
|
5595
|
+
`);
|
|
5596
|
+
await client.query(`
|
|
5597
|
+
ALTER TABLE ${schema}.workflow_events
|
|
5598
|
+
ADD COLUMN IF NOT EXISTS version_id TEXT NOT NULL
|
|
5599
|
+
`);
|
|
5600
|
+
await client.query(`
|
|
5601
|
+
CREATE INDEX IF NOT EXISTS idx_workflow_events_version
|
|
5602
|
+
ON ${schema}.workflow_events(workflow_slug, run_id, version_id)
|
|
5603
|
+
`);
|
|
5604
|
+
await client.query(`
|
|
5605
|
+
ALTER TABLE ${schema}.step_events
|
|
5606
|
+
ADD COLUMN IF NOT EXISTS version_id TEXT NOT NULL
|
|
5607
|
+
`);
|
|
5608
|
+
await client.query(`
|
|
5609
|
+
CREATE INDEX IF NOT EXISTS idx_step_events_version
|
|
5610
|
+
ON ${schema}.step_events(workflow_slug, version_id)
|
|
5611
|
+
`);
|
|
5612
|
+
console.log("[Migration 005] Workflow versioning support added successfully");
|
|
5613
|
+
} catch (error) {
|
|
5614
|
+
console.error("[Migration 005] Error adding workflow versioning:", error);
|
|
5615
|
+
throw error;
|
|
5616
|
+
} finally {
|
|
5617
|
+
client.release();
|
|
5618
|
+
}
|
|
5619
|
+
}
|
|
5620
|
+
async function runMigrations(pool, schema = "cascadeflow") {
|
|
5621
|
+
console.log(`[Migrations] Starting database migrations in schema '${schema}'...`);
|
|
5525
5622
|
try {
|
|
5526
|
-
await
|
|
5527
|
-
await
|
|
5528
|
-
await
|
|
5529
|
-
await
|
|
5623
|
+
await migration000_createSchema(pool, schema);
|
|
5624
|
+
await migration001_createTables(pool, schema);
|
|
5625
|
+
await migration002_addNormalizedColumns(pool, schema);
|
|
5626
|
+
await migration003_createIndexes(pool, schema);
|
|
5627
|
+
await migration004_addErrorFingerprints(pool, schema);
|
|
5628
|
+
await migration005_addWorkflowVersioning(pool, schema);
|
|
5530
5629
|
console.log("[Migrations] All migrations completed successfully");
|
|
5531
5630
|
} catch (error) {
|
|
5532
5631
|
console.error("[Migrations] Migration failed:", error);
|
|
@@ -5536,17 +5635,18 @@ async function runMigrations(pool) {
|
|
|
5536
5635
|
|
|
5537
5636
|
// src/index.ts
|
|
5538
5637
|
class PostgresBackend extends Backend {
|
|
5539
|
-
|
|
5638
|
+
db;
|
|
5540
5639
|
initialized = false;
|
|
5541
|
-
constructor(connectionString) {
|
|
5640
|
+
constructor(connectionString, schema = "cascadeflow") {
|
|
5542
5641
|
super();
|
|
5543
|
-
|
|
5642
|
+
const pool = createPool(connectionString);
|
|
5643
|
+
this.db = new DatabaseClient(pool, schema);
|
|
5544
5644
|
}
|
|
5545
5645
|
async initialize() {
|
|
5546
5646
|
if (this.initialized) {
|
|
5547
5647
|
return;
|
|
5548
5648
|
}
|
|
5549
|
-
await runMigrations(this.
|
|
5649
|
+
await runMigrations(this.db.getPool(), this.db.getSchema());
|
|
5550
5650
|
this.initialized = true;
|
|
5551
5651
|
}
|
|
5552
5652
|
generateRunId() {
|
|
@@ -5561,10 +5661,10 @@ class PostgresBackend extends Backend {
|
|
|
5561
5661
|
}
|
|
5562
5662
|
async initializeRun(workflowSlug, runId) {}
|
|
5563
5663
|
async runExists(workflowSlug, runId) {
|
|
5564
|
-
return runExists(
|
|
5664
|
+
return this.db.runExists(workflowSlug, runId);
|
|
5565
5665
|
}
|
|
5566
5666
|
async loadRun(workflowSlug, runId) {
|
|
5567
|
-
const events = await loadAllRunEvents(
|
|
5667
|
+
const events = await this.db.loadAllRunEvents(workflowSlug, runId);
|
|
5568
5668
|
const stepEvents = new Map;
|
|
5569
5669
|
for (const event of events) {
|
|
5570
5670
|
if (event.category === "step") {
|
|
@@ -5594,22 +5694,22 @@ class PostgresBackend extends Backend {
|
|
|
5594
5694
|
}
|
|
5595
5695
|
eventSchema.parse(event);
|
|
5596
5696
|
const table = event.category === "workflow" ? "workflow_events" : "step_events";
|
|
5597
|
-
await appendEvent(
|
|
5697
|
+
await this.db.appendEvent(table, event);
|
|
5598
5698
|
}
|
|
5599
5699
|
async loadEvents(workflowSlug, runId, options) {
|
|
5600
5700
|
if (options?.category === "workflow") {
|
|
5601
|
-
return loadEvents(
|
|
5701
|
+
return this.db.loadEvents("workflow_events", {
|
|
5602
5702
|
workflowSlug,
|
|
5603
5703
|
runId
|
|
5604
5704
|
});
|
|
5605
5705
|
} else if (options?.category === "step") {
|
|
5606
|
-
return loadEvents(
|
|
5706
|
+
return this.db.loadEvents("step_events", {
|
|
5607
5707
|
workflowSlug,
|
|
5608
5708
|
runId,
|
|
5609
5709
|
stepId: options.stepId
|
|
5610
5710
|
});
|
|
5611
5711
|
} else {
|
|
5612
|
-
return loadAllRunEvents(
|
|
5712
|
+
return this.db.loadAllRunEvents(workflowSlug, runId);
|
|
5613
5713
|
}
|
|
5614
5714
|
}
|
|
5615
5715
|
async saveStepScheduled(workflowSlug, runId, stepId, metadata) {
|
|
@@ -5627,7 +5727,7 @@ class PostgresBackend extends Backend {
|
|
|
5627
5727
|
attemptNumber: metadata.attemptNumber,
|
|
5628
5728
|
retryDelayMs: metadata.retryDelayMs
|
|
5629
5729
|
};
|
|
5630
|
-
await appendEvent(
|
|
5730
|
+
await this.db.appendEvent("step_events", event);
|
|
5631
5731
|
}
|
|
5632
5732
|
async saveStepStart(workflowSlug, runId, stepId, workerId, metadata) {
|
|
5633
5733
|
const events = await this.loadEvents(workflowSlug, runId, { category: "step", stepId });
|
|
@@ -5645,7 +5745,7 @@ class PostgresBackend extends Backend {
|
|
|
5645
5745
|
workerId,
|
|
5646
5746
|
dependencies: metadata.dependencies
|
|
5647
5747
|
};
|
|
5648
|
-
await appendEvent(
|
|
5748
|
+
await this.db.appendEvent("step_events", event);
|
|
5649
5749
|
}
|
|
5650
5750
|
async saveStepComplete(workflowSlug, runId, stepId, output, metadata, exportOutput = false) {
|
|
5651
5751
|
const events = await this.loadEvents(workflowSlug, runId, { category: "step", stepId });
|
|
@@ -5668,7 +5768,7 @@ class PostgresBackend extends Backend {
|
|
|
5668
5768
|
message: log.message,
|
|
5669
5769
|
attemptNumber
|
|
5670
5770
|
};
|
|
5671
|
-
await appendEvent(
|
|
5771
|
+
await this.db.appendEvent("step_events", logEvent);
|
|
5672
5772
|
}
|
|
5673
5773
|
}
|
|
5674
5774
|
const serialized = safeSerialize(output);
|
|
@@ -5687,7 +5787,7 @@ class PostgresBackend extends Backend {
|
|
|
5687
5787
|
attemptNumber,
|
|
5688
5788
|
exportOutput
|
|
5689
5789
|
};
|
|
5690
|
-
await appendEvent(
|
|
5790
|
+
await this.db.appendEvent("step_events", event);
|
|
5691
5791
|
}
|
|
5692
5792
|
async saveStepFailed(workflowSlug, runId, stepId, error, metadata) {
|
|
5693
5793
|
const now = getMicrosecondTimestamp();
|
|
@@ -5707,10 +5807,10 @@ class PostgresBackend extends Backend {
|
|
|
5707
5807
|
nextRetryAtUs: metadata.nextRetryAt,
|
|
5708
5808
|
failureReason: metadata.failureReason
|
|
5709
5809
|
};
|
|
5710
|
-
await appendEvent(
|
|
5810
|
+
await this.db.appendEvent("step_events", event);
|
|
5711
5811
|
}
|
|
5712
5812
|
async saveStepFailedAndScheduleRetry(workflowSlug, runId, stepId, error, failureMetadata, scheduleMetadata) {
|
|
5713
|
-
const client = await this.
|
|
5813
|
+
const client = await this.db.getPool().connect();
|
|
5714
5814
|
try {
|
|
5715
5815
|
await client.query("BEGIN");
|
|
5716
5816
|
const failedTimestamp = getMicrosecondTimestamp();
|
|
@@ -5732,7 +5832,7 @@ class PostgresBackend extends Backend {
|
|
|
5732
5832
|
nextRetryAtUs: failureMetadata.nextRetryAt,
|
|
5733
5833
|
failureReason: failureMetadata.failureReason
|
|
5734
5834
|
};
|
|
5735
|
-
await appendEventWithClient(client, "step_events", failedEvent);
|
|
5835
|
+
await this.db.appendEventWithClient(client, "step_events", failedEvent);
|
|
5736
5836
|
const retryingEvent = {
|
|
5737
5837
|
category: "step",
|
|
5738
5838
|
type: "StepRetrying",
|
|
@@ -5746,7 +5846,7 @@ class PostgresBackend extends Backend {
|
|
|
5746
5846
|
maxRetries: scheduleMetadata.maxRetries,
|
|
5747
5847
|
error
|
|
5748
5848
|
};
|
|
5749
|
-
await appendEventWithClient(client, "step_events", retryingEvent);
|
|
5849
|
+
await this.db.appendEventWithClient(client, "step_events", retryingEvent);
|
|
5750
5850
|
const scheduledEvent = {
|
|
5751
5851
|
category: "step",
|
|
5752
5852
|
type: "StepScheduled",
|
|
@@ -5760,7 +5860,7 @@ class PostgresBackend extends Backend {
|
|
|
5760
5860
|
attemptNumber: scheduleMetadata.nextAttemptNumber,
|
|
5761
5861
|
retryDelayMs: scheduleMetadata.retryDelayMs
|
|
5762
5862
|
};
|
|
5763
|
-
await appendEventWithClient(client, "step_events", scheduledEvent);
|
|
5863
|
+
await this.db.appendEventWithClient(client, "step_events", scheduledEvent);
|
|
5764
5864
|
await client.query("COMMIT");
|
|
5765
5865
|
} catch (error2) {
|
|
5766
5866
|
await client.query("ROLLBACK");
|
|
@@ -5786,7 +5886,7 @@ class PostgresBackend extends Backend {
|
|
|
5786
5886
|
attemptNumber: metadata.attemptNumber,
|
|
5787
5887
|
cascadedFrom: metadata.cascadedFrom
|
|
5788
5888
|
};
|
|
5789
|
-
await appendEvent(
|
|
5889
|
+
await this.db.appendEvent("step_events", event);
|
|
5790
5890
|
}
|
|
5791
5891
|
async saveStepHeartbeat(workflowSlug, runId, stepId, workerId, attemptNumber) {
|
|
5792
5892
|
const now = getMicrosecondTimestamp();
|
|
@@ -5801,7 +5901,7 @@ class PostgresBackend extends Backend {
|
|
|
5801
5901
|
workerId,
|
|
5802
5902
|
attemptNumber
|
|
5803
5903
|
};
|
|
5804
|
-
await appendEvent(
|
|
5904
|
+
await this.db.appendEvent("step_events", event);
|
|
5805
5905
|
}
|
|
5806
5906
|
async saveStepReclaimed(workflowSlug, runId, stepId, metadata) {
|
|
5807
5907
|
const now = getMicrosecondTimestamp();
|
|
@@ -5820,7 +5920,7 @@ class PostgresBackend extends Backend {
|
|
|
5820
5920
|
staleDurationUs: metadata.staleDuration,
|
|
5821
5921
|
attemptNumber: metadata.attemptNumber
|
|
5822
5922
|
};
|
|
5823
|
-
await appendEvent(
|
|
5923
|
+
await this.db.appendEvent("step_events", event);
|
|
5824
5924
|
}
|
|
5825
5925
|
async saveStepLogs(workflowSlug, runId, stepId, logs) {}
|
|
5826
5926
|
async loadStepLogs(workflowSlug, runId, stepId, attemptNumber) {
|
|
@@ -5840,11 +5940,12 @@ class PostgresBackend extends Backend {
|
|
|
5840
5940
|
timestampUs: timestamp,
|
|
5841
5941
|
workflowSlug,
|
|
5842
5942
|
runId,
|
|
5943
|
+
versionId: metadata.versionId,
|
|
5843
5944
|
workflowAttemptNumber: metadata.workflowAttemptNumber,
|
|
5844
5945
|
hasInputSchema: metadata.hasInputSchema,
|
|
5845
5946
|
hasInput: metadata.hasInput
|
|
5846
5947
|
};
|
|
5847
|
-
await appendEvent(
|
|
5948
|
+
await this.db.appendEvent("workflow_events", event);
|
|
5848
5949
|
}
|
|
5849
5950
|
async saveWorkflowInputValidation(workflowSlug, runId, result) {
|
|
5850
5951
|
const timestamp = getMicrosecondTimestamp();
|
|
@@ -5857,7 +5958,7 @@ class PostgresBackend extends Backend {
       runId,
       ...result
     };
-    await appendEvent(
+    await this.db.appendEvent("workflow_events", event);
   }
   async saveWorkflowComplete(workflowSlug, runId, output, metadata) {
     const timestamp = getMicrosecondTimestamp();
@@ -5873,7 +5974,7 @@ class PostgresBackend extends Backend {
       durationUs: metadata.duration,
       totalSteps: metadata.totalSteps
     };
-    await appendEvent(
+    await this.db.appendEvent("workflow_events", event);
   }
   async saveWorkflowFailed(workflowSlug, runId, error, metadata, failureReason) {
     const timestamp = getMicrosecondTimestamp();
@@ -5891,7 +5992,7 @@ class PostgresBackend extends Backend {
       failedStep: metadata.failedStep,
       failureReason
     };
-    await appendEvent(
+    await this.db.appendEvent("workflow_events", event);
   }
   async saveWorkflowResumed(workflowSlug, runId, metadata) {
     const timestamp = getMicrosecondTimestamp();
@@ -5906,7 +6007,7 @@ class PostgresBackend extends Backend {
       resumedSteps: metadata.resumedSteps,
       pendingSteps: metadata.pendingSteps
     };
-    await appendEvent(
+    await this.db.appendEvent("workflow_events", event);
   }
   async saveWorkflowCancelled(workflowSlug, runId, metadata) {
     const timestamp = getMicrosecondTimestamp();
@@ -5922,7 +6023,7 @@ class PostgresBackend extends Backend {
       durationUs: metadata.duration,
       completedSteps: metadata.completedSteps
     };
-    await appendEvent(
+    await this.db.appendEvent("workflow_events", event);
   }
   async saveWorkflowRetryStarted(workflowSlug, runId, metadata) {
     const timestamp = getMicrosecondTimestamp();
@@ -5938,7 +6039,7 @@ class PostgresBackend extends Backend {
       retriedSteps: metadata.retriedSteps,
       reason: metadata.reason
     };
-    await appendEvent(
+    await this.db.appendEvent("workflow_events", event);
   }
   async saveRunSubmitted(workflowSlug, runId, metadata) {
     const timestamp = getMicrosecondTimestamp();
@@ -5949,6 +6050,7 @@ class PostgresBackend extends Backend {
       timestampUs: timestamp,
       workflowSlug,
       runId,
+      versionId: metadata.versionId,
       availableAtUs: metadata.availableAt,
       priority: metadata.priority,
       input: metadata.input,
@@ -5958,13 +6060,13 @@ class PostgresBackend extends Backend {
       metadata: metadata.metadata,
       tags: metadata.tags
     };
-    await appendEvent(
+    await this.db.appendEvent("workflow_events", event);
   }
   async submitRun(submission) {
     if (submission.idempotencyKey) {
       const hash = this.hashIdempotencyKey(submission.idempotencyKey);
       const proposedRunId = submission.runId || this.generateRunId();
-      const existingRunId = await saveIdempotencyKey(
+      const existingRunId = await this.db.saveIdempotencyKey(hash, proposedRunId);
       if (existingRunId !== proposedRunId) {
         return { runId: existingRunId, isNew: false };
       }
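`submitRun` now delegates idempotency to `this.db.saveIdempotencyKey(hash, proposedRunId)`, which evidently returns the run id already recorded for that key, or the proposed id when the key is new; a mismatch short-circuits into `{ runId: existingRunId, isNew: false }`. One plausible Postgres implementation of that check-and-set contract, shown only to make the semantics concrete (table and column names are assumptions; the actual query lives in db.ts):

```ts
import { Pool } from "pg";

// Hypothetical sketch: the insert wins any race, losers read back the
// winner's run id. Not the package's actual query.
async function saveIdempotencyKey(
  pool: Pool,
  schema: string,
  keyHash: string,
  proposedRunId: string
): Promise<string> {
  const inserted = await pool.query(
    `INSERT INTO ${schema}.idempotency_keys (key_hash, run_id)
     VALUES ($1, $2)
     ON CONFLICT (key_hash) DO NOTHING
     RETURNING run_id`,
    [keyHash, proposedRunId]
  );
  if (inserted.rows.length > 0) {
    return inserted.rows[0].run_id; // key was new; proposed id accepted
  }
  const existing = await pool.query(
    `SELECT run_id FROM ${schema}.idempotency_keys WHERE key_hash = $1`,
    [keyHash]
  );
  return existing.rows[0].run_id; // an earlier submission owns this key
}
```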
@@ -5974,11 +6076,18 @@ class PostgresBackend extends Backend {
     const availableAt = submission.availableAt || now;
     const priority = submission.priority || 0;
     await this.initializeRun(submission.workflowSlug, runId);
+    const workflowMetadata = await this.getWorkflowMetadata(submission.workflowSlug);
+    const hasInputSchema = !!workflowMetadata?.inputSchemaJSON;
+    const currentVersion = await this.getCurrentWorkflowVersion(submission.workflowSlug);
+    if (!currentVersion) {
+      throw new Error(`Workflow ${submission.workflowSlug} not registered. Please ensure the worker has started and registered workflows.`);
+    }
     await this.saveRunSubmitted(submission.workflowSlug, runId, {
+      versionId: currentVersion.versionId,
       availableAt,
       priority,
       input: submission.input !== undefined ? JSON.stringify(submission.input) : undefined,
-      hasInputSchema
+      hasInputSchema,
       timeout: submission.timeout,
       idempotencyKey: submission.idempotencyKey,
       metadata: submission.metadata,
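Submission now resolves the workflow's current version before recording the run, and fails fast when nothing is registered rather than enqueueing a run no worker can claim. Expected caller-visible behavior, per the thrown message in the hunk (the `backend` value is declared abstractly here just to keep the sketch self-contained):

```ts
// Behavior sketch for 0.2.0, grounded in the error text above; the submit
// signature is reduced to the fields this example needs.
declare const backend: {
  submitRun(s: { workflowSlug: string; input?: unknown }): Promise<{ runId: string; isNew: boolean }>;
};

try {
  await backend.submitRun({ workflowSlug: "send-invoices", input: { day: "2024-06-01" } });
} catch (err) {
  // "Workflow send-invoices not registered. Please ensure the worker has
  // started and registered workflows."
  console.error((err as Error).message);
}
```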
@@ -5988,12 +6097,12 @@ class PostgresBackend extends Backend {
   }
   async listRuns(options) {
     const allRuns = [];
-    const workflows = options?.workflowSlug ? [options.workflowSlug] : await listActiveWorkflows(
+    const workflows = options?.workflowSlug ? [options.workflowSlug] : await this.db.listActiveWorkflows();
     for (const workflowSlug of workflows) {
-      const runIds = await listRunIds(
+      const runIds = await this.db.listRunIds(workflowSlug);
       for (const runId of runIds) {
         try {
-          const events = await loadAllRunEvents(
+          const events = await this.db.loadAllRunEvents(workflowSlug, runId);
           if (events.length === 0)
             continue;
           const workflowEvents = events.filter((e) => e.category === "workflow");
@@ -6018,9 +6127,9 @@ class PostgresBackend extends Backend {
     return options?.limit ? allRuns.slice(0, options.limit) : allRuns;
   }
   async cancelRun(runId, reason) {
-    const allWorkflows = await listActiveWorkflows(
+    const allWorkflows = await this.db.listActiveWorkflows();
     for (const workflowSlug of allWorkflows) {
-      const runIds = await listRunIds(
+      const runIds = await this.db.listRunIds(workflowSlug);
       if (runIds.includes(runId)) {
         const events = await this.loadEvents(workflowSlug, runId, { category: "workflow" });
         if (events.length === 0)
@@ -6041,11 +6150,11 @@ class PostgresBackend extends Backend {
     throw new Error(`Run ${runId} not found`);
   }
   async getRun(runId) {
-    const allWorkflows = await listActiveWorkflows(
+    const allWorkflows = await this.db.listActiveWorkflows();
     for (const workflowSlug of allWorkflows) {
-      const runIds = await listRunIds(
+      const runIds = await this.db.listRunIds(workflowSlug);
       if (runIds.includes(runId)) {
-        const events = await loadAllRunEvents(
+        const events = await this.db.loadAllRunEvents(workflowSlug, runId);
         const workflowEvents = events.filter((e) => e.category === "workflow");
         return projectRunStateFromEvents(workflowEvents, workflowSlug);
       }
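`getRun` and `cancelRun` keep their 0.1.0 shape (scan every active workflow, scan its run ids, then rebuild state from events); only the data access moved onto `this.db`. A condensed sketch of that event-sourced lookup, treating `projectRunStateFromEvents` as an opaque reducer (the null fallthrough below is this sketch's choice; the real method's tail is outside this hunk):

```ts
// Lookup pattern condensed from getRun; RunEvent/RunState shapes are assumed.
interface RunEvent { category: "workflow" | "step"; type: string }
interface RunState { status: string }
declare function projectRunStateFromEvents(events: RunEvent[], slug: string): RunState;

interface RunReader {
  listActiveWorkflows(): Promise<string[]>;
  listRunIds(slug: string): Promise<string[]>;
  loadAllRunEvents(slug: string, runId: string): Promise<RunEvent[]>;
}

async function findRun(db: RunReader, runId: string): Promise<RunState | null> {
  for (const slug of await db.listActiveWorkflows()) {
    const runIds = await db.listRunIds(slug);
    if (runIds.includes(runId)) {
      const events = await db.loadAllRunEvents(slug, runId);
      const workflowEvents = events.filter((e) => e.category === "workflow");
      return projectRunStateFromEvents(workflowEvents, slug);
    }
  }
  return null;
}
```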
@@ -6079,17 +6188,17 @@ class PostgresBackend extends Backend {
     return failedSteps;
   }
   async listActiveWorkflows() {
-    return listActiveWorkflows(
+    return this.db.listActiveWorkflows();
   }
   async listScheduledSteps(options) {
     const dbOptions = {
       workflowSlugs: options?.workflowSlug ? [options.workflowSlug] : undefined,
       limit: options?.limit
     };
-    return listScheduledSteps(
+    return this.db.listScheduledSteps(dbOptions);
   }
   async isStepClaimable(workflowSlug, runId, stepId) {
-    const events = await loadEvents(
+    const events = await this.db.loadEvents("step_events", {
       workflowSlug,
       runId,
       stepId
@@ -6098,7 +6207,7 @@ class PostgresBackend extends Backend {
       return false;
     }
     const latestEvent = events[events.length - 1];
-    return latestEvent.type === "StepScheduled" || latestEvent.type === "StepReclaimed" || latestEvent.type === "StepRetrying";
+    return !!(latestEvent && (latestEvent.type === "StepScheduled" || latestEvent.type === "StepReclaimed" || latestEvent.type === "StepRetrying"));
   }
   async claimScheduledStep(workflowSlug, runId, stepId, workerId, metadata) {
     const initialEvents = await this.loadEvents(workflowSlug, runId, { category: "step", stepId });
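The rewritten return in `isStepClaimable` is defensive hardening plus normalization: the `return false` context above already covers an empty result, but the new form also tolerates an undefined last element and guarantees a strict boolean. The difference in isolation:

```ts
// 0.1.0's expression would throw a TypeError here; 0.2.0's guarded form
// short-circuits to false instead.
const events: Array<{ type: string }> = [];
const latestEvent = events[events.length - 1]; // undefined for an empty array
const claimable = !!(latestEvent && latestEvent.type === "StepScheduled");
console.log(claimable); // false, no TypeError
```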
@@ -6124,13 +6233,13 @@ class PostgresBackend extends Backend {
       dependencies: metadata.dependencies,
       attemptNumber
     };
-    const claimed = await claimScheduledStep(
+    const claimed = await this.db.claimScheduledStep(workflowSlug, runId, stepId, workerId, event);
     return claimed ? { attemptNumber } : null;
   }
   async reclaimStaleSteps(staleThreshold, reclaimedBy) {
     const reclaimed = [];
     const now = getMicrosecondTimestamp();
-    const staleSteps = await findStaleSteps(
+    const staleSteps = await this.db.findStaleSteps(staleThreshold);
     for (const step of staleSteps) {
       const events = await this.loadEvents(step.workflowSlug, step.runId, { category: "step", stepId: step.stepId });
       if (events.length === 0)
@@ -6152,7 +6261,7 @@ class PostgresBackend extends Backend {
       await this.saveStepScheduled(step.workflowSlug, step.runId, step.stepId, {
         availableAt: now,
         reason: "retry",
-        attemptNumber: state.attemptNumber,
+        attemptNumber: state.attemptNumber + 1,
         retryDelayMs: 0
       });
       reclaimed.push({ workflowSlug: step.workflowSlug, runId: step.runId, stepId: step.stepId });
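A one-token change with real behavioral weight: a reclaimed stale step is now rescheduled as `state.attemptNumber + 1` instead of reusing the stale attempt's number, so the retry is distinguishable from the attempt that went quiet. Illustrated:

```ts
// Reclaiming a stale attempt 2 now schedules attempt 3 (0.1.0 reused 2).
const state = { attemptNumber: 2 };
const rescheduled = {
  reason: "retry" as const,
  attemptNumber: state.attemptNumber + 1,
  retryDelayMs: 0,
};
console.log(rescheduled.attemptNumber); // 3
```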
@@ -6161,25 +6270,107 @@ class PostgresBackend extends Backend {
     return reclaimed;
   }
   async registerWorkflow(registration) {
-    await upsertWorkflowMetadata(
+    await this.db.upsertWorkflowMetadata(registration.slug, registration.name, registration.location, registration.inputSchemaJSON);
     for (const step of registration.steps) {
-      await upsertStepDefinition(
+      await this.db.upsertStepDefinition(registration.slug, step);
     }
   }
   async getWorkflowMetadata(slug) {
-    return getWorkflowMetadata(
+    return this.db.getWorkflowMetadata(slug);
   }
   async listWorkflowMetadata() {
-    return listWorkflowMetadata(
+    return this.db.listWorkflowMetadata();
   }
   async getWorkflowSteps(slug) {
-    return getWorkflowSteps(
+    return this.db.getWorkflowSteps(slug);
   }
   async listRunIds(workflowSlug) {
-    return listRunIds(
+    return this.db.listRunIds(workflowSlug);
+  }
+  async createWorkflowVersion(version) {
+    await this.db.getPool().query(`
+      INSERT INTO ${this.db.getSchema()}.workflow_versions
+      (workflow_slug, version_id, created_at, step_manifest, total_steps, git_commit, git_dirty, git_branch)
+      VALUES ($1, $2, to_timestamp($3 / 1000000.0), $4, $5, $6, $7, $8)
+      ON CONFLICT (workflow_slug, version_id) DO NOTHING
+    `, [
+      version.workflowSlug,
+      version.versionId,
+      version.createdAt,
+      version.stepManifest,
+      version.totalSteps,
+      version.git?.commit,
+      version.git?.dirty,
+      version.git?.branch
+    ]);
+  }
+  async getWorkflowVersion(workflowSlug, versionId) {
+    const result = await this.db.getPool().query(`
+      SELECT * FROM ${this.db.getSchema()}.workflow_versions
+      WHERE workflow_slug = $1 AND version_id = $2
+    `, [workflowSlug, versionId]);
+    if (result.rows.length === 0)
+      return null;
+    const row = result.rows[0];
+    return {
+      workflowSlug: row.workflow_slug,
+      versionId: row.version_id,
+      createdAt: Math.floor(new Date(row.created_at).getTime() * 1000),
+      stepManifest: row.step_manifest,
+      totalSteps: row.total_steps,
+      git: row.git_commit ? {
+        commit: row.git_commit,
+        dirty: row.git_dirty,
+        branch: row.git_branch
+      } : undefined
+    };
+  }
+  async getCurrentWorkflowVersion(workflowSlug) {
+    const result = await this.db.getPool().query(`
+      SELECT * FROM ${this.db.getSchema()}.workflow_versions
+      WHERE workflow_slug = $1
+      ORDER BY created_at DESC
+      LIMIT 1
+    `, [workflowSlug]);
+    if (result.rows.length === 0)
+      return null;
+    const row = result.rows[0];
+    return {
+      workflowSlug: row.workflow_slug,
+      versionId: row.version_id,
+      createdAt: Math.floor(new Date(row.created_at).getTime() * 1000),
+      stepManifest: row.step_manifest,
+      totalSteps: row.total_steps,
+      git: row.git_commit ? {
+        commit: row.git_commit,
+        dirty: row.git_dirty,
+        branch: row.git_branch
+      } : undefined
+    };
+  }
+  async listWorkflowVersions(workflowSlug, options) {
+    const limit = options?.limit ?? 100;
+    const result = await this.db.getPool().query(`
+      SELECT * FROM ${this.db.getSchema()}.workflow_versions
+      WHERE workflow_slug = $1
+      ORDER BY created_at DESC
+      LIMIT $2
+    `, [workflowSlug, limit]);
+    return result.rows.map((row) => ({
+      workflowSlug: row.workflow_slug,
+      versionId: row.version_id,
+      createdAt: Math.floor(new Date(row.created_at).getTime() * 1000),
+      stepManifest: row.step_manifest,
+      totalSteps: row.total_steps,
+      git: row.git_commit ? {
+        commit: row.git_commit,
+        dirty: row.git_dirty,
+        branch: row.git_branch
+      } : undefined
+    }));
   }
   async close() {
-    await this.
+    await this.db.getPool().end();
   }
   async loadEventsForAnalytics(options) {
     const now = getMicrosecondTimestamp();
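The new version methods imply a `workflow_versions` table keyed on `(workflow_slug, version_id)`. Microsecond timestamps go in via `to_timestamp($3 / 1000000.0)` and come back out via `new Date(...).getTime() * 1000`, a round-trip that quietly drops sub-millisecond precision since `getTime()` is millisecond-resolution. A DDL sketch matching only the columns these queries touch (an assumption; the shipped migration may differ in types, defaults, and indexes):

```ts
// Hypothetical DDL inferred from the INSERT/SELECT column lists above; the
// package's real migration is the source of truth.
const createWorkflowVersionsTable = (schema: string): string => `
  CREATE TABLE IF NOT EXISTS ${schema}.workflow_versions (
    workflow_slug text        NOT NULL,
    version_id    text        NOT NULL,
    created_at    timestamptz NOT NULL,
    step_manifest jsonb,
    total_steps   integer,
    git_commit    text,
    git_dirty     boolean,
    git_branch    text,
    PRIMARY KEY (workflow_slug, version_id)
  )
`;
```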
@@ -6187,7 +6378,7 @@ class PostgresBackend extends Backend {
     const endUs = options?.endUs ?? now;
     let stepQuery = `
       SELECT event_data
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       WHERE timestamp_us >= $1 AND timestamp_us <= $2
     `;
     const stepParams = [startUs, endUs];
@@ -6211,7 +6402,7 @@ class PostgresBackend extends Backend {
     stepQuery += ` ORDER BY timestamp_us ASC`;
     let workflowQuery = `
       SELECT event_data
-      FROM workflow_events
+      FROM ${this.db.getSchema()}.workflow_events
       WHERE timestamp_us >= $1 AND timestamp_us <= $2
     `;
     const workflowParams = [startUs, endUs];
@@ -6229,8 +6420,8 @@ class PostgresBackend extends Backend {
     }
     workflowQuery += ` ORDER BY timestamp_us ASC`;
     const [stepResult, workflowResult] = await Promise.all([
-      this.
-      options?.stepId ? Promise.resolve({ rows: [] }) : this.
+      this.db.getPool().query(stepQuery, stepParams),
+      options?.stepId ? Promise.resolve({ rows: [] }) : this.db.getPool().query(workflowQuery, workflowParams)
     ]);
     const stepEvents = stepResult.rows.map((row) => row.event_data);
     const workflowEvents = workflowResult.rows.map((row) => row.event_data);
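From here on the analytics queries schema-qualify every table reference through `this.db.getSchema()` instead of leaving `step_events` and `workflow_events` bare. Bare names resolve via the connection's `search_path`, which breaks once the backend is installed into a non-default schema; the 0.2.0 form pins the lookup. In miniature:

```ts
// The 0.1.0 query depended on search_path; the 0.2.0 query does not.
const schema = "cascade_flow"; // illustrative schema name, not the package default
const before = `SELECT event_data FROM step_events WHERE timestamp_us >= $1`;
const after = `SELECT event_data FROM ${schema}.step_events WHERE timestamp_us >= $1`;
```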
@@ -6264,10 +6455,10 @@ class PostgresBackend extends Backend {
     const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
     const countQuery = `
       SELECT COUNT(DISTINCT CONCAT(error_name_hash, ':', error_message_hash, ':', ${stackHashColumn})) as total
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       ${whereClause}
     `;
-    const countResult = await this.
+    const countResult = await this.db.getPool().query(countQuery, params);
     const total = parseInt(countResult.rows[0]?.total || "0", 10);
     const query = `
       SELECT
@@ -6279,7 +6470,7 @@ class PostgresBackend extends Backend {
       COUNT(DISTINCT run_id) as affected_runs,
       MIN(timestamp_us) as first_seen,
       MAX(timestamp_us) as last_seen
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       ${whereClause}
       GROUP BY
       error_name_hash,
@@ -6288,7 +6479,7 @@ class PostgresBackend extends Backend {
       ORDER BY count DESC
       LIMIT $${paramIndex} OFFSET $${paramIndex + 1}
     `;
-    const result = await this.
+    const result = await this.db.getPool().query(query, [...params, limit, offset]);
     const errors = result.rows.map((row) => ({
       fingerprint: row.fingerprint,
       errorMessage: row.error_message || "",
@@ -6336,10 +6527,10 @@ class PostgresBackend extends Backend {
       COUNT(DISTINCT run_id) as affected_runs,
       MIN(timestamp_us) as first_seen,
       MAX(timestamp_us) as last_seen
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       ${whereClause}
     `;
-    const statsResult = await this.
+    const statsResult = await this.db.getPool().query(statsQuery, params.slice(0, paramIndex - 1));
     if (statsResult.rows.length === 0) {
       return {
         fingerprint,
@@ -6362,12 +6553,12 @@ class PostgresBackend extends Backend {
       step_id,
       (event_data->>'attemptNumber')::int as attempt_number,
       timestamp_us
-      FROM step_events
+      FROM ${this.db.getSchema()}.step_events
       ${whereClause}
       ORDER BY timestamp_us DESC
       LIMIT $${paramIndex} OFFSET $${paramIndex + 1}
     `;
-    const occurrencesResult = await this.
+    const occurrencesResult = await this.db.getPool().query(occurrencesQuery, [
       ...params.slice(0, paramIndex - 1),
       limit,
       offset
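These error-analytics queries group failures by a composite fingerprint, `error_name_hash:error_message_hash:<stack hash column>`, with the stack column selectable among the exact, normalized, and portable hashes recorded on failure events. A client-side sketch of fingerprinting in the same spirit (the hash choice and the absence of stack normalization here are assumptions, not the package's algorithm):

```ts
import { createHash } from "node:crypto";

// Illustrative only: compose a grouping key from name, message, and stack.
function sha256(text: string): string {
  return createHash("sha256").update(text).digest("hex");
}

function errorFingerprint(err: Error): string {
  return [sha256(err.name), sha256(err.message), sha256(err.stack ?? "")].join(":");
}
```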
@@ -6580,4 +6771,4 @@ export {
   PostgresBackend
 };
 
-//# debugId=
+//# debugId=9ABA79C867582FE764756E2164756E21
|