@pgflow/core 0.0.0-test-snapshot-releases-8d5d9bc1-20250922101013 → 0.0.0-update-supabase-868977e5-20251119071021
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- package/README.md +177 -73
- package/dist/ATLAS.md +32 -0
- package/dist/CHANGELOG.md +796 -0
- package/dist/PgflowSqlClient.d.ts +17 -0
- package/dist/PgflowSqlClient.d.ts.map +1 -0
- package/dist/PgflowSqlClient.js +70 -0
- package/dist/README.md +497 -0
- package/dist/database-types.d.ts +1007 -0
- package/dist/database-types.d.ts.map +1 -0
- package/dist/database-types.js +8 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +2 -0
- package/dist/package.json +32 -0
- package/dist/supabase/migrations/20250429164909_pgflow_initial.sql +579 -0
- package/dist/supabase/migrations/20250517072017_pgflow_fix_poll_for_tasks_to_use_separate_statement_for_polling.sql +101 -0
- package/dist/supabase/migrations/20250609105135_pgflow_add_start_tasks_and_started_status.sql +371 -0
- package/dist/supabase/migrations/20250610180554_pgflow_add_set_vt_batch_and_use_it_in_start_tasks.sql +127 -0
- package/dist/supabase/migrations/20250614124241_pgflow_add_realtime.sql +501 -0
- package/dist/supabase/migrations/20250619195327_pgflow_fix_fail_task_missing_realtime_event.sql +185 -0
- package/dist/supabase/migrations/20250627090700_pgflow_fix_function_search_paths.sql +6 -0
- package/dist/supabase/migrations/20250707210212_pgflow_add_opt_start_delay.sql +103 -0
- package/dist/supabase/migrations/20250719205006_pgflow_worker_deprecation.sql +2 -0
- package/dist/supabase/migrations/20251006073122_pgflow_add_map_step_type.sql +1244 -0
- package/dist/supabase/migrations/20251103222045_pgflow_fix_broadcast_order_and_timestamp_handling.sql +622 -0
- package/dist/supabase/migrations/20251104080523_pgflow_upgrade_pgmq_1_5_1.sql +93 -0
- package/dist/tsconfig.lib.tsbuildinfo +1 -0
- package/dist/types.d.ts +93 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +1 -0
- package/package.json +4 -4

package/dist/supabase/migrations/20251104080523_pgflow_upgrade_pgmq_1_5_1.sql
ADDED
@@ -0,0 +1,93 @@
+-- Migration tested 2025-11-02:
+-- Successfully verified that this migration fails on pgmq 1.4.4 (Supabase CLI < 2.34.3)
+-- with clear error message guiding users to upgrade pgmq to 1.5.0+
+--
+-- Compatibility check: Ensure pgmq.message_record has headers column (pgmq 1.5.0+)
+DO $$
+DECLARE
+  has_headers BOOLEAN;
+BEGIN
+  SELECT EXISTS (
+    SELECT 1
+    FROM pg_type t
+    JOIN pg_namespace n ON t.typnamespace = n.oid
+    JOIN pg_attribute a ON a.attrelid = t.typrelid
+    WHERE n.nspname = 'pgmq'
+      AND t.typname = 'message_record'
+      AND a.attname = 'headers'
+      AND a.attnum > 0
+      AND NOT a.attisdropped
+  ) INTO has_headers;
+
+  IF NOT has_headers THEN
+    RAISE EXCEPTION E'INCOMPATIBLE PGMQ VERSION DETECTED\n\n'
+      'This migration is part of pgflow 0.8.0+, which requires pgmq 1.5.0 or higher.\n'
+      'The pgmq.message_record type is missing the "headers" column, which indicates you are running pgmq < 1.5.0.\n\n'
+      'pgflow 0.8.0+ is NOT compatible with pgmq versions below 1.5.0.\n\n'
+      'Action required:\n'
+      ' - If using Supabase: Ensure you are running a recent version that includes pgmq 1.5.0+\n'
+      ' - If self-hosting: Upgrade pgmq to version 1.5.0 or higher before running this migration\n\n'
+      'Migration aborted to prevent runtime failures.';
+  END IF;
+END $$;
+
+-- Modify "set_vt_batch" function
+-- Must drop first because we're changing the return type from SETOF to TABLE
+DROP FUNCTION IF EXISTS "pgflow"."set_vt_batch"(text, bigint[], integer[]);
+CREATE FUNCTION "pgflow"."set_vt_batch" (
+  "queue_name" text,
+  "msg_ids" bigint[],
+  "vt_offsets" integer[]
+)
+RETURNS TABLE(
+  msg_id bigint,
+  read_ct integer,
+  enqueued_at timestamp with time zone,
+  vt timestamp with time zone,
+  message jsonb,
+  headers jsonb
+)
+LANGUAGE plpgsql AS $$
+DECLARE
+  qtable TEXT := pgmq.format_table_name(queue_name, 'q');
+  sql TEXT;
+BEGIN
+  /* ---------- safety checks ---------------------------------------------------- */
+  IF msg_ids IS NULL OR vt_offsets IS NULL OR array_length(msg_ids, 1) = 0 THEN
+    RETURN; -- nothing to do, return empty set
+  END IF;
+
+  IF array_length(msg_ids, 1) IS DISTINCT FROM array_length(vt_offsets, 1) THEN
+    RAISE EXCEPTION
+      'msg_ids length (%) must equal vt_offsets length (%)',
+      array_length(msg_ids, 1), array_length(vt_offsets, 1);
+  END IF;
+
+  /* ---------- dynamic statement ------------------------------------------------ */
+  /* One UPDATE joins with the unnested arrays */
+  sql := format(
+    $FMT$
+    WITH input (msg_id, vt_offset) AS (
+      SELECT unnest($1)::bigint
+           , unnest($2)::int
+    )
+    UPDATE pgmq.%I q
+    SET vt = clock_timestamp() + make_interval(secs => input.vt_offset),
+        read_ct = read_ct -- no change, but keeps RETURNING list aligned
+    FROM input
+    WHERE q.msg_id = input.msg_id
+    RETURNING q.msg_id,
+              q.read_ct,
+              q.enqueued_at,
+              q.vt,
+              q.message,
+              q.headers
+    $FMT$,
+    qtable
+  );
+
+  RETURN QUERY EXECUTE sql USING msg_ids, vt_offsets;
+END;
+$$;
+-- Drop "read_with_poll" function
+DROP FUNCTION "pgflow"."read_with_poll";

package/dist/tsconfig.lib.tsbuildinfo
ADDED
@@ -0,0 +1 @@
+{"version":"5.8.3"}

package/dist/types.d.ts
ADDED

@@ -0,0 +1,93 @@
+import type { ExtractFlowSteps, StepInput, Simplify, AnyFlow, ExtractFlowInput, Json } from '@pgflow/dsl';
+import type { Database } from './database-types.js';
+export type { Json };
+/**
+ * Record representing a task from pgflow.start_tasks
+ *
+ * Same as pgflow.step_task_record type, but with not-null fields and type argument for payload.
+ * The input type is automatically inferred based on the step_slug using a discriminated union.
+ * This ensures that each step only receives inputs from its declared dependencies and the flow's run input.
+ */
+export type StepTaskRecord<TFlow extends AnyFlow> = {
+    [StepSlug in Extract<keyof ExtractFlowSteps<TFlow>, string>]: {
+        flow_slug: string;
+        run_id: string;
+        step_slug: StepSlug;
+        task_index: number;
+        input: Simplify<StepInput<TFlow, StepSlug>>;
+        msg_id: number;
+    };
+}[Extract<keyof ExtractFlowSteps<TFlow>, string>];
+/**
+ * Composite key that is enough to find a particular step task
+ * Contains only the minimum fields needed to identify a task
+ */
+export type StepTaskKey = Pick<StepTaskRecord<AnyFlow>, 'run_id' | 'step_slug' | 'task_index'>;
+/**
+ * Record representing a message from queue polling
+ */
+export type MessageRecord = {
+    msg_id: number;
+    read_ct: number;
+    enqueued_at: string;
+    vt: string;
+    message: Json;
+};
+/**
+ * Interface for interacting with pgflow database functions
+ */
+export interface IPgflowClient<TFlow extends AnyFlow = AnyFlow> {
+    /**
+     * Start a flow with optional run_id
+     */
+    startFlow<TFlow extends AnyFlow>(flow_slug: string, input: ExtractFlowInput<TFlow>, run_id?: string): Promise<RunRow>;
+    /**
+     * Reads messages from queue without starting tasks (phase 1 of two-phase approach)
+     * @param queueName - Name of the queue
+     * @param visibilityTimeout - Visibility timeout for messages
+     * @param batchSize - Number of messages to fetch
+     * @param maxPollSeconds - Maximum time to poll for messages
+     * @param pollIntervalMs - Poll interval in milliseconds
+     */
+    readMessages(queueName: string, visibilityTimeout: number, batchSize: number, maxPollSeconds?: number, pollIntervalMs?: number): Promise<MessageRecord[]>;
+    /**
+     * Starts tasks for given message IDs (phase 2 of two-phase approach)
+     * @param flowSlug - The flow slug to start tasks from
+     * @param msgIds - Array of message IDs from readMessages
+     * @param workerId - ID of the worker starting the tasks
+     */
+    startTasks(flowSlug: string, msgIds: number[], workerId: string): Promise<StepTaskRecord<TFlow>[]>;
+    /**
+     * Mark a task as completed with output
+     */
+    completeTask(stepTask: StepTaskKey, output?: Json): Promise<void>;
+    /**
+     * Mark a task as failed with error
+     */
+    failTask(stepTask: StepTaskKey, error: unknown): Promise<void>;
+}
+/**
+ * Record representing a flow from pgflow.flows
+ */
+export type FlowRow = Database['pgflow']['Tables']['flows']['Row'];
+/**
+ * Record representing a step from pgflow.steps
+ */
+export type StepRow = Database['pgflow']['Tables']['steps']['Row'];
+/**
+ * Record representing a step from pgflow.deps
+ */
+export type DepRow = Database['pgflow']['Tables']['deps']['Row'];
+/**
+ * Record representing a step from pgflow.queues
+ */
+export type RunRow = Database['pgflow']['Tables']['runs']['Row'];
+/**
+ * Record representing a step from pgflow.step_states
+ */
+export type StepStateRow = Database['pgflow']['Tables']['step_states']['Row'];
+/**
+ * Record representing a step from pgflow.step_tasks
+ */
+export type StepTaskRow = Database['pgflow']['Tables']['step_tasks']['Row'];
+//# sourceMappingURL=types.d.ts.map
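
The doc comments above describe a two-phase polling model: readMessages claims raw queue messages without starting any work (phase 1), startTasks turns the selected message IDs into typed step tasks (phase 2), and each task is then settled with completeTask or failTask using the run_id / step_slug / task_index key. The worker-loop sketch below is illustrative only; it assumes these types are re-exported from the package entry point, and the queue name, flow slug, batch settings, worker id, and handler are placeholders rather than pgflow defaults.

import type { IPgflowClient, StepTaskRecord, StepTaskKey } from '@pgflow/core';
import type { AnyFlow, Json } from '@pgflow/dsl';

// One polling iteration against the two-phase IPgflowClient API declared above.
// `client` is any IPgflowClient implementation; `handler` runs a single step task.
async function pollOnce<TFlow extends AnyFlow>(
  client: IPgflowClient<TFlow>,
  handler: (task: StepTaskRecord<TFlow>) => Promise<Json>,
  workerId: string,
): Promise<void> {
  // Phase 1: read up to 5 messages with a visibility timeout of 10 seconds.
  const messages = await client.readMessages('example_flow', 10, 5);
  if (messages.length === 0) return;

  // Phase 2: convert the claimed message IDs into started step tasks.
  const tasks = await client.startTasks(
    'example_flow',
    messages.map((m) => m.msg_id),
    workerId,
  );

  for (const task of tasks) {
    // Composite key: the minimum fields needed to report the task's result.
    const key: StepTaskKey = {
      run_id: task.run_id,
      step_slug: task.step_slug,
      task_index: task.task_index,
    };
    try {
      await client.completeTask(key, await handler(task));
    } catch (error) {
      await client.failTask(key, error);
    }
  }
}

Keeping the read and start phases separate matches the phase 1 / phase 2 labels in the comments: messages are first claimed under a visibility timeout, and only the ones actually handed to a worker are turned into started tasks.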

package/dist/types.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACV,gBAAgB,EAChB,SAAS,EACT,QAAQ,EACR,OAAO,EACP,gBAAgB,EAChB,IAAI,EACL,MAAM,aAAa,CAAC;AACrB,OAAO,KAAK,EAAE,QAAQ,EAAE,MAAM,qBAAqB,CAAC;AAEpD,YAAY,EAAE,IAAI,EAAE,CAAC;AAErB;;;;;;GAMG;AACH,MAAM,MAAM,cAAc,CAAC,KAAK,SAAS,OAAO,IAAI;KACjD,QAAQ,IAAI,OAAO,CAAC,MAAM,gBAAgB,CAAC,KAAK,CAAC,EAAE,MAAM,CAAC,GAAG;QAC5D,SAAS,EAAE,MAAM,CAAC;QAClB,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,QAAQ,CAAC;QACpB,UAAU,EAAE,MAAM,CAAC;QACnB,KAAK,EAAE,QAAQ,CAAC,SAAS,CAAC,KAAK,EAAE,QAAQ,CAAC,CAAC,CAAC;QAC5C,MAAM,EAAE,MAAM,CAAC;KAChB;CACF,CAAC,OAAO,CAAC,MAAM,gBAAgB,CAAC,KAAK,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC;AAElD;;;GAGG;AACH,MAAM,MAAM,WAAW,GAAG,IAAI,CAAC,cAAc,CAAC,OAAO,CAAC,EAAE,QAAQ,GAAG,WAAW,GAAG,YAAY,CAAC,CAAC;AAI/F;;GAEG;AACH,MAAM,MAAM,aAAa,GAAG;IAC1B,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,MAAM,CAAC;IAChB,WAAW,EAAE,MAAM,CAAC;IACpB,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,IAAI,CAAC;CACf,CAAC;AAEF;;GAEG;AACH,MAAM,WAAW,aAAa,CAAC,KAAK,SAAS,OAAO,GAAG,OAAO;IAC5D;;OAEG;IACH,SAAS,CAAC,KAAK,SAAS,OAAO,EAC7B,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,gBAAgB,CAAC,KAAK,CAAC,EAC9B,MAAM,CAAC,EAAE,MAAM,GACd,OAAO,CAAC,MAAM,CAAC,CAAC;IAEnB;;;;;;;OAOG;IACH,YAAY,CACV,SAAS,EAAE,MAAM,EACjB,iBAAiB,EAAE,MAAM,EACzB,SAAS,EAAE,MAAM,EACjB,cAAc,CAAC,EAAE,MAAM,EACvB,cAAc,CAAC,EAAE,MAAM,GACtB,OAAO,CAAC,aAAa,EAAE,CAAC,CAAC;IAE5B;;;;;OAKG;IACH,UAAU,CACR,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,MAAM,EAAE,EAChB,QAAQ,EAAE,MAAM,GACf,OAAO,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC;IAEpC;;OAEG;IACH,YAAY,CAAC,QAAQ,EAAE,WAAW,EAAE,MAAM,CAAC,EAAE,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAElE;;OAEG;IACH,QAAQ,CAAC,QAAQ,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CAChE;AAED;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,KAAK,CAAC,CAAC;AAEnE;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,KAAK,CAAC,CAAC;AAEnE;;GAEG;AACH,MAAM,MAAM,MAAM,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC;AAEjE;;GAEG;AACH,MAAM,MAAM,MAAM,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC;AAEjE;;GAEG;AACH,MAAM,MAAM,YAAY,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,aAAa,CAAC,CAAC,KAAK,CAAC,CAAC;AAE9E;;GAEG;AACH,MAAM,MAAM,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,KAAK,CAAC,CAAC"}

package/dist/types.js
ADDED

@@ -0,0 +1 @@
+export {};

package/package.json
CHANGED

@@ -1,7 +1,7 @@
 {
   "name": "@pgflow/core",
-  "version": "0.0.0-
-  "license": "
+  "version": "0.0.0-update-supabase-868977e5-20251119071021",
+  "license": "Apache-2.0",
   "type": "module",
   "main": "./dist/index.js",
   "module": "./dist/index.js",
@@ -20,11 +20,11 @@
   },
   "devDependencies": {
     "@types/node": "^22.14.1",
-    "supabase": "2.
+    "supabase": "^2.34.3"
   },
   "dependencies": {
     "postgres": "^3.4.5",
-    "@pgflow/dsl": "0.0.0-
+    "@pgflow/dsl": "0.0.0-update-supabase-868977e5-20251119071021"
   },
   "publishConfig": {
     "access": "public"