@pgflow/core 0.0.0-array-map-steps-cd94242a-20251008042921 → 0.0.0-condition-4354fcb6-20260108134756
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +24 -7
- package/dist/CHANGELOG.md +187 -13
- package/dist/PgflowSqlClient.js +1 -1
- package/dist/README.md +24 -7
- package/dist/database-types.d.ts +392 -71
- package/dist/database-types.d.ts.map +1 -1
- package/dist/package.json +8 -4
- package/dist/supabase/migrations/20250429164909_pgflow_initial.sql +2 -2
- package/dist/supabase/migrations/20251006073122_pgflow_add_map_step_type.sql +24 -7
- package/dist/supabase/migrations/20251103222045_pgflow_fix_broadcast_order_and_timestamp_handling.sql +622 -0
- package/dist/supabase/migrations/20251104080523_pgflow_upgrade_pgmq_1_5_1.sql +93 -0
- package/dist/supabase/migrations/20251130000000_pgflow_auto_compilation.sql +268 -0
- package/dist/supabase/migrations/20251209074533_pgflow_worker_management.sql +273 -0
- package/dist/supabase/migrations/20251212100113_pgflow_allow_data_loss_parameter.sql +54 -0
- package/dist/supabase/migrations/20251225163110_pgflow_add_flow_input_column.sql +185 -0
- package/dist/supabase/migrations/20260103145141_pgflow_step_output_storage.sql +909 -0
- package/dist/supabase/migrations/20260108131350_pgflow_step_conditions.sql +1515 -0
- package/dist/types.d.ts +7 -4
- package/dist/types.d.ts.map +1 -1
- package/package.json +9 -5
- package/dist/ATLAS.md +0 -32
@@ -0,0 +1,93 @@
+-- Migration tested 2025-11-02:
+-- Successfully verified that this migration fails on pgmq 1.4.4 (Supabase CLI < 2.50.3)
+-- with clear error message guiding users to upgrade pgmq to 1.5.0+
+--
+-- Compatibility check: Ensure pgmq.message_record has headers column (pgmq 1.5.0+)
+DO $$
+DECLARE
+  has_headers BOOLEAN;
+BEGIN
+  SELECT EXISTS (
+    SELECT 1
+    FROM pg_type t
+    JOIN pg_namespace n ON t.typnamespace = n.oid
+    JOIN pg_attribute a ON a.attrelid = t.typrelid
+    WHERE n.nspname = 'pgmq'
+      AND t.typname = 'message_record'
+      AND a.attname = 'headers'
+      AND a.attnum > 0
+      AND NOT a.attisdropped
+  ) INTO has_headers;
+
+  IF NOT has_headers THEN
+    RAISE EXCEPTION E'INCOMPATIBLE PGMQ VERSION DETECTED\n\n'
+      'This migration is part of pgflow 0.8.0+, which requires pgmq 1.5.0 or higher.\n'
+      'The pgmq.message_record type is missing the "headers" column, which indicates you are running pgmq < 1.5.0.\n\n'
+      'pgflow 0.8.0+ is NOT compatible with pgmq versions below 1.5.0.\n\n'
+      'Action required:\n'
+      '  - If using Supabase: Ensure you are running a recent version that includes pgmq 1.5.0+\n'
+      '  - If self-hosting: Upgrade pgmq to version 1.5.0 or higher before running this migration\n\n'
+      'Migration aborted to prevent runtime failures.';
+  END IF;
+END $$;
+
+-- Modify "set_vt_batch" function
+-- Must drop first because we're changing the return type from SETOF to TABLE
+DROP FUNCTION IF EXISTS "pgflow"."set_vt_batch"(text, bigint[], integer[]);
+CREATE FUNCTION "pgflow"."set_vt_batch" (
+  "queue_name" text,
+  "msg_ids" bigint[],
+  "vt_offsets" integer[]
+)
+RETURNS TABLE(
+  msg_id bigint,
+  read_ct integer,
+  enqueued_at timestamp with time zone,
+  vt timestamp with time zone,
+  message jsonb,
+  headers jsonb
+)
+LANGUAGE plpgsql AS $$
+DECLARE
+  qtable TEXT := pgmq.format_table_name(queue_name, 'q');
+  sql TEXT;
+BEGIN
+  /* ---------- safety checks ---------------------------------------------------- */
+  IF msg_ids IS NULL OR vt_offsets IS NULL OR array_length(msg_ids, 1) = 0 THEN
+    RETURN; -- nothing to do, return empty set
+  END IF;
+
+  IF array_length(msg_ids, 1) IS DISTINCT FROM array_length(vt_offsets, 1) THEN
+    RAISE EXCEPTION
+      'msg_ids length (%) must equal vt_offsets length (%)',
+      array_length(msg_ids, 1), array_length(vt_offsets, 1);
+  END IF;
+
+  /* ---------- dynamic statement ------------------------------------------------ */
+  /* One UPDATE joins with the unnested arrays */
+  sql := format(
+    $FMT$
+    WITH input (msg_id, vt_offset) AS (
+      SELECT unnest($1)::bigint
+           , unnest($2)::int
+    )
+    UPDATE pgmq.%I q
+    SET vt = clock_timestamp() + make_interval(secs => input.vt_offset),
+        read_ct = read_ct -- no change, but keeps RETURNING list aligned
+    FROM input
+    WHERE q.msg_id = input.msg_id
+    RETURNING q.msg_id,
+              q.read_ct,
+              q.enqueued_at,
+              q.vt,
+              q.message,
+              q.headers
+    $FMT$,
+    qtable
+  );
+
+  RETURN QUERY EXECUTE sql USING msg_ids, vt_offsets;
+END;
+$$;
+-- Drop "read_with_poll" function
+DROP FUNCTION "pgflow"."read_with_poll";
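For orientation, a minimal sketch of how the reworked `set_vt_batch` above might be called once this pgmq 1.5.1 upgrade migration is applied. The queue name and message ids are illustrative placeholders, not values from this diff; pgflow creates one pgmq queue per flow slug via `create_flow`.

```sql
-- Hypothetical usage: push back the visibility timeout of two messages on an
-- existing queue by 30s and 60s, reading back the new vt and headers columns.
SELECT msg_id, vt, headers
FROM pgflow.set_vt_batch(
  'my_flow',                    -- placeholder queue/flow slug
  ARRAY[101, 102]::bigint[],    -- placeholder message ids
  ARRAY[30, 60]::integer[]      -- per-message delay offsets, in seconds
);
```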
@@ -0,0 +1,268 @@
+-- Modify "create_flow" function
+CREATE OR REPLACE FUNCTION "pgflow"."create_flow" ("flow_slug" text, "max_attempts" integer DEFAULT NULL::integer, "base_delay" integer DEFAULT NULL::integer, "timeout" integer DEFAULT NULL::integer) RETURNS "pgflow"."flows" LANGUAGE sql SET "search_path" = '' AS $$
+WITH
+  defaults AS (
+    SELECT 3 AS def_max_attempts, 5 AS def_base_delay, 60 AS def_timeout
+  ),
+  flow_upsert AS (
+    INSERT INTO pgflow.flows (flow_slug, opt_max_attempts, opt_base_delay, opt_timeout)
+    SELECT
+      flow_slug,
+      COALESCE(max_attempts, defaults.def_max_attempts),
+      COALESCE(base_delay, defaults.def_base_delay),
+      COALESCE(timeout, defaults.def_timeout)
+    FROM defaults
+    ON CONFLICT (flow_slug) DO UPDATE
+    SET flow_slug = pgflow.flows.flow_slug -- Dummy update
+    RETURNING *
+  ),
+  ensure_queue AS (
+    SELECT pgmq.create(flow_slug)
+    WHERE NOT EXISTS (
+      SELECT 1 FROM pgmq.list_queues() WHERE queue_name = flow_slug
+    )
+  )
+SELECT f.*
+FROM flow_upsert f
+LEFT JOIN (SELECT 1 FROM ensure_queue) _dummy ON true; -- Left join ensures flow is returned
+$$;
+-- Create "_compare_flow_shapes" function
+CREATE FUNCTION "pgflow"."_compare_flow_shapes" ("p_local" jsonb, "p_db" jsonb) RETURNS text[] LANGUAGE plpgsql STABLE SET "search_path" = '' AS $BODY$
+DECLARE
+  v_differences text[] := '{}';
+  v_local_steps jsonb;
+  v_db_steps jsonb;
+  v_local_count int;
+  v_db_count int;
+  v_max_count int;
+  v_idx int;
+  v_local_step jsonb;
+  v_db_step jsonb;
+  v_local_deps text;
+  v_db_deps text;
+BEGIN
+  v_local_steps := p_local->'steps';
+  v_db_steps := p_db->'steps';
+  v_local_count := jsonb_array_length(COALESCE(v_local_steps, '[]'::jsonb));
+  v_db_count := jsonb_array_length(COALESCE(v_db_steps, '[]'::jsonb));
+
+  -- Compare step counts
+  IF v_local_count != v_db_count THEN
+    v_differences := array_append(
+      v_differences,
+      format('Step count differs: %s vs %s', v_local_count, v_db_count)
+    );
+  END IF;
+
+  -- Compare steps by index
+  v_max_count := GREATEST(v_local_count, v_db_count);
+
+  FOR v_idx IN 0..(v_max_count - 1) LOOP
+    v_local_step := v_local_steps->v_idx;
+    v_db_step := v_db_steps->v_idx;
+
+    IF v_local_step IS NULL THEN
+      v_differences := array_append(
+        v_differences,
+        format(
+          $$Step at index %s: missing in first shape (second has '%s')$$,
+          v_idx,
+          v_db_step->>'slug'
+        )
+      );
+    ELSIF v_db_step IS NULL THEN
+      v_differences := array_append(
+        v_differences,
+        format(
+          $$Step at index %s: missing in second shape (first has '%s')$$,
+          v_idx,
+          v_local_step->>'slug'
+        )
+      );
+    ELSE
+      -- Compare slug
+      IF v_local_step->>'slug' != v_db_step->>'slug' THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: slug differs '%s' vs '%s'$$,
+            v_idx,
+            v_local_step->>'slug',
+            v_db_step->>'slug'
+          )
+        );
+      END IF;
+
+      -- Compare step type
+      IF v_local_step->>'stepType' != v_db_step->>'stepType' THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: type differs '%s' vs '%s'$$,
+            v_idx,
+            v_local_step->>'stepType',
+            v_db_step->>'stepType'
+          )
+        );
+      END IF;
+
+      -- Compare dependencies (convert arrays to comma-separated strings)
+      SELECT string_agg(dep, ', ' ORDER BY dep)
+      INTO v_local_deps
+      FROM jsonb_array_elements_text(COALESCE(v_local_step->'dependencies', '[]'::jsonb)) AS dep;
+
+      SELECT string_agg(dep, ', ' ORDER BY dep)
+      INTO v_db_deps
+      FROM jsonb_array_elements_text(COALESCE(v_db_step->'dependencies', '[]'::jsonb)) AS dep;
+
+      IF COALESCE(v_local_deps, '') != COALESCE(v_db_deps, '') THEN
+        v_differences := array_append(
+          v_differences,
+          format(
+            $$Step at index %s: dependencies differ [%s] vs [%s]$$,
+            v_idx,
+            COALESCE(v_local_deps, ''),
+            COALESCE(v_db_deps, '')
+          )
+        );
+      END IF;
+    END IF;
+  END LOOP;
+
+  RETURN v_differences;
+END;
+$BODY$;
+-- Create "_create_flow_from_shape" function
+CREATE FUNCTION "pgflow"."_create_flow_from_shape" ("p_flow_slug" text, "p_shape" jsonb) RETURNS void LANGUAGE plpgsql SET "search_path" = '' AS $$
+DECLARE
+  v_step jsonb;
+  v_deps text[];
+  v_flow_options jsonb;
+  v_step_options jsonb;
+BEGIN
+  -- Extract flow-level options (may be null)
+  v_flow_options := p_shape->'options';
+
+  -- Create the flow with options (NULL = use default)
+  PERFORM pgflow.create_flow(
+    p_flow_slug,
+    (v_flow_options->>'maxAttempts')::int,
+    (v_flow_options->>'baseDelay')::int,
+    (v_flow_options->>'timeout')::int
+  );
+
+  -- Iterate over steps in order and add each one
+  FOR v_step IN SELECT * FROM jsonb_array_elements(p_shape->'steps')
+  LOOP
+    -- Convert dependencies jsonb array to text array
+    SELECT COALESCE(array_agg(dep), '{}')
+    INTO v_deps
+    FROM jsonb_array_elements_text(COALESCE(v_step->'dependencies', '[]'::jsonb)) AS dep;
+
+    -- Extract step options (may be null)
+    v_step_options := v_step->'options';
+
+    -- Add the step with options (NULL = use default/inherit)
+    PERFORM pgflow.add_step(
+      flow_slug => p_flow_slug,
+      step_slug => v_step->>'slug',
+      deps_slugs => v_deps,
+      max_attempts => (v_step_options->>'maxAttempts')::int,
+      base_delay => (v_step_options->>'baseDelay')::int,
+      timeout => (v_step_options->>'timeout')::int,
+      start_delay => (v_step_options->>'startDelay')::int,
+      step_type => v_step->>'stepType'
+    );
+  END LOOP;
+END;
+$$;
+-- Create "_get_flow_shape" function
+CREATE FUNCTION "pgflow"."_get_flow_shape" ("p_flow_slug" text) RETURNS jsonb LANGUAGE sql STABLE SET "search_path" = '' AS $$
+SELECT jsonb_build_object(
+  'steps',
+  COALESCE(
+    jsonb_agg(
+      jsonb_build_object(
+        'slug', step.step_slug,
+        'stepType', step.step_type,
+        'dependencies', COALESCE(
+          (
+            SELECT jsonb_agg(dep.dep_slug ORDER BY dep.dep_slug)
+            FROM pgflow.deps AS dep
+            WHERE dep.flow_slug = step.flow_slug
+              AND dep.step_slug = step.step_slug
+          ),
+          '[]'::jsonb
+        )
+      )
+      ORDER BY step.step_index
+    ),
+    '[]'::jsonb
+  )
+)
+FROM pgflow.steps AS step
+WHERE step.flow_slug = p_flow_slug;
+$$;
+-- Create "delete_flow_and_data" function
+CREATE FUNCTION "pgflow"."delete_flow_and_data" ("p_flow_slug" text) RETURNS void LANGUAGE plpgsql SET "search_path" = '' AS $$
+BEGIN
+  -- Drop queue and archive table (pgmq)
+  PERFORM pgmq.drop_queue(p_flow_slug);
+
+  -- Delete all associated data in the correct order (respecting FK constraints)
+  DELETE FROM pgflow.step_tasks AS task WHERE task.flow_slug = p_flow_slug;
+  DELETE FROM pgflow.step_states AS state WHERE state.flow_slug = p_flow_slug;
+  DELETE FROM pgflow.runs AS run WHERE run.flow_slug = p_flow_slug;
+  DELETE FROM pgflow.deps AS dep WHERE dep.flow_slug = p_flow_slug;
+  DELETE FROM pgflow.steps AS step WHERE step.flow_slug = p_flow_slug;
+  DELETE FROM pgflow.flows AS flow WHERE flow.flow_slug = p_flow_slug;
+END;
+$$;
+-- Create "ensure_flow_compiled" function
+CREATE FUNCTION "pgflow"."ensure_flow_compiled" ("p_flow_slug" text, "p_shape" jsonb, "p_mode" text DEFAULT 'production') RETURNS jsonb LANGUAGE plpgsql SET "search_path" = '' AS $$
+DECLARE
+  v_lock_key int;
+  v_flow_exists boolean;
+  v_db_shape jsonb;
+  v_differences text[];
+BEGIN
+  -- Generate lock key from flow_slug (deterministic hash)
+  v_lock_key := hashtext(p_flow_slug);
+
+  -- Acquire transaction-level advisory lock
+  -- Serializes concurrent compilation attempts for same flow
+  PERFORM pg_advisory_xact_lock(1, v_lock_key);
+
+  -- 1. Check if flow exists
+  SELECT EXISTS(SELECT 1 FROM pgflow.flows AS flow WHERE flow.flow_slug = p_flow_slug)
+  INTO v_flow_exists;
+
+  -- 2. If flow missing: compile (both modes)
+  IF NOT v_flow_exists THEN
+    PERFORM pgflow._create_flow_from_shape(p_flow_slug, p_shape);
+    RETURN jsonb_build_object('status', 'compiled', 'differences', '[]'::jsonb);
+  END IF;
+
+  -- 3. Get current shape from DB
+  v_db_shape := pgflow._get_flow_shape(p_flow_slug);
+
+  -- 4. Compare shapes
+  v_differences := pgflow._compare_flow_shapes(p_shape, v_db_shape);
+
+  -- 5. If shapes match: return verified
+  IF array_length(v_differences, 1) IS NULL THEN
+    RETURN jsonb_build_object('status', 'verified', 'differences', '[]'::jsonb);
+  END IF;
+
+  -- 6. Shapes differ - handle by mode
+  IF p_mode = 'development' THEN
+    -- Recompile in dev mode: full deletion + fresh compile
+    PERFORM pgflow.delete_flow_and_data(p_flow_slug);
+    PERFORM pgflow._create_flow_from_shape(p_flow_slug, p_shape);
+    RETURN jsonb_build_object('status', 'recompiled', 'differences', to_jsonb(v_differences));
+  ELSE
+    -- Fail in production mode
+    RETURN jsonb_build_object('status', 'mismatch', 'differences', to_jsonb(v_differences));
+  END IF;
+END;
+$$;
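To make the shape contract above concrete, here is a rough sketch of a call to the new `ensure_flow_compiled`, using the keys that `_create_flow_from_shape` reads and `_get_flow_shape` emits (`slug`, `stepType`, `dependencies`, plus optional `options`). The flow slug, step slugs, and the `'single'` step type are assumptions for illustration only.

```sql
-- Hypothetical call: compile or verify a two-step flow in development mode.
-- Returns jsonb such as
--   {"status": "compiled" | "verified" | "recompiled" | "mismatch", "differences": [...]}
SELECT pgflow.ensure_flow_compiled(
  'my_flow',
  '{
    "options": {"maxAttempts": 5, "baseDelay": 10, "timeout": 120},
    "steps": [
      {"slug": "fetch",   "stepType": "single", "dependencies": []},
      {"slug": "process", "stepType": "single", "dependencies": ["fetch"],
       "options": {"timeout": 300, "startDelay": 5}}
    ]
  }'::jsonb,
  'development'
);
```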
@@ -0,0 +1,273 @@
+-- Create extension "pg_net" if not exists
+CREATE EXTENSION IF NOT EXISTS "pg_net" WITH SCHEMA "public";
+-- Create extension "pg_cron" if not exists
+CREATE EXTENSION IF NOT EXISTS "pg_cron";
+-- Modify "workers" table
+ALTER TABLE "pgflow"."workers" ADD COLUMN "stopped_at" timestamptz NULL;
+-- Create "worker_functions" table
+CREATE TABLE "pgflow"."worker_functions" (
+  "function_name" text NOT NULL,
+  "enabled" boolean NOT NULL DEFAULT true,
+  "debounce" interval NOT NULL DEFAULT '00:00:06'::interval,
+  "last_invoked_at" timestamptz NULL,
+  "created_at" timestamptz NOT NULL DEFAULT now(),
+  "updated_at" timestamptz NOT NULL DEFAULT now(),
+  PRIMARY KEY ("function_name"),
+  CONSTRAINT "worker_functions_debounce_check" CHECK (debounce >= '00:00:01'::interval)
+);
+-- Set comment to table: "worker_functions"
+COMMENT ON TABLE "pgflow"."worker_functions" IS 'Registry of edge functions that run pgflow workers, used by ensure_workers() cron';
+-- Set comment to column: "function_name" on table: "worker_functions"
+COMMENT ON COLUMN "pgflow"."worker_functions"."function_name" IS 'Name of the Supabase Edge Function';
+-- Set comment to column: "enabled" on table: "worker_functions"
+COMMENT ON COLUMN "pgflow"."worker_functions"."enabled" IS 'Whether ensure_workers() should ping this function';
+-- Set comment to column: "debounce" on table: "worker_functions"
+COMMENT ON COLUMN "pgflow"."worker_functions"."debounce" IS 'Minimum interval between invocation attempts for this function';
+-- Set comment to column: "last_invoked_at" on table: "worker_functions"
+COMMENT ON COLUMN "pgflow"."worker_functions"."last_invoked_at" IS 'When ensure_workers() last pinged this function (used for debouncing)';
+-- Create "cleanup_ensure_workers_logs" function
+CREATE FUNCTION "pgflow"."cleanup_ensure_workers_logs" ("retention_hours" integer DEFAULT 24) RETURNS TABLE ("cron_deleted" bigint) LANGUAGE sql SECURITY DEFINER SET "search_path" = pgflow, cron, pg_temp AS $$
+with deleted as (
+  delete from cron.job_run_details
+  where job_run_details.end_time < now() - (cleanup_ensure_workers_logs.retention_hours || ' hours')::interval
+  returning 1
+)
+select count(*)::bigint as cron_deleted from deleted
+$$;
+-- Set comment to function: "cleanup_ensure_workers_logs"
+COMMENT ON FUNCTION "pgflow"."cleanup_ensure_workers_logs" IS 'Cleans up old cron job run details to prevent table growth.
+Default retention is 24 hours. HTTP response logs (net._http_response) are
+automatically cleaned by pg_net with a 6-hour TTL, so they are not cleaned here.
+This function follows the standard pg_cron maintenance pattern recommended by
+AWS RDS, Neon, and Supabase documentation.';
+-- Create "is_local" function
+CREATE FUNCTION "pgflow"."is_local" () RETURNS boolean LANGUAGE sql STABLE PARALLEL SAFE SET "search_path" = '' AS $$
+select coalesce(
+  current_setting('app.settings.jwt_secret', true)
+    = 'super-secret-jwt-token-with-at-least-32-characters-long',
+  false
+)
+$$;
+-- Create "ensure_flow_compiled" function
+CREATE FUNCTION "pgflow"."ensure_flow_compiled" ("flow_slug" text, "shape" jsonb) RETURNS jsonb LANGUAGE plpgsql SET "search_path" = '' AS $$
+DECLARE
+  v_lock_key int;
+  v_flow_exists boolean;
+  v_db_shape jsonb;
+  v_differences text[];
+  v_is_local boolean;
+BEGIN
+  -- Generate lock key from flow_slug (deterministic hash)
+  v_lock_key := hashtext(ensure_flow_compiled.flow_slug);
+
+  -- Acquire transaction-level advisory lock
+  -- Serializes concurrent compilation attempts for same flow
+  PERFORM pg_advisory_xact_lock(1, v_lock_key);
+
+  -- 1. Check if flow exists
+  SELECT EXISTS(SELECT 1 FROM pgflow.flows AS flow WHERE flow.flow_slug = ensure_flow_compiled.flow_slug)
+  INTO v_flow_exists;
+
+  -- 2. If flow missing: compile (both environments)
+  IF NOT v_flow_exists THEN
+    PERFORM pgflow._create_flow_from_shape(ensure_flow_compiled.flow_slug, ensure_flow_compiled.shape);
+    RETURN jsonb_build_object('status', 'compiled', 'differences', '[]'::jsonb);
+  END IF;
+
+  -- 3. Get current shape from DB
+  v_db_shape := pgflow._get_flow_shape(ensure_flow_compiled.flow_slug);
+
+  -- 4. Compare shapes
+  v_differences := pgflow._compare_flow_shapes(ensure_flow_compiled.shape, v_db_shape);
+
+  -- 5. If shapes match: return verified
+  IF array_length(v_differences, 1) IS NULL THEN
+    RETURN jsonb_build_object('status', 'verified', 'differences', '[]'::jsonb);
+  END IF;
+
+  -- 6. Shapes differ - auto-detect environment via is_local()
+  v_is_local := pgflow.is_local();
+
+  IF v_is_local THEN
+    -- Recompile in local/dev: full deletion + fresh compile
+    PERFORM pgflow.delete_flow_and_data(ensure_flow_compiled.flow_slug);
+    PERFORM pgflow._create_flow_from_shape(ensure_flow_compiled.flow_slug, ensure_flow_compiled.shape);
+    RETURN jsonb_build_object('status', 'recompiled', 'differences', to_jsonb(v_differences));
+  ELSE
+    -- Fail in production
+    RETURN jsonb_build_object('status', 'mismatch', 'differences', to_jsonb(v_differences));
+  END IF;
+END;
+$$;
+-- Create "ensure_workers" function
+CREATE FUNCTION "pgflow"."ensure_workers" () RETURNS TABLE ("function_name" text, "invoked" boolean, "request_id" bigint) LANGUAGE sql AS $$
+with
+  -- Detect environment
+  env as (
+    select pgflow.is_local() as is_local
+  ),
+
+  -- Get credentials: Local mode uses hardcoded URL, production uses vault secrets
+  -- Empty strings are treated as NULL using nullif()
+  credentials as (
+    select
+      case
+        when (select is_local from env) then null
+        else nullif((select decrypted_secret from vault.decrypted_secrets where name = 'supabase_service_role_key'), '')
+      end as service_role_key,
+      case
+        when (select is_local from env) then 'http://kong:8000/functions/v1'
+        else (select 'https://' || nullif(decrypted_secret, '') || '.supabase.co/functions/v1' from vault.decrypted_secrets where name = 'supabase_project_id')
+      end as base_url
+  ),
+
+  -- Find functions that pass the debounce check
+  debounce_passed as (
+    select wf.function_name, wf.debounce
+    from pgflow.worker_functions as wf
+    where wf.enabled = true
+      and (
+        wf.last_invoked_at is null
+        or wf.last_invoked_at < now() - wf.debounce
+      )
+  ),
+
+  -- Find functions that have at least one alive worker
+  functions_with_alive_workers as (
+    select distinct w.function_name
+    from pgflow.workers as w
+    inner join debounce_passed as dp on w.function_name = dp.function_name
+    where w.stopped_at is null
+      and w.deprecated_at is null
+      and w.last_heartbeat_at > now() - dp.debounce
+  ),
+
+  -- Determine which functions should be invoked
+  -- Local mode: all enabled functions (bypass debounce AND alive workers check)
+  -- Production mode: only functions that pass debounce AND have no alive workers
+  functions_to_invoke as (
+    select wf.function_name
+    from pgflow.worker_functions as wf
+    where wf.enabled = true
+      and (
+        pgflow.is_local() = true -- Local: all enabled functions
+        or (
+          -- Production: debounce + no alive workers
+          wf.function_name in (select dp.function_name from debounce_passed as dp)
+          and wf.function_name not in (select faw.function_name from functions_with_alive_workers as faw)
+        )
+      )
+  ),
+
+  -- Make HTTP requests and capture request_ids
+  http_requests as (
+    select
+      fti.function_name,
+      net.http_post(
+        url => c.base_url || '/' || fti.function_name,
+        headers => case
+          when e.is_local then '{}'::jsonb
+          else jsonb_build_object(
+            'Content-Type', 'application/json',
+            'Authorization', 'Bearer ' || c.service_role_key
+          )
+        end,
+        body => '{}'::jsonb
+      ) as request_id
+    from functions_to_invoke as fti
+    cross join credentials as c
+    cross join env as e
+    where c.base_url is not null
+      and (e.is_local or c.service_role_key is not null)
+  ),
+
+  -- Update last_invoked_at for invoked functions
+  updated as (
+    update pgflow.worker_functions as wf
+    set last_invoked_at = clock_timestamp()
+    from http_requests as hr
+    where wf.function_name = hr.function_name
+    returning wf.function_name
+  )
+
+select u.function_name, true as invoked, hr.request_id
+from updated as u
+inner join http_requests as hr on u.function_name = hr.function_name
+$$;
+-- Set comment to function: "ensure_workers"
+COMMENT ON FUNCTION "pgflow"."ensure_workers" IS 'Ensures worker functions are running by pinging them via HTTP when needed.
+In local mode: pings ALL enabled functions (ignores debounce AND alive workers check).
+In production mode: only pings functions that pass debounce AND have no alive workers.
+Debounce: skips functions pinged within their debounce interval (production only).
+Credentials: Uses Vault secrets (supabase_service_role_key, supabase_project_id) or local fallbacks.
+URL is built from project_id: https://{project_id}.supabase.co/functions/v1
+Returns request_id from pg_net for each HTTP request made.';
+-- Create "mark_worker_stopped" function
+CREATE FUNCTION "pgflow"."mark_worker_stopped" ("worker_id" uuid) RETURNS void LANGUAGE sql AS $$
+update pgflow.workers
+set stopped_at = clock_timestamp()
+where workers.worker_id = mark_worker_stopped.worker_id;
+$$;
+-- Set comment to function: "mark_worker_stopped"
+COMMENT ON FUNCTION "pgflow"."mark_worker_stopped" IS 'Marks a worker as stopped for graceful shutdown. Called by workers on beforeunload.';
+-- Create "setup_ensure_workers_cron" function
+CREATE FUNCTION "pgflow"."setup_ensure_workers_cron" ("cron_interval" text DEFAULT '1 second') RETURNS text LANGUAGE plpgsql SECURITY DEFINER SET "search_path" = pgflow, cron, pg_temp AS $$
+declare
+  ensure_workers_job_id bigint;
+  cleanup_job_id bigint;
+begin
+  -- Remove existing jobs if they exist (ignore errors if not found)
+  begin
+    perform cron.unschedule('pgflow_ensure_workers');
+  exception when others then
+    -- Job doesn't exist, continue
+  end;
+
+  begin
+    perform cron.unschedule('pgflow_cleanup_logs');
+  exception when others then
+    -- Job doesn't exist, continue
+  end;
+
+  -- Schedule ensure_workers job with the specified interval
+  ensure_workers_job_id := cron.schedule(
+    job_name => 'pgflow_ensure_workers',
+    schedule => setup_ensure_workers_cron.cron_interval,
+    command => 'select pgflow.ensure_workers()'
+  );
+
+  -- Schedule cleanup job to run hourly
+  cleanup_job_id := cron.schedule(
+    job_name => 'pgflow_cleanup_logs',
+    schedule => '0 * * * *',
+    command => 'select pgflow.cleanup_ensure_workers_logs()'
+  );
+
+  return format(
+    'Scheduled pgflow_ensure_workers (every %s, job_id=%s) and pgflow_cleanup_logs (hourly, job_id=%s)',
+    setup_ensure_workers_cron.cron_interval,
+    ensure_workers_job_id,
+    cleanup_job_id
+  );
+end;
+$$;
+-- Set comment to function: "setup_ensure_workers_cron"
+COMMENT ON FUNCTION "pgflow"."setup_ensure_workers_cron" IS 'Sets up cron jobs for worker management.
+Schedules pgflow_ensure_workers at the specified cron_interval (default: 1 second) to keep workers running.
+Schedules pgflow_cleanup_logs hourly to clean up old cron job logs.
+Replaces existing jobs if they exist (idempotent).
+Returns a confirmation message with job IDs.';
+-- Create "track_worker_function" function
+CREATE FUNCTION "pgflow"."track_worker_function" ("function_name" text) RETURNS void LANGUAGE sql AS $$
+insert into pgflow.worker_functions (function_name, updated_at)
+values (track_worker_function.function_name, clock_timestamp())
+on conflict (function_name)
+do update set
+  updated_at = clock_timestamp();
+$$;
+-- Set comment to function: "track_worker_function"
+COMMENT ON FUNCTION "pgflow"."track_worker_function" IS 'Registers an edge function for monitoring. Called by workers on startup.';
+-- Drop "ensure_flow_compiled" function
+DROP FUNCTION "pgflow"."ensure_flow_compiled" (text, jsonb, text);
+-- Auto-install ensure_workers cron job (1 second interval)
+SELECT pgflow.setup_ensure_workers_cron('1 second');
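As a hedged sketch only: wiring the worker-management pieces above into a hosted project might look roughly like this, assuming Supabase Vault's `vault.create_secret` helper and an edge function named `pgflow-worker`. The secret values, function name, and the '10 seconds' interval are placeholders, not part of this diff.

```sql
-- ensure_workers() looks up these two Vault secrets by name in production.
SELECT vault.create_secret('<service-role-key>', 'supabase_service_role_key');
SELECT vault.create_secret('<project-ref>', 'supabase_project_id');

-- Register the edge function so the cron job keeps it alive
-- (workers normally call track_worker_function themselves on startup).
SELECT pgflow.track_worker_function('pgflow-worker');

-- Optionally reschedule the keep-alive job with a slower tick than the default.
SELECT pgflow.setup_ensure_workers_cron('10 seconds');

-- Inspect which functions the next tick would invoke.
SELECT * FROM pgflow.ensure_workers();
```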
@@ -0,0 +1,54 @@
+-- Drop old 2-parameter version before creating new 3-parameter version
+DROP FUNCTION IF EXISTS "pgflow"."ensure_flow_compiled" (text, jsonb);
+
+-- Create "ensure_flow_compiled" function with allow_data_loss parameter
+CREATE FUNCTION "pgflow"."ensure_flow_compiled" ("flow_slug" text, "shape" jsonb, "allow_data_loss" boolean DEFAULT false) RETURNS jsonb LANGUAGE plpgsql SET "search_path" = '' AS $$
+DECLARE
+  v_lock_key int;
+  v_flow_exists boolean;
+  v_db_shape jsonb;
+  v_differences text[];
+  v_is_local boolean;
+BEGIN
+  -- Generate lock key from flow_slug (deterministic hash)
+  v_lock_key := hashtext(ensure_flow_compiled.flow_slug);
+
+  -- Acquire transaction-level advisory lock
+  -- Serializes concurrent compilation attempts for same flow
+  PERFORM pg_advisory_xact_lock(1, v_lock_key);
+
+  -- 1. Check if flow exists
+  SELECT EXISTS(SELECT 1 FROM pgflow.flows AS flow WHERE flow.flow_slug = ensure_flow_compiled.flow_slug)
+  INTO v_flow_exists;
+
+  -- 2. If flow missing: compile (both environments)
+  IF NOT v_flow_exists THEN
+    PERFORM pgflow._create_flow_from_shape(ensure_flow_compiled.flow_slug, ensure_flow_compiled.shape);
+    RETURN jsonb_build_object('status', 'compiled', 'differences', '[]'::jsonb);
+  END IF;
+
+  -- 3. Get current shape from DB
+  v_db_shape := pgflow._get_flow_shape(ensure_flow_compiled.flow_slug);
+
+  -- 4. Compare shapes
+  v_differences := pgflow._compare_flow_shapes(ensure_flow_compiled.shape, v_db_shape);
+
+  -- 5. If shapes match: return verified
+  IF array_length(v_differences, 1) IS NULL THEN
+    RETURN jsonb_build_object('status', 'verified', 'differences', '[]'::jsonb);
+  END IF;
+
+  -- 6. Shapes differ - auto-detect environment via is_local()
+  v_is_local := pgflow.is_local();
+
+  IF v_is_local OR allow_data_loss THEN
+    -- Recompile in local/dev: full deletion + fresh compile
+    PERFORM pgflow.delete_flow_and_data(ensure_flow_compiled.flow_slug);
+    PERFORM pgflow._create_flow_from_shape(ensure_flow_compiled.flow_slug, ensure_flow_compiled.shape);
+    RETURN jsonb_build_object('status', 'recompiled', 'differences', to_jsonb(v_differences));
+  ELSE
+    -- Fail in production
+    RETURN jsonb_build_object('status', 'mismatch', 'differences', to_jsonb(v_differences));
+  END IF;
+END;
+$$;
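Finally, a small sketch of the escape hatch this migration adds: forcing a recompile outside local development by passing `allow_data_loss => true`. The flow slug and shape below are illustrative.

```sql
-- Hypothetical call: the flow definition changed in production and we accept
-- that its existing runs, step states, and queue will be deleted and recreated.
SELECT pgflow.ensure_flow_compiled(
  flow_slug       => 'my_flow',
  shape           => '{"steps": [{"slug": "only_step", "stepType": "single", "dependencies": []}]}'::jsonb,
  allow_data_loss => true
);
-- Without allow_data_loss (and with is_local() = false), a shape mismatch
-- returns {"status": "mismatch", ...} and leaves the existing flow untouched.
```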