@pgflow/core 0.0.0-test-snapshot-releases2-8d5d9bc1-20250922101158 → 0.0.0-testsnap-11f20f76-20251207210211

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (32)
  1. package/README.md +177 -73
  2. package/dist/ATLAS.md +32 -0
  3. package/dist/CHANGELOG.md +824 -0
  4. package/dist/PgflowSqlClient.d.ts +17 -0
  5. package/dist/PgflowSqlClient.d.ts.map +1 -0
  6. package/dist/PgflowSqlClient.js +70 -0
  7. package/dist/README.md +497 -0
  8. package/dist/database-types.d.ts +1041 -0
  9. package/dist/database-types.d.ts.map +1 -0
  10. package/dist/database-types.js +8 -0
  11. package/dist/index.d.ts +4 -0
  12. package/dist/index.d.ts.map +1 -0
  13. package/dist/index.js +2 -0
  14. package/dist/package.json +31 -0
  15. package/dist/supabase/migrations/20250429164909_pgflow_initial.sql +579 -0
  16. package/dist/supabase/migrations/20250517072017_pgflow_fix_poll_for_tasks_to_use_separate_statement_for_polling.sql +101 -0
  17. package/dist/supabase/migrations/20250609105135_pgflow_add_start_tasks_and_started_status.sql +371 -0
  18. package/dist/supabase/migrations/20250610180554_pgflow_add_set_vt_batch_and_use_it_in_start_tasks.sql +127 -0
  19. package/dist/supabase/migrations/20250614124241_pgflow_add_realtime.sql +501 -0
  20. package/dist/supabase/migrations/20250619195327_pgflow_fix_fail_task_missing_realtime_event.sql +185 -0
  21. package/dist/supabase/migrations/20250627090700_pgflow_fix_function_search_paths.sql +6 -0
  22. package/dist/supabase/migrations/20250707210212_pgflow_add_opt_start_delay.sql +103 -0
  23. package/dist/supabase/migrations/20250719205006_pgflow_worker_deprecation.sql +2 -0
  24. package/dist/supabase/migrations/20251006073122_pgflow_add_map_step_type.sql +1244 -0
  25. package/dist/supabase/migrations/20251103222045_pgflow_fix_broadcast_order_and_timestamp_handling.sql +622 -0
  26. package/dist/supabase/migrations/20251104080523_pgflow_upgrade_pgmq_1_5_1.sql +93 -0
  27. package/dist/supabase/migrations/20251130000000_pgflow_auto_compilation.sql +268 -0
  28. package/dist/tsconfig.lib.tsbuildinfo +1 -0
  29. package/dist/types.d.ts +93 -0
  30. package/dist/types.d.ts.map +1 -0
  31. package/dist/types.js +1 -0
  32. package/package.json +4 -5
package/dist/supabase/migrations/20251104080523_pgflow_upgrade_pgmq_1_5_1.sql ADDED
@@ -0,0 +1,93 @@
+ -- Migration tested 2025-11-02:
+ -- Successfully verified that this migration fails on pgmq 1.4.4 (Supabase CLI < 2.50.3)
+ -- with clear error message guiding users to upgrade pgmq to 1.5.0+
+ --
+ -- Compatibility check: Ensure pgmq.message_record has headers column (pgmq 1.5.0+)
+ DO $$
+ DECLARE
+ has_headers BOOLEAN;
+ BEGIN
+ SELECT EXISTS (
+ SELECT 1
+ FROM pg_type t
+ JOIN pg_namespace n ON t.typnamespace = n.oid
+ JOIN pg_attribute a ON a.attrelid = t.typrelid
+ WHERE n.nspname = 'pgmq'
+ AND t.typname = 'message_record'
+ AND a.attname = 'headers'
+ AND a.attnum > 0
+ AND NOT a.attisdropped
+ ) INTO has_headers;
+
+ IF NOT has_headers THEN
+ RAISE EXCEPTION E'INCOMPATIBLE PGMQ VERSION DETECTED\n\n'
+ 'This migration is part of pgflow 0.8.0+, which requires pgmq 1.5.0 or higher.\n'
+ 'The pgmq.message_record type is missing the "headers" column, which indicates you are running pgmq < 1.5.0.\n\n'
+ 'pgflow 0.8.0+ is NOT compatible with pgmq versions below 1.5.0.\n\n'
+ 'Action required:\n'
+ ' - If using Supabase: Ensure you are running a recent version that includes pgmq 1.5.0+\n'
+ ' - If self-hosting: Upgrade pgmq to version 1.5.0 or higher before running this migration\n\n'
+ 'Migration aborted to prevent runtime failures.';
+ END IF;
+ END $$;
+
+ -- Modify "set_vt_batch" function
+ -- Must drop first because we're changing the return type from SETOF to TABLE
+ DROP FUNCTION IF EXISTS "pgflow"."set_vt_batch"(text, bigint[], integer[]);
+ CREATE FUNCTION "pgflow"."set_vt_batch" (
+ "queue_name" text,
+ "msg_ids" bigint[],
+ "vt_offsets" integer[]
+ )
+ RETURNS TABLE(
+ msg_id bigint,
+ read_ct integer,
+ enqueued_at timestamp with time zone,
+ vt timestamp with time zone,
+ message jsonb,
+ headers jsonb
+ )
+ LANGUAGE plpgsql AS $$
+ DECLARE
+ qtable TEXT := pgmq.format_table_name(queue_name, 'q');
+ sql TEXT;
+ BEGIN
+ /* ---------- safety checks ---------------------------------------------------- */
+ IF msg_ids IS NULL OR vt_offsets IS NULL OR array_length(msg_ids, 1) = 0 THEN
+ RETURN; -- nothing to do, return empty set
+ END IF;
+
+ IF array_length(msg_ids, 1) IS DISTINCT FROM array_length(vt_offsets, 1) THEN
+ RAISE EXCEPTION
+ 'msg_ids length (%) must equal vt_offsets length (%)',
+ array_length(msg_ids, 1), array_length(vt_offsets, 1);
+ END IF;
+
+ /* ---------- dynamic statement ------------------------------------------------ */
+ /* One UPDATE joins with the unnested arrays */
+ sql := format(
+ $FMT$
+ WITH input (msg_id, vt_offset) AS (
+ SELECT unnest($1)::bigint
+ , unnest($2)::int
+ )
+ UPDATE pgmq.%I q
+ SET vt = clock_timestamp() + make_interval(secs => input.vt_offset),
+ read_ct = read_ct -- no change, but keeps RETURNING list aligned
+ FROM input
+ WHERE q.msg_id = input.msg_id
+ RETURNING q.msg_id,
+ q.read_ct,
+ q.enqueued_at,
+ q.vt,
+ q.message,
+ q.headers
+ $FMT$,
+ qtable
+ );
+
+ RETURN QUERY EXECUTE sql USING msg_ids, vt_offsets;
+ END;
+ $$;
+ -- Drop "read_with_poll" function
+ DROP FUNCTION "pgflow"."read_with_poll";
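The rewritten set_vt_batch above now returns pgmq's headers column alongside the usual message fields, which is why the migration first verifies pgmq 1.5.0+. A minimal sketch of calling it through the postgres client this package depends on; the connection string, queue name, and message IDs are hypothetical:

```typescript
import postgres from 'postgres';

// Hypothetical connection; adjust DATABASE_URL for your environment.
const sql = postgres(process.env.DATABASE_URL ?? 'postgres://localhost:5432/postgres');

// Push back the visibility timeout of three in-flight messages on the
// 'my_flow' queue by per-message offsets (in seconds). The returned rows
// include the headers column that requires pgmq 1.5.0+.
const rows = await sql`
  SELECT msg_id, vt, headers
  FROM pgflow.set_vt_batch(
    ${'my_flow'},
    ${[101, 102, 103]}::bigint[],
    ${[30, 60, 90]}::int[]
  )
`;
console.log(rows);

await sql.end();
```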
package/dist/supabase/migrations/20251130000000_pgflow_auto_compilation.sql ADDED
@@ -0,0 +1,268 @@
+ -- Modify "create_flow" function
+ CREATE OR REPLACE FUNCTION "pgflow"."create_flow" ("flow_slug" text, "max_attempts" integer DEFAULT NULL::integer, "base_delay" integer DEFAULT NULL::integer, "timeout" integer DEFAULT NULL::integer) RETURNS "pgflow"."flows" LANGUAGE sql SET "search_path" = '' AS $$
+ WITH
+ defaults AS (
+ SELECT 3 AS def_max_attempts, 5 AS def_base_delay, 60 AS def_timeout
+ ),
+ flow_upsert AS (
+ INSERT INTO pgflow.flows (flow_slug, opt_max_attempts, opt_base_delay, opt_timeout)
+ SELECT
+ flow_slug,
+ COALESCE(max_attempts, defaults.def_max_attempts),
+ COALESCE(base_delay, defaults.def_base_delay),
+ COALESCE(timeout, defaults.def_timeout)
+ FROM defaults
+ ON CONFLICT (flow_slug) DO UPDATE
+ SET flow_slug = pgflow.flows.flow_slug -- Dummy update
+ RETURNING *
+ ),
+ ensure_queue AS (
+ SELECT pgmq.create(flow_slug)
+ WHERE NOT EXISTS (
+ SELECT 1 FROM pgmq.list_queues() WHERE queue_name = flow_slug
+ )
+ )
+ SELECT f.*
+ FROM flow_upsert f
+ LEFT JOIN (SELECT 1 FROM ensure_queue) _dummy ON true; -- Left join ensures flow is returned
+ $$;
+ -- Create "_compare_flow_shapes" function
+ CREATE FUNCTION "pgflow"."_compare_flow_shapes" ("p_local" jsonb, "p_db" jsonb) RETURNS text[] LANGUAGE plpgsql STABLE SET "search_path" = '' AS $BODY$
+ DECLARE
+ v_differences text[] := '{}';
+ v_local_steps jsonb;
+ v_db_steps jsonb;
+ v_local_count int;
+ v_db_count int;
+ v_max_count int;
+ v_idx int;
+ v_local_step jsonb;
+ v_db_step jsonb;
+ v_local_deps text;
+ v_db_deps text;
+ BEGIN
+ v_local_steps := p_local->'steps';
+ v_db_steps := p_db->'steps';
+ v_local_count := jsonb_array_length(COALESCE(v_local_steps, '[]'::jsonb));
+ v_db_count := jsonb_array_length(COALESCE(v_db_steps, '[]'::jsonb));
+
+ -- Compare step counts
+ IF v_local_count != v_db_count THEN
+ v_differences := array_append(
+ v_differences,
+ format('Step count differs: %s vs %s', v_local_count, v_db_count)
+ );
+ END IF;
+
+ -- Compare steps by index
+ v_max_count := GREATEST(v_local_count, v_db_count);
+
+ FOR v_idx IN 0..(v_max_count - 1) LOOP
+ v_local_step := v_local_steps->v_idx;
+ v_db_step := v_db_steps->v_idx;
+
+ IF v_local_step IS NULL THEN
+ v_differences := array_append(
+ v_differences,
+ format(
+ $$Step at index %s: missing in first shape (second has '%s')$$,
+ v_idx,
+ v_db_step->>'slug'
+ )
+ );
+ ELSIF v_db_step IS NULL THEN
+ v_differences := array_append(
+ v_differences,
+ format(
+ $$Step at index %s: missing in second shape (first has '%s')$$,
+ v_idx,
+ v_local_step->>'slug'
+ )
+ );
+ ELSE
+ -- Compare slug
+ IF v_local_step->>'slug' != v_db_step->>'slug' THEN
+ v_differences := array_append(
+ v_differences,
+ format(
+ $$Step at index %s: slug differs '%s' vs '%s'$$,
+ v_idx,
+ v_local_step->>'slug',
+ v_db_step->>'slug'
+ )
+ );
+ END IF;
+
+ -- Compare step type
+ IF v_local_step->>'stepType' != v_db_step->>'stepType' THEN
+ v_differences := array_append(
+ v_differences,
+ format(
+ $$Step at index %s: type differs '%s' vs '%s'$$,
+ v_idx,
+ v_local_step->>'stepType',
+ v_db_step->>'stepType'
+ )
+ );
+ END IF;
+
+ -- Compare dependencies (convert arrays to comma-separated strings)
+ SELECT string_agg(dep, ', ' ORDER BY dep)
+ INTO v_local_deps
+ FROM jsonb_array_elements_text(COALESCE(v_local_step->'dependencies', '[]'::jsonb)) AS dep;
+
+ SELECT string_agg(dep, ', ' ORDER BY dep)
+ INTO v_db_deps
+ FROM jsonb_array_elements_text(COALESCE(v_db_step->'dependencies', '[]'::jsonb)) AS dep;
+
+ IF COALESCE(v_local_deps, '') != COALESCE(v_db_deps, '') THEN
+ v_differences := array_append(
+ v_differences,
+ format(
+ $$Step at index %s: dependencies differ [%s] vs [%s]$$,
+ v_idx,
+ COALESCE(v_local_deps, ''),
+ COALESCE(v_db_deps, '')
+ )
+ );
+ END IF;
+ END IF;
+ END LOOP;
+
+ RETURN v_differences;
+ END;
+ $BODY$;
+ -- Create "_create_flow_from_shape" function
+ CREATE FUNCTION "pgflow"."_create_flow_from_shape" ("p_flow_slug" text, "p_shape" jsonb) RETURNS void LANGUAGE plpgsql SET "search_path" = '' AS $$
+ DECLARE
+ v_step jsonb;
+ v_deps text[];
+ v_flow_options jsonb;
+ v_step_options jsonb;
+ BEGIN
+ -- Extract flow-level options (may be null)
+ v_flow_options := p_shape->'options';
+
+ -- Create the flow with options (NULL = use default)
+ PERFORM pgflow.create_flow(
+ p_flow_slug,
+ (v_flow_options->>'maxAttempts')::int,
+ (v_flow_options->>'baseDelay')::int,
+ (v_flow_options->>'timeout')::int
+ );
+
+ -- Iterate over steps in order and add each one
+ FOR v_step IN SELECT * FROM jsonb_array_elements(p_shape->'steps')
+ LOOP
+ -- Convert dependencies jsonb array to text array
+ SELECT COALESCE(array_agg(dep), '{}')
+ INTO v_deps
+ FROM jsonb_array_elements_text(COALESCE(v_step->'dependencies', '[]'::jsonb)) AS dep;
+
+ -- Extract step options (may be null)
+ v_step_options := v_step->'options';
+
+ -- Add the step with options (NULL = use default/inherit)
+ PERFORM pgflow.add_step(
+ flow_slug => p_flow_slug,
+ step_slug => v_step->>'slug',
+ deps_slugs => v_deps,
+ max_attempts => (v_step_options->>'maxAttempts')::int,
+ base_delay => (v_step_options->>'baseDelay')::int,
+ timeout => (v_step_options->>'timeout')::int,
+ start_delay => (v_step_options->>'startDelay')::int,
+ step_type => v_step->>'stepType'
+ );
+ END LOOP;
+ END;
+ $$;
+ -- Create "_get_flow_shape" function
+ CREATE FUNCTION "pgflow"."_get_flow_shape" ("p_flow_slug" text) RETURNS jsonb LANGUAGE sql STABLE SET "search_path" = '' AS $$
+ SELECT jsonb_build_object(
+ 'steps',
+ COALESCE(
+ jsonb_agg(
+ jsonb_build_object(
+ 'slug', step.step_slug,
+ 'stepType', step.step_type,
+ 'dependencies', COALESCE(
+ (
+ SELECT jsonb_agg(dep.dep_slug ORDER BY dep.dep_slug)
+ FROM pgflow.deps AS dep
+ WHERE dep.flow_slug = step.flow_slug
+ AND dep.step_slug = step.step_slug
+ ),
+ '[]'::jsonb
+ )
+ )
+ ORDER BY step.step_index
+ ),
+ '[]'::jsonb
+ )
+ )
+ FROM pgflow.steps AS step
+ WHERE step.flow_slug = p_flow_slug;
+ $$;
+ -- Create "delete_flow_and_data" function
+ CREATE FUNCTION "pgflow"."delete_flow_and_data" ("p_flow_slug" text) RETURNS void LANGUAGE plpgsql SET "search_path" = '' AS $$
+ BEGIN
+ -- Drop queue and archive table (pgmq)
+ PERFORM pgmq.drop_queue(p_flow_slug);
+
+ -- Delete all associated data in the correct order (respecting FK constraints)
+ DELETE FROM pgflow.step_tasks AS task WHERE task.flow_slug = p_flow_slug;
+ DELETE FROM pgflow.step_states AS state WHERE state.flow_slug = p_flow_slug;
+ DELETE FROM pgflow.runs AS run WHERE run.flow_slug = p_flow_slug;
+ DELETE FROM pgflow.deps AS dep WHERE dep.flow_slug = p_flow_slug;
+ DELETE FROM pgflow.steps AS step WHERE step.flow_slug = p_flow_slug;
+ DELETE FROM pgflow.flows AS flow WHERE flow.flow_slug = p_flow_slug;
+ END;
+ $$;
+ -- Create "ensure_flow_compiled" function
+ CREATE FUNCTION "pgflow"."ensure_flow_compiled" ("p_flow_slug" text, "p_shape" jsonb, "p_mode" text DEFAULT 'production') RETURNS jsonb LANGUAGE plpgsql SET "search_path" = '' AS $$
+ DECLARE
+ v_lock_key int;
+ v_flow_exists boolean;
+ v_db_shape jsonb;
+ v_differences text[];
+ BEGIN
+ -- Generate lock key from flow_slug (deterministic hash)
+ v_lock_key := hashtext(p_flow_slug);
+
+ -- Acquire transaction-level advisory lock
+ -- Serializes concurrent compilation attempts for same flow
+ PERFORM pg_advisory_xact_lock(1, v_lock_key);
+
+ -- 1. Check if flow exists
+ SELECT EXISTS(SELECT 1 FROM pgflow.flows AS flow WHERE flow.flow_slug = p_flow_slug)
+ INTO v_flow_exists;
+
+ -- 2. If flow missing: compile (both modes)
+ IF NOT v_flow_exists THEN
+ PERFORM pgflow._create_flow_from_shape(p_flow_slug, p_shape);
+ RETURN jsonb_build_object('status', 'compiled', 'differences', '[]'::jsonb);
+ END IF;
+
+ -- 3. Get current shape from DB
+ v_db_shape := pgflow._get_flow_shape(p_flow_slug);
+
+ -- 4. Compare shapes
+ v_differences := pgflow._compare_flow_shapes(p_shape, v_db_shape);
+
+ -- 5. If shapes match: return verified
+ IF array_length(v_differences, 1) IS NULL THEN
+ RETURN jsonb_build_object('status', 'verified', 'differences', '[]'::jsonb);
+ END IF;
+
+ -- 6. Shapes differ - handle by mode
+ IF p_mode = 'development' THEN
+ -- Recompile in dev mode: full deletion + fresh compile
+ PERFORM pgflow.delete_flow_and_data(p_flow_slug);
+ PERFORM pgflow._create_flow_from_shape(p_flow_slug, p_shape);
+ RETURN jsonb_build_object('status', 'recompiled', 'differences', to_jsonb(v_differences));
+ ELSE
+ -- Fail in production mode
+ RETURN jsonb_build_object('status', 'mismatch', 'differences', to_jsonb(v_differences));
+ END IF;
+ END;
+ $$;
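ensure_flow_compiled takes a JSON "shape" in the layout read by _create_flow_from_shape and _compare_flow_shapes (flow-level options plus an ordered steps array with slug, stepType, dependencies and per-step options), serializes concurrent callers with an advisory lock, and then compiles, verifies, recompiles (development mode) or reports a mismatch (production mode). A hedged sketch of invoking it from TypeScript via the postgres client; the flow slug, shape values, and stepType string are illustrative assumptions:

```typescript
import postgres from 'postgres';

const sql = postgres(process.env.DATABASE_URL ?? 'postgres://localhost:5432/postgres');

// Hypothetical shape in the layout consumed by _create_flow_from_shape:
// flow-level options plus ordered steps with slug/stepType/dependencies/options.
const shape = {
  options: { maxAttempts: 3, baseDelay: 5, timeout: 60 },
  steps: [
    { slug: 'fetch', stepType: 'single', dependencies: [], options: {} },
    { slug: 'summarize', stepType: 'single', dependencies: ['fetch'], options: { timeout: 120 } },
  ],
};

// In 'development' mode a shape mismatch deletes the flow and its data and
// recompiles; in 'production' mode it returns status 'mismatch' with the
// human-readable differences instead.
const [{ result }] = await sql`
  SELECT pgflow.ensure_flow_compiled(${'my_flow'}, ${sql.json(shape)}, ${'development'}) AS result
`;
console.log(result); // e.g. { status: 'compiled', differences: [] }

await sql.end();
```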
package/dist/tsconfig.lib.tsbuildinfo ADDED
@@ -0,0 +1 @@
+ {"version":"5.8.3"}
package/dist/types.d.ts ADDED
@@ -0,0 +1,93 @@
+ import type { ExtractFlowSteps, StepInput, Simplify, AnyFlow, ExtractFlowInput, Json } from '@pgflow/dsl';
+ import type { Database } from './database-types.js';
+ export type { Json };
+ /**
+ * Record representing a task from pgflow.start_tasks
+ *
+ * Same as pgflow.step_task_record type, but with not-null fields and type argument for payload.
+ * The input type is automatically inferred based on the step_slug using a discriminated union.
+ * This ensures that each step only receives inputs from its declared dependencies and the flow's run input.
+ */
+ export type StepTaskRecord<TFlow extends AnyFlow> = {
+ [StepSlug in Extract<keyof ExtractFlowSteps<TFlow>, string>]: {
+ flow_slug: string;
+ run_id: string;
+ step_slug: StepSlug;
+ task_index: number;
+ input: Simplify<StepInput<TFlow, StepSlug>>;
+ msg_id: number;
+ };
+ }[Extract<keyof ExtractFlowSteps<TFlow>, string>];
+ /**
+ * Composite key that is enough to find a particular step task
+ * Contains only the minimum fields needed to identify a task
+ */
+ export type StepTaskKey = Pick<StepTaskRecord<AnyFlow>, 'run_id' | 'step_slug' | 'task_index'>;
+ /**
+ * Record representing a message from queue polling
+ */
+ export type MessageRecord = {
+ msg_id: number;
+ read_ct: number;
+ enqueued_at: string;
+ vt: string;
+ message: Json;
+ };
+ /**
+ * Interface for interacting with pgflow database functions
+ */
+ export interface IPgflowClient<TFlow extends AnyFlow = AnyFlow> {
+ /**
+ * Start a flow with optional run_id
+ */
+ startFlow<TFlow extends AnyFlow>(flow_slug: string, input: ExtractFlowInput<TFlow>, run_id?: string): Promise<RunRow>;
+ /**
+ * Reads messages from queue without starting tasks (phase 1 of two-phase approach)
+ * @param queueName - Name of the queue
+ * @param visibilityTimeout - Visibility timeout for messages
+ * @param batchSize - Number of messages to fetch
+ * @param maxPollSeconds - Maximum time to poll for messages
+ * @param pollIntervalMs - Poll interval in milliseconds
+ */
+ readMessages(queueName: string, visibilityTimeout: number, batchSize: number, maxPollSeconds?: number, pollIntervalMs?: number): Promise<MessageRecord[]>;
+ /**
+ * Starts tasks for given message IDs (phase 2 of two-phase approach)
+ * @param flowSlug - The flow slug to start tasks from
+ * @param msgIds - Array of message IDs from readMessages
+ * @param workerId - ID of the worker starting the tasks
+ */
+ startTasks(flowSlug: string, msgIds: number[], workerId: string): Promise<StepTaskRecord<TFlow>[]>;
+ /**
+ * Mark a task as completed with output
+ */
+ completeTask(stepTask: StepTaskKey, output?: Json): Promise<void>;
+ /**
+ * Mark a task as failed with error
+ */
+ failTask(stepTask: StepTaskKey, error: unknown): Promise<void>;
+ }
+ /**
+ * Record representing a flow from pgflow.flows
+ */
+ export type FlowRow = Database['pgflow']['Tables']['flows']['Row'];
+ /**
+ * Record representing a step from pgflow.steps
+ */
+ export type StepRow = Database['pgflow']['Tables']['steps']['Row'];
+ /**
+ * Record representing a dependency from pgflow.deps
+ */
+ export type DepRow = Database['pgflow']['Tables']['deps']['Row'];
+ /**
+ * Record representing a run from pgflow.runs
+ */
+ export type RunRow = Database['pgflow']['Tables']['runs']['Row'];
+ /**
+ * Record representing a step state from pgflow.step_states
+ */
+ export type StepStateRow = Database['pgflow']['Tables']['step_states']['Row'];
+ /**
+ * Record representing a step task from pgflow.step_tasks
+ */
+ export type StepTaskRow = Database['pgflow']['Tables']['step_tasks']['Row'];
+ //# sourceMappingURL=types.d.ts.map
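The discriminated union in StepTaskRecord lets a handler narrow each task's input type from step_slug alone, while StepTaskKey carries only the fields needed to acknowledge a task via completeTask/failTask. A small sketch, assuming these types are re-exported from the package entry point:

```typescript
import type { StepTaskRecord, StepTaskKey } from '@pgflow/core';
import type { AnyFlow } from '@pgflow/dsl';

// Every member of the union carries the same identifying fields, so generic
// helpers can log or route tasks without knowing the concrete flow.
function describeTask<TFlow extends AnyFlow>(task: StepTaskRecord<TFlow>): string {
  return `${task.flow_slug}/${task.step_slug} (run ${task.run_id}, task #${task.task_index})`;
}

// Only run_id, step_slug and task_index are needed to complete or fail a task.
function toKey<TFlow extends AnyFlow>(task: StepTaskRecord<TFlow>): StepTaskKey {
  const { run_id, step_slug, task_index } = task;
  return { run_id, step_slug, task_index };
}
```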
package/dist/types.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACV,gBAAgB,EAChB,SAAS,EACT,QAAQ,EACR,OAAO,EACP,gBAAgB,EAChB,IAAI,EACL,MAAM,aAAa,CAAC;AACrB,OAAO,KAAK,EAAE,QAAQ,EAAE,MAAM,qBAAqB,CAAC;AAEpD,YAAY,EAAE,IAAI,EAAE,CAAC;AAErB;;;;;;GAMG;AACH,MAAM,MAAM,cAAc,CAAC,KAAK,SAAS,OAAO,IAAI;KACjD,QAAQ,IAAI,OAAO,CAAC,MAAM,gBAAgB,CAAC,KAAK,CAAC,EAAE,MAAM,CAAC,GAAG;QAC5D,SAAS,EAAE,MAAM,CAAC;QAClB,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,QAAQ,CAAC;QACpB,UAAU,EAAE,MAAM,CAAC;QACnB,KAAK,EAAE,QAAQ,CAAC,SAAS,CAAC,KAAK,EAAE,QAAQ,CAAC,CAAC,CAAC;QAC5C,MAAM,EAAE,MAAM,CAAC;KAChB;CACF,CAAC,OAAO,CAAC,MAAM,gBAAgB,CAAC,KAAK,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC;AAElD;;;GAGG;AACH,MAAM,MAAM,WAAW,GAAG,IAAI,CAAC,cAAc,CAAC,OAAO,CAAC,EAAE,QAAQ,GAAG,WAAW,GAAG,YAAY,CAAC,CAAC;AAI/F;;GAEG;AACH,MAAM,MAAM,aAAa,GAAG;IAC1B,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,MAAM,CAAC;IAChB,WAAW,EAAE,MAAM,CAAC;IACpB,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,IAAI,CAAC;CACf,CAAC;AAEF;;GAEG;AACH,MAAM,WAAW,aAAa,CAAC,KAAK,SAAS,OAAO,GAAG,OAAO;IAC5D;;OAEG;IACH,SAAS,CAAC,KAAK,SAAS,OAAO,EAC7B,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,gBAAgB,CAAC,KAAK,CAAC,EAC9B,MAAM,CAAC,EAAE,MAAM,GACd,OAAO,CAAC,MAAM,CAAC,CAAC;IAEnB;;;;;;;OAOG;IACH,YAAY,CACV,SAAS,EAAE,MAAM,EACjB,iBAAiB,EAAE,MAAM,EACzB,SAAS,EAAE,MAAM,EACjB,cAAc,CAAC,EAAE,MAAM,EACvB,cAAc,CAAC,EAAE,MAAM,GACtB,OAAO,CAAC,aAAa,EAAE,CAAC,CAAC;IAE5B;;;;;OAKG;IACH,UAAU,CACR,QAAQ,EAAE,MAAM,EAChB,MAAM,EAAE,MAAM,EAAE,EAChB,QAAQ,EAAE,MAAM,GACf,OAAO,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC;IAEpC;;OAEG;IACH,YAAY,CAAC,QAAQ,EAAE,WAAW,EAAE,MAAM,CAAC,EAAE,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAElE;;OAEG;IACH,QAAQ,CAAC,QAAQ,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CAChE;AAED;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,KAAK,CAAC,CAAC;AAEnE;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC,KAAK,CAAC,CAAC;AAEnE;;GAEG;AACH,MAAM,MAAM,MAAM,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC;AAEjE;;GAEG;AACH,MAAM,MAAM,MAAM,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,CAAC,CAAC;AAEjE;;GAEG;AACH,MAAM,MAAM,YAAY,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,aAAa,CAAC,CAAC,KAAK,CAAC,CAAC;AAE9E;;GAEG;AACH,MAAM,MAAM,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC,CAAC,YAAY,CAAC,CAAC,KAAK,CAAC,CAAC"}
package/dist/types.js ADDED
@@ -0,0 +1 @@
+ export {};
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@pgflow/core",
- "version": "0.0.0-test-snapshot-releases2-8d5d9bc1-20250922101158",
- "license": "AGPL-3.0",
+ "version": "0.0.0-testsnap-11f20f76-20251207210211",
+ "license": "Apache-2.0",
  "type": "module",
  "main": "./dist/index.js",
  "module": "./dist/index.js",
@@ -19,12 +19,11 @@
  }
  },
  "devDependencies": {
- "@types/node": "^22.14.1",
- "supabase": "2.21.1"
+ "@types/node": "^22.14.1"
  },
  "dependencies": {
  "postgres": "^3.4.5",
- "@pgflow/dsl": "0.0.0-test-snapshot-releases2-8d5d9bc1-20250922101158"
+ "@pgflow/dsl": "0.0.0-testsnap-11f20f76-20251207210211"
  },
  "publishConfig": {
  "access": "public"