pgflow 0.0.16 → 0.0.17-yolo

Files changed (31)
  1. package/dist/commands/install/copy-migrations.d.ts +4 -0
  2. package/dist/commands/install/copy-migrations.d.ts.map +1 -0
  3. package/dist/commands/install/copy-migrations.js +70 -0
  4. package/dist/commands/install/index.d.ts +4 -0
  5. package/dist/commands/install/index.d.ts.map +1 -0
  6. package/dist/commands/install/index.js +32 -0
  7. package/dist/commands/install/supabase-path-prompt.d.ts +2 -0
  8. package/dist/commands/install/supabase-path-prompt.d.ts.map +1 -0
  9. package/dist/commands/install/supabase-path-prompt.js +24 -0
  10. package/dist/commands/install/update-config-toml.d.ts +17 -0
  11. package/dist/commands/install/update-config-toml.d.ts.map +1 -0
  12. package/dist/commands/install/update-config-toml.js +87 -0
  13. package/dist/index.d.ts +3 -0
  14. package/dist/index.d.ts.map +1 -0
  15. package/dist/index.js +67 -0
  16. package/dist/migrations/000000_schema.sql +150 -0
  17. package/dist/migrations/000005_create_flow.sql +29 -0
  18. package/dist/migrations/000010_add_step.sql +48 -0
  19. package/dist/migrations/000015_start_ready_steps.sql +45 -0
  20. package/dist/migrations/000020_start_flow.sql +46 -0
  21. package/dist/migrations/000030_read_with_poll_backport.sql +70 -0
  22. package/dist/migrations/000040_poll_for_tasks.sql +100 -0
  23. package/dist/migrations/000045_maybe_complete_run.sql +30 -0
  24. package/dist/migrations/000050_complete_task.sql +98 -0
  25. package/dist/migrations/000055_calculate_retry_delay.sql +11 -0
  26. package/dist/migrations/000060_fail_task.sql +124 -0
  27. package/dist/migrations/000_edge_worker_initial.sql +86 -0
  28. package/dist/package.json +38 -0
  29. package/dist/tsconfig.lib.tsbuildinfo +1 -0
  30. package/package.json +13 -10
  31. package/LICENSE.md +0 -660
package/dist/commands/install/copy-migrations.d.ts ADDED
@@ -0,0 +1,4 @@
+ export declare function copyMigrations({ supabasePath, }: {
+     supabasePath: string;
+ }): Promise<boolean>;
+ //# sourceMappingURL=copy-migrations.d.ts.map
package/dist/commands/install/copy-migrations.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"copy-migrations.d.ts","sourceRoot":"","sources":["../../../src/commands/install/copy-migrations.ts"],"names":[],"mappings":"AAaA,wBAAsB,cAAc,CAAC,EACnC,YAAY,GACb,EAAE;IACD,YAAY,EAAE,MAAM,CAAC;CACtB,GAAG,OAAO,CAAC,OAAO,CAAC,CAmFnB"}
package/dist/commands/install/copy-migrations.js ADDED
@@ -0,0 +1,70 @@
+ import fs from 'fs';
+ import path from 'path';
+ import { fileURLToPath } from 'url';
+ import { log, confirm, note } from '@clack/prompts';
+ import chalk from 'chalk';
+ // Get the directory name in ES modules
+ const __filename = fileURLToPath(import.meta.url);
+ const __dirname = path.dirname(__filename);
+ // Path to the pgflow migrations
+ const sourcePath = path.resolve(__dirname, '../../../migrations');
+ export async function copyMigrations({ supabasePath, }) {
+     const migrationsPath = path.join(supabasePath, 'migrations');
+     if (!fs.existsSync(migrationsPath)) {
+         fs.mkdirSync(migrationsPath);
+     }
+     // Check if pgflow migrations directory exists
+     if (!fs.existsSync(sourcePath)) {
+         log.error(`Source migrations directory not found at ${sourcePath}`);
+         log.info("This might happen if you're running from source instead of the built package.");
+         log.info('Try building the package first with: nx build cli');
+         return false;
+     }
+     const files = fs.readdirSync(sourcePath);
+     const filesToCopy = [];
+     const skippedFiles = [];
+     // Determine which files need to be copied
+     for (const file of files) {
+         const destination = path.join(migrationsPath, file);
+         if (fs.existsSync(destination)) {
+             skippedFiles.push(file);
+         }
+         else {
+             filesToCopy.push(file);
+         }
+     }
+     // If no files to copy, show message and return false (no changes made)
+     if (filesToCopy.length === 0) {
+         log.info('No new migrations to copy - all migrations are already in place');
+         return false;
+     }
+     // Prepare summary message with colored output
+     const summaryParts = [];
+     if (filesToCopy.length > 0) {
+         summaryParts.push(`${chalk.green('Files to be copied:')}
+ ${filesToCopy.map((file) => `${chalk.green('+')} ${file}`).join('\n')}`);
+     }
+     if (skippedFiles.length > 0) {
+         summaryParts.push(`${chalk.yellow('Files to be skipped (already exist):')}
+ ${skippedFiles.map((file) => `${chalk.yellow('=')} ${file}`).join('\n')}`);
+     }
+     // Show summary and ask for confirmation
+     note(summaryParts.join('\n\n'), 'Migration Summary');
+     const shouldContinue = await confirm({
+         message: `Do you want to proceed with copying ${filesToCopy.length} migration file${filesToCopy.length !== 1 ? 's' : ''}?`,
+     });
+     if (!shouldContinue) {
+         log.info('Migration copy cancelled');
+         return false;
+     }
+     log.info(`Copying migrations`);
+     // Copy the files
+     for (const file of filesToCopy) {
+         const source = path.join(sourcePath, file);
+         const destination = path.join(migrationsPath, file);
+         fs.copyFileSync(source, destination);
+         log.step(`Copied ${file}`);
+     }
+     log.success(`Successfully copied ${filesToCopy.length} migration file${filesToCopy.length !== 1 ? 's' : ''}`);
+     return true; // Return true to indicate migrations were copied
+ }
package/dist/commands/install/index.d.ts ADDED
@@ -0,0 +1,4 @@
+ import { type Command } from 'commander';
+ declare const _default: (program: Command) => void;
+ export default _default;
+ //# sourceMappingURL=index.d.ts.map
package/dist/commands/install/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/commands/install/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,WAAW,CAAC;kCAMhB,OAAO;AAAhC,wBAsCE"}
package/dist/commands/install/index.js ADDED
@@ -0,0 +1,32 @@
+ import { intro, isCancel, log } from '@clack/prompts';
+ import { copyMigrations } from './copy-migrations.js';
+ import { updateConfigToml } from './update-config-toml.js';
+ import { supabasePathPrompt } from './supabase-path-prompt.js';
+ export default (program) => {
+     program
+         .command('install')
+         .description('Copies migrations and sets config.toml values')
+         .action(async () => {
+         intro('pgflow - Postgres-native workflows for Supabase');
+         const supabasePath = await supabasePathPrompt();
+         if (isCancel(supabasePath)) {
+             log.error('Aborting installation');
+             return;
+         }
+         const migrationsCopied = await copyMigrations({ supabasePath });
+         const configUpdated = await updateConfigToml({ supabasePath });
+         if (migrationsCopied || configUpdated) {
+             log.success('pgflow installation is completed');
+         }
+         if (!migrationsCopied && !configUpdated) {
+             log.success('No changes were made - pgflow is already properly configured.');
+         }
+         // Show specific reminders based on what was actually done
+         if (configUpdated) {
+             log.warn('Remember to restart Supabase for the configuration changes to take effect!');
+         }
+         if (migrationsCopied) {
+             log.warn('Remember to apply the migrations!');
+         }
+     });
+ };
package/dist/commands/install/supabase-path-prompt.d.ts ADDED
@@ -0,0 +1,2 @@
+ export declare function supabasePathPrompt(): Promise<string | symbol>;
+ //# sourceMappingURL=supabase-path-prompt.d.ts.map
package/dist/commands/install/supabase-path-prompt.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"supabase-path-prompt.d.ts","sourceRoot":"","sources":["../../../src/commands/install/supabase-path-prompt.ts"],"names":[],"mappings":"AAIA,wBAAsB,kBAAkB,6BAMvC"}
package/dist/commands/install/supabase-path-prompt.js ADDED
@@ -0,0 +1,24 @@
+ import fs from 'fs';
+ import path from 'path';
+ import { text } from '@clack/prompts';
+ export async function supabasePathPrompt() {
+     return await text({
+         message: 'Enter the path to your supabase/ directory',
+         placeholder: 'supabase/',
+         validate,
+     });
+ }
+ function validate(inputPath) {
+     const pathsToTest = [
+         [inputPath, 'is not a valid path'],
+         [path.join(inputPath, 'config.toml'), 'does not contain config.toml'],
+     ];
+     // if any of the pathsToTest fail, return the error message
+     for (const [testPath, errorMessage] of pathsToTest) {
+         if (!fs.existsSync(testPath)) {
+             return `${testPath} ${errorMessage}`;
+         }
+     }
+     // otherwise, return undefined
+     return undefined;
+ }
package/dist/commands/install/update-config-toml.d.ts ADDED
@@ -0,0 +1,17 @@
+ /**
+  * Updates the config.toml file with necessary configurations for EdgeWorker
+  * while preserving comments and formatting
+  *
+  * Makes the following changes:
+  * 1. Enables the connection pooler
+  * 2. Ensures pool_mode is set to "transaction"
+  * 3. Changes edge_runtime policy from "oneshot" to "per_worker"
+  * 4. Creates a backup of the original config.toml file before making changes
+  *
+  * @param options.supabasePath - Path to the supabase directory
+  * @returns Promise<boolean> - True if changes were made, false otherwise
+  */
+ export declare function updateConfigToml({ supabasePath, }: {
+     supabasePath: string;
+ }): Promise<boolean>;
+ //# sourceMappingURL=update-config-toml.d.ts.map
package/dist/commands/install/update-config-toml.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"update-config-toml.d.ts","sourceRoot":"","sources":["../../../src/commands/install/update-config-toml.ts"],"names":[],"mappings":"AAqBA;;;;;;;;;;;;GAYG;AACH,wBAAsB,gBAAgB,CAAC,EACrC,YAAY,GACb,EAAE;IACD,YAAY,EAAE,MAAM,CAAC;CACtB,GAAG,OAAO,CAAC,OAAO,CAAC,CA2FnB"}
package/dist/commands/install/update-config-toml.js ADDED
@@ -0,0 +1,87 @@
+ import fs from 'fs';
+ import path from 'path';
+ import { log, confirm, note } from '@clack/prompts';
+ import * as TOML from 'toml-patch';
+ import chalk from 'chalk';
+ /**
+  * Updates the config.toml file with necessary configurations for EdgeWorker
+  * while preserving comments and formatting
+  *
+  * Makes the following changes:
+  * 1. Enables the connection pooler
+  * 2. Ensures pool_mode is set to "transaction"
+  * 3. Changes edge_runtime policy from "oneshot" to "per_worker"
+  * 4. Creates a backup of the original config.toml file before making changes
+  *
+  * @param options.supabasePath - Path to the supabase directory
+  * @returns Promise<boolean> - True if changes were made, false otherwise
+  */
+ export async function updateConfigToml({ supabasePath, }) {
+     const configPath = path.join(supabasePath, 'config.toml');
+     const backupPath = `${configPath}.backup`;
+     try {
+         if (!fs.existsSync(configPath)) {
+             throw new Error(`config.toml not found at ${configPath}`);
+         }
+         const configContent = fs.readFileSync(configPath, 'utf8');
+         const config = TOML.parse(configContent);
+         const currentSettings = {
+             poolerEnabled: config.db?.pooler?.enabled ?? false,
+             poolMode: config.db?.pooler?.pool_mode ?? 'none',
+             edgeRuntimePolicy: config.edge_runtime?.policy ?? 'oneshot',
+         };
+         const needsChanges = currentSettings.poolerEnabled !== true ||
+             currentSettings.poolMode !== 'transaction' ||
+             currentSettings.edgeRuntimePolicy !== 'per_worker';
+         if (!needsChanges) {
+             log.info(`No changes needed in config.toml - all required settings are already configured`);
+             return false;
+         }
+         const changes = [];
+         if (currentSettings.poolerEnabled !== true) {
+             changes.push(`[db.pooler]
+ ${chalk.red(`- enabled = ${currentSettings.poolerEnabled}`)}
+ ${chalk.green('+ enabled = true')}`);
+         }
+         if (currentSettings.poolMode !== 'transaction') {
+             changes.push(`[db.pooler]
+ ${chalk.red(`- pool_mode = "${currentSettings.poolMode}"`)}
+ ${chalk.green('+ pool_mode = "transaction"')}`);
+         }
+         if (currentSettings.edgeRuntimePolicy !== 'per_worker') {
+             changes.push(`[edge_runtime]
+ ${chalk.red(`- policy = "${currentSettings.edgeRuntimePolicy}"`)}
+ ${chalk.green('+ policy = "per_worker"')}`);
+         }
+         note(changes.join('\n\n'), 'Config Changes');
+         const shouldContinue = await confirm({
+             message: `Do you want to proceed with these configuration changes? A backup will be created at ${backupPath}`,
+         });
+         if (!shouldContinue) {
+             log.info('Configuration update cancelled');
+             return false;
+         }
+         fs.copyFileSync(configPath, backupPath);
+         log.info(`Created backup at ${backupPath}`);
+         log.info(`Updating config.toml`);
+         const updatedConfig = { ...config };
+         // Ensure required objects exist and set values
+         if (!updatedConfig.db)
+             updatedConfig.db = {};
+         if (!updatedConfig.db.pooler)
+             updatedConfig.db.pooler = {};
+         if (!updatedConfig.edge_runtime)
+             updatedConfig.edge_runtime = {};
+         updatedConfig.db.pooler.enabled = true;
+         updatedConfig.db.pooler.pool_mode = 'transaction';
+         updatedConfig.edge_runtime.policy = 'per_worker';
+         const updatedContent = TOML.patch(configContent, updatedConfig);
+         fs.writeFileSync(configPath, updatedContent);
+         log.success(`Successfully updated ${configPath} (backup created at ${backupPath})`);
+         return true;
+     }
+     catch (error) {
+         log.error(`Failed to update ${configPath}: ${error instanceof Error ? error.message : String(error)}`);
+         throw error;
+     }
+ }
package/dist/index.d.ts ADDED
@@ -0,0 +1,3 @@
+ #!/usr/bin/env node
+ export {};
+ //# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":""}
package/dist/index.js ADDED
@@ -0,0 +1,67 @@
+ #!/usr/bin/env node
+ import { Command } from 'commander';
+ import { fileURLToPath } from 'url';
+ import { readFileSync } from 'fs';
+ import { dirname, join } from 'path';
+ import installCommand from './commands/install/index.js';
+ // Create a function to handle errors
+ const errorHandler = (error) => {
+     console.error('Error:', error instanceof Error ? error.message : String(error));
+     process.exit(1);
+ };
+ // Set up process-wide unhandled rejection handler
+ process.on('unhandledRejection', errorHandler);
+ // Function to get version from package.json
+ function getVersion() {
+     const __filename = fileURLToPath(import.meta.url);
+     const __dirname = dirname(__filename);
+     const packageJsonPath = join(__dirname, '..', 'package.json');
+     try {
+         const packageJson = JSON.parse(readFileSync(packageJsonPath, 'utf8'));
+         return packageJson.version || 'unknown';
+     }
+     catch (error) {
+         // Log error but don't display it to the user when showing version
+         console.error('Error reading package.json:', error);
+         return 'unknown';
+     }
+ }
+ const program = new Command();
+ program
+     .name('npx pgflow')
+     .description('Command line interface to help you work with pgflow')
+     .version(getVersion())
+     .exitOverride((err) => {
+     // Don't treat version display as an error
+     if (err.code === 'commander.version') {
+         process.exit(0);
+     }
+     throw err;
+ });
+ installCommand(program);
+ // Use a promise-aware approach to parse arguments
+ async function main() {
+     try {
+         await program.parseAsync(process.argv);
+         // If we get here with no command specified, it's not an error
+         process.exitCode = 0;
+     }
+     catch (err) {
+         // Commander throws a CommanderError when help or version is displayed
+         // We want to exit with code 0 in these cases
+         if (err &&
+             typeof err === 'object' &&
+             'code' in err &&
+             (err.code === 'commander.helpDisplayed' ||
+                 err.code === 'commander.help' ||
+                 err.code === 'commander.version')) {
+             process.exitCode = 0;
+         }
+         else {
+             // For other errors, use our error handler
+             errorHandler(err);
+         }
+     }
+ }
+ // Execute and handle any errors
+ main();
package/dist/migrations/000000_schema.sql ADDED
@@ -0,0 +1,150 @@
+ create extension if not exists pgmq version '1.4.4';
+
+ create schema if not exists pgflow;
+ set search_path to pgflow;
+
+ --------------------------------------------------------------------------
+ ------------------ TODO: fix me, UNSECURE --------------------------------
+ --------------------------------------------------------------------------
+ grant usage on schema pgflow to anon, authenticated, service_role;
+ grant all on all tables in schema pgflow to anon, authenticated, service_role;
+ grant all on all routines in schema pgflow to anon, authenticated, service_role;
+ grant all on all sequences in schema pgflow to anon,
+   authenticated,
+   service_role;
+ alter default privileges for role postgres in schema pgflow
+   grant all on tables to anon, authenticated, service_role;
+ alter default privileges for role postgres in schema pgflow
+   grant all on routines to anon, authenticated, service_role;
+ alter default privileges for role postgres in schema pgflow
+   grant all on sequences to anon, authenticated, service_role;
+
+ ------------------------------------------
+ -- Core flow definition tables
+ ------------------------------------------
+
+ ----- check constraint helper function -------
+ create or replace function pgflow.is_valid_slug(
+   slug text
+ )
+ returns boolean
+ language plpgsql
+ immutable
+ as $$
+ begin
+   return
+     slug is not null
+     and slug <> ''
+     and length(slug) <= 128
+     and slug ~ '^[a-zA-Z_][a-zA-Z0-9_]*$'
+     and slug NOT IN ('run'); -- reserved words
+ end;
+ $$;
+
+ -- Flows table - stores flow definitions
+ create table pgflow.flows (
+   flow_slug text primary key not null, -- Unique identifier for the flow
+   opt_max_attempts int not null default 3,
+   opt_base_delay int not null default 1,
+   opt_timeout int not null default 60,
+   constraint slug_is_valid check (is_valid_slug(flow_slug)),
+   constraint opt_max_attempts_is_nonnegative check (opt_max_attempts >= 0),
+   constraint opt_base_delay_is_nonnegative check (opt_base_delay >= 0),
+   constraint opt_timeout_is_positive check (opt_timeout > 0)
+ );
+
+ -- Steps table - stores individual steps within flows
+ create table pgflow.steps (
+   flow_slug text not null references flows (flow_slug),
+   step_slug text not null,
+   step_type text not null default 'single',
+   deps_count int not null default 0 check (deps_count >= 0),
+   opt_max_attempts int,
+   opt_base_delay int,
+   opt_timeout int,
+   primary key (flow_slug, step_slug),
+   check (is_valid_slug(step_slug)),
+   check (step_type in ('single')),
+   constraint opt_max_attempts_is_nonnegative check (opt_max_attempts is null or opt_max_attempts >= 0),
+   constraint opt_base_delay_is_nonnegative check (opt_base_delay is null or opt_base_delay >= 0),
+   constraint opt_timeout_is_positive check (opt_timeout is null or opt_timeout > 0)
+ );
+
+ -- Dependencies table - stores relationships between steps
+ create table pgflow.deps (
+   flow_slug text not null references pgflow.flows (flow_slug),
+   dep_slug text not null, -- slug of the dependency
+   step_slug text not null, -- slug of the dependent
+   primary key (flow_slug, dep_slug, step_slug),
+   foreign key (flow_slug, dep_slug)
+     references pgflow.steps (flow_slug, step_slug),
+   foreign key (flow_slug, step_slug)
+     references pgflow.steps (flow_slug, step_slug),
+   check (dep_slug != step_slug) -- Prevent self-dependencies
+ );
+
+ ------------------------------------------
+ -- Runtime State Tables
+ ------------------------------------------
+
+ -- Runs table - tracks flow execution instances
+ create table pgflow.runs (
+   run_id uuid primary key not null default gen_random_uuid(),
+   flow_slug text not null references pgflow.flows (flow_slug), -- denormalized
+   status text not null default 'started',
+   input jsonb not null,
+   output jsonb,
+   remaining_steps int not null default 0 check (remaining_steps >= 0),
+   check (status in ('started', 'failed', 'completed'))
+ );
+
+ -- Step states table - tracks the state of individual steps within a run
+ create table pgflow.step_states (
+   flow_slug text not null references pgflow.flows (flow_slug),
+   run_id uuid not null references pgflow.runs (run_id),
+   step_slug text not null,
+   status text not null default 'created',
+   remaining_tasks int not null default 1 check (remaining_tasks >= 0),
+   remaining_deps int not null default 0 check (remaining_deps >= 0),
+   primary key (run_id, step_slug),
+   foreign key (flow_slug, step_slug)
+     references pgflow.steps (flow_slug, step_slug),
+   check (status in ('created', 'started', 'completed', 'failed')),
+   check (status != 'completed' or remaining_tasks = 0)
+ );
+
+ -- Step tasks table - tracks units of work for step
+ create table pgflow.step_tasks (
+   flow_slug text not null references pgflow.flows (flow_slug),
+   run_id uuid not null references pgflow.runs (run_id),
+   step_slug text not null,
+   message_id bigint,
+   task_index int not null default 0,
+   status text not null default 'queued',
+   attempts_count int not null default 0,
+   error_message text,
+   output jsonb,
+   constraint step_tasks_pkey primary key (run_id, step_slug, task_index),
+   foreign key (run_id, step_slug)
+     references pgflow.step_states (run_id, step_slug),
+   constraint valid_status check (
+     status in ('queued', 'completed', 'failed')
+   ),
+   constraint output_valid_only_for_completed check (
+     output is null or status = 'completed'
+   ),
+   constraint only_single_task_per_step check (task_index = 0),
+   constraint attempts_count_nonnegative check (attempts_count >= 0)
+ );
+
+ ------------------------------------------
+ -- Types
+ ------------------------------------------
+
+ create type pgflow.step_task_record as (
+   flow_slug text,
+   run_id uuid,
+   step_slug text,
+   input jsonb,
+   msg_id bigint
+ );
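
A note on the schema above: is_valid_slug gates every flow and step name, accepting identifier-style slugs up to 128 characters and rejecting the reserved word 'run'. A quick sanity check against the regex in the migration (the slug values here are hypothetical examples, not part of the package):

    -- identifier-style slug: accepted
    select pgflow.is_valid_slug('greet_user');  -- true
    -- leading digit and reserved word: rejected
    select pgflow.is_valid_slug('1st_step');    -- false
    select pgflow.is_valid_slug('run');         -- false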
package/dist/migrations/000005_create_flow.sql ADDED
@@ -0,0 +1,29 @@
+ create or replace function pgflow.create_flow(
+   flow_slug text,
+   max_attempts int default 3,
+   base_delay int default 5,
+   timeout int default 60
+ )
+ returns pgflow.flows
+ language sql
+ set search_path to ''
+ volatile
+ as $$
+ WITH
+   flow_upsert AS (
+     INSERT INTO pgflow.flows (flow_slug, opt_max_attempts, opt_base_delay, opt_timeout)
+     VALUES (flow_slug, max_attempts, base_delay, timeout)
+     ON CONFLICT (flow_slug) DO UPDATE
+       SET flow_slug = pgflow.flows.flow_slug -- Dummy update
+     RETURNING *
+   ),
+   ensure_queue AS (
+     SELECT pgmq.create(flow_slug)
+     WHERE NOT EXISTS (
+       SELECT 1 FROM pgmq.list_queues() WHERE queue_name = flow_slug
+     )
+   )
+ SELECT f.*
+ FROM flow_upsert f
+ LEFT JOIN (SELECT 1 FROM ensure_queue) _dummy ON true; -- Left join ensures flow is returned
+ $$;
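
create_flow is idempotent: the dummy upsert returns the existing row unchanged on conflict, and the pgmq queue (named after the flow slug) is created only when missing. A minimal usage sketch, with a hypothetical flow slug:

    -- create or fetch a flow with custom retry options; safe to re-run
    select * from pgflow.create_flow('greet_user', max_attempts => 5, base_delay => 2);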
package/dist/migrations/000010_add_step.sql ADDED
@@ -0,0 +1,48 @@
+ create or replace function pgflow.add_step(
+   flow_slug text,
+   step_slug text,
+   deps_slugs text [],
+   max_attempts int default null,
+   base_delay int default null,
+   timeout int default null
+ )
+ returns pgflow.steps
+ language sql
+ set search_path to ''
+ volatile
+ as $$
+ WITH
+   create_step AS (
+     INSERT INTO pgflow.steps (flow_slug, step_slug, deps_count, opt_max_attempts, opt_base_delay, opt_timeout)
+     VALUES (flow_slug, step_slug, COALESCE(array_length(deps_slugs, 1), 0), max_attempts, base_delay, timeout)
+     ON CONFLICT (flow_slug, step_slug)
+       DO UPDATE SET step_slug = pgflow.steps.step_slug
+     RETURNING *
+   ),
+   insert_deps AS (
+     INSERT INTO pgflow.deps (flow_slug, dep_slug, step_slug)
+     SELECT add_step.flow_slug, d.dep_slug, add_step.step_slug
+     FROM unnest(deps_slugs) AS d(dep_slug)
+     ON CONFLICT (flow_slug, dep_slug, step_slug) DO NOTHING
+     RETURNING 1
+   )
+ -- Return the created step
+ SELECT * FROM create_step;
+ $$;
+
+ -- New overloaded function without deps_slugs parameter
+ create or replace function pgflow.add_step(
+   flow_slug text,
+   step_slug text,
+   max_attempts int default null,
+   base_delay int default null,
+   timeout int default null
+ )
+ returns pgflow.steps
+ language sql
+ set search_path to ''
+ volatile
+ as $$
+ -- Call the original function with an empty array
+ SELECT * FROM pgflow.add_step(flow_slug, step_slug, ARRAY[]::text[], max_attempts, base_delay, timeout);
+ $$;
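
Together the two overloads cover root steps and dependent steps. A hedged sketch of wiring a two-step DAG on the hypothetical greet_user flow from the earlier example:

    -- root step: no dependencies (uses the overload without deps_slugs)
    select * from pgflow.add_step('greet_user', 'fetch_name');
    -- dependent step: becomes ready only after fetch_name completes
    select * from pgflow.add_step('greet_user', 'say_hello', array['fetch_name']);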
package/dist/migrations/000015_start_ready_steps.sql ADDED
@@ -0,0 +1,45 @@
+ create or replace function pgflow.start_ready_steps(run_id uuid)
+ returns void
+ language sql
+ set search_path to ''
+ as $$
+
+ WITH ready_steps AS (
+   SELECT *
+   FROM pgflow.step_states AS step_state
+   WHERE step_state.run_id = start_ready_steps.run_id
+     AND step_state.status = 'created'
+     AND step_state.remaining_deps = 0
+   ORDER BY step_state.step_slug
+   FOR UPDATE
+ ),
+ started_step_states AS (
+   UPDATE pgflow.step_states
+   SET status = 'started'
+   FROM ready_steps
+   WHERE pgflow.step_states.run_id = start_ready_steps.run_id
+     AND pgflow.step_states.step_slug = ready_steps.step_slug
+   RETURNING pgflow.step_states.*
+ ),
+ sent_messages AS (
+   SELECT
+     started_step.flow_slug,
+     started_step.run_id,
+     started_step.step_slug,
+     pgmq.send(started_step.flow_slug, jsonb_build_object(
+       'flow_slug', started_step.flow_slug,
+       'run_id', started_step.run_id,
+       'step_slug', started_step.step_slug,
+       'task_index', 0
+     )) AS msg_id
+   FROM started_step_states AS started_step
+ )
+ INSERT INTO pgflow.step_tasks (flow_slug, run_id, step_slug, message_id)
+ SELECT
+   sent_messages.flow_slug,
+   sent_messages.run_id,
+   sent_messages.step_slug,
+   sent_messages.msg_id
+ FROM sent_messages;
+
+ $$;
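
start_ready_steps is internal plumbing rather than a user-facing call (start_flow in the next migration invokes it via PERFORM): each dependency-free 'created' step flips to 'started', one message per step lands on the flow's pgmq queue, and a matching step_tasks row records the message id. Based on the jsonb_build_object call above, a queued payload looks like this (values hypothetical):

    -- shape of a message body on the 'greet_user' queue:
    -- {"flow_slug": "greet_user", "run_id": "<run uuid>", "step_slug": "fetch_name", "task_index": 0}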
package/dist/migrations/000020_start_flow.sql ADDED
@@ -0,0 +1,46 @@
+ -- drop function if exists pgflow.start_flow;
+ create or replace function pgflow.start_flow(
+   flow_slug TEXT,
+   input JSONB
+ )
+ returns setof PGFLOW.RUNS
+ language plpgsql
+ set search_path to ''
+ volatile
+ as $$
+ declare
+   v_created_run pgflow.runs%ROWTYPE;
+ begin
+
+ WITH
+   flow_steps AS (
+     SELECT steps.flow_slug, steps.step_slug, steps.deps_count
+     FROM pgflow.steps
+     WHERE steps.flow_slug = start_flow.flow_slug
+   ),
+   created_run AS (
+     INSERT INTO pgflow.runs (flow_slug, input, remaining_steps)
+     VALUES (
+       start_flow.flow_slug,
+       start_flow.input,
+       (SELECT count(*) FROM flow_steps)
+     )
+     RETURNING *
+   ),
+   created_step_states AS (
+     INSERT INTO pgflow.step_states (flow_slug, run_id, step_slug, remaining_deps)
+     SELECT
+       fs.flow_slug,
+       (SELECT run_id FROM created_run),
+       fs.step_slug,
+       fs.deps_count
+     FROM flow_steps fs
+   )
+ SELECT * FROM created_run INTO v_created_run;
+
+ PERFORM pgflow.start_ready_steps(v_created_run.run_id);
+
+ RETURN QUERY SELECT * FROM pgflow.runs where run_id = v_created_run.run_id;
+
+ end;
+ $$;
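
start_flow ties the pieces together: it counts the flow's steps into runs.remaining_steps, seeds one step_states row per step with its remaining_deps, then queues every root step via start_ready_steps. A usage sketch continuing the hypothetical example:

    -- returns the new pgflow.runs row for the started run
    select run_id, status, remaining_steps
    from pgflow.start_flow('greet_user', '{"user_id": 42}'::jsonb);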