@cascade-flow/backend-postgres 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +446 -0
- package/dist/db.d.ts +107 -0
- package/dist/db.d.ts.map +1 -0
- package/dist/index.d.ts +265 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +6583 -0
- package/dist/index.js.map +53 -0
- package/dist/migrations.d.ts +7 -0
- package/dist/migrations.d.ts.map +1 -0
- package/package.json +54 -0
package/README.md
ADDED
|
@@ -0,0 +1,446 @@
|
|
|
1
|
+
# Backend Postgres
|
|
2
|
+
|
|
3
|
+
PostgreSQL-based implementation of the Backend interface for the workflow orchestrator.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Event Sourcing**: Immutable append-only event storage in PostgreSQL
|
|
8
|
+
- **Automatic Schema Initialization**: Idempotent migrations run automatically on first connection
|
|
9
|
+
- **Race-Safe Step Claiming**: Atomic step claiming using `SELECT FOR UPDATE SKIP LOCKED`
|
|
10
|
+
- **Events-as-Queue Pattern**: Queue state derived from events (no separate queue table)
|
|
11
|
+
- **JSONB Storage**: Flexible schema evolution with JSONB columns
|
|
12
|
+
- **Full TypeScript Support**: Complete type safety across all operations
|
|
13
|
+
- **Easy Database Migration**: All `pg` usage isolated in `db.ts` for easy library swapping
|
|
14
|
+
|
|
15
|
+
## Installation
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
bun add @cascade-flow/backend-postgres pg
|
|
19
|
+
bun add -d @types/pg
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## Quick Start
|
|
23
|
+
|
|
24
|
+
### Basic Usage
|
|
25
|
+
|
|
26
|
+
```typescript
|
|
27
|
+
import { PostgresBackend } from "@cascade-flow/backend-postgres";
|
|
28
|
+
|
|
29
|
+
// Create backend with connection string
|
|
30
|
+
const backend = new PostgresBackend("postgres://user:password@localhost:5432/workflow_db");
|
|
31
|
+
|
|
32
|
+
// The database schema is automatically initialized on first use
|
|
33
|
+
// No manual migration commands needed!
|
|
34
|
+
|
|
35
|
+
// Use with the runner
|
|
36
|
+
import { runAll } from "@cascade-flow/runner";
|
|
37
|
+
const result = await runAll(workflow, input, { backend });
|
|
38
|
+
|
|
39
|
+
// Don't forget to close the connection pool when done
|
|
40
|
+
await backend.close();
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
### With CLI
|
|
44
|
+
|
|
45
|
+
You can use the Postgres backend with the CLI by setting the `POSTGRES_URL` environment variable:
|
|
46
|
+
|
|
47
|
+
```bash
|
|
48
|
+
# Set the Postgres connection string
|
|
49
|
+
export POSTGRES_URL="postgres://user:password@localhost:5432/workflow_db"
|
|
50
|
+
|
|
51
|
+
# Run workflows
|
|
52
|
+
wfo run my-workflow --input '{"key": "value"}'
|
|
53
|
+
|
|
54
|
+
# Queue-based execution
|
|
55
|
+
wfo worker start # Uses Postgres backend
|
|
56
|
+
wfo submit my-workflow --input '{...}'
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
### Programmatic Usage
|
|
60
|
+
|
|
61
|
+
```typescript
|
|
62
|
+
import { PostgresBackend } from "@cascade-flow/backend-postgres";
|
|
63
|
+
import { Client } from "@cascade-flow/client";
|
|
64
|
+
|
|
65
|
+
const backend = new PostgresBackend(process.env.POSTGRES_URL!);
|
|
66
|
+
const client = new Client({ backend });
|
|
67
|
+
|
|
68
|
+
// Submit a workflow run
|
|
69
|
+
const runId = await client.submit({
|
|
70
|
+
workflowSlug: "my-workflow",
|
|
71
|
+
input: { message: "Hello" },
|
|
72
|
+
tags: ["production"],
|
|
73
|
+
idempotencyKey: "unique-key-123", // Optional deduplication
|
|
74
|
+
});
|
|
75
|
+
|
|
76
|
+
// Wait for completion
|
|
77
|
+
const result = await client.waitForCompletion(runId);
|
|
78
|
+
console.log("Output:", result.output);
|
|
79
|
+
|
|
80
|
+
// Cleanup
|
|
81
|
+
await backend.close();
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
## Connection String Format
|
|
85
|
+
|
|
86
|
+
The connection string follows the standard PostgreSQL format:
|
|
87
|
+
|
|
88
|
+
```
|
|
89
|
+
postgres://[user[:password]@][host][:port][/database][?option=value]
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
Examples:
|
|
93
|
+
|
|
94
|
+
```bash
|
|
95
|
+
# Local development
|
|
96
|
+
postgres://postgres:postgres@localhost:5432/workflow_dev
|
|
97
|
+
|
|
98
|
+
# With SSL
|
|
99
|
+
postgres://user:pass@db.example.com:5432/workflows?sslmode=require
|
|
100
|
+
|
|
101
|
+
# Unix socket
|
|
102
|
+
postgres://user@/dbname?host=/var/run/postgresql
|
|
103
|
+
|
|
104
|
+
# Cloud providers (example: Neon, Supabase, etc.)
|
|
105
|
+
postgres://user:password@ep-xyz.region.provider.com/dbname?sslmode=require
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
## Database Schema
|
|
109
|
+
|
|
110
|
+
The backend creates and manages the following tables:
|
|
111
|
+
|
|
112
|
+
### Event Tables
|
|
113
|
+
|
|
114
|
+
**`workflow_events`** - Workflow-level events
|
|
115
|
+
- `id` (serial) - Primary key
|
|
116
|
+
- `event_id` (text) - Microsecond timestamp string
|
|
117
|
+
- `workflow_slug` (text) - Workflow identifier
|
|
118
|
+
- `run_id` (text) - Run identifier
|
|
119
|
+
- `timestamp_us` (bigint) - Event timestamp in microseconds
|
|
120
|
+
- `category` (text) - Always 'workflow'
|
|
121
|
+
- `type` (text) - Event type (WorkflowStarted, WorkflowCompleted, etc.)
|
|
122
|
+
- `data` (jsonb) - Full event payload
|
|
123
|
+
|
|
124
|
+
**`step_events`** - Step-level events
|
|
125
|
+
- `id` (serial) - Primary key
|
|
126
|
+
- `event_id` (text) - Microsecond timestamp string
|
|
127
|
+
- `workflow_slug` (text) - Workflow identifier
|
|
128
|
+
- `run_id` (text) - Run identifier
|
|
129
|
+
- `step_id` (text) - Step identifier
|
|
130
|
+
- `timestamp_us` (bigint) - Event timestamp in microseconds
|
|
131
|
+
- `category` (text) - Always 'step'
|
|
132
|
+
- `type` (text) - Event type (StepScheduled, StepStarted, etc.)
|
|
133
|
+
- `data` (jsonb) - Full event payload
|
|
134
|
+
|
|
135
|
+
### Registry Tables
|
|
136
|
+
|
|
137
|
+
**`workflow_metadata`** - Workflow registry
|
|
138
|
+
- `slug` (text) - Primary key
|
|
139
|
+
- `name` (text) - Display name
|
|
140
|
+
- `description` (text) - Optional description
|
|
141
|
+
- `input_schema_json` (jsonb) - JSON Schema for input validation
|
|
142
|
+
- `tags` (text[]) - Tags array
|
|
143
|
+
|
|
144
|
+
**`step_definitions`** - Step definitions
|
|
145
|
+
- `workflow_slug` (text) - References workflow_metadata(slug)
|
|
146
|
+
- `id` (text) - Step identifier
|
|
147
|
+
- `dependencies` (jsonb) - Step dependencies
|
|
148
|
+
- `export_output` (boolean) - Whether output is exported
|
|
149
|
+
- `input_schema_json` (jsonb) - JSON Schema for step input
|
|
150
|
+
- `timeout_ms` (integer) - Step timeout
|
|
151
|
+
- `max_retries` (integer) - Maximum retry attempts
|
|
152
|
+
- `retry_delay_ms` (integer) - Delay between retries
|
|
153
|
+
|
|
154
|
+
### Supporting Tables
|
|
155
|
+
|
|
156
|
+
**`step_outputs`** - Serialized step outputs
|
|
157
|
+
- `workflow_slug`, `run_id`, `step_id`, `attempt_number` - Composite primary key
|
|
158
|
+
- `output` (jsonb) - Step output data
|
|
159
|
+
|
|
160
|
+
**`idempotency_keys`** - Deduplication
|
|
161
|
+
- `hash` (text) - Primary key (SHA256 of idempotency key)
|
|
162
|
+
- `run_id` (text) - Associated run ID
|
|
163
|
+
|
|
164
|
+
## Migration Strategy
|
|
165
|
+
|
|
166
|
+
The backend uses **idempotent migrations** that are safe to rerun. Migrations are automatically executed on the first database connection.
|
|
167
|
+
|
|
168
|
+
### Current Migrations
|
|
169
|
+
|
|
170
|
+
1. **Migration 001**: Create tables - Uses `CREATE TABLE IF NOT EXISTS`
|
|
171
|
+
2. **Migration 002**: Create indexes - Uses `CREATE INDEX IF NOT EXISTS`
|
|
172
|
+
|
|
173
|
+
### Adding New Migrations
|
|
174
|
+
|
|
175
|
+
To add a new migration:
|
|
176
|
+
|
|
177
|
+
1. Add a new migration function to `src/migrations.ts`:
|
|
178
|
+
|
|
179
|
+
```typescript
|
|
180
|
+
async function migration003_addNewFeature(pool: Pool): Promise<void> {
|
|
181
|
+
const client = await pool.connect();
|
|
182
|
+
try {
|
|
183
|
+
// Your migration code here (must be idempotent!)
|
|
184
|
+
await client.query(`
|
|
185
|
+
ALTER TABLE workflow_metadata
|
|
186
|
+
ADD COLUMN IF NOT EXISTS new_field TEXT
|
|
187
|
+
`);
|
|
188
|
+
|
|
189
|
+
console.log("[Migration 003] Added new feature");
|
|
190
|
+
} catch (error) {
|
|
191
|
+
console.error("[Migration 003] Error:", error);
|
|
192
|
+
throw error;
|
|
193
|
+
} finally {
|
|
194
|
+
client.release();
|
|
195
|
+
}
|
|
196
|
+
}
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
2. Add it to the `runMigrations()` function:
|
|
200
|
+
|
|
201
|
+
```typescript
|
|
202
|
+
export async function runMigrations(pool: Pool): Promise<void> {
|
|
203
|
+
await migration001_createTables(pool);
|
|
204
|
+
await migration002_createIndexes(pool);
|
|
205
|
+
await migration003_addNewFeature(pool); // Add here
|
|
206
|
+
}
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
### Idempotency Best Practices
|
|
210
|
+
|
|
211
|
+
All migrations must be idempotent (safe to run multiple times):
|
|
212
|
+
|
|
213
|
+
✅ **Good** (idempotent):
|
|
214
|
+
```sql
|
|
215
|
+
CREATE TABLE IF NOT EXISTS my_table (...);
|
|
216
|
+
CREATE INDEX IF NOT EXISTS idx_name ON table (column);
|
|
217
|
+
ALTER TABLE table ADD COLUMN IF NOT EXISTS new_col TEXT;
|
|
218
|
+
```
|
|
219
|
+
|
|
220
|
+
❌ **Bad** (not idempotent):
|
|
221
|
+
```sql
|
|
222
|
+
CREATE TABLE my_table (...); -- Fails if table exists
|
|
223
|
+
ALTER TABLE table ADD COLUMN new_col TEXT; -- Fails if column exists
|
|
224
|
+
```
|
|
225
|
+
|
|
226
|
+
## Architecture Details
|
|
227
|
+
|
|
228
|
+
### Event Sourcing
|
|
229
|
+
|
|
230
|
+
All state changes are stored as immutable events:
|
|
231
|
+
|
|
232
|
+
```typescript
|
|
233
|
+
// Events are never updated or deleted
|
|
234
|
+
await backend.appendEvent(workflowSlug, runId, {
|
|
235
|
+
category: "step",
|
|
236
|
+
type: "StepStarted",
|
|
237
|
+
eventId: "1234567890123456",
|
|
238
|
+
timestampUs: 1234567890123456,
|
|
239
|
+
stepId: "my-step",
|
|
240
|
+
workerId: "worker-1",
|
|
241
|
+
// ... other event-specific fields
|
|
242
|
+
});
|
|
243
|
+
|
|
244
|
+
// Current state is computed by projecting events
|
|
245
|
+
const events = await backend.loadEvents(workflowSlug, runId);
|
|
246
|
+
const currentState = projectRunStateFromEvents(events, workflowSlug);
|
|
247
|
+
```
|
|
248
|
+
|
|
249
|
+
### Events-as-Queue Pattern
|
|
250
|
+
|
|
251
|
+
There is no separate "queue" table. Queue state is derived by projecting the `RunSubmitted` event along with workflow events:
|
|
252
|
+
|
|
253
|
+
```typescript
|
|
254
|
+
// Submitting creates a RunSubmitted event
|
|
255
|
+
const runId = await backend.submitRun({
|
|
256
|
+
workflowSlug: "my-workflow",
|
|
257
|
+
input: { data: "..." },
|
|
258
|
+
});
|
|
259
|
+
|
|
260
|
+
// Listing runs projects events to compute status
|
|
261
|
+
const runs = await backend.listRuns({
|
|
262
|
+
statuses: ["pending", "running"],
|
|
263
|
+
});
|
|
264
|
+
```
|
|
265
|
+
|
|
266
|
+
### Atomic Step Claiming
|
|
267
|
+
|
|
268
|
+
Step claiming uses PostgreSQL's `SELECT FOR UPDATE SKIP LOCKED` for race-safe concurrency:
|
|
269
|
+
|
|
270
|
+
```typescript
|
|
271
|
+
// Multiple workers can safely claim different steps
|
|
272
|
+
const claimed = await backend.claimScheduledStep(
|
|
273
|
+
workflowSlug,
|
|
274
|
+
runId,
|
|
275
|
+
stepId,
|
|
276
|
+
"worker-1"
|
|
277
|
+
);
|
|
278
|
+
|
|
279
|
+
// Only one worker succeeds per step
|
|
280
|
+
console.log(claimed); // true or false
|
|
281
|
+
```
|
|
282
|
+
|
|
283
|
+
Implementation (simplified):
|
|
284
|
+
|
|
285
|
+
```sql
|
|
286
|
+
BEGIN;
|
|
287
|
+
|
|
288
|
+
-- Acquire lock (SKIP LOCKED prevents blocking)
|
|
289
|
+
SELECT * FROM step_events
|
|
290
|
+
WHERE workflow_slug = $1 AND run_id = $2 AND step_id = $3
|
|
291
|
+
ORDER BY timestamp_us DESC
|
|
292
|
+
LIMIT 1
|
|
293
|
+
FOR UPDATE SKIP LOCKED;
|
|
294
|
+
|
|
295
|
+
-- Verify step is in scheduled state
|
|
296
|
+
-- Write StepStarted event
|
|
297
|
+
|
|
298
|
+
COMMIT;
|
|
299
|
+
```
|
|
300
|
+
|
|
301
|
+
### Time Precision
|
|
302
|
+
|
|
303
|
+
All timestamps use **microsecond precision** (`timestampUs` fields) for correct event ordering in concurrent execution:
|
|
304
|
+
|
|
305
|
+
```typescript
|
|
306
|
+
// Example: 1762916097401523 (microseconds since epoch)
|
|
307
|
+
const timestamp = getMicrosecondTimestamp();
|
|
308
|
+
```
|
|
309
|
+
|
|
310
|
+
## Testing
|
|
311
|
+
|
|
312
|
+
### Integration Tests
|
|
313
|
+
|
|
314
|
+
Integration tests require a running PostgreSQL database:
|
|
315
|
+
|
|
316
|
+
```bash
|
|
317
|
+
# Set test database URL
|
|
318
|
+
export POSTGRES_TEST_URL="postgres://postgres:postgres@localhost:5432/workflow_test"
|
|
319
|
+
|
|
320
|
+
# Run tests
|
|
321
|
+
bun test
|
|
322
|
+
|
|
323
|
+
# Run specific test file
|
|
324
|
+
bun test tests/integration/postgres-backend.integration.test.ts
|
|
325
|
+
```
|
|
326
|
+
|
|
327
|
+
### Docker PostgreSQL for Testing
|
|
328
|
+
|
|
329
|
+
```bash
|
|
330
|
+
# Start PostgreSQL in Docker
|
|
331
|
+
docker run -d \
|
|
332
|
+
--name postgres-test \
|
|
333
|
+
-e POSTGRES_PASSWORD=postgres \
|
|
334
|
+
-e POSTGRES_DB=workflow_test \
|
|
335
|
+
-p 5432:5432 \
|
|
336
|
+
postgres:16
|
|
337
|
+
|
|
338
|
+
# Run tests
|
|
339
|
+
export POSTGRES_TEST_URL="postgres://postgres:postgres@localhost:5432/workflow_test"
|
|
340
|
+
bun test
|
|
341
|
+
|
|
342
|
+
# Cleanup
|
|
343
|
+
docker stop postgres-test
|
|
344
|
+
docker rm postgres-test
|
|
345
|
+
```
|
|
346
|
+
|
|
347
|
+
## Performance Considerations
|
|
348
|
+
|
|
349
|
+
### Indexes
|
|
350
|
+
|
|
351
|
+
The backend creates indexes optimized for common query patterns:
|
|
352
|
+
|
|
353
|
+
- Event lookups: `(workflow_slug, run_id, timestamp_us)`
|
|
354
|
+
- Step claiming: `(workflow_slug, run_id, step_id, type, timestamp_us)`
|
|
355
|
+
- Registry queries: `(workflow_slug)`
|
|
356
|
+
|
|
357
|
+
### Connection Pooling
|
|
358
|
+
|
|
359
|
+
The backend uses `pg.Pool` for efficient connection management. Configure pool size via connection string:
|
|
360
|
+
|
|
361
|
+
```
|
|
362
|
+
postgres://user:pass@host/db?max=20&min=5&idle_timeout=10000
|
|
363
|
+
```
|
|
364
|
+
|
|
365
|
+
### JSONB Storage
|
|
366
|
+
|
|
367
|
+
JSONB columns allow flexible schema evolution but have storage/query trade-offs:
|
|
368
|
+
|
|
369
|
+
- ✅ Flexible schema changes without migrations
|
|
370
|
+
- ✅ Can index specific JSONB fields if needed
|
|
371
|
+
- ⚠️ Slightly larger storage than normalized tables
|
|
372
|
+
- ⚠️ Complex queries on nested JSONB can be slower
|
|
373
|
+
|
|
374
|
+
For high-volume production use, consider adding indexes on frequently-queried JSONB fields:
|
|
375
|
+
|
|
376
|
+
```sql
|
|
377
|
+
CREATE INDEX idx_event_type ON workflow_events ((data->>'type'));
|
|
378
|
+
```
|
|
379
|
+
|
|
380
|
+
## Switching Database Libraries
|
|
381
|
+
|
|
382
|
+
All `pg` usage is isolated in `src/db.ts`, making it easy to swap to another library:
|
|
383
|
+
|
|
384
|
+
1. Install new library (e.g., `postgres`, `kysely`, `drizzle`)
|
|
385
|
+
2. Update `src/db.ts` to use new library's API
|
|
386
|
+
3. Keep function signatures the same
|
|
387
|
+
4. Update `src/migrations.ts` if needed
|
|
388
|
+
|
|
389
|
+
The `PostgresBackend` class in `src/index.ts` doesn't need changes.
|
|
390
|
+
|
|
391
|
+
## Comparison to FileSystemBackend
|
|
392
|
+
|
|
393
|
+
| Feature | FileSystemBackend | PostgresBackend |
|
|
394
|
+
|---------|------------------|-----------------|
|
|
395
|
+
| Storage | Local filesystem | PostgreSQL database |
|
|
396
|
+
| Concurrency | File locks | Database transactions |
|
|
397
|
+
| Scalability | Single machine | Multi-machine clusters |
|
|
398
|
+
| Persistence | Local `.runs/` directory | Remote database |
|
|
399
|
+
| Setup | Zero config | Requires PostgreSQL server |
|
|
400
|
+
| Best for | Development, single-machine | Production, distributed workers |
|
|
401
|
+
|
|
402
|
+
## Troubleshooting
|
|
403
|
+
|
|
404
|
+
### Connection Issues
|
|
405
|
+
|
|
406
|
+
```typescript
|
|
407
|
+
// Error: connection refused
|
|
408
|
+
// Fix: Check PostgreSQL is running and connection string is correct
|
|
409
|
+
|
|
410
|
+
// Error: authentication failed
|
|
411
|
+
// Fix: Verify username/password in connection string
|
|
412
|
+
|
|
413
|
+
// Error: database does not exist
|
|
414
|
+
// Fix: Create database first: createdb workflow_db
|
|
415
|
+
```
|
|
416
|
+
|
|
417
|
+
### Migration Failures
|
|
418
|
+
|
|
419
|
+
```typescript
|
|
420
|
+
// Error: relation already exists
|
|
421
|
+
// Fix: Ensure migrations use IF NOT EXISTS
|
|
422
|
+
|
|
423
|
+
// Error: permission denied
|
|
424
|
+
// Fix: Grant required permissions to database user
|
|
425
|
+
```
|
|
426
|
+
|
|
427
|
+
### Performance Issues
|
|
428
|
+
|
|
429
|
+
```bash
|
|
430
|
+
# Check for missing indexes
|
|
431
|
+
EXPLAIN ANALYZE SELECT ...;
|
|
432
|
+
|
|
433
|
+
# Monitor connection pool
|
|
434
|
+
SELECT * FROM pg_stat_activity WHERE datname = 'workflow_db';
|
|
435
|
+
|
|
436
|
+
# Check table sizes
|
|
437
|
+
SELECT pg_size_pretty(pg_total_relation_size('step_events'));
|
|
438
|
+
```
|
|
439
|
+
|
|
440
|
+
## See Also
|
|
441
|
+
|
|
442
|
+
- [Main CLAUDE.md](../../CLAUDE.md) - Backend architecture overview
|
|
443
|
+
- [Backend Interface](../backend-interface/README.md) - Abstract backend contract
|
|
444
|
+
- [Backend Filesystem](../backend-filesystem/README.md) - Filesystem implementation
|
|
445
|
+
- [Worker README](../worker/README.md) - Distributed worker architecture
|
|
446
|
+
- [Client README](../client/README.md) - Programmatic API usage
|
package/dist/db.d.ts
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Database layer - isolates all pg usage and SQL queries
|
|
3
|
+
* This makes it easy to swap to a different Postgres library in the future
|
|
4
|
+
*/
|
|
5
|
+
import pg from "pg";
|
|
6
|
+
import type { Event, StepEvent, WorkflowMetadata, StepDefinition } from "@cascade-flow/backend-interface";
|
|
7
|
+
export type { Pool } from "pg";
|
|
8
|
+
/**
|
|
9
|
+
* Create a connection pool from a connection string
|
|
10
|
+
*/
|
|
11
|
+
export declare function createPool(connectionString: string): pg.Pool;
|
|
12
|
+
/**
|
|
13
|
+
* Generic event append - handles both workflow and step events
|
|
14
|
+
* Extracts normalized fields from events for efficient querying
|
|
15
|
+
*/
|
|
16
|
+
export declare function appendEvent(pool: pg.Pool, table: "workflow_events" | "step_events", event: Event): Promise<void>;
|
|
17
|
+
/**
|
|
18
|
+
* Append event using an existing client (for transactions)
|
|
19
|
+
* Same as appendEvent but doesn't manage the client connection
|
|
20
|
+
*/
|
|
21
|
+
export declare function appendEventWithClient(client: pg.PoolClient, table: "workflow_events" | "step_events", event: Event): Promise<void>;
|
|
22
|
+
/**
|
|
23
|
+
* Load events with optional filtering
|
|
24
|
+
*/
|
|
25
|
+
export declare function loadEvents(pool: pg.Pool, table: "workflow_events" | "step_events", filters: {
|
|
26
|
+
workflowSlug?: string;
|
|
27
|
+
runId?: string;
|
|
28
|
+
stepId?: string;
|
|
29
|
+
category?: "workflow" | "step";
|
|
30
|
+
types?: string[];
|
|
31
|
+
}): Promise<Event[]>;
|
|
32
|
+
/**
|
|
33
|
+
* Load all events for a specific run (both workflow and step events)
|
|
34
|
+
*/
|
|
35
|
+
export declare function loadAllRunEvents(pool: pg.Pool, workflowSlug: string, runId: string): Promise<Event[]>;
|
|
36
|
+
/**
|
|
37
|
+
* Atomic step claiming using SELECT FOR UPDATE SKIP LOCKED
|
|
38
|
+
* Returns true if claim succeeded, false if already claimed
|
|
39
|
+
*/
|
|
40
|
+
export declare function claimScheduledStep(pool: pg.Pool, workflowSlug: string, runId: string, stepId: string, workerId: string, eventToWrite: StepEvent): Promise<boolean>;
|
|
41
|
+
/**
|
|
42
|
+
* Get all scheduled steps across workflows
|
|
43
|
+
* Filters by available_at_us for delayed scheduling support
|
|
44
|
+
*/
|
|
45
|
+
export declare function listScheduledSteps(pool: pg.Pool, options?: {
|
|
46
|
+
workflowSlugs?: string[];
|
|
47
|
+
limit?: number;
|
|
48
|
+
}): Promise<Array<{
|
|
49
|
+
workflowSlug: string;
|
|
50
|
+
runId: string;
|
|
51
|
+
stepId: string;
|
|
52
|
+
}>>;
|
|
53
|
+
/**
|
|
54
|
+
* Find stale steps (last heartbeat older than threshold)
|
|
55
|
+
* Uses normalized worker_id column for efficient querying
|
|
56
|
+
*/
|
|
57
|
+
export declare function findStaleSteps(pool: pg.Pool, staleThresholdUs: number): Promise<Array<{
|
|
58
|
+
workflowSlug: string;
|
|
59
|
+
runId: string;
|
|
60
|
+
stepId: string;
|
|
61
|
+
workerId: string;
|
|
62
|
+
}>>;
|
|
63
|
+
/**
|
|
64
|
+
* Save step output
|
|
65
|
+
*/
|
|
66
|
+
export declare function saveStepOutput(pool: pg.Pool, workflowSlug: string, runId: string, stepId: string, attemptNumber: number, output: any): Promise<void>;
|
|
67
|
+
/**
|
|
68
|
+
* Load step output
|
|
69
|
+
*/
|
|
70
|
+
export declare function loadStepOutput(pool: pg.Pool, workflowSlug: string, runId: string, stepId: string, attemptNumber: number): Promise<any | null>;
|
|
71
|
+
/**
|
|
72
|
+
* Register workflow metadata (upsert)
|
|
73
|
+
*/
|
|
74
|
+
export declare function upsertWorkflowMetadata(pool: pg.Pool, slug: string, name: string, location: string | undefined, inputSchemaJSON: any | undefined): Promise<void>;
|
|
75
|
+
/**
|
|
76
|
+
* Register step definition (upsert)
|
|
77
|
+
*/
|
|
78
|
+
export declare function upsertStepDefinition(pool: pg.Pool, workflowSlug: string, step: StepDefinition): Promise<void>;
|
|
79
|
+
/**
|
|
80
|
+
* Get workflow metadata by slug
|
|
81
|
+
*/
|
|
82
|
+
export declare function getWorkflowMetadata(pool: pg.Pool, slug: string): Promise<WorkflowMetadata | null>;
|
|
83
|
+
/**
|
|
84
|
+
* List all workflow metadata
|
|
85
|
+
*/
|
|
86
|
+
export declare function listWorkflowMetadata(pool: pg.Pool): Promise<WorkflowMetadata[]>;
|
|
87
|
+
/**
|
|
88
|
+
* Get step definitions for a workflow
|
|
89
|
+
*/
|
|
90
|
+
export declare function getWorkflowSteps(pool: pg.Pool, workflowSlug: string): Promise<StepDefinition[]>;
|
|
91
|
+
/**
|
|
92
|
+
* Save idempotency key (returns existing runId if key already exists)
|
|
93
|
+
*/
|
|
94
|
+
export declare function saveIdempotencyKey(pool: pg.Pool, hash: string, runId: string): Promise<string>;
|
|
95
|
+
/**
|
|
96
|
+
* Get all distinct run IDs for a workflow
|
|
97
|
+
*/
|
|
98
|
+
export declare function listRunIds(pool: pg.Pool, workflowSlug: string): Promise<string[]>;
|
|
99
|
+
/**
|
|
100
|
+
* Get all workflows that have active (incomplete) runs
|
|
101
|
+
*/
|
|
102
|
+
export declare function listActiveWorkflows(pool: pg.Pool): Promise<string[]>;
|
|
103
|
+
/**
|
|
104
|
+
* Check if a run exists
|
|
105
|
+
*/
|
|
106
|
+
export declare function runExists(pool: pg.Pool, workflowSlug: string, runId: string): Promise<boolean>;
|
|
107
|
+
//# sourceMappingURL=db.d.ts.map
|
package/dist/db.d.ts.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"db.d.ts","sourceRoot":"","sources":["../src/db.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAAE,MAAM,IAAI,CAAC;AACpB,OAAO,KAAK,EACV,KAAK,EACL,SAAS,EAET,gBAAgB,EAChB,cAAc,EAEf,MAAM,iCAAiC,CAAC;AAGzC,YAAY,EAAE,IAAI,EAAE,MAAM,IAAI,CAAC;AAE/B;;GAEG;AACH,wBAAgB,UAAU,CAAC,gBAAgB,EAAE,MAAM,GAAG,EAAE,CAAC,IAAI,CAE5D;AAED;;;GAGG;AACH,wBAAsB,WAAW,CAC/B,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,KAAK,EAAE,iBAAiB,GAAG,aAAa,EACxC,KAAK,EAAE,KAAK,GACX,OAAO,CAAC,IAAI,CAAC,CAiHf;AAED;;;GAGG;AACH,wBAAsB,qBAAqB,CACzC,MAAM,EAAE,EAAE,CAAC,UAAU,EACrB,KAAK,EAAE,iBAAiB,GAAG,aAAa,EACxC,KAAK,EAAE,KAAK,GACX,OAAO,CAAC,IAAI,CAAC,CA4Gf;AAED;;GAEG;AACH,wBAAsB,UAAU,CAC9B,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,KAAK,EAAE,iBAAiB,GAAG,aAAa,EACxC,OAAO,EAAE;IACP,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,QAAQ,CAAC,EAAE,UAAU,GAAG,MAAM,CAAC;IAC/B,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;CAClB,GACA,OAAO,CAAC,KAAK,EAAE,CAAC,CA4ClB;AAED;;GAEG;AACH,wBAAsB,gBAAgB,CACpC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,YAAY,EAAE,MAAM,EACpB,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,KAAK,EAAE,CAAC,CAkBlB;AAED;;;GAGG;AACH,wBAAsB,kBAAkB,CACtC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,YAAY,EAAE,MAAM,EACpB,KAAK,EAAE,MAAM,EACb,MAAM,EAAE,MAAM,EACd,QAAQ,EAAE,MAAM,EAChB,YAAY,EAAE,SAAS,GACtB,OAAO,CAAC,OAAO,CAAC,CA6FlB;AAED;;;GAGG;AACH,wBAAsB,kBAAkB,CACtC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,OAAO,CAAC,EAAE;IACR,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;IACzB,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB,GACA,OAAO,CAAC,KAAK,CAAC;IAAE,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,MAAM,CAAA;CAAE,CAAC,CAAC,CA2CzE;AAED;;;GAGG;AACH,wBAAsB,cAAc,CAClC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,gBAAgB,EAAE,MAAM,GACvB,OAAO,CAAC,KAAK,CAAC;IAAE,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAA;CAAE,CAAC,CAAC,CA+B3F;AAED;;GAEG;AACH,wBAAsB,cAAc,CAClC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,YAAY,EAAE,MAAM,EACpB,KAAK,EAAE,MAAM,EACb,MAAM,EAAE,MAAM,EACd,aAAa,EAAE,MAAM,EACrB,MAAM,EAAE,GAAG,GACV,OAAO,CAAC,IAAI,CAAC,CAaf;AAED;;GAEG;AACH,wBAAs
B,cAAc,CAClC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,YAAY,EAAE,MAAM,EACpB,KAAK,EAAE,MAAM,EACb,MAAM,EAAE,MAAM,EACd,aAAa,EAAE,MAAM,GACpB,OAAO,CAAC,GAAG,GAAG,IAAI,CAAC,CAarB;AAED;;GAEG;AACH,wBAAsB,sBAAsB,CAC1C,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,MAAM,EACZ,QAAQ,EAAE,MAAM,GAAG,SAAS,EAC5B,eAAe,EAAE,GAAG,GAAG,SAAS,GAC/B,OAAO,CAAC,IAAI,CAAC,CAuBf;AAED;;GAEG;AACH,wBAAsB,oBAAoB,CACxC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,YAAY,EAAE,MAAM,EACpB,IAAI,EAAE,cAAc,GACnB,OAAO,CAAC,IAAI,CAAC,CA2Bf;AAED;;GAEG;AACH,wBAAsB,mBAAmB,CACvC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,IAAI,EAAE,MAAM,GACX,OAAO,CAAC,gBAAgB,GAAG,IAAI,CAAC,CAwBlC;AAED;;GAEG;AACH,wBAAsB,oBAAoB,CAAC,IAAI,EAAE,EAAE,CAAC,IAAI,GAAG,OAAO,CAAC,gBAAgB,EAAE,CAAC,CAkBrF;AAED;;GAEG;AACH,wBAAsB,gBAAgB,CACpC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,YAAY,EAAE,MAAM,GACnB,OAAO,CAAC,cAAc,EAAE,CAAC,CAoB3B;AAED;;GAEG;AACH,wBAAsB,kBAAkB,CACtC,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,IAAI,EAAE,MAAM,EACZ,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,MAAM,CAAC,CAgBjB;AAED;;GAEG;AACH,wBAAsB,UAAU,CAAC,IAAI,EAAE,EAAE,CAAC,IAAI,EAAE,YAAY,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,EAAE,CAAC,CAgBvF;AAED;;GAEG;AACH,wBAAsB,mBAAmB,CAAC,IAAI,EAAE,EAAE,CAAC,IAAI,GAAG,OAAO,CAAC,MAAM,EAAE,CAAC,CAmB1E;AAED;;GAEG;AACH,wBAAsB,SAAS,CAC7B,IAAI,EAAE,EAAE,CAAC,IAAI,EACb,YAAY,EAAE,MAAM,EACpB,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,OAAO,CAAC,CAYlB"}
|