@gravito/flux 3.0.1 → 3.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +329 -8
  2. package/bin/flux.js +25 -1
  3. package/dev/viewer/app.js +4 -4
  4. package/dist/bun.cjs +2 -2
  5. package/dist/bun.d.cts +65 -26
  6. package/dist/bun.d.ts +65 -26
  7. package/dist/bun.js +1 -1
  8. package/dist/chunk-6AZNHVEO.cjs +316 -0
  9. package/dist/chunk-6AZNHVEO.cjs.map +1 -0
  10. package/dist/chunk-DN7SIQ34.cjs +3586 -0
  11. package/dist/chunk-DN7SIQ34.cjs.map +1 -0
  12. package/dist/{chunk-ZAMVC732.js → chunk-EZGSU6AW.js} +73 -16
  13. package/dist/chunk-EZGSU6AW.js.map +1 -0
  14. package/dist/chunk-M2ZRQRF4.js +3586 -0
  15. package/dist/chunk-M2ZRQRF4.js.map +1 -0
  16. package/dist/chunk-WGDTB6OC.js +316 -0
  17. package/dist/chunk-WGDTB6OC.js.map +1 -0
  18. package/dist/{chunk-SJSPR4ZU.cjs → chunk-ZE2RDS47.cjs} +75 -18
  19. package/dist/chunk-ZE2RDS47.cjs.map +1 -0
  20. package/dist/cli/flux-visualize.cjs +108 -0
  21. package/dist/cli/flux-visualize.cjs.map +1 -0
  22. package/dist/cli/flux-visualize.d.cts +1 -0
  23. package/dist/cli/flux-visualize.d.ts +1 -0
  24. package/dist/cli/flux-visualize.js +108 -0
  25. package/dist/cli/flux-visualize.js.map +1 -0
  26. package/dist/index.cjs +97 -9
  27. package/dist/index.cjs.map +1 -1
  28. package/dist/index.d.cts +369 -13
  29. package/dist/index.d.ts +369 -13
  30. package/dist/index.js +96 -8
  31. package/dist/index.js.map +1 -1
  32. package/dist/index.node.cjs +11 -3
  33. package/dist/index.node.cjs.map +1 -1
  34. package/dist/index.node.d.cts +1141 -247
  35. package/dist/index.node.d.ts +1141 -247
  36. package/dist/index.node.js +10 -2
  37. package/dist/types-CGIEQPFv.d.cts +443 -0
  38. package/dist/types-CGIEQPFv.d.ts +443 -0
  39. package/package.json +19 -6
  40. package/dist/chunk-3JGQYHUN.js +0 -1006
  41. package/dist/chunk-3JGQYHUN.js.map +0 -1
  42. package/dist/chunk-5OXXH442.cjs +0 -1006
  43. package/dist/chunk-5OXXH442.cjs.map +0 -1
  44. package/dist/chunk-SJSPR4ZU.cjs.map +0 -1
  45. package/dist/chunk-ZAMVC732.js.map +0 -1
  46. package/dist/types-CZwYGpou.d.cts +0 -353
  47. package/dist/types-CZwYGpou.d.ts +0 -353
package/README.md CHANGED
@@ -2,14 +2,31 @@
2
2
 
3
3
  > ⚡ Platform-agnostic, high-performance workflow engine for Gravito
4
4
 
5
- ## Features
6
-
7
- - **Pure State Machine** - No runtime dependencies, Web Standard APIs only
8
- - **Fluent Builder API** - Type-safe, chainable workflow definitions
9
- - **Storage Adapters** - Memory, SQLite (Bun), PostgreSQL (coming soon)
10
- - **Retry & Timeout** - Automatic retry with exponential backoff
11
- - **Event Hooks** - Subscribe to workflow/step lifecycle events
12
- - **Dual Platform** - Works with both Bun and Node.js
5
+ ## Features
6
+
7
+ - 🪐 **Galaxy-Ready Workflow Engine** - Native integration with PlanetCore for universal business process orchestration.
8
+ - 🔄 **Distributed Saga Coordination** - Reliable transaction management across multiple isolated Satellites with automatic rollback.
9
+ - **Pure State Machine** - High-performance engine with zero runtime dependencies beyond Web Standard APIs.
10
+ - 🔐 **Distributed Locking** - Plasma-backed Redis locking to ensure process safety in multi-node clusters.
11
+ - 📡 **Signal & Suspend** - Pause workflows to wait for external stimulus (Webhooks, manual approvals) and resume instantly.
12
+ - 🛠️ **Fluent Builder API** - Fully type-safe, chainable definitions for complex logic flows.
13
+
14
+ ## 🌌 Role in Galaxy Architecture
15
+
16
+ In the **Gravito Galaxy Architecture**, Flux acts as the **Logic Orchestrator (Cerebral Cortex)**.
17
+
18
+ - **Process Master**: Manages complex, long-running business processes that span across multiple Satellites (e.g., a "Checkout" process involving `Catalog`, `Payment`, and `Notification`).
19
+ - **Reliability Engine**: Ensures that even if a Satellite fails mid-process, the state is preserved and the appropriate compensation logic (Saga) is executed.
20
+ - **State Persistence**: Works with `Atlas` or `Plasma` to store the execution state, allowing workflows to survive system restarts or move between nodes.
21
+
22
+ ```mermaid
23
+ graph TD
24
+ S1[Satellite: Order] -- "Trigger Workflow" --> Flux{Flux Engine}
25
+ Flux -- "Step 1" --> S2[Satellite: Inventory]
26
+ Flux -- "Step 2" --> S3[Satellite: Payment]
27
+ Flux -- "Fail / Rollback" --> S2
28
+ Flux -.-> State[(State: Plasma/Redis)]
29
+ ```
13
30
 
14
31
  ## Installation
15
32
 
@@ -149,6 +166,14 @@ const reportWorkflow = createWorkflow('generate-report')
149
166
  })
150
167
  ```
151
168
 
169
+ ## 📚 Documentation
170
+
171
+ Detailed guides and references for the Galaxy Architecture:
172
+
173
+ - [🏗️ **Architecture Overview**](./README.md) — Under the hood of the state machine.
174
+ - [🔄 **Workflow Patterns**](./doc/WORKFLOW_PATTERNS.md) — **NEW**: Sagas, Suspension, and Distributed Locking.
175
+ - [🧪 **Testing Workflows**](./tests/README.md) — Mocking and execution testing.
176
+
152
177
  ## API
153
178
 
154
179
  ### `createWorkflow(name)`
@@ -237,6 +262,113 @@ Commit steps are marked to always execute, even on workflow replay:
237
262
  })
238
263
  ```
239
264
 
265
+ ## Workflow Versioning
266
+
267
+ Track workflow definition versions for migration and compatibility management:
268
+
269
+ ### Setting Version
270
+
271
+ ```typescript
272
+ const workflow = createWorkflow('order-process')
273
+ .version('2.0.0')
274
+ .input<OrderInput>()
275
+ .step('validate', async (ctx) => { /* ... */ })
276
+ .build()
277
+ ```
278
+
279
+ ### Filtering by Version
280
+
281
+ ```typescript
282
+ // List workflows by version
283
+ const v1Workflows = await engine.list({ version: '1.0.0' })
284
+
285
+ // Combine with other filters
286
+ const results = await engine.list({
287
+ name: 'order-process',
288
+ version: '2.0.0',
289
+ status: 'completed',
290
+ limit: 10,
291
+ })
292
+ ```
293
+
294
+ ### Version Mismatch Warning
295
+
296
+ When resuming a workflow with a different definition version, Flux logs a warning:
297
+
298
+ ```typescript
299
+ // Original execution with v1.0.0
300
+ const result = await engine.execute(workflowV1, input)
301
+
302
+ // Resume with v2.0.0 - warns about mismatch
303
+ await engine.resume(workflowV2, result.id, { fromStep: 1 })
304
+ // ⚠️ Warning: version mismatch (stored: 1.0.0, current: 2.0.0)
305
+ ```
306
+
307
+ ## Batch Execution
308
+
309
+ Execute multiple workflow instances efficiently with controlled concurrency:
310
+
311
+ ### Basic Usage
312
+
313
+ ```typescript
314
+ const results = await engine.executeBatch(
315
+ orderWorkflow,
316
+ orders.map(o => ({ orderId: o.id })),
317
+ {
318
+ concurrency: 10,
319
+ continueOnError: true,
320
+ onProgress: (completed, total) => console.log(`${completed}/${total}`)
321
+ }
322
+ )
323
+
324
+ console.log(`Succeeded: ${results.succeeded}, Failed: ${results.failed}`)
325
+ ```
326
+
327
+ ### Using BatchExecutor
328
+
329
+ For more control, use `BatchExecutor` directly:
330
+
331
+ ```typescript
332
+ import { BatchExecutor } from '@gravito/flux'
333
+
334
+ const executor = new BatchExecutor(engine)
335
+
336
+ // Same workflow, multiple inputs
337
+ const result = await executor.execute(workflow, inputs, {
338
+ concurrency: 5, // Max parallel executions (default: 10)
339
+ continueOnError: true, // Continue if one fails (default: false)
340
+ signal: controller.signal, // AbortSignal for cancellation
341
+ onProgress: (completed, total, lastResult) => {
342
+ updateProgressBar(completed / total)
343
+ }
344
+ })
345
+
346
+ // Different workflows
347
+ const result = await executor.executeMany([
348
+ { workflow: orderWorkflow, input: { orderId: '1' } },
349
+ { workflow: notifyWorkflow, input: { userId: '2' } },
350
+ { workflow: orderWorkflow, input: { orderId: '3' } },
351
+ ])
352
+ ```
353
+
354
+ ### Result Structure
355
+
356
+ ```typescript
357
+ interface BatchResult<T> {
358
+ total: number // Total items processed
359
+ succeeded: number // Successful executions
360
+ failed: number // Failed executions
361
+ duration: number // Total execution time (ms)
362
+ results: Array<{
363
+ index: number
364
+ input: T
365
+ success: boolean
366
+ result?: WorkflowState
367
+ error?: Error
368
+ }>
369
+ }
370
+ ```
371
+
240
372
  ## Storage Adapters
241
373
 
242
374
  ### MemoryStorage (Default)
@@ -261,6 +393,42 @@ const engine = new FluxEngine({
261
393
  })
262
394
  ```
263
395
 
396
+ ### PostgreSQLStorage (Production)
397
+
398
+ PostgreSQL for production deployments:
399
+
400
+ ```typescript
401
+ import { FluxEngine, PostgreSQLStorage } from '@gravito/flux'
402
+
403
+ const storage = new PostgreSQLStorage({
404
+ connectionString: 'postgresql://user:password@localhost:5432/dbname',
405
+ tableName: 'flux_workflows',
406
+ ssl: true
407
+ })
408
+
409
+ const engine = new FluxEngine({ storage })
410
+ ```
411
+
412
+ Features:
413
+ - **JSONB columns** for efficient querying of workflow data
414
+ - **Connection pooling** via `pg` library
415
+ - **Automatic schema migration** on init
416
+ - **Optimized indexes** for name, status, and created_at
417
+
418
+ Alternative configuration:
419
+
420
+ ```typescript
421
+ const storage = new PostgreSQLStorage({
422
+ host: 'localhost',
423
+ port: 5432,
424
+ database: 'myapp',
425
+ user: 'postgres',
426
+ password: 'secret',
427
+ tableName: 'workflows',
428
+ ssl: { rejectUnauthorized: false }
429
+ })
430
+ ```
431
+
264
432
  ### Custom Storage
265
433
 
266
434
  Implement `WorkflowStorage` interface:
@@ -276,6 +444,103 @@ interface WorkflowStorage {
276
444
  }
277
445
  ```
278
446
 
447
+ ## Distributed Locking
448
+
449
+ For multi-node deployments, Flux provides a Redis-based distributed locking mechanism to prevent concurrent execution of the same workflow across multiple nodes.
450
+
451
+ ### RedisLockProvider
452
+
453
+ Use Redis for distributed locking in production clusters:
454
+
455
+ ```typescript
456
+ import { FluxEngine, RedisLockProvider } from '@gravito/flux'
457
+ import Redis from 'ioredis'
458
+
459
+ const redis = new Redis({
460
+ host: 'localhost',
461
+ port: 6379,
462
+ })
463
+
464
+ const lockProvider = new RedisLockProvider({
465
+ client: redis,
466
+ keyPrefix: 'myapp:locks:', // Default: 'flux:lock:'
467
+ defaultTtl: 30000, // Default: 30000ms (30s)
468
+ retryDelay: 100, // Default: 100ms
469
+ maxRetries: 3, // Default: 0 (no retries)
470
+ })
471
+
472
+ const engine = new FluxEngine({
473
+ storage: new PostgreSQLStorage({ /* ... */ }),
474
+ lockProvider,
475
+ })
476
+ ```
477
+
478
+ ### Features
479
+
480
+ - **Atomic Acquisition**: Uses Redis `SET NX PX` for atomic lock acquisition
481
+ - **Safe Release**: Lua scripts ensure only the lock owner can release
482
+ - **Auto-Expiration**: Locks automatically expire if a node crashes
483
+ - **Retry Support**: Configurable retry with exponential backoff
484
+ - **Idempotent**: Same owner can refresh an existing lock
485
+
486
+ ### Usage
487
+
488
+ Locks are automatically acquired and released during workflow execution:
489
+
490
+ ```typescript
491
+ // Flux automatically acquires lock before execution
492
+ const result = await engine.execute(workflow, input)
493
+ // Lock is automatically released after completion
494
+ ```
495
+
496
+ Manual lock management:
497
+
498
+ ```typescript
499
+ const lock = await lockProvider.acquire('workflow-123', 'node-1', 30000)
500
+
501
+ if (lock) {
502
+ try {
503
+ // Critical section - only one node can execute
504
+ await doWork()
505
+ } finally {
506
+ await lock.release()
507
+ }
508
+ } else {
509
+ console.log('Another node is processing this workflow')
510
+ }
511
+ ```
512
+
513
+ ### MemoryLockProvider (Development)
514
+
515
+ For single-node development environments:
516
+
517
+ ```typescript
518
+ import { FluxEngine, MemoryLockProvider } from '@gravito/flux'
519
+
520
+ const engine = new FluxEngine({
521
+ lockProvider: new MemoryLockProvider(),
522
+ })
523
+ ```
524
+
525
+ ### Custom Lock Providers
526
+
527
+ Implement the `LockProvider` interface for custom backends:
528
+
529
+ ```typescript
530
+ interface LockProvider {
531
+ acquire(resourceId: string, owner: string, ttl: number): Promise<Lock | null>
532
+ refresh(resourceId: string, owner: string, ttl: number): Promise<boolean>
533
+ release(resourceId: string): Promise<void>
534
+ }
535
+
536
+ interface Lock {
537
+ id: string
538
+ owner: string
539
+ expiresAt: number
540
+ release(): Promise<void>
541
+ }
542
+ ```
543
+
279
544
  ## Gravito Integration
280
545
 
281
546
  ```typescript
@@ -301,6 +566,7 @@ await flux.execute(myWorkflow, input)
301
566
  |---------|-----|---------|
302
567
  | FluxEngine | ✅ | ✅ |
303
568
  | MemoryStorage | ✅ | ✅ |
569
+ | PostgreSQLStorage | ✅ | ✅ |
304
570
  | BunSQLiteStorage | ✅ | ❌ |
305
571
  | OrbitFlux | ✅ | ✅ |
306
572
 
@@ -320,6 +586,61 @@ bun run examples/user-signup.ts
320
586
 
321
587
  # Report generation
322
588
  bun run examples/report-generation.ts
589
+
590
+ # PostgreSQL storage (requires PostgreSQL)
591
+ POSTGRES_URL="postgresql://localhost:5432/flux_demo" bun run examples/postgresql-storage.ts
592
+ ```
593
+
594
+ ## Testing
595
+
596
+ Flux has comprehensive test coverage with 300 total tests across 26 test files:
597
+
598
+ ```bash
599
+ # Run all tests
600
+ bun test
601
+
602
+ # Run with coverage report
603
+ bun test --coverage
604
+
605
+ # Run specific test file
606
+ bun test tests/flux.test.ts
607
+ bun test tests/errors.test.ts
608
+ bun test tests/workflow-builder.test.ts
609
+ ```
610
+
611
+ ### Coverage Metrics
612
+
613
+ - **Function Coverage:** 87% (86.98%)
614
+ - **Line Coverage:** 92% (92.49%)
615
+ - **Total Tests:** 300 passing, 12 skipped (PostgreSQL integration tests)
616
+
617
+ ### What's Tested
618
+
619
+ - ✅ Core workflow execution (FluxEngine, WorkflowExecutor, StepExecutor)
620
+ - ✅ State management (StateMachine, ContextManager, StateUpdater)
621
+ - ✅ Error handling (all 14 error factory functions, FluxError class)
622
+ - ✅ Retry & timeout mechanisms (CompensationRetryPolicy, IdempotencyGuard)
623
+ - ✅ Compensation & rollback (RollbackManager, RecoveryManager, Saga pattern)
624
+ - ✅ Storage adapters (Memory, BunSQLite, PostgreSQL)
625
+ - ✅ Parallel execution (ParallelExecutor)
626
+ - ✅ Suspension & signals (wait/resume workflows)
627
+ - ✅ Workflow builder API (data, describe, validate, step chaining)
628
+ - ✅ Visualization (MermaidGenerator with all diagram variations)
629
+ - ✅ Profiling & tracing (WorkflowProfiler, TraceEmitter, JsonFileTraceSink)
630
+ - ✅ Gravito integration (OrbitFlux lifecycle)
631
+ - ✅ Workflow versioning (.version(), version filtering, mismatch warnings)
632
+ - ✅ Batch execution (BatchExecutor, executeBatch, concurrency control)
633
+ - ✅ Redis distributed locking (RedisLockProvider with mocked Redis)
634
+
635
+ ### Skipped Tests
636
+
637
+ The 12 skipped tests are PostgreSQL integration tests that require a running database:
638
+
639
+ ```bash
640
+ # To run PostgreSQL tests locally:
641
+ docker run -d -p 5432:5432 -e POSTGRES_PASSWORD=test postgres:15
642
+ export POSTGRES_URL="postgresql://postgres:test@localhost:5432/flux_test"
643
+ bun test tests/postgresql-storage.test.ts
323
644
  ```
324
645
 
325
646
  ## License
package/bin/flux.js CHANGED
@@ -1,6 +1,6 @@
1
1
  #!/usr/bin/env node
2
- import { createServer } from 'node:http'
3
2
  import { readFile } from 'node:fs/promises'
3
+ import { createServer } from 'node:http'
4
4
  import { dirname, extname, join, resolve } from 'node:path'
5
5
  import { fileURLToPath } from 'node:url'
6
6
 
@@ -12,6 +12,13 @@ const help = () => {
12
12
 
13
13
  Usage:
14
14
  flux dev --trace ./.flux/trace.ndjson --port 4280
15
+ flux visualize --definition <path> [options]
16
+
17
+ Commands:
18
+ dev Start development viewer for trace files
19
+ visualize Generate Mermaid diagrams from workflow definitions
20
+
21
+ Run 'flux <command> --help' for command-specific help
15
22
  `)
16
23
  }
17
24
 
@@ -20,6 +27,23 @@ if (!command || command === '--help' || command === '-h') {
20
27
  process.exit(0)
21
28
  }
22
29
 
30
+ if (command === 'visualize') {
31
+ const { fileURLToPath } = await import('node:url')
32
+ const { dirname, join } = await import('node:path')
33
+ const { spawn } = await import('node:child_process')
34
+
35
+ const __filename = fileURLToPath(import.meta.url)
36
+ const __dirname = dirname(__filename)
37
+ const visualizeScript = join(__dirname, '..', 'dist', 'cli', 'flux-visualize.js')
38
+
39
+ const child = spawn('node', [visualizeScript, ...args.slice(1)], {
40
+ stdio: 'inherit',
41
+ })
42
+
43
+ child.on('exit', (code) => process.exit(code || 0))
44
+ await new Promise(() => {})
45
+ }
46
+
23
47
  if (command !== 'dev') {
24
48
  console.error(`Unknown command: ${command}`)
25
49
  help()
package/dev/viewer/app.js CHANGED
@@ -66,9 +66,7 @@ const renderStatus = (events) => {
66
66
  return
67
67
  }
68
68
 
69
- const lastWorkflow = [...events]
70
- .reverse()
71
- .find((event) => event.type.startsWith('workflow:'))
69
+ const lastWorkflow = [...events].reverse().find((event) => event.type.startsWith('workflow:'))
72
70
 
73
71
  const status = lastWorkflow?.status ?? 'unknown'
74
72
  const badgeClass = status === 'completed' ? 'ok' : status === 'failed' ? 'fail' : ''
@@ -115,7 +113,9 @@ const refresh = async () => {
115
113
  const res = await fetch('/trace', { cache: 'no-store' })
116
114
  const text = await res.text()
117
115
  const hash = hashText(text)
118
- if (hash === state.lastHash) return
116
+ if (hash === state.lastHash) {
117
+ return
118
+ }
119
119
 
120
120
  state.lastHash = hash
121
121
  state.events = parseNdjson(text)
package/dist/bun.cjs CHANGED
@@ -1,7 +1,7 @@
1
1
  "use strict";Object.defineProperty(exports, "__esModule", {value: true});
2
2
 
3
- var _chunkSJSPR4ZUcjs = require('./chunk-SJSPR4ZU.cjs');
3
+ var _chunkZE2RDS47cjs = require('./chunk-ZE2RDS47.cjs');
4
4
 
5
5
 
6
- exports.BunSQLiteStorage = _chunkSJSPR4ZUcjs.BunSQLiteStorage;
6
+ exports.BunSQLiteStorage = _chunkZE2RDS47cjs.BunSQLiteStorage;
7
7
  //# sourceMappingURL=bun.cjs.map
package/dist/bun.d.cts CHANGED
@@ -1,74 +1,113 @@
1
1
  import { Database } from 'bun:sqlite';
2
- import { o as WorkflowStorage, m as WorkflowState, l as WorkflowFilter } from './types-CZwYGpou.cjs';
2
+ import { p as WorkflowStorage, a as WorkflowState, n as WorkflowFilter } from './types-CGIEQPFv.cjs';
3
3
 
4
4
  /**
5
- * @fileoverview Bun SQLite Storage Adapter
6
- *
7
- * High-performance storage using Bun's built-in SQLite.
8
- *
9
- * @module @gravito/flux/storage
10
- */
11
-
12
- /**
13
- * SQLite Storage Options
5
+ * Configuration options for the Bun SQLite storage adapter.
14
6
  */
15
7
  interface BunSQLiteStorageOptions {
16
- /** Database file path (default: ':memory:') */
8
+ /**
9
+ * Path to the SQLite database file.
10
+ * Use ':memory:' for an ephemeral in-memory database.
11
+ */
17
12
  path?: string;
18
- /** Table name (default: 'flux_workflows') */
13
+ /**
14
+ * Name of the table used to store workflow states.
15
+ */
19
16
  tableName?: string;
20
17
  }
21
18
  /**
22
- * Bun SQLite Storage
19
+ * BunSQLiteStorage provides a persistent storage backend for Flux workflows using Bun's native SQLite module.
23
20
  *
24
- * High-performance storage adapter using Bun's built-in SQLite.
21
+ * It handles automatic table creation, indexing for performance, and serialization of workflow state
22
+ * into a relational format.
25
23
  *
26
24
  * @example
27
25
  * ```typescript
28
- * const engine = new FluxEngine({
29
- * storage: new BunSQLiteStorage({ path: './data/flux.db' })
30
- * })
26
+ * const storage = new BunSQLiteStorage({
27
+ * path: './workflows.db',
28
+ * tableName: 'my_workflows'
29
+ * });
30
+ * await storage.init();
31
31
  * ```
32
32
  */
33
33
  declare class BunSQLiteStorage implements WorkflowStorage {
34
34
  private db;
35
35
  private tableName;
36
36
  private initialized;
37
+ /**
38
+ * Creates a new instance of BunSQLiteStorage.
39
+ *
40
+ * @param options - Configuration for the database connection and table naming.
41
+ */
37
42
  constructor(options?: BunSQLiteStorageOptions);
38
43
  /**
39
- * Initialize storage (create tables)
44
+ * Initializes the database schema and required indexes.
45
+ *
46
+ * This method is idempotent and will be called automatically by other operations if not invoked manually.
47
+ *
48
+ * @throws {Error} If the database schema cannot be created or indexes fail to initialize.
40
49
  */
41
50
  init(): Promise<void>;
42
51
  /**
43
- * Save workflow state
52
+ * Persists or updates a workflow state in the database.
53
+ *
54
+ * Uses an "INSERT OR REPLACE" strategy to ensure the latest state is always stored for a given ID.
55
+ *
56
+ * @param state - The current state of the workflow to be saved.
57
+ * @throws {Error} If the database write operation fails or serialization errors occur.
44
58
  */
45
59
  save(state: WorkflowState): Promise<void>;
46
60
  /**
47
- * Load workflow state by ID
61
+ * Retrieves a workflow state by its unique identifier.
62
+ *
63
+ * @param id - The unique ID of the workflow to load.
64
+ * @returns The reconstructed workflow state, or null if no record is found.
65
+ * @throws {Error} If the database query fails or deserialization of stored JSON fails.
48
66
  */
49
67
  load(id: string): Promise<WorkflowState | null>;
50
68
  /**
51
- * List workflow states with optional filter
69
+ * Lists workflow states based on the provided filtering criteria.
70
+ *
71
+ * Results are returned in descending order of creation time.
72
+ *
73
+ * @param filter - Criteria for filtering and paginating the results.
74
+ * @returns An array of workflow states matching the filter.
75
+ * @throws {Error} If the database query fails.
52
76
  */
53
77
  list(filter?: WorkflowFilter): Promise<WorkflowState[]>;
54
78
  /**
55
- * Delete workflow state
79
+ * Deletes a workflow state from the database.
80
+ *
81
+ * @param id - The unique ID of the workflow to delete.
82
+ * @throws {Error} If the database deletion fails.
56
83
  */
57
84
  delete(id: string): Promise<void>;
58
85
  /**
59
- * Close database connection
86
+ * Closes the database connection and resets the initialization state.
87
+ *
88
+ * @throws {Error} If the database connection cannot be closed cleanly.
60
89
  */
61
90
  close(): Promise<void>;
62
91
  /**
63
- * Convert SQLite row to WorkflowState
92
+ * Converts a raw database row into a structured WorkflowState object.
93
+ *
94
+ * @param row - The raw SQLite row data.
95
+ * @returns The parsed workflow state.
96
+ * @private
64
97
  */
65
98
  private rowToState;
66
99
  /**
67
- * Get raw database (for advanced usage)
100
+ * Provides direct access to the underlying Bun SQLite Database instance.
101
+ *
102
+ * Useful for performing custom queries or maintenance tasks.
103
+ *
104
+ * @returns The raw Database instance.
68
105
  */
69
106
  getDatabase(): Database;
70
107
  /**
71
- * Run a vacuum to optimize database
108
+ * Performs a VACUUM operation to reclaim unused space and defragment the database.
109
+ *
110
+ * @throws {Error} If the VACUUM operation fails.
72
111
  */
73
112
  vacuum(): void;
74
113
  }