@gravito/flux 3.0.0 → 3.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. package/README.md +298 -0
  2. package/bin/flux.js +25 -1
  3. package/dev/viewer/app.js +4 -4
  4. package/dist/bun.cjs +2 -2
  5. package/dist/bun.cjs.map +1 -1
  6. package/dist/bun.d.cts +65 -26
  7. package/dist/bun.d.ts +65 -26
  8. package/dist/bun.js +1 -1
  9. package/dist/chunk-4DXCQ6CL.js +3486 -0
  10. package/dist/chunk-4DXCQ6CL.js.map +1 -0
  11. package/dist/chunk-6AZNHVEO.cjs +316 -0
  12. package/dist/chunk-6AZNHVEO.cjs.map +1 -0
  13. package/dist/{chunk-ZAMVC732.js → chunk-NAIVO7RR.js} +64 -15
  14. package/dist/chunk-NAIVO7RR.js.map +1 -0
  15. package/dist/chunk-WAPZDXSX.cjs +3486 -0
  16. package/dist/chunk-WAPZDXSX.cjs.map +1 -0
  17. package/dist/chunk-WGDTB6OC.js +316 -0
  18. package/dist/chunk-WGDTB6OC.js.map +1 -0
  19. package/dist/{chunk-SJSPR4ZU.cjs → chunk-YXBEYVGY.cjs} +66 -17
  20. package/dist/chunk-YXBEYVGY.cjs.map +1 -0
  21. package/dist/cli/flux-visualize.cjs +108 -0
  22. package/dist/cli/flux-visualize.cjs.map +1 -0
  23. package/dist/cli/flux-visualize.d.cts +1 -0
  24. package/dist/cli/flux-visualize.d.ts +1 -0
  25. package/dist/cli/flux-visualize.js +108 -0
  26. package/dist/cli/flux-visualize.js.map +1 -0
  27. package/dist/index.cjs +100 -12
  28. package/dist/index.cjs.map +1 -1
  29. package/dist/index.d.cts +402 -12
  30. package/dist/index.d.ts +402 -12
  31. package/dist/index.js +98 -10
  32. package/dist/index.js.map +1 -1
  33. package/dist/index.node.cjs +11 -3
  34. package/dist/index.node.cjs.map +1 -1
  35. package/dist/index.node.d.cts +1114 -258
  36. package/dist/index.node.d.ts +1114 -258
  37. package/dist/index.node.js +10 -2
  38. package/dist/types-CRz5XdLd.d.cts +433 -0
  39. package/dist/types-CRz5XdLd.d.ts +433 -0
  40. package/package.json +17 -6
  41. package/dist/chunk-LULCFPIK.js +0 -1004
  42. package/dist/chunk-LULCFPIK.js.map +0 -1
  43. package/dist/chunk-SJSPR4ZU.cjs.map +0 -1
  44. package/dist/chunk-X3NC7HS4.cjs +0 -1004
  45. package/dist/chunk-X3NC7HS4.cjs.map +0 -1
  46. package/dist/chunk-ZAMVC732.js.map +0 -1
  47. package/dist/types-cnIU1O3n.d.cts +0 -250
  48. package/dist/types-cnIU1O3n.d.ts +0 -250
package/README.md CHANGED
@@ -7,9 +7,11 @@
7
7
  - **Pure State Machine** - No runtime dependencies, Web Standard APIs only
8
8
  - **Fluent Builder API** - Type-safe, chainable workflow definitions
9
9
  - **Storage Adapters** - Memory, SQLite (Bun), PostgreSQL (coming soon)
10
+ - **Distributed Locking** - Redis-based locking for multi-node deployments
10
11
  - **Retry & Timeout** - Automatic retry with exponential backoff
11
12
  - **Event Hooks** - Subscribe to workflow/step lifecycle events
12
13
  - **Dual Platform** - Works with both Bun and Node.js
14
 + - **Well-Tested** - 87% function coverage, 92% line coverage, 300 passing tests
13
15
 
14
16
  ## Installation
15
17
 
@@ -237,6 +239,113 @@ Commit steps are marked to always execute, even on workflow replay:
237
239
  })
238
240
  ```
239
241
 
242
+ ## Workflow Versioning
243
+
244
+ Track workflow definition versions for migration and compatibility management:
245
+
246
+ ### Setting Version
247
+
248
+ ```typescript
249
+ const workflow = createWorkflow('order-process')
250
+ .version('2.0.0')
251
+ .input<OrderInput>()
252
+ .step('validate', async (ctx) => { /* ... */ })
253
+ .build()
254
+ ```
255
+
256
+ ### Filtering by Version
257
+
258
+ ```typescript
259
+ // List workflows by version
260
+ const v1Workflows = await engine.list({ version: '1.0.0' })
261
+
262
+ // Combine with other filters
263
+ const results = await engine.list({
264
+ name: 'order-process',
265
+ version: '2.0.0',
266
+ status: 'completed',
267
+ limit: 10,
268
+ })
269
+ ```
270
+
271
+ ### Version Mismatch Warning
272
+
273
+ When resuming a workflow with a different definition version, Flux logs a warning:
274
+
275
+ ```typescript
276
+ // Original execution with v1.0.0
277
+ const result = await engine.execute(workflowV1, input)
278
+
279
+ // Resume with v2.0.0 - warns about mismatch
280
+ await engine.resume(workflowV2, result.id, { fromStep: 1 })
281
+ // ⚠️ Warning: version mismatch (stored: 1.0.0, current: 2.0.0)
282
+ ```
283
+
284
+ ## Batch Execution
285
+
286
+ Execute multiple workflow instances efficiently with controlled concurrency:
287
+
288
+ ### Basic Usage
289
+
290
+ ```typescript
291
+ const results = await engine.executeBatch(
292
+ orderWorkflow,
293
+ orders.map(o => ({ orderId: o.id })),
294
+ {
295
+ concurrency: 10,
296
+ continueOnError: true,
297
+ onProgress: (completed, total) => console.log(`${completed}/${total}`)
298
+ }
299
+ )
300
+
301
+ console.log(`Succeeded: ${results.succeeded}, Failed: ${results.failed}`)
302
+ ```
303
+
304
+ ### Using BatchExecutor
305
+
306
+ For more control, use `BatchExecutor` directly:
307
+
308
+ ```typescript
309
+ import { BatchExecutor } from '@gravito/flux'
310
+
311
+ const executor = new BatchExecutor(engine)
312
+
313
+ // Same workflow, multiple inputs
314
+ const result = await executor.execute(workflow, inputs, {
315
+ concurrency: 5, // Max parallel executions (default: 10)
316
+ continueOnError: true, // Continue if one fails (default: false)
317
+ signal: controller.signal, // AbortSignal for cancellation
318
+ onProgress: (completed, total, lastResult) => {
319
+ updateProgressBar(completed / total)
320
+ }
321
+ })
322
+
323
+ // Different workflows
324
+ const result = await executor.executeMany([
325
+ { workflow: orderWorkflow, input: { orderId: '1' } },
326
+ { workflow: notifyWorkflow, input: { userId: '2' } },
327
+ { workflow: orderWorkflow, input: { orderId: '3' } },
328
+ ])
329
+ ```
330
+
331
+ ### Result Structure
332
+
333
+ ```typescript
334
+ interface BatchResult<T> {
335
+ total: number // Total items processed
336
+ succeeded: number // Successful executions
337
+ failed: number // Failed executions
338
+ duration: number // Total execution time (ms)
339
+ results: Array<{
340
+ index: number
341
+ input: T
342
+ success: boolean
343
+ result?: WorkflowState
344
+ error?: Error
345
+ }>
346
+ }
347
+ ```
348
+
240
349
  ## Storage Adapters
241
350
 
242
351
  ### MemoryStorage (Default)
@@ -261,6 +370,42 @@ const engine = new FluxEngine({
261
370
  })
262
371
  ```
263
372
 
373
+ ### PostgreSQLStorage (Production)
374
+
375
+ PostgreSQL for production deployments:
376
+
377
+ ```typescript
378
+ import { FluxEngine, PostgreSQLStorage } from '@gravito/flux'
379
+
380
+ const storage = new PostgreSQLStorage({
381
+ connectionString: 'postgresql://user:password@localhost:5432/dbname',
382
+ tableName: 'flux_workflows',
383
+ ssl: true
384
+ })
385
+
386
+ const engine = new FluxEngine({ storage })
387
+ ```
388
+
389
+ Features:
390
+ - **JSONB columns** for efficient querying of workflow data
391
+ - **Connection pooling** via `pg` library
392
+ - **Automatic schema migration** on init
393
+ - **Optimized indexes** for name, status, and created_at
394
+
395
+ Alternative configuration:
396
+
397
+ ```typescript
398
+ const storage = new PostgreSQLStorage({
399
+ host: 'localhost',
400
+ port: 5432,
401
+ database: 'myapp',
402
+ user: 'postgres',
403
+ password: 'secret',
404
+ tableName: 'workflows',
405
+ ssl: { rejectUnauthorized: false }
406
+ })
407
+ ```
408
+
264
409
  ### Custom Storage
265
410
 
266
411
  Implement `WorkflowStorage` interface:
@@ -276,6 +421,103 @@ interface WorkflowStorage {
276
421
  }
277
422
  ```
278
423
 
424
+ ## Distributed Locking
425
+
426
+ For multi-node deployments, Flux provides a Redis-based distributed locking mechanism to prevent concurrent execution of the same workflow across multiple nodes.
427
+
428
+ ### RedisLockProvider
429
+
430
+ Use Redis for distributed locking in production clusters:
431
+
432
+ ```typescript
433
+ import { FluxEngine, RedisLockProvider } from '@gravito/flux'
434
+ import Redis from 'ioredis'
435
+
436
+ const redis = new Redis({
437
+ host: 'localhost',
438
+ port: 6379,
439
+ })
440
+
441
+ const lockProvider = new RedisLockProvider({
442
+ client: redis,
443
+ keyPrefix: 'myapp:locks:', // Default: 'flux:lock:'
444
+ defaultTtl: 30000, // Default: 30000ms (30s)
445
+ retryDelay: 100, // Default: 100ms
446
+ maxRetries: 3, // Default: 0 (no retries)
447
+ })
448
+
449
+ const engine = new FluxEngine({
450
+ storage: new PostgreSQLStorage({ /* ... */ }),
451
+ lockProvider,
452
+ })
453
+ ```
454
+
455
+ ### Features
456
+
457
+ - **Atomic Acquisition**: Uses Redis `SET NX PX` for atomic lock acquisition
458
+ - **Safe Release**: Lua scripts ensure only the lock owner can release
459
+ - **Auto-Expiration**: Locks automatically expire if a node crashes
460
+ - **Retry Support**: Configurable retry with exponential backoff
461
+ - **Idempotent**: Same owner can refresh an existing lock
462
+
463
+ ### Usage
464
+
465
+ Locks are automatically acquired and released during workflow execution:
466
+
467
+ ```typescript
468
+ // Flux automatically acquires lock before execution
469
+ const result = await engine.execute(workflow, input)
470
+ // Lock is automatically released after completion
471
+ ```
472
+
473
+ Manual lock management:
474
+
475
+ ```typescript
476
+ const lock = await lockProvider.acquire('workflow-123', 'node-1', 30000)
477
+
478
+ if (lock) {
479
+ try {
480
+ // Critical section - only one node can execute
481
+ await doWork()
482
+ } finally {
483
+ await lock.release()
484
+ }
485
+ } else {
486
+ console.log('Another node is processing this workflow')
487
+ }
488
+ ```
489
+
490
+ ### MemoryLockProvider (Development)
491
+
492
+ For single-node development environments:
493
+
494
+ ```typescript
495
+ import { FluxEngine, MemoryLockProvider } from '@gravito/flux'
496
+
497
+ const engine = new FluxEngine({
498
+ lockProvider: new MemoryLockProvider(),
499
+ })
500
+ ```
501
+
502
+ ### Custom Lock Providers
503
+
504
+ Implement the `LockProvider` interface for custom backends:
505
+
506
+ ```typescript
507
+ interface LockProvider {
508
+ acquire(resourceId: string, owner: string, ttl: number): Promise<Lock | null>
509
+ refresh(resourceId: string, owner: string, ttl: number): Promise<boolean>
510
+ release(resourceId: string): Promise<void>
511
+ }
512
+
513
+ interface Lock {
514
+ id: string
515
+ owner: string
516
+ expiresAt: number
517
+ release(): Promise<void>
518
+ }
519
+ ```
520
+
279
521
  ## Gravito Integration
280
522
 
281
523
  ```typescript
@@ -301,6 +543,7 @@ await flux.execute(myWorkflow, input)
301
543
  |---------|-----|---------|
302
544
  | FluxEngine | ✅ | ✅ |
303
545
  | MemoryStorage | ✅ | ✅ |
546
+ | PostgreSQLStorage | ✅ | ✅ |
304
547
  | BunSQLiteStorage | ✅ | ❌ |
305
548
  | OrbitFlux | ✅ | ✅ |
306
549
 
@@ -320,6 +563,61 @@ bun run examples/user-signup.ts
320
563
 
321
564
  # Report generation
322
565
  bun run examples/report-generation.ts
566
+
567
+ # PostgreSQL storage (requires PostgreSQL)
568
+ POSTGRES_URL="postgresql://localhost:5432/flux_demo" bun run examples/postgresql-storage.ts
569
+ ```
570
+
571
+ ## Testing
572
+
573
 + Flux has comprehensive test coverage with 312 total tests (300 passing, 12 skipped) across 26 test files:
574
+
575
+ ```bash
576
+ # Run all tests
577
+ bun test
578
+
579
+ # Run with coverage report
580
+ bun test --coverage
581
+
582
+ # Run specific test file
583
+ bun test tests/flux.test.ts
584
+ bun test tests/errors.test.ts
585
+ bun test tests/workflow-builder.test.ts
586
+ ```
587
+
588
+ ### Coverage Metrics
589
+
590
+ - **Function Coverage:** 87% (86.98%)
591
+ - **Line Coverage:** 92% (92.49%)
592
+ - **Total Tests:** 300 passing, 12 skipped (PostgreSQL integration tests)
593
+
594
+ ### What's Tested
595
+
596
+ - ✅ Core workflow execution (FluxEngine, WorkflowExecutor, StepExecutor)
597
+ - ✅ State management (StateMachine, ContextManager, StateUpdater)
598
+ - ✅ Error handling (all 14 error factory functions, FluxError class)
599
+ - ✅ Retry & timeout mechanisms (CompensationRetryPolicy, IdempotencyGuard)
600
+ - ✅ Compensation & rollback (RollbackManager, RecoveryManager, Saga pattern)
601
+ - ✅ Storage adapters (Memory, BunSQLite, PostgreSQL)
602
+ - ✅ Parallel execution (ParallelExecutor)
603
+ - ✅ Suspension & signals (wait/resume workflows)
604
+ - ✅ Workflow builder API (data, describe, validate, step chaining)
605
+ - ✅ Visualization (MermaidGenerator with all diagram variations)
606
+ - ✅ Profiling & tracing (WorkflowProfiler, TraceEmitter, JsonFileTraceSink)
607
+ - ✅ Gravito integration (OrbitFlux lifecycle)
608
+ - ✅ Workflow versioning (.version(), version filtering, mismatch warnings)
609
+ - ✅ Batch execution (BatchExecutor, executeBatch, concurrency control)
610
+ - ✅ Redis distributed locking (RedisLockProvider with mocked Redis)
611
+
612
+ ### Skipped Tests
613
+
614
+ The 12 skipped tests are PostgreSQL integration tests that require a running database:
615
+
616
+ ```bash
617
+ # To run PostgreSQL tests locally:
618
+ docker run -d -p 5432:5432 -e POSTGRES_PASSWORD=test postgres:15
619
+ export POSTGRES_URL="postgresql://postgres:test@localhost:5432/flux_test"
620
+ bun test tests/postgresql-storage.test.ts
323
621
  ```
324
622
 
325
623
  ## License
package/bin/flux.js CHANGED
@@ -1,6 +1,6 @@
1
1
  #!/usr/bin/env node
2
- import { createServer } from 'node:http'
3
2
  import { readFile } from 'node:fs/promises'
3
+ import { createServer } from 'node:http'
4
4
  import { dirname, extname, join, resolve } from 'node:path'
5
5
  import { fileURLToPath } from 'node:url'
6
6
 
@@ -12,6 +12,13 @@ const help = () => {
12
12
 
13
13
  Usage:
14
14
  flux dev --trace ./.flux/trace.ndjson --port 4280
15
+ flux visualize --definition <path> [options]
16
+
17
+ Commands:
18
+ dev Start development viewer for trace files
19
+ visualize Generate Mermaid diagrams from workflow definitions
20
+
21
+ Run 'flux <command> --help' for command-specific help
15
22
  `)
16
23
  }
17
24
 
@@ -20,6 +27,23 @@ if (!command || command === '--help' || command === '-h') {
20
27
  process.exit(0)
21
28
  }
22
29
 
30
+ if (command === 'visualize') {
31
+ const { fileURLToPath } = await import('node:url')
32
+ const { dirname, join } = await import('node:path')
33
+ const { spawn } = await import('node:child_process')
34
+
35
+ const __filename = fileURLToPath(import.meta.url)
36
+ const __dirname = dirname(__filename)
37
+ const visualizeScript = join(__dirname, '..', 'dist', 'cli', 'flux-visualize.js')
38
+
39
+ const child = spawn('node', [visualizeScript, ...args.slice(1)], {
40
+ stdio: 'inherit',
41
+ })
42
+
43
+ child.on('exit', (code) => process.exit(code || 0))
44
+ await new Promise(() => {})
45
+ }
46
+
23
47
  if (command !== 'dev') {
24
48
  console.error(`Unknown command: ${command}`)
25
49
  help()
package/dev/viewer/app.js CHANGED
@@ -66,9 +66,7 @@ const renderStatus = (events) => {
66
66
  return
67
67
  }
68
68
 
69
- const lastWorkflow = [...events]
70
- .reverse()
71
- .find((event) => event.type.startsWith('workflow:'))
69
+ const lastWorkflow = [...events].reverse().find((event) => event.type.startsWith('workflow:'))
72
70
 
73
71
  const status = lastWorkflow?.status ?? 'unknown'
74
72
  const badgeClass = status === 'completed' ? 'ok' : status === 'failed' ? 'fail' : ''
@@ -115,7 +113,9 @@ const refresh = async () => {
115
113
  const res = await fetch('/trace', { cache: 'no-store' })
116
114
  const text = await res.text()
117
115
  const hash = hashText(text)
118
- if (hash === state.lastHash) return
116
+ if (hash === state.lastHash) {
117
+ return
118
+ }
119
119
 
120
120
  state.lastHash = hash
121
121
  state.events = parseNdjson(text)
package/dist/bun.cjs CHANGED
@@ -1,7 +1,7 @@
1
1
  "use strict";Object.defineProperty(exports, "__esModule", {value: true});
2
2
 
3
- var _chunkSJSPR4ZUcjs = require('./chunk-SJSPR4ZU.cjs');
3
+ var _chunkYXBEYVGYcjs = require('./chunk-YXBEYVGY.cjs');
4
4
 
5
5
 
6
- exports.BunSQLiteStorage = _chunkSJSPR4ZUcjs.BunSQLiteStorage;
6
+ exports.BunSQLiteStorage = _chunkYXBEYVGYcjs.BunSQLiteStorage;
7
7
  //# sourceMappingURL=bun.cjs.map
package/dist/bun.cjs.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"sources":["/Users/carl/Dev/Carl/gravito-core/packages/flux/dist/bun.cjs"],"names":[],"mappings":"AAAA;AACE;AACF,wDAA6B;AAC7B;AACE;AACF,8DAAC","file":"/Users/carl/Dev/Carl/gravito-core/packages/flux/dist/bun.cjs"}
1
+ {"version":3,"sources":["/Users/carl/Dev/Carl/gravito-core-ci-fix/packages/flux/dist/bun.cjs"],"names":[],"mappings":"AAAA;AACE;AACF,wDAA6B;AAC7B;AACE;AACF,8DAAC","file":"/Users/carl/Dev/Carl/gravito-core-ci-fix/packages/flux/dist/bun.cjs"}
package/dist/bun.d.cts CHANGED
@@ -1,74 +1,113 @@
1
1
  import { Database } from 'bun:sqlite';
2
- import { o as WorkflowStorage, m as WorkflowState, l as WorkflowFilter } from './types-cnIU1O3n.cjs';
2
+ import { p as WorkflowStorage, a as WorkflowState, n as WorkflowFilter } from './types-CRz5XdLd.cjs';
3
3
 
4
4
  /**
5
- * @fileoverview Bun SQLite Storage Adapter
6
- *
7
- * High-performance storage using Bun's built-in SQLite.
8
- *
9
- * @module @gravito/flux/storage
10
- */
11
-
12
- /**
13
- * SQLite Storage Options
5
+ * Configuration options for the Bun SQLite storage adapter.
14
6
  */
15
7
  interface BunSQLiteStorageOptions {
16
- /** Database file path (default: ':memory:') */
8
+ /**
9
+ * Path to the SQLite database file.
10
+ * Use ':memory:' for an ephemeral in-memory database.
11
+ */
17
12
  path?: string;
18
- /** Table name (default: 'flux_workflows') */
13
+ /**
14
+ * Name of the table used to store workflow states.
15
+ */
19
16
  tableName?: string;
20
17
  }
21
18
  /**
22
- * Bun SQLite Storage
19
+ * BunSQLiteStorage provides a persistent storage backend for Flux workflows using Bun's native SQLite module.
23
20
  *
24
- * High-performance storage adapter using Bun's built-in SQLite.
21
+ * It handles automatic table creation, indexing for performance, and serialization of workflow state
22
+ * into a relational format.
25
23
  *
26
24
  * @example
27
25
  * ```typescript
28
- * const engine = new FluxEngine({
29
- * storage: new BunSQLiteStorage({ path: './data/flux.db' })
30
- * })
26
+ * const storage = new BunSQLiteStorage({
27
+ * path: './workflows.db',
28
+ * tableName: 'my_workflows'
29
+ * });
30
+ * await storage.init();
31
31
  * ```
32
32
  */
33
33
  declare class BunSQLiteStorage implements WorkflowStorage {
34
34
  private db;
35
35
  private tableName;
36
36
  private initialized;
37
+ /**
38
+ * Creates a new instance of BunSQLiteStorage.
39
+ *
40
+ * @param options - Configuration for the database connection and table naming.
41
+ */
37
42
  constructor(options?: BunSQLiteStorageOptions);
38
43
  /**
39
- * Initialize storage (create tables)
44
+ * Initializes the database schema and required indexes.
45
+ *
46
+ * This method is idempotent and will be called automatically by other operations if not invoked manually.
47
+ *
48
+ * @throws {Error} If the database schema cannot be created or indexes fail to initialize.
40
49
  */
41
50
  init(): Promise<void>;
42
51
  /**
43
- * Save workflow state
52
+ * Persists or updates a workflow state in the database.
53
+ *
54
+ * Uses an "INSERT OR REPLACE" strategy to ensure the latest state is always stored for a given ID.
55
+ *
56
+ * @param state - The current state of the workflow to be saved.
57
+ * @throws {Error} If the database write operation fails or serialization errors occur.
44
58
  */
45
59
  save(state: WorkflowState): Promise<void>;
46
60
  /**
47
- * Load workflow state by ID
61
+ * Retrieves a workflow state by its unique identifier.
62
+ *
63
+ * @param id - The unique ID of the workflow to load.
64
+ * @returns The reconstructed workflow state, or null if no record is found.
65
+ * @throws {Error} If the database query fails or deserialization of stored JSON fails.
48
66
  */
49
67
  load(id: string): Promise<WorkflowState | null>;
50
68
  /**
51
- * List workflow states with optional filter
69
+ * Lists workflow states based on the provided filtering criteria.
70
+ *
71
+ * Results are returned in descending order of creation time.
72
+ *
73
+ * @param filter - Criteria for filtering and paginating the results.
74
+ * @returns An array of workflow states matching the filter.
75
+ * @throws {Error} If the database query fails.
52
76
  */
53
77
  list(filter?: WorkflowFilter): Promise<WorkflowState[]>;
54
78
  /**
55
- * Delete workflow state
79
+ * Deletes a workflow state from the database.
80
+ *
81
+ * @param id - The unique ID of the workflow to delete.
82
+ * @throws {Error} If the database deletion fails.
56
83
  */
57
84
  delete(id: string): Promise<void>;
58
85
  /**
59
- * Close database connection
86
+ * Closes the database connection and resets the initialization state.
87
+ *
88
+ * @throws {Error} If the database connection cannot be closed cleanly.
60
89
  */
61
90
  close(): Promise<void>;
62
91
  /**
63
- * Convert SQLite row to WorkflowState
92
+ * Converts a raw database row into a structured WorkflowState object.
93
+ *
94
+ * @param row - The raw SQLite row data.
95
+ * @returns The parsed workflow state.
96
+ * @private
64
97
  */
65
98
  private rowToState;
66
99
  /**
67
- * Get raw database (for advanced usage)
100
+ * Provides direct access to the underlying Bun SQLite Database instance.
101
+ *
102
+ * Useful for performing custom queries or maintenance tasks.
103
+ *
104
+ * @returns The raw Database instance.
68
105
  */
69
106
  getDatabase(): Database;
70
107
  /**
71
- * Run a vacuum to optimize database
108
+ * Performs a VACUUM operation to reclaim unused space and defragment the database.
109
+ *
110
+ * @throws {Error} If the VACUUM operation fails.
72
111
  */
73
112
  vacuum(): void;
74
113
  }