spindb 0.13.2 → 0.13.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -429,17 +429,24 @@ spindb backup mydb --output ./backups/ # Custom directory
429
429
  spindb backup mydb --database my_app # Backup specific database
430
430
  ```
431
431
 
432
- Backup formats:
432
+ Backup formats (vary by engine):
433
433
 
434
434
  ```bash
435
- spindb backup mydb --format sql # Plain SQL (.sql)
436
- spindb backup mydb --format dump # Compressed (.dump for PG, .sql.gz for MySQL)
435
+ spindb backup mydb --format sql # Plain SQL (.sql) or text commands (.redis)
436
+ spindb backup mydb --format dump # Binary format (.dump for PG, .sql.gz for MySQL, .rdb for Redis)
437
437
 
438
438
  # Shorthand
439
439
  spindb backup mydb --sql
440
440
  spindb backup mydb --dump
441
441
  ```
442
442
 
443
+ Format by engine:
444
+ - PostgreSQL: `.sql` (plain SQL) / `.dump` (pg_dump custom)
445
+ - MySQL: `.sql` (plain SQL) / `.sql.gz` (compressed SQL)
446
+ - SQLite: `.sql` (plain SQL) / `.sqlite` (binary copy)
447
+ - MongoDB: `.bson` (BSON dump) / `.archive` (compressed archive)
448
+ - Redis: `.redis` (text commands) / `.rdb` (RDB snapshot)
449
+
443
450
  <details>
444
451
  <summary>All options</summary>
445
452
 
@@ -455,6 +462,27 @@ spindb backup mydb --dump
455
462
 
456
463
  </details>
457
464
 
465
+ #### `backups` - List backup files
466
+
467
+ ```bash
468
+ spindb backups # List backups in current directory
469
+ spindb backups ./data # List backups in specific directory
470
+ spindb backups --all # Include ~/.spindb/backups
471
+ spindb backups --limit 50 # Show more results
472
+ spindb backups --json # JSON output
473
+ ```
474
+
475
+ <details>
476
+ <summary>All options</summary>
477
+
478
+ | Option | Description |
479
+ |--------|-------------|
480
+ | `--all`, `-a` | Include backups from `~/.spindb/backups` |
481
+ | `--limit`, `-n` | Limit number of results (default: 20) |
482
+ | `--json`, `-j` | Output as JSON |
483
+
484
+ </details>
485
+
458
486
  #### `restore` - Restore from backup
459
487
 
460
488
  ```bash
@@ -488,6 +516,78 @@ spindb info mydb
488
516
 
489
517
  </details>
490
518
 
519
+ #### Backup & Restore Format Reference
520
+
521
+ Each engine has specific backup formats and restore behaviors:
522
+
523
+ <details>
524
+ <summary>PostgreSQL</summary>
525
+
526
+ | Format | Extension | Tool | Notes |
527
+ |--------|-----------|------|-------|
528
+ | SQL | `.sql` | pg_dump | Plain text SQL, human-readable |
529
+ | Custom | `.dump` | pg_dump -Fc | Compressed, supports parallel restore |
530
+
531
+ **Restore behavior:** Creates new database or replaces existing. Uses `pg_restore` for `.dump`, `psql` for `.sql`.
532
+
533
+ </details>
534
+
535
+ <details>
536
+ <summary>MySQL</summary>
537
+
538
+ | Format | Extension | Tool | Notes |
539
+ |--------|-----------|------|-------|
540
+ | SQL | `.sql` | mysqldump | Plain text SQL |
541
+ | Compressed | `.sql.gz` | mysqldump + gzip | Gzip compressed SQL |
542
+
543
+ **Restore behavior:** Creates new database or replaces existing. Pipes to `mysql` client.
544
+
545
+ </details>
546
+
547
+ <details>
548
+ <summary>SQLite</summary>
549
+
550
+ | Format | Extension | Tool | Notes |
551
+ |--------|-----------|------|-------|
552
+ | SQL | `.sql` | .dump | Plain text SQL |
553
+ | Binary | `.sqlite` | File copy | Exact copy of database file |
554
+
555
+ **Restore behavior:** Creates new file or replaces existing.
556
+
557
+ </details>
558
+
559
+ <details>
560
+ <summary>MongoDB</summary>
561
+
562
+ | Format | Extension | Tool | Notes |
563
+ |--------|-----------|------|-------|
564
+ | BSON | `.bson` | mongodump | Binary JSON per collection |
565
+ | Archive | `.archive` | mongodump --archive | Single compressed file |
566
+
567
+ **Restore behavior:** Creates new database or replaces existing. Uses `mongorestore`.
568
+
569
+ </details>
570
+
571
+ <details>
572
+ <summary>Redis</summary>
573
+
574
+ | Format | Extension | Tool | Notes |
575
+ |--------|-----------|------|-------|
576
+ | RDB | `.rdb` | BGSAVE | Binary snapshot, requires restart |
577
+ | Text | `.redis` | Custom | Human-readable Redis commands |
578
+
579
+ **Text format detection:** Files are detected as Redis text commands if they contain valid Redis commands (SET, HSET, DEL, etc.), regardless of file extension. This allows restoring files like `users.txt` or `data` without renaming.
580
+
581
+ **Restore behavior:**
582
+ - **RDB (`.rdb`):** Requires stopping Redis, copies file to data directory, restart loads data
583
+ - **Text (`.redis`):** Pipes commands to running Redis instance. Prompts for:
584
+ - **Replace all:** Runs `FLUSHDB` first (clean slate)
585
+ - **Merge:** Adds/updates keys, keeps existing keys not in backup
586
+
587
+ **Note:** Redis uses numbered databases (0-15) that always exist. "Create new database" is not applicable.
588
+
589
+ </details>
590
+
491
591
  ### Container Management
492
592
 
493
593
  #### `list` - List all containers
@@ -699,7 +799,7 @@ Native processes mean instant startup and no virtualization overhead.
699
799
  └── mydb.sqlite # Created with: spindb create mydb -e sqlite
700
800
  ```
701
801
 
702
- ### How Data Persists
802
+ ### Data Persistence
703
803
 
704
804
  SpinDB runs databases as **native processes** on your machine. When you start a container:
705
805
 
@@ -717,6 +817,29 @@ When you stop a container:
717
817
 
718
818
  **Your data is never deleted unless you explicitly delete the container.**
719
819
 
820
+ #### Persistence by Engine
821
+
822
+ Each database engine has its own persistence mechanism:
823
+
824
+ | Engine | Mechanism | Durability |
825
+ |--------|-----------|------------|
826
+ | PostgreSQL | Write-Ahead Logging (WAL) | Every commit is immediately durable |
827
+ | MySQL | InnoDB transaction logs | Every commit is immediately durable |
828
+ | SQLite | File-based transactions | Every commit is immediately durable |
829
+ | MongoDB | WiredTiger with journaling | Writes journaled before acknowledged |
830
+ | Redis | RDB snapshots | Periodic snapshots (see below) |
831
+
832
+ **PostgreSQL, MySQL, MongoDB:** These engines use transaction logs or journaling. Every committed write is guaranteed to survive a crash or unexpected shutdown.
833
+
834
+ **SQLite:** As a file-based database, SQLite writes directly to disk on each commit. No server process means no risk of losing in-flight data.
835
+
836
+ **Redis:** SpinDB configures Redis with RDB (Redis Database) snapshots:
837
+ - Save after 900 seconds if at least 1 key changed
838
+ - Save after 300 seconds if at least 10 keys changed
839
+ - Save after 60 seconds if at least 10,000 keys changed
840
+
841
+ With these settings, Redis can lose recent writes on an unexpected crash — roughly the last ~60 seconds under heavy write load, and potentially longer (up to ~15 minutes) when only a few keys have changed. For local development, this trade-off (speed over strict durability) is typically acceptable. If you need stronger guarantees, use `spindb backup` before stopping work.
842
+
720
843
  ### Binary Sources
721
844
 
722
845
  **PostgreSQL:** Server binaries are downloaded automatically:
@@ -738,14 +861,15 @@ This isn't a preference—it's a practical reality of what's available.
738
861
 
739
862
  This makes multi-version support trivial: need PostgreSQL 14 for a legacy project and 18 for a new one? SpinDB downloads both, and they run side-by-side without conflicts.
740
863
 
741
- **No equivalent exists for MySQL or MongoDB.** Neither database has a comparable embedded binary project:
864
+ **No equivalent exists for MySQL, MongoDB, or Redis.** None of these databases has a comparable embedded binary project:
742
865
 
743
866
  - **MySQL:** Oracle distributes MySQL as large installers with system dependencies, not embeddable binaries. There's no "zonky.io for MySQL."
744
- - **MongoDB:** Server binaries are several hundred MB with complex licensing around redistribution. MongoDB Inc. doesn't provide an embedded distribution.
867
+ - **MongoDB:** Server binaries are several hundred MB and aren't designed for portable distribution.
868
+ - **Redis:** While Redis is small (~6-12 MB), there's no official portable distribution. Community Windows ports exist, but macOS/Linux rely on system packages.
745
869
 
746
870
  For these databases, system packages (Homebrew, apt, choco) are the most reliable option. They handle dependencies, platform quirks, and security updates. SpinDB simply orchestrates what's already installed.
747
871
 
748
- **Does this limit multi-version support?** Yes, for MySQL/MongoDB you get whatever version your package manager provides. In practice, this is rarely a problem—developers seldom need multiple MySQL versions simultaneously. If zonky.io-style distributions emerged for other databases, SpinDB could adopt them.
872
+ **Does this limit multi-version support?** Yes, for MySQL/MongoDB/Redis you get whatever version your package manager provides. In practice, this is rarely a problem—developers seldom need multiple versions of these databases simultaneously. If zonky.io-style distributions emerged for other databases, SpinDB could adopt them.
749
873
 
750
874
  ---
751
875
 
@@ -776,6 +900,20 @@ See [TODO.md](TODO.md) for the full roadmap.
776
900
  - Scheduled backups
777
901
  - Import from Docker
778
902
 
903
+ ### Possible Future Engines
904
+
905
+ These engines are under consideration but not yet on the roadmap. Community interest and feasibility will determine priority:
906
+
907
+ | Engine | Type | Notes |
908
+ |--------|------|-------|
909
+ | **DuckDB** | Embedded analytical | File-based like SQLite, popular for data/analytics work |
910
+ | **libSQL** | Embedded relational | SQLite fork by Turso with replication and edge support |
911
+ | **Valkey** | Key-value store | Redis fork (post-license change), growing adoption |
912
+ | **Meilisearch** | Search engine | Developer-friendly search, good binary distribution |
913
+ | **Elasticsearch/OpenSearch** | Search engine | Full-text search, common in web applications |
914
+ | **Neo4j** | Graph database | Most popular graph database |
915
+ | **InfluxDB** | Time-series | IoT, metrics, and monitoring use cases |
916
+
779
917
  ---
780
918
 
781
919
  ## Troubleshooting
@@ -838,6 +976,14 @@ SpinDB wouldn't be possible without:
838
976
 
839
977
  ---
840
978
 
979
+ ## Related Work
980
+
981
+ We're actively contributing to the broader embedded database ecosystem:
982
+
983
+ - **[hostdb](https://github.com/robertjbass/hostdb)** - A companion project providing downloadable database binaries (Redis, MySQL/MariaDB, etc.) as GitHub releases. This will enable SpinDB to offer multi-version support for additional engines beyond PostgreSQL.
984
+
985
+ ---
986
+
841
987
  ## License
842
988
 
843
989
  [PolyForm Noncommercial 1.0.0](LICENSE)
@@ -29,11 +29,39 @@ function generateDefaultFilename(
29
29
  }
30
30
 
31
31
  function getExtension(format: 'sql' | 'dump', engine: string): string {
32
+ // Handle 'sql' format (human-readable option)
32
33
  if (format === 'sql') {
33
- return '.sql'
34
+ // MongoDB uses BSON directory format for 'sql' choice
35
+ return engine === 'mongodb' ? '' : '.sql'
36
+ }
37
+
38
+ // Handle 'dump' format (binary/compressed option)
39
+ switch (engine) {
40
+ case 'mysql':
41
+ return '.sql.gz'
42
+ case 'sqlite':
43
+ return '.sqlite'
44
+ case 'mongodb':
45
+ return '.archive'
46
+ case 'redis':
47
+ return '.rdb'
48
+ case 'postgresql':
49
+ default:
50
+ return '.dump'
34
51
  }
35
- // MySQL dump is gzipped SQL, PostgreSQL dump is custom format
36
- return engine === 'mysql' ? '.sql.gz' : '.dump'
52
+ }
53
+
54
+ function getFormatDescription(format: 'sql' | 'dump', engine: string): string {
55
+ if (engine === 'redis') {
56
+ return 'RDB snapshot'
57
+ }
58
+ if (engine === 'mongodb') {
59
+ return format === 'sql' ? 'BSON directory' : 'archive'
60
+ }
61
+ if (engine === 'sqlite') {
62
+ return format === 'sql' ? 'SQL' : 'binary'
63
+ }
64
+ return format === 'sql' ? 'SQL' : 'dump'
37
65
  }
38
66
 
39
67
  export const backupCommand = new Command('backup')
@@ -195,8 +223,9 @@ export const backupCommand = new Command('backup')
195
223
  const outputDir = options.output || process.cwd()
196
224
  const outputPath = join(outputDir, `${filename}${extension}`)
197
225
 
226
+ const formatDesc = getFormatDescription(format, engineName)
198
227
  const backupSpinner = createSpinner(
199
- `Creating ${format === 'sql' ? 'SQL' : 'dump'} backup of "${databaseName}"...`,
228
+ `Creating ${formatDesc} backup of "${databaseName}"...`,
200
229
  )
201
230
  backupSpinner.start()
202
231
 
@@ -222,11 +251,8 @@ export const backupCommand = new Command('backup')
222
251
  console.log()
223
252
  console.log(uiSuccess('Backup complete'))
224
253
  console.log()
225
- console.log(chalk.gray(' File:'), chalk.cyan(result.path))
226
- console.log(
227
- chalk.gray(' Size:'),
228
- chalk.white(formatBytes(result.size)),
229
- )
254
+ console.log(chalk.gray(' Saved to:'), chalk.cyan(result.path))
255
+ console.log(chalk.gray(' Size:'), chalk.white(formatBytes(result.size)))
230
256
  console.log(chalk.gray(' Format:'), chalk.white(result.format))
231
257
  console.log()
232
258
  }
@@ -0,0 +1,259 @@
1
+ /**
2
+ * List backups command
3
+ *
4
+ * Scans the current directory (or specified directory) for backup files
5
+ * and displays them with metadata.
6
+ */
7
+
8
+ import { Command } from 'commander'
9
+ import { readdirSync, statSync } from 'fs'
10
+ import { join, extname } from 'path'
11
+ import chalk from 'chalk'
12
+ import { formatBytes } from '../ui/theme'
13
+
14
+ type BackupInfo = {
15
+ filename: string
16
+ path: string
17
+ size: number
18
+ modified: Date
19
+ engine: string | null
20
+ format: string
21
+ }
22
+
23
+ /**
24
+ * Detect engine and format from file extension
25
+ */
26
+ function detectBackupType(filename: string): { engine: string | null; format: string } {
27
+ const ext = extname(filename).toLowerCase()
28
+
29
+ // Check for double extensions like .sql.gz
30
+ if (filename.endsWith('.sql.gz')) {
31
+ return { engine: 'mysql', format: 'Compressed SQL' }
32
+ }
33
+
34
+ switch (ext) {
35
+ case '.sql':
36
+ // Could be PostgreSQL, MySQL, or SQLite
37
+ return { engine: null, format: 'SQL dump' }
38
+ case '.dump':
39
+ return { engine: 'postgresql', format: 'pg_dump custom' }
40
+ case '.sqlite':
41
+ case '.db':
42
+ case '.sqlite3':
43
+ return { engine: 'sqlite', format: 'Binary copy' }
44
+ case '.archive':
45
+ return { engine: 'mongodb', format: 'BSON archive' }
46
+ case '.rdb':
47
+ return { engine: 'redis', format: 'RDB snapshot' }
48
+ case '.redis':
49
+ return { engine: 'redis', format: 'Text commands' }
50
+ case '.bson':
51
+ return { engine: 'mongodb', format: 'BSON' }
52
+ default:
53
+ return { engine: null, format: 'Unknown' }
54
+ }
55
+ }
56
+
57
+ /**
58
+ * Check if a file looks like a backup file
59
+ */
60
+ function isBackupFile(filename: string): boolean {
61
+ const backupExtensions = [
62
+ '.sql',
63
+ '.dump',
64
+ '.sqlite',
65
+ '.sqlite3',
66
+ '.db',
67
+ '.archive',
68
+ '.rdb',
69
+ '.redis',
70
+ '.bson',
71
+ ]
72
+
73
+ // Check for .sql.gz
74
+ if (filename.endsWith('.sql.gz')) return true
75
+
76
+ const ext = extname(filename).toLowerCase()
77
+ return backupExtensions.includes(ext)
78
+ }
79
+
80
+ /**
81
+ * Scan directory for backup files
82
+ */
83
+ function findBackups(directory: string): BackupInfo[] {
84
+ const backups: BackupInfo[] = []
85
+
86
+ try {
87
+ const files = readdirSync(directory)
88
+
89
+ for (const file of files) {
90
+ if (!isBackupFile(file)) continue
91
+
92
+ const filePath = join(directory, file)
93
+ try {
94
+ const stats = statSync(filePath)
95
+ if (!stats.isFile()) continue
96
+
97
+ const { engine, format } = detectBackupType(file)
98
+
99
+ backups.push({
100
+ filename: file,
101
+ path: filePath,
102
+ size: stats.size,
103
+ modified: stats.mtime,
104
+ engine,
105
+ format,
106
+ })
107
+ } catch {
108
+ // Skip files we can't stat
109
+ }
110
+ }
111
+ } catch {
112
+ // Directory doesn't exist or can't be read
113
+ }
114
+
115
+ // Sort by modified date, newest first
116
+ backups.sort((a, b) => b.modified.getTime() - a.modified.getTime())
117
+
118
+ return backups
119
+ }
120
+
121
+ /**
122
+ * Format a relative time string
123
+ */
124
+ function formatRelativeTime(date: Date): string {
125
+ const now = new Date()
126
+ const diffMs = now.getTime() - date.getTime()
127
+ const diffMins = Math.floor(diffMs / 60000)
128
+ const diffHours = Math.floor(diffMs / 3600000)
129
+ const diffDays = Math.floor(diffMs / 86400000)
130
+
131
+ if (diffMins < 1) return 'just now'
132
+ if (diffMins < 60) return `${diffMins}m ago`
133
+ if (diffHours < 24) return `${diffHours}h ago`
134
+ if (diffDays < 7) return `${diffDays}d ago`
135
+
136
+ return date.toLocaleDateString()
137
+ }
138
+
139
+ /**
140
+ * Get engine icon
141
+ */
142
+ function getEngineIcon(engine: string | null): string {
143
+ switch (engine) {
144
+ case 'postgresql':
145
+ return '🐘'
146
+ case 'mysql':
147
+ return '🐬'
148
+ case 'sqlite':
149
+ return '🗄️'
150
+ case 'mongodb':
151
+ return '🍃'
152
+ case 'redis':
153
+ return '🔴'
154
+ default:
155
+ return '📦'
156
+ }
157
+ }
158
+
159
+ export const backupsCommand = new Command('backups')
160
+ .description('List backup files in current directory')
161
+ .argument('[directory]', 'Directory to scan (defaults to current directory)')
162
+ .option('-a, --all', 'Include backups from ~/.spindb/backups as well')
163
+ .option('-n, --limit <count>', 'Limit number of results', '20')
164
+ .option('-j, --json', 'Output as JSON')
165
+ .action(
166
+ async (
167
+ directory: string | undefined,
168
+ options: {
169
+ all?: boolean
170
+ limit?: string
171
+ json?: boolean
172
+ },
173
+ ) => {
174
+ const searchDirs = [directory || process.cwd()]
175
+
176
+ if (options.all) {
177
+ const homeBackups = join(
178
+ process.env.HOME || '',
179
+ '.spindb',
180
+ 'backups',
181
+ )
182
+ searchDirs.push(homeBackups)
183
+ }
184
+
185
+ const allBackups: BackupInfo[] = []
186
+
187
+ for (const dir of searchDirs) {
188
+ const backups = findBackups(dir)
189
+ allBackups.push(...backups)
190
+ }
191
+
192
+ // Sort all backups by date
193
+ allBackups.sort((a, b) => b.modified.getTime() - a.modified.getTime())
194
+
195
+ // Apply limit
196
+ const limit = parseInt(options.limit || '20', 10)
197
+ const limitedBackups = allBackups.slice(0, limit)
198
+
199
+ if (options.json) {
200
+ console.log(
201
+ JSON.stringify(
202
+ limitedBackups.map((b) => ({
203
+ filename: b.filename,
204
+ path: b.path,
205
+ size: b.size,
206
+ modified: b.modified.toISOString(),
207
+ engine: b.engine,
208
+ format: b.format,
209
+ })),
210
+ null,
211
+ 2,
212
+ ),
213
+ )
214
+ return
215
+ }
216
+
217
+ if (limitedBackups.length === 0) {
218
+ console.log()
219
+ console.log(chalk.gray(' No backup files found'))
220
+ console.log()
221
+ console.log(chalk.gray(' Backup files are identified by extensions:'))
222
+ console.log(chalk.gray(' .sql, .dump, .sqlite, .archive, .rdb, .sql.gz'))
223
+ console.log()
224
+ return
225
+ }
226
+
227
+ console.log()
228
+ console.log(chalk.bold(` Found ${allBackups.length} backup(s)`))
229
+ if (allBackups.length > limit) {
230
+ console.log(chalk.gray(` (showing ${limit} most recent)`))
231
+ }
232
+ console.log()
233
+
234
+ // Calculate column widths
235
+ const maxFilename = Math.min(
236
+ 50,
237
+ Math.max(...limitedBackups.map((b) => b.filename.length)),
238
+ )
239
+
240
+ for (const backup of limitedBackups) {
241
+ const icon = getEngineIcon(backup.engine)
242
+ const filename =
243
+ backup.filename.length > maxFilename
244
+ ? backup.filename.slice(0, maxFilename - 3) + '...'
245
+ : backup.filename.padEnd(maxFilename)
246
+
247
+ const size = formatBytes(backup.size).padStart(10)
248
+ const time = formatRelativeTime(backup.modified).padStart(10)
249
+ const format = chalk.gray(backup.format)
250
+
251
+ console.log(` ${icon} ${chalk.cyan(filename)} ${chalk.white(size)} ${chalk.gray(time)} ${format}`)
252
+ }
253
+
254
+ console.log()
255
+ console.log(chalk.gray(' Restore with:'))
256
+ console.log(chalk.cyan(' spindb restore <container> <backup-file>'))
257
+ console.log()
258
+ },
259
+ )
@@ -143,7 +143,6 @@ async function createSqliteContainer(
143
143
  console.log(chalk.cyan(` ${connectionString}`))
144
144
  console.log()
145
145
 
146
- // Connect if requested
147
146
  if (connect) {
148
147
  const config = await containerManager.getConfig(containerName)
149
148
  if (config) {
@@ -445,26 +444,26 @@ export const createCommand = new Command('create')
445
444
  depsSpinner.succeed('Required tools available')
446
445
  }
447
446
 
448
- // For MySQL (and other non-PostgreSQL server DBs), download binaries after dep check
447
+ // For MySQL, MongoDB, Redis (system-installed engines), validate version and get binary path
448
+ // Store the binary path for version consistency
449
+ let binaryPath: string | undefined
449
450
  if (!isPostgreSQL) {
450
451
  const binarySpinner = createSpinner(
451
452
  `Checking ${dbEngine.displayName} ${version} binaries...`,
452
453
  )
453
454
  binarySpinner.start()
454
455
 
455
- const isInstalled = await dbEngine.isBinaryInstalled(version)
456
- if (isInstalled) {
457
- binarySpinner.succeed(
458
- `${dbEngine.displayName} ${version} binaries ready (cached)`,
459
- )
460
- } else {
461
- binarySpinner.text = `Downloading ${dbEngine.displayName} ${version} binaries...`
462
- await dbEngine.ensureBinaries(version, ({ message }) => {
456
+ try {
457
+ // ensureBinaries validates the version and returns the binary path
458
+ binaryPath = await dbEngine.ensureBinaries(version, ({ message }) => {
463
459
  binarySpinner.text = message
464
460
  })
465
461
  binarySpinner.succeed(
466
- `${dbEngine.displayName} ${version} binaries downloaded`,
462
+ `${dbEngine.displayName} ${version} binaries ready`,
467
463
  )
464
+ } catch (error) {
465
+ binarySpinner.fail(`${dbEngine.displayName} ${version} not available`)
466
+ throw error
468
467
  }
469
468
  }
470
469
 
@@ -486,6 +485,7 @@ export const createCommand = new Command('create')
486
485
  version,
487
486
  port,
488
487
  database,
488
+ binaryPath,
489
489
  })
490
490
 
491
491
  tx.addRollback({