@sqldoc/db 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,116 @@
1
+ /**
2
+ * PostgreSQL extension detection and validation.
3
+ *
4
+ * Extracts CREATE EXTENSION names from SQL, validates against pglite
5
+ * or real Postgres, and provides helpful error messages.
6
+ */
7
+ import { createRequire } from 'node:module'
8
+ import pc from 'picocolors'
9
+
10
+ /** Regex to extract extension names from CREATE EXTENSION statements */
11
+ const CREATE_EXT_RE = /CREATE\s+EXTENSION\s+(?:IF\s+NOT\s+EXISTS\s+)?(?:"([^"]+)"|(\w[\w-]*))/gi
12
+
13
+ /**
14
+ * Extract extension names from SQL strings.
15
+ * Handles both quoted and unquoted names, and hyphenated names like uuid-ossp.
16
+ */
17
+ export function extractExtensions(
18
+ sqlFiles: string[],
19
+ filePaths?: string[],
20
+ ): { extensions: string[]; byFile: Map<string, string[]> } {
21
+ const all = new Set<string>()
22
+ const byFile = new Map<string, string[]>()
23
+ for (let i = 0; i < sqlFiles.length; i++) {
24
+ const sql = sqlFiles[i]
25
+ const file = filePaths?.[i] ?? `file${i}`
26
+ const lines = sql.split('\n')
27
+ for (const line of lines) {
28
+ // Skip commented-out lines
29
+ if (line.trimStart().startsWith('--')) continue
30
+ let match
31
+ CREATE_EXT_RE.lastIndex = 0
32
+ while ((match = CREATE_EXT_RE.exec(line)) !== null) {
33
+ const name = (match[1] ?? match[2]).toLowerCase().replace(/-/g, '_')
34
+ all.add(name)
35
+ const existing = byFile.get(name) ?? []
36
+ existing.push(file)
37
+ byFile.set(name, existing)
38
+ }
39
+ }
40
+ }
41
+ return { extensions: [...all], byFile }
42
+ }
43
+
44
+ /**
45
+ * Validate extensions for pglite by attempting to import them.
46
+ * Returns the list of extensions that are available.
47
+ * Throws with a pretty error if any are not available.
48
+ */
49
+ export async function validatePgliteExtensions(requested: string[]): Promise<string[]> {
50
+ if (requested.length === 0) return []
51
+
52
+ // Resolve from this package's directory (pglite is a dep of @sqldoc/db)
53
+ const req = createRequire(import.meta.url)
54
+
55
+ const results: Array<{ name: string; available: boolean }> = []
56
+
57
+ for (const ext of requested) {
58
+ let found = false
59
+ try {
60
+ req.resolve(`@electric-sql/pglite/contrib/${ext}`)
61
+ found = true
62
+ } catch {}
63
+ if (!found)
64
+ try {
65
+ req.resolve(`@electric-sql/pglite/${ext}`)
66
+ found = true
67
+ } catch {}
68
+ results.push({ name: ext, available: found })
69
+ }
70
+
71
+ const unavailable = results.filter((r) => !r.available)
72
+ if (unavailable.length > 0) {
73
+ const lines = results.map((r) => (r.available ? pc.green(` ✓ ${r.name}`) : pc.red(` ✗ ${r.name}`)))
74
+
75
+ throw new Error(
76
+ `Some extensions are not available for the embedded postgres database:\n${lines.join('\n')}\n\n` +
77
+ `Use a Docker image with these extensions installed as devUrl:\n` +
78
+ ` ${pc.cyan('docker://<image>')} — use an image that includes the extension\n` +
79
+ ` ${pc.cyan('dockerfile://path')} — build a custom Dockerfile with the extension`,
80
+ )
81
+ }
82
+
83
+ return requested
84
+ }
85
+
86
+ /**
87
+ * Validate extensions against a real Postgres database.
88
+ * Queries pg_available_extensions to check availability.
89
+ */
90
+ export async function validatePostgresExtensions(
91
+ requested: string[],
92
+ queryFn: (sql: string) => Promise<{ rows: unknown[][] }>,
93
+ ): Promise<void> {
94
+ if (requested.length === 0) return
95
+
96
+ const result = await queryFn(
97
+ 'SELECT name FROM pg_available_extensions WHERE name = ANY(ARRAY[' +
98
+ requested.map((e) => `'${e}'`).join(',') +
99
+ '])',
100
+ )
101
+
102
+ const available = new Set(result.rows.map((r) => String(r[0])))
103
+
104
+ const missing = requested.filter((e) => !available.has(e))
105
+ if (missing.length > 0) {
106
+ const lines = requested.map((ext) => {
107
+ const ok = available.has(ext)
108
+ return ok ? pc.green(` ✓ ${ext}`) : pc.red(` ✗ ${ext}`)
109
+ })
110
+
111
+ throw new Error(
112
+ `Some required extensions are not available on this database:\n${lines.join('\n')}\n\n` +
113
+ `Install the missing extensions or use a Docker image that includes them.`,
114
+ )
115
+ }
116
+ }
package/src/index.ts ADDED
@@ -0,0 +1,122 @@
1
+ // @sqldoc/db -- Atlas WASI integration for sqldoc
2
+ // Schema types, database adapters, and WASI runner
3
+
4
+ import * as fs from 'node:fs'
5
+ import * as path from 'node:path'
6
+ import { fileURLToPath } from 'node:url'
7
+ import { createMysqlAdapter } from './db/mysql.ts'
8
+ import { createMysqlDockerAdapter } from './db/mysql-docker.ts'
9
+ import { createPgliteAdapter } from './db/pglite.ts'
10
+ import { createPostgresAdapter } from './db/postgres.ts'
11
+ import { createPostgresDockerAdapter } from './db/postgres-docker.ts'
12
+ import { createSqliteAdapter } from './db/sqlite.ts'
13
+ import { validatePgliteExtensions, validatePostgresExtensions } from './extensions.ts'
14
+ import { createAtlasRunner } from './runner.ts'
15
+
16
+ export { createMysqlAdapter } from './db/mysql.ts'
17
+ export { createMysqlDockerAdapter } from './db/mysql-docker.ts'
18
+ export { createPgliteAdapter } from './db/pglite.ts'
19
+ export { createPostgresAdapter } from './db/postgres.ts'
20
+ export { createPostgresDockerAdapter } from './db/postgres-docker.ts'
21
+ export { createSqliteAdapter } from './db/sqlite.ts'
22
+ export type { DatabaseAdapter, ExecResult, QueryResult } from './db/types.ts'
23
+ export { extractExtensions, validatePgliteExtensions, validatePostgresExtensions } from './extensions.ts'
24
+ export type { AtlasRunner, AtlasRunnerOptions, DiffSource } from './runner.ts'
25
+ export { createAtlasRunner } from './runner.ts'
26
+ export * from './types.ts'
27
+
28
/** Options accepted by createRunner. */
export interface CreateRunnerConfig {
  /** SQL dialect (required) */
  dialect: 'postgres' | 'mysql' | 'sqlite'
  /** Database connection URL. If omitted, uses dialect-specific default
   * (postgres: 'pglite', sqlite: ':memory:', mysql: 'docker://mysql:8'). */
  devUrl?: string
  /** Postgres extensions to load. Validated against the dev database; loaded
   * automatically in PGlite. Ignored for mysql/sqlite dialects. */
  extensions?: string[]
}
36
+
37
+ /**
38
+ * Resolve the atlas.wasm binary.
39
+ * Binary mode: ATLAS_WASM_PATH env var set by binary entry point.
40
+ * Dev mode: walk up from current directory to find atlas.wasm.
41
+ */
42
+ function resolveWasm(): string {
43
+ // Binary mode: WASM path set by binary entry point
44
+ if (process.env.ATLAS_WASM_PATH) {
45
+ return process.env.ATLAS_WASM_PATH
46
+ }
47
+
48
+ // Dev mode: walk up from current directory to find atlas.wasm
49
+ let dir = path.dirname(fileURLToPath(import.meta.url))
50
+ while (true) {
51
+ for (const candidate of [
52
+ path.join(dir, 'wasm', 'atlas.wasm'),
53
+ path.join(dir, '..', 'wasm', 'atlas.wasm'),
54
+ path.join(dir, 'node_modules', '@sqldoc', 'db', 'wasm', 'atlas.wasm'),
55
+ path.join(dir, 'packages', 'db', 'wasm', 'atlas.wasm'),
56
+ ]) {
57
+ if (fs.existsSync(candidate)) return candidate
58
+ }
59
+ const parent = path.dirname(dir)
60
+ if (parent === dir) break
61
+ dir = parent
62
+ }
63
+ throw new Error(
64
+ 'atlas.wasm not found. Set ATLAS_WASM_PATH or build: cd atlas/cmd/atlas-wasi && GOOS=wasip1 GOARCH=wasm go build -o atlas.wasm .',
65
+ )
66
+ }
67
+
68
+ /** Return the default dev database URL for each dialect */
69
+ function defaultDevUrl(dialect: 'postgres' | 'mysql' | 'sqlite'): string {
70
+ switch (dialect) {
71
+ case 'postgres':
72
+ return 'pglite'
73
+ case 'sqlite':
74
+ return ':memory:'
75
+ case 'mysql':
76
+ return 'docker://mysql:8'
77
+ }
78
+ }
79
+
80
+ /**
81
+ * Create an Atlas runner with sensible defaults.
82
+ * Resolves the wasm binary, detects extensions from SQL files,
83
+ * validates them, and creates the appropriate DB adapter.
84
+ */
85
+ export async function createRunner(config: CreateRunnerConfig): Promise<import('./runner').AtlasRunner> {
86
+ const wasmPath = resolveWasm()
87
+ const dialect = config.dialect
88
+ const devUrl = config.devUrl ?? defaultDevUrl(dialect)
89
+
90
+ const extensions = dialect === 'postgres' ? (config.extensions ?? []) : []
91
+
92
+ let db: import('./db/types').DatabaseAdapter
93
+
94
+ if (dialect === 'sqlite') {
95
+ db = await createSqliteAdapter(devUrl)
96
+ } else if (dialect === 'mysql') {
97
+ if (devUrl.startsWith('docker://')) {
98
+ db = await createMysqlDockerAdapter(devUrl)
99
+ } else {
100
+ db = await createMysqlAdapter(devUrl)
101
+ }
102
+ } else {
103
+ // postgres (existing logic preserved exactly)
104
+ if (devUrl.startsWith('docker://') || devUrl.startsWith('dockerfile://')) {
105
+ db = await createPostgresDockerAdapter(devUrl)
106
+ if (extensions.length > 0) {
107
+ await validatePostgresExtensions(extensions, (sql) => db.query(sql))
108
+ }
109
+ } else if (devUrl.startsWith('postgres://') || devUrl.startsWith('postgresql://')) {
110
+ db = await createPostgresAdapter(devUrl)
111
+ if (extensions.length > 0) {
112
+ await validatePostgresExtensions(extensions, (sql) => db.query(sql))
113
+ }
114
+ } else {
115
+ // pglite (in-memory embedded postgres)
116
+ const validExtensions = extensions.length > 0 ? await validatePgliteExtensions(extensions) : []
117
+ db = await createPgliteAdapter(validExtensions.length > 0 ? validExtensions : undefined)
118
+ }
119
+ }
120
+
121
+ return createAtlasRunner({ wasmPath, db, dialect })
122
+ }
package/src/runner.ts ADDED
@@ -0,0 +1,284 @@
1
+ /**
2
+ * High-level Atlas runner API.
3
+ *
4
+ * Provides inspect() and diff() functions that:
5
+ * 1. Spawn a worker thread running the Atlas WASI module
6
+ * 2. Handle the SharedArrayBuffer bridge loop on the main thread
7
+ * 3. Execute SQL requests from the WASI module via the DatabaseAdapter
8
+ * 4. Return parsed AtlasResult
9
+ *
10
+ * The compiled WebAssembly.Module is cached across commands.
11
+ */
12
+
13
+ import * as fs from 'node:fs'
14
+ import { createRequire } from 'node:module'
15
+ import * as path from 'node:path'
16
+ import { fileURLToPath } from 'node:url'
17
+ import { Worker } from 'node:worker_threads'
18
+ import {
19
+ type BridgeBuffers,
20
+ bridgeReadRequest,
21
+ bridgeRespond,
22
+ bridgeWaitForSignal,
23
+ createBridgeBuffers,
24
+ SIGNAL_DONE,
25
+ SIGNAL_REQUEST,
26
+ } from './bridge.ts'
27
+ import type { DatabaseAdapter } from './db/types.ts'
28
+ import type { AtlasCommand, AtlasRename, AtlasResult } from './types.ts'
29
+
30
/** Options for {@link createAtlasRunner}. */
export interface AtlasRunnerOptions {
  /** Path to atlas.wasm binary */
  wasmPath: string
  /** Database adapter (pglite or pg) used as the default "dev" connection */
  db: DatabaseAdapter
  /** SQL dialect */
  dialect: 'postgres' | 'mysql' | 'sqlite'
}

/** A diff source: SQL file contents (string[]) or a live database (DatabaseAdapter). */
export type DiffSource = string[] | DatabaseAdapter

/** High-level Atlas API backed by the WASI worker. */
export interface AtlasRunner {
  /** Execute SQL files and return parsed schema with tags */
  inspect(files: string[], options?: { schema?: string; fileNames?: string[] }): Promise<AtlasResult>

  /** Compare two schema states. Each side can be SQL or a live database.
   * Live databases are only inspected, never written to. */
  diff(from: DiffSource, to: DiffSource, options?: { schema?: string; renames?: AtlasRename[] }): Promise<AtlasResult>

  /** Clean up resources (closes the underlying database adapter) */
  close(): Promise<void>
}
53
+
54
+ /**
55
+ * Resolve the path to the worker file (.js or .ts).
56
+ *
57
+ * Prefers compiled .js (production / Bun binary). Falls back to raw .ts
58
+ * for workspace development (vitest, tsx, Node --experimental-strip-types).
59
+ */
60
+ function resolveWorkerPath(): { workerPath: string; execArgv: string[] } {
61
+ const thisDir = path.dirname(fileURLToPath(import.meta.url))
62
+ const candidates = [
63
+ path.resolve(thisDir, 'worker.js'), // dist/worker.js (production)
64
+ path.resolve(thisDir, '../dist/worker.js'), // src/../dist/worker.js (legacy)
65
+ path.resolve(thisDir, 'worker.ts'), // src/worker.ts (raw .ts dev)
66
+ ]
67
+
68
+ const isBun = typeof (globalThis as any).Bun !== 'undefined'
69
+
70
+ for (const candidate of candidates) {
71
+ if (fs.existsSync(candidate)) {
72
+ if (candidate.endsWith('.ts') && !isBun) {
73
+ // Node 22.21+ runs .ts natively — no loader needed for workers
74
+ // Older Node needs tsx/cjs to register TypeScript transform
75
+ const [major, minor] = (process.versions?.node ?? '0.0').split('.').map(Number)
76
+ if (major > 22 || (major === 22 && minor >= 21)) {
77
+ return { workerPath: candidate, execArgv: [] }
78
+ }
79
+ const atlasRequire = createRequire(candidate)
80
+ const tsxCjs = atlasRequire.resolve('tsx/cjs')
81
+ return { workerPath: candidate, execArgv: ['--require', tsxCjs] }
82
+ }
83
+ return { workerPath: candidate, execArgv: [] }
84
+ }
85
+ }
86
+
87
+ throw new Error(`Cannot find worker file.\nLooked in:\n${candidates.map((c) => ` ${c}`).join('\n')}`)
88
+ }
89
+
90
/**
 * Run a single Atlas command via a worker thread.
 *
 * Main thread bridge loop:
 * 1. Wait for worker to signal a request via Atomics
 * 2. Read SQL from shared buffer
 * 3. Execute via DatabaseAdapter
 * 4. Write response to shared buffer
 * 5. Repeat until DONE signal
 *
 * @param wasmPath - path to atlas.wasm, forwarded to the worker
 * @param db - default adapter used for bridge SQL requests
 * @param command - serialized to JSON and fed to the worker as stdin
 * @param extraAdapters - adapters keyed by connection name (diff "from"/"to")
 * @returns the worker's stdout parsed as JSON ({} when stdout is empty)
 */
async function runCommand(
  wasmPath: string,
  db: DatabaseAdapter,
  command: AtlasCommand,
  extraAdapters?: Record<string, DatabaseAdapter>,
): Promise<AtlasResult> {
  const buffers = createBridgeBuffers()
  const stdinData = JSON.stringify(command)
  const { workerPath, execArgv } = resolveWorkerPath()

  return new Promise<AtlasResult>((resolve, reject) => {
    const worker = new Worker(workerPath, {
      workerData: {
        wasmPath,
        controlBuffer: buffers.control,
        dataBuffer: buffers.data,
        stdinData,
      },
      execArgv,
    })

    // Guards against settling the promise twice across the three
    // competing event handlers below (message / error / exit).
    let settled = false

    worker.on('message', (msg: { type: string; stdout?: string; error?: string }) => {
      if (settled) return
      settled = true

      if (msg.type === 'error') {
        reject(new Error(`Atlas worker error: ${msg.error}`))
      } else if (msg.type === 'result') {
        try {
          const stdout = (msg.stdout ?? '').trim()
          if (!stdout) {
            // No output from Atlas: treat as an empty result.
            resolve({})
          } else {
            resolve(JSON.parse(stdout))
          }
        } catch (_err: unknown) {
          reject(new Error(`Failed to parse Atlas output: ${msg.stdout}`))
        }
      }
    })

    worker.on('error', (err) => {
      if (settled) return
      settled = true
      reject(err)
    })

    worker.on('exit', (code) => {
      if (settled) return
      settled = true
      if (code !== 0) {
        reject(new Error(`Atlas worker exited with code ${code}`))
      }
    })

    // Start the bridge loop (runs on main thread, async).
    // NOTE(review): if the worker errors or exits before signaling DONE, this
    // loop may keep awaiting a signal that never arrives — confirm
    // bridgeWaitForSignal times out or is otherwise unblocked in that case.
    handleBridgeLoop(buffers, db, extraAdapters).catch((err) => {
      if (!settled) {
        settled = true
        worker.terminate()
        reject(err)
      }
    })
  })
}
167
+
168
+ /**
169
+ * Main-thread bridge loop: handles atlas_sql requests from the worker.
170
+ * Runs until the worker signals DONE.
171
+ */
172
+ async function handleBridgeLoop(
173
+ buffers: BridgeBuffers,
174
+ db: DatabaseAdapter,
175
+ extraAdapters?: Record<string, DatabaseAdapter>,
176
+ ): Promise<void> {
177
+ while (true) {
178
+ const signal = await bridgeWaitForSignal(buffers)
179
+
180
+ if (signal === SIGNAL_DONE) {
181
+ break
182
+ }
183
+
184
+ if (signal !== SIGNAL_REQUEST) {
185
+ // Unexpected signal -- wait again
186
+ // Reset to idle so bridgeWaitForSignal can poll again
187
+ const _control = new Int32Array(buffers.control)
188
+ // If it's RESPONSE, the worker hasn't reset yet. Wait briefly.
189
+ await new Promise((r) => setTimeout(r, 1))
190
+ continue
191
+ }
192
+
193
+ // Read SQL request from shared buffer
194
+ const reqJson = bridgeReadRequest(buffers)
195
+
196
+ let response: Record<string, unknown>
197
+ try {
198
+ const req = JSON.parse(reqJson) as { type: string; sql: string; args?: unknown[]; connection?: string }
199
+
200
+ // Route to the right adapter based on connection field
201
+ const adapter =
202
+ req.connection && req.connection !== 'dev' && extraAdapters?.[req.connection]
203
+ ? extraAdapters[req.connection]
204
+ : db
205
+
206
+ if (req.type === 'query') {
207
+ const result = await adapter.query(req.sql, req.args)
208
+ response = { columns: result.columns, rows: result.rows }
209
+ } else {
210
+ const result = await adapter.exec(req.sql, req.args)
211
+ response = { rows_affected: result.rowsAffected }
212
+ }
213
+ } catch (err: unknown) {
214
+ const message = err instanceof Error ? err.message : String(err)
215
+ response = { error: message }
216
+ }
217
+
218
+ // Write response and notify worker (handle BigInt from pglite)
219
+ bridgeRespond(
220
+ buffers,
221
+ JSON.stringify(response, (_k, v) => (typeof v === 'bigint' ? v.toString() : v)),
222
+ )
223
+
224
+ // After responding, the worker will reset signal to IDLE.
225
+ // We need to wait for it to do so before calling bridgeWaitForSignal again.
226
+ // Small yield to allow worker to process.
227
+ await new Promise((r) => setTimeout(r, 0))
228
+ }
229
+ }
230
+
231
+ /**
232
+ * Create an AtlasRunner instance.
233
+ *
234
+ * The runner caches the compiled WebAssembly.Module (compiles atlas.wasm once).
235
+ * Each inspect/diff call spawns a worker thread that reuses the cached module.
236
+ */
237
+ export async function createAtlasRunner(options: AtlasRunnerOptions): Promise<AtlasRunner> {
238
+ const { wasmPath, db, dialect } = options
239
+
240
+ // Verify wasm file exists
241
+ if (!fs.existsSync(wasmPath)) {
242
+ throw new Error(`Atlas WASM binary not found: ${wasmPath}`)
243
+ }
244
+
245
+ return {
246
+ async inspect(files: string[], opts?: { schema?: string; fileNames?: string[] }): Promise<AtlasResult> {
247
+ const command: AtlasCommand = {
248
+ type: 'inspect',
249
+ dialect,
250
+ files,
251
+ fileNames: opts?.fileNames,
252
+ schema: opts?.schema,
253
+ }
254
+ return runCommand(wasmPath, db, command)
255
+ },
256
+
257
+ async diff(
258
+ from: DiffSource,
259
+ to: DiffSource,
260
+ opts?: { schema?: string; renames?: AtlasRename[] },
261
+ ): Promise<AtlasResult> {
262
+ const fromIsDb = !Array.isArray(from)
263
+ const toIsDb = !Array.isArray(to)
264
+ const command: AtlasCommand = {
265
+ type: 'diff',
266
+ dialect,
267
+ from: fromIsDb ? [] : from,
268
+ to: toIsDb ? [] : to,
269
+ schema: opts?.schema,
270
+ renames: opts?.renames,
271
+ fromConnection: fromIsDb ? 'from' : undefined,
272
+ toConnection: toIsDb ? 'to' : undefined,
273
+ }
274
+ const extraAdapters: Record<string, DatabaseAdapter> = {}
275
+ if (fromIsDb) extraAdapters.from = from as DatabaseAdapter
276
+ if (toIsDb) extraAdapters.to = to as DatabaseAdapter
277
+ return runCommand(wasmPath, db, command, extraAdapters)
278
+ },
279
+
280
+ async close(): Promise<void> {
281
+ await db.close()
282
+ },
283
+ }
284
+ }