@sqldoc/db 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,41 @@
1
+ {
2
+ "type": "module",
3
+ "name": "@sqldoc/db",
4
+ "version": "0.0.3",
5
+ "description": "Atlas WASI integration for sqldoc -- schema types, database adapters, WASI runner",
6
+ "exports": {
7
+ ".": {
8
+ "types": "./src/index.ts",
9
+ "import": "./src/index.ts",
10
+ "default": "./src/index.ts"
11
+ }
12
+ },
13
+ "main": "./src/index.ts",
14
+ "types": "./src/index.ts",
15
+ "files": [
16
+ "src",
17
+ "!src/__tests__",
18
+ "wasm",
19
+ "package.json"
20
+ ],
21
+ "scripts": {
22
+ "test": "node --test 'src/__tests__/**/*.test.ts'",
23
+ "typecheck": "tsc --noEmit",
24
+ "build:wasm": "cd ../../../atlastest/atlas/cmd/atlas-wasi && GOOS=wasip1 GOARCH=wasm go build -o atlas.wasm . && cp atlas.wasm ../../../../sqldoc/packages/db/wasm/atlas.wasm",
25
+ "codegen:schema": "echo 'Types hand-crafted from atlas/sql/schema/schema.go -- see src/types.ts. tygo cannot handle Go interfaces (Attr, Type, Expr). Re-read schema.go and update src/types.ts manually when the Go source changes.'"
26
+ },
27
+ "dependencies": {
28
+ "@electric-sql/pglite": "^0.4.0",
29
+ "@testcontainers/postgresql": "^11.13.0",
30
+ "mysql2": "^3.20.0",
31
+ "pg": "^8.13.0",
32
+ "picocolors": "catalog:",
33
+ "testcontainers": "^11.13.0"
34
+ },
35
+ "devDependencies": {
36
+ "@types/pg": "^8.11.0",
37
+ "tsx": "^4.21.0",
38
+ "typescript": "catalog:",
39
+ "@sqldoc/test-utils": "workspace:*"
40
+ }
41
+ }
package/src/bridge.ts ADDED
@@ -0,0 +1,152 @@
1
+ /**
2
+ * Sync/async bridge for atlas_sql host function.
3
+ *
4
+ * Architecture:
5
+ * - Worker thread runs WASI module, calls atlas_sql synchronously
6
+ * - atlas_sql writes SQL request to shared buffer, signals main thread, blocks via Atomics.wait
7
+ * - Main thread receives request, runs async DB query, writes response, signals via Atomics.notify
8
+ *
9
+ * Control buffer layout (Int32Array over SharedArrayBuffer):
10
+ * [0] = signal: 0=idle, 1=request_ready, 2=response_ready, 3=done
11
+ * [1] = data length (request or response)
12
+ */
13
+
// Bridge protocol signal values stored in control[0].
/** No request or response in flight; the worker may post the next request. */
export const SIGNAL_IDLE = 0
/** Worker has written a request to the data buffer (byte length in control[1]). */
export const SIGNAL_REQUEST = 1
/** Main thread has written a response to the data buffer (byte length in control[1]). */
export const SIGNAL_RESPONSE = 2
/** Bridge is shutting down; no further responses will arrive. */
export const SIGNAL_DONE = 3
19
+
/**
 * Shared buffers passed between the main thread and the worker thread.
 * Both sides create typed-array views over these SharedArrayBuffers;
 * the buffers themselves are allocated once by createBridgeBuffers().
 */
export interface BridgeBuffers {
  /** 8 bytes viewed as Int32Array: [0] = signal, [1] = data byte length */
  control: SharedArrayBuffer
  /** Data exchange buffer for request/response JSON payloads (default 1MB) */
  data: SharedArrayBuffer
}
29
+
30
+ /**
31
+ * Create shared buffers for bridge communication.
32
+ * @param dataSize Initial data buffer size in bytes (default 1MB)
33
+ */
34
+ export function createBridgeBuffers(dataSize = 1024 * 1024): BridgeBuffers {
35
+ return {
36
+ control: new SharedArrayBuffer(8),
37
+ data: new SharedArrayBuffer(dataSize),
38
+ }
39
+ }
40
+
41
+ // ── Worker-side (synchronous) ───────────────────────────────────────
42
+
/**
 * Write a request to the shared buffer and block until a response is available.
 * Called from the worker thread inside the atlas_sql host function.
 *
 * Protocol: write the request bytes, publish the length, flip the signal to
 * SIGNAL_REQUEST, wake the main thread, then block until the signal leaves
 * SIGNAL_REQUEST (the main thread sets SIGNAL_RESPONSE or SIGNAL_DONE).
 *
 * @param buffers Shared control/data buffers created by createBridgeBuffers
 * @param requestJson Request payload as a JSON string
 * @returns Response JSON string written by the main thread
 * @throws Error if the encoded request exceeds the data buffer, or if the
 *   bridge was terminated (SIGNAL_DONE) while waiting
 */
export function bridgeRequest(buffers: BridgeBuffers, requestJson: string): string {
  const control = new Int32Array(buffers.control)
  const encoder = new TextEncoder()
  const decoder = new TextDecoder()

  // Write request to data buffer
  const encoded = encoder.encode(requestJson)
  if (encoded.byteLength > buffers.data.byteLength) {
    throw new Error(`Bridge request too large: ${encoded.byteLength} bytes > ${buffers.data.byteLength} buffer`)
  }
  new Uint8Array(buffers.data).set(encoded)

  // Publish the length BEFORE flipping the signal, so the main thread can
  // never observe SIGNAL_REQUEST paired with a stale length.
  Atomics.store(control, 1, encoded.byteLength)
  Atomics.store(control, 0, SIGNAL_REQUEST)
  Atomics.notify(control, 0)

  // Block until the signal changes from SIGNAL_REQUEST. If the main thread
  // already responded, Atomics.wait returns immediately with 'not-equal'.
  Atomics.wait(control, 0, SIGNAL_REQUEST)

  // Read response
  const signal = Atomics.load(control, 0)
  if (signal === SIGNAL_DONE) {
    throw new Error('Bridge terminated while waiting for response')
  }

  const respLen = Atomics.load(control, 1)
  const respBytes = new Uint8Array(buffers.data, 0, respLen)
  // slice() copies the bytes out of the SharedArrayBuffer before decoding
  // (TextDecoder rejects views backed by shared memory).
  const response = decoder.decode(respBytes.slice())

  // Reset signal to idle for next round-trip
  Atomics.store(control, 0, SIGNAL_IDLE)

  return response
}
84
+
85
+ // ── Main-thread side (asynchronous) ─────────────────────────────────
86
+
87
+ /**
88
+ * Wait for the worker to post a request. Uses Atomics.waitAsync
89
+ * (non-blocking on the main thread).
90
+ *
91
+ * @returns The current signal value after waking
92
+ */
93
+ export async function bridgeWaitForSignal(buffers: BridgeBuffers): Promise<number> {
94
+ const control = new Int32Array(buffers.control)
95
+
96
+ // If signal is already non-idle, return immediately
97
+ const current = Atomics.load(control, 0)
98
+ if (current !== SIGNAL_IDLE) {
99
+ return current
100
+ }
101
+
102
+ // Wait asynchronously for signal to change from IDLE
103
+ const result = Atomics.waitAsync(control, 0, SIGNAL_IDLE)
104
+ if (result.async) {
105
+ await result.value
106
+ }
107
+ return Atomics.load(control, 0)
108
+ }
109
+
110
+ /**
111
+ * Read the request JSON from the shared data buffer.
112
+ * Call after bridgeWaitForSignal returns SIGNAL_REQUEST.
113
+ */
114
+ export function bridgeReadRequest(buffers: BridgeBuffers): string {
115
+ const control = new Int32Array(buffers.control)
116
+ const reqLen = Atomics.load(control, 1)
117
+ const reqBytes = new Uint8Array(buffers.data, 0, reqLen)
118
+ return new TextDecoder().decode(reqBytes.slice())
119
+ }
120
+
121
+ /**
122
+ * Write a response to the shared buffer and notify the worker.
123
+ */
124
+ export function bridgeRespond(buffers: BridgeBuffers, responseJson: string): void {
125
+ const control = new Int32Array(buffers.control)
126
+ const encoded = new TextEncoder().encode(responseJson)
127
+
128
+ if (encoded.byteLength > buffers.data.byteLength) {
129
+ // Write an error response instead -- the data buffer is too small
130
+ const errorResp = JSON.stringify({
131
+ error: `Response too large: ${encoded.byteLength} bytes exceeds ${buffers.data.byteLength} byte buffer`,
132
+ })
133
+ const errorEncoded = new TextEncoder().encode(errorResp)
134
+ new Uint8Array(buffers.data).set(errorEncoded)
135
+ Atomics.store(control, 1, errorEncoded.byteLength)
136
+ } else {
137
+ new Uint8Array(buffers.data).set(encoded)
138
+ Atomics.store(control, 1, encoded.byteLength)
139
+ }
140
+
141
+ Atomics.store(control, 0, SIGNAL_RESPONSE)
142
+ Atomics.notify(control, 0)
143
+ }
144
+
145
+ /**
146
+ * Signal the worker that processing is done (no more commands).
147
+ */
148
+ export function bridgeSignalDone(buffers: BridgeBuffers): void {
149
+ const control = new Int32Array(buffers.control)
150
+ Atomics.store(control, 0, SIGNAL_DONE)
151
+ Atomics.notify(control, 0)
152
+ }
@@ -0,0 +1,54 @@
1
+ /**
2
+ * MySQL Docker adapter using testcontainers GenericContainer.
3
+ * Uses log-based wait strategy (same as Postgres adapter) for Bun compatibility —
4
+ * MySqlContainer's built-in health check hangs in Bun's runtime.
5
+ */
6
+ import { GenericContainer, Wait } from 'testcontainers'
7
+ import { createMysqlAdapter } from './mysql.ts'
8
+ import type { DatabaseAdapter } from './types.ts'
9
+
10
+ /**
11
+ * Create a Docker-based MySQL DatabaseAdapter using testcontainers.
12
+ * Supports: docker://mysql:8, docker://mysql:8.0, docker://mariadb:10, etc.
13
+ */
14
+ export async function createMysqlDockerAdapter(devUrl: string): Promise<DatabaseAdapter> {
15
+ const imageName = devUrl.slice('docker://'.length)
16
+
17
+ const container = await new GenericContainer(imageName)
18
+ .withEnvironment({
19
+ MYSQL_ROOT_PASSWORD: 'sqldoc',
20
+ MYSQL_DATABASE: 'sqldoc_dev',
21
+ })
22
+ .withExposedPorts(3306)
23
+ .withWaitStrategy(Wait.forLogMessage(/ready for connections.*port: 3306/, 2))
24
+ .start()
25
+
26
+ const host = container.getHost()
27
+ const port = container.getMappedPort(3306)
28
+ const connectionUri = `mysql://root:sqldoc@${host}:${port}/sqldoc_dev`
29
+
30
+ // Retry connection — container may need a moment after port is mapped
31
+ let mysqlAdapter: DatabaseAdapter | undefined
32
+ for (let i = 0; i < 10; i++) {
33
+ try {
34
+ mysqlAdapter = await createMysqlAdapter(connectionUri)
35
+ break
36
+ } catch {
37
+ await new Promise((resolve) => setTimeout(resolve, 1000))
38
+ }
39
+ }
40
+
41
+ if (!mysqlAdapter) {
42
+ await container.stop()
43
+ throw new Error(`Failed to connect to Docker MySQL at ${connectionUri}`)
44
+ }
45
+
46
+ return {
47
+ query: mysqlAdapter.query,
48
+ exec: mysqlAdapter.exec,
49
+ async close() {
50
+ await mysqlAdapter.close()
51
+ await container.stop()
52
+ },
53
+ }
54
+ }
@@ -0,0 +1,44 @@
1
+ /**
2
+ * MySQL DatabaseAdapter using mysql2/promise.
3
+ * Single connection (no pooling) -- sufficient for schema inspection.
4
+ */
5
+ import mysql from 'mysql2/promise'
6
+ import type { DatabaseAdapter, ExecResult, QueryResult } from './types.ts'
7
+
8
+ /**
9
+ * Normalize row values to types the Go database/sql driver can scan.
10
+ * mysql2 returns Date objects for datetime, Buffer for binary, BigInt for BIGINT.
11
+ */
12
+ function normalizeValue(val: unknown): unknown {
13
+ if (val === null || val === undefined) return null
14
+ if (typeof val === 'bigint') return val.toString()
15
+ if (val instanceof Date) return val.toISOString()
16
+ if (Buffer.isBuffer(val)) return val.toString('hex')
17
+ if (typeof val === 'object') return JSON.stringify(val)
18
+ return val
19
+ }
20
+
21
+ /**
22
+ * Create a MySQL DatabaseAdapter connected to an existing MySQL instance.
23
+ * Uses mysql2/promise for async connection management.
24
+ */
25
+ export async function createMysqlAdapter(connectionString: string): Promise<DatabaseAdapter> {
26
+ const connection = await mysql.createConnection(connectionString)
27
+
28
+ return {
29
+ async query(sql: string, args?: unknown[]): Promise<QueryResult> {
30
+ const [rows, fields] = await connection.query({ sql, values: args, rowsAsArray: true })
31
+ return {
32
+ columns: (fields as mysql.FieldPacket[]).map((f) => f.name),
33
+ rows: (rows as unknown[][]).map((row) => (row as unknown[]).map(normalizeValue)),
34
+ }
35
+ },
36
+ async exec(sql: string, args?: unknown[]): Promise<ExecResult> {
37
+ const [result] = await connection.query(sql, args as any)
38
+ return { rowsAffected: (result as mysql.ResultSetHeader).affectedRows ?? 0 }
39
+ },
40
+ async close(): Promise<void> {
41
+ await connection.end()
42
+ },
43
+ }
44
+ }
@@ -0,0 +1,77 @@
1
+ import { PGlite } from '@electric-sql/pglite'
2
+ import type { DatabaseAdapter, ExecResult, QueryResult } from './types.ts'
3
+
4
+ /**
5
+ * Normalize a row value to a type the Go database/sql driver can scan.
6
+ */
7
+ function normalizeValue(val: unknown): unknown {
8
+ if (val === null || val === undefined) return null
9
+ if (typeof val === 'bigint') return val.toString()
10
+ if (val instanceof Date) return val.toISOString()
11
+ if (typeof val === 'object') {
12
+ return JSON.stringify(val, (_k, v) => (typeof v === 'bigint' ? v.toString() : v))
13
+ }
14
+ return val
15
+ }
16
+
17
+ /**
18
+ * Dynamically load a pglite extension.
19
+ * Tries @electric-sql/pglite/contrib/{name} first (most extensions),
20
+ * then @electric-sql/pglite/{name} (built-in extensions like live).
21
+ * Returns the extension object or null if not available.
22
+ */
23
+ async function loadPgliteExtension(name: string): Promise<any | null> {
24
+ // Try contrib first (most extensions live here)
25
+ try {
26
+ const mod = await import(`@electric-sql/pglite/contrib/${name}`)
27
+ return mod.default ?? mod[name] ?? mod
28
+ } catch {
29
+ /* not in contrib */
30
+ }
31
+
32
+ // Try top-level (built-in extensions like live, vector)
33
+ try {
34
+ const mod = await import(`@electric-sql/pglite/${name}`)
35
+ return mod.default ?? mod[name] ?? mod
36
+ } catch {
37
+ /* not available */
38
+ }
39
+
40
+ return null
41
+ }
42
+
43
+ /**
44
+ * Create a pglite-based DatabaseAdapter (in-memory, zero-config).
45
+ * Optionally loads extensions from @electric-sql/pglite/contrib.
46
+ */
47
+ export async function createPgliteAdapter(extensions?: string[]): Promise<DatabaseAdapter> {
48
+ const extModules: Record<string, any> = {}
49
+ for (const ext of extensions ?? []) {
50
+ const mod = await loadPgliteExtension(ext)
51
+ if (mod) {
52
+ extModules[ext] = mod
53
+ }
54
+ }
55
+
56
+ const db = await PGlite.create({
57
+ extensions: Object.keys(extModules).length > 0 ? extModules : undefined,
58
+ })
59
+
60
+ return {
61
+ async query(sql: string, args?: unknown[]): Promise<QueryResult> {
62
+ const result = await db.query(sql, args, { rowMode: 'array' })
63
+ return {
64
+ columns: result.fields.map((f: { name: string }) => f.name),
65
+ rows: (result.rows as unknown[][]).map((row) => row.map(normalizeValue)),
66
+ }
67
+ },
68
+ async exec(sql: string): Promise<ExecResult> {
69
+ const result = await db.exec(sql)
70
+ const last = result[result.length - 1]
71
+ return { rowsAffected: last?.affectedRows ?? 0 }
72
+ },
73
+ async close(): Promise<void> {
74
+ await db.close()
75
+ },
76
+ }
77
+ }
@@ -0,0 +1,91 @@
1
+ import * as path from 'node:path'
2
+ import { PostgreSqlContainer } from '@testcontainers/postgresql'
3
+ import { GenericContainer, Wait } from 'testcontainers'
4
+ import { createPostgresAdapter } from './postgres.ts'
5
+ import type { DatabaseAdapter } from './types.ts'
6
+
7
+ /**
8
+ * Create a Docker-based DatabaseAdapter using testcontainers.
9
+ * Spins up an ephemeral Postgres container, connects via pg, and
10
+ * automatically cleans up on close() (testcontainers + Ryuk handle
11
+ * orphan cleanup even on process crash).
12
+ *
13
+ * Supports:
14
+ * docker://postgres:16 — official or custom image
15
+ * dockerfile://path/to/file — build from Dockerfile
16
+ */
17
+ export async function createPostgresDockerAdapter(devUrl: string): Promise<DatabaseAdapter> {
18
+ if (devUrl.startsWith('dockerfile://')) {
19
+ return createFromDockerfile(devUrl.slice('dockerfile://'.length))
20
+ }
21
+
22
+ // docker://image:tag
23
+ const imageName = devUrl.slice('docker://'.length)
24
+ const container = await new PostgreSqlContainer(imageName)
25
+ .withDatabase('sqldoc_dev')
26
+ .withUsername('sqldoc')
27
+ .withPassword('sqldoc')
28
+ .withExposedPorts(5432)
29
+ .withWaitStrategy(Wait.forLogMessage('database system is ready to accept connections', 2))
30
+ .start()
31
+
32
+ const pgAdapter = await createPostgresAdapter(container.getConnectionUri())
33
+
34
+ return {
35
+ query: pgAdapter.query,
36
+ exec: pgAdapter.exec,
37
+ async close() {
38
+ await pgAdapter.close()
39
+ await container.stop()
40
+ },
41
+ }
42
+ }
43
+
44
+ async function createFromDockerfile(dockerfilePath: string): Promise<DatabaseAdapter> {
45
+ const absPath = path.resolve(dockerfilePath)
46
+ const dir = path.dirname(absPath)
47
+ const file = path.basename(absPath)
48
+
49
+ const image = await GenericContainer.fromDockerfile(dir, file).build()
50
+
51
+ const container = await image
52
+ .withEnvironment({
53
+ POSTGRES_DB: 'sqldoc_dev',
54
+ POSTGRES_USER: 'sqldoc',
55
+ POSTGRES_PASSWORD: 'sqldoc',
56
+ })
57
+ .withExposedPorts(5432)
58
+ .withWaitStrategy(Wait.forLogMessage('database system is ready to accept connections', 2))
59
+ .withStartupTimeout(30_000)
60
+ .start()
61
+
62
+ // Wait for postgres to accept connections
63
+ const host = container.getHost()
64
+ const port = container.getMappedPort(5432)
65
+ const connectionUri = `postgres://sqldoc:sqldoc@${host}:${port}/sqldoc_dev`
66
+
67
+ // Retry connection — container may need a moment after port is mapped
68
+ let pgAdapter: DatabaseAdapter | undefined
69
+ for (let i = 0; i < 10; i++) {
70
+ try {
71
+ pgAdapter = await createPostgresAdapter(connectionUri)
72
+ break
73
+ } catch {
74
+ await new Promise((resolve) => setTimeout(resolve, 1000))
75
+ }
76
+ }
77
+
78
+ if (!pgAdapter) {
79
+ await container.stop()
80
+ throw new Error(`Failed to connect to Docker Postgres at ${connectionUri}`)
81
+ }
82
+
83
+ return {
84
+ query: pgAdapter.query,
85
+ exec: pgAdapter.exec,
86
+ async close() {
87
+ await pgAdapter.close()
88
+ await container.stop()
89
+ },
90
+ }
91
+ }
@@ -0,0 +1,44 @@
1
+ import { Client } from 'pg'
2
+ import type { DatabaseAdapter, ExecResult, QueryResult } from './types.ts'
3
+
4
+ /**
5
+ * Normalize row values to types the Go database/sql driver can scan.
6
+ * The WASI JSON protocol expects strings, numbers, booleans, and null.
7
+ */
8
+ function normalizeValue(val: unknown): unknown {
9
+ if (val === null || val === undefined) return null
10
+ if (typeof val === 'bigint') return val.toString()
11
+ if (val instanceof Date) return val.toISOString()
12
+ if (Buffer.isBuffer(val)) return val.toString('hex')
13
+ if (typeof val === 'object') {
14
+ return JSON.stringify(val, (_k, v) => (typeof v === 'bigint' ? v.toString() : v))
15
+ }
16
+ return val
17
+ }
18
+
19
+ /**
20
+ * Create a postgres-based DatabaseAdapter using the pg library.
21
+ * Connects to an external Postgres instance via connection string.
22
+ */
23
+ export async function createPostgresAdapter(connectionString: string): Promise<DatabaseAdapter> {
24
+ const client = new Client({ connectionString })
25
+ await client.connect()
26
+ return {
27
+ async query(sql: string, args?: unknown[]): Promise<QueryResult> {
28
+ // Use rowMode: 'array' to get positional values, not name-keyed objects.
29
+ // This handles duplicate column names (e.g. multiple current_setting() calls).
30
+ const result = await client.query({ text: sql, values: args, rowMode: 'array' })
31
+ return {
32
+ columns: result.fields.map((f: { name: string }) => f.name),
33
+ rows: (result.rows as unknown[][]).map((row) => row.map(normalizeValue)),
34
+ }
35
+ },
36
+ async exec(sql: string): Promise<ExecResult> {
37
+ const result = await client.query(sql)
38
+ return { rowsAffected: result.rowCount ?? 0 }
39
+ },
40
+ async close(): Promise<void> {
41
+ await client.end()
42
+ },
43
+ }
44
+ }
@@ -0,0 +1,104 @@
1
+ /**
2
+ * SQLite DatabaseAdapter with runtime detection.
3
+ * Uses bun:sqlite when running in Bun, node:sqlite when running in Node.
4
+ * Both are synchronous APIs wrapped in async methods to match the DatabaseAdapter interface.
5
+ */
6
+ import type { SQLInputValue } from 'node:sqlite'
7
+ import type { DatabaseAdapter, ExecResult, QueryResult } from './types.ts'
8
+
9
+ /**
10
+ * Normalize row values to types the Go database/sql driver can scan.
11
+ * SQLite returns integers for booleans, stores dates as strings or numbers.
12
+ */
13
+ function normalizeValue(val: unknown): unknown {
14
+ if (val === null || val === undefined) return null
15
+ if (typeof val === 'bigint') return val.toString()
16
+ if (val instanceof Uint8Array || Buffer.isBuffer(val)) return Buffer.from(val).toString('hex')
17
+ if (typeof val === 'object') return JSON.stringify(val)
18
+ return val
19
+ }
20
+
21
+ /**
22
+ * Create a SQLite adapter using node:sqlite (Node.js 22.5+).
23
+ * Uses the built-in DatabaseSync — no native addon required.
24
+ */
25
+ async function createNodeSqliteAdapter(filename: string): Promise<DatabaseAdapter> {
26
+ const { DatabaseSync } = await import('node:sqlite')
27
+ const db = new DatabaseSync(filename)
28
+
29
+ return {
30
+ async query(sql: string, args?: SQLInputValue[]): Promise<QueryResult> {
31
+ const stmt = db.prepare(sql)
32
+ const rows = args ? stmt.all(...args) : stmt.all()
33
+ if (rows.length === 0) {
34
+ return { columns: [], rows: [] }
35
+ }
36
+ const columns = Object.keys(rows[0])
37
+ return {
38
+ columns,
39
+ rows: rows.map((row: Record<string, unknown>) => columns.map((c) => normalizeValue(row[c]))),
40
+ }
41
+ },
42
+ async exec(sql: string, args?: SQLInputValue[]): Promise<ExecResult> {
43
+ if (args && args.length > 0) {
44
+ const result = db.prepare(sql).run(...args)
45
+ return { rowsAffected: Number(result.changes) }
46
+ }
47
+ db.exec(sql)
48
+ return { rowsAffected: 0 }
49
+ },
50
+ async close(): Promise<void> {
51
+ db.close()
52
+ },
53
+ }
54
+ }
55
+
56
+ /**
57
+ * Create a SQLite adapter using bun:sqlite (Bun runtime).
58
+ * Uses dynamic import since bun:sqlite is only available in Bun.1`
59
+ */
60
+ async function createBunSqliteAdapter(filename: string): Promise<DatabaseAdapter> {
61
+ // @ts-expect-error -- bun:sqlite only exists in the Bun runtime; guarded by isBun check
62
+ const { Database } = await import('bun:sqlite')
63
+ const db = new Database(filename)
64
+
65
+ return {
66
+ async query(sql: string, args?: unknown[]): Promise<QueryResult> {
67
+ const stmt = db.query(sql)
68
+ const columns = stmt.columnNames
69
+ const rows = args ? stmt.values(...args) : stmt.values()
70
+ return {
71
+ columns,
72
+ rows: (rows as unknown[][]).map((row) => row.map(normalizeValue)),
73
+ }
74
+ },
75
+ async exec(sql: string, args?: unknown[]): Promise<ExecResult> {
76
+ if (args && args.length > 0) {
77
+ const stmt = db.query(sql)
78
+ const result = stmt.run(...args)
79
+ return { rowsAffected: result.changes ?? 0 }
80
+ }
81
+ db.exec(sql)
82
+ return { rowsAffected: 0 }
83
+ },
84
+ async close(): Promise<void> {
85
+ db.close()
86
+ },
87
+ }
88
+ }
89
+
90
+ /**
91
+ * Create a SQLite DatabaseAdapter with runtime detection.
92
+ * Detects whether running in Bun or Node and uses the appropriate driver:
93
+ * - Bun: uses built-in bun:sqlite (zero dependencies)
94
+ * - Node: uses node:sqlite (built-in, Node 22.5+)
95
+ */
96
+ export async function createSqliteAdapter(filename: string): Promise<DatabaseAdapter> {
97
+ const isBun = typeof (globalThis as any).Bun !== 'undefined'
98
+
99
+ if (isBun) {
100
+ return createBunSqliteAdapter(filename)
101
+ }
102
+
103
+ return createNodeSqliteAdapter(filename)
104
+ }
@@ -0,0 +1,26 @@
/**
 * DatabaseAdapter abstracts the database connection used by the Atlas WASI host.
 * Implemented by the pglite (in-memory), pg, mysql2, node:sqlite/bun:sqlite,
 * and Docker-backed adapters in this package.
 */
export interface DatabaseAdapter {
  /** Run a row-returning statement; args are positional placeholder values. */
  query(sql: string, args?: unknown[]): Promise<QueryResult>
  /** Run a DDL/DML statement; some adapters ignore args (see each implementation). */
  exec(sql: string, args?: unknown[]): Promise<ExecResult>
  /** Release the underlying connection (Docker adapters also stop their container). */
  close(): Promise<void>
}
10
+
/**
 * Result of a SELECT-style query.
 * Matches the WASI protocol Response shape for "query" requests.
 */
export interface QueryResult {
  /** Column names in result order (duplicates preserved by the adapters' array row modes). */
  columns: string[]
  /** Positional row values, normalized by each adapter to JSON-safe primitives. */
  rows: unknown[][]
}
19
+
/**
 * Result of a DDL/DML execution.
 * Matches the WASI protocol Response shape for "exec" requests.
 */
export interface ExecResult {
  /** Rows changed by the statement; 0 when the driver reports no count. */
  rowsAffected: number
}