@sqldoc/atlas 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +37 -0
- package/src/__tests__/bridge.test.ts +210 -0
- package/src/__tests__/docker.test.ts +87 -0
- package/src/__tests__/extensions.test.ts +148 -0
- package/src/__tests__/pglite.test.ts +57 -0
- package/src/__tests__/runner.test.ts +151 -0
- package/src/__tests__/types.test.ts +77 -0
- package/src/__tests__/wasi-compat.test.ts +41 -0
- package/src/bridge.ts +152 -0
- package/src/db/docker.ts +91 -0
- package/src/db/pglite.ts +77 -0
- package/src/db/postgres.ts +44 -0
- package/src/db/types.ts +26 -0
- package/src/extensions.ts +116 -0
- package/src/index.ts +97 -0
- package/src/runner.ts +263 -0
- package/src/types.ts +305 -0
- package/src/wasi-host.ts +175 -0
- package/src/worker.ts +106 -0
- package/wasm/atlas.wasm +0 -0
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
import { describe, expect, it } from 'vitest'
|
|
2
|
+
import { type AtlasAttr, type AtlasCheck, type AtlasComment, type AtlasTag, findTags, isTag } from '../types'
|
|
3
|
+
|
|
4
|
+
// Unit tests for the isTag type guard from ../types: a Tag has a Name and
// Args, while Comment ({ Text }) and Check ({ Name, Expr }) attrs must be
// rejected, as must non-objects and null.
describe('isTag', () => {
  it('returns true for a valid Tag attr', () => {
    const tag: AtlasTag = { Name: 'pii.mask', Args: '' }
    expect(isTag(tag)).toBe(true)
  })

  it('returns true for a Tag with non-empty Args', () => {
    const tag: AtlasTag = { Name: 'audit.track', Args: 'on: [delete, update]' }
    expect(isTag(tag)).toBe(true)
  })

  it('returns false for a Comment attr', () => {
    const comment: AtlasComment = { Text: 'User account table' }
    expect(isTag(comment)).toBe(false)
  })

  it('returns false for a Check attr (has Name AND Expr)', () => {
    const check: AtlasCheck = { Name: 'email_check', Expr: "email LIKE '%@%'" }
    expect(isTag(check)).toBe(false)
  })

  it('returns false for an unknown Record attr', () => {
    const unknown: Record<string, unknown> = { Filename: 'schema.sql', Start: { Line: 1 } }
    expect(isTag(unknown)).toBe(false)
  })

  it('returns false for null', () => {
    expect(isTag(null as unknown as AtlasAttr)).toBe(false)
  })

  it('returns false for a non-object', () => {
    expect(isTag('string' as unknown as AtlasAttr)).toBe(false)
  })
})
|
|
38
|
+
|
|
39
|
+
// Unit tests for findTags from ../types: filters a heterogeneous Attr array
// down to Tag attrs only, preserving order, and tolerates undefined input.
describe('findTags', () => {
  it('returns empty array for undefined attrs', () => {
    expect(findTags(undefined)).toEqual([])
  })

  it('returns empty array for empty attrs', () => {
    expect(findTags([])).toEqual([])
  })

  it('extracts only Tags from a mixed Attr array', () => {
    const attrs: AtlasAttr[] = [
      { Name: 'pii.mask', Args: '' },
      { Text: 'User email address' },
      { Name: 'gql.filter', Args: '' },
      { Name: 'email_check', Expr: "email LIKE '%@%'" },
      { Filename: 'schema.sql', Start: { Line: 5 } },
      { Name: 'gql.order', Args: 'asc' },
    ]

    const tags = findTags(attrs)
    expect(tags).toHaveLength(3)
    expect(tags[0]).toEqual({ Name: 'pii.mask', Args: '' })
    expect(tags[1]).toEqual({ Name: 'gql.filter', Args: '' })
    expect(tags[2]).toEqual({ Name: 'gql.order', Args: 'asc' })
  })

  it('returns all items when all are Tags', () => {
    const attrs: AtlasAttr[] = [
      { Name: 'ns.a', Args: 'x' },
      { Name: 'ns.b', Args: '' },
    ]
    expect(findTags(attrs)).toHaveLength(2)
  })

  it('returns empty array when no Tags present', () => {
    const attrs: AtlasAttr[] = [{ Text: 'A comment' }, { Name: 'chk', Expr: 'id > 0' }]
    expect(findTags(attrs)).toHaveLength(0)
  })
})
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import { describe, expect, it } from 'vitest'
|
|
2
|
+
import { getWasiImports } from '../wasi-host'
|
|
3
|
+
|
|
4
|
+
// Unit tests for getWasiImports from ../wasi-host: the runtime-compat shim
// that prefers Node's wasi.getImportObject() API and falls back to Bun's
// wasiImport property, throwing when neither is present.
describe('getWasiImports', () => {
  it('uses getImportObject when available (Node.js API)', () => {
    const mockWasi = {
      getImportObject: () => ({
        wasi_snapshot_preview1: { fd_write: () => {} },
      }),
    }
    const result = getWasiImports(mockWasi as any)
    expect(result).toHaveProperty('wasi_snapshot_preview1')
    expect(result.wasi_snapshot_preview1).toHaveProperty('fd_write')
  })

  it('uses wasiImport when getImportObject is missing (Bun API)', () => {
    const mockWasi = {
      wasiImport: { fd_write: () => {}, fd_read: () => {} },
    }
    const result = getWasiImports(mockWasi as any)
    expect(result).toHaveProperty('wasi_snapshot_preview1')
    expect(result.wasi_snapshot_preview1).toHaveProperty('fd_write')
    expect(result.wasi_snapshot_preview1).toHaveProperty('fd_read')
  })

  it('prefers getImportObject over wasiImport when both exist', () => {
    const mockWasi = {
      getImportObject: () => ({
        wasi_snapshot_preview1: { from_getImportObject: true },
      }),
      wasiImport: { from_wasiImport: true },
    }
    const result = getWasiImports(mockWasi as any)
    expect(result.wasi_snapshot_preview1).toHaveProperty('from_getImportObject')
  })

  it('throws when neither API is available', () => {
    const mockWasi = {}
    expect(() => getWasiImports(mockWasi as any)).toThrow('Cannot get WASI imports')
  })
})
|
package/src/bridge.ts
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Sync/async bridge for atlas_sql host function.
|
|
3
|
+
*
|
|
4
|
+
* Architecture:
|
|
5
|
+
* - Worker thread runs WASI module, calls atlas_sql synchronously
|
|
6
|
+
* - atlas_sql writes SQL request to shared buffer, signals main thread, blocks via Atomics.wait
|
|
7
|
+
* - Main thread receives request, runs async DB query, writes response, signals via Atomics.notify
|
|
8
|
+
*
|
|
9
|
+
* Control buffer layout (Int32Array over SharedArrayBuffer):
|
|
10
|
+
* [0] = signal: 0=idle, 1=request_ready, 2=response_ready, 3=done
|
|
11
|
+
* [1] = data length (request or response)
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
// Signal constants — the values written to control[0] (see the buffer
// layout documented in the file header).
export const SIGNAL_IDLE = 0 // no request in flight; worker may post one
export const SIGNAL_REQUEST = 1 // worker wrote a request; main thread should read it
export const SIGNAL_RESPONSE = 2 // main thread wrote a response; worker should read it
export const SIGNAL_DONE = 3 // bridge shut down; worker must stop waiting
|
|
19
|
+
|
|
20
|
+
/**
 * Shared buffers passed between main and worker thread.
 *
 * Both sides create typed-array views over the same underlying memory, so
 * writes on one thread become visible to the other under the Atomics
 * protocol implemented in this file.
 */
export interface BridgeBuffers {
  /** 8 bytes: signal (Int32) + data length (Int32) */
  control: SharedArrayBuffer
  /** Data exchange buffer (default 1MB, resizable) */
  data: SharedArrayBuffer
}
|
|
29
|
+
|
|
30
|
+
/**
|
|
31
|
+
* Create shared buffers for bridge communication.
|
|
32
|
+
* @param dataSize Initial data buffer size in bytes (default 1MB)
|
|
33
|
+
*/
|
|
34
|
+
export function createBridgeBuffers(dataSize = 1024 * 1024): BridgeBuffers {
|
|
35
|
+
return {
|
|
36
|
+
control: new SharedArrayBuffer(8),
|
|
37
|
+
data: new SharedArrayBuffer(dataSize),
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
// ── Worker-side (synchronous) ───────────────────────────────────────

/**
 * Write a request to the shared buffer and block until a response is available.
 * Called from the worker thread inside the atlas_sql host function.
 *
 * Round-trip, mirroring the main-thread side below:
 *   1. copy the UTF-8 request into the data buffer
 *   2. publish its length in control[1], then flip control[0] to SIGNAL_REQUEST
 *   3. block in Atomics.wait until control[0] changes
 *   4. read the response bytes, reset control[0] to SIGNAL_IDLE
 *
 * NOTE: must only run on a worker thread — Atomics.wait blocks the caller.
 *
 * @param buffers Shared control/data buffers from createBridgeBuffers
 * @param requestJson Request payload as a JSON string
 * @returns Response JSON string written by the main thread
 * @throws if the request exceeds the data buffer, or the bridge was shut
 *         down (SIGNAL_DONE) while waiting
 */
export function bridgeRequest(buffers: BridgeBuffers, requestJson: string): string {
  const control = new Int32Array(buffers.control)
  const encoder = new TextEncoder()
  const decoder = new TextDecoder()

  // Write request to data buffer
  const encoded = encoder.encode(requestJson)
  if (encoded.byteLength > buffers.data.byteLength) {
    throw new Error(`Bridge request too large: ${encoded.byteLength} bytes > ${buffers.data.byteLength} buffer`)
  }
  new Uint8Array(buffers.data).set(encoded)

  // Store length BEFORE flipping the signal, so the reader never pairs a
  // fresh signal with a stale length.
  Atomics.store(control, 1, encoded.byteLength)
  Atomics.store(control, 0, SIGNAL_REQUEST)
  Atomics.notify(control, 0)

  // Block until response is ready (value changes from SIGNAL_REQUEST)
  Atomics.wait(control, 0, SIGNAL_REQUEST)

  // Read response
  const signal = Atomics.load(control, 0)
  if (signal === SIGNAL_DONE) {
    throw new Error('Bridge terminated while waiting for response')
  }

  const respLen = Atomics.load(control, 1)
  const respBytes = new Uint8Array(buffers.data, 0, respLen)
  // .slice() copies out of shared memory — TextDecoder cannot decode
  // SharedArrayBuffer-backed views.
  const response = decoder.decode(respBytes.slice())

  // Reset signal to idle for next round-trip
  Atomics.store(control, 0, SIGNAL_IDLE)

  return response
}
|
|
84
|
+
|
|
85
|
+
// ── Main-thread side (asynchronous) ─────────────────────────────────
|
|
86
|
+
|
|
87
|
+
/**
|
|
88
|
+
* Wait for the worker to post a request. Uses Atomics.waitAsync
|
|
89
|
+
* (non-blocking on the main thread).
|
|
90
|
+
*
|
|
91
|
+
* @returns The current signal value after waking
|
|
92
|
+
*/
|
|
93
|
+
export async function bridgeWaitForSignal(buffers: BridgeBuffers): Promise<number> {
|
|
94
|
+
const control = new Int32Array(buffers.control)
|
|
95
|
+
|
|
96
|
+
// If signal is already non-idle, return immediately
|
|
97
|
+
const current = Atomics.load(control, 0)
|
|
98
|
+
if (current !== SIGNAL_IDLE) {
|
|
99
|
+
return current
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
// Wait asynchronously for signal to change from IDLE
|
|
103
|
+
const result = Atomics.waitAsync(control, 0, SIGNAL_IDLE)
|
|
104
|
+
if (result.async) {
|
|
105
|
+
await result.value
|
|
106
|
+
}
|
|
107
|
+
return Atomics.load(control, 0)
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
/**
|
|
111
|
+
* Read the request JSON from the shared data buffer.
|
|
112
|
+
* Call after bridgeWaitForSignal returns SIGNAL_REQUEST.
|
|
113
|
+
*/
|
|
114
|
+
export function bridgeReadRequest(buffers: BridgeBuffers): string {
|
|
115
|
+
const control = new Int32Array(buffers.control)
|
|
116
|
+
const reqLen = Atomics.load(control, 1)
|
|
117
|
+
const reqBytes = new Uint8Array(buffers.data, 0, reqLen)
|
|
118
|
+
return new TextDecoder().decode(reqBytes.slice())
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
/**
|
|
122
|
+
* Write a response to the shared buffer and notify the worker.
|
|
123
|
+
*/
|
|
124
|
+
export function bridgeRespond(buffers: BridgeBuffers, responseJson: string): void {
|
|
125
|
+
const control = new Int32Array(buffers.control)
|
|
126
|
+
const encoded = new TextEncoder().encode(responseJson)
|
|
127
|
+
|
|
128
|
+
if (encoded.byteLength > buffers.data.byteLength) {
|
|
129
|
+
// Write an error response instead -- the data buffer is too small
|
|
130
|
+
const errorResp = JSON.stringify({
|
|
131
|
+
error: `Response too large: ${encoded.byteLength} bytes exceeds ${buffers.data.byteLength} byte buffer`,
|
|
132
|
+
})
|
|
133
|
+
const errorEncoded = new TextEncoder().encode(errorResp)
|
|
134
|
+
new Uint8Array(buffers.data).set(errorEncoded)
|
|
135
|
+
Atomics.store(control, 1, errorEncoded.byteLength)
|
|
136
|
+
} else {
|
|
137
|
+
new Uint8Array(buffers.data).set(encoded)
|
|
138
|
+
Atomics.store(control, 1, encoded.byteLength)
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
Atomics.store(control, 0, SIGNAL_RESPONSE)
|
|
142
|
+
Atomics.notify(control, 0)
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
/**
|
|
146
|
+
* Signal the worker that processing is done (no more commands).
|
|
147
|
+
*/
|
|
148
|
+
export function bridgeSignalDone(buffers: BridgeBuffers): void {
|
|
149
|
+
const control = new Int32Array(buffers.control)
|
|
150
|
+
Atomics.store(control, 0, SIGNAL_DONE)
|
|
151
|
+
Atomics.notify(control, 0)
|
|
152
|
+
}
|
package/src/db/docker.ts
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
import * as path from 'node:path'
|
|
2
|
+
import { PostgreSqlContainer } from '@testcontainers/postgresql'
|
|
3
|
+
import { GenericContainer, Wait } from 'testcontainers'
|
|
4
|
+
import { createPostgresAdapter } from './postgres.ts'
|
|
5
|
+
import type { DatabaseAdapter } from './types.ts'
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Create a Docker-based DatabaseAdapter using testcontainers.
|
|
9
|
+
* Spins up an ephemeral Postgres container, connects via pg, and
|
|
10
|
+
* automatically cleans up on close() (testcontainers + Ryuk handle
|
|
11
|
+
* orphan cleanup even on process crash).
|
|
12
|
+
*
|
|
13
|
+
* Supports:
|
|
14
|
+
* docker://postgres:16 — official or custom image
|
|
15
|
+
* dockerfile://path/to/file — build from Dockerfile
|
|
16
|
+
*/
|
|
17
|
+
export async function createDockerAdapter(devUrl: string): Promise<DatabaseAdapter> {
|
|
18
|
+
if (devUrl.startsWith('dockerfile://')) {
|
|
19
|
+
return createFromDockerfile(devUrl.slice('dockerfile://'.length))
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
// docker://image:tag
|
|
23
|
+
const imageName = devUrl.slice('docker://'.length)
|
|
24
|
+
const container = await new PostgreSqlContainer(imageName)
|
|
25
|
+
.withDatabase('sqldoc_dev')
|
|
26
|
+
.withUsername('sqldoc')
|
|
27
|
+
.withPassword('sqldoc')
|
|
28
|
+
.withExposedPorts(5432)
|
|
29
|
+
.withWaitStrategy(Wait.forLogMessage('database system is ready to accept connections', 2))
|
|
30
|
+
.start()
|
|
31
|
+
|
|
32
|
+
const pgAdapter = await createPostgresAdapter(container.getConnectionUri())
|
|
33
|
+
|
|
34
|
+
return {
|
|
35
|
+
query: pgAdapter.query,
|
|
36
|
+
exec: pgAdapter.exec,
|
|
37
|
+
async close() {
|
|
38
|
+
await pgAdapter.close()
|
|
39
|
+
await container.stop()
|
|
40
|
+
},
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
async function createFromDockerfile(dockerfilePath: string): Promise<DatabaseAdapter> {
|
|
45
|
+
const absPath = path.resolve(dockerfilePath)
|
|
46
|
+
const dir = path.dirname(absPath)
|
|
47
|
+
const file = path.basename(absPath)
|
|
48
|
+
|
|
49
|
+
const image = await GenericContainer.fromDockerfile(dir, file).build()
|
|
50
|
+
|
|
51
|
+
const container = await image
|
|
52
|
+
.withEnvironment({
|
|
53
|
+
POSTGRES_DB: 'sqldoc_dev',
|
|
54
|
+
POSTGRES_USER: 'sqldoc',
|
|
55
|
+
POSTGRES_PASSWORD: 'sqldoc',
|
|
56
|
+
})
|
|
57
|
+
.withExposedPorts(5432)
|
|
58
|
+
.withWaitStrategy(Wait.forLogMessage('database system is ready to accept connections', 2))
|
|
59
|
+
.withStartupTimeout(30_000)
|
|
60
|
+
.start()
|
|
61
|
+
|
|
62
|
+
// Wait for postgres to accept connections
|
|
63
|
+
const host = container.getHost()
|
|
64
|
+
const port = container.getMappedPort(5432)
|
|
65
|
+
const connectionUri = `postgres://sqldoc:sqldoc@${host}:${port}/sqldoc_dev`
|
|
66
|
+
|
|
67
|
+
// Retry connection — container may need a moment after port is mapped
|
|
68
|
+
let pgAdapter: DatabaseAdapter | undefined
|
|
69
|
+
for (let i = 0; i < 10; i++) {
|
|
70
|
+
try {
|
|
71
|
+
pgAdapter = await createPostgresAdapter(connectionUri)
|
|
72
|
+
break
|
|
73
|
+
} catch {
|
|
74
|
+
await new Promise((resolve) => setTimeout(resolve, 1000))
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
if (!pgAdapter) {
|
|
79
|
+
await container.stop()
|
|
80
|
+
throw new Error(`Failed to connect to Docker Postgres at ${connectionUri}`)
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
return {
|
|
84
|
+
query: pgAdapter.query,
|
|
85
|
+
exec: pgAdapter.exec,
|
|
86
|
+
async close() {
|
|
87
|
+
await pgAdapter.close()
|
|
88
|
+
await container.stop()
|
|
89
|
+
},
|
|
90
|
+
}
|
|
91
|
+
}
|
package/src/db/pglite.ts
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
import { PGlite } from '@electric-sql/pglite'
|
|
2
|
+
import type { DatabaseAdapter, ExecResult, QueryResult } from './types.ts'
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* Normalize a row value to a type the Go database/sql driver can scan.
|
|
6
|
+
*/
|
|
7
|
+
function normalizeValue(val: unknown): unknown {
|
|
8
|
+
if (val === null || val === undefined) return null
|
|
9
|
+
if (typeof val === 'bigint') return val.toString()
|
|
10
|
+
if (val instanceof Date) return val.toISOString()
|
|
11
|
+
if (typeof val === 'object') {
|
|
12
|
+
return JSON.stringify(val, (_k, v) => (typeof v === 'bigint' ? v.toString() : v))
|
|
13
|
+
}
|
|
14
|
+
return val
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
/**
|
|
18
|
+
* Dynamically load a pglite extension.
|
|
19
|
+
* Tries @electric-sql/pglite/contrib/{name} first (most extensions),
|
|
20
|
+
* then @electric-sql/pglite/{name} (built-in extensions like live).
|
|
21
|
+
* Returns the extension object or null if not available.
|
|
22
|
+
*/
|
|
23
|
+
async function loadPgliteExtension(name: string): Promise<any | null> {
|
|
24
|
+
// Try contrib first (most extensions live here)
|
|
25
|
+
try {
|
|
26
|
+
const mod = await import(`@electric-sql/pglite/contrib/${name}`)
|
|
27
|
+
return mod.default ?? mod[name] ?? mod
|
|
28
|
+
} catch {
|
|
29
|
+
/* not in contrib */
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
// Try top-level (built-in extensions like live, vector)
|
|
33
|
+
try {
|
|
34
|
+
const mod = await import(`@electric-sql/pglite/${name}`)
|
|
35
|
+
return mod.default ?? mod[name] ?? mod
|
|
36
|
+
} catch {
|
|
37
|
+
/* not available */
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
return null
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
/**
|
|
44
|
+
* Create a pglite-based DatabaseAdapter (in-memory, zero-config).
|
|
45
|
+
* Optionally loads extensions from @electric-sql/pglite/contrib.
|
|
46
|
+
*/
|
|
47
|
+
export async function createPgliteAdapter(extensions?: string[]): Promise<DatabaseAdapter> {
|
|
48
|
+
const extModules: Record<string, any> = {}
|
|
49
|
+
for (const ext of extensions ?? []) {
|
|
50
|
+
const mod = await loadPgliteExtension(ext)
|
|
51
|
+
if (mod) {
|
|
52
|
+
extModules[ext] = mod
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
const db = await PGlite.create({
|
|
57
|
+
extensions: Object.keys(extModules).length > 0 ? extModules : undefined,
|
|
58
|
+
})
|
|
59
|
+
|
|
60
|
+
return {
|
|
61
|
+
async query(sql: string, args?: unknown[]): Promise<QueryResult> {
|
|
62
|
+
const result = await db.query(sql, args, { rowMode: 'array' })
|
|
63
|
+
return {
|
|
64
|
+
columns: result.fields.map((f: { name: string }) => f.name),
|
|
65
|
+
rows: (result.rows as unknown[][]).map((row) => row.map(normalizeValue)),
|
|
66
|
+
}
|
|
67
|
+
},
|
|
68
|
+
async exec(sql: string): Promise<ExecResult> {
|
|
69
|
+
const result = await db.exec(sql)
|
|
70
|
+
const last = result[result.length - 1]
|
|
71
|
+
return { rowsAffected: last?.affectedRows ?? 0 }
|
|
72
|
+
},
|
|
73
|
+
async close(): Promise<void> {
|
|
74
|
+
await db.close()
|
|
75
|
+
},
|
|
76
|
+
}
|
|
77
|
+
}
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import { Client } from 'pg'
|
|
2
|
+
import type { DatabaseAdapter, ExecResult, QueryResult } from './types.ts'
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* Normalize row values to types the Go database/sql driver can scan.
|
|
6
|
+
* The WASI JSON protocol expects strings, numbers, booleans, and null.
|
|
7
|
+
*/
|
|
8
|
+
function normalizeValue(val: unknown): unknown {
|
|
9
|
+
if (val === null || val === undefined) return null
|
|
10
|
+
if (typeof val === 'bigint') return val.toString()
|
|
11
|
+
if (val instanceof Date) return val.toISOString()
|
|
12
|
+
if (Buffer.isBuffer(val)) return val.toString('hex')
|
|
13
|
+
if (typeof val === 'object') {
|
|
14
|
+
return JSON.stringify(val, (_k, v) => (typeof v === 'bigint' ? v.toString() : v))
|
|
15
|
+
}
|
|
16
|
+
return val
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
/**
|
|
20
|
+
* Create a postgres-based DatabaseAdapter using the pg library.
|
|
21
|
+
* Connects to an external Postgres instance via connection string.
|
|
22
|
+
*/
|
|
23
|
+
export async function createPostgresAdapter(connectionString: string): Promise<DatabaseAdapter> {
|
|
24
|
+
const client = new Client({ connectionString })
|
|
25
|
+
await client.connect()
|
|
26
|
+
return {
|
|
27
|
+
async query(sql: string, args?: unknown[]): Promise<QueryResult> {
|
|
28
|
+
// Use rowMode: 'array' to get positional values, not name-keyed objects.
|
|
29
|
+
// This handles duplicate column names (e.g. multiple current_setting() calls).
|
|
30
|
+
const result = await client.query({ text: sql, values: args, rowMode: 'array' })
|
|
31
|
+
return {
|
|
32
|
+
columns: result.fields.map((f: { name: string }) => f.name),
|
|
33
|
+
rows: (result.rows as unknown[][]).map((row) => row.map(normalizeValue)),
|
|
34
|
+
}
|
|
35
|
+
},
|
|
36
|
+
async exec(sql: string): Promise<ExecResult> {
|
|
37
|
+
const result = await client.query(sql)
|
|
38
|
+
return { rowsAffected: result.rowCount ?? 0 }
|
|
39
|
+
},
|
|
40
|
+
async close(): Promise<void> {
|
|
41
|
+
await client.end()
|
|
42
|
+
},
|
|
43
|
+
}
|
|
44
|
+
}
|
package/src/db/types.ts
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
 * DatabaseAdapter abstracts the database connection used by the Atlas WASI host.
 * Both pglite (in-memory, zero-config) and pg (external Postgres) implement this.
 */
export interface DatabaseAdapter {
  /** Run a SELECT-style statement; returns column names plus positional rows. */
  query(sql: string, args?: unknown[]): Promise<QueryResult>
  /** Run a DDL/DML statement; returns the affected-row count. */
  exec(sql: string, args?: unknown[]): Promise<ExecResult>
  /** Release the underlying connection or embedded instance. */
  close(): Promise<void>
}

/**
 * Result of a SELECT-style query.
 * Matches the WASI protocol Response shape for "query" requests.
 */
export interface QueryResult {
  /** Column names, in result order. */
  columns: string[]
  /** Row values, positional — index-aligned with `columns`. */
  rows: unknown[][]
}

/**
 * Result of a DDL/DML execution.
 * Matches the WASI protocol Response shape for "exec" requests.
 */
export interface ExecResult {
  rowsAffected: number
}
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* PostgreSQL extension detection and validation.
|
|
3
|
+
*
|
|
4
|
+
* Extracts CREATE EXTENSION names from SQL, validates against pglite
|
|
5
|
+
* or real Postgres, and provides helpful error messages.
|
|
6
|
+
*/
|
|
7
|
+
import { createRequire } from 'node:module'
|
|
8
|
+
import pc from 'picocolors'
|
|
9
|
+
|
|
10
|
+
/** Regex to extract extension names from CREATE EXTENSION statements */
|
|
11
|
+
const CREATE_EXT_RE = /CREATE\s+EXTENSION\s+(?:IF\s+NOT\s+EXISTS\s+)?(?:"([^"]+)"|(\w[\w-]*))/gi
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Extract extension names from SQL strings.
|
|
15
|
+
* Handles both quoted and unquoted names, and hyphenated names like uuid-ossp.
|
|
16
|
+
*/
|
|
17
|
+
export function extractExtensions(
|
|
18
|
+
sqlFiles: string[],
|
|
19
|
+
filePaths?: string[],
|
|
20
|
+
): { extensions: string[]; byFile: Map<string, string[]> } {
|
|
21
|
+
const all = new Set<string>()
|
|
22
|
+
const byFile = new Map<string, string[]>()
|
|
23
|
+
for (let i = 0; i < sqlFiles.length; i++) {
|
|
24
|
+
const sql = sqlFiles[i]
|
|
25
|
+
const file = filePaths?.[i] ?? `file${i}`
|
|
26
|
+
const lines = sql.split('\n')
|
|
27
|
+
for (const line of lines) {
|
|
28
|
+
// Skip commented-out lines
|
|
29
|
+
if (line.trimStart().startsWith('--')) continue
|
|
30
|
+
let match
|
|
31
|
+
CREATE_EXT_RE.lastIndex = 0
|
|
32
|
+
while ((match = CREATE_EXT_RE.exec(line)) !== null) {
|
|
33
|
+
const name = (match[1] ?? match[2]).toLowerCase().replace(/-/g, '_')
|
|
34
|
+
all.add(name)
|
|
35
|
+
const existing = byFile.get(name) ?? []
|
|
36
|
+
existing.push(file)
|
|
37
|
+
byFile.set(name, existing)
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
}
|
|
41
|
+
return { extensions: [...all], byFile }
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
/**
|
|
45
|
+
* Validate extensions for pglite by attempting to import them.
|
|
46
|
+
* Returns the list of extensions that are available.
|
|
47
|
+
* Throws with a pretty error if any are not available.
|
|
48
|
+
*/
|
|
49
|
+
export async function validatePgliteExtensions(requested: string[]): Promise<string[]> {
|
|
50
|
+
if (requested.length === 0) return []
|
|
51
|
+
|
|
52
|
+
// Resolve from this package's directory (pglite is a dep of @sqldoc/atlas)
|
|
53
|
+
const req = createRequire(import.meta.url)
|
|
54
|
+
|
|
55
|
+
const results: Array<{ name: string; available: boolean }> = []
|
|
56
|
+
|
|
57
|
+
for (const ext of requested) {
|
|
58
|
+
let found = false
|
|
59
|
+
try {
|
|
60
|
+
req.resolve(`@electric-sql/pglite/contrib/${ext}`)
|
|
61
|
+
found = true
|
|
62
|
+
} catch {}
|
|
63
|
+
if (!found)
|
|
64
|
+
try {
|
|
65
|
+
req.resolve(`@electric-sql/pglite/${ext}`)
|
|
66
|
+
found = true
|
|
67
|
+
} catch {}
|
|
68
|
+
results.push({ name: ext, available: found })
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
const unavailable = results.filter((r) => !r.available)
|
|
72
|
+
if (unavailable.length > 0) {
|
|
73
|
+
const lines = results.map((r) => (r.available ? pc.green(` ✓ ${r.name}`) : pc.red(` ✗ ${r.name}`)))
|
|
74
|
+
|
|
75
|
+
throw new Error(
|
|
76
|
+
`Some extensions are not available for the embedded postgres database:\n${lines.join('\n')}\n\n` +
|
|
77
|
+
`Use a Docker image with these extensions installed as devUrl:\n` +
|
|
78
|
+
` ${pc.cyan('docker://<image>')} — use an image that includes the extension\n` +
|
|
79
|
+
` ${pc.cyan('dockerfile://path')} — build a custom Dockerfile with the extension`,
|
|
80
|
+
)
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
return requested
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
/**
|
|
87
|
+
* Validate extensions against a real Postgres database.
|
|
88
|
+
* Queries pg_available_extensions to check availability.
|
|
89
|
+
*/
|
|
90
|
+
export async function validatePostgresExtensions(
|
|
91
|
+
requested: string[],
|
|
92
|
+
queryFn: (sql: string) => Promise<{ rows: unknown[][] }>,
|
|
93
|
+
): Promise<void> {
|
|
94
|
+
if (requested.length === 0) return
|
|
95
|
+
|
|
96
|
+
const result = await queryFn(
|
|
97
|
+
'SELECT name FROM pg_available_extensions WHERE name = ANY(ARRAY[' +
|
|
98
|
+
requested.map((e) => `'${e}'`).join(',') +
|
|
99
|
+
'])',
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
const available = new Set(result.rows.map((r) => String(r[0])))
|
|
103
|
+
|
|
104
|
+
const missing = requested.filter((e) => !available.has(e))
|
|
105
|
+
if (missing.length > 0) {
|
|
106
|
+
const lines = requested.map((ext) => {
|
|
107
|
+
const ok = available.has(ext)
|
|
108
|
+
return ok ? pc.green(` ✓ ${ext}`) : pc.red(` ✗ ${ext}`)
|
|
109
|
+
})
|
|
110
|
+
|
|
111
|
+
throw new Error(
|
|
112
|
+
`Some required extensions are not available on this database:\n${lines.join('\n')}\n\n` +
|
|
113
|
+
`Install the missing extensions or use a Docker image that includes them.`,
|
|
114
|
+
)
|
|
115
|
+
}
|
|
116
|
+
}
|