@squadbase/vite-server 0.0.1-build-7 → 0.0.1-build-8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/index.js +201 -8
- package/dist/index.d.ts +126 -1
- package/dist/index.js +475 -9
- package/dist/main.js +205 -8
- package/dist/types/data-source.d.ts +5 -5
- package/dist/vite-plugin.js +209 -7
- package/package.json +8 -2
package/dist/main.js
CHANGED
@@ -37,6 +37,11 @@ function resolveEnvVar(entry, key, connectionId) {
   }
   return value;
 }
+function resolveEnvVarOptional(entry, key) {
+  const envVarName = entry.envVars[key];
+  if (!envVarName) return void 0;
+  return process.env[envVarName] || void 0;
+}

 // src/connector-client/bigquery.ts
 function createBigQueryClient(entry, connectionId) {
@@ -48,7 +53,7 @@ function createBigQueryClient(entry, connectionId) {
       gcpCredentials = JSON.parse(serviceAccountJson);
     } catch {
       throw new Error(
-        `BigQuery service account JSON (decoded from base64) is not valid JSON for
+        `BigQuery service account JSON (decoded from base64) is not valid JSON for connectionId "${connectionId}"`
       );
     }
     return {
@@ -105,6 +110,176 @@ function createSnowflakeClient(entry, connectionId) {
   };
 }

+// src/connector-client/mysql.ts
+function createMySQLClient(entry, connectionId) {
+  const connectionUrl = resolveEnvVar(entry, "connection-url", connectionId);
+  let poolPromise = null;
+  function getPool() {
+    if (!poolPromise) {
+      poolPromise = import("mysql2/promise").then(
+        (mysql) => mysql.default.createPool(connectionUrl)
+      );
+    }
+    return poolPromise;
+  }
+  return {
+    async query(sql, params) {
+      const pool = await getPool();
+      const [rows] = await pool.execute(sql, params);
+      return { rows };
+    }
+  };
+}
+
+// src/connector-client/aws-athena.ts
+function createAthenaClient(entry, connectionId) {
+  const region = resolveEnvVar(entry, "aws-region", connectionId);
+  const accessKeyId = resolveEnvVar(entry, "aws-access-key-id", connectionId);
+  const secretAccessKey = resolveEnvVar(entry, "aws-secret-access-key", connectionId);
+  const workgroup = resolveEnvVarOptional(entry, "workgroup") ?? "primary";
+  const outputLocation = resolveEnvVarOptional(entry, "output-location");
+  return {
+    async query(sql) {
+      const {
+        AthenaClient,
+        StartQueryExecutionCommand,
+        GetQueryExecutionCommand,
+        GetQueryResultsCommand
+      } = await import("@aws-sdk/client-athena");
+      const client = new AthenaClient({
+        region,
+        credentials: { accessKeyId, secretAccessKey }
+      });
+      const startParams = {
+        QueryString: sql,
+        WorkGroup: workgroup
+      };
+      if (outputLocation) {
+        startParams.ResultConfiguration = { OutputLocation: outputLocation };
+      }
+      const { QueryExecutionId } = await client.send(
+        new StartQueryExecutionCommand(startParams)
+      );
+      if (!QueryExecutionId) throw new Error("Athena: failed to start query execution");
+      while (true) {
+        const { QueryExecution } = await client.send(
+          new GetQueryExecutionCommand({ QueryExecutionId })
+        );
+        const state = QueryExecution?.Status?.State;
+        if (state === "SUCCEEDED") break;
+        if (state === "FAILED") {
+          throw new Error(
+            `Athena query failed: ${QueryExecution?.Status?.StateChangeReason ?? "unknown"}`
+          );
+        }
+        if (state === "CANCELLED") throw new Error("Athena query was cancelled");
+        await new Promise((r) => setTimeout(r, 500));
+      }
+      const { ResultSet } = await client.send(
+        new GetQueryResultsCommand({ QueryExecutionId })
+      );
+      const resultRows = ResultSet?.Rows ?? [];
+      if (resultRows.length === 0) return { rows: [] };
+      const headers = resultRows[0].Data?.map((d) => d.VarCharValue ?? "") ?? [];
+      const rows = resultRows.slice(1).map((row) => {
+        const obj = {};
+        row.Data?.forEach((d, i) => {
+          obj[headers[i]] = d.VarCharValue ?? null;
+        });
+        return obj;
+      });
+      return { rows };
+    }
+  };
+}
+
+// src/connector-client/redshift.ts
+function createRedshiftClient(entry, connectionId) {
+  const region = resolveEnvVar(entry, "aws-region", connectionId);
+  const accessKeyId = resolveEnvVar(entry, "aws-access-key-id", connectionId);
+  const secretAccessKey = resolveEnvVar(entry, "aws-secret-access-key", connectionId);
+  const database = resolveEnvVar(entry, "database", connectionId);
+  const clusterIdentifier = resolveEnvVarOptional(entry, "cluster-identifier");
+  const workgroupName = resolveEnvVarOptional(entry, "workgroup-name");
+  const secretArn = resolveEnvVarOptional(entry, "secret-arn");
+  const dbUser = resolveEnvVarOptional(entry, "db-user");
+  return {
+    async query(sql) {
+      const {
+        RedshiftDataClient,
+        ExecuteStatementCommand,
+        DescribeStatementCommand,
+        GetStatementResultCommand
+      } = await import("@aws-sdk/client-redshift-data");
+      const client = new RedshiftDataClient({
+        region,
+        credentials: { accessKeyId, secretAccessKey }
+      });
+      const executeParams = {
+        Sql: sql,
+        Database: database
+      };
+      if (clusterIdentifier) executeParams.ClusterIdentifier = clusterIdentifier;
+      if (workgroupName) executeParams.WorkgroupName = workgroupName;
+      if (secretArn) executeParams.SecretArn = secretArn;
+      if (dbUser) executeParams.DbUser = dbUser;
+      const { Id } = await client.send(
+        new ExecuteStatementCommand(executeParams)
+      );
+      if (!Id) throw new Error("Redshift: failed to start statement execution");
+      while (true) {
+        const desc = await client.send(new DescribeStatementCommand({ Id }));
+        const status = desc.Status;
+        if (status === "FINISHED") break;
+        if (status === "FAILED") {
+          throw new Error(`Redshift query failed: ${desc.Error ?? "unknown"}`);
+        }
+        if (status === "ABORTED") throw new Error("Redshift query was aborted");
+        await new Promise((r) => setTimeout(r, 500));
+      }
+      const result = await client.send(new GetStatementResultCommand({ Id }));
+      const columns = result.ColumnMetadata?.map((c) => c.name ?? "") ?? [];
+      const rows = (result.Records ?? []).map((record) => {
+        const obj = {};
+        record.forEach((field, i) => {
+          const col = columns[i];
+          const value = field.stringValue ?? field.longValue ?? field.doubleValue ?? field.booleanValue ?? (field.isNull ? null : field.blobValue ?? null);
+          obj[col] = value;
+        });
+        return obj;
+      });
+      return { rows };
+    }
+  };
+}
+
+// src/connector-client/databricks.ts
+function createDatabricksClient(entry, connectionId) {
+  const host = resolveEnvVar(entry, "host", connectionId);
+  const httpPath = resolveEnvVar(entry, "http-path", connectionId);
+  const token = resolveEnvVar(entry, "token", connectionId);
+  return {
+    async query(sql) {
+      const { DBSQLClient } = await import("@databricks/sql");
+      const client = new DBSQLClient();
+      await client.connect({ host, path: httpPath, token });
+      try {
+        const session = await client.openSession();
+        try {
+          const operation = await session.executeStatement(sql);
+          const result = await operation.fetchAll();
+          await operation.close();
+          return { rows: result };
+        } finally {
+          await session.close();
+        }
+      } finally {
+        await client.close();
+      }
+    }
+  };
+}
+
 // src/connector-client/registry.ts
 function createConnectorRegistry() {
   let connectionsCache = null;
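
The four factories above all return the same minimal client shape that the registry and the SQL handler later in this diff rely on. A sketch of that implied contract (the type names are illustrative; the shipped bundle is plain JavaScript and exports no such types):

    // Inferred from the diff, not shipped by the package.
    interface ConnectionEntry {
      envVars: Record<string, string>; // logical key -> environment variable name
    }

    interface ConnectorClient {
      // createMySQLClient accepts (sql, params); the Athena, Redshift, and
      // Databricks clients take only sql, because their queries arrive with
      // parameter values already inlined as literals (see the
      // isLiteralConnector branch further down).
      query(sql: string, params?: unknown[]): Promise<{ rows: unknown[] }>;
    }
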
@@ -112,7 +287,7 @@ function createConnectorRegistry() {
   function getConnectionsFilePath() {
     return process.env.CONNECTIONS_PATH ?? path.join(process.cwd(), ".squadbase/connections.json");
   }
-  function
+  function loadConnections2() {
     if (connectionsCache !== null) return connectionsCache;
     const filePath = getConnectionsFilePath();
     try {
@@ -124,7 +299,7 @@ function createConnectorRegistry() {
     return connectionsCache;
   }
   async function getClient2(connectionId) {
-    const connections =
+    const connections = loadConnections2();
     const entry = connections[connectionId];
     if (!entry) {
       throw new Error(`connection '${connectionId}' not found in .squadbase/connections.json`);
@@ -138,6 +313,20 @@ function createConnectorRegistry() {
     if (connectorSlug === "bigquery") {
       return { client: createBigQueryClient(entry, connectionId), connectorSlug };
     }
+    if (connectorSlug === "athena") {
+      return { client: createAthenaClient(entry, connectionId), connectorSlug };
+    }
+    if (connectorSlug === "redshift") {
+      return { client: createRedshiftClient(entry, connectionId), connectorSlug };
+    }
+    if (connectorSlug === "databricks") {
+      return { client: createDatabricksClient(entry, connectionId), connectorSlug };
+    }
+    if (connectorSlug === "mysql") {
+      const client = createMySQLClient(entry, connectionId);
+      clientCache.set(connectionId, client);
+      return { client, connectorSlug };
+    }
     if (connectorSlug === "postgresql" || connectorSlug === "squadbase-db") {
       const urlEnvName = entry.envVars["connection-url"];
       if (!urlEnvName) {
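
For reference, a connection entry of the shape these lookups imply could look like the sketch below. Only the envVars lookups and the connector slugs are visible in this diff; the field holding the slug and the environment variable names are assumptions for illustration:

    // Hypothetical .squadbase/connections.json entry for the new Athena branch.
    const exampleConnections = {
      "my-athena": {
        connectorSlug: "athena", // field name is a guess; only the slug values appear above
        envVars: {
          "aws-region": "MY_ATHENA_AWS_REGION",
          "aws-access-key-id": "MY_ATHENA_AWS_ACCESS_KEY_ID",
          "aws-secret-access-key": "MY_ATHENA_AWS_SECRET_ACCESS_KEY",
          "workgroup": "MY_ATHENA_WORKGROUP",            // optional, defaults to "primary"
          "output-location": "MY_ATHENA_OUTPUT_LOCATION" // optional
        }
      }
    };
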
@@ -154,7 +343,7 @@ function createConnectorRegistry() {
       return { client, connectorSlug };
     }
     throw new Error(
-      `connector type '${connectorSlug}' is not supported. Supported: "snowflake", "bigquery", "
+      `connector type '${connectorSlug}' is not supported as a SQL connector. Supported SQL types: "postgresql", "squadbase-db", "mysql", "snowflake", "bigquery", "athena", "redshift", "databricks". Non-SQL types (airtable, google-analytics, kintone, wix-store, dbt) should be used via TypeScript handlers.`
     );
   }
   function reloadEnvFile2(envPath) {
@@ -186,11 +375,11 @@ function createConnectorRegistry() {
     } catch {
     }
   }
-  return { getClient: getClient2, reloadEnvFile: reloadEnvFile2, watchConnectionsFile: watchConnectionsFile2 };
+  return { getClient: getClient2, loadConnections: loadConnections2, reloadEnvFile: reloadEnvFile2, watchConnectionsFile: watchConnectionsFile2 };
 }

 // src/connector-client/index.ts
-var { getClient, reloadEnvFile, watchConnectionsFile } = createConnectorRegistry();
+var { getClient, loadConnections, reloadEnvFile, watchConnectionsFile } = createConnectorRegistry();

 // src/registry.ts
 var dataSources = /* @__PURE__ */ new Map();
@@ -301,10 +490,10 @@ async function initialize() {
       _query: sqlDef.query,
       handler: async (runtimeParams) => {
         const { client, connectorSlug } = await getClient(sqlDef.connectionId);
-        const
+        const isLiteralConnector = connectorSlug === "snowflake" || connectorSlug === "bigquery" || connectorSlug === "athena" || connectorSlug === "redshift" || connectorSlug === "databricks";
         let queryText;
         let queryValues;
-        if (
+        if (isLiteralConnector) {
           const defaults = new Map(
             (sqlDef.parameters ?? []).map((p) => [p.name, p.default ?? null])
           );
@@ -322,6 +511,14 @@ async function initialize() {
           }
         );
         queryValues = [];
+      } else if (connectorSlug === "mysql") {
+        const built = buildQuery(
+          sqlDef.query,
+          sqlDef.parameters ?? [],
+          runtimeParams
+        );
+        queryText = built.text.replace(/\$(\d+)/g, "?");
+        queryValues = built.values;
       } else {
         const built = buildQuery(
           sqlDef.query,
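
The new mysql branch above reuses the PostgreSQL-style buildQuery output and rewrites its numbered placeholders into the positional ? placeholders that mysql2 expects. A minimal illustration of the rewrite (sample query invented for the example):

    const text = "SELECT * FROM orders WHERE status = $1 AND total > $2";
    const mysqlText = text.replace(/\$(\d+)/g, "?");
    // -> "SELECT * FROM orders WHERE status = ? AND total > ?"

Note that this rewrite only stays correct if buildQuery emits $1..$n in left-to-right order without repeating a placeholder, since ? parameters are purely positional.
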
package/dist/types/data-source.d.ts
CHANGED
@@ -7,14 +7,14 @@ interface ParameterMeta {
 }
 interface DataSourceCacheConfig {
   /**
-   *
-   * 0
+   * Cache TTL in seconds.
+   * 0 or unset means no caching (default behavior for backward compatibility).
    */
   ttl: number;
   /**
-   * true
-   *
-   *
+   * When true, stale data is returned immediately after TTL expiry
+   * while fresh data is fetched asynchronously in the background to update the cache.
+   * Default: false
    */
   staleWhileRevalidate?: boolean;
 }
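
Together the two fields describe a stale-while-revalidate policy for cached query results. A hypothetical configuration using them (only the DataSourceCacheConfig fields come from this diff; the variable is illustrative):

    const cache: DataSourceCacheConfig = {
      ttl: 300,                  // keep results for five minutes
      staleWhileRevalidate: true // after expiry, serve stale rows and refresh in the background
    };
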
package/dist/vite-plugin.js
CHANGED
@@ -2,6 +2,8 @@
 import buildPlugin from "@hono/vite-build/node";
 import devServer from "@hono/vite-dev-server";
 import nodeAdapter from "@hono/vite-dev-server/node";
+import { fileURLToPath } from "url";
+import path3 from "path";

 // src/registry.ts
 import { readdir, readFile, mkdir } from "fs/promises";
@@ -37,6 +39,11 @@ function resolveEnvVar(entry, key, connectionId) {
   }
   return value;
 }
+function resolveEnvVarOptional(entry, key) {
+  const envVarName = entry.envVars[key];
+  if (!envVarName) return void 0;
+  return process.env[envVarName] || void 0;
+}

 // src/connector-client/bigquery.ts
 function createBigQueryClient(entry, connectionId) {
@@ -48,7 +55,7 @@ function createBigQueryClient(entry, connectionId) {
       gcpCredentials = JSON.parse(serviceAccountJson);
     } catch {
       throw new Error(
-        `BigQuery service account JSON (decoded from base64) is not valid JSON for
+        `BigQuery service account JSON (decoded from base64) is not valid JSON for connectionId "${connectionId}"`
       );
     }
     return {
@@ -105,6 +112,176 @@ function createSnowflakeClient(entry, connectionId) {
   };
 }

+// src/connector-client/mysql.ts
+function createMySQLClient(entry, connectionId) {
+  const connectionUrl = resolveEnvVar(entry, "connection-url", connectionId);
+  let poolPromise = null;
+  function getPool() {
+    if (!poolPromise) {
+      poolPromise = import("mysql2/promise").then(
+        (mysql) => mysql.default.createPool(connectionUrl)
+      );
+    }
+    return poolPromise;
+  }
+  return {
+    async query(sql, params) {
+      const pool = await getPool();
+      const [rows] = await pool.execute(sql, params);
+      return { rows };
+    }
+  };
+}
+
+// src/connector-client/aws-athena.ts
+function createAthenaClient(entry, connectionId) {
+  const region = resolveEnvVar(entry, "aws-region", connectionId);
+  const accessKeyId = resolveEnvVar(entry, "aws-access-key-id", connectionId);
+  const secretAccessKey = resolveEnvVar(entry, "aws-secret-access-key", connectionId);
+  const workgroup = resolveEnvVarOptional(entry, "workgroup") ?? "primary";
+  const outputLocation = resolveEnvVarOptional(entry, "output-location");
+  return {
+    async query(sql) {
+      const {
+        AthenaClient,
+        StartQueryExecutionCommand,
+        GetQueryExecutionCommand,
+        GetQueryResultsCommand
+      } = await import("@aws-sdk/client-athena");
+      const client = new AthenaClient({
+        region,
+        credentials: { accessKeyId, secretAccessKey }
+      });
+      const startParams = {
+        QueryString: sql,
+        WorkGroup: workgroup
+      };
+      if (outputLocation) {
+        startParams.ResultConfiguration = { OutputLocation: outputLocation };
+      }
+      const { QueryExecutionId } = await client.send(
+        new StartQueryExecutionCommand(startParams)
+      );
+      if (!QueryExecutionId) throw new Error("Athena: failed to start query execution");
+      while (true) {
+        const { QueryExecution } = await client.send(
+          new GetQueryExecutionCommand({ QueryExecutionId })
+        );
+        const state = QueryExecution?.Status?.State;
+        if (state === "SUCCEEDED") break;
+        if (state === "FAILED") {
+          throw new Error(
+            `Athena query failed: ${QueryExecution?.Status?.StateChangeReason ?? "unknown"}`
+          );
+        }
+        if (state === "CANCELLED") throw new Error("Athena query was cancelled");
+        await new Promise((r) => setTimeout(r, 500));
+      }
+      const { ResultSet } = await client.send(
+        new GetQueryResultsCommand({ QueryExecutionId })
+      );
+      const resultRows = ResultSet?.Rows ?? [];
+      if (resultRows.length === 0) return { rows: [] };
+      const headers = resultRows[0].Data?.map((d) => d.VarCharValue ?? "") ?? [];
+      const rows = resultRows.slice(1).map((row) => {
+        const obj = {};
+        row.Data?.forEach((d, i) => {
+          obj[headers[i]] = d.VarCharValue ?? null;
+        });
+        return obj;
+      });
+      return { rows };
+    }
+  };
+}
+
+// src/connector-client/redshift.ts
+function createRedshiftClient(entry, connectionId) {
+  const region = resolveEnvVar(entry, "aws-region", connectionId);
+  const accessKeyId = resolveEnvVar(entry, "aws-access-key-id", connectionId);
+  const secretAccessKey = resolveEnvVar(entry, "aws-secret-access-key", connectionId);
+  const database = resolveEnvVar(entry, "database", connectionId);
+  const clusterIdentifier = resolveEnvVarOptional(entry, "cluster-identifier");
+  const workgroupName = resolveEnvVarOptional(entry, "workgroup-name");
+  const secretArn = resolveEnvVarOptional(entry, "secret-arn");
+  const dbUser = resolveEnvVarOptional(entry, "db-user");
+  return {
+    async query(sql) {
+      const {
+        RedshiftDataClient,
+        ExecuteStatementCommand,
+        DescribeStatementCommand,
+        GetStatementResultCommand
+      } = await import("@aws-sdk/client-redshift-data");
+      const client = new RedshiftDataClient({
+        region,
+        credentials: { accessKeyId, secretAccessKey }
+      });
+      const executeParams = {
+        Sql: sql,
+        Database: database
+      };
+      if (clusterIdentifier) executeParams.ClusterIdentifier = clusterIdentifier;
+      if (workgroupName) executeParams.WorkgroupName = workgroupName;
+      if (secretArn) executeParams.SecretArn = secretArn;
+      if (dbUser) executeParams.DbUser = dbUser;
+      const { Id } = await client.send(
+        new ExecuteStatementCommand(executeParams)
+      );
+      if (!Id) throw new Error("Redshift: failed to start statement execution");
+      while (true) {
+        const desc = await client.send(new DescribeStatementCommand({ Id }));
+        const status = desc.Status;
+        if (status === "FINISHED") break;
+        if (status === "FAILED") {
+          throw new Error(`Redshift query failed: ${desc.Error ?? "unknown"}`);
+        }
+        if (status === "ABORTED") throw new Error("Redshift query was aborted");
+        await new Promise((r) => setTimeout(r, 500));
+      }
+      const result = await client.send(new GetStatementResultCommand({ Id }));
+      const columns = result.ColumnMetadata?.map((c) => c.name ?? "") ?? [];
+      const rows = (result.Records ?? []).map((record) => {
+        const obj = {};
+        record.forEach((field, i) => {
+          const col = columns[i];
+          const value = field.stringValue ?? field.longValue ?? field.doubleValue ?? field.booleanValue ?? (field.isNull ? null : field.blobValue ?? null);
+          obj[col] = value;
+        });
+        return obj;
+      });
+      return { rows };
+    }
+  };
+}
+
+// src/connector-client/databricks.ts
+function createDatabricksClient(entry, connectionId) {
+  const host = resolveEnvVar(entry, "host", connectionId);
+  const httpPath = resolveEnvVar(entry, "http-path", connectionId);
+  const token = resolveEnvVar(entry, "token", connectionId);
+  return {
+    async query(sql) {
+      const { DBSQLClient } = await import("@databricks/sql");
+      const client = new DBSQLClient();
+      await client.connect({ host, path: httpPath, token });
+      try {
+        const session = await client.openSession();
+        try {
+          const operation = await session.executeStatement(sql);
+          const result = await operation.fetchAll();
+          await operation.close();
+          return { rows: result };
+        } finally {
+          await session.close();
+        }
+      } finally {
+        await client.close();
+      }
+    }
+  };
+}
+
 // src/connector-client/registry.ts
 function createConnectorRegistry() {
   let connectionsCache = null;
@@ -112,7 +289,7 @@ function createConnectorRegistry() {
   function getConnectionsFilePath() {
     return process.env.CONNECTIONS_PATH ?? path.join(process.cwd(), ".squadbase/connections.json");
   }
-  function
+  function loadConnections2() {
     if (connectionsCache !== null) return connectionsCache;
     const filePath = getConnectionsFilePath();
     try {
@@ -124,7 +301,7 @@ function createConnectorRegistry() {
     return connectionsCache;
   }
   async function getClient2(connectionId) {
-    const connections =
+    const connections = loadConnections2();
     const entry = connections[connectionId];
     if (!entry) {
       throw new Error(`connection '${connectionId}' not found in .squadbase/connections.json`);
@@ -138,6 +315,20 @@ function createConnectorRegistry() {
     if (connectorSlug === "bigquery") {
       return { client: createBigQueryClient(entry, connectionId), connectorSlug };
     }
+    if (connectorSlug === "athena") {
+      return { client: createAthenaClient(entry, connectionId), connectorSlug };
+    }
+    if (connectorSlug === "redshift") {
+      return { client: createRedshiftClient(entry, connectionId), connectorSlug };
+    }
+    if (connectorSlug === "databricks") {
+      return { client: createDatabricksClient(entry, connectionId), connectorSlug };
+    }
+    if (connectorSlug === "mysql") {
+      const client = createMySQLClient(entry, connectionId);
+      clientCache.set(connectionId, client);
+      return { client, connectorSlug };
+    }
     if (connectorSlug === "postgresql" || connectorSlug === "squadbase-db") {
       const urlEnvName = entry.envVars["connection-url"];
       if (!urlEnvName) {
@@ -154,7 +345,7 @@ function createConnectorRegistry() {
       return { client, connectorSlug };
     }
     throw new Error(
-      `connector type '${connectorSlug}' is not supported. Supported: "snowflake", "bigquery", "
+      `connector type '${connectorSlug}' is not supported as a SQL connector. Supported SQL types: "postgresql", "squadbase-db", "mysql", "snowflake", "bigquery", "athena", "redshift", "databricks". Non-SQL types (airtable, google-analytics, kintone, wix-store, dbt) should be used via TypeScript handlers.`
     );
   }
   function reloadEnvFile2(envPath) {
@@ -186,11 +377,11 @@ function createConnectorRegistry() {
     } catch {
     }
   }
-  return { getClient: getClient2, reloadEnvFile: reloadEnvFile2, watchConnectionsFile: watchConnectionsFile2 };
+  return { getClient: getClient2, loadConnections: loadConnections2, reloadEnvFile: reloadEnvFile2, watchConnectionsFile: watchConnectionsFile2 };
 }

 // src/connector-client/index.ts
-var { getClient, reloadEnvFile, watchConnectionsFile } = createConnectorRegistry();
+var { getClient, loadConnections, reloadEnvFile, watchConnectionsFile } = createConnectorRegistry();

 // src/registry.ts
 var viteServer = null;
@@ -211,6 +402,17 @@ var DEFAULT_EXCLUDE = [
   /^\/node_modules\/.*/,
   /^(?!\/api)/
 ];
+function resolveEntry(entry) {
+  if (entry.startsWith(".") || entry.startsWith("/")) return entry;
+  try {
+    const resolvedUrl = import.meta.resolve(entry);
+    const absolutePath = fileURLToPath(resolvedUrl);
+    const relativePath = path3.relative(process.cwd(), absolutePath).replace(/\\/g, "/");
+    return "./" + relativePath;
+  } catch {
+    return entry;
+  }
+}
 function squadbasePlugin(options = {}) {
   const {
     buildEntry = "@squadbase/vite-server/main",
@@ -221,7 +423,7 @@ function squadbasePlugin(options = {}) {
   } = options;
   const isServerBuild = (_, { command, mode }) => command === "build" && mode !== "client";
   const rawBuildPlugin = buildPlugin({
-    entry: buildEntry,
+    entry: resolveEntry(buildEntry),
     outputDir: "./dist/server",
     output: "index.js",
     port,
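
resolveEntry lets the default buildEntry, the bare specifier "@squadbase/vite-server/main", reach buildPlugin as a concrete file path: relative and absolute entries pass through unchanged, while bare specifiers are resolved via import.meta.resolve and relativized against the project root. A standalone copy for experimentation (the example output path is illustrative):

    import { fileURLToPath } from "url";
    import path from "path";

    // Same logic as the resolveEntry added above.
    function resolveEntry(entry: string): string {
      if (entry.startsWith(".") || entry.startsWith("/")) return entry;
      try {
        const resolvedUrl = import.meta.resolve(entry); // bare specifier -> file:// URL
        const absolutePath = fileURLToPath(resolvedUrl);
        return "./" + path.relative(process.cwd(), absolutePath).replace(/\\/g, "/");
      } catch {
        return entry; // fall back to the original entry if resolution fails
      }
    }

    // resolveEntry("@squadbase/vite-server/main")
    //   -> "./node_modules/@squadbase/vite-server/dist/main.js" (illustrative)
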
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@squadbase/vite-server",
-  "version": "0.0.1-build-
+  "version": "0.0.1-build-8",
   "type": "module",
   "exports": {
     ".": {
@@ -32,14 +32,20 @@
   "scripts": {
     "cli": "tsx src/cli/index.ts",
     "build": "tsup && pnpm run build:cli",
-    "build:cli": "tsup src/cli/index.ts --out-dir dist/cli --format esm --platform node --no-splitting --external pg --external snowflake-sdk --external @google-cloud/bigquery --external hono --external @clack/prompts"
+    "build:cli": "tsup src/cli/index.ts --out-dir dist/cli --format esm --platform node --no-splitting --external pg --external snowflake-sdk --external @google-cloud/bigquery --external mysql2 --external @aws-sdk/client-athena --external @aws-sdk/client-redshift-data --external @databricks/sql --external @google-analytics/data --external @kintone/rest-api-client --external hono --external @clack/prompts"
   },
   "dependencies": {
+    "@aws-sdk/client-athena": "^3.750.0",
+    "@aws-sdk/client-redshift-data": "^3.750.0",
+    "@databricks/sql": "^1.8.0",
+    "@google-analytics/data": "^4.8.0",
     "@google-cloud/bigquery": "^7.9.4",
     "@hono/node-server": "^1.19.9",
     "@hono/vite-build": "^1.10.0",
     "@hono/vite-dev-server": "^0.18.0",
+    "@kintone/rest-api-client": "^5.5.0",
     "hono": "^4.11.9",
+    "mysql2": "^3.11.0",
     "pg": "^8.18.0",
     "snowflake-sdk": "^1.15.0"
   },