postgresai 0.14.0-dev.82 → 0.14.0-dev.83

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/lib/checkup.ts CHANGED
@@ -1242,272 +1242,6 @@ async function generateF001(client: Client, nodeName: string): Promise<Report> {
  return report;
  }

- /**
- * Generate F004 report - Autovacuum: heap bloat estimate
- *
- * Estimates table bloat based on statistical analysis of table pages vs expected pages.
- * Uses pg_stats for column statistics to estimate row sizes.
- */
- async function generateF004(client: Client, nodeName: string): Promise<Report> {
- const report = createBaseReport("F004", "Autovacuum: heap bloat estimate", nodeName);
- const postgresVersion = await getPostgresVersion(client);
-
- interface TableBloatRow {
- schemaname: string;
- tblname: string;
- real_size_bytes: string;
- real_size_pretty: string;
- extra_size_bytes: string;
- extra_size_pretty: string;
- extra_pct: string;
- fillfactor: string;
- bloat_size_bytes: string;
- bloat_size_pretty: string;
- bloat_pct: string;
- is_na: string;
- }
-
- let bloatData: TableBloatRow[] = [];
- let bloatError: string | null = null;
-
- try {
- const result = await client.query<TableBloatRow>(`
- select
- schemaname,
- tblname,
- (bs * tblpages)::bigint as real_size_bytes,
- pg_size_pretty((bs * tblpages)::bigint) as real_size_pretty,
- ((tblpages - est_tblpages) * bs)::bigint as extra_size_bytes,
- pg_size_pretty(((tblpages - est_tblpages) * bs)::bigint) as extra_size_pretty,
- case when tblpages > 0 and tblpages - est_tblpages > 0
- then round(100.0 * (tblpages - est_tblpages) / tblpages, 2)
- else 0
- end as extra_pct,
- fillfactor,
- case when tblpages - est_tblpages_ff > 0
- then ((tblpages - est_tblpages_ff) * bs)::bigint
- else 0
- end as bloat_size_bytes,
- pg_size_pretty(case when tblpages - est_tblpages_ff > 0
- then ((tblpages - est_tblpages_ff) * bs)::bigint
- else 0
- end) as bloat_size_pretty,
- case when tblpages > 0 and tblpages - est_tblpages_ff > 0
- then round(100.0 * (tblpages - est_tblpages_ff) / tblpages, 2)
- else 0
- end as bloat_pct,
- is_na::text
- from (
- select
- ceil(reltuples / ((bs - page_hdr) / tpl_size)) + ceil(toasttuples / 4) as est_tblpages,
- ceil(reltuples / ((bs - page_hdr) * fillfactor / (tpl_size * 100))) + ceil(toasttuples / 4) as est_tblpages_ff,
- tblpages, fillfactor, bs, schemaname, tblname, is_na
- from (
- select
- (4 + tpl_hdr_size + tpl_data_size + (2 * ma)
- - case when tpl_hdr_size % ma = 0 then ma else tpl_hdr_size % ma end
- - case when ceil(tpl_data_size)::int % ma = 0 then ma else ceil(tpl_data_size)::int % ma end
- ) as tpl_size,
- (heappages + toastpages) as tblpages,
- reltuples, toasttuples, bs, page_hdr, schemaname, tblname, fillfactor, is_na
- from (
- select
- ns.nspname as schemaname,
- tbl.relname as tblname,
- tbl.reltuples,
- tbl.relpages as heappages,
- coalesce(toast.relpages, 0) as toastpages,
- coalesce(toast.reltuples, 0) as toasttuples,
- coalesce(substring(array_to_string(tbl.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 100) as fillfactor,
- current_setting('block_size')::numeric as bs,
- case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as ma,
- 24 as page_hdr,
- 23 + case when max(coalesce(s.null_frac, 0)) > 0 then (7 + count(s.attname)) / 8 else 0::int end
- + case when bool_or(att.attname = 'oid' and att.attnum < 0) then 4 else 0 end as tpl_hdr_size,
- sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0)) as tpl_data_size,
- (bool_or(att.atttypid = 'pg_catalog.name'::regtype)
- or sum(case when att.attnum > 0 then 1 else 0 end) <> count(s.attname))::int as is_na
- from pg_attribute as att
- join pg_class as tbl on att.attrelid = tbl.oid
- join pg_namespace as ns on ns.oid = tbl.relnamespace
- left join pg_stats as s on s.schemaname = ns.nspname
- and s.tablename = tbl.relname and s.attname = att.attname
- left join pg_class as toast on tbl.reltoastrelid = toast.oid
- where not att.attisdropped
- and tbl.relkind in ('r', 'm')
- and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
- group by ns.nspname, tbl.relname, tbl.reltuples, tbl.relpages, toast.relpages, toast.reltuples, tbl.reloptions
- ) as s
- ) as s2
- ) as s3
- where tblpages > 0 and (bs * tblpages) >= 1024 * 1024 -- exclude tables < 1 MiB
- order by bloat_size_bytes desc
- limit 100
- `);
- bloatData = result.rows;
- } catch (err) {
- const errorMsg = err instanceof Error ? err.message : String(err);
- console.log(`[F004] Error estimating table bloat: ${errorMsg}`);
- bloatError = errorMsg;
- }
-
- report.results[nodeName] = {
- data: {
- tables: bloatData,
- ...(bloatError && { error: bloatError }),
- },
- postgres_version: postgresVersion,
- };
-
- return report;
- }
-
- /**
- * Generate F005 report - Autovacuum: index bloat estimate
- *
- * Estimates B-tree index bloat based on statistical analysis of index pages vs expected pages.
- */
- async function generateF005(client: Client, nodeName: string): Promise<Report> {
- const report = createBaseReport("F005", "Autovacuum: index bloat estimate", nodeName);
- const postgresVersion = await getPostgresVersion(client);
-
- interface IndexBloatRow {
- schemaname: string;
- tblname: string;
- idxname: string;
- real_size_bytes: string;
- real_size_pretty: string;
- table_size_bytes: string;
- table_size_pretty: string;
- extra_size_bytes: string;
- extra_size_pretty: string;
- extra_pct: string;
- fillfactor: string;
- bloat_size_bytes: string;
- bloat_size_pretty: string;
- bloat_pct: string;
- is_na: string;
- }
-
- let bloatData: IndexBloatRow[] = [];
- let bloatError: string | null = null;
-
- try {
- const result = await client.query<IndexBloatRow>(`
- select
- nspname as schemaname,
- tblname,
- idxname,
- (bs * relpages)::bigint as real_size_bytes,
- pg_size_pretty((bs * relpages)::bigint) as real_size_pretty,
- pg_relation_size(tbloid)::bigint as table_size_bytes,
- pg_size_pretty(pg_relation_size(tbloid)) as table_size_pretty,
- ((relpages - est_pages) * bs)::bigint as extra_size_bytes,
- pg_size_pretty(((relpages - est_pages) * bs)::bigint) as extra_size_pretty,
- round(100.0 * (relpages - est_pages) / relpages, 2) as extra_pct,
- fillfactor,
- case when relpages > est_pages_ff
- then ((relpages - est_pages_ff) * bs)::bigint
- else 0
- end as bloat_size_bytes,
- pg_size_pretty(case when relpages > est_pages_ff
- then ((relpages - est_pages_ff) * bs)::bigint
- else 0
- end) as bloat_size_pretty,
- case when relpages > est_pages_ff
- then round(100.0 * (relpages - est_pages_ff) / relpages, 2)
- else 0
- end as bloat_pct,
- is_na::text
- from (
- select
- coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) / (4 + nulldatahdrwidth)::float)), 0) as est_pages,
- coalesce(1 + ceil(reltuples / floor((bs - pageopqdata - pagehdr) * fillfactor / (100 * (4 + nulldatahdrwidth)::float))), 0) as est_pages_ff,
- bs, nspname, tblname, idxname, relpages, fillfactor, is_na, tbloid
- from (
- select
- maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, tbloid,
- (index_tuple_hdr_bm + maxalign
- - case when index_tuple_hdr_bm % maxalign = 0 then maxalign else index_tuple_hdr_bm % maxalign end
- + nulldatawidth + maxalign
- - case when nulldatawidth = 0 then 0
- when nulldatawidth::integer % maxalign = 0 then maxalign
- else nulldatawidth::integer % maxalign end
- )::numeric as nulldatahdrwidth,
- pagehdr, pageopqdata, is_na
- from (
- select
- n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor,
- current_setting('block_size')::numeric as bs,
- case when version() ~ 'mingw32' or version() ~ '64-bit|x86_64|ppc64|ia64|amd64' then 8 else 4 end as maxalign,
- 24 as pagehdr,
- 16 as pageopqdata,
- case when max(coalesce(s.null_frac, 0)) = 0
- then 8
- else 8 + ((32 + 8 - 1) / 8)
- end as index_tuple_hdr_bm,
- sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) as nulldatawidth,
- (max(case when i.atttypid = 'pg_catalog.name'::regtype then 1 else 0 end) > 0)::int as is_na
- from (
- select
- ct.relname as tblname, ct.relnamespace, ic.idxname, ic.attpos, ic.indkey,
- ic.indkey[ic.attpos], ic.reltuples, ic.relpages, ic.tbloid, ic.idxoid, ic.fillfactor,
- coalesce(a1.attnum, a2.attnum) as attnum,
- coalesce(a1.attname, a2.attname) as attname,
- coalesce(a1.atttypid, a2.atttypid) as atttypid,
- case when a1.attnum is null then ic.idxname else ct.relname end as attrelname
- from (
- select
- idxname, reltuples, relpages, tbloid, idxoid, fillfactor,
- indkey, generate_subscripts(indkey, 1) as attpos
- from (
- select
- ci.relname as idxname, ci.reltuples, ci.relpages, i.indrelid as tbloid,
- i.indexrelid as idxoid,
- coalesce(substring(array_to_string(ci.reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 90) as fillfactor,
- i.indkey
- from pg_index as i
- join pg_class as ci on ci.oid = i.indexrelid
- join pg_namespace as ns on ns.oid = ci.relnamespace
- join pg_am as am on am.oid = ci.relam
- where am.amname = 'btree'
- and ci.relpages > 0
- and ns.nspname not in ('pg_catalog', 'information_schema', 'pg_toast')
- ) as idx_data
- ) as ic
- join pg_class as ct on ct.oid = ic.tbloid
- left join pg_attribute as a1 on a1.attrelid = ic.idxoid and a1.attnum = ic.indkey[ic.attpos]
- left join pg_attribute as a2 on a2.attrelid = ic.tbloid and a2.attnum = ic.indkey[ic.attpos]
- ) as i
- join pg_namespace as n on n.oid = i.relnamespace
- left join pg_stats as s on s.schemaname = n.nspname
- and s.tablename = i.attrelname and s.attname = i.attname
- group by n.nspname, i.tblname, i.idxname, i.reltuples, i.relpages, i.tbloid, i.idxoid, i.fillfactor
- ) as rows_data_stats
- ) as rows_hdr_pdg_stats
- ) as relation_stats
- where relpages > 0 and (bs * relpages) >= 1024 * 1024 -- exclude indexes < 1 MiB
- order by bloat_size_bytes desc
- limit 100
- `);
- bloatData = result.rows;
- } catch (err) {
- const errorMsg = err instanceof Error ? err.message : String(err);
- console.log(`[F005] Error estimating index bloat: ${errorMsg}`);
- bloatError = errorMsg;
- }
-
- report.results[nodeName] = {
- data: {
- indexes: bloatData,
- ...(bloatError && { error: bloatError }),
- },
- postgres_version: postgresVersion,
- };
-
- return report;
- }
-
  /**
  * Generate G001 report - Memory-related settings
  */
@@ -1712,8 +1446,6 @@ export const REPORT_GENERATORS: Record<string, (client: Client, nodeName: string
  D001: generateD001,
  D004: generateD004,
  F001: generateF001,
- F004: generateF004,
- F005: generateF005,
  G001: generateG001,
  G003: generateG003,
  H001: generateH001,
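For readers tracing what was removed: both generators implemented the well-known statistical bloat estimate, deriving expected page counts from `reltuples`, the block size, and an alignment-padded tuple size built from `pg_stats` average column widths. A minimal TypeScript sketch of the heap-side arithmetic from the F004 query (illustrative only; not part of the package):

```ts
// Sketch of the page-count estimate from the removed F004 query.
// Inputs mirror the SQL aliases: bs = block_size, ma = max alignment,
// pageHdr = 24-byte page header, tplHdrSize / tplDataSize from pg_stats.
function estimateHeapPages(opts: {
  reltuples: number;   // pg_class.reltuples
  toasttuples: number; // tuples in the TOAST relation
  bs: number;          // current_setting('block_size'), usually 8192
  ma: number;          // maximum alignment, 8 on 64-bit builds
  pageHdr: number;     // page header size, 24
  tplHdrSize: number;  // 23 + null bitmap (+ oid on old clusters)
  tplDataSize: number; // sum((1 - null_frac) * avg_width) over columns
  fillfactor: number;  // table fillfactor, default 100
}): { estPages: number; estPagesFf: number } {
  // Alignment padding: the remainder, or a full `ma` when already aligned
  const pad = (n: number) => (n % opts.ma === 0 ? opts.ma : n % opts.ma);
  // 4-byte item pointer + header and data, each rounded up to alignment
  const tplSize =
    4 + opts.tplHdrSize + opts.tplDataSize + 2 * opts.ma
    - pad(opts.tplHdrSize) - pad(Math.ceil(opts.tplDataSize));
  const estPages =
    Math.ceil(opts.reltuples / ((opts.bs - opts.pageHdr) / tplSize))
    + Math.ceil(opts.toasttuples / 4);
  const estPagesFf =
    Math.ceil(opts.reltuples / ((opts.bs - opts.pageHdr) * opts.fillfactor / (tplSize * 100)))
    + Math.ceil(opts.toasttuples / 4);
  return { estPages, estPagesFf };
}
// Bloat is then (tblpages - estPagesFf) * bs when positive, as in the query.
```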
package/lib/mcp-server.ts CHANGED
@@ -447,7 +447,7 @@ export async function startMcpServer(rootOpts?: RootOptsLike, extra?: { debug?:
  });

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- server.setRequestHandler(CallToolRequestSchema, async (req: any) => {
+ server.setRequestHandler(CallToolRequestSchema, async (req: any): Promise<any> => {
  return handleToolCall(req, rootOpts, extra);
  });

package/lib/supabase.ts CHANGED
@@ -337,9 +337,14 @@ export class SupabaseClient {
  * Fetch the database pooler connection string from Supabase Management API.
  * Returns a postgresql:// URL with the specified username but no password.
  *
+ * Note: The username will be automatically suffixed with `.<projectRef>` if not
+ * already present, as required by Supabase pooler connections.
+ *
  * @param config Supabase configuration with projectRef and accessToken
- * @param username Username to include in the URL (e.g., monitoring user)
- * @returns Database URL without password (e.g., "postgresql://user@host:port/postgres")
+ * @param username Username to include in the URL (e.g., monitoring user).
+ * Will be transformed to `<username>.<projectRef>` format.
+ * @returns Database URL without password (e.g., "postgresql://user.project@host:port/postgres"),
+ * or null if the API call fails or returns no pooler config.
  */
  export async function fetchPoolerDatabaseUrl(
  config: SupabaseConfig,
@@ -347,6 +352,14 @@
  ): Promise<string | null> {
  const url = `${SUPABASE_API_BASE}/v1/projects/${encodeURIComponent(config.projectRef)}/config/database/pooler`;

+ // For Supabase pooler connections, the username must include the project ref:
+ // <user>.<project_ref>
+ // Example:
+ // postgresql://postgres_ai_mon.xhaqmsvczjkkvkgdyast@aws-1-eu-west-1.pooler.supabase.com:6543/postgres
+ const suffix = `.${config.projectRef}`;
+ const effectiveUsername = username.endsWith(suffix) ? username : `${username}${suffix}`;
+ // URL-encode the username to handle special characters safely
+ const encodedUsername = encodeURIComponent(effectiveUsername);
  try {
  const response = await fetch(url, {
  method: "GET",
@@ -367,7 +380,7 @@
  const pooler = data[0];
  // Build URL from components if available
  if (pooler.db_host && pooler.db_port && pooler.db_name) {
- return `postgresql://${username}@${pooler.db_host}:${pooler.db_port}/${pooler.db_name}`;
+ return `postgresql://${encodedUsername}@${pooler.db_host}:${pooler.db_port}/${pooler.db_name}`;
  }
  // Fallback: try to extract from connection_string if present
  if (typeof pooler.connection_string === "string") {
@@ -375,7 +388,7 @@
  const connUrl = new URL(pooler.connection_string);
  // Use provided username; handle empty port for default ports (e.g., 5432)
  const portPart = connUrl.port ? `:${connUrl.port}` : "";
- return `postgresql://${username}@${connUrl.hostname}${portPart}${connUrl.pathname}`;
+ return `postgresql://${encodedUsername}@${connUrl.hostname}${portPart}${connUrl.pathname}`;
  } catch {
  return null;
  }
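Taken together, the supabase.ts changes mean callers no longer need to pre-qualify the username themselves. A standalone sketch of the new transformation (the helper name here is illustrative; in the package the logic is inline in `fetchPoolerDatabaseUrl`):

```ts
// Illustrative: how the patch derives the pooler username.
function poolerUsername(username: string, projectRef: string): string {
  const suffix = `.${projectRef}`;
  // Suffix with the project ref unless the caller already did
  const effective = username.endsWith(suffix) ? username : `${username}${suffix}`;
  // Encode so special characters survive embedding in a URL authority
  return encodeURIComponent(effective);
}

poolerUsername("postgres_ai_mon", "xhaqmsvczjkkvkgdyast");
// => "postgres_ai_mon.xhaqmsvczjkkvkgdyast"
// Resulting URL shape:
// postgresql://postgres_ai_mon.xhaqmsvczjkkvkgdyast@aws-1-eu-west-1.pooler.supabase.com:6543/postgres
```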
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "postgresai",
- "version": "0.14.0-dev.82",
+ "version": "0.14.0-dev.83",
  "description": "postgres_ai CLI",
  "license": "Apache-2.0",
  "private": false,
@@ -45,12 +45,12 @@
  "pg": "^8.16.3"
  },
  "devDependencies": {
- "@types/bun": "^1.1.14",
+ "@types/bun": "^1.3.6",
  "@types/js-yaml": "^4.0.9",
  "@types/pg": "^8.15.6",
  "ajv": "^8.17.1",
  "ajv-formats": "^3.0.1",
- "typescript": "^5.3.3"
+ "typescript": "^5.9.3"
  },
  "publishConfig": {
  "access": "public"
@@ -1,5 +1,6 @@
  import { describe, test, expect } from "bun:test";
  import { resolve } from "path";
+ import type { Client } from "pg";

  // Import from source directly since we're using Bun
  import * as checkup from "../lib/checkup";
@@ -86,7 +87,7 @@ describe("createBaseReport", () => {
  // Tests for CHECK_INFO
  describe("CHECK_INFO and REPORT_GENERATORS", () => {
  // Express-mode checks that have generators
- const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D001", "D004", "F001", "F004", "F005", "G001", "G003", "H001", "H002", "H004"];
+ const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D001", "D004", "F001", "G001", "G003", "H001", "H002", "H004"];

  test("CHECK_INFO contains all express-mode checks", () => {
  for (const checkId of expressCheckIds) {
@@ -947,6 +948,66 @@ describe("CLI tests", () => {
  expect(r.stdout).toMatch(/available checks/i);
  expect(r.stdout).toMatch(/A002/);
  });
+
+ test("checkup --help shows --upload and --no-upload options", () => {
+ const r = runCli(["checkup", "--help"]);
+ expect(r.status).toBe(0);
+ expect(r.stdout).toMatch(/--upload/);
+ expect(r.stdout).toMatch(/--no-upload/);
+ });
+
+ test("checkup --no-upload is recognized as valid option", () => {
+ // Should not produce "unknown option" error for --no-upload
+ const r = runCli(["checkup", "postgresql://test:test@localhost:5432/test", "--no-upload"]);
+ // Connection will fail, but option parsing should succeed
+ expect(r.stderr).not.toMatch(/unknown option/i);
+ expect(r.stderr).not.toMatch(/did you mean/i);
+ });
+
+ test("checkup --upload is recognized as valid option", () => {
+ // Should not produce "unknown option" error for --upload
+ const r = runCli(["checkup", "postgresql://test:test@localhost:5432/test", "--upload"]);
+ // Connection will fail, but option parsing should succeed
+ expect(r.stderr).not.toMatch(/unknown option/i);
+ expect(r.stderr).not.toMatch(/did you mean/i);
+ });
+
+ test("checkup --json does not imply --no-upload (decoupled behavior)", () => {
+ // Use empty config dir to ensure no API key is configured
+ const env = { XDG_CONFIG_HOME: "/tmp/postgresai-test-empty-config" };
+ // --json alone should NOT disable upload - when --upload is explicitly requested
+ // with --json, it should require API key (proving upload is not disabled)
+ const r = runCli(["checkup", "postgresql://test:test@localhost:5432/test", "--json", "--upload"], env);
+ // Should fail with "API key is required" because upload is enabled
+ expect(r.stderr).toMatch(/API key is required/i);
+ expect(r.stderr).not.toMatch(/unknown option/i);
+ });
+
+ test("checkup --json --no-upload explicitly disables upload", () => {
+ // Use empty config dir to ensure no API key is configured
+ const env = { XDG_CONFIG_HOME: "/tmp/postgresai-test-empty-config" };
+ // --json with --no-upload should disable upload (no API key error)
+ const r = runCli(["checkup", "postgresql://test:test@localhost:5432/test", "--json", "--no-upload"], env);
+ // Should NOT show "API key is required" because upload is disabled
+ expect(r.stderr).not.toMatch(/API key is required/i);
+ expect(r.stderr).not.toMatch(/unknown option/i);
+ });
+
+ test("checkup --upload requires API key", () => {
+ // Use empty config dir to ensure no API key is configured
+ const env = { XDG_CONFIG_HOME: "/tmp/postgresai-test-empty-config" };
+ // --upload explicitly requests upload, should fail without API key
+ const r = runCli(["checkup", "postgresql://test:test@localhost:5432/test", "--upload"], env);
+ expect(r.stderr).toMatch(/API key is required/i);
+ });
+
+ test("checkup --no-upload does not require API key", () => {
+ // Use empty config dir to ensure no API key is configured
+ const env = { XDG_CONFIG_HOME: "/tmp/postgresai-test-empty-config" };
+ // --no-upload disables upload, should not require API key
+ const r = runCli(["checkup", "postgresql://test:test@localhost:5432/test", "--no-upload"], env);
+ expect(r.stderr).not.toMatch(/API key is required/i);
+ });
  });

  // Tests for checkup-api module
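The new tests pin down a tri-state contract: `--upload` forces upload and demands an API key, `--no-upload` disables upload, and `--json` alone no longer implies either. A hypothetical sketch of resolution logic consistent with these tests (the actual wiring in the CLI may differ):

```ts
// Hypothetical resolution of the upload decision implied by the tests above.
// With commander-style negatable flags, `opts.upload` is true for --upload,
// false for --no-upload, and undefined when neither flag is passed.
function shouldUpload(
  opts: { upload?: boolean; json?: boolean },
  hasApiKey: boolean
): boolean {
  if (opts.upload === false) return false; // --no-upload: never upload, no key needed
  if (opts.upload === true) {
    // --upload: upload is mandatory, so a missing key is an error
    if (!hasApiKey) throw new Error("API key is required");
    return true;
  }
  // Neither flag given: --json no longer implies --no-upload;
  // assumption: the default falls back to key availability
  return hasApiKey;
}
```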
package/test/init.test.ts CHANGED
@@ -68,9 +68,9 @@ describe("init module", () => {
  expect(plan.database).toBe("mydb");
  const roleStep = plan.steps.find((s: { name: string }) => s.name === "01.role");
  expect(roleStep).toBeTruthy();
- expect(roleStep.sql).toMatch(/do\s+\$\$/i);
- expect(roleStep.sql).toMatch(/create\s+user/i);
- expect(roleStep.sql).toMatch(/alter\s+user/i);
+ expect(roleStep!.sql).toMatch(/do\s+\$\$/i);
+ expect(roleStep!.sql).toMatch(/create\s+user/i);
+ expect(roleStep!.sql).toMatch(/alter\s+user/i);
  expect(plan.steps.some((s: { optional?: boolean }) => s.optional)).toBe(false);
  });

@@ -86,12 +86,12 @@ describe("init module", () => {

  const roleStep = plan.steps.find((s: { name: string }) => s.name === "01.role");
  expect(roleStep).toBeTruthy();
- expect(roleStep.sql).toMatch(/create\s+user\s+"user ""with"" quotes ✓"/i);
- expect(roleStep.sql).toMatch(/alter\s+user\s+"user ""with"" quotes ✓"/i);
+ expect(roleStep!.sql).toMatch(/create\s+user\s+"user ""with"" quotes ✓"/i);
+ expect(roleStep!.sql).toMatch(/alter\s+user\s+"user ""with"" quotes ✓"/i);

  const permStep = plan.steps.find((s: { name: string }) => s.name === "03.permissions");
  expect(permStep).toBeTruthy();
- expect(permStep.sql).toMatch(/grant connect on database "db name ""with"" quotes ✓" to "user ""with"" quotes ✓"/i);
+ expect(permStep!.sql).toMatch(/grant connect on database "db name ""with"" quotes ✓" to "user ""with"" quotes ✓"/i);
  });

  test("buildInitPlan keeps backslashes in passwords (no unintended escaping)", async () => {
@@ -104,7 +104,7 @@ describe("init module", () => {
  });
  const roleStep = plan.steps.find((s: { name: string }) => s.name === "01.role");
  expect(roleStep).toBeTruthy();
- expect(roleStep.sql).toContain(`password '${pw}'`);
+ expect(roleStep!.sql).toContain(`password '${pw}'`);
  });

  test("buildInitPlan rejects identifiers with null bytes", async () => {
@@ -138,8 +138,8 @@ describe("init module", () => {
  });
  const step = plan.steps.find((s: { name: string }) => s.name === "01.role");
  expect(step).toBeTruthy();
- expect(step.sql).toMatch(/password 'pa''ss'/);
- expect(step.params).toBeUndefined();
+ expect(step!.sql).toMatch(/password 'pa''ss'/);
+ expect(step!.params).toBeUndefined();
  });

  test("buildInitPlan includes optional steps when enabled", async () => {
@@ -420,7 +420,7 @@ describe("init module", () => {
  });
  const step = plan.steps.find((s: { name: string }) => s.name === "01.role");
  expect(step).toBeTruthy();
- const redacted = init.redactPasswordsInSql(step.sql);
+ const redacted = init.redactPasswordsInSql(step!.sql);
  expect(redacted).toMatch(/password '<redacted>'/i);
  });

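These init.test.ts changes are purely type-level: `Array.prototype.find` returns `T | undefined`, and the compiler in the bumped toolchain flags property access on the possibly-undefined result even after `expect(roleStep).toBeTruthy()`, which TypeScript cannot use for narrowing. A minimal illustration (the `Step` shape here is simplified):

```ts
// Why the `!` is now required: `find` may return undefined at the type level.
interface Step { name: string; sql: string; params?: unknown[] }

const steps: Step[] = [{ name: "01.role", sql: "create user ..." }];
const roleStep = steps.find((s) => s.name === "01.role"); // type: Step | undefined

// roleStep.sql;   // compile error under strict null checks
void roleStep!.sql; // ok: the test has already asserted the step is truthy
```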
@@ -60,7 +60,7 @@ describe("createIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  )
- );
+ ) as unknown as typeof fetch;

  const result = await createIssue({
  apiKey: "test-key",
@@ -93,7 +93,7 @@ describe("createIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  );
- });
+ }) as unknown as typeof fetch;

  const result = await createIssue({
  apiKey: "test-key",
@@ -127,7 +127,7 @@ describe("createIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  )
- );
+ ) as unknown as typeof fetch;

  await expect(
  createIssue({
@@ -194,7 +194,7 @@ describe("updateIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  )
- );
+ ) as unknown as typeof fetch;

  const result = await updateIssue({
  apiKey: "test-key",
@@ -223,7 +223,7 @@ describe("updateIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  )
- );
+ ) as unknown as typeof fetch;

  const result = await updateIssue({
  apiKey: "test-key",
@@ -252,7 +252,7 @@ describe("updateIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  )
- );
+ ) as unknown as typeof fetch;

  const result = await updateIssue({
  apiKey: "test-key",
@@ -281,7 +281,7 @@ describe("updateIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  )
- );
+ ) as unknown as typeof fetch;

  const result = await updateIssue({
  apiKey: "test-key",
@@ -313,7 +313,7 @@ describe("updateIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  );
- });
+ }) as unknown as typeof fetch;

  await updateIssue({
  apiKey: "test-key",
@@ -345,7 +345,7 @@ describe("updateIssue", () => {
  headers: { "Content-Type": "application/json" },
  })
  )
- );
+ ) as unknown as typeof fetch;

  await expect(
  updateIssue({
@@ -414,7 +414,7 @@ describe("updateIssueComment", () => {
  headers: { "Content-Type": "application/json" },
  })
  );
- });
+ }) as unknown as typeof fetch;

  const result = await updateIssueComment({
  apiKey: "test-key",
@@ -442,7 +442,7 @@ describe("updateIssueComment", () => {
  headers: { "Content-Type": "application/json" },
  })
  )
- );
+ ) as unknown as typeof fetch;

  await expect(
  updateIssueComment({
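The recurring `as unknown as typeof fetch` casts are likewise a consequence of the toolchain bump: a simplified stub no longer structurally matches the full `typeof fetch` signature in newer Bun type definitions, so a double cast is needed when assigning to `globalThis.fetch`. The pattern, as a self-contained sketch:

```ts
import { mock } from "bun:test";

// Stub global fetch for a test; the double cast bridges the gap between the
// simplified stub signature and the richer `typeof fetch` type.
globalThis.fetch = mock(() =>
  Promise.resolve(
    new Response(JSON.stringify({ id: 123 }), {
      headers: { "Content-Type": "application/json" },
    })
  )
) as unknown as typeof fetch;
```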