postgresai 0.15.0-dev.7 → 0.15.0-dev.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,14 @@
1
+ # Demo PostgreSQL instance for `pgai mon local-install --demo`
2
+ # This file is copied to instances.yml during demo mode setup.
3
+
4
+ - name: target_database
5
+ conn_str: postgresql://monitor:monitor_pass@target-db:5432/target_database
6
+ preset_metrics: full
7
+ custom_metrics:
8
+ is_enabled: true
9
+ group: default
10
+ custom_tags:
11
+ env: demo
12
+ cluster: local
13
+ node_name: node-01
14
+ sink_type: ~sink_type~ # sed token substituted by generate-pgwatch-sources.sh; values: postgres, prometheus
@@ -1,3 +1,4 @@
1
+ import * as http from "http";
1
2
  import * as https from "https";
2
3
  import { URL } from "url";
3
4
  import { normalizeBaseUrl } from "./util";
@@ -27,7 +28,7 @@ function isRetryableError(err: unknown): boolean {
27
28
  // Retry on server errors (5xx), not on client errors (4xx)
28
29
  return err.statusCode >= 500 && err.statusCode < 600;
29
30
  }
30
-
31
+
31
32
  // Check for Node.js error codes (works on Error and Error-like objects)
32
33
  if (typeof err === "object" && err !== null && "code" in err) {
33
34
  const code = String((err as { code: unknown }).code);
@@ -35,7 +36,7 @@ function isRetryableError(err: unknown): boolean {
35
36
  return true;
36
37
  }
37
38
  }
38
-
39
+
39
40
  if (err instanceof Error) {
40
41
  const msg = err.message.toLowerCase();
41
42
  // Retry on network-related errors based on message content
@@ -49,7 +50,7 @@ function isRetryableError(err: unknown): boolean {
49
50
  msg.includes("network")
50
51
  );
51
52
  }
52
-
53
+
53
54
  return false;
54
55
  }
55
56
 
@@ -226,7 +227,25 @@ async function postRpc<T>(params: {
226
227
  resolve(value);
227
228
  };
228
229
 
229
- const req = https.request(
230
+ // Transport is picked from the URL protocol so the CLI can talk to a
231
+ // local-dev PostgREST over plain HTTP. Production URLs are always HTTPS;
232
+ // to guard against typos (e.g. a missing 's' in 'https://') silently
233
+ // leaking the API key in cleartext, refuse HTTP to non-loopback hosts
234
+ // unless the operator explicitly opts in via CHECKUP_ALLOW_HTTP=1.
235
+ if (url.protocol === "http:") {
236
+ // WHATWG URL keeps IPv6 literals bracketed in .hostname
237
+ // (e.g. `[::1]`), so strip the brackets before matching the allowlist.
238
+ const hostname = url.hostname.replace(/^\[|\]$/g, "");
239
+ const isLoopback = ["localhost", "127.0.0.1", "::1"].includes(hostname);
240
+ if (!isLoopback && process.env.CHECKUP_ALLOW_HTTP !== "1") {
241
+ throw new Error(
242
+ `Refusing to send API key over plaintext HTTP to '${url.host}'. ` +
243
+ `Use https://, a loopback hostname, or set CHECKUP_ALLOW_HTTP=1.`
244
+ );
245
+ }
246
+ }
247
+ const transport = url.protocol === "http:" ? http : https;
248
+ const req = transport.request(
230
249
  url,
231
250
  {
232
251
  method: "POST",
@@ -277,7 +296,7 @@ async function postRpc<T>(params: {
277
296
  req.destroy(); // Backup: ensure request is terminated
278
297
  settledReject(new Error(`RPC ${rpcName} timed out after ${timeoutMs}ms (no response)`));
279
298
  }, timeoutMs);
280
-
299
+
281
300
  req.on("error", (err: Error) => {
282
301
  // Handle abort as timeout (may already be rejected by timeout handler)
283
302
  if (err.name === "AbortError" || (err as any).code === "ABORT_ERR") {
@@ -295,7 +314,7 @@ async function postRpc<T>(params: {
295
314
  settledReject(err);
296
315
  }
297
316
  });
298
-
317
+
299
318
  req.write(body);
300
319
  req.end();
301
320
  });
package/lib/checkup.ts CHANGED
@@ -243,6 +243,50 @@ export interface RedundantIndex {
243
243
  redundant_to_parse_error?: string;
244
244
  }
245
245
 
246
+ /**
247
+ * I/O statistics by backend type (I001) - matches I001.schema.json backendIOStats
248
+ */
249
+ export interface BackendIOStats {
250
+ backend_type: string;
251
+ reads: number;
252
+ /** Read MiB. The historical `_mb` suffix is retained for schema compatibility. */
253
+ read_bytes_mb: number;
254
+ read_time_ms: number;
255
+ writes: number;
256
+ /** Written MiB. The historical `_mb` suffix is retained for schema compatibility. */
257
+ write_bytes_mb: number;
258
+ write_time_ms: number;
259
+ writebacks: number;
260
+ /** Writeback MiB. The historical `_mb` suffix is retained for schema compatibility. */
261
+ writeback_bytes_mb: number;
262
+ writeback_time_ms: number;
263
+ fsyncs: number;
264
+ fsync_time_ms: number;
265
+ /** Relation extension operations reported by pg_stat_io for PostgreSQL 16+. */
266
+ extends?: number;
267
+ /** Extended MiB; PG16 derives extends * op_bytes, PG18+ uses native extend_bytes. */
268
+ extend_bytes_mb?: number;
269
+ hits: number;
270
+ evictions: number;
271
+ reuses: number;
272
+ }
273
+
274
+ /**
275
+ * I/O statistics analysis summary (I001)
276
+ */
277
+ export interface IOAnalysis {
278
+ total_read_mb: number;
279
+ total_write_mb: number;
280
+ /** read_time_ms + write_time_ms across backends. Excludes writeback and fsync time. */
281
+ total_io_time_ms: number;
282
+ /** Buffer hit ratio: hits / (hits + reads) * 100. */
283
+ read_hit_ratio_pct: number;
284
+ /** Average read latency, or null when there are no reads. */
285
+ avg_read_time_ms: number | null;
286
+ /** Average write latency, or null when there are no writes. */
287
+ avg_write_time_ms: number | null;
288
+ }
289
+
246
290
  /**
247
291
  * Node result for reports
248
292
  */
@@ -1664,6 +1708,186 @@ async function generateG003(client: Client, nodeName: string): Promise<Report> {
1664
1708
  return report;
1665
1709
  }
1666
1710
 
1711
+ /**
1712
+ * Get I/O statistics from pg_stat_io (PostgreSQL 16+).
1713
+ * Uses 'pg_stat_io' metric from metrics.yml.
1714
+ *
1715
+ * @param client - Connected PostgreSQL client
1716
+ * @param pgMajorVersion - PostgreSQL major version; defaults to 0 so omitted versions return unavailable
1717
+ * @param metricSqlOverride - Optional SQL override; empty or placeholder SQL returns [] without querying
1718
+ * @returns Array of I/O stats by backend type, or empty array if unavailable
1719
+ */
1720
+ export async function getIOStatistics(
1721
+ client: Client,
1722
+ pgMajorVersion: number = 0,
1723
+ metricSqlOverride?: string
1724
+ ): Promise<BackendIOStats[]> {
1725
+ // pg_stat_io requires PostgreSQL 16+
1726
+ if (pgMajorVersion < 16) {
1727
+ return [];
1728
+ }
1729
+
1730
+ try {
1731
+ const sql = metricSqlOverride ?? getMetricSql(METRIC_NAMES.I001, pgMajorVersion);
1732
+ // Skip if metric returns empty/placeholder SQL
1733
+ if (!sql || sql.trim().startsWith(";")) {
1734
+ return [];
1735
+ }
1736
+
1737
+ const result = await client.query(sql);
1738
+ return result.rows.map((row) => {
1739
+ const transformed = transformMetricRow(row);
1740
+ return {
1741
+ backend_type: String(transformed.backend_type || "unknown"),
1742
+ reads: parseInt(String(transformed.reads || 0), 10),
1743
+ read_bytes_mb: parseInt(String(transformed.read_bytes_mb || 0), 10),
1744
+ read_time_ms: parseInt(String(transformed.read_time_ms || 0), 10),
1745
+ writes: parseInt(String(transformed.writes || 0), 10),
1746
+ write_bytes_mb: parseInt(String(transformed.write_bytes_mb || 0), 10),
1747
+ write_time_ms: parseInt(String(transformed.write_time_ms || 0), 10),
1748
+ writebacks: parseInt(String(transformed.writebacks || 0), 10),
1749
+ writeback_bytes_mb: parseInt(String(transformed.writeback_bytes_mb || 0), 10),
1750
+ writeback_time_ms: parseInt(String(transformed.writeback_time_ms || 0), 10),
1751
+ fsyncs: parseInt(String(transformed.fsyncs || 0), 10),
1752
+ fsync_time_ms: parseInt(String(transformed.fsync_time_ms || 0), 10),
1753
+ extends: parseInt(String(transformed.extends || 0), 10),
1754
+ extend_bytes_mb: parseInt(String(transformed.extend_bytes_mb || 0), 10),
1755
+ hits: parseInt(String(transformed.hits || 0), 10),
1756
+ evictions: parseInt(String(transformed.evictions || 0), 10),
1757
+ reuses: parseInt(String(transformed.reuses || 0), 10),
1758
+ };
1759
+ });
1760
+ } catch (err) {
1761
+ const errorMsg = err instanceof Error ? err.message : String(err);
1762
+ console.log(`[I001] Error fetching I/O statistics: ${errorMsg}`);
1763
+ return [];
1764
+ }
1765
+ }
1766
+
1767
+ /**
1768
+ * Generate I001 report - I/O statistics (pg_stat_io)
1769
+ *
1770
+ * This report collects I/O statistics from pg_stat_io (PostgreSQL 16+),
1771
+ * providing insights into read/write operations by backend type.
1772
+ *
1773
+ * @param client - Connected PostgreSQL client
1774
+ * @param nodeName - Node name for the report payload
1775
+ * @returns I001 report payload
1776
+ */
1777
+ async function generateI001(client: Client, nodeName: string): Promise<Report> {
1778
+ const report = createBaseReport("I001", "I/O statistics (pg_stat_io)", nodeName);
1779
+ const postgresVersion = await getPostgresVersion(client);
1780
+ const parsedPgMajorVersion = parseInt(postgresVersion.server_major_ver, 10);
1781
+ const pgMajorVersion = Number.isFinite(parsedPgMajorVersion) ? parsedPgMajorVersion : 0;
1782
+
1783
+ // pg_stat_io requires PostgreSQL 16+
1784
+ if (pgMajorVersion < 16) {
1785
+ report.results[nodeName] = {
1786
+ data: {
1787
+ available: false,
1788
+ min_version_required: "16",
1789
+ by_backend_type: [],
1790
+ analysis: {
1791
+ total_read_mb: 0,
1792
+ total_write_mb: 0,
1793
+ total_io_time_ms: 0,
1794
+ read_hit_ratio_pct: 0,
1795
+ avg_read_time_ms: null,
1796
+ avg_write_time_ms: null,
1797
+ },
1798
+ stats_reset_s: null,
1799
+ },
1800
+ postgres_version: postgresVersion,
1801
+ };
1802
+ return report;
1803
+ }
1804
+
1805
+ const ioStats = await getIOStatistics(client, pgMajorVersion);
1806
+
1807
+ // Sort by backend_type, putting 'total' first if present
1808
+ ioStats.sort((a, b) => {
1809
+ if (a.backend_type === "total") return -1;
1810
+ if (b.backend_type === "total") return 1;
1811
+ return a.backend_type.localeCompare(b.backend_type);
1812
+ });
1813
+
1814
+ // Find 'total' row for analysis, or sum all rows if not present
1815
+ let totalStats = ioStats.find((s) => s.backend_type === "total");
1816
+ if (!totalStats && ioStats.length > 0) {
1817
+ totalStats = {
1818
+ backend_type: "total",
1819
+ reads: ioStats.reduce((sum, s) => sum + s.reads, 0),
1820
+ read_bytes_mb: ioStats.reduce((sum, s) => sum + s.read_bytes_mb, 0),
1821
+ read_time_ms: ioStats.reduce((sum, s) => sum + s.read_time_ms, 0),
1822
+ writes: ioStats.reduce((sum, s) => sum + s.writes, 0),
1823
+ write_bytes_mb: ioStats.reduce((sum, s) => sum + s.write_bytes_mb, 0),
1824
+ write_time_ms: ioStats.reduce((sum, s) => sum + s.write_time_ms, 0),
1825
+ writebacks: ioStats.reduce((sum, s) => sum + s.writebacks, 0),
1826
+ writeback_bytes_mb: ioStats.reduce((sum, s) => sum + s.writeback_bytes_mb, 0),
1827
+ writeback_time_ms: ioStats.reduce((sum, s) => sum + s.writeback_time_ms, 0),
1828
+ fsyncs: ioStats.reduce((sum, s) => sum + s.fsyncs, 0),
1829
+ fsync_time_ms: ioStats.reduce((sum, s) => sum + s.fsync_time_ms, 0),
1830
+ extends: ioStats.reduce((sum, s) => sum + (s.extends || 0), 0),
1831
+ extend_bytes_mb: ioStats.reduce((sum, s) => sum + (s.extend_bytes_mb || 0), 0),
1832
+ hits: ioStats.reduce((sum, s) => sum + s.hits, 0),
1833
+ evictions: ioStats.reduce((sum, s) => sum + s.evictions, 0),
1834
+ reuses: ioStats.reduce((sum, s) => sum + s.reuses, 0),
1835
+ };
1836
+ }
1837
+
1838
+ // Calculate analysis
1839
+ const totalReadMb = totalStats?.read_bytes_mb || 0;
1840
+ const totalWriteMb = totalStats?.write_bytes_mb || 0;
1841
+ const totalReadTime = totalStats?.read_time_ms || 0;
1842
+ const totalWriteTime = totalStats?.write_time_ms || 0;
1843
+ const totalIoTimeMs = totalReadTime + totalWriteTime;
1844
+ const totalReads = totalStats?.reads || 0;
1845
+ const totalWrites = totalStats?.writes || 0;
1846
+ const totalHits = totalStats?.hits || 0;
1847
+
1848
+ // Hit ratio: hits / (hits + reads) * 100
1849
+ const totalRequests = totalHits + totalReads;
1850
+ const readHitRatioPct = totalRequests > 0 ? Math.round((totalHits / totalRequests) * 10000) / 100 : 0;
1851
+
1852
+ // Average times
1853
+ const avgReadTimeMs = totalReads > 0 ? Math.round((totalReadTime / totalReads) * 1000) / 1000 : null;
1854
+ const avgWriteTimeMs = totalWrites > 0 ? Math.round((totalWriteTime / totalWrites) * 1000) / 1000 : null;
1855
+
1856
+ // Direct-connect checkup queries stats_reset separately instead of reading it from pgwatch metrics.
1857
+ let statsResetS: number | null = null;
1858
+ try {
1859
+ const resetResult = await client.query(`
1860
+ select max(extract(epoch from now() - stats_reset)::int) as stats_reset_s
1861
+ from pg_stat_io
1862
+ `);
1863
+ if (resetResult.rows.length > 0 && resetResult.rows[0].stats_reset_s !== null) {
1864
+ const parsedStatsResetS = parseInt(resetResult.rows[0].stats_reset_s, 10);
1865
+ statsResetS = Number.isFinite(parsedStatsResetS) ? parsedStatsResetS : null;
1866
+ }
1867
+ } catch (err) {
1868
+ // Ignore errors getting stats_reset - not critical
1869
+ }
1870
+
1871
+ report.results[nodeName] = {
1872
+ data: {
1873
+ available: ioStats.length > 0,
1874
+ by_backend_type: ioStats,
1875
+ analysis: {
1876
+ total_read_mb: totalReadMb,
1877
+ total_write_mb: totalWriteMb,
1878
+ total_io_time_ms: totalIoTimeMs,
1879
+ read_hit_ratio_pct: readHitRatioPct,
1880
+ avg_read_time_ms: avgReadTimeMs,
1881
+ avg_write_time_ms: avgWriteTimeMs,
1882
+ },
1883
+ stats_reset_s: statsResetS,
1884
+ },
1885
+ postgres_version: postgresVersion,
1886
+ };
1887
+
1888
+ return report;
1889
+ }
1890
+
1667
1891
  /**
1668
1892
  * Available report generators
1669
1893
  */
@@ -1683,6 +1907,7 @@ export const REPORT_GENERATORS: Record<string, (client: Client, nodeName: string
1683
1907
  H001: generateH001,
1684
1908
  H002: generateH002,
1685
1909
  H004: generateH004,
1910
+ I001: generateI001,
1686
1911
  };
1687
1912
 
1688
1913
  /**
package/lib/init.ts CHANGED
@@ -127,8 +127,10 @@ export async function connectWithSslFallback(
127
127
  verbose?: boolean
128
128
  ): Promise<{ client: PgClient; usedSsl: boolean }> {
129
129
  const tryConnect = async (config: PgClientConfig): Promise<PgClient> => {
130
- const client = new ClientClass(config);
130
+ const client = new ClientClass({ ...config, connectionTimeoutMillis: 10_000 } as any);
131
131
  await client.connect();
132
+ // Set a default statement timeout to prevent runaway queries
133
+ await client.query("SET statement_timeout = '30s'");
132
134
  return client;
133
135
  };
134
136
 
@@ -454,8 +456,18 @@ export function resolveAdminConnection(opts: {
454
456
  return { clientConfig: cfg, display: describePgConfig(cfg), sslFallbackEnabled: true };
455
457
  }
456
458
 
459
+ /**
460
+ * Generate a cryptographically secure random password for the monitoring role.
461
+ *
462
+ * Encoding note — bytes vs output length:
463
+ * - hex: N bytes → 2N characters (24 bytes → 48 hex chars)
464
+ * - base64: N bytes → ⌈4N/3⌉ chars (24 bytes → 32 base64url chars, no padding)
465
+ *
466
+ * We use base64url (RFC 4648 §5) because it is shorter than hex and safe in URLs,
467
+ * connection strings, and shell variables without quoting.
468
+ */
457
469
  function generateMonitoringPassword(): string {
458
- // URL-safe and easy to copy/paste; 24 bytes => 32 base64url chars (no padding).
470
+ // 24 random bytes → 32 base64url characters (no padding).
459
471
  // Note: randomBytes() throws on failure; we add a tiny sanity check for unexpected output.
460
472
  const password = randomBytes(24).toString("base64url");
461
473
  if (password.length < 30) {
@@ -659,6 +671,36 @@ export type VerifyInitResult = {
659
671
  missingOptional: string[];
660
672
  };
661
673
 
674
+ /** A single permission check result from the preflight query. */
675
+ export type PermissionCheckRow = {
676
+ permission_name: string;
677
+ status: "required" | "optional";
678
+ /**
679
+ * Whether the permission is granted.
680
+ * - `true` — permission is granted
681
+ * - `false` — permission is explicitly denied
682
+ * - `null` — check was skipped (e.g., object does not exist, so the privilege
683
+ * check is inapplicable — such as SELECT on a view that hasn't been created)
684
+ */
685
+ granted: boolean | null;
686
+ fix_command: string | null;
687
+ };
688
+
689
+ /**
690
+ * Result of the preflight permission check for the current DB user.
691
+ *
692
+ * - `ok` is `true` when `missingRequired` is empty.
693
+ * - `rows` contains every check (for inspection / logging).
694
+ * - `missingRequired` / `missingOptional` are filtered subsets of `rows`
695
+ * where the permission is not granted (`granted !== true`).
696
+ */
697
+ export type PreflightPermissionResult = {
698
+ ok: boolean;
699
+ rows: PermissionCheckRow[];
700
+ missingRequired: PermissionCheckRow[];
701
+ missingOptional: PermissionCheckRow[];
702
+ };
703
+
662
704
  export type UninitPlan = {
663
705
  monitoringUser: string;
664
706
  database: string;
@@ -813,7 +855,12 @@ export async function verifyInitSetup(params: {
813
855
  missingRequired.push("USAGE on schema postgres_ai");
814
856
  }
815
857
 
816
- const viewExistsRes = await params.client.query("select to_regclass('postgres_ai.pg_statistic') is not null as ok");
858
+ const viewExistsRes = await params.client.query(`
859
+ select case
860
+ when not has_schema_privilege(current_user, 'postgres_ai', 'USAGE') then null
861
+ else to_regclass('postgres_ai.pg_statistic') is not null
862
+ end as ok
863
+ `);
817
864
  if (!viewExistsRes.rows?.[0]?.ok) {
818
865
  missingRequired.push("view postgres_ai.pg_statistic exists");
819
866
  } else {
@@ -936,4 +983,149 @@ export async function verifyInitSetup(params: {
936
983
  }
937
984
  }
938
985
 
986
+ /**
987
+ * Check that the currently connected DB user has sufficient permissions for
988
+ * monitoring operations. Returns structured results with fix commands.
989
+ *
990
+ * Required permissions cause startup to fail; optional ones produce warnings.
991
+ *
992
+ * @param client An already-connected PostgreSQL client.
993
+ * @returns A {@link PreflightPermissionResult} with per-check rows and
994
+ * filtered `missingRequired` / `missingOptional` arrays.
995
+ * @throws Propagates database errors (network, permission denied on catalog
996
+ * tables, timeout) to the caller.
997
+ */
998
+ export async function checkCurrentUserPermissions(
999
+ client: PgClient
1000
+ ): Promise<PreflightPermissionResult> {
1001
+ const sql = `
1002
+ with permission_checks as (
1003
+ select
1004
+ format('connect on database %I', current_database()) as permission_name,
1005
+ 'required' as status,
1006
+ has_database_privilege(current_user, current_database(), 'connect') as granted
1007
+
1008
+ union all
1009
+
1010
+ select
1011
+ 'pg_monitor role membership' as permission_name,
1012
+ 'required' as status,
1013
+ -- CASE guarantees evaluation order: pg_has_role() is only called if the
1014
+ -- pg_monitor role exists, avoiding ERROR on PostgreSQL < 10 or when dropped.
1015
+ case
1016
+ when not exists (select from pg_roles where rolname = 'pg_monitor')
1017
+ then false
1018
+ else pg_has_role(current_user, 'pg_monitor', 'member')
1019
+ end as granted
1020
+
1021
+ union all
1022
+
1023
+ select
1024
+ 'select on pg_catalog.pg_index' as permission_name,
1025
+ 'required' as status,
1026
+ has_table_privilege(current_user, 'pg_catalog.pg_index', 'select') as granted
1027
+
1028
+ union all
1029
+
1030
+ select
1031
+ 'postgres_ai.pg_statistic view exists' as permission_name,
1032
+ 'optional' as status,
1033
+ case
1034
+ when not has_schema_privilege(current_user, 'postgres_ai', 'USAGE') then null
1035
+ else to_regclass('postgres_ai.pg_statistic') is not null
1036
+ end as granted
1037
+
1038
+ union all
1039
+
1040
+ select
1041
+ 'select on postgres_ai.pg_statistic' as permission_name,
1042
+ 'optional' as status,
1043
+ case
1044
+ when not has_schema_privilege(current_user, 'postgres_ai', 'USAGE') then null
1045
+ when to_regclass('postgres_ai.pg_statistic') is null then null
1046
+ else has_table_privilege(current_user, 'postgres_ai.pg_statistic', 'select')
1047
+ end as granted
1048
+ )
1049
+ select
1050
+ permission_name,
1051
+ status,
1052
+ granted,
1053
+ case
1054
+ when status = 'required' and not coalesce(granted, false) then
1055
+ case
1056
+ when permission_name like 'connect%' then
1057
+ format('grant connect on database %I to %I;', current_database(), current_user)
1058
+ when permission_name = 'pg_monitor role membership' then
1059
+ format('grant pg_monitor to %I;', current_user)
1060
+ when permission_name like 'select on pg_catalog.pg_index' then
1061
+ format('grant select on pg_catalog.pg_index to %I;', current_user)
1062
+ end
1063
+ when permission_name = 'postgres_ai.pg_statistic view exists' and granted = false then
1064
+ '-- create postgres_ai.pg_statistic view (see setup script)'
1065
+ when permission_name = 'select on postgres_ai.pg_statistic' and granted = false then
1066
+ format('grant select on postgres_ai.pg_statistic to %I;', current_user)
1067
+ else null
1068
+ end as fix_command
1069
+ from permission_checks
1070
+ order by
1071
+ case status when 'required' then 1 else 2 end,
1072
+ permission_name;
1073
+ `;
1074
+
1075
+ const res = await client.query(sql);
1076
+ const rows: PermissionCheckRow[] = res.rows;
1077
+
1078
+ // Required: treat null (skipped) as not-granted — fail safe.
1079
+ // Optional: only explicit false counts as missing; null means the check was
1080
+ // skipped (e.g., view doesn't exist) and is not actionable.
1081
+ const missingRequired = rows.filter((r) => r.status === "required" && r.granted !== true);
1082
+ const missingOptional = rows.filter((r) => r.status === "optional" && r.granted === false);
1083
+
1084
+ return {
1085
+ ok: missingRequired.length === 0,
1086
+ rows,
1087
+ missingRequired,
1088
+ missingOptional,
1089
+ };
1090
+ }
1091
+
1092
+ /**
1093
+ * Format permission check results into user-facing error/warning lines.
1094
+ *
1095
+ * @returns An object with `warnings` (for optional misses), `errors` (for
1096
+ * required misses including fix SQL), and `failed` (whether required
1097
+ * permissions are missing).
1098
+ */
1099
+ export function formatPermissionCheckMessages(result: PreflightPermissionResult): {
1100
+ failed: boolean;
1101
+ warnings: string[];
1102
+ errors: string[];
1103
+ } {
1104
+ const warnings: string[] = [];
1105
+ const errors: string[] = [];
1106
+
1107
+ for (const row of result.missingOptional) {
1108
+ const fix = row.fix_command ? ` Fix: ${row.fix_command}` : "";
1109
+ warnings.push(`Warning: optional permission missing — ${row.permission_name}.${fix}`);
1110
+ }
939
1111
 
1112
+ if (!result.ok) {
1113
+ errors.push("Error: the database user is missing required permissions.\n");
1114
+ errors.push("Missing permissions:");
1115
+ for (const row of result.missingRequired) {
1116
+ errors.push(` - ${row.permission_name}`);
1117
+ }
1118
+ const fixes = result.missingRequired
1119
+ .map((r) => r.fix_command)
1120
+ .filter(Boolean);
1121
+ if (fixes.length > 0) {
1122
+ errors.push("\nTo fix, run the following as a superuser:\n");
1123
+ for (const fix of fixes) {
1124
+ errors.push(` ${fix}`);
1125
+ }
1126
+ }
1127
+ errors.push("\nAlternatively, run 'postgresai prepare-db' to set up permissions automatically.");
1128
+ }
1129
+
1130
+ return { failed: !result.ok, warnings, errors };
1131
+ }
@@ -63,7 +63,7 @@ export function listMetricNames(): string[] {
63
63
  export const METRIC_NAMES = {
64
64
  // Index health checks
65
65
  H001: "pg_invalid_indexes",
66
- H002: "unused_indexes",
66
+ H002: "unused_indexes",
67
67
  H004: "redundant_indexes",
68
68
  // Bloat estimation
69
69
  F004: "pg_table_bloat",
@@ -75,6 +75,8 @@ export const METRIC_NAMES = {
75
75
  dbSize: "db_size",
76
76
  // Stats reset info (H002)
77
77
  statsReset: "stats_reset",
78
+ // I/O statistics (I001) - PostgreSQL 16+
79
+ I001: "pg_stat_io",
78
80
  } as const;
79
81
 
80
82
  /**
package/lib/supabase.ts CHANGED
@@ -350,6 +350,10 @@ export async function fetchPoolerDatabaseUrl(
350
350
  config: SupabaseConfig,
351
351
  username: string
352
352
  ): Promise<string | null> {
353
+ // Validate projectRef format to prevent SSRF via crafted project references
354
+ if (!isValidProjectRef(config.projectRef)) {
355
+ throw new Error(`Invalid Supabase project reference format: "${config.projectRef}". Expected 10-30 alphanumeric characters.`);
356
+ }
353
357
  const url = `${SUPABASE_API_BASE}/v1/projects/${encodeURIComponent(config.projectRef)}/config/database/pooler`;
354
358
 
355
359
  // For Supabase pooler connections, the username must include the project ref:
@@ -669,7 +673,10 @@ export async function verifyInitSetupViaSupabase(params: {
669
673
 
670
674
  // Check pg_statistic view
671
675
  const viewExistsRes = await params.client.query(
672
- "SELECT to_regclass('postgres_ai.pg_statistic') IS NOT NULL as ok",
676
+ `SELECT CASE
677
+ WHEN NOT has_schema_privilege(current_user, 'postgres_ai', 'USAGE') THEN NULL
678
+ ELSE to_regclass('postgres_ai.pg_statistic') IS NOT NULL
679
+ END as ok`,
673
680
  true
674
681
  );
675
682
  if (!viewExistsRes.rows?.[0]?.ok) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "postgresai",
3
- "version": "0.15.0-dev.7",
3
+ "version": "0.15.0-dev.9",
4
4
  "description": "postgres_ai CLI",
5
5
  "license": "Apache-2.0",
6
6
  "private": false,
@@ -28,7 +28,7 @@
28
28
  "embed-metrics": "bun run scripts/embed-metrics.ts",
29
29
  "embed-checkup-dictionary": "bun run scripts/embed-checkup-dictionary.ts",
30
30
  "embed-all": "bun run embed-metrics && bun run embed-checkup-dictionary",
31
- "build": "bun run embed-all && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e \"const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))\" && cp -r ./sql ./dist/sql",
31
+ "build": "bun run embed-all && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e \"const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))\" && cp -r ./sql ./dist/sql && cp ../instances.demo.yml ./instances.demo.yml",
32
32
  "prepublishOnly": "npm run build",
33
33
  "start": "bun ./bin/postgres-ai.ts --help",
34
34
  "start:node": "node ./dist/bin/postgres-ai.js --help",
@@ -41,7 +41,7 @@
41
41
  },
42
42
  "dependencies": {
43
43
  "@modelcontextprotocol/sdk": "^1.20.2",
44
- "commander": "^12.1.0",
44
+ "commander": "^14.0.3",
45
45
  "js-yaml": "^4.1.0",
46
46
  "pg": "^8.16.3"
47
47
  },
@@ -51,7 +51,7 @@
51
51
  "@types/pg": "^8.15.6",
52
52
  "ajv": "^8.17.1",
53
53
  "ajv-formats": "^3.0.1",
54
- "typescript": "^5.9.3"
54
+ "typescript": "^6.0.2"
55
55
  },
56
56
  "publishConfig": {
57
57
  "access": "public"
@@ -51,7 +51,16 @@ function generateTypeScript(data: CheckupDictionaryEntry[], sourceUrl: string):
51
51
  return lines.join("\n");
52
52
  }
53
53
 
54
+ // Allowed hosts for fetch requests to prevent SSRF
55
+ const ALLOWED_HOSTS = ["postgres.ai"];
56
+
54
57
  async function fetchWithTimeout(url: string, timeoutMs: number): Promise<Response> {
58
+ // Validate URL against allowlist to prevent SSRF
59
+ const parsed = new URL(url);
60
+ if (!ALLOWED_HOSTS.includes(parsed.hostname)) {
61
+ throw new Error(`Fetch blocked: host "${parsed.hostname}" is not in the allowlist`);
62
+ }
63
+
55
64
  const controller = new AbortController();
56
65
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
57
66
 
@@ -49,6 +49,8 @@ const REQUIRED_METRICS = [
49
49
  // Bloat estimation (F004, F005)
50
50
  "pg_table_bloat",
51
51
  "pg_btree_bloat",
52
+ // I/O statistics (I001)
53
+ "pg_stat_io",
52
54
  ];
53
55
 
54
56
  function main() {