postgresai 0.14.0-beta.3 → 0.14.0-beta.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/README.md +45 -45
  2. package/bin/postgres-ai.ts +946 -336
  3. package/bun.lock +258 -0
  4. package/bunfig.toml +11 -0
  5. package/dist/bin/postgres-ai.js +27868 -1771
  6. package/lib/auth-server.ts +124 -106
  7. package/lib/checkup-api.ts +386 -0
  8. package/lib/checkup.ts +1327 -0
  9. package/lib/config.ts +3 -0
  10. package/lib/init.ts +282 -78
  11. package/lib/issues.ts +86 -195
  12. package/lib/mcp-server.ts +6 -17
  13. package/lib/metrics-embedded.ts +79 -0
  14. package/lib/metrics-loader.ts +127 -0
  15. package/lib/util.ts +61 -0
  16. package/package.json +18 -10
  17. package/packages/postgres-ai/README.md +26 -0
  18. package/packages/postgres-ai/bin/postgres-ai.js +27 -0
  19. package/packages/postgres-ai/package.json +27 -0
  20. package/scripts/embed-metrics.ts +154 -0
  21. package/sql/02.permissions.sql +9 -5
  22. package/sql/05.helpers.sql +415 -0
  23. package/test/checkup.integration.test.ts +273 -0
  24. package/test/checkup.test.ts +890 -0
  25. package/test/init.integration.test.ts +399 -0
  26. package/test/init.test.ts +345 -0
  27. package/test/schema-validation.test.ts +81 -0
  28. package/test/test-utils.ts +122 -0
  29. package/tsconfig.json +12 -20
  30. package/dist/bin/postgres-ai.d.ts +0 -3
  31. package/dist/bin/postgres-ai.d.ts.map +0 -1
  32. package/dist/bin/postgres-ai.js.map +0 -1
  33. package/dist/lib/auth-server.d.ts +0 -31
  34. package/dist/lib/auth-server.d.ts.map +0 -1
  35. package/dist/lib/auth-server.js +0 -263
  36. package/dist/lib/auth-server.js.map +0 -1
  37. package/dist/lib/config.d.ts +0 -45
  38. package/dist/lib/config.d.ts.map +0 -1
  39. package/dist/lib/config.js +0 -181
  40. package/dist/lib/config.js.map +0 -1
  41. package/dist/lib/init.d.ts +0 -75
  42. package/dist/lib/init.d.ts.map +0 -1
  43. package/dist/lib/init.js +0 -482
  44. package/dist/lib/init.js.map +0 -1
  45. package/dist/lib/issues.d.ts +0 -75
  46. package/dist/lib/issues.d.ts.map +0 -1
  47. package/dist/lib/issues.js +0 -336
  48. package/dist/lib/issues.js.map +0 -1
  49. package/dist/lib/mcp-server.d.ts +0 -9
  50. package/dist/lib/mcp-server.d.ts.map +0 -1
  51. package/dist/lib/mcp-server.js +0 -168
  52. package/dist/lib/mcp-server.js.map +0 -1
  53. package/dist/lib/pkce.d.ts +0 -32
  54. package/dist/lib/pkce.d.ts.map +0 -1
  55. package/dist/lib/pkce.js +0 -101
  56. package/dist/lib/pkce.js.map +0 -1
  57. package/dist/lib/util.d.ts +0 -27
  58. package/dist/lib/util.d.ts.map +0 -1
  59. package/dist/lib/util.js +0 -46
  60. package/dist/lib/util.js.map +0 -1
  61. package/dist/package.json +0 -46
  62. package/test/init.integration.test.cjs +0 -382
  63. package/test/init.test.cjs +0 -323
package/lib/issues.ts CHANGED
@@ -1,6 +1,4 @@
-import * as https from "https";
-import { URL } from "url";
-import { maskSecret, normalizeBaseUrl } from "./util";
+import { formatHttpError, maskSecret, normalizeBaseUrl } from "./util";
 
 export interface IssueActionItem {
   id: string;
@@ -75,59 +73,33 @@ export async function fetchIssues(params: FetchIssuesParams): Promise<IssueListI
 
   if (debug) {
     const debugHeaders: Record<string, string> = { ...headers, "access-token": maskSecret(apiKey) };
-    // eslint-disable-next-line no-console
     console.log(`Debug: Resolved API base URL: ${base}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: GET URL: ${url.toString()}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Auth scheme: access-token`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Request headers: ${JSON.stringify(debugHeaders)}`);
   }
 
-  return new Promise((resolve, reject) => {
-    const req = https.request(
-      url,
-      {
-        method: "GET",
-        headers,
-      },
-      (res) => {
-        let data = "";
-        res.on("data", (chunk) => (data += chunk));
-        res.on("end", () => {
-          if (debug) {
-            // eslint-disable-next-line no-console
-            console.log(`Debug: Response status: ${res.statusCode}`);
-            // eslint-disable-next-line no-console
-            console.log(`Debug: Response headers: ${JSON.stringify(res.headers)}`);
-          }
-          if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
-            try {
-              const parsed = JSON.parse(data) as IssueListItem[];
-              resolve(parsed);
-            } catch {
-              reject(new Error(`Failed to parse issues response: ${data}`));
-            }
-          } else {
-            let errMsg = `Failed to fetch issues: HTTP ${res.statusCode}`;
-            if (data) {
-              try {
-                const errObj = JSON.parse(data);
-                errMsg += `\n${JSON.stringify(errObj, null, 2)}`;
-              } catch {
-                errMsg += `\n${data}`;
-              }
-            }
-            reject(new Error(errMsg));
-          }
-        });
-      }
-    );
-
-    req.on("error", (err: Error) => reject(err));
-    req.end();
+  const response = await fetch(url.toString(), {
+    method: "GET",
+    headers,
   });
+
+  if (debug) {
+    console.log(`Debug: Response status: ${response.status}`);
+    console.log(`Debug: Response headers: ${JSON.stringify(Object.fromEntries(response.headers.entries()))}`);
+  }
+
+  const data = await response.text();
+
+  if (response.ok) {
+    try {
+      return JSON.parse(data) as IssueListItem[];
+    } catch {
+      throw new Error(`Failed to parse issues response: ${data}`);
+    }
+  } else {
+    throw new Error(formatHttpError("Failed to fetch issues", response.status, data));
+  }
 }
 
 
@@ -158,59 +130,33 @@ export async function fetchIssueComments(params: FetchIssueCommentsParams): Prom
 
   if (debug) {
     const debugHeaders: Record<string, string> = { ...headers, "access-token": maskSecret(apiKey) };
-    // eslint-disable-next-line no-console
     console.log(`Debug: Resolved API base URL: ${base}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: GET URL: ${url.toString()}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Auth scheme: access-token`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Request headers: ${JSON.stringify(debugHeaders)}`);
   }
 
-  return new Promise((resolve, reject) => {
-    const req = https.request(
-      url,
-      {
-        method: "GET",
-        headers,
-      },
-      (res) => {
-        let data = "";
-        res.on("data", (chunk) => (data += chunk));
-        res.on("end", () => {
-          if (debug) {
-            // eslint-disable-next-line no-console
-            console.log(`Debug: Response status: ${res.statusCode}`);
-            // eslint-disable-next-line no-console
-            console.log(`Debug: Response headers: ${JSON.stringify(res.headers)}`);
-          }
-          if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
-            try {
-              const parsed = JSON.parse(data) as IssueComment[];
-              resolve(parsed);
-            } catch {
-              reject(new Error(`Failed to parse issue comments response: ${data}`));
-            }
-          } else {
-            let errMsg = `Failed to fetch issue comments: HTTP ${res.statusCode}`;
-            if (data) {
-              try {
-                const errObj = JSON.parse(data);
-                errMsg += `\n${JSON.stringify(errObj, null, 2)}`;
-              } catch {
-                errMsg += `\n${data}`;
-              }
-            }
-            reject(new Error(errMsg));
-          }
-        });
-      }
-    );
-
-    req.on("error", (err: Error) => reject(err));
-    req.end();
+  const response = await fetch(url.toString(), {
+    method: "GET",
+    headers,
   });
+
+  if (debug) {
+    console.log(`Debug: Response status: ${response.status}`);
+    console.log(`Debug: Response headers: ${JSON.stringify(Object.fromEntries(response.headers.entries()))}`);
+  }
+
+  const data = await response.text();
+
+  if (response.ok) {
+    try {
+      return JSON.parse(data) as IssueComment[];
+    } catch {
+      throw new Error(`Failed to parse issue comments response: ${data}`);
+    }
+  } else {
+    throw new Error(formatHttpError("Failed to fetch issue comments", response.status, data));
+  }
 }
 
 export interface FetchIssueParams {
@@ -243,63 +189,38 @@ export async function fetchIssue(params: FetchIssueParams): Promise<IssueDetail
 
   if (debug) {
     const debugHeaders: Record<string, string> = { ...headers, "access-token": maskSecret(apiKey) };
-    // eslint-disable-next-line no-console
     console.log(`Debug: Resolved API base URL: ${base}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: GET URL: ${url.toString()}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Auth scheme: access-token`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Request headers: ${JSON.stringify(debugHeaders)}`);
   }
 
-  return new Promise((resolve, reject) => {
-    const req = https.request(
-      url,
-      {
-        method: "GET",
-        headers,
-      },
-      (res) => {
-        let data = "";
-        res.on("data", (chunk) => (data += chunk));
-        res.on("end", () => {
-          if (debug) {
-            // eslint-disable-next-line no-console
-            console.log(`Debug: Response status: ${res.statusCode}`);
-            // eslint-disable-next-line no-console
-            console.log(`Debug: Response headers: ${JSON.stringify(res.headers)}`);
-          }
-          if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
-            try {
-              const parsed = JSON.parse(data);
-              if (Array.isArray(parsed)) {
-                resolve((parsed[0] as IssueDetail) ?? null);
-              } else {
-                resolve(parsed as IssueDetail);
-              }
-            } catch {
-              reject(new Error(`Failed to parse issue response: ${data}`));
-            }
-          } else {
-            let errMsg = `Failed to fetch issue: HTTP ${res.statusCode}`;
-            if (data) {
-              try {
-                const errObj = JSON.parse(data);
-                errMsg += `\n${JSON.stringify(errObj, null, 2)}`;
-              } catch {
-                errMsg += `\n${data}`;
-              }
-            }
-            reject(new Error(errMsg));
-          }
-        });
-      }
-    );
-
-    req.on("error", (err: Error) => reject(err));
-    req.end();
+  const response = await fetch(url.toString(), {
+    method: "GET",
+    headers,
   });
+
+  if (debug) {
+    console.log(`Debug: Response status: ${response.status}`);
+    console.log(`Debug: Response headers: ${JSON.stringify(Object.fromEntries(response.headers.entries()))}`);
+  }
+
+  const data = await response.text();
+
+  if (response.ok) {
+    try {
+      const parsed = JSON.parse(data);
+      if (Array.isArray(parsed)) {
+        return (parsed[0] as IssueDetail) ?? null;
+      } else {
+        return parsed as IssueDetail;
+      }
+    } catch {
+      throw new Error(`Failed to parse issue response: ${data}`);
+    }
+  } else {
+    throw new Error(formatHttpError("Failed to fetch issue", response.status, data));
+  }
 }
 
 export interface CreateIssueCommentParams {
@@ -339,67 +260,37 @@ export async function createIssueComment(params: CreateIssueCommentParams): Prom
     "access-token": apiKey,
     "Prefer": "return=representation",
     "Content-Type": "application/json",
-    "Content-Length": Buffer.byteLength(body).toString(),
   };
 
   if (debug) {
     const debugHeaders: Record<string, string> = { ...headers, "access-token": maskSecret(apiKey) };
-    // eslint-disable-next-line no-console
     console.log(`Debug: Resolved API base URL: ${base}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: POST URL: ${url.toString()}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Auth scheme: access-token`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Request headers: ${JSON.stringify(debugHeaders)}`);
-    // eslint-disable-next-line no-console
     console.log(`Debug: Request body: ${body}`);
   }
 
-  return new Promise((resolve, reject) => {
-    const req = https.request(
-      url,
-      {
-        method: "POST",
-        headers,
-      },
-      (res) => {
-        let data = "";
-        res.on("data", (chunk) => (data += chunk));
-        res.on("end", () => {
-          if (debug) {
-            // eslint-disable-next-line no-console
-            console.log(`Debug: Response status: ${res.statusCode}`);
-            // eslint-disable-next-line no-console
-            console.log(`Debug: Response headers: ${JSON.stringify(res.headers)}`);
-          }
-          if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
-            try {
-              const parsed = JSON.parse(data) as IssueComment;
-              resolve(parsed);
-            } catch {
-              reject(new Error(`Failed to parse create comment response: ${data}`));
-            }
-          } else {
-            let errMsg = `Failed to create issue comment: HTTP ${res.statusCode}`;
-            if (data) {
-              try {
-                const errObj = JSON.parse(data);
-                errMsg += `\n${JSON.stringify(errObj, null, 2)}`;
-              } catch {
-                errMsg += `\n${data}`;
-              }
-            }
-            reject(new Error(errMsg));
-          }
-        });
-      }
-    );
-
-    req.on("error", (err: Error) => reject(err));
-    req.write(body);
-    req.end();
+  const response = await fetch(url.toString(), {
+    method: "POST",
+    headers,
+    body,
   });
-}
 
+  if (debug) {
+    console.log(`Debug: Response status: ${response.status}`);
+    console.log(`Debug: Response headers: ${JSON.stringify(Object.fromEntries(response.headers.entries()))}`);
+  }
+
+  const data = await response.text();
 
+  if (response.ok) {
+    try {
+      return JSON.parse(data) as IssueComment;
+    } catch {
+      throw new Error(`Failed to parse create comment response: ${data}`);
+    }
+  } else {
+    throw new Error(formatHttpError("Failed to create issue comment", response.status, data));
+  }
+}
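
Note: all four helpers above now delegate error formatting to formatHttpError, imported from ./util (util.ts gains +61 lines in this release, but its body is not shown in this diff). Judging from the inline logic it replaces, a minimal sketch could look like the following; the actual implementation in util.ts may differ.

// Hypothetical sketch of formatHttpError, inferred from the removed inline logic:
// prefix plus HTTP status, with the response body pretty-printed when it parses as JSON.
export function formatHttpError(prefix: string, status: number, body: string): string {
  let msg = `${prefix}: HTTP ${status}`;
  if (body) {
    try {
      msg += `\n${JSON.stringify(JSON.parse(body), null, 2)}`;
    } catch {
      msg += `\n${body}`;
    }
  }
  return msg;
}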
package/lib/mcp-server.ts CHANGED
@@ -1,12 +1,12 @@
-import * as pkg from "../package.json";
+import pkg from "../package.json";
 import * as config from "./config";
 import { fetchIssues, fetchIssueComments, createIssueComment, fetchIssue } from "./issues";
 import { resolveBaseUrls } from "./util";
 
-// MCP SDK imports
-import { Server } from "@modelcontextprotocol/sdk/server";
-import * as path from "path";
-// Types schemas will be loaded dynamically from the SDK's CJS bundle
+// MCP SDK imports - Bun handles these directly
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import { CallToolRequestSchema, ListToolsRequestSchema } from "@modelcontextprotocol/sdk/types.js";
 
 interface RootOptsLike {
   apiKey?: string;
@@ -14,16 +14,6 @@ interface RootOptsLike {
 }
 
 export async function startMcpServer(rootOpts?: RootOptsLike, extra?: { debug?: boolean }): Promise<void> {
-  // Resolve stdio transport at runtime to avoid subpath export resolution issues
-  const serverEntry = require.resolve("@modelcontextprotocol/sdk/server");
-  const stdioPath = path.join(path.dirname(serverEntry), "stdio.js");
-  // eslint-disable-next-line @typescript-eslint/no-var-requires
-  const { StdioServerTransport } = require(stdioPath);
-  // Load schemas dynamically to avoid subpath export resolution issues
-  const typesPath = path.resolve(path.dirname(serverEntry), "../types.js");
-  // eslint-disable-next-line @typescript-eslint/no-var-requires
-  const { CallToolRequestSchema, ListToolsRequestSchema } = require(typesPath);
-
   const server = new Server(
     { name: "postgresai-mcp", version: pkg.version },
     { capabilities: { tools: {} } }
@@ -85,6 +75,7 @@ export async function startMcpServer(rootOpts?: RootOptsLike, extra?: { debug?:
     };
   });
 
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
   server.setRequestHandler(CallToolRequestSchema, async (req: any) => {
     const toolName = req.params.name;
     const args = (req.params.arguments as Record<string, unknown>) || {};
@@ -152,5 +143,3 @@ export async function startMcpServer(rootOpts?: RootOptsLike, extra?: { debug?:
   const transport = new StdioServerTransport();
   await server.connect(transport);
 }
-
-
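
Note: the rewrite replaces the runtime require.resolve() path surgery with static subpath imports (server/index.js, server/stdio.js, types.js), which the MCP SDK exposes through its package exports map and which Bun resolves at bundle time. A minimal standalone server using the same imports might look like this; the "ping" tool is illustrative only, not part of the package.

import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { ListToolsRequestSchema } from "@modelcontextprotocol/sdk/types.js";

// Same constructor shape as in the diff above: server info, then capabilities.
const server = new Server(
  { name: "example-mcp", version: "0.0.1" },
  { capabilities: { tools: {} } }
);

// Advertise a single illustrative tool.
server.setRequestHandler(ListToolsRequestSchema, async () => ({
  tools: [{ name: "ping", description: "Health check", inputSchema: { type: "object" } }],
}));

const transport = new StdioServerTransport();
await server.connect(transport);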
package/lib/metrics-embedded.ts ADDED
@@ -0,0 +1,79 @@
+// AUTO-GENERATED FILE - DO NOT EDIT
+// Generated from config/pgwatch-prometheus/metrics.yml by scripts/embed-metrics.ts
+// Generated at: 2025-12-28T15:08:46.857Z
+
+/**
+ * Metric definition from metrics.yml
+ */
+export interface MetricDefinition {
+  description?: string;
+  sqls: Record<number, string>; // PG major version -> SQL query
+  gauges?: string[];
+  statement_timeout_seconds?: number;
+}
+
+/**
+ * Embedded metrics for express mode reports.
+ * Only includes metrics required for CLI checkup reports.
+ */
+export const METRICS: Record<string, MetricDefinition> = {
+  "settings": {
+    description: "This metric collects various PostgreSQL server settings and configurations. It provides insights into the server's configuration, including version, memory settings, and other important parameters. This metric is useful for monitoring server settings and ensuring optimal performance. Note: For lock_timeout and statement_timeout, we use reset_val instead of setting because pgwatch overrides these during metric collection, which would mask the actual configured values.",
+    sqls: {
+      11: "with base as ( /* pgwatch_generated */\n select\n name,\n -- Use reset_val for lock_timeout/statement_timeout because pgwatch overrides them\n -- during collection (lock_timeout=100ms, statement_timeout per-metric).\n case\n when name in ('lock_timeout', 'statement_timeout') then reset_val\n else setting\n end as effective_setting,\n unit,\n category,\n vartype,\n -- For lock_timeout/statement_timeout, compare reset_val with boot_val\n -- since source becomes 'session' during collection.\n case\n when name in ('lock_timeout', 'statement_timeout') then (reset_val = boot_val)\n else (source = 'default')\n end as is_default_bool\n from pg_settings\n), with_numeric as (\n select\n *,\n case\n when effective_setting ~ '^-?[0-9]+$' then effective_setting::bigint\n else null\n end as numeric_value\n from base\n)\nselect\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n name as tag_setting_name,\n effective_setting as tag_setting_value,\n unit as tag_unit,\n category as tag_category,\n vartype as tag_vartype,\n numeric_value,\n case\n when numeric_value is null then null\n when unit = '8kB' then numeric_value * 8192\n when unit = 'kB' then numeric_value * 1024\n when unit = 'MB' then numeric_value * 1024 * 1024\n when unit = 'B' then numeric_value\n when unit = 'ms' then numeric_value::numeric / 1000\n when unit = 's' then numeric_value::numeric\n when unit = 'min' then numeric_value::numeric * 60\n else null\n end as setting_normalized,\n case unit\n when '8kB' then 'bytes'\n when 'kB' then 'bytes'\n when 'MB' then 'bytes'\n when 'B' then 'bytes'\n when 'ms' then 'seconds'\n when 's' then 'seconds'\n when 'min' then 'seconds'\n else null\n end as unit_normalized,\n case when is_default_bool then 1 else 0 end as is_default,\n 1 as configured\nfrom with_numeric",
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15,
+  },
+  "db_stats": {
+    description: "Retrieves key statistics from the PostgreSQL `pg_stat_database` view, providing insights into the current database's performance. It returns the number of backends, transaction commits and rollbacks, buffer reads and hits, tuple statistics, conflicts, temporary files and bytes, deadlocks, block read and write times, postmaster uptime, backup duration, recovery status, system identifier, and invalid indexes. This metric helps administrators monitor database activity and performance.",
+    sqls: {
+      11: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
+      12: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
+      14: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n session_time::int8,\n active_time::int8,\n idle_in_transaction_time::int8,\n sessions,\n sessions_abandoned,\n sessions_fatal,\n sessions_killed,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
+      15: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n session_time::int8,\n active_time::int8,\n idle_in_transaction_time::int8,\n sessions,\n sessions_abandoned,\n sessions_fatal,\n sessions_killed,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15,
+  },
+  "db_size": {
+    description: "Retrieves the size of the current database and the size of the `pg_catalog` schema, providing insights into the storage usage of the database. It returns the size in bytes for both the current database and the catalog schema. This metric helps administrators monitor database size and storage consumption.",
+    sqls: {
+      11: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n pg_database_size(current_database()) as size_b,\n (select sum(pg_total_relation_size(c.oid))::int8\n from pg_class c join pg_namespace n on n.oid = c.relnamespace\n where nspname = 'pg_catalog' and relkind = 'r'\n ) as catalog_size_b",
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 300,
+  },
+  "pg_invalid_indexes": {
+    description: "This metric identifies invalid indexes in the database. It provides insights into the number of invalid indexes and their details. This metric helps administrators identify and fix invalid indexes to improve database performance.",
+    sqls: {
+      11: "with fk_indexes as ( /* pgwatch_generated */\n select\n schemaname as tag_schema_name,\n (indexrelid::regclass)::text as tag_index_name,\n (relid::regclass)::text as tag_table_name,\n (confrelid::regclass)::text as tag_fk_table_ref,\n array_to_string(indclass, ', ') as tag_opclasses\n from\n pg_stat_all_indexes\n join pg_index using (indexrelid)\n left join pg_constraint\n on array_to_string(indkey, ',') = array_to_string(conkey, ',')\n and schemaname = (connamespace::regnamespace)::text\n and conrelid = relid\n and contype = 'f'\n where idx_scan = 0\n and indisunique is false\n and conkey is not null --conkey is not null then true else false end as is_fk_idx\n), data as (\n select\n pci.relname as tag_index_name,\n pn.nspname as tag_schema_name,\n pct.relname as tag_table_name,\n quote_ident(pn.nspname) as tag_schema_name,\n quote_ident(pci.relname) as tag_index_name,\n quote_ident(pct.relname) as tag_table_name,\n coalesce(nullif(quote_ident(pn.nspname), 'public') || '.', '') || quote_ident(pct.relname) as tag_relation_name,\n pg_relation_size(pidx.indexrelid) index_size_bytes,\n ((\n select count(1)\n from fk_indexes fi\n where\n fi.tag_fk_table_ref = pct.relname\n and fi.tag_opclasses like (array_to_string(pidx.indclass, ', ') || '%')\n ) > 0)::int as supports_fk\n from pg_index pidx\n join pg_class as pci on pci.oid = pidx.indexrelid\n join pg_class as pct on pct.oid = pidx.indrelid\n left join pg_namespace pn on pn.oid = pct.relnamespace\n where pidx.indisvalid = false\n), data_total as (\n select\n sum(index_size_bytes) as index_size_bytes_sum\n from data\n), num_data as (\n select\n row_number() over () num,\n data.*\n from data\n)\nselect\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n num_data.*\nfrom num_data\nlimit 1000;\n",
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15,
+  },
+  "unused_indexes": {
+    description: "This metric identifies unused indexes in the database. It provides insights into the number of unused indexes and their details. This metric helps administrators identify and fix unused indexes to improve database performance.",
+    sqls: {
+      11: "with fk_indexes as ( /* pgwatch_generated */\n select\n n.nspname as schema_name,\n ci.relname as index_name,\n cr.relname as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_constraint cn on cn.conrelid = cr.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n contype = 'f'\n and i.indisunique is false\n and conkey is not null\n and ci.relpages > 5\n and si.idx_scan < 10\n), table_scans as (\n select relid,\n tables.idx_scan + tables.seq_scan as all_scans,\n ( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del ) as writes,\n pg_relation_size(relid) as table_size\n from pg_stat_all_tables as tables\n join pg_class c on c.oid = relid\n where c.relpages > 5\n), indexes as (\n select\n i.indrelid,\n i.indexrelid,\n n.nspname as schema_name,\n cr.relname as table_name,\n ci.relname as index_name,\n si.idx_scan,\n pg_relation_size(i.indexrelid) as index_bytes,\n ci.relpages,\n (case when a.amname = 'btree' then true else false end) as idx_is_btree,\n array_to_string(i.indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_am a on ci.relam = a.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n i.indisunique = false\n and i.indisvalid = true\n and ci.relpages > 5\n), index_ratios as (\n select\n i.indexrelid as index_id,\n i.schema_name,\n i.table_name,\n i.index_name,\n idx_scan,\n all_scans,\n round(( case when all_scans = 0 then 0.0::numeric\n else idx_scan::numeric/all_scans * 100 end), 2) as index_scan_pct,\n writes,\n round((case when writes = 0 then idx_scan::numeric else idx_scan::numeric/writes end), 2)\n as scans_per_write,\n index_bytes as index_size_bytes,\n table_size as table_size_bytes,\n i.relpages,\n idx_is_btree,\n i.opclasses,\n (\n select count(1)\n from fk_indexes fi\n where fi.fk_table_ref = i.table_name\n and fi.schema_name = i.schema_name\n and fi.opclasses like (i.opclasses || '%')\n ) > 0 as supports_fk\n from indexes i\n join table_scans ts on ts.relid = i.indrelid\n)\nselect\n 'Never Used Indexes' as tag_reason,\n current_database() as tag_datname,\n index_id,\n schema_name as tag_schema_name,\n table_name as tag_table_name,\n index_name as tag_index_name,\n pg_get_indexdef(index_id) as index_definition,\n idx_scan,\n all_scans,\n index_scan_pct,\n writes,\n scans_per_write,\n index_size_bytes,\n table_size_bytes,\n relpages,\n idx_is_btree,\n opclasses as tag_opclasses,\n supports_fk\nfrom index_ratios\nwhere\n idx_scan = 0\n and idx_is_btree\norder by index_size_bytes desc\nlimit 1000;\n",
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15,
+  },
+  "redundant_indexes": {
+    description: "This metric identifies redundant indexes that can potentially be dropped to save storage space and improve write performance. It analyzes index relationships and finds indexes that are covered by other indexes, considering column order, operator classes, and foreign key constraints. Uses the exact logic from tmp.sql with JSON aggregation and proper thresholds.",
+    sqls: {
+      11: "with fk_indexes as ( /* pgwatch_generated */\n select\n n.nspname as schema_name,\n ci.relname as index_name,\n cr.relname as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_constraint cn on cn.conrelid = cr.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n contype = 'f'\n and i.indisunique is false\n and conkey is not null\n and ci.relpages > 5\n and si.idx_scan < 10\n),\n-- Redundant indexes\nindex_data as (\n select\n *,\n indkey::text as columns,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n where indisvalid = true and ci.relpages > 5\n), redundant_indexes as (\n select\n i2.indexrelid as index_id,\n tnsp.nspname as schema_name,\n trel.relname as table_name,\n pg_relation_size(trel.oid) as table_size_bytes,\n irel.relname as index_name,\n am1.amname as access_method,\n (i1.indexrelid::regclass)::text as reason,\n i1.indexrelid as reason_index_id,\n pg_get_indexdef(i1.indexrelid) main_index_def,\n pg_relation_size(i1.indexrelid) main_index_size_bytes,\n pg_get_indexdef(i2.indexrelid) index_def,\n pg_relation_size(i2.indexrelid) index_size_bytes,\n s.idx_scan as index_usage,\n quote_ident(tnsp.nspname) as formated_schema_name,\n coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(irel.relname) as formated_index_name,\n quote_ident(trel.relname) as formated_table_name,\n coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(trel.relname) as formated_relation_name,\n i2.opclasses\n from (\n select indrelid, indexrelid, opclasses, indclass, indexprs, indpred, indisprimary, indisunique, columns\n from index_data\n order by indexrelid\n ) as i1\n join index_data as i2 on (\n i1.indrelid = i2.indrelid -- same table\n and i1.indexrelid <> i2.indexrelid -- NOT same index\n )\n inner join pg_opclass op1 on i1.indclass[0] = op1.oid\n inner join pg_opclass op2 on i2.indclass[0] = op2.oid\n inner join pg_am am1 on op1.opcmethod = am1.oid\n inner join pg_am am2 on op2.opcmethod = am2.oid\n join pg_stat_all_indexes as s on s.indexrelid = i2.indexrelid\n join pg_class as trel on trel.oid = i2.indrelid\n join pg_namespace as tnsp on trel.relnamespace = tnsp.oid\n join pg_class as irel on irel.oid = i2.indexrelid\n where\n not i2.indisprimary -- index 1 is not primary\n and not i2.indisunique -- index 1 is not unique (unique indexes serve constraint purpose)\n and am1.amname = am2.amname -- same access type\n and i1.columns like (i2.columns || '%') -- index 2 includes all columns from index 1\n and i1.opclasses like (i2.opclasses || '%')\n -- index expressions is same\n and pg_get_expr(i1.indexprs, i1.indrelid) is not distinct from pg_get_expr(i2.indexprs, i2.indrelid)\n -- index predicates is same\n and pg_get_expr(i1.indpred, i1.indrelid) is not distinct from pg_get_expr(i2.indpred, i2.indrelid)\n), redundant_indexes_fk as (\n select\n ri.*,\n ((\n select count(1)\n from fk_indexes fi\n where\n fi.fk_table_ref = ri.table_name\n and fi.opclasses like (ri.opclasses || '%')\n ) > 0)::int as supports_fk\n from redundant_indexes ri\n),\n-- Cut recursive links\nredundant_indexes_tmp_num as (\n select row_number() over () num, rig.*\n from redundant_indexes_fk rig\n), redundant_indexes_tmp_links as (\n select\n ri1.*,\n ri2.num as r_num\n from redundant_indexes_tmp_num ri1\n left join redundant_indexes_tmp_num ri2 on ri2.reason_index_id = ri1.index_id and ri1.reason_index_id = ri2.index_id\n), redundant_indexes_tmp_cut as (\n select\n *\n from redundant_indexes_tmp_links\n where num < r_num or r_num is null\n), redundant_indexes_cut_grouped as (\n select\n distinct(num),\n *\n from redundant_indexes_tmp_cut\n order by index_size_bytes desc\n), redundant_indexes_grouped as (\n select\n index_id,\n schema_name as tag_schema_name,\n table_name,\n table_size_bytes,\n index_name as tag_index_name,\n access_method as tag_access_method,\n string_agg(distinct reason, ', ') as tag_reason,\n index_size_bytes,\n index_usage,\n index_def as index_definition,\n formated_index_name as tag_index_name,\n formated_schema_name as tag_schema_name,\n formated_table_name as tag_table_name,\n formated_relation_name as tag_relation_name,\n supports_fk::int as supports_fk,\n json_agg(\n distinct jsonb_build_object(\n 'index_name', reason,\n 'index_definition', main_index_def,\n 'index_size_bytes', main_index_size_bytes\n )\n )::text as redundant_to_json\n from redundant_indexes_cut_grouped\n group by\n index_id,\n table_size_bytes,\n schema_name,\n table_name,\n index_name,\n access_method,\n index_def,\n index_size_bytes,\n index_usage,\n formated_index_name,\n formated_schema_name,\n formated_table_name,\n formated_relation_name,\n supports_fk\n order by index_size_bytes desc\n)\nselect * from redundant_indexes_grouped\nlimit 1000;\n",
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15,
+  },
+  "stats_reset": {
+    description: "This metric tracks when statistics were last reset at the database level. It provides visibility into the freshness of statistics data, which is essential for understanding the reliability of usage metrics. A recent reset time indicates that usage statistics may not reflect long-term patterns. Note that Postgres tracks stats resets at the database level, not per-index or per-table.",
+    sqls: {
+      11: "select /* pgwatch_generated */\n datname as tag_database_name,\n extract(epoch from stats_reset)::int as stats_reset_epoch,\n extract(epoch from now() - stats_reset)::int as seconds_since_reset\nfrom pg_stat_database\nwhere datname = current_database()\n and stats_reset is not null;\n",
+    },
+    gauges: ["stats_reset_epoch","seconds_since_reset"],
+    statement_timeout_seconds: 15,
+  },
+};
package/lib/metrics-loader.ts ADDED
@@ -0,0 +1,127 @@
+/**
+ * Metrics loader for express checkup reports
+ *
+ * Loads SQL queries from embedded metrics data (generated from metrics.yml at build time).
+ * Provides version-aware query selection and row transformation utilities.
+ */
+
+import { METRICS, MetricDefinition } from "./metrics-embedded";
+
+/**
+ * Get SQL query for a specific metric, selecting the appropriate version.
+ *
+ * @param metricName - Name of the metric (e.g., "settings", "db_stats")
+ * @param pgMajorVersion - PostgreSQL major version (default: 16)
+ * @returns SQL query string
+ * @throws Error if metric not found or no compatible version available
+ */
+export function getMetricSql(metricName: string, pgMajorVersion: number = 16): string {
+  const metric = METRICS[metricName];
+
+  if (!metric) {
+    throw new Error(`Metric "${metricName}" not found. Available metrics: ${Object.keys(METRICS).join(", ")}`);
+  }
+
+  // Find the best matching version: highest version <= pgMajorVersion
+  const availableVersions = Object.keys(metric.sqls)
+    .map(v => parseInt(v, 10))
+    .sort((a, b) => b - a); // Sort descending
+
+  const matchingVersion = availableVersions.find(v => v <= pgMajorVersion);
+
+  if (matchingVersion === undefined) {
+    throw new Error(
+      `No compatible SQL version for metric "${metricName}" with PostgreSQL ${pgMajorVersion}. ` +
+      `Available versions: ${availableVersions.join(", ")}`
+    );
+  }
+
+  return metric.sqls[matchingVersion];
+}
+
+/**
+ * Get metric definition including all metadata.
+ *
+ * @param metricName - Name of the metric
+ * @returns MetricDefinition or undefined if not found
+ */
+export function getMetricDefinition(metricName: string): MetricDefinition | undefined {
+  return METRICS[metricName];
+}
+
+/**
+ * List all available metric names.
+ */
+export function listMetricNames(): string[] {
+  return Object.keys(METRICS);
+}
+
+/**
+ * Metric names that correspond to express report checks.
+ * Maps check IDs and logical names to metric names in the METRICS object.
+ */
+export const METRIC_NAMES = {
+  // Index health checks
+  H001: "pg_invalid_indexes",
+  H002: "unused_indexes",
+  H004: "redundant_indexes",
+  // Settings and version info (A002, A003, A007, A013)
+  settings: "settings",
+  // Database statistics (A004)
+  dbStats: "db_stats",
+  dbSize: "db_size",
+  // Stats reset info (H002)
+  statsReset: "stats_reset",
+} as const;
+
+/**
+ * Transform a row from metrics query output to JSON report format.
+ * Metrics use `tag_` prefix for dimensions; we strip it for JSON reports.
+ * Also removes Prometheus-specific fields like epoch_ns, num, tag_datname.
+ */
+export function transformMetricRow(row: Record<string, unknown>): Record<string, unknown> {
+  const result: Record<string, unknown> = {};
+
+  for (const [key, value] of Object.entries(row)) {
+    // Skip Prometheus-specific fields
+    if (key === "epoch_ns" || key === "num" || key === "tag_datname") {
+      continue;
+    }
+
+    // Strip tag_ prefix
+    const newKey = key.startsWith("tag_") ? key.slice(4) : key;
+    result[newKey] = value;
+  }
+
+  return result;
+}
+
+/**
+ * Transform settings metric row to the format expected by express reports.
+ * The settings metric returns one row per setting with tag_setting_name as key.
+ */
+export function transformSettingsRow(row: Record<string, unknown>): {
+  name: string;
+  setting: string;
+  unit: string;
+  category: string;
+  vartype: string;
+  is_default: boolean;
+} {
+  return {
+    name: String(row.tag_setting_name || ""),
+    setting: String(row.tag_setting_value || ""),
+    unit: String(row.tag_unit || ""),
+    category: String(row.tag_category || ""),
+    vartype: String(row.tag_vartype || ""),
+    is_default: row.is_default === 1 || row.is_default === true,
+  };
+}
+
+// Re-export types for convenience
+export type { MetricDefinition } from "./metrics-embedded";
+
+// Legacy export for backward compatibility
+export function loadMetricsYml(): { metrics: Record<string, unknown> } {
+  return { metrics: METRICS };
+}
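
Note: getMetricSql picks the highest embedded SQL variant that does not exceed the target server's major version, so a PostgreSQL 13 server querying db_stats (variants for 11, 12, 14, and 15) falls back to the 12 variant. A usage sketch, with an illustrative result row:

import { getMetricSql, transformMetricRow, METRIC_NAMES } from "./metrics-loader";

// PG 13: the highest variant <= 13 is 12.
const sql = getMetricSql(METRIC_NAMES.dbStats, 13);

// Strip pgwatch/Prometheus plumbing (epoch_ns, tag_datname) and tag_ prefixes
// before a row enters a checkup report. Values below are made up for illustration.
const row = { epoch_ns: 1735000000000000000, tag_datname: "postgres", tag_sys_id: "7233", numbackends: 12 };
console.log(transformMetricRow(row));
// => { sys_id: "7233", numbackends: 12 }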