postgresai 0.14.0-dev.53 → 0.14.0-dev.54
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +34 -35
- package/bin/postgres-ai.ts +436 -4
- package/bun.lock +3 -1
- package/bunfig.toml +11 -0
- package/dist/bin/postgres-ai.js +2184 -218
- package/lib/auth-server.ts +52 -5
- package/lib/checkup-api.ts +386 -0
- package/lib/checkup.ts +1327 -0
- package/lib/config.ts +3 -0
- package/lib/issues.ts +5 -41
- package/lib/metrics-embedded.ts +79 -0
- package/lib/metrics-loader.ts +127 -0
- package/lib/util.ts +61 -0
- package/package.json +12 -6
- package/packages/postgres-ai/README.md +26 -0
- package/packages/postgres-ai/bin/postgres-ai.js +27 -0
- package/packages/postgres-ai/package.json +27 -0
- package/scripts/embed-metrics.ts +154 -0
- package/test/checkup.integration.test.ts +273 -0
- package/test/checkup.test.ts +890 -0
- package/test/init.integration.test.ts +36 -33
- package/test/schema-validation.test.ts +81 -0
- package/test/test-utils.ts +122 -0
- package/dist/sql/01.role.sql +0 -16
- package/dist/sql/02.permissions.sql +0 -37
- package/dist/sql/03.optional_rds.sql +0 -6
- package/dist/sql/04.optional_self_managed.sql +0 -8
- package/dist/sql/05.helpers.sql +0 -415
package/dist/bin/postgres-ai.js
CHANGED
@@ -13064,7 +13064,7 @@ var {
 // package.json
 var package_default = {
   name: "postgresai",
-  version: "0.14.0-dev.53",
+  version: "0.14.0-dev.54",
   description: "postgres_ai CLI",
   license: "Apache-2.0",
   private: false,
@@ -13077,22 +13077,26 @@ var package_default = {
     url: "https://gitlab.com/postgres-ai/postgres_ai/-/issues"
   },
   bin: {
-    "postgres-ai": "./dist/bin/postgres-ai.js",
     postgresai: "./dist/bin/postgres-ai.js",
     pgai: "./dist/bin/postgres-ai.js"
   },
+  exports: {
+    ".": "./dist/bin/postgres-ai.js",
+    "./cli": "./dist/bin/postgres-ai.js"
+  },
   type: "module",
   engines: {
     node: ">=18"
   },
   scripts: {
-
+    "embed-metrics": "bun run scripts/embed-metrics.ts",
+    build: `bun run embed-metrics && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e "const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))"`,
     prepublishOnly: "npm run build",
     start: "bun ./bin/postgres-ai.ts --help",
     "start:node": "node ./dist/bin/postgres-ai.js --help",
-    dev: "bun --watch ./bin/postgres-ai.ts",
-    test: "bun test",
-    typecheck: "bunx tsc --noEmit"
+    dev: "bun run embed-metrics && bun --watch ./bin/postgres-ai.ts",
+    test: "bun run embed-metrics && bun test",
+    typecheck: "bun run embed-metrics && bunx tsc --noEmit"
   },
   dependencies: {
     "@modelcontextprotocol/sdk": "^1.20.2",
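The new `exports` map makes the bundled CLI resolvable as a module entry point in addition to the `postgresai` and `pgai` bin names. A minimal sketch of what the map enables; the consumer code here is illustrative, not part of the package:

```ts
// Hypothetical consumer: both specifiers resolve to ./dist/bin/postgres-ai.js
// per the exports map added in 0.14.0-dev.54.
import "postgresai";     // "." entry
import "postgresai/cli"; // "./cli" entry
```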
@@ -13104,6 +13108,8 @@ var package_default = {
     "@types/bun": "^1.1.14",
     "@types/js-yaml": "^4.0.9",
     "@types/pg": "^8.15.6",
+    ajv: "^8.17.1",
+    "ajv-formats": "^3.0.1",
     typescript: "^5.3.3"
   },
   publishConfig: {
@@ -13129,7 +13135,8 @@ function readConfig() {
   const config = {
     apiKey: null,
     baseUrl: null,
-    orgId: null
+    orgId: null,
+    defaultProject: null
   };
   const userConfigPath = getConfigPath();
   if (fs.existsSync(userConfigPath)) {
@@ -13139,6 +13146,7 @@ function readConfig() {
     config.apiKey = parsed.apiKey || null;
     config.baseUrl = parsed.baseUrl || null;
     config.orgId = parsed.orgId || null;
+    config.defaultProject = parsed.defaultProject || null;
    return config;
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);
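readConfig() now carries an optional defaultProject alongside apiKey, baseUrl, and orgId, falling back to null when the key is absent. A sketch of the resulting config shape; the field names come from the diff, while the file format and location (whatever getConfigPath() returns) are not shown here:

```ts
interface CliConfig {
  apiKey: string | null;
  baseUrl: string | null;
  orgId: string | null;
  defaultProject: string | null; // new in 0.14.0-dev.54
}
```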
@@ -15858,9 +15866,10 @@ var safeLoadAll = renamed("safeLoadAll", "loadAll");
 var safeDump = renamed("safeDump", "dump");

 // bin/postgres-ai.ts
-import * as
-import * as
+import * as fs5 from "fs";
+import * as path5 from "path";
 import * as os3 from "os";
+import * as crypto2 from "crypto";

 // node_modules/pg/esm/index.mjs
 var import_lib = __toESM(require_lib2(), 1);
@@ -15876,9 +15885,10 @@ var Result = import_lib.default.Result;
 var TypeOverrides = import_lib.default.TypeOverrides;
 var defaults = import_lib.default.defaults;
 // package.json
+var version = "0.14.0-dev.54";
 var package_default2 = {
   name: "postgresai",
-  version: "0.14.0-dev.53",
+  version,
   description: "postgres_ai CLI",
   license: "Apache-2.0",
   private: false,
@@ -15891,22 +15901,26 @@ var package_default2 = {
     url: "https://gitlab.com/postgres-ai/postgres_ai/-/issues"
   },
   bin: {
-    "postgres-ai": "./dist/bin/postgres-ai.js",
     postgresai: "./dist/bin/postgres-ai.js",
     pgai: "./dist/bin/postgres-ai.js"
   },
+  exports: {
+    ".": "./dist/bin/postgres-ai.js",
+    "./cli": "./dist/bin/postgres-ai.js"
+  },
   type: "module",
   engines: {
     node: ">=18"
   },
   scripts: {
-
+    "embed-metrics": "bun run scripts/embed-metrics.ts",
+    build: `bun run embed-metrics && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e "const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))"`,
     prepublishOnly: "npm run build",
     start: "bun ./bin/postgres-ai.ts --help",
     "start:node": "node ./dist/bin/postgres-ai.js --help",
-    dev: "bun --watch ./bin/postgres-ai.ts",
-    test: "bun test",
-    typecheck: "bunx tsc --noEmit"
+    dev: "bun run embed-metrics && bun --watch ./bin/postgres-ai.ts",
+    test: "bun run embed-metrics && bun test",
+    typecheck: "bun run embed-metrics && bunx tsc --noEmit"
   },
   dependencies: {
     "@modelcontextprotocol/sdk": "^1.20.2",
@@ -15918,6 +15932,8 @@ var package_default2 = {
     "@types/bun": "^1.1.14",
     "@types/js-yaml": "^4.0.9",
     "@types/pg": "^8.15.6",
+    ajv: "^8.17.1",
+    "ajv-formats": "^3.0.1",
     typescript: "^5.3.3"
   },
   publishConfig: {
@@ -15943,7 +15959,8 @@ function readConfig2() {
   const config = {
     apiKey: null,
     baseUrl: null,
-    orgId: null
+    orgId: null,
+    defaultProject: null
   };
   const userConfigPath = getConfigPath2();
   if (fs2.existsSync(userConfigPath)) {
@@ -15953,6 +15970,7 @@ function readConfig2() {
     config.apiKey = parsed.apiKey || null;
     config.baseUrl = parsed.baseUrl || null;
     config.orgId = parsed.orgId || null;
+    config.defaultProject = parsed.defaultProject || null;
    return config;
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);
@@ -15983,6 +16001,49 @@ function readConfig2() {
 }

 // lib/util.ts
+var HTTP_STATUS_MESSAGES = {
+  400: "Bad Request",
+  401: "Unauthorized - check your API key",
+  403: "Forbidden - access denied",
+  404: "Not Found",
+  408: "Request Timeout",
+  429: "Too Many Requests - rate limited",
+  500: "Internal Server Error",
+  502: "Bad Gateway - server temporarily unavailable",
+  503: "Service Unavailable - server temporarily unavailable",
+  504: "Gateway Timeout - server temporarily unavailable"
+};
+function isHtmlContent(text) {
+  const trimmed = text.trim();
+  return trimmed.startsWith("<!DOCTYPE") || trimmed.startsWith("<html") || trimmed.startsWith("<HTML");
+}
+function formatHttpError(operation, status, responseBody) {
+  const statusMessage = HTTP_STATUS_MESSAGES[status] || "Request failed";
+  let errMsg = `${operation}: HTTP ${status} - ${statusMessage}`;
+  if (responseBody) {
+    if (isHtmlContent(responseBody)) {
+      return errMsg;
+    }
+    try {
+      const errObj = JSON.parse(responseBody);
+      const message = errObj.message || errObj.error || errObj.detail;
+      if (message && typeof message === "string") {
+        errMsg += `
+${message}`;
+      } else {
+        errMsg += `
+${JSON.stringify(errObj, null, 2)}`;
+      }
+    } catch {
+      const trimmed = responseBody.trim();
+      if (trimmed.length > 0 && trimmed.length < 500) {
+        errMsg += `
+${trimmed}`;
+      }
+    }
+  }
+  return errMsg;
+}
 function maskSecret(secret) {
   if (!secret)
     return "";
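The new formatHttpError helper centralizes the error formatting that was previously duplicated inline at each issues-API call site. A quick sketch of its behavior as defined above; the sample status codes and bodies are illustrative, not taken from the package:

```ts
// JSON body with a "message" field: appended on a new line.
formatHttpError("Failed to fetch issues", 401, '{"message":"token expired"}');
// => "Failed to fetch issues: HTTP 401 - Unauthorized - check your API key\ntoken expired"

// HTML error pages (e.g., from a proxy) are suppressed entirely.
formatHttpError("Failed to fetch issues", 502, "<!DOCTYPE html>...");
// => "Failed to fetch issues: HTTP 502 - Bad Gateway - server temporarily unavailable"
```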
@@ -16049,18 +16110,7 @@ async function fetchIssues(params) {
       throw new Error(`Failed to parse issues response: ${data}`);
     }
   } else {
-
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issues", response.status, data));
   }
 }
 async function fetchIssueComments(params) {
@@ -16101,18 +16151,7 @@ async function fetchIssueComments(params) {
       throw new Error(`Failed to parse issue comments response: ${data}`);
     }
   } else {
-
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issue comments", response.status, data));
   }
 }
 async function fetchIssue(params) {
@@ -16161,18 +16200,7 @@ async function fetchIssue(params) {
       throw new Error(`Failed to parse issue response: ${data}`);
     }
   } else {
-
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issue", response.status, data));
   }
 }
 async function createIssueComment(params) {
@@ -16226,18 +16254,7 @@ async function createIssueComment(params) {
       throw new Error(`Failed to parse create comment response: ${data}`);
     }
   } else {
-
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to create issue comment", response.status, data));
   }
 }

@@ -17104,10 +17121,10 @@ var ksuid = /^[A-Za-z0-9]{27}$/;
 var nanoid = /^[a-zA-Z0-9_-]{21}$/;
 var duration = /^P(?:(\d+W)|(?!.*W)(?=\d|T\d)(\d+Y)?(\d+M)?(\d+D)?(T(?=\d)(\d+H)?(\d+M)?(\d+([.,]\d+)?S)?)?)$/;
 var guid = /^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})$/;
-var uuid = (version) => {
-  if (!version)
+var uuid = (version2) => {
+  if (!version2)
     return /^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-8][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}|00000000-0000-0000-0000-000000000000|ffffffff-ffff-ffff-ffff-ffffffffffff)$/;
-  return new RegExp(`^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-${version}[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$`);
+  return new RegExp(`^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-${version2}[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$`);
 };
 var email = /^(?!\.)(?!.*\.\.)([A-Za-z0-9_'+\-\.]*)[A-Za-z0-9_+-]@([A-Za-z0-9][A-Za-z0-9\-]*\.)+[A-Za-z]{2,}$/;
 var _emoji = `^(\\p{Extended_Pictographic}|\\p{Emoji_Component})+$`;
@@ -17576,7 +17593,7 @@ class Doc {
 }

 // node_modules/zod/v4/core/versions.js
-var version = {
+var version2 = {
   major: 4,
   minor: 2,
   patch: 1
@@ -17588,7 +17605,7 @@ var $ZodType = /* @__PURE__ */ $constructor("$ZodType", (inst, def) => {
   inst ?? (inst = {});
   inst._zod.def = def;
   inst._zod.bag = inst._zod.bag || {};
-  inst._zod.version = version;
+  inst._zod.version = version2;
   const checks = [...inst._zod.def.checks ?? []];
   if (inst._zod.traits.has("$ZodCheck")) {
     checks.unshift(inst);
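The version → version2 renames in the vendored zod code are mechanical: once the CLI's own top-level `var version = "0.14.0-dev.54"` is hoisted into the bundle's module scope, the bundler must rename zod's colliding `version` object. A sketch of the collision, using the names exactly as they appear in the bundled output above (the pre-bundle source layout is an assumption):

```ts
// bin/postgres-ai.ts (bundled): claims the identifier first.
var version = "0.14.0-dev.54";

// node_modules/zod/v4/core/versions.js (bundled): renamed to avoid the clash.
var version2 = { major: 4, minor: 2, patch: 1 };
inst._zod.version = version2;
```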
@@ -23273,18 +23290,7 @@ async function fetchIssues2(params) {
       throw new Error(`Failed to parse issues response: ${data}`);
     }
   } else {
-
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issues", response.status, data));
   }
 }
 async function fetchIssueComments2(params) {
@@ -23325,18 +23331,7 @@ async function fetchIssueComments2(params) {
       throw new Error(`Failed to parse issue comments response: ${data}`);
     }
   } else {
-
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issue comments", response.status, data));
   }
 }
 async function fetchIssue2(params) {
@@ -23385,18 +23380,7 @@ async function fetchIssue2(params) {
       throw new Error(`Failed to parse issue response: ${data}`);
     }
   } else {
-
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issue", response.status, data));
   }
 }
 async function createIssueComment2(params) {
@@ -23450,18 +23434,7 @@ async function createIssueComment2(params) {
       throw new Error(`Failed to parse create comment response: ${data}`);
     }
   } else {
-
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to create issue comment", response.status, data));
   }
 }

@@ -24055,20 +24028,32 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
   let actualPort = port;
   let resolveCallback;
   let rejectCallback;
+  let resolveReady;
+  let rejectReady;
   let serverInstance = null;
   const promise2 = new Promise((resolve4, reject) => {
     resolveCallback = resolve4;
     rejectCallback = reject;
   });
+  const ready = new Promise((resolve4, reject) => {
+    resolveReady = resolve4;
+    rejectReady = reject;
+  });
+  let timeoutId = null;
   const stopServer = () => {
+    if (timeoutId) {
+      clearTimeout(timeoutId);
+      timeoutId = null;
+    }
     if (serverInstance) {
       serverInstance.close();
       serverInstance = null;
     }
   };
-
+  timeoutId = setTimeout(() => {
     if (!resolved) {
       resolved = true;
+      timeoutId = null;
       stopServer();
       rejectCallback(new Error("Authentication timeout. Please try again."));
     }
@@ -24091,7 +24076,10 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
     const errorDescription = url.searchParams.get("error_description");
     if (error2) {
       resolved = true;
-
+      if (timeoutId) {
+        clearTimeout(timeoutId);
+        timeoutId = null;
+      }
       setTimeout(() => stopServer(), 100);
       rejectCallback(new Error(`OAuth error: ${error2}${errorDescription ? ` - ${errorDescription}` : ""}`));
       res.writeHead(400, { "Content-Type": "text/html" });
@@ -24145,7 +24133,10 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
     }
     if (expectedState && state !== expectedState) {
       resolved = true;
-
+      if (timeoutId) {
+        clearTimeout(timeoutId);
+        timeoutId = null;
+      }
       setTimeout(() => stopServer(), 100);
       rejectCallback(new Error("State mismatch (possible CSRF attack)"));
       res.writeHead(400, { "Content-Type": "text/html" });
@@ -24172,7 +24163,10 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
       return;
     }
     resolved = true;
-
+    if (timeoutId) {
+      clearTimeout(timeoutId);
+      timeoutId = null;
+    }
     resolveCallback({ code, state });
     setTimeout(() => stopServer(), 100);
     res.writeHead(200, { "Content-Type": "text/html" });
@@ -24197,15 +24191,32 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
   </html>
 `);
   });
+  serverInstance.on("error", (err) => {
+    if (timeoutId) {
+      clearTimeout(timeoutId);
+      timeoutId = null;
+    }
+    if (err.code === "EADDRINUSE") {
+      rejectReady(new Error(`Port ${port} is already in use`));
+    } else {
+      rejectReady(new Error(`Server error: ${err.message}`));
+    }
+    if (!resolved) {
+      resolved = true;
+      rejectCallback(err);
+    }
+  });
   serverInstance.listen(port, "127.0.0.1", () => {
     const address = serverInstance?.address();
     if (address && typeof address === "object") {
       actualPort = address.port;
     }
+    resolveReady(actualPort);
   });
   return {
     server: { stop: stopServer },
     promise: promise2,
+    ready,
     getPort: () => actualPort
   };
 }
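createCallbackServer now exposes a ready promise that settles once the listener is bound (and rejects on EADDRINUSE or other server errors), so callers can learn the actual port before sending the user's browser to it. A sketch of the intended call pattern; the caller code and openBrowser helper are assumptions, not shown in this diff:

```ts
// Hypothetical caller: wait for the server to bind before opening the browser.
const { ready, promise, getPort } = createCallbackServer(0, expectedState);
const port = await ready;                          // rejects if the port is taken
openBrowser(`http://127.0.0.1:${port}/callback`);  // openBrowser is illustrative
const { code, state } = await promise;             // resolves on the OAuth callback
```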
@@ -24213,6 +24224,1667 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
|
|
|
24213
24224
|
// bin/postgres-ai.ts
|
|
24214
24225
|
import { createInterface } from "readline";
|
|
24215
24226
|
import * as childProcess from "child_process";
|
|
24227
|
+
|
|
24228
|
+
// lib/checkup.ts
|
|
24229
|
+
import * as fs4 from "fs";
|
|
24230
|
+
import * as path4 from "path";
|
|
24231
|
+
|
|
24232
|
+
// lib/metrics-embedded.ts
|
|
24233
|
+
var METRICS = {
|
|
24234
|
+
settings: {
|
|
24235
|
+
description: "This metric collects various PostgreSQL server settings and configurations. It provides insights into the server's configuration, including version, memory settings, and other important parameters. This metric is useful for monitoring server settings and ensuring optimal performance. Note: For lock_timeout and statement_timeout, we use reset_val instead of setting because pgwatch overrides these during metric collection, which would mask the actual configured values.",
|
|
24236
|
+
sqls: {
|
|
24237
|
+
11: `with base as ( /* pgwatch_generated */
|
|
24238
|
+
select
|
|
24239
|
+
name,
|
|
24240
|
+
-- Use reset_val for lock_timeout/statement_timeout because pgwatch overrides them
|
|
24241
|
+
-- during collection (lock_timeout=100ms, statement_timeout per-metric).
|
|
24242
|
+
case
|
|
24243
|
+
when name in ('lock_timeout', 'statement_timeout') then reset_val
|
|
24244
|
+
else setting
|
|
24245
|
+
end as effective_setting,
|
|
24246
|
+
unit,
|
|
24247
|
+
category,
|
|
24248
|
+
vartype,
|
|
24249
|
+
-- For lock_timeout/statement_timeout, compare reset_val with boot_val
|
|
24250
|
+
-- since source becomes 'session' during collection.
|
|
24251
|
+
case
|
|
24252
|
+
when name in ('lock_timeout', 'statement_timeout') then (reset_val = boot_val)
|
|
24253
|
+
else (source = 'default')
|
|
24254
|
+
end as is_default_bool
|
|
24255
|
+
from pg_settings
|
|
24256
|
+
), with_numeric as (
|
|
24257
|
+
select
|
|
24258
|
+
*,
|
|
24259
|
+
case
|
|
24260
|
+
when effective_setting ~ '^-?[0-9]+$' then effective_setting::bigint
|
|
24261
|
+
else null
|
|
24262
|
+
end as numeric_value
|
|
24263
|
+
from base
|
|
24264
|
+
)
|
|
24265
|
+
select
|
|
24266
|
+
(extract(epoch from now()) * 1e9)::int8 as epoch_ns,
|
|
24267
|
+
current_database() as tag_datname,
|
|
24268
|
+
name as tag_setting_name,
|
|
24269
|
+
effective_setting as tag_setting_value,
|
|
24270
|
+
unit as tag_unit,
|
|
24271
|
+
category as tag_category,
|
|
24272
|
+
vartype as tag_vartype,
|
|
24273
|
+
numeric_value,
|
|
24274
|
+
case
|
|
24275
|
+
when numeric_value is null then null
|
|
24276
|
+
when unit = '8kB' then numeric_value * 8192
|
|
24277
|
+
when unit = 'kB' then numeric_value * 1024
|
|
24278
|
+
when unit = 'MB' then numeric_value * 1024 * 1024
|
|
24279
|
+
when unit = 'B' then numeric_value
|
|
24280
|
+
when unit = 'ms' then numeric_value::numeric / 1000
|
|
24281
|
+
when unit = 's' then numeric_value::numeric
|
|
24282
|
+
when unit = 'min' then numeric_value::numeric * 60
|
|
24283
|
+
else null
|
|
24284
|
+
end as setting_normalized,
|
|
24285
|
+
case unit
|
|
24286
|
+
when '8kB' then 'bytes'
|
|
24287
|
+
when 'kB' then 'bytes'
|
|
24288
|
+
when 'MB' then 'bytes'
|
|
24289
|
+
when 'B' then 'bytes'
|
|
24290
|
+
when 'ms' then 'seconds'
|
|
24291
|
+
when 's' then 'seconds'
|
|
24292
|
+
when 'min' then 'seconds'
|
|
24293
|
+
else null
|
|
24294
|
+
end as unit_normalized,
|
|
24295
|
+
case when is_default_bool then 1 else 0 end as is_default,
|
|
24296
|
+
1 as configured
|
|
24297
|
+
from with_numeric`
|
|
24298
|
+
},
|
|
24299
|
+
gauges: ["*"],
|
|
24300
|
+
statement_timeout_seconds: 15
|
|
24301
|
+
},
|
|
24302
|
+
db_stats: {
|
|
24303
|
+
description: "Retrieves key statistics from the PostgreSQL `pg_stat_database` view, providing insights into the current database's performance. It returns the number of backends, transaction commits and rollbacks, buffer reads and hits, tuple statistics, conflicts, temporary files and bytes, deadlocks, block read and write times, postmaster uptime, backup duration, recovery status, system identifier, and invalid indexes. This metric helps administrators monitor database activity and performance.",
|
|
24304
|
+
sqls: {
|
|
24305
|
+
11: `select /* pgwatch_generated */
|
|
24306
|
+
(extract(epoch from now()) * 1e9)::int8 as epoch_ns,
|
|
24307
|
+
current_database() as tag_datname,
|
|
24308
|
+
numbackends,
|
|
24309
|
+
xact_commit,
|
|
24310
|
+
xact_rollback,
|
|
24311
|
+
blks_read,
|
|
24312
|
+
blks_hit,
|
|
24313
|
+
tup_returned,
|
|
24314
|
+
tup_fetched,
|
|
24315
|
+
tup_inserted,
|
|
24316
|
+
tup_updated,
|
|
24317
|
+
tup_deleted,
|
|
24318
|
+
conflicts,
|
|
24319
|
+
temp_files,
|
|
24320
|
+
temp_bytes,
|
|
24321
|
+
deadlocks,
|
|
24322
|
+
blk_read_time,
|
|
24323
|
+
blk_write_time,
|
|
24324
|
+
extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,
|
|
24325
|
+
case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,
|
|
24326
|
+
system_identifier::text as tag_sys_id,
|
|
24327
|
+
(select count(*) from pg_index i
|
|
24328
|
+
where not indisvalid
|
|
24329
|
+
and not exists ( /* leave out ones that are being actively rebuilt */
|
|
24330
|
+
select * from pg_locks l
|
|
24331
|
+
join pg_stat_activity a using (pid)
|
|
24332
|
+
where l.relation = i.indexrelid
|
|
24333
|
+
and a.state = 'active'
|
|
24334
|
+
and a.query ~* 'concurrently'
|
|
24335
|
+
)) as invalid_indexes
|
|
24336
|
+
from
|
|
24337
|
+
pg_stat_database, pg_control_system()
|
|
24338
|
+
where
|
|
24339
|
+
datname = current_database()`,
|
|
24340
|
+
12: `select /* pgwatch_generated */
|
|
24341
|
+
(extract(epoch from now()) * 1e9)::int8 as epoch_ns,
|
|
24342
|
+
current_database() as tag_datname,
|
|
24343
|
+
numbackends,
|
|
24344
|
+
xact_commit,
|
|
24345
|
+
xact_rollback,
|
|
24346
|
+
blks_read,
|
|
24347
|
+
blks_hit,
|
|
24348
|
+
tup_returned,
|
|
24349
|
+
tup_fetched,
|
|
24350
|
+
tup_inserted,
|
|
24351
|
+
tup_updated,
|
|
24352
|
+
tup_deleted,
|
|
24353
|
+
conflicts,
|
|
24354
|
+
temp_files,
|
|
24355
|
+
temp_bytes,
|
|
24356
|
+
deadlocks,
|
|
24357
|
+
blk_read_time,
|
|
24358
|
+
blk_write_time,
|
|
24359
|
+
extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,
|
|
24360
|
+
extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,
|
|
24361
|
+
checksum_failures,
|
|
24362
|
+
extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,
|
|
24363
|
+
case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,
|
|
24364
|
+
system_identifier::text as tag_sys_id,
|
|
24365
|
+
(select count(*) from pg_index i
|
|
24366
|
+
where not indisvalid
|
|
24367
|
+
and not exists ( /* leave out ones that are being actively rebuilt */
|
|
24368
|
+
select * from pg_locks l
|
|
24369
|
+
join pg_stat_activity a using (pid)
|
|
24370
|
+
where l.relation = i.indexrelid
|
|
24371
|
+
and a.state = 'active'
|
|
24372
|
+
and a.query ~* 'concurrently'
|
|
24373
|
+
)) as invalid_indexes
|
|
24374
|
+
from
|
|
24375
|
+
pg_stat_database, pg_control_system()
|
|
24376
|
+
where
|
|
24377
|
+
datname = current_database()`,
|
|
24378
|
+
14: `select /* pgwatch_generated */
|
|
24379
|
+
(extract(epoch from now()) * 1e9)::int8 as epoch_ns,
|
|
24380
|
+
current_database() as tag_datname,
|
|
24381
|
+
numbackends,
|
|
24382
|
+
xact_commit,
|
|
24383
|
+
xact_rollback,
|
|
24384
|
+
blks_read,
|
|
24385
|
+
blks_hit,
|
|
24386
|
+
tup_returned,
|
|
24387
|
+
tup_fetched,
|
|
24388
|
+
tup_inserted,
|
|
24389
|
+
tup_updated,
|
|
24390
|
+
tup_deleted,
|
|
24391
|
+
conflicts,
|
|
24392
|
+
temp_files,
|
|
24393
|
+
temp_bytes,
|
|
24394
|
+
deadlocks,
|
|
24395
|
+
blk_read_time,
|
|
24396
|
+
blk_write_time,
|
|
24397
|
+
extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,
|
|
24398
|
+
extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,
|
|
24399
|
+
checksum_failures,
|
|
24400
|
+
extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,
|
|
24401
|
+
case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,
|
|
24402
|
+
system_identifier::text as tag_sys_id,
|
|
24403
|
+
session_time::int8,
|
|
24404
|
+
active_time::int8,
|
|
24405
|
+
idle_in_transaction_time::int8,
|
|
24406
|
+
sessions,
|
|
24407
|
+
sessions_abandoned,
|
|
24408
|
+
sessions_fatal,
|
|
24409
|
+
sessions_killed,
|
|
24410
|
+
(select count(*) from pg_index i
|
|
24411
|
+
where not indisvalid
|
|
24412
|
+
and not exists ( /* leave out ones that are being actively rebuilt */
|
|
24413
|
+
select * from pg_locks l
|
|
24414
|
+
join pg_stat_activity a using (pid)
|
|
24415
|
+
where l.relation = i.indexrelid
|
|
24416
|
+
and a.state = 'active'
|
|
24417
|
+
and a.query ~* 'concurrently'
|
|
24418
|
+
)) as invalid_indexes
|
|
24419
|
+
from
|
|
24420
|
+
pg_stat_database, pg_control_system()
|
|
24421
|
+
where
|
|
24422
|
+
datname = current_database()`,
|
|
24423
|
+
15: `select /* pgwatch_generated */
|
|
24424
|
+
(extract(epoch from now()) * 1e9)::int8 as epoch_ns,
|
|
24425
|
+
current_database() as tag_datname,
|
|
24426
|
+
numbackends,
|
|
24427
|
+
xact_commit,
|
|
24428
|
+
xact_rollback,
|
|
24429
|
+
blks_read,
|
|
24430
|
+
blks_hit,
|
|
24431
|
+
tup_returned,
|
|
24432
|
+
tup_fetched,
|
|
24433
|
+
tup_inserted,
|
|
24434
|
+
tup_updated,
|
|
24435
|
+
tup_deleted,
|
|
24436
|
+
conflicts,
|
|
24437
|
+
temp_files,
|
|
24438
|
+
temp_bytes,
|
|
24439
|
+
deadlocks,
|
|
24440
|
+
blk_read_time,
|
|
24441
|
+
blk_write_time,
|
|
24442
|
+
extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,
|
|
24443
|
+
checksum_failures,
|
|
24444
|
+
extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,
|
|
24445
|
+
case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,
|
|
24446
|
+
system_identifier::text as tag_sys_id,
|
|
24447
|
+
session_time::int8,
|
|
24448
|
+
active_time::int8,
|
|
24449
|
+
idle_in_transaction_time::int8,
|
|
24450
|
+
sessions,
|
|
24451
|
+
sessions_abandoned,
|
|
24452
|
+
sessions_fatal,
|
|
24453
|
+
sessions_killed,
|
|
24454
|
+
(select count(*) from pg_index i
|
|
24455
|
+
where not indisvalid
|
|
24456
|
+
and not exists ( /* leave out ones that are being actively rebuilt */
|
|
24457
|
+
select * from pg_locks l
|
|
24458
|
+
join pg_stat_activity a using (pid)
|
|
24459
|
+
where l.relation = i.indexrelid
|
|
24460
|
+
and a.state = 'active'
|
|
24461
|
+
and a.query ~* 'concurrently'
|
|
24462
|
+
)) as invalid_indexes
|
|
24463
|
+
from
|
|
24464
|
+
pg_stat_database, pg_control_system()
|
|
24465
|
+
where
|
|
24466
|
+
datname = current_database()`
|
|
24467
|
+
},
|
|
24468
|
+
gauges: ["*"],
|
|
24469
|
+
statement_timeout_seconds: 15
|
|
24470
|
+
},
|
|
24471
|
+
db_size: {
|
|
24472
|
+
description: "Retrieves the size of the current database and the size of the `pg_catalog` schema, providing insights into the storage usage of the database. It returns the size in bytes for both the current database and the catalog schema. This metric helps administrators monitor database size and storage consumption.",
|
|
24473
|
+
sqls: {
|
|
24474
|
+
11: `select /* pgwatch_generated */
|
|
24475
|
+
(extract(epoch from now()) * 1e9)::int8 as epoch_ns,
|
|
24476
|
+
current_database() as tag_datname,
|
|
24477
|
+
pg_database_size(current_database()) as size_b,
|
|
24478
|
+
(select sum(pg_total_relation_size(c.oid))::int8
|
|
24479
|
+
from pg_class c join pg_namespace n on n.oid = c.relnamespace
|
|
24480
|
+
where nspname = 'pg_catalog' and relkind = 'r'
|
|
24481
|
+
) as catalog_size_b`
|
|
24482
|
+
},
|
|
24483
|
+
gauges: ["*"],
|
|
24484
|
+
statement_timeout_seconds: 300
|
|
24485
|
+
},
|
|
24486
|
+
pg_invalid_indexes: {
|
|
24487
|
+
description: "This metric identifies invalid indexes in the database. It provides insights into the number of invalid indexes and their details. This metric helps administrators identify and fix invalid indexes to improve database performance.",
|
|
24488
|
+
sqls: {
|
|
24489
|
+
11: `with fk_indexes as ( /* pgwatch_generated */
|
|
24490
|
+
select
|
|
24491
|
+
schemaname as tag_schema_name,
|
|
24492
|
+
(indexrelid::regclass)::text as tag_index_name,
|
|
24493
|
+
(relid::regclass)::text as tag_table_name,
|
|
24494
|
+
(confrelid::regclass)::text as tag_fk_table_ref,
|
|
24495
|
+
array_to_string(indclass, ', ') as tag_opclasses
|
|
24496
|
+
from
|
|
24497
|
+
pg_stat_all_indexes
|
|
24498
|
+
join pg_index using (indexrelid)
|
|
24499
|
+
left join pg_constraint
|
|
24500
|
+
on array_to_string(indkey, ',') = array_to_string(conkey, ',')
|
|
24501
|
+
and schemaname = (connamespace::regnamespace)::text
|
|
24502
|
+
and conrelid = relid
|
|
24503
|
+
and contype = 'f'
|
|
24504
|
+
where idx_scan = 0
|
|
24505
|
+
and indisunique is false
|
|
24506
|
+
and conkey is not null --conkey is not null then true else false end as is_fk_idx
|
|
24507
|
+
), data as (
|
|
24508
|
+
select
|
|
24509
|
+
pci.relname as tag_index_name,
|
|
24510
|
+
pn.nspname as tag_schema_name,
|
|
24511
|
+
pct.relname as tag_table_name,
|
|
24512
|
+
quote_ident(pn.nspname) as tag_schema_name,
|
|
24513
|
+
quote_ident(pci.relname) as tag_index_name,
|
|
24514
|
+
quote_ident(pct.relname) as tag_table_name,
|
|
24515
|
+
coalesce(nullif(quote_ident(pn.nspname), 'public') || '.', '') || quote_ident(pct.relname) as tag_relation_name,
|
|
24516
|
+
pg_relation_size(pidx.indexrelid) index_size_bytes,
|
|
24517
|
+
((
|
|
24518
|
+
select count(1)
|
|
24519
|
+
from fk_indexes fi
|
|
24520
|
+
where
|
|
24521
|
+
fi.tag_fk_table_ref = pct.relname
|
|
24522
|
+
and fi.tag_opclasses like (array_to_string(pidx.indclass, ', ') || '%')
|
|
24523
|
+
) > 0)::int as supports_fk
|
|
24524
|
+
from pg_index pidx
|
|
24525
|
+
join pg_class as pci on pci.oid = pidx.indexrelid
|
|
24526
|
+
join pg_class as pct on pct.oid = pidx.indrelid
|
|
24527
|
+
left join pg_namespace pn on pn.oid = pct.relnamespace
|
|
24528
|
+
where pidx.indisvalid = false
|
|
24529
|
+
), data_total as (
|
|
24530
|
+
select
|
|
24531
|
+
sum(index_size_bytes) as index_size_bytes_sum
|
|
24532
|
+
from data
|
|
24533
|
+
), num_data as (
|
|
24534
|
+
select
|
|
24535
|
+
row_number() over () num,
|
|
24536
|
+
data.*
|
|
24537
|
+
from data
|
|
24538
|
+
)
|
|
24539
|
+
select
|
|
24540
|
+
(extract(epoch from now()) * 1e9)::int8 as epoch_ns,
|
|
24541
|
+
current_database() as tag_datname,
|
|
24542
|
+
num_data.*
|
|
24543
|
+
from num_data
|
|
24544
|
+
limit 1000;
|
|
24545
|
+
`
|
|
24546
|
+
},
|
|
24547
|
+
gauges: ["*"],
|
|
24548
|
+
statement_timeout_seconds: 15
|
|
24549
|
+
},
|
|
24550
|
+
unused_indexes: {
|
|
24551
|
+
description: "This metric identifies unused indexes in the database. It provides insights into the number of unused indexes and their details. This metric helps administrators identify and fix unused indexes to improve database performance.",
|
|
24552
|
+
sqls: {
|
|
24553
|
+
11: `with fk_indexes as ( /* pgwatch_generated */
|
|
24554
|
+
select
|
|
24555
|
+
n.nspname as schema_name,
|
|
24556
|
+
ci.relname as index_name,
|
|
24557
|
+
cr.relname as table_name,
|
|
24558
|
+
(confrelid::regclass)::text as fk_table_ref,
|
|
24559
|
+
array_to_string(indclass, ', ') as opclasses
|
|
24560
|
+
from pg_index i
|
|
24561
|
+
join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
|
|
24562
|
+
join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'
|
|
24563
|
+
join pg_namespace n on n.oid = ci.relnamespace
|
|
24564
|
+
join pg_constraint cn on cn.conrelid = cr.oid
|
|
24565
|
+
left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid
|
|
24566
|
+
where
|
|
24567
|
+
contype = 'f'
|
|
24568
|
+
and i.indisunique is false
|
|
24569
|
+
and conkey is not null
|
|
24570
|
+
and ci.relpages > 5
|
|
24571
|
+
and si.idx_scan < 10
|
|
24572
|
+
), table_scans as (
|
|
24573
|
+
select relid,
|
|
24574
|
+
tables.idx_scan + tables.seq_scan as all_scans,
|
|
24575
|
+
( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del ) as writes,
|
|
24576
|
+
pg_relation_size(relid) as table_size
|
|
24577
|
+
from pg_stat_all_tables as tables
|
|
24578
|
+
join pg_class c on c.oid = relid
|
|
24579
|
+
where c.relpages > 5
|
|
24580
|
+
), indexes as (
|
|
24581
|
+
select
|
|
24582
|
+
i.indrelid,
|
|
24583
|
+
i.indexrelid,
|
|
24584
|
+
n.nspname as schema_name,
|
|
24585
|
+
cr.relname as table_name,
|
|
24586
|
+
ci.relname as index_name,
|
|
24587
|
+
si.idx_scan,
|
|
24588
|
+
pg_relation_size(i.indexrelid) as index_bytes,
|
|
24589
|
+
ci.relpages,
|
|
24590
|
+
(case when a.amname = 'btree' then true else false end) as idx_is_btree,
|
|
24591
|
+
array_to_string(i.indclass, ', ') as opclasses
|
|
24592
|
+
from pg_index i
|
|
24593
|
+
join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
|
|
24594
|
+
join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'
|
|
24595
|
+
join pg_namespace n on n.oid = ci.relnamespace
|
|
24596
|
+
join pg_am a on ci.relam = a.oid
|
|
24597
|
+
left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid
|
|
24598
|
+
where
|
|
24599
|
+
i.indisunique = false
|
|
24600
|
+
and i.indisvalid = true
|
|
24601
|
+
and ci.relpages > 5
|
|
24602
|
+
), index_ratios as (
|
|
24603
|
+
select
|
|
24604
|
+
i.indexrelid as index_id,
|
|
24605
|
+
i.schema_name,
|
|
24606
|
+
i.table_name,
|
|
24607
|
+
i.index_name,
|
|
24608
|
+
idx_scan,
|
|
24609
|
+
all_scans,
|
|
24610
|
+
round(( case when all_scans = 0 then 0.0::numeric
|
|
24611
|
+
else idx_scan::numeric/all_scans * 100 end), 2) as index_scan_pct,
|
|
24612
|
+
writes,
|
|
24613
|
+
round((case when writes = 0 then idx_scan::numeric else idx_scan::numeric/writes end), 2)
|
|
24614
|
+
as scans_per_write,
|
|
24615
|
+
index_bytes as index_size_bytes,
|
|
24616
|
+
table_size as table_size_bytes,
|
|
24617
|
+
i.relpages,
|
|
24618
|
+
idx_is_btree,
|
|
24619
|
+
i.opclasses,
|
|
24620
|
+
(
|
|
24621
|
+
select count(1)
|
|
24622
|
+
from fk_indexes fi
|
|
24623
|
+
where fi.fk_table_ref = i.table_name
|
|
24624
|
+
and fi.schema_name = i.schema_name
|
|
24625
|
+
and fi.opclasses like (i.opclasses || '%')
|
|
24626
|
+
) > 0 as supports_fk
|
|
24627
|
+
from indexes i
|
|
24628
|
+
join table_scans ts on ts.relid = i.indrelid
|
|
24629
|
+
)
|
|
24630
|
+
select
|
|
24631
|
+
'Never Used Indexes' as tag_reason,
|
|
24632
|
+
current_database() as tag_datname,
|
|
24633
|
+
index_id,
|
|
24634
|
+
schema_name as tag_schema_name,
|
|
24635
|
+
table_name as tag_table_name,
|
|
24636
|
+
index_name as tag_index_name,
|
|
24637
|
+
pg_get_indexdef(index_id) as index_definition,
|
|
24638
|
+
idx_scan,
|
|
24639
|
+
all_scans,
|
|
24640
|
+
index_scan_pct,
|
|
24641
|
+
writes,
|
|
24642
|
+
scans_per_write,
|
|
24643
|
+
index_size_bytes,
|
|
24644
|
+
table_size_bytes,
|
|
24645
|
+
relpages,
|
|
24646
|
+
idx_is_btree,
|
|
24647
|
+
opclasses as tag_opclasses,
|
|
24648
|
+
supports_fk
|
|
24649
|
+
from index_ratios
|
|
24650
|
+
where
|
|
24651
|
+
idx_scan = 0
|
|
24652
|
+
and idx_is_btree
|
|
24653
|
+
order by index_size_bytes desc
|
|
24654
|
+
limit 1000;
|
|
24655
|
+
`
|
|
24656
|
+
},
|
|
24657
|
+
gauges: ["*"],
|
|
24658
|
+
statement_timeout_seconds: 15
|
|
24659
|
+
},
|
|
24660
|
+
redundant_indexes: {
|
|
24661
|
+
description: "This metric identifies redundant indexes that can potentially be dropped to save storage space and improve write performance. It analyzes index relationships and finds indexes that are covered by other indexes, considering column order, operator classes, and foreign key constraints. Uses the exact logic from tmp.sql with JSON aggregation and proper thresholds.",
|
|
24662
|
+
sqls: {
|
|
24663
|
+
11: `with fk_indexes as ( /* pgwatch_generated */
|
|
24664
|
+
select
|
|
24665
|
+
n.nspname as schema_name,
|
|
24666
|
+
ci.relname as index_name,
|
|
24667
|
+
cr.relname as table_name,
|
|
24668
|
+
(confrelid::regclass)::text as fk_table_ref,
|
|
24669
|
+
array_to_string(indclass, ', ') as opclasses
|
|
24670
|
+
from pg_index i
|
|
24671
|
+
join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
|
|
24672
|
+
join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'
|
|
24673
|
+
join pg_namespace n on n.oid = ci.relnamespace
|
|
24674
|
+
join pg_constraint cn on cn.conrelid = cr.oid
|
|
24675
|
+
left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid
|
|
24676
|
+
where
|
|
24677
|
+
contype = 'f'
|
|
24678
|
+
and i.indisunique is false
|
|
24679
|
+
and conkey is not null
|
|
24680
|
+
and ci.relpages > 5
|
|
24681
|
+
and si.idx_scan < 10
|
|
24682
|
+
),
|
|
24683
|
+
-- Redundant indexes
|
|
24684
|
+
index_data as (
|
|
24685
|
+
select
|
|
24686
|
+
*,
|
|
24687
|
+
indkey::text as columns,
|
|
24688
|
+
array_to_string(indclass, ', ') as opclasses
|
|
24689
|
+
from pg_index i
|
|
24690
|
+
join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
|
|
24691
|
+
where indisvalid = true and ci.relpages > 5
|
|
24692
|
+
), redundant_indexes as (
|
|
24693
|
+
select
|
|
24694
|
+
i2.indexrelid as index_id,
|
|
24695
|
+
tnsp.nspname as schema_name,
|
|
24696
|
+
trel.relname as table_name,
|
|
24697
|
+
pg_relation_size(trel.oid) as table_size_bytes,
|
|
24698
|
+
irel.relname as index_name,
|
|
24699
|
+
am1.amname as access_method,
|
|
24700
|
+
(i1.indexrelid::regclass)::text as reason,
|
|
24701
|
+
i1.indexrelid as reason_index_id,
|
|
24702
|
+
pg_get_indexdef(i1.indexrelid) main_index_def,
|
|
24703
|
+
pg_relation_size(i1.indexrelid) main_index_size_bytes,
|
|
24704
|
+
pg_get_indexdef(i2.indexrelid) index_def,
|
|
24705
|
+
pg_relation_size(i2.indexrelid) index_size_bytes,
|
|
24706
|
+
s.idx_scan as index_usage,
|
|
24707
|
+
quote_ident(tnsp.nspname) as formated_schema_name,
|
|
24708
|
+
coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(irel.relname) as formated_index_name,
|
|
24709
|
+
quote_ident(trel.relname) as formated_table_name,
|
|
24710
|
+
coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(trel.relname) as formated_relation_name,
|
|
24711
|
+
i2.opclasses
|
|
24712
|
+
from (
|
|
24713
|
+
select indrelid, indexrelid, opclasses, indclass, indexprs, indpred, indisprimary, indisunique, columns
|
|
24714
|
+
from index_data
|
|
24715
|
+
order by indexrelid
|
|
24716
|
+
) as i1
|
|
24717
|
+
join index_data as i2 on (
|
|
24718
|
+
i1.indrelid = i2.indrelid -- same table
|
|
24719
|
+
and i1.indexrelid <> i2.indexrelid -- NOT same index
|
|
24720
|
+
)
|
|
24721
|
+
inner join pg_opclass op1 on i1.indclass[0] = op1.oid
|
|
24722
|
+
inner join pg_opclass op2 on i2.indclass[0] = op2.oid
|
|
24723
|
+
inner join pg_am am1 on op1.opcmethod = am1.oid
|
|
24724
|
+
inner join pg_am am2 on op2.opcmethod = am2.oid
|
|
24725
|
+
join pg_stat_all_indexes as s on s.indexrelid = i2.indexrelid
|
|
24726
|
+
join pg_class as trel on trel.oid = i2.indrelid
|
|
24727
|
+
join pg_namespace as tnsp on trel.relnamespace = tnsp.oid
|
|
24728
|
+
join pg_class as irel on irel.oid = i2.indexrelid
|
|
24729
|
+
where
|
|
24730
|
+
not i2.indisprimary -- index 1 is not primary
|
|
24731
|
+
and not i2.indisunique -- index 1 is not unique (unique indexes serve constraint purpose)
|
|
24732
|
+
and am1.amname = am2.amname -- same access type
|
|
24733
|
+
and i1.columns like (i2.columns || '%') -- index 2 includes all columns from index 1
|
|
24734
|
+
and i1.opclasses like (i2.opclasses || '%')
|
|
24735
|
+
-- index expressions is same
|
|
24736
|
+
and pg_get_expr(i1.indexprs, i1.indrelid) is not distinct from pg_get_expr(i2.indexprs, i2.indrelid)
|
|
24737
|
+
-- index predicates is same
|
|
24738
|
+
and pg_get_expr(i1.indpred, i1.indrelid) is not distinct from pg_get_expr(i2.indpred, i2.indrelid)
|
|
24739
|
+
), redundant_indexes_fk as (
|
|
24740
|
+
select
|
|
24741
|
+
ri.*,
|
|
24742
|
+
((
|
|
24743
|
+
select count(1)
|
|
24744
|
+
from fk_indexes fi
|
|
24745
|
+
where
|
|
24746
|
+
fi.fk_table_ref = ri.table_name
|
|
24747
|
+
and fi.opclasses like (ri.opclasses || '%')
|
|
24748
|
+
) > 0)::int as supports_fk
|
|
24749
|
+
from redundant_indexes ri
|
|
24750
|
+
),
|
|
24751
|
+
-- Cut recursive links
|
|
24752
|
+
redundant_indexes_tmp_num as (
|
|
24753
|
+
select row_number() over () num, rig.*
|
|
24754
|
+
from redundant_indexes_fk rig
|
|
24755
|
+
), redundant_indexes_tmp_links as (
|
|
24756
|
+
select
|
|
24757
|
+
ri1.*,
|
|
24758
|
+
ri2.num as r_num
|
|
24759
|
+
from redundant_indexes_tmp_num ri1
|
|
24760
|
+
left join redundant_indexes_tmp_num ri2 on ri2.reason_index_id = ri1.index_id and ri1.reason_index_id = ri2.index_id
|
|
24761
|
+
), redundant_indexes_tmp_cut as (
|
|
24762
|
+
select
|
|
24763
|
+
*
|
|
24764
|
+
from redundant_indexes_tmp_links
|
|
24765
|
+
where num < r_num or r_num is null
|
|
24766
|
+
), redundant_indexes_cut_grouped as (
|
|
24767
|
+
select
|
|
24768
|
+
distinct(num),
|
|
24769
|
+
*
|
|
24770
|
+
from redundant_indexes_tmp_cut
|
|
24771
|
+
order by index_size_bytes desc
|
|
24772
|
+
), redundant_indexes_grouped as (
|
|
24773
|
+
select
|
|
24774
|
+
index_id,
|
|
24775
|
+
schema_name as tag_schema_name,
|
|
24776
|
+
table_name,
|
|
24777
|
+
table_size_bytes,
|
|
24778
|
+
index_name as tag_index_name,
|
|
24779
|
+
access_method as tag_access_method,
|
|
24780
|
+
string_agg(distinct reason, ', ') as tag_reason,
|
|
24781
|
+
index_size_bytes,
|
|
24782
|
+
index_usage,
|
|
24783
|
+
index_def as index_definition,
|
|
24784
|
+
formated_index_name as tag_index_name,
|
|
24785
|
+
formated_schema_name as tag_schema_name,
|
|
24786
|
+
formated_table_name as tag_table_name,
|
|
24787
|
+
formated_relation_name as tag_relation_name,
|
|
24788
|
+
supports_fk::int as supports_fk,
|
|
24789
|
+
json_agg(
|
|
24790
|
+
distinct jsonb_build_object(
|
|
24791
|
+
'index_name', reason,
|
|
24792
|
+
'index_definition', main_index_def,
|
|
24793
|
+
'index_size_bytes', main_index_size_bytes
|
|
24794
|
+
)
|
|
24795
|
+
)::text as redundant_to_json
|
|
24796
|
+
from redundant_indexes_cut_grouped
|
|
24797
|
+
group by
|
|
24798
|
+
index_id,
|
|
24799
|
+
table_size_bytes,
|
|
24800
|
+
schema_name,
|
|
24801
|
+
table_name,
|
|
24802
|
+
index_name,
|
|
24803
|
+
access_method,
|
|
24804
|
+
index_def,
|
|
24805
|
+
index_size_bytes,
|
|
24806
|
+
index_usage,
|
|
24807
|
+
formated_index_name,
|
|
24808
|
+
formated_schema_name,
|
|
24809
|
+
formated_table_name,
|
|
24810
|
+
formated_relation_name,
|
|
24811
|
+
supports_fk
|
|
24812
|
+
order by index_size_bytes desc
|
|
24813
|
+
)
|
|
24814
|
+
select * from redundant_indexes_grouped
|
|
24815
|
+
limit 1000;
|
|
24816
|
+
`
|
|
24817
|
+
},
|
|
24818
|
+
gauges: ["*"],
|
|
24819
|
+
statement_timeout_seconds: 15
|
|
24820
|
+
},
|
|
24821
|
+
stats_reset: {
|
|
24822
|
+
description: "This metric tracks when statistics were last reset at the database level. It provides visibility into the freshness of statistics data, which is essential for understanding the reliability of usage metrics. A recent reset time indicates that usage statistics may not reflect long-term patterns. Note that Postgres tracks stats resets at the database level, not per-index or per-table.",
|
|
24823
|
+
sqls: {
|
|
24824
|
+
11: `select /* pgwatch_generated */
|
|
24825
|
+
datname as tag_database_name,
|
|
24826
|
+
extract(epoch from stats_reset)::int as stats_reset_epoch,
|
|
24827
|
+
extract(epoch from now() - stats_reset)::int as seconds_since_reset
|
|
24828
|
+
from pg_stat_database
|
|
24829
|
+
where datname = current_database()
|
|
24830
|
+
and stats_reset is not null;
|
|
24831
|
+
`
|
|
24832
|
+
},
|
|
24833
|
+
gauges: ["stats_reset_epoch", "seconds_since_reset"],
|
|
24834
|
+
statement_timeout_seconds: 15
|
|
24835
|
+
}
|
|
24836
|
+
};
|
|
24837
|
+
|
|
24838
|
+
// lib/metrics-loader.ts
|
|
24839
|
+
function getMetricSql(metricName, pgMajorVersion = 16) {
|
|
24840
|
+
const metric = METRICS[metricName];
|
|
24841
|
+
if (!metric) {
|
|
24842
|
+
throw new Error(`Metric "${metricName}" not found. Available metrics: ${Object.keys(METRICS).join(", ")}`);
|
|
24843
|
+
}
|
|
24844
|
+
const availableVersions = Object.keys(metric.sqls).map((v) => parseInt(v, 10)).sort((a, b) => b - a);
|
|
24845
|
+
const matchingVersion = availableVersions.find((v) => v <= pgMajorVersion);
|
|
24846
|
+
if (matchingVersion === undefined) {
|
|
24847
|
+
throw new Error(`No compatible SQL version for metric "${metricName}" with PostgreSQL ${pgMajorVersion}. ` + `Available versions: ${availableVersions.join(", ")}`);
|
|
24848
|
+
}
|
|
24849
|
+
return metric.sqls[matchingVersion];
|
|
24850
|
+
}
|
|
24851
|
+
var METRIC_NAMES = {
|
|
24852
|
+
H001: "pg_invalid_indexes",
|
|
24853
|
+
H002: "unused_indexes",
|
|
24854
|
+
H004: "redundant_indexes",
|
|
24855
|
+
settings: "settings",
|
|
24856
|
+
dbStats: "db_stats",
|
|
24857
|
+
dbSize: "db_size",
|
|
24858
|
+
statsReset: "stats_reset"
|
|
24859
|
+
};
|
|
24860
|
+
function transformMetricRow(row) {
|
|
24861
|
+
const result = {};
|
|
24862
|
+
for (const [key, value] of Object.entries(row)) {
|
|
24863
|
+
if (key === "epoch_ns" || key === "num" || key === "tag_datname") {
|
|
24864
|
+
continue;
|
|
24865
|
+
}
|
|
24866
|
+
const newKey = key.startsWith("tag_") ? key.slice(4) : key;
|
|
24867
|
+
result[newKey] = value;
|
|
24868
|
+
}
|
|
24869
|
+
return result;
|
|
24870
|
+
}
|
|
24871
|
+
|
|
24872
|
+
// lib/checkup.ts
|
|
24873
|
+
var __dirname = "/builds/postgres-ai/postgres_ai/cli/lib";
|
|
24874
|
+
var SECONDS_PER_DAY = 86400;
|
|
24875
|
+
var SECONDS_PER_HOUR = 3600;
|
|
24876
|
+
var SECONDS_PER_MINUTE = 60;
|
|
24877
|
+
function toBool(val) {
|
|
24878
|
+
return val === true || val === 1 || val === "t" || val === "true";
|
|
24879
|
+
}
|
|
24880
|
+
function parseVersionNum(versionNum) {
|
|
24881
|
+
if (!versionNum || versionNum.length < 6) {
|
|
24882
|
+
return { major: "", minor: "" };
|
|
24883
|
+
}
|
|
24884
|
+
try {
|
|
24885
|
+
const num = parseInt(versionNum, 10);
|
|
24886
|
+
return {
|
|
24887
|
+
major: Math.floor(num / 1e4).toString(),
|
|
24888
|
+
minor: (num % 1e4).toString()
|
|
24889
|
+
};
|
|
24890
|
+
} catch (err) {
|
|
24891
|
+
const errorMsg = err instanceof Error ? err.message : String(err);
|
|
24892
|
+
console.log(`[parseVersionNum] Warning: Failed to parse "${versionNum}": ${errorMsg}`);
|
|
24893
|
+
return { major: "", minor: "" };
|
|
24894
|
+
}
|
|
24895
|
+
}
|
|
24896
|
+
function formatBytes(bytes) {
|
|
24897
|
+
if (bytes === 0)
|
|
24898
|
+
return "0 B";
|
|
24899
|
+
if (bytes < 0)
|
|
24900
|
+
return `-${formatBytes(-bytes)}`;
|
|
24901
|
+
if (!Number.isFinite(bytes))
|
|
24902
|
+
return `${bytes} B`;
|
|
24903
|
+
const units = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"];
|
|
24904
|
+
const i2 = Math.min(Math.floor(Math.log(bytes) / Math.log(1024)), units.length - 1);
|
|
24905
|
+
return `${(bytes / Math.pow(1024, i2)).toFixed(2)} ${units[i2]}`;
|
|
24906
|
+
}
|
|
24907
|
+
function formatSettingPrettyValue(settingNormalized, unitNormalized, rawValue) {
|
|
24908
|
+
if (settingNormalized === null || unitNormalized === null) {
|
|
24909
|
+
return rawValue;
|
|
24910
|
+
}
|
|
24911
|
+
if (unitNormalized === "bytes") {
|
|
24912
|
+
return formatBytes(settingNormalized);
|
|
24913
|
+
}
|
|
24914
|
+
if (unitNormalized === "seconds") {
|
|
24915
|
+
const MS_PER_SECOND = 1000;
|
|
24916
|
+
if (settingNormalized < 1) {
|
|
24917
|
+
return `${(settingNormalized * MS_PER_SECOND).toFixed(0)} ms`;
|
|
24918
|
+
} else if (settingNormalized < SECONDS_PER_MINUTE) {
|
|
24919
|
+
return `${settingNormalized} s`;
|
|
24920
|
+
} else {
|
|
24921
|
+
return `${(settingNormalized / SECONDS_PER_MINUTE).toFixed(1)} min`;
|
|
24922
|
+
}
|
|
24923
|
+
}
|
|
24924
|
+
return rawValue;
|
|
24925
|
+
}
|
|
24926
|
+
async function getPostgresVersion(client) {
|
|
24927
|
+
const result = await client.query(`
|
|
24928
|
+
select name, setting
|
|
24929
|
+
from pg_settings
|
|
24930
|
+
where name in ('server_version', 'server_version_num')
|
|
24931
|
+
`);
|
|
24932
|
+
let version3 = "";
|
|
24933
|
+
let serverVersionNum = "";
|
|
24934
|
+
for (const row of result.rows) {
|
|
24935
|
+
if (row.name === "server_version") {
|
|
24936
|
+
version3 = row.setting;
|
|
24937
|
+
} else if (row.name === "server_version_num") {
|
|
24938
|
+
serverVersionNum = row.setting;
|
|
24939
|
+
}
|
|
24940
|
+
}
|
|
24941
|
+
const { major, minor } = parseVersionNum(serverVersionNum);
|
|
24942
|
+
return {
|
|
24943
|
+
version: version3,
|
|
24944
|
+
server_version_num: serverVersionNum,
|
|
24945
|
+
server_major_ver: major,
|
|
24946
|
+
server_minor_ver: minor
|
|
24947
|
+
};
|
|
24948
|
+
}
+async function getSettings(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.settings, pgMajorVersion);
+  const result = await client.query(sql);
+  const settings = {};
+  for (const row of result.rows) {
+    const name = row.tag_setting_name;
+    const settingValue = row.tag_setting_value;
+    const unit = row.tag_unit || "";
+    const category = row.tag_category || "";
+    const vartype = row.tag_vartype || "";
+    const settingNormalized = row.setting_normalized !== null ? parseFloat(row.setting_normalized) : null;
+    const unitNormalized = row.unit_normalized || null;
+    settings[name] = {
+      setting: settingValue,
+      unit,
+      category,
+      context: "",
+      vartype,
+      pretty_value: formatSettingPrettyValue(settingNormalized, unitNormalized, settingValue)
+    };
+  }
+  return settings;
+}
+async function getAlteredSettings(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.settings, pgMajorVersion);
+  const result = await client.query(sql);
+  const settings = {};
+  for (const row of result.rows) {
+    if (!toBool(row.is_default)) {
+      const name = row.tag_setting_name;
+      const settingValue = row.tag_setting_value;
+      const unit = row.tag_unit || "";
+      const category = row.tag_category || "";
+      const settingNormalized = row.setting_normalized !== null ? parseFloat(row.setting_normalized) : null;
+      const unitNormalized = row.unit_normalized || null;
+      settings[name] = {
+        value: settingValue,
+        unit,
+        category,
+        pretty_value: formatSettingPrettyValue(settingNormalized, unitNormalized, settingValue)
+      };
+    }
+  }
+  return settings;
+}
+async function getDatabaseSizes(client) {
+  const result = await client.query(`
+    select
+      datname,
+      pg_database_size(datname) as size_bytes
+    from pg_database
+    where datistemplate = false
+    order by size_bytes desc
+  `);
+  const sizes = {};
+  for (const row of result.rows) {
+    sizes[row.datname] = parseInt(row.size_bytes, 10);
+  }
+  return sizes;
+}
+async function getClusterInfo(client, pgMajorVersion = 16) {
+  const info = {};
+  const dbStatsSql = getMetricSql(METRIC_NAMES.dbStats, pgMajorVersion);
+  const statsResult = await client.query(dbStatsSql);
+  if (statsResult.rows.length > 0) {
+    const stats = statsResult.rows[0];
+    info.total_connections = {
+      value: String(stats.numbackends || 0),
+      unit: "connections",
+      description: "Current database connections"
+    };
+    info.total_commits = {
+      value: String(stats.xact_commit || 0),
+      unit: "transactions",
+      description: "Total committed transactions"
+    };
+    info.total_rollbacks = {
+      value: String(stats.xact_rollback || 0),
+      unit: "transactions",
+      description: "Total rolled back transactions"
+    };
+    const blocksHit = parseInt(stats.blks_hit || "0", 10);
+    const blocksRead = parseInt(stats.blks_read || "0", 10);
+    const totalBlocks = blocksHit + blocksRead;
+    const cacheHitRatio = totalBlocks > 0 ? (blocksHit / totalBlocks * 100).toFixed(2) : "0.00";
+    info.cache_hit_ratio = {
+      value: cacheHitRatio,
+      unit: "%",
+      description: "Buffer cache hit ratio"
+    };
+    info.blocks_read = {
+      value: String(blocksRead),
+      unit: "blocks",
+      description: "Total disk blocks read"
+    };
+    info.blocks_hit = {
+      value: String(blocksHit),
+      unit: "blocks",
+      description: "Total buffer cache hits"
+    };
+    info.tuples_returned = {
+      value: String(stats.tup_returned || 0),
+      unit: "rows",
+      description: "Total rows returned by queries"
+    };
+    info.tuples_fetched = {
+      value: String(stats.tup_fetched || 0),
+      unit: "rows",
+      description: "Total rows fetched by queries"
+    };
+    info.tuples_inserted = {
+      value: String(stats.tup_inserted || 0),
+      unit: "rows",
+      description: "Total rows inserted"
+    };
+    info.tuples_updated = {
+      value: String(stats.tup_updated || 0),
+      unit: "rows",
+      description: "Total rows updated"
+    };
+    info.tuples_deleted = {
+      value: String(stats.tup_deleted || 0),
+      unit: "rows",
+      description: "Total rows deleted"
+    };
+    info.total_deadlocks = {
+      value: String(stats.deadlocks || 0),
+      unit: "deadlocks",
+      description: "Total deadlocks detected"
+    };
+    info.temp_files_created = {
+      value: String(stats.temp_files || 0),
+      unit: "files",
+      description: "Total temporary files created"
+    };
+    const tempBytes = parseInt(stats.temp_bytes || "0", 10);
+    info.temp_bytes_written = {
+      value: formatBytes(tempBytes),
+      unit: "bytes",
+      description: "Total temporary file bytes written"
+    };
+    if (stats.postmaster_uptime_s) {
+      const uptimeSeconds = parseInt(stats.postmaster_uptime_s, 10);
+      const days = Math.floor(uptimeSeconds / SECONDS_PER_DAY);
+      const hours = Math.floor(uptimeSeconds % SECONDS_PER_DAY / SECONDS_PER_HOUR);
+      const minutes = Math.floor(uptimeSeconds % SECONDS_PER_HOUR / SECONDS_PER_MINUTE);
+      info.uptime = {
+        value: `${days} days ${hours}:${String(minutes).padStart(2, "0")}:${String(uptimeSeconds % SECONDS_PER_MINUTE).padStart(2, "0")}`,
+        unit: "interval",
+        description: "Server uptime"
+      };
+    }
+  }
+  const connResult = await client.query(`
+    select
+      coalesce(state, 'null') as state,
+      count(*) as count
+    from pg_stat_activity
+    group by state
+  `);
+  for (const row of connResult.rows) {
+    const stateKey = `connections_${row.state.replace(/\s+/g, "_")}`;
+    info[stateKey] = {
+      value: String(row.count),
+      unit: "connections",
+      description: `Connections in '${row.state}' state`
+    };
+  }
+  const uptimeResult = await client.query(`
+    select
+      pg_postmaster_start_time() as start_time,
+      current_timestamp - pg_postmaster_start_time() as uptime
+  `);
+  if (uptimeResult.rows.length > 0) {
+    const uptime = uptimeResult.rows[0];
+    const startTime = uptime.start_time instanceof Date ? uptime.start_time.toISOString() : String(uptime.start_time);
+    info.start_time = {
+      value: startTime,
+      unit: "timestamp",
+      description: "PostgreSQL server start time"
+    };
+    if (!info.uptime) {
+      info.uptime = {
+        value: String(uptime.uptime),
+        unit: "interval",
+        description: "Server uptime"
+      };
+    }
+  }
+  return info;
+}
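Annotation (not part of the diff): a worked example of the buffer cache hit ratio computed above:

    // blks_hit = 9900, blks_read = 100
    // cache_hit_ratio = 9900 / (9900 + 100) * 100 = "99.00" (%)
    // if no blocks were touched at all, the ratio falls back to "0.00"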
+async function getInvalidIndexes(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.H001, pgMajorVersion);
+  const result = await client.query(sql);
+  return result.rows.map((row) => {
+    const transformed = transformMetricRow(row);
+    const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
+    return {
+      schema_name: String(transformed.schema_name || ""),
+      table_name: String(transformed.table_name || ""),
+      index_name: String(transformed.index_name || ""),
+      relation_name: String(transformed.relation_name || ""),
+      index_size_bytes: indexSizeBytes,
+      index_size_pretty: formatBytes(indexSizeBytes),
+      supports_fk: toBool(transformed.supports_fk)
+    };
+  });
+}
+async function getUnusedIndexes(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.H002, pgMajorVersion);
+  const result = await client.query(sql);
+  return result.rows.map((row) => {
+    const transformed = transformMetricRow(row);
+    const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
+    return {
+      schema_name: String(transformed.schema_name || ""),
+      table_name: String(transformed.table_name || ""),
+      index_name: String(transformed.index_name || ""),
+      index_definition: String(transformed.index_definition || ""),
+      reason: String(transformed.reason || ""),
+      idx_scan: parseInt(String(transformed.idx_scan || 0), 10),
+      index_size_bytes: indexSizeBytes,
+      idx_is_btree: toBool(transformed.idx_is_btree),
+      supports_fk: toBool(transformed.supports_fk),
+      index_size_pretty: formatBytes(indexSizeBytes)
+    };
+  });
+}
+async function getStatsReset(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.statsReset, pgMajorVersion);
+  const result = await client.query(sql);
+  const row = result.rows[0] || {};
+  const statsResetEpoch = row.stats_reset_epoch ? parseFloat(row.stats_reset_epoch) : null;
+  const secondsSinceReset = row.seconds_since_reset ? parseInt(row.seconds_since_reset, 10) : null;
+  const statsResetTime = statsResetEpoch ? new Date(statsResetEpoch * 1000).toISOString() : null;
+  const daysSinceReset = secondsSinceReset !== null ? Math.floor(secondsSinceReset / SECONDS_PER_DAY) : null;
+  let postmasterStartupEpoch = null;
+  let postmasterStartupTime = null;
+  let postmasterStartupError;
+  try {
+    const pmResult = await client.query(`
+      select
+        extract(epoch from pg_postmaster_start_time()) as postmaster_startup_epoch,
+        pg_postmaster_start_time()::text as postmaster_startup_time
+    `);
+    if (pmResult.rows.length > 0) {
+      postmasterStartupEpoch = pmResult.rows[0].postmaster_startup_epoch ? parseFloat(pmResult.rows[0].postmaster_startup_epoch) : null;
+      postmasterStartupTime = pmResult.rows[0].postmaster_startup_time || null;
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    postmasterStartupError = `Failed to query postmaster start time: ${errorMsg}`;
+    console.log(`[getStatsReset] Warning: ${postmasterStartupError}`);
+  }
+  const statsResult = {
+    stats_reset_epoch: statsResetEpoch,
+    stats_reset_time: statsResetTime,
+    days_since_reset: daysSinceReset,
+    postmaster_startup_epoch: postmasterStartupEpoch,
+    postmaster_startup_time: postmasterStartupTime
+  };
+  if (postmasterStartupError) {
+    statsResult.postmaster_startup_error = postmasterStartupError;
+  }
+  return statsResult;
+}
+async function getCurrentDatabaseInfo(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.dbSize, pgMajorVersion);
+  const result = await client.query(sql);
+  const row = result.rows[0] || {};
+  return {
+    datname: row.tag_datname || "postgres",
+    size_bytes: parseInt(row.size_b || "0", 10)
+  };
+}
+function isValidRedundantToItem(item) {
+  return typeof item === "object" && item !== null && !Array.isArray(item);
+}
+async function getRedundantIndexes(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.H004, pgMajorVersion);
+  const result = await client.query(sql);
+  return result.rows.map((row) => {
+    const transformed = transformMetricRow(row);
+    const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
+    const tableSizeBytes = parseInt(String(transformed.table_size_bytes || 0), 10);
+    let redundantTo = [];
+    let parseError;
+    try {
+      const jsonStr = String(transformed.redundant_to_json || "[]");
+      const parsed = JSON.parse(jsonStr);
+      if (Array.isArray(parsed)) {
+        redundantTo = parsed.filter(isValidRedundantToItem).map((item) => {
+          const sizeBytes = parseInt(String(item.index_size_bytes ?? 0), 10);
+          return {
+            index_name: String(item.index_name ?? ""),
+            index_definition: String(item.index_definition ?? ""),
+            index_size_bytes: sizeBytes,
+            index_size_pretty: formatBytes(sizeBytes)
+          };
+        });
+      }
+    } catch (err) {
+      const errorMsg = err instanceof Error ? err.message : String(err);
+      const indexName = String(transformed.index_name || "unknown");
+      parseError = `Failed to parse redundant_to_json: ${errorMsg}`;
+      console.log(`[H004] Warning: ${parseError} for index "${indexName}"`);
+    }
+    const result2 = {
+      schema_name: String(transformed.schema_name || ""),
+      table_name: String(transformed.table_name || ""),
+      index_name: String(transformed.index_name || ""),
+      relation_name: String(transformed.relation_name || ""),
+      access_method: String(transformed.access_method || ""),
+      reason: String(transformed.reason || ""),
+      index_size_bytes: indexSizeBytes,
+      table_size_bytes: tableSizeBytes,
+      index_usage: parseInt(String(transformed.index_usage || 0), 10),
+      supports_fk: toBool(transformed.supports_fk),
+      index_definition: String(transformed.index_definition || ""),
+      index_size_pretty: formatBytes(indexSizeBytes),
+      table_size_pretty: formatBytes(tableSizeBytes),
+      redundant_to: redundantTo
+    };
+    if (parseError) {
+      result2.redundant_to_parse_error = parseError;
+    }
+    return result2;
+  });
+}
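Annotation (not part of the diff): a hypothetical redundant_to_json value and how the map above transforms it (the index name is made up for illustration):

    // input row column:
    //   redundant_to_json = '[{"index_name":"i_users_email","index_definition":"CREATE INDEX ...","index_size_bytes":1048576}]'
    // output field:
    //   redundant_to = [{
    //     index_name: "i_users_email",
    //     index_definition: "CREATE INDEX ...",
    //     index_size_bytes: 1048576,
    //     index_size_pretty: "1.00 MiB"
    //   }]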
+function createBaseReport(checkId, checkTitle, nodeName) {
+  const buildTs = resolveBuildTs();
+  return {
+    version: version || null,
+    build_ts: buildTs,
+    generation_mode: "express",
+    checkId,
+    checkTitle,
+    timestamptz: new Date().toISOString(),
+    nodes: {
+      primary: nodeName,
+      standbys: []
+    },
+    results: {}
+  };
+}
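Annotation (not part of the diff): the skeleton every check report starts from, with illustrative values:

    // createBaseReport("H001", "Invalid indexes", "node-01") returns roughly:
    // {
    //   version: <CLI version or null>,
    //   build_ts: <see resolveBuildTs below>,
    //   generation_mode: "express",
    //   checkId: "H001",
    //   checkTitle: "Invalid indexes",
    //   timestamptz: "2025-01-01T00:00:00.000Z",  // current time, illustrative
    //   nodes: { primary: "node-01", standbys: [] },
    //   results: {}
    // }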
+function readTextFileSafe(p) {
+  try {
+    const value = fs4.readFileSync(p, "utf8").trim();
+    return value || null;
+  } catch {
+    return null;
+  }
+}
+function resolveBuildTs() {
+  const envPath = process.env.PGAI_BUILD_TS_FILE;
+  const p = envPath && envPath.trim() ? envPath.trim() : "/BUILD_TS";
+  const fromFile = readTextFileSafe(p);
+  if (fromFile)
+    return fromFile;
+  try {
+    const pkgRoot = path4.resolve(__dirname, "..");
+    const fromPkgFile = readTextFileSafe(path4.join(pkgRoot, "BUILD_TS"));
+    if (fromPkgFile)
+      return fromPkgFile;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.warn(`[resolveBuildTs] Warning: path resolution failed: ${errorMsg}`);
+  }
+  try {
+    const pkgJsonPath = path4.resolve(__dirname, "..", "package.json");
+    const st = fs4.statSync(pkgJsonPath);
+    return st.mtime.toISOString();
+  } catch (err) {
+    if (process.env.DEBUG) {
+      const errorMsg = err instanceof Error ? err.message : String(err);
+      console.log(`[resolveBuildTs] Could not stat package.json, using current time: ${errorMsg}`);
+    }
+    return new Date().toISOString();
+  }
+}
+async function generateVersionReport(client, nodeName, checkId, checkTitle) {
+  const report = createBaseReport(checkId, checkTitle, nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  report.results[nodeName] = { data: { version: postgresVersion } };
+  return report;
+}
+async function generateSettingsReport(client, nodeName, checkId, checkTitle, fetchSettings) {
+  const report = createBaseReport(checkId, checkTitle, nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const settings = await fetchSettings(client, pgMajorVersion);
+  report.results[nodeName] = { data: settings, postgres_version: postgresVersion };
+  return report;
+}
+async function generateIndexReport(client, nodeName, checkId, checkTitle, indexFieldName, fetchIndexes, extraFields) {
+  const report = createBaseReport(checkId, checkTitle, nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const indexes = await fetchIndexes(client, pgMajorVersion);
+  const { datname: dbName, size_bytes: dbSizeBytes } = await getCurrentDatabaseInfo(client, pgMajorVersion);
+  const totalCount = indexes.length;
+  const totalSizeBytes = indexes.reduce((sum, idx) => sum + idx.index_size_bytes, 0);
+  const dbEntry = {
+    [indexFieldName]: indexes,
+    total_count: totalCount,
+    total_size_bytes: totalSizeBytes,
+    total_size_pretty: formatBytes(totalSizeBytes),
+    database_size_bytes: dbSizeBytes,
+    database_size_pretty: formatBytes(dbSizeBytes)
+  };
+  if (extraFields) {
+    Object.assign(dbEntry, await extraFields(client, pgMajorVersion));
+  }
+  report.results[nodeName] = { data: { [dbName]: dbEntry }, postgres_version: postgresVersion };
+  return report;
+}
+var generateA002 = (client, nodeName = "node-01") => generateVersionReport(client, nodeName, "A002", "Postgres major version");
+var generateA003 = (client, nodeName = "node-01") => generateSettingsReport(client, nodeName, "A003", "Postgres settings", getSettings);
+async function generateA004(client, nodeName = "node-01") {
+  const report = createBaseReport("A004", "Cluster information", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  report.results[nodeName] = {
+    data: {
+      general_info: await getClusterInfo(client, pgMajorVersion),
+      database_sizes: await getDatabaseSizes(client)
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+var generateA007 = (client, nodeName = "node-01") => generateSettingsReport(client, nodeName, "A007", "Altered settings", getAlteredSettings);
+var generateA013 = (client, nodeName = "node-01") => generateVersionReport(client, nodeName, "A013", "Postgres minor version");
+var generateH001 = (client, nodeName = "node-01") => generateIndexReport(client, nodeName, "H001", "Invalid indexes", "invalid_indexes", getInvalidIndexes);
+var generateH002 = (client, nodeName = "node-01") => generateIndexReport(client, nodeName, "H002", "Unused indexes", "unused_indexes", getUnusedIndexes, async (c, v) => ({ stats_reset: await getStatsReset(c, v) }));
+var generateH004 = (client, nodeName = "node-01") => generateIndexReport(client, nodeName, "H004", "Redundant indexes", "redundant_indexes", getRedundantIndexes);
+async function generateD004(client, nodeName) {
+  const report = createBaseReport("D004", "pg_stat_statements and pg_stat_kcache settings", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const allSettings = await getSettings(client, pgMajorVersion);
+  const pgssSettings = {};
+  for (const [name, setting] of Object.entries(allSettings)) {
+    if (name.startsWith("pg_stat_statements") || name.startsWith("pg_stat_kcache")) {
+      pgssSettings[name] = setting;
+    }
+  }
+  let pgssAvailable = false;
+  let pgssMetricsCount = 0;
+  let pgssTotalCalls = 0;
+  let pgssError = null;
+  const pgssSampleQueries = [];
+  try {
+    const extCheck = await client.query("select 1 from pg_extension where extname = 'pg_stat_statements'");
+    if (extCheck.rows.length > 0) {
+      pgssAvailable = true;
+      const statsResult = await client.query(`
+        select count(*) as cnt, coalesce(sum(calls), 0) as total_calls
+        from pg_stat_statements
+      `);
+      pgssMetricsCount = parseInt(statsResult.rows[0]?.cnt || "0", 10);
+      pgssTotalCalls = parseInt(statsResult.rows[0]?.total_calls || "0", 10);
+      const sampleResult = await client.query(`
+        select
+          queryid::text as queryid,
+          coalesce(usename, 'unknown') as "user",
+          coalesce(datname, 'unknown') as database,
+          calls
+        from pg_stat_statements s
+        left join pg_database d on s.dbid = d.oid
+        left join pg_user u on s.userid = u.usesysid
+        order by calls desc
+        limit 5
+      `);
+      for (const row of sampleResult.rows) {
+        pgssSampleQueries.push({
+          queryid: row.queryid,
+          user: row.user,
+          database: row.database,
+          calls: parseInt(row.calls, 10)
+        });
+      }
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[D004] Error querying pg_stat_statements: ${errorMsg}`);
+    pgssError = errorMsg;
+  }
+  let kcacheAvailable = false;
+  let kcacheMetricsCount = 0;
+  let kcacheTotalExecTime = 0;
+  let kcacheTotalUserTime = 0;
+  let kcacheTotalSystemTime = 0;
+  let kcacheError = null;
+  const kcacheSampleQueries = [];
+  try {
+    const extCheck = await client.query("select 1 from pg_extension where extname = 'pg_stat_kcache'");
+    if (extCheck.rows.length > 0) {
+      kcacheAvailable = true;
+      const statsResult = await client.query(`
+        select
+          count(*) as cnt,
+          coalesce(sum(exec_user_time + exec_system_time), 0) as total_exec_time,
+          coalesce(sum(exec_user_time), 0) as total_user_time,
+          coalesce(sum(exec_system_time), 0) as total_system_time
+        from pg_stat_kcache
+      `);
+      kcacheMetricsCount = parseInt(statsResult.rows[0]?.cnt || "0", 10);
+      kcacheTotalExecTime = parseFloat(statsResult.rows[0]?.total_exec_time || "0");
+      kcacheTotalUserTime = parseFloat(statsResult.rows[0]?.total_user_time || "0");
+      kcacheTotalSystemTime = parseFloat(statsResult.rows[0]?.total_system_time || "0");
+      const sampleResult = await client.query(`
+        select
+          queryid::text as queryid,
+          coalesce(usename, 'unknown') as "user",
+          (exec_user_time + exec_system_time) as exec_total_time
+        from pg_stat_kcache k
+        left join pg_user u on k.userid = u.usesysid
+        order by (exec_user_time + exec_system_time) desc
+        limit 5
+      `);
+      for (const row of sampleResult.rows) {
+        kcacheSampleQueries.push({
+          queryid: row.queryid,
+          user: row.user,
+          exec_total_time: parseFloat(row.exec_total_time)
+        });
+      }
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[D004] Error querying pg_stat_kcache: ${errorMsg}`);
+    kcacheError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      settings: pgssSettings,
+      pg_stat_statements_status: {
+        extension_available: pgssAvailable,
+        metrics_count: pgssMetricsCount,
+        total_calls: pgssTotalCalls,
+        sample_queries: pgssSampleQueries,
+        ...pgssError && { error: pgssError }
+      },
+      pg_stat_kcache_status: {
+        extension_available: kcacheAvailable,
+        metrics_count: kcacheMetricsCount,
+        total_exec_time: kcacheTotalExecTime,
+        total_user_time: kcacheTotalUserTime,
+        total_system_time: kcacheTotalSystemTime,
+        sample_queries: kcacheSampleQueries,
+        ...kcacheError && { error: kcacheError }
+      }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+async function generateF001(client, nodeName) {
+  const report = createBaseReport("F001", "Autovacuum: current settings", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const allSettings = await getSettings(client, pgMajorVersion);
+  const autovacuumSettings = {};
+  for (const [name, setting] of Object.entries(allSettings)) {
+    if (name.includes("autovacuum") || name.includes("vacuum")) {
+      autovacuumSettings[name] = setting;
+    }
+  }
+  report.results[nodeName] = {
+    data: autovacuumSettings,
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+async function generateG001(client, nodeName) {
+  const report = createBaseReport("G001", "Memory-related settings", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const allSettings = await getSettings(client, pgMajorVersion);
+  const memorySettingNames = [
+    "shared_buffers",
+    "work_mem",
+    "maintenance_work_mem",
+    "effective_cache_size",
+    "wal_buffers",
+    "temp_buffers",
+    "max_connections",
+    "autovacuum_work_mem",
+    "hash_mem_multiplier",
+    "logical_decoding_work_mem",
+    "max_stack_depth",
+    "max_prepared_transactions",
+    "max_locks_per_transaction",
+    "max_pred_locks_per_transaction"
+  ];
+  const memorySettings = {};
+  for (const name of memorySettingNames) {
+    if (allSettings[name]) {
+      memorySettings[name] = allSettings[name];
+    }
+  }
+  let memoryUsage = {};
+  let memoryError = null;
+  try {
+    const memQuery = await client.query(`
+      select
+        pg_size_bytes(current_setting('shared_buffers')) as shared_buffers_bytes,
+        pg_size_bytes(current_setting('wal_buffers')) as wal_buffers_bytes,
+        pg_size_bytes(current_setting('work_mem')) as work_mem_bytes,
+        pg_size_bytes(current_setting('maintenance_work_mem')) as maintenance_work_mem_bytes,
+        pg_size_bytes(current_setting('effective_cache_size')) as effective_cache_size_bytes,
+        current_setting('max_connections')::int as max_connections
+    `);
+    if (memQuery.rows.length > 0) {
+      const row = memQuery.rows[0];
+      const sharedBuffersBytes = parseInt(row.shared_buffers_bytes, 10);
+      const walBuffersBytes = parseInt(row.wal_buffers_bytes, 10);
+      const workMemBytes = parseInt(row.work_mem_bytes, 10);
+      const maintenanceWorkMemBytes = parseInt(row.maintenance_work_mem_bytes, 10);
+      const effectiveCacheSizeBytes = parseInt(row.effective_cache_size_bytes, 10);
+      const maxConnections = row.max_connections;
+      const sharedMemoryTotal = sharedBuffersBytes + walBuffersBytes;
+      const maxWorkMemUsage = workMemBytes * maxConnections;
+      memoryUsage = {
+        shared_buffers_bytes: sharedBuffersBytes,
+        shared_buffers_pretty: formatBytes(sharedBuffersBytes),
+        wal_buffers_bytes: walBuffersBytes,
+        wal_buffers_pretty: formatBytes(walBuffersBytes),
+        shared_memory_total_bytes: sharedMemoryTotal,
+        shared_memory_total_pretty: formatBytes(sharedMemoryTotal),
+        work_mem_per_connection_bytes: workMemBytes,
+        work_mem_per_connection_pretty: formatBytes(workMemBytes),
+        max_work_mem_usage_bytes: maxWorkMemUsage,
+        max_work_mem_usage_pretty: formatBytes(maxWorkMemUsage),
+        maintenance_work_mem_bytes: maintenanceWorkMemBytes,
+        maintenance_work_mem_pretty: formatBytes(maintenanceWorkMemBytes),
+        effective_cache_size_bytes: effectiveCacheSizeBytes,
+        effective_cache_size_pretty: formatBytes(effectiveCacheSizeBytes)
+      };
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[G001] Error calculating memory usage: ${errorMsg}`);
+    memoryError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      settings: memorySettings,
+      analysis: {
+        estimated_total_memory_usage: memoryUsage,
+        ...memoryError && { error: memoryError }
+      }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
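Annotation (not part of the diff): a worked example of the max_work_mem_usage estimate in generateG001 — a coarse upper bound that assumes one work_mem unit per connection (a single complex query may use several):

    // work_mem = 4 MiB, max_connections = 100:
    // max_work_mem_usage_bytes = 4194304 * 100 = 419430400
    // max_work_mem_usage_pretty = "400.00 MiB"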
+var REPORT_GENERATORS = {
+  A002: generateA002,
+  A003: generateA003,
+  A004: generateA004,
+  A007: generateA007,
+  A013: generateA013,
+  D004: generateD004,
+  F001: generateF001,
+  G001: generateG001,
+  H001: generateH001,
+  H002: generateH002,
+  H004: generateH004
+};
+var CHECK_INFO = {
+  A002: "Postgres major version",
+  A003: "Postgres settings",
+  A004: "Cluster information",
+  A007: "Altered settings",
+  A013: "Postgres minor version",
+  D004: "pg_stat_statements and pg_stat_kcache settings",
+  F001: "Autovacuum: current settings",
+  G001: "Memory-related settings",
+  H001: "Invalid indexes",
+  H002: "Unused indexes",
+  H004: "Redundant indexes"
+};
+async function generateAllReports(client, nodeName = "node-01", onProgress) {
+  const reports = {};
+  const entries = Object.entries(REPORT_GENERATORS);
+  const total = entries.length;
+  let index = 0;
+  for (const [checkId, generator] of entries) {
+    index += 1;
+    onProgress?.({
+      checkId,
+      checkTitle: CHECK_INFO[checkId] || checkId,
+      index,
+      total
+    });
+    reports[checkId] = await generator(client, nodeName);
+  }
+  return reports;
+}
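Annotation (not part of the diff): a minimal driver sketch for generateAllReports, assuming `client` is an already-connected node-postgres Client (the CLI wires this up in the checkup command below):

    const reports = await generateAllReports(client, "node-01", (p) => {
      console.error(`[${p.index}/${p.total}] ${p.checkId}: ${p.checkTitle}`);
    });
    // e.g. reports.A004.results["node-01"].data.general_info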
+
+// lib/checkup-api.ts
+import * as https from "https";
+import { URL as URL3 } from "url";
+var DEFAULT_RETRY_CONFIG = {
+  maxAttempts: 3,
+  initialDelayMs: 1000,
+  maxDelayMs: 1e4,
+  backoffMultiplier: 2
+};
+function isRetryableError(err) {
+  if (err instanceof RpcError) {
+    return err.statusCode >= 500 && err.statusCode < 600;
+  }
+  if (typeof err === "object" && err !== null && "code" in err) {
+    const code = String(err.code);
+    if (["ECONNRESET", "ECONNREFUSED", "ENOTFOUND", "ETIMEDOUT"].includes(code)) {
+      return true;
+    }
+  }
+  if (err instanceof Error) {
+    const msg = err.message.toLowerCase();
+    return msg.includes("timeout") || msg.includes("timed out") || msg.includes("econnreset") || msg.includes("econnrefused") || msg.includes("enotfound") || msg.includes("socket hang up") || msg.includes("network");
+  }
+  return false;
+}
+async function withRetry(fn, config2 = {}, onRetry) {
+  const { maxAttempts, initialDelayMs, maxDelayMs, backoffMultiplier } = {
+    ...DEFAULT_RETRY_CONFIG,
+    ...config2
+  };
+  let lastError;
+  let delayMs = initialDelayMs;
+  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
+    try {
+      return await fn();
+    } catch (err) {
+      lastError = err;
+      if (attempt === maxAttempts || !isRetryableError(err)) {
+        throw err;
+      }
+      if (onRetry) {
+        onRetry(attempt, err, delayMs);
+      }
+      await new Promise((resolve5) => setTimeout(resolve5, delayMs));
+      delayMs = Math.min(delayMs * backoffMultiplier, maxDelayMs);
+    }
+  }
+  throw lastError;
+}
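Annotation (not part of the diff): how withRetry is meant to be used — this mirrors the real call sites in uploadCheckupReports below. With the defaults, delays grow 1000 ms -> 2000 ms (capped at maxDelayMs), and non-retryable errors are rethrown immediately:

    const data = await withRetry(
      () => fetchSomething(),  // hypothetical retryable async operation
      { maxAttempts: 3 },
      (attempt, err, delayMs) => console.error(`attempt ${attempt} failed, retrying in ${delayMs}ms`)
    );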
+
+class RpcError extends Error {
+  rpcName;
+  statusCode;
+  payloadText;
+  payloadJson;
+  constructor(params) {
+    const { rpcName, statusCode, payloadText, payloadJson } = params;
+    super(`RPC ${rpcName} failed: HTTP ${statusCode}`);
+    this.name = "RpcError";
+    this.rpcName = rpcName;
+    this.statusCode = statusCode;
+    this.payloadText = payloadText;
+    this.payloadJson = payloadJson;
+  }
+}
+function formatRpcErrorForDisplay(err) {
+  const lines = [];
+  lines.push(`Error: RPC ${err.rpcName} failed: HTTP ${err.statusCode}`);
+  const obj = err.payloadJson && typeof err.payloadJson === "object" ? err.payloadJson : null;
+  const details = obj && typeof obj.details === "string" ? obj.details : "";
+  const hint = obj && typeof obj.hint === "string" ? obj.hint : "";
+  const message = obj && typeof obj.message === "string" ? obj.message : "";
+  if (message)
+    lines.push(`Message: ${message}`);
+  if (details)
+    lines.push(`Details: ${details}`);
+  if (hint)
+    lines.push(`Hint: ${hint}`);
+  if (!message && !details && !hint) {
+    const t = (err.payloadText || "").trim();
+    if (t)
+      lines.push(t);
+  }
+  return lines;
+}
+function unwrapRpcResponse(parsed) {
+  if (Array.isArray(parsed)) {
+    if (parsed.length === 1)
+      return unwrapRpcResponse(parsed[0]);
+    return parsed;
+  }
+  if (parsed && typeof parsed === "object") {
+    const obj = parsed;
+    if (obj.result !== undefined)
+      return obj.result;
+  }
+  return parsed;
+}
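Annotation (not part of the diff): behavior sketch of unwrapRpcResponse on typical payload shapes:

    unwrapRpcResponse([{ result: { report_id: 42 } }]); // { report_id: 42 }
    unwrapRpcResponse({ result: "ok" });                // "ok"
    unwrapRpcResponse([1, 2]);                          // [1, 2] (multi-element arrays pass through)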
+var HTTP_TIMEOUT_MS = 30000;
+async function postRpc(params) {
+  const { apiKey, apiBaseUrl, rpcName, bodyObj, timeoutMs = HTTP_TIMEOUT_MS } = params;
+  if (!apiKey)
+    throw new Error("API key is required");
+  const base = normalizeBaseUrl(apiBaseUrl);
+  const url = new URL3(`${base}/rpc/${rpcName}`);
+  const body = JSON.stringify(bodyObj);
+  const headers = {
+    "access-token": apiKey,
+    Prefer: "return=representation",
+    "Content-Type": "application/json",
+    "Content-Length": Buffer.byteLength(body).toString()
+  };
+  const controller = new AbortController;
+  let timeoutId = null;
+  let settled = false;
+  return new Promise((resolve5, reject) => {
+    const settledReject = (err) => {
+      if (settled)
+        return;
+      settled = true;
+      if (timeoutId)
+        clearTimeout(timeoutId);
+      reject(err);
+    };
+    const settledResolve = (value) => {
+      if (settled)
+        return;
+      settled = true;
+      if (timeoutId)
+        clearTimeout(timeoutId);
+      resolve5(value);
+    };
+    const req = https.request(url, {
+      method: "POST",
+      headers,
+      signal: controller.signal
+    }, (res) => {
+      if (timeoutId) {
+        clearTimeout(timeoutId);
+        timeoutId = null;
+      }
+      let data = "";
+      res.on("data", (chunk) => data += chunk);
+      res.on("end", () => {
+        if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
+          try {
+            const parsed = JSON.parse(data);
+            settledResolve(unwrapRpcResponse(parsed));
+          } catch {
+            settledReject(new Error(`Failed to parse RPC response: ${data}`));
+          }
+        } else {
+          const statusCode = res.statusCode || 0;
+          let payloadJson = null;
+          if (data) {
+            try {
+              payloadJson = JSON.parse(data);
+            } catch {
+              payloadJson = null;
+            }
+          }
+          settledReject(new RpcError({ rpcName, statusCode, payloadText: data, payloadJson }));
+        }
+      });
+      res.on("error", (err) => {
+        settledReject(err);
+      });
+    });
+    timeoutId = setTimeout(() => {
+      controller.abort();
+      req.destroy();
+      settledReject(new Error(`RPC ${rpcName} timed out after ${timeoutMs}ms (no response)`));
+    }, timeoutMs);
+    req.on("error", (err) => {
+      if (err.name === "AbortError" || err.code === "ABORT_ERR") {
+        settledReject(new Error(`RPC ${rpcName} timed out after ${timeoutMs}ms`));
+        return;
+      }
+      if (err.code === "ECONNREFUSED") {
+        settledReject(new Error(`RPC ${rpcName} failed: connection refused to ${url.host}`));
+      } else if (err.code === "ENOTFOUND") {
+        settledReject(new Error(`RPC ${rpcName} failed: DNS lookup failed for ${url.host}`));
+      } else if (err.code === "ECONNRESET") {
+        settledReject(new Error(`RPC ${rpcName} failed: connection reset by server`));
+      } else {
+        settledReject(err);
+      }
+    });
+    req.write(body);
+    req.end();
+  });
+}
+async function createCheckupReport(params) {
+  const { apiKey, apiBaseUrl, project, status } = params;
+  const bodyObj = {
+    access_token: apiKey,
+    project
+  };
+  if (status)
+    bodyObj.status = status;
+  const resp = await postRpc({
+    apiKey,
+    apiBaseUrl,
+    rpcName: "checkup_report_create",
+    bodyObj
+  });
+  const reportId = Number(resp?.report_id);
+  if (!Number.isFinite(reportId) || reportId <= 0) {
+    throw new Error(`Unexpected checkup_report_create response: ${JSON.stringify(resp)}`);
+  }
+  return { reportId };
+}
+async function uploadCheckupReportJson(params) {
+  const { apiKey, apiBaseUrl, reportId, filename, checkId, jsonText } = params;
+  const bodyObj = {
+    access_token: apiKey,
+    checkup_report_id: reportId,
+    filename,
+    check_id: checkId,
+    data: jsonText,
+    type: "json",
+    generate_issue: true
+  };
+  const resp = await postRpc({
+    apiKey,
+    apiBaseUrl,
+    rpcName: "checkup_report_file_post",
+    bodyObj
+  });
+  const chunkId = Number(resp?.report_chunck_id ?? resp?.report_chunk_id);
+  if (!Number.isFinite(chunkId) || chunkId <= 0) {
+    throw new Error(`Unexpected checkup_report_file_post response: ${JSON.stringify(resp)}`);
+  }
+  return { reportChunkId: chunkId };
+}
+
+// bin/postgres-ai.ts
 var rl = null;
 function getReadline() {
   if (!rl) {
@@ -24227,27 +25899,27 @@ function closeReadline() {
   }
 }
 async function execPromise(command) {
-  return new Promise((
+  return new Promise((resolve6, reject) => {
     childProcess.exec(command, (error2, stdout, stderr) => {
       if (error2) {
         const err = error2;
         err.code = error2.code ?? 1;
         reject(err);
       } else {
-
+        resolve6({ stdout, stderr });
       }
     });
   });
 }
 async function execFilePromise(file, args) {
-  return new Promise((
+  return new Promise((resolve6, reject) => {
     childProcess.execFile(file, args, (error2, stdout, stderr) => {
       if (error2) {
         const err = error2;
         err.code = error2.code ?? 1;
         reject(err);
       } else {
-
+        resolve6({ stdout, stderr });
      }
    });
  });
@@ -24288,17 +25960,181 @@ function spawn2(cmd, args, options) {
   };
 }
 async function question(prompt) {
-  return new Promise((
+  return new Promise((resolve6) => {
     getReadline().question(prompt, (answer) => {
-
+      resolve6(answer);
     });
   });
 }
+function expandHomePath(p) {
+  const s = (p || "").trim();
+  if (!s)
+    return s;
+  if (s === "~")
+    return os3.homedir();
+  if (s.startsWith("~/") || s.startsWith("~\\")) {
+    return path5.join(os3.homedir(), s.slice(2));
+  }
+  return s;
+}
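Annotation (not part of the diff): expandHomePath examples (the home directory shown is illustrative):

    expandHomePath("~/reports");  // "/home/alice/reports"
    expandHomePath("~");          // os3.homedir()
    expandHomePath("./reports");  // "./reports" (returned unchanged)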
|
|
25980
|
+
function createTtySpinner(enabled, initialText) {
|
|
25981
|
+
if (!enabled) {
|
|
25982
|
+
return {
|
|
25983
|
+
update: () => {},
|
|
25984
|
+
stop: () => {}
|
|
25985
|
+
};
|
|
25986
|
+
}
|
|
25987
|
+
const frames = ["|", "/", "-", "\\"];
|
|
25988
|
+
const startTs = Date.now();
|
|
25989
|
+
let text = initialText;
|
|
25990
|
+
let frameIdx = 0;
|
|
25991
|
+
let stopped = false;
|
|
25992
|
+
const render = () => {
|
|
25993
|
+
if (stopped)
|
|
25994
|
+
return;
|
|
25995
|
+
const elapsedSec = ((Date.now() - startTs) / 1000).toFixed(1);
|
|
25996
|
+
const frame = frames[frameIdx % frames.length] ?? frames[0] ?? "\u283F";
|
|
25997
|
+
frameIdx += 1;
|
|
25998
|
+
process.stdout.write(`\r\x1B[2K${frame} ${text} (${elapsedSec}s)`);
|
|
25999
|
+
};
|
|
26000
|
+
const timer = setInterval(render, 120);
|
|
26001
|
+
render();
|
|
26002
|
+
return {
|
|
26003
|
+
update: (t) => {
|
|
26004
|
+
text = t;
|
|
26005
|
+
render();
|
|
26006
|
+
},
|
|
26007
|
+
stop: (finalText) => {
|
|
26008
|
+
if (stopped)
|
|
26009
|
+
return;
|
|
26010
|
+
stopped = true;
|
|
26011
|
+
clearInterval(timer);
|
|
26012
|
+
process.stdout.write("\r\x1B[2K");
|
|
26013
|
+
if (finalText && finalText.trim()) {
|
|
26014
|
+
process.stdout.write(finalText);
|
|
26015
|
+
}
|
|
26016
|
+
process.stdout.write(`
|
|
26017
|
+
`);
|
|
26018
|
+
}
|
|
26019
|
+
};
|
|
26020
|
+
}
|
|
26021
|
+
function prepareOutputDirectory(outputOpt) {
|
|
26022
|
+
if (!outputOpt)
|
|
26023
|
+
return;
|
|
26024
|
+
const outputDir = expandHomePath(outputOpt);
|
|
26025
|
+
const outputPath = path5.isAbsolute(outputDir) ? outputDir : path5.resolve(process.cwd(), outputDir);
|
|
26026
|
+
if (!fs5.existsSync(outputPath)) {
|
|
26027
|
+
try {
|
|
26028
|
+
fs5.mkdirSync(outputPath, { recursive: true });
|
|
26029
|
+
} catch (e) {
|
|
26030
|
+
const errAny = e;
|
|
26031
|
+
const code = typeof errAny?.code === "string" ? errAny.code : "";
|
|
26032
|
+
const msg = errAny instanceof Error ? errAny.message : String(errAny);
|
|
26033
|
+
if (code === "EACCES" || code === "EPERM" || code === "ENOENT") {
|
|
26034
|
+
console.error(`Error: Failed to create output directory: ${outputPath}`);
|
|
26035
|
+
console.error(`Reason: ${msg}`);
|
|
26036
|
+
console.error("Tip: choose a writable path, e.g. --output ./reports or --output ~/reports");
|
|
26037
|
+
return null;
|
|
26038
|
+
}
|
|
26039
|
+
throw e;
|
|
26040
|
+
}
|
|
26041
|
+
}
|
|
26042
|
+
return outputPath;
|
|
26043
|
+
}
|
|
26044
|
+
function prepareUploadConfig(opts, rootOpts, shouldUpload, uploadExplicitlyRequested) {
|
|
26045
|
+
if (!shouldUpload)
|
|
26046
|
+
return;
|
|
26047
|
+
const { apiKey } = getConfig(rootOpts);
|
|
26048
|
+
if (!apiKey) {
|
|
26049
|
+
if (uploadExplicitlyRequested) {
|
|
26050
|
+
console.error("Error: API key is required for upload");
|
|
26051
|
+
console.error("Tip: run 'postgresai auth' or pass --api-key / set PGAI_API_KEY");
|
|
26052
|
+
return null;
|
|
26053
|
+
}
|
|
26054
|
+
return;
|
|
26055
|
+
}
|
|
26056
|
+
const cfg = readConfig();
|
|
26057
|
+
const { apiBaseUrl } = resolveBaseUrls2(rootOpts, cfg);
|
|
26058
|
+
let project = (opts.project || cfg.defaultProject || "").trim();
|
|
26059
|
+
let projectWasGenerated = false;
|
|
26060
|
+
if (!project) {
|
|
26061
|
+
project = `project_${crypto2.randomBytes(6).toString("hex")}`;
|
|
26062
|
+
projectWasGenerated = true;
|
|
26063
|
+
try {
|
|
26064
|
+
writeConfig({ defaultProject: project });
|
|
26065
|
+
} catch (e) {
|
|
26066
|
+
const message = e instanceof Error ? e.message : String(e);
|
|
26067
|
+
console.error(`Warning: Failed to save generated default project: ${message}`);
|
|
26068
|
+
}
|
|
26069
|
+
}
|
|
26070
|
+
return {
|
|
26071
|
+
config: { apiKey, apiBaseUrl, project },
|
|
26072
|
+
projectWasGenerated
|
|
26073
|
+
};
|
|
26074
|
+
}
|
|
26075
|
+
async function uploadCheckupReports(uploadCfg, reports, spinner, logUpload) {
|
|
26076
|
+
spinner.update("Creating remote checkup report");
|
|
26077
|
+
const created = await withRetry(() => createCheckupReport({
|
|
26078
|
+
apiKey: uploadCfg.apiKey,
|
|
26079
|
+
apiBaseUrl: uploadCfg.apiBaseUrl,
|
|
26080
|
+
project: uploadCfg.project
|
|
26081
|
+
}), { maxAttempts: 3 }, (attempt, err, delayMs) => {
|
|
26082
|
+
const errMsg = err instanceof Error ? err.message : String(err);
|
|
26083
|
+
logUpload(`[Retry ${attempt}/3] createCheckupReport failed: ${errMsg}, retrying in ${delayMs}ms...`);
|
|
26084
|
+
});
|
|
26085
|
+
const reportId = created.reportId;
|
|
26086
|
+
logUpload(`Created remote checkup report: ${reportId}`);
|
|
26087
|
+
const uploaded = [];
|
|
26088
|
+
for (const [checkId, report] of Object.entries(reports)) {
|
|
26089
|
+
spinner.update(`Uploading ${checkId}.json`);
|
|
26090
|
+
const jsonText = JSON.stringify(report, null, 2);
|
|
26091
|
+
const r = await withRetry(() => uploadCheckupReportJson({
|
|
26092
|
+
apiKey: uploadCfg.apiKey,
|
|
26093
|
+
apiBaseUrl: uploadCfg.apiBaseUrl,
|
|
26094
|
+
reportId,
|
|
26095
|
+
filename: `${checkId}.json`,
|
|
26096
|
+
checkId,
|
|
26097
|
+
jsonText
|
|
26098
|
+
}), { maxAttempts: 3 }, (attempt, err, delayMs) => {
|
|
26099
|
+
const errMsg = err instanceof Error ? err.message : String(err);
|
|
26100
|
+
logUpload(`[Retry ${attempt}/3] Upload ${checkId}.json failed: ${errMsg}, retrying in ${delayMs}ms...`);
|
|
26101
|
+
});
|
|
26102
|
+
uploaded.push({ checkId, filename: `${checkId}.json`, chunkId: r.reportChunkId });
|
|
26103
|
+
}
|
|
26104
|
+
logUpload("Upload completed");
|
|
26105
|
+
return { project: uploadCfg.project, reportId, uploaded };
|
|
26106
|
+
}
|
|
26107
|
+
function writeReportFiles(reports, outputPath) {
|
|
26108
|
+
for (const [checkId, report] of Object.entries(reports)) {
|
|
26109
|
+
const filePath = path5.join(outputPath, `${checkId}.json`);
|
|
26110
|
+
fs5.writeFileSync(filePath, JSON.stringify(report, null, 2), "utf8");
|
|
26111
|
+
console.log(`\u2713 ${checkId}: ${filePath}`);
|
|
26112
|
+
}
|
|
26113
|
+
}
|
|
26114
|
+
function printUploadSummary(summary, projectWasGenerated, useStderr) {
|
|
26115
|
+
const out = useStderr ? console.error : console.log;
|
|
26116
|
+
out(`
|
|
26117
|
+
Checkup report uploaded`);
|
|
26118
|
+
out(`======================
|
|
26119
|
+
`);
|
|
26120
|
+
if (projectWasGenerated) {
|
|
26121
|
+
out(`Project: ${summary.project} (generated and saved as default)`);
|
|
26122
|
+
} else {
|
|
26123
|
+
out(`Project: ${summary.project}`);
|
|
26124
|
+
}
|
|
26125
|
+
out(`Report ID: ${summary.reportId}`);
|
|
26126
|
+
out("View in Console: console.postgres.ai \u2192 Support \u2192 checkup reports");
|
|
26127
|
+
out("");
|
|
26128
|
+
out("Files:");
|
|
26129
|
+
for (const item of summary.uploaded) {
|
|
26130
|
+
out(`- ${item.checkId}: ${item.filename}`);
|
|
26131
|
+
}
|
|
26132
|
+
}
|
|
24297
26133
|
function getDefaultMonitoringProjectDir() {
|
|
24298
26134
|
const override = process.env.PGAI_PROJECT_DIR;
|
|
24299
26135
|
if (override && override.trim())
|
|
24300
26136
|
return override.trim();
|
|
24301
|
-
return
|
|
26137
|
+
return path5.join(getConfigDir(), "monitoring");
|
|
24302
26138
|
}
|
|
24303
26139
|
async function downloadText(url) {
|
|
24304
26140
|
const controller = new AbortController;
|
|
@@ -24315,12 +26151,12 @@ async function downloadText(url) {
|
|
|
24315
26151
|
}
|
|
24316
26152
|
async function ensureDefaultMonitoringProject() {
|
|
24317
26153
|
const projectDir = getDefaultMonitoringProjectDir();
|
|
24318
|
-
const composeFile =
|
|
24319
|
-
const instancesFile =
|
|
24320
|
-
if (!
|
|
24321
|
-
|
|
26154
|
+
const composeFile = path5.resolve(projectDir, "docker-compose.yml");
|
|
26155
|
+
const instancesFile = path5.resolve(projectDir, "instances.yml");
|
|
26156
|
+
if (!fs5.existsSync(projectDir)) {
|
|
26157
|
+
fs5.mkdirSync(projectDir, { recursive: true, mode: 448 });
|
|
24322
26158
|
}
|
|
24323
|
-
if (!
|
|
26159
|
+
if (!fs5.existsSync(composeFile)) {
|
|
24324
26160
|
const refs = [
|
|
24325
26161
|
process.env.PGAI_PROJECT_REF,
|
|
24326
26162
|
package_default.version,
|
|
@@ -24332,36 +26168,36 @@ async function ensureDefaultMonitoringProject() {
|
|
|
24332
26168
|
const url = `https://gitlab.com/postgres-ai/postgres_ai/-/raw/${encodeURIComponent(ref)}/docker-compose.yml`;
|
|
24333
26169
|
try {
|
|
24334
26170
|
const text = await downloadText(url);
|
|
24335
|
-
|
|
26171
|
+
fs5.writeFileSync(composeFile, text, { encoding: "utf8", mode: 384 });
|
|
24336
26172
|
break;
|
|
24337
26173
|
} catch (err) {
|
|
24338
26174
|
lastErr = err;
|
|
24339
26175
|
}
|
|
24340
26176
|
}
|
|
24341
|
-
if (!
|
|
26177
|
+
if (!fs5.existsSync(composeFile)) {
|
|
24342
26178
|
const msg = lastErr instanceof Error ? lastErr.message : String(lastErr);
|
|
24343
26179
|
throw new Error(`Failed to bootstrap docker-compose.yml: ${msg}`);
|
|
24344
26180
|
}
|
|
24345
26181
|
}
|
|
24346
|
-
if (!
|
|
26182
|
+
if (!fs5.existsSync(instancesFile)) {
|
|
24347
26183
|
const header = `# PostgreSQL instances to monitor
|
|
24348
26184
|
` + `# Add your instances using: pgai mon targets add <connection-string> <name>
|
|
24349
26185
|
|
|
24350
26186
|
`;
|
|
24351
|
-
|
|
26187
|
+
fs5.writeFileSync(instancesFile, header, { encoding: "utf8", mode: 384 });
|
|
24352
26188
|
}
|
|
24353
|
-
const pgwatchConfig =
|
|
24354
|
-
if (!
|
|
24355
|
-
|
|
26189
|
+
const pgwatchConfig = path5.resolve(projectDir, ".pgwatch-config");
|
|
26190
|
+
if (!fs5.existsSync(pgwatchConfig)) {
|
|
26191
|
+
fs5.writeFileSync(pgwatchConfig, "", { encoding: "utf8", mode: 384 });
|
|
24356
26192
|
}
|
|
24357
|
-
const envFile =
|
|
24358
|
-
if (!
|
|
26193
|
+
const envFile = path5.resolve(projectDir, ".env");
|
|
26194
|
+
if (!fs5.existsSync(envFile)) {
|
|
24359
26195
|
const envText = `PGAI_TAG=${package_default.version}
|
|
24360
26196
|
# PGAI_REGISTRY=registry.gitlab.com/postgres-ai/postgres_ai
|
|
24361
26197
|
`;
|
|
24362
|
-
|
|
26198
|
+
fs5.writeFileSync(envFile, envText, { encoding: "utf8", mode: 384 });
|
|
24363
26199
|
}
|
|
24364
|
-
return { fs:
|
|
26200
|
+
return { fs: fs5, path: path5, projectDir, composeFile, instancesFile };
|
|
24365
26201
|
}
|
|
24366
26202
|
function getConfig(opts) {
|
|
24367
26203
|
let apiKey = opts.apiKey || process.env.PGAI_API_KEY || "";
|
|
@@ -24393,6 +26229,16 @@ function printResult(result, json2) {
 }
 var program2 = new Command;
 program2.name("postgres-ai").description("PostgresAI CLI").version(package_default.version).option("--api-key <key>", "API key (overrides PGAI_API_KEY)").option("--api-base-url <url>", "API base URL for backend RPC (overrides PGAI_API_BASE_URL)").option("--ui-base-url <url>", "UI base URL for browser routes (overrides PGAI_UI_BASE_URL)");
+program2.command("set-default-project <project>").description("store default project for checkup uploads").action(async (project) => {
+const value = (project || "").trim();
+if (!value) {
+console.error("Error: project is required");
+process.exitCode = 1;
+return;
+}
+writeConfig({ defaultProject: value });
+console.log(`Default project saved: ${value}`);
+});
 program2.command("prepare-db [conn]").description("prepare database for monitoring: create monitoring user, required view(s), and grant permissions (idempotent)").option("--db-url <url>", "PostgreSQL connection URL (admin) to run the setup against (deprecated; pass it as positional arg)").option("-h, --host <host>", "PostgreSQL host (psql-like)").option("-p, --port <port>", "PostgreSQL port (psql-like)").option("-U, --username <username>", "PostgreSQL user (psql-like)").option("-d, --dbname <dbname>", "PostgreSQL database name (psql-like)").option("--admin-password <password>", "Admin connection password (otherwise uses PGPASSWORD if set)").option("--monitoring-user <name>", "Monitoring role name to create/update", DEFAULT_MONITORING_USER).option("--password <password>", "Monitoring role password (overrides PGAI_MON_PASSWORD)").option("--skip-optional-permissions", "Skip optional permissions (RDS/self-managed extras)", false).option("--verify", "Verify that monitoring role/permissions are in place (no changes)", false).option("--reset-password", "Reset monitoring role password only (no other changes)", false).option("--print-sql", "Print SQL plan and exit (no changes applied)", false).option("--print-password", "Print generated monitoring password (DANGEROUS in CI logs)", false).addHelpText("after", [
 "",
 "Examples:",
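The new `set-default-project` subcommand simply persists a project name through `writeConfig` so that later `checkup` runs can omit `--project`. A hedged usage sketch (project name hypothetical):

    $ pgai set-default-project my_project
    Default project saved: my_project

After this, `pgai checkup <conn>` would upload to `my_project` unless `--project` or `--no-upload` overrides it.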
@@ -24663,16 +26509,117 @@ program2.command("prepare-db [conn]").description("prepare database for monitori
 }
 }
 });
+program2.command("checkup [conn]").description("generate health check reports directly from PostgreSQL (express mode)").option("--check-id <id>", `specific check to run: ${Object.keys(CHECK_INFO).join(", ")}, or ALL`, "ALL").option("--node-name <name>", "node name for reports", "node-01").option("--output <path>", "output directory for JSON files").option("--[no-]upload", "upload JSON results to PostgresAI (default: enabled; requires API key)", undefined).option("--project <project>", "project name or ID for remote upload (used with --upload; defaults to config defaultProject; auto-generated on first run)").option("--json", "output JSON to stdout (implies --no-upload)").addHelpText("after", [
+"",
+"Available checks:",
+...Object.entries(CHECK_INFO).map(([id, title]) => ` ${id}: ${title}`),
+"",
+"Examples:",
+" postgresai checkup postgresql://user:pass@host:5432/db",
+" postgresai checkup postgresql://user:pass@host:5432/db --check-id A003",
+" postgresai checkup postgresql://user:pass@host:5432/db --output ./reports",
+" postgresai checkup postgresql://user:pass@host:5432/db --project my_project",
+" postgresai set-default-project my_project",
+" postgresai checkup postgresql://user:pass@host:5432/db",
+" postgresai checkup postgresql://user:pass@host:5432/db --no-upload --json"
+].join(`
+`)).action(async (conn, opts, cmd) => {
+if (!conn) {
+cmd.outputHelp();
+process.exitCode = 1;
+return;
+}
+const shouldPrintJson = !!opts.json;
+const uploadExplicitlyRequested = opts.upload === true;
+const uploadExplicitlyDisabled = opts.upload === false || shouldPrintJson;
+let shouldUpload = !uploadExplicitlyDisabled;
+const outputPath = prepareOutputDirectory(opts.output);
+if (outputPath === null) {
+process.exitCode = 1;
+return;
+}
+const rootOpts = program2.opts();
+const uploadResult = prepareUploadConfig(opts, rootOpts, shouldUpload, uploadExplicitlyRequested);
+if (uploadResult === null) {
+process.exitCode = 1;
+return;
+}
+const uploadCfg = uploadResult?.config;
+const projectWasGenerated = uploadResult?.projectWasGenerated ?? false;
+shouldUpload = !!uploadCfg;
+const adminConn = resolveAdminConnection({
+conn,
+envPassword: process.env.PGPASSWORD
+});
+let client;
+const spinnerEnabled = !!process.stdout.isTTY && shouldUpload;
+const spinner = createTtySpinner(spinnerEnabled, "Connecting to Postgres");
+try {
+spinner.update("Connecting to Postgres");
+const connResult = await connectWithSslFallback(Client, adminConn);
+client = connResult.client;
+let reports;
+if (opts.checkId === "ALL") {
+reports = await generateAllReports(client, opts.nodeName, (p) => {
+spinner.update(`Running ${p.checkId}: ${p.checkTitle} (${p.index}/${p.total})`);
+});
+} else {
+const checkId = opts.checkId.toUpperCase();
+const generator = REPORT_GENERATORS[checkId];
+if (!generator) {
+spinner.stop();
+console.error(`Unknown check ID: ${opts.checkId}`);
+console.error(`Available: ${Object.keys(CHECK_INFO).join(", ")}, ALL`);
+process.exitCode = 1;
+return;
+}
+spinner.update(`Running ${checkId}: ${CHECK_INFO[checkId] || checkId}`);
+reports = { [checkId]: await generator(client, opts.nodeName) };
+}
+let uploadSummary;
+if (uploadCfg) {
+const logUpload = (msg) => {
+(shouldPrintJson ? console.error : console.log)(msg);
+};
+uploadSummary = await uploadCheckupReports(uploadCfg, reports, spinner, logUpload);
+}
+spinner.stop();
+if (outputPath) {
+writeReportFiles(reports, outputPath);
+}
+if (uploadSummary) {
+printUploadSummary(uploadSummary, projectWasGenerated, shouldPrintJson);
+}
+if (shouldPrintJson || !shouldUpload && !opts.output) {
+console.log(JSON.stringify(reports, null, 2));
+}
+} catch (error2) {
+if (error2 instanceof RpcError) {
+for (const line of formatRpcErrorForDisplay(error2)) {
+console.error(line);
+}
+} else {
+const message = error2 instanceof Error ? error2.message : String(error2);
+console.error(`Error: ${message}`);
+}
+process.exitCode = 1;
+} finally {
+spinner.stop();
+if (client) {
+await client.end();
+}
+}
+});
 function resolvePaths() {
 const startDir = process.cwd();
 let currentDir = startDir;
 while (true) {
-const composeFile =
-if (
-const instancesFile =
-return { fs:
+const composeFile = path5.resolve(currentDir, "docker-compose.yml");
+if (fs5.existsSync(composeFile)) {
+const instancesFile = path5.resolve(currentDir, "instances.yml");
+return { fs: fs5, path: path5, projectDir: currentDir, composeFile, instancesFile };
 }
-const parentDir =
+const parentDir = path5.dirname(currentDir);
 if (parentDir === currentDir)
 break;
 currentDir = parentDir;
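Two details in the `checkup` action are worth spelling out. First, `--upload` is tri-state: `opts.upload === true` means the flag was passed explicitly, `=== false` (or `--json`) disables uploading, and `undefined` leaves the default-on path, which can still end up disabled if `prepareUploadConfig` returns no config. Second, the single-check path normalizes `--check-id` with `toUpperCase()` before the `REPORT_GENERATORS` lookup, so `a003` and `A003` behave the same. Example invocations, taken from the help text above:

    $ postgresai checkup postgresql://user:pass@host:5432/db              # all checks, upload on by default
    $ postgresai checkup postgresql://user:pass@host:5432/db --check-id A003
    $ postgresai checkup postgresql://user:pass@host:5432/db --no-upload --json   # print JSON, skip upload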
@@ -24738,12 +26685,12 @@ async function runCompose(args) {
 return 1;
 }
 const env = { ...process.env };
-const cfgPath =
-if (
+const cfgPath = path5.resolve(projectDir, ".pgwatch-config");
+if (fs5.existsSync(cfgPath)) {
 try {
-const stats =
+const stats = fs5.statSync(cfgPath);
 if (!stats.isDirectory()) {
-const content =
+const content = fs5.readFileSync(cfgPath, "utf8");
 const match = content.match(/^grafana_password=([^\r\n]+)/m);
 if (match) {
 env.GF_SECURITY_ADMIN_PASSWORD = match[1].trim();
@@ -24751,13 +26698,13 @@ async function runCompose(args) {
 }
 } catch (err) {}
 }
-return new Promise((
+return new Promise((resolve6) => {
 const child = spawn2(cmd[0], [...cmd.slice(1), "-f", composeFile, ...args], {
 stdio: "inherit",
 env,
 cwd: projectDir
 });
-child.on("close", (code) =>
+child.on("close", (code) => resolve6(code || 0));
 });
 }
 program2.command("help", { isDefault: true }).description("show help").action(() => {
@@ -24775,17 +26722,17 @@ mon.command("local-install").description("install local monitoring stack (genera
 const { projectDir } = await resolveOrInitPaths();
 console.log(`Project directory: ${projectDir}
 `);
-const envFile =
+const envFile = path5.resolve(projectDir, ".env");
 const imageTag = opts.tag || package_default.version;
 const envLines = [`PGAI_TAG=${imageTag}`];
-if (
-const existingEnv =
+if (fs5.existsSync(envFile)) {
+const existingEnv = fs5.readFileSync(envFile, "utf8");
 const pwdMatch = existingEnv.match(/^GF_SECURITY_ADMIN_PASSWORD=(.+)$/m);
 if (pwdMatch) {
 envLines.push(`GF_SECURITY_ADMIN_PASSWORD=${pwdMatch[1]}`);
 }
 }
-
+fs5.writeFileSync(envFile, envLines.join(`
 `) + `
 `, { encoding: "utf8", mode: 384 });
 if (opts.tag) {
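Given the carry-over logic above, a regenerated `.env` after `local-install` would look roughly like this (values illustrative; the Grafana line appears only if one already existed):

    PGAI_TAG=0.14.0-dev.54
    GF_SECURITY_ADMIN_PASSWORD=<previous value, preserved>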
@@ -24821,7 +26768,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
 if (opts.apiKey) {
 console.log("Using API key provided via --api-key parameter");
 writeConfig({ apiKey: opts.apiKey });
-
+fs5.writeFileSync(path5.resolve(projectDir, ".pgwatch-config"), `api_key=${opts.apiKey}
 `, {
 encoding: "utf8",
 mode: 384
@@ -24842,7 +26789,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
 const trimmedKey = inputApiKey.trim();
 if (trimmedKey) {
 writeConfig({ apiKey: trimmedKey });
-
+fs5.writeFileSync(path5.resolve(projectDir, ".pgwatch-config"), `api_key=${trimmedKey}
 `, {
 encoding: "utf8",
 mode: 384
@@ -24879,7 +26826,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
 # Add your instances using: postgres-ai mon targets add
 
 `;
-
+fs5.writeFileSync(instancesPath, emptyInstancesContent, "utf8");
 console.log(`Instances file: ${instancesPath}`);
 console.log(`Project directory: ${projectDir2}
 `);
@@ -24911,7 +26858,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
 node_name: ${instanceName}
 sink_type: ~sink_type~
 `;
-
+fs5.appendFileSync(instancesPath, body, "utf8");
 console.log(`\u2713 Monitoring target '${instanceName}' added
 `);
 console.log("Testing connection to the added instance...");
@@ -24966,7 +26913,7 @@ You can provide either:`);
 node_name: ${instanceName}
 sink_type: ~sink_type~
 `;
-
+fs5.appendFileSync(instancesPath, body, "utf8");
 console.log(`\u2713 Monitoring target '${instanceName}' added
 `);
 console.log("Testing connection to the added instance...");
@@ -25006,13 +26953,13 @@ You can provide either:`);
 console.log(`\u2713 Configuration updated
 `);
 console.log(opts.demo ? "Step 4: Configuring Grafana security..." : "Step 4: Configuring Grafana security...");
-const cfgPath =
+const cfgPath = path5.resolve(projectDir, ".pgwatch-config");
 let grafanaPassword = "";
 try {
-if (
-const stats =
+if (fs5.existsSync(cfgPath)) {
+const stats = fs5.statSync(cfgPath);
 if (!stats.isDirectory()) {
-const content =
+const content = fs5.readFileSync(cfgPath, "utf8");
 const match = content.match(/^grafana_password=([^\r\n]+)/m);
 if (match) {
 grafanaPassword = match[1].trim();
@@ -25025,15 +26972,15 @@ You can provide either:`);
 '`);
 grafanaPassword = password.trim();
 let configContent = "";
-if (
-const stats =
+if (fs5.existsSync(cfgPath)) {
+const stats = fs5.statSync(cfgPath);
 if (!stats.isDirectory()) {
-configContent =
+configContent = fs5.readFileSync(cfgPath, "utf8");
 }
 }
 const lines = configContent.split(/\r?\n/).filter((l) => !/^grafana_password=/.test(l));
 lines.push(`grafana_password=${grafanaPassword}`);
-
+fs5.writeFileSync(cfgPath, lines.filter(Boolean).join(`
 `) + `
 `, "utf8");
 }
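Reassembled across the template-literal line breaks, the write in this hunk is a drop-and-append rewrite of the key=value config file; `generate-grafana-password` further down uses the identical idiom. A sketch using the bundle's own names (`fs5`, `cfgPath`, `configContent`, `grafanaPassword` assumed in scope):

    // drop any existing grafana_password line, then append the new value
    const lines = configContent.split(/\r?\n/).filter((l) => !/^grafana_password=/.test(l));
    lines.push(`grafana_password=${grafanaPassword}`);
    fs5.writeFileSync(cfgPath, lines.filter(Boolean).join("\n") + "\n", "utf8");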
@@ -25147,7 +27094,7 @@ mon.command("health").description("health check for monitoring services").option
 if (attempt > 1) {
 console.log(`Retrying (attempt ${attempt}/${maxAttempts})...
 `);
-await new Promise((
+await new Promise((resolve6) => setTimeout(resolve6, 5000));
 }
 allHealthy = true;
 for (const service of services) {
@@ -25195,11 +27142,11 @@ mon.command("config").description("show monitoring services configuration").acti
 console.log(`Project Directory: ${projectDir}`);
 console.log(`Docker Compose File: ${composeFile}`);
 console.log(`Instances File: ${instancesFile}`);
-if (
+if (fs5.existsSync(instancesFile)) {
 console.log(`
 Instances configuration:
 `);
-const text =
+const text = fs5.readFileSync(instancesFile, "utf8");
 process.stdout.write(text);
 if (!/\n$/.test(text))
 console.log();
@@ -25214,8 +27161,8 @@ mon.command("update").description("update monitoring stack").action(async () =>
 console.log(`Updating PostgresAI monitoring stack...
 `);
 try {
-const gitDir =
-if (!
+const gitDir = path5.resolve(process.cwd(), ".git");
+if (!fs5.existsSync(gitDir)) {
 console.error("Not a git repository. Cannot update.");
 process.exitCode = 1;
 return;
@@ -25341,13 +27288,13 @@ mon.command("check").description("monitoring services system readiness check").a
 var targets = mon.command("targets").description("manage databases to monitor");
 targets.command("list").description("list monitoring target databases").action(async () => {
 const { instancesFile: instancesPath, projectDir } = await resolveOrInitPaths();
-if (!
+if (!fs5.existsSync(instancesPath)) {
 console.error(`instances.yml not found in ${projectDir}`);
 process.exitCode = 1;
 return;
 }
 try {
-const content =
+const content = fs5.readFileSync(instancesPath, "utf8");
 const instances = load(content);
 if (!instances || !Array.isArray(instances) || instances.length === 0) {
 console.log("No monitoring targets configured");
@@ -25396,8 +27343,8 @@ targets.command("add [connStr] [name]").description("add monitoring target datab
 const db = m[5];
 const instanceName = name && name.trim() ? name.trim() : `${host}-${db}`.replace(/[^a-zA-Z0-9-]/g, "-");
 try {
-if (
-const content2 =
+if (fs5.existsSync(file)) {
+const content2 = fs5.readFileSync(file, "utf8");
 const instances = load(content2) || [];
 if (Array.isArray(instances)) {
 const exists = instances.some((inst) => inst.name === instanceName);
@@ -25409,7 +27356,7 @@ targets.command("add [connStr] [name]").description("add monitoring target datab
 }
 }
 } catch (err) {
-const content2 =
+const content2 = fs5.existsSync(file) ? fs5.readFileSync(file, "utf8") : "";
 if (new RegExp(`^- name: ${instanceName}$`, "m").test(content2)) {
 console.error(`Monitoring target '${instanceName}' already exists`);
 process.exitCode = 1;
@@ -25428,20 +27375,20 @@ targets.command("add [connStr] [name]").description("add monitoring target datab
 node_name: ${instanceName}
 sink_type: ~sink_type~
 `;
-const content =
-
+const content = fs5.existsSync(file) ? fs5.readFileSync(file, "utf8") : "";
+fs5.appendFileSync(file, (content && !/\n$/.test(content) ? `
 ` : "") + body, "utf8");
 console.log(`Monitoring target '${instanceName}' added`);
 });
 targets.command("remove <name>").description("remove monitoring target database").action(async (name) => {
 const { instancesFile: file } = await resolveOrInitPaths();
-if (!
+if (!fs5.existsSync(file)) {
 console.error("instances.yml not found");
 process.exitCode = 1;
 return;
 }
 try {
-const content =
+const content = fs5.readFileSync(file, "utf8");
 const instances = load(content);
 if (!instances || !Array.isArray(instances)) {
 console.error("Invalid instances.yml format");
@@ -25454,7 +27401,7 @@ targets.command("remove <name>").description("remove monitoring target database"
 process.exitCode = 1;
 return;
 }
-
+fs5.writeFileSync(file, dump(filtered), "utf8");
 console.log(`Monitoring target '${name}' removed`);
 } catch (err) {
 const message = err instanceof Error ? err.message : String(err);
@@ -25464,13 +27411,13 @@ targets.command("remove <name>").description("remove monitoring target database"
 });
 targets.command("test <name>").description("test monitoring target database connectivity").action(async (name) => {
 const { instancesFile: instancesPath } = await resolveOrInitPaths();
-if (!
+if (!fs5.existsSync(instancesPath)) {
 console.error("instances.yml not found");
 process.exitCode = 1;
 return;
 }
 try {
-const content =
+const content = fs5.readFileSync(instancesPath, "utf8");
 const instances = load(content);
 if (!instances || !Array.isArray(instances)) {
 console.error("Invalid instances.yml format");
@@ -25513,8 +27460,15 @@ auth.command("login", { isDefault: true }).description("authenticate via browser
 process.exitCode = 1;
 return;
 }
+const existingConfig = readConfig();
+const existingProject = existingConfig.defaultProject;
 writeConfig({ apiKey: trimmedKey });
+deleteConfigKeys(["orgId"]);
 console.log(`API key saved to ${getConfigPath()}`);
+if (existingProject) {
+console.log(`Note: Your default project "${existingProject}" has been preserved.`);
+console.log(` If this key belongs to a different account, use --project to specify a new one.`);
+}
 return;
 }
 console.log(`Starting authentication flow...
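When a key is pasted directly, the login path now drops the cached `orgId` (it belonged to the previous key) while deliberately keeping `defaultProject`. A compact restatement of the added lines, assuming the bundle's `readConfig`/`writeConfig`/`deleteConfigKeys` helpers:

    const existingProject = readConfig().defaultProject;
    writeConfig({ apiKey: trimmedKey });  // store the new key
    deleteConfigKeys(["orgId"]);          // org is unknown until verified against the new key
    // defaultProject survives; the user is told to pass --project if the account changed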
@@ -25531,9 +27485,8 @@ auth.command("login", { isDefault: true }).description("authenticate via browser
 console.log("Starting local callback server...");
 const requestedPort = opts.port || 0;
 const callbackServer = createCallbackServer(requestedPort, params.state, 120000);
-
-const
-const redirectUri = `http://localhost:${actualPort}/callback`;
+const actualPort = await callbackServer.ready;
+const redirectUri = `http://127.0.0.1:${actualPort}/callback`;
 console.log(`Callback server listening on port ${actualPort}`);
 console.log("Initializing authentication session...");
 const initData = JSON.stringify({
@@ -25651,15 +27604,28 @@ Please verify the --api-base-url parameter.`);
 const result = JSON.parse(exchangeBody);
 const apiToken = result.api_token || result?.[0]?.result?.api_token;
 const orgId = result.org_id || result?.[0]?.result?.org_id;
+const existingConfig = readConfig();
+const existingOrgId = existingConfig.orgId;
+const existingProject = existingConfig.defaultProject;
+const orgChanged = existingOrgId && existingOrgId !== orgId;
 writeConfig({
 apiKey: apiToken,
 baseUrl: apiBaseUrl,
 orgId
 });
+if (orgChanged && existingProject) {
+deleteConfigKeys(["defaultProject"]);
+console.log(`
+Note: Organization changed (${existingOrgId} \u2192 ${orgId}).`);
+console.log(` Default project "${existingProject}" has been cleared.`);
+}
 console.log(`
 Authentication successful!`);
 console.log(`API key saved to: ${getConfigPath()}`);
 console.log(`Organization ID: ${orgId}`);
+if (!orgChanged && existingProject) {
+console.log(`Default project: ${existingProject} (preserved)`);
+}
 console.log(`
 You can now use the CLI without specifying an API key.`);
 process.exit(0);
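In the browser flow, by contrast, the default project is cleared only when the organization id returned by the token exchange differs from the stored one. The rule reduces to:

    const orgChanged = existingOrgId && existingOrgId !== orgId;
    if (orgChanged && existingProject) {
      deleteConfigKeys(["defaultProject"]); // projects are org-scoped, so the old one is stale
    }
    // same org (or first login): the project is kept and reported as "(preserved)"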
@@ -25704,15 +27670,15 @@ To authenticate, run: pgai auth`);
 });
 auth.command("remove-key").description("remove API key").action(async () => {
 const newConfigPath = getConfigPath();
-const hasNewConfig =
+const hasNewConfig = fs5.existsSync(newConfigPath);
 let legacyPath;
 try {
 const { projectDir } = await resolveOrInitPaths();
-legacyPath =
+legacyPath = path5.resolve(projectDir, ".pgwatch-config");
 } catch {
-legacyPath =
+legacyPath = path5.resolve(process.cwd(), ".pgwatch-config");
 }
-const hasLegacyConfig =
+const hasLegacyConfig = fs5.existsSync(legacyPath) && fs5.statSync(legacyPath).isFile();
 if (!hasNewConfig && !hasLegacyConfig) {
 console.log("No API key configured");
 return;
@@ -25722,11 +27688,11 @@ auth.command("remove-key").description("remove API key").action(async () => {
 }
 if (hasLegacyConfig) {
 try {
-const content =
+const content = fs5.readFileSync(legacyPath, "utf8");
 const filtered = content.split(/\r?\n/).filter((l) => !/^api_key=/.test(l)).join(`
 `).replace(/\n+$/g, `
 `);
-
+fs5.writeFileSync(legacyPath, filtered, "utf8");
 } catch (err) {
 console.warn(`Warning: Could not update legacy config: ${err instanceof Error ? err.message : String(err)}`);
 }
@@ -25737,7 +27703,7 @@ To authenticate again, run: pgai auth`);
 });
 mon.command("generate-grafana-password").description("generate Grafana password for monitoring services").action(async () => {
 const { projectDir } = await resolveOrInitPaths();
-const cfgPath =
+const cfgPath = path5.resolve(projectDir, ".pgwatch-config");
 try {
 const { stdout: password } = await execPromise(`openssl rand -base64 12 | tr -d '
 '`);
@@ -25748,17 +27714,17 @@ mon.command("generate-grafana-password").description("generate Grafana password
 return;
 }
 let configContent = "";
-if (
-const stats =
+if (fs5.existsSync(cfgPath)) {
+const stats = fs5.statSync(cfgPath);
 if (stats.isDirectory()) {
 console.error(".pgwatch-config is a directory, expected a file. Skipping read.");
 } else {
-configContent =
+configContent = fs5.readFileSync(cfgPath, "utf8");
 }
 }
 const lines = configContent.split(/\r?\n/).filter((l) => !/^grafana_password=/.test(l));
 lines.push(`grafana_password=${newPassword}`);
-
+fs5.writeFileSync(cfgPath, lines.filter(Boolean).join(`
 `) + `
 `, "utf8");
 console.log("\u2713 New Grafana password generated and saved");
@@ -25780,19 +27746,19 @@ Note: This command requires 'openssl' to be installed`);
 });
 mon.command("show-grafana-credentials").description("show Grafana credentials for monitoring services").action(async () => {
 const { projectDir } = await resolveOrInitPaths();
-const cfgPath =
-if (!
+const cfgPath = path5.resolve(projectDir, ".pgwatch-config");
+if (!fs5.existsSync(cfgPath)) {
 console.error("Configuration file not found. Run 'postgres-ai mon local-install' first.");
 process.exitCode = 1;
 return;
 }
-const stats =
+const stats = fs5.statSync(cfgPath);
 if (stats.isDirectory()) {
 console.error(".pgwatch-config is a directory, expected a file. Cannot read credentials.");
 process.exitCode = 1;
 return;
 }
-const content =
+const content = fs5.readFileSync(cfgPath, "utf8");
 const lines = content.split(/\r?\n/);
 let password = "";
 for (const line of lines) {
@@ -25976,29 +27942,29 @@ mcp.command("install [client]").description("install MCP server configuration fo
 let configDir;
 switch (client) {
 case "cursor":
-configPath =
-configDir =
+configPath = path5.join(homeDir, ".cursor", "mcp.json");
+configDir = path5.dirname(configPath);
 break;
 case "windsurf":
-configPath =
-configDir =
+configPath = path5.join(homeDir, ".windsurf", "mcp.json");
+configDir = path5.dirname(configPath);
 break;
 case "codex":
-configPath =
-configDir =
+configPath = path5.join(homeDir, ".codex", "mcp.json");
+configDir = path5.dirname(configPath);
 break;
 default:
 console.error(`Configuration not implemented for: ${client}`);
 process.exitCode = 1;
 return;
 }
-if (!
-
+if (!fs5.existsSync(configDir)) {
+fs5.mkdirSync(configDir, { recursive: true });
 }
 let config2 = { mcpServers: {} };
-if (
+if (fs5.existsSync(configPath)) {
 try {
-const content =
+const content = fs5.readFileSync(configPath, "utf8");
 config2 = JSON.parse(content);
 if (!config2.mcpServers) {
 config2.mcpServers = {};
@@ -26011,7 +27977,7 @@ mcp.command("install [client]").description("install MCP server configuration fo
 command: pgaiPath,
 args: ["mcp", "start"]
 };
-
+fs5.writeFileSync(configPath, JSON.stringify(config2, null, 2), "utf8");
 console.log(`\u2713 PostgresAI MCP server configured for ${client}`);
 console.log(` Config file: ${configPath}`);
 console.log("");
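For illustration, the mcp.json written by this path for the `cursor` client would come out approximately as below. The server key and the resolved binary path are not visible in this hunk, so both are guesses:

    {
      "mcpServers": {
        "postgres-ai": { "command": "/usr/local/bin/pgai", "args": ["mcp", "start"] }
      }
    }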