postgresai 0.14.0-dev.53 → 0.14.0-dev.55
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +65 -38
- package/bin/postgres-ai.ts +461 -12
- package/bun.lock +3 -1
- package/bunfig.toml +19 -0
- package/dist/bin/postgres-ai.js +2208 -224
- package/lib/auth-server.ts +52 -5
- package/lib/checkup-api.ts +386 -0
- package/lib/checkup.ts +1327 -0
- package/lib/config.ts +3 -0
- package/lib/issues.ts +5 -41
- package/lib/metrics-embedded.ts +79 -0
- package/lib/metrics-loader.ts +127 -0
- package/lib/util.ts +61 -0
- package/package.json +14 -6
- package/packages/postgres-ai/README.md +26 -0
- package/packages/postgres-ai/bin/postgres-ai.js +27 -0
- package/packages/postgres-ai/package.json +27 -0
- package/scripts/embed-metrics.ts +154 -0
- package/test/auth.test.ts +258 -0
- package/test/checkup.integration.test.ts +273 -0
- package/test/checkup.test.ts +890 -0
- package/test/init.integration.test.ts +36 -33
- package/test/schema-validation.test.ts +81 -0
- package/test/test-utils.ts +122 -0
- package/dist/sql/01.role.sql +0 -16
- package/dist/sql/02.permissions.sql +0 -37
- package/dist/sql/03.optional_rds.sql +0 -6
- package/dist/sql/04.optional_self_managed.sql +0 -8
- package/dist/sql/05.helpers.sql +0 -415
package/dist/bin/postgres-ai.js
CHANGED
@@ -13064,7 +13064,7 @@ var {
 // package.json
 var package_default = {
   name: "postgresai",
-  version: "0.14.0-dev.53",
+  version: "0.14.0-dev.55",
   description: "postgres_ai CLI",
   license: "Apache-2.0",
   private: false,
@@ -13077,22 +13077,28 @@ var package_default = {
     url: "https://gitlab.com/postgres-ai/postgres_ai/-/issues"
   },
   bin: {
-    "postgres-ai": "./dist/bin/postgres-ai.js",
     postgresai: "./dist/bin/postgres-ai.js",
     pgai: "./dist/bin/postgres-ai.js"
   },
+  exports: {
+    ".": "./dist/bin/postgres-ai.js",
+    "./cli": "./dist/bin/postgres-ai.js"
+  },
   type: "module",
   engines: {
     node: ">=18"
   },
   scripts: {
-    …
+    "embed-metrics": "bun run scripts/embed-metrics.ts",
+    build: `bun run embed-metrics && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e "const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))"`,
     prepublishOnly: "npm run build",
     start: "bun ./bin/postgres-ai.ts --help",
     "start:node": "node ./dist/bin/postgres-ai.js --help",
-    dev: "bun --watch ./bin/postgres-ai.ts",
-    test: "bun test",
-    …
+    dev: "bun run embed-metrics && bun --watch ./bin/postgres-ai.ts",
+    test: "bun run embed-metrics && bun test",
+    "test:fast": "bun run embed-metrics && bun test --coverage=false",
+    "test:coverage": "bun run embed-metrics && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
+    typecheck: "bun run embed-metrics && bunx tsc --noEmit"
   },
   dependencies: {
     "@modelcontextprotocol/sdk": "^1.20.2",
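The new `exports` map in the hunk above makes the bundled CLI entry resolvable under two specifiers. A sketch of what Node's resolver now accepts; importing the bin script executes the CLI, so this is illustrative only:

```ts
// Both specifiers resolve to ./dist/bin/postgres-ai.js via the exports map.
await import("postgresai");       // "." entry
await import("postgresai/cli");   // "./cli" alias
```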
@@ -13104,6 +13110,8 @@ var package_default = {
     "@types/bun": "^1.1.14",
     "@types/js-yaml": "^4.0.9",
     "@types/pg": "^8.15.6",
+    ajv: "^8.17.1",
+    "ajv-formats": "^3.0.1",
     typescript: "^5.3.3"
   },
   publishConfig: {
@@ -13129,7 +13137,8 @@ function readConfig() {
   const config = {
     apiKey: null,
     baseUrl: null,
-    orgId: null
+    orgId: null,
+    defaultProject: null
   };
   const userConfigPath = getConfigPath();
   if (fs.existsSync(userConfigPath)) {
@@ -13139,6 +13148,7 @@ function readConfig() {
       config.apiKey = parsed.apiKey || null;
       config.baseUrl = parsed.baseUrl || null;
       config.orgId = parsed.orgId || null;
+      config.defaultProject = parsed.defaultProject || null;
       return config;
     } catch (err) {
       const message = err instanceof Error ? err.message : String(err);
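The `readConfig` hunks above add a fourth persisted field, `defaultProject`, which falls back to `null` like the existing keys. The resulting shape, as a sketch (the interface name and string typing are assumptions, not package code):

```ts
interface CliConfig {
  apiKey: string | null;
  baseUrl: string | null;
  orgId: string | null;
  defaultProject: string | null; // new in 0.14.0-dev.55
}
```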
@@ -15858,9 +15868,10 @@ var safeLoadAll = renamed("safeLoadAll", "loadAll");
 var safeDump = renamed("safeDump", "dump");

 // bin/postgres-ai.ts
-import * as …
-import * as …
+import * as fs5 from "fs";
+import * as path5 from "path";
 import * as os3 from "os";
+import * as crypto2 from "crypto";

 // node_modules/pg/esm/index.mjs
 var import_lib = __toESM(require_lib2(), 1);
@@ -15876,9 +15887,10 @@ var Result = import_lib.default.Result;
 var TypeOverrides = import_lib.default.TypeOverrides;
 var defaults = import_lib.default.defaults;
 // package.json
+var version = "0.14.0-dev.55";
 var package_default2 = {
   name: "postgresai",
-  version: "0.14.0-dev.53",
+  version,
   description: "postgres_ai CLI",
   license: "Apache-2.0",
   private: false,
@@ -15891,22 +15903,28 @@ var package_default2 = {
     url: "https://gitlab.com/postgres-ai/postgres_ai/-/issues"
   },
   bin: {
-    "postgres-ai": "./dist/bin/postgres-ai.js",
     postgresai: "./dist/bin/postgres-ai.js",
     pgai: "./dist/bin/postgres-ai.js"
   },
+  exports: {
+    ".": "./dist/bin/postgres-ai.js",
+    "./cli": "./dist/bin/postgres-ai.js"
+  },
   type: "module",
   engines: {
     node: ">=18"
   },
   scripts: {
-    …
+    "embed-metrics": "bun run scripts/embed-metrics.ts",
+    build: `bun run embed-metrics && bun build ./bin/postgres-ai.ts --outdir ./dist/bin --target node && node -e "const fs=require('fs');const f='./dist/bin/postgres-ai.js';fs.writeFileSync(f,fs.readFileSync(f,'utf8').replace('#!/usr/bin/env bun','#!/usr/bin/env node'))"`,
     prepublishOnly: "npm run build",
     start: "bun ./bin/postgres-ai.ts --help",
     "start:node": "node ./dist/bin/postgres-ai.js --help",
-    dev: "bun --watch ./bin/postgres-ai.ts",
-    test: "bun test",
-    …
+    dev: "bun run embed-metrics && bun --watch ./bin/postgres-ai.ts",
+    test: "bun run embed-metrics && bun test",
+    "test:fast": "bun run embed-metrics && bun test --coverage=false",
+    "test:coverage": "bun run embed-metrics && bun test --coverage && echo 'Coverage report: cli/coverage/lcov-report/index.html'",
+    typecheck: "bun run embed-metrics && bunx tsc --noEmit"
   },
   dependencies: {
     "@modelcontextprotocol/sdk": "^1.20.2",
@@ -15918,6 +15936,8 @@ var package_default2 = {
     "@types/bun": "^1.1.14",
     "@types/js-yaml": "^4.0.9",
     "@types/pg": "^8.15.6",
+    ajv: "^8.17.1",
+    "ajv-formats": "^3.0.1",
     typescript: "^5.3.3"
   },
   publishConfig: {
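`ajv` and `ajv-formats` enter the dependency list (this release also adds `test/schema-validation.test.ts`), which points at JSON Schema validation of CLI output. A generic ajv sketch of that pattern; the schema and data below are placeholders, not the package's actual wiring:

```ts
import Ajv from "ajv";
import addFormats from "ajv-formats";

const ajv = new Ajv({ allErrors: true });
addFormats(ajv); // enables "date-time", "uri", etc.
const validate = ajv.compile({ type: "object", required: ["version"] });
const report = { version: "0.14.0-dev.55" }; // placeholder document
if (!validate(report)) console.error(validate.errors);
```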
@@ -15943,7 +15963,8 @@ function readConfig2() {
   const config = {
     apiKey: null,
     baseUrl: null,
-    orgId: null
+    orgId: null,
+    defaultProject: null
   };
   const userConfigPath = getConfigPath2();
   if (fs2.existsSync(userConfigPath)) {
@@ -15953,6 +15974,7 @@ function readConfig2() {
       config.apiKey = parsed.apiKey || null;
       config.baseUrl = parsed.baseUrl || null;
       config.orgId = parsed.orgId || null;
+      config.defaultProject = parsed.defaultProject || null;
       return config;
     } catch (err) {
       const message = err instanceof Error ? err.message : String(err);
@@ -15983,6 +16005,49 @@ function readConfig2() {
 }

 // lib/util.ts
+var HTTP_STATUS_MESSAGES = {
+  400: "Bad Request",
+  401: "Unauthorized - check your API key",
+  403: "Forbidden - access denied",
+  404: "Not Found",
+  408: "Request Timeout",
+  429: "Too Many Requests - rate limited",
+  500: "Internal Server Error",
+  502: "Bad Gateway - server temporarily unavailable",
+  503: "Service Unavailable - server temporarily unavailable",
+  504: "Gateway Timeout - server temporarily unavailable"
+};
+function isHtmlContent(text) {
+  const trimmed = text.trim();
+  return trimmed.startsWith("<!DOCTYPE") || trimmed.startsWith("<html") || trimmed.startsWith("<HTML");
+}
+function formatHttpError(operation, status, responseBody) {
+  const statusMessage = HTTP_STATUS_MESSAGES[status] || "Request failed";
+  let errMsg = `${operation}: HTTP ${status} - ${statusMessage}`;
+  if (responseBody) {
+    if (isHtmlContent(responseBody)) {
+      return errMsg;
+    }
+    try {
+      const errObj = JSON.parse(responseBody);
+      const message = errObj.message || errObj.error || errObj.detail;
+      if (message && typeof message === "string") {
+        errMsg += `
+${message}`;
+      } else {
+        errMsg += `
+${JSON.stringify(errObj, null, 2)}`;
+      }
+    } catch {
+      const trimmed = responseBody.trim();
+      if (trimmed.length > 0 && trimmed.length < 500) {
+        errMsg += `
+${trimmed}`;
+      }
+    }
+  }
+  return errMsg;
+}
 function maskSecret(secret) {
   if (!secret)
     return "";
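Behavior of the new `formatHttpError` helper, traced from the code above (the inputs are made up):

```ts
formatHttpError("Failed to fetch issues", 401, '{"error":"invalid API key"}');
// -> "Failed to fetch issues: HTTP 401 - Unauthorized - check your API key\ninvalid API key"

formatHttpError("Failed to fetch issues", 502, "<!DOCTYPE html><html>…</html>");
// -> "Failed to fetch issues: HTTP 502 - Bad Gateway - server temporarily unavailable"
// HTML bodies are detected by isHtmlContent and dropped; non-JSON text is kept only if under 500 chars.
```

The eight issue-API call sites in the hunks below (four in each bundled copy) now delegate to it, replacing inline JSON-or-raw-body handling.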
@@ -16049,18 +16114,7 @@ async function fetchIssues(params) {
       throw new Error(`Failed to parse issues response: ${data}`);
     }
   } else {
-    …
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issues", response.status, data));
   }
 }
 async function fetchIssueComments(params) {
@@ -16101,18 +16155,7 @@ async function fetchIssueComments(params) {
       throw new Error(`Failed to parse issue comments response: ${data}`);
     }
   } else {
-    …
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issue comments", response.status, data));
   }
 }
 async function fetchIssue(params) {
@@ -16161,18 +16204,7 @@ async function fetchIssue(params) {
       throw new Error(`Failed to parse issue response: ${data}`);
     }
   } else {
-    …
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issue", response.status, data));
   }
 }
 async function createIssueComment(params) {
@@ -16226,18 +16258,7 @@ async function createIssueComment(params) {
       throw new Error(`Failed to parse create comment response: ${data}`);
     }
   } else {
-    …
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to create issue comment", response.status, data));
   }
 }

@@ -17104,10 +17125,10 @@ var ksuid = /^[A-Za-z0-9]{27}$/;
 var nanoid = /^[a-zA-Z0-9_-]{21}$/;
 var duration = /^P(?:(\d+W)|(?!.*W)(?=\d|T\d)(\d+Y)?(\d+M)?(\d+D)?(T(?=\d)(\d+H)?(\d+M)?(\d+([.,]\d+)?S)?)?)$/;
 var guid = /^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})$/;
-var uuid = (version) => {
-  if (!version)
+var uuid = (version2) => {
+  if (!version2)
     return /^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-8][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}|00000000-0000-0000-0000-000000000000|ffffffff-ffff-ffff-ffff-ffffffffffff)$/;
-  return new RegExp(`^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-${version}[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$`);
+  return new RegExp(`^([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-${version2}[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12})$`);
 };
 var email = /^(?!\.)(?!.*\.\.)([A-Za-z0-9_'+\-\.]*)[A-Za-z0-9_+-]@([A-Za-z0-9][A-Za-z0-9\-]*\.)+[A-Za-z]{2,}$/;
 var _emoji = `^(\\p{Extended_Pictographic}|\\p{Emoji_Component})+$`;
@@ -17576,7 +17597,7 @@ class Doc {
 }

 // node_modules/zod/v4/core/versions.js
-var version = {
+var version2 = {
   major: 4,
   minor: 2,
   patch: 1
@@ -17588,7 +17609,7 @@ var $ZodType = /* @__PURE__ */ $constructor("$ZodType", (inst, def) => {
   inst ?? (inst = {});
   inst._zod.def = def;
   inst._zod.bag = inst._zod.bag || {};
-  inst._zod.version = version;
+  inst._zod.version = version2;
   const checks = [...inst._zod.def.checks ?? []];
   if (inst._zod.traits.has("$ZodCheck")) {
     checks.unshift(inst);
@@ -23273,18 +23294,7 @@ async function fetchIssues2(params) {
       throw new Error(`Failed to parse issues response: ${data}`);
     }
   } else {
-    …
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issues", response.status, data));
   }
 }
 async function fetchIssueComments2(params) {
@@ -23325,18 +23335,7 @@ async function fetchIssueComments2(params) {
       throw new Error(`Failed to parse issue comments response: ${data}`);
     }
   } else {
-    …
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issue comments", response.status, data));
   }
 }
 async function fetchIssue2(params) {
@@ -23385,18 +23384,7 @@ async function fetchIssue2(params) {
       throw new Error(`Failed to parse issue response: ${data}`);
     }
   } else {
-    …
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to fetch issue", response.status, data));
   }
 }
 async function createIssueComment2(params) {
@@ -23450,18 +23438,7 @@ async function createIssueComment2(params) {
       throw new Error(`Failed to parse create comment response: ${data}`);
     }
   } else {
-    …
-    if (data) {
-      try {
-        const errObj = JSON.parse(data);
-        errMsg += `
-${JSON.stringify(errObj, null, 2)}`;
-      } catch {
-        errMsg += `
-${data}`;
-      }
-    }
-    throw new Error(errMsg);
+    throw new Error(formatHttpError("Failed to create issue comment", response.status, data));
   }
 }

@@ -24055,20 +24032,32 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
   let actualPort = port;
   let resolveCallback;
   let rejectCallback;
+  let resolveReady;
+  let rejectReady;
   let serverInstance = null;
   const promise2 = new Promise((resolve4, reject) => {
     resolveCallback = resolve4;
    rejectCallback = reject;
   });
+  const ready = new Promise((resolve4, reject) => {
+    resolveReady = resolve4;
+    rejectReady = reject;
+  });
+  let timeoutId = null;
   const stopServer = () => {
+    if (timeoutId) {
+      clearTimeout(timeoutId);
+      timeoutId = null;
+    }
    if (serverInstance) {
      serverInstance.close();
      serverInstance = null;
    }
  };
-  setTimeout(() => {
+  timeoutId = setTimeout(() => {
    if (!resolved) {
      resolved = true;
+      timeoutId = null;
      stopServer();
      rejectCallback(new Error("Authentication timeout. Please try again."));
    }
@@ -24091,7 +24080,10 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
   const errorDescription = url.searchParams.get("error_description");
   if (error2) {
     resolved = true;
-    …
+    if (timeoutId) {
+      clearTimeout(timeoutId);
+      timeoutId = null;
+    }
     setTimeout(() => stopServer(), 100);
     rejectCallback(new Error(`OAuth error: ${error2}${errorDescription ? ` - ${errorDescription}` : ""}`));
     res.writeHead(400, { "Content-Type": "text/html" });
@@ -24145,7 +24137,10 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
   }
   if (expectedState && state !== expectedState) {
     resolved = true;
-    …
+    if (timeoutId) {
+      clearTimeout(timeoutId);
+      timeoutId = null;
+    }
     setTimeout(() => stopServer(), 100);
     rejectCallback(new Error("State mismatch (possible CSRF attack)"));
     res.writeHead(400, { "Content-Type": "text/html" });
@@ -24172,7 +24167,10 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
     return;
   }
   resolved = true;
-  …
+  if (timeoutId) {
+    clearTimeout(timeoutId);
+    timeoutId = null;
+  }
   resolveCallback({ code, state });
   setTimeout(() => stopServer(), 100);
   res.writeHead(200, { "Content-Type": "text/html" });
@@ -24197,15 +24195,32 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
   </html>
   `);
   });
+  serverInstance.on("error", (err) => {
+    if (timeoutId) {
+      clearTimeout(timeoutId);
+      timeoutId = null;
+    }
+    if (err.code === "EADDRINUSE") {
+      rejectReady(new Error(`Port ${port} is already in use`));
+    } else {
+      rejectReady(new Error(`Server error: ${err.message}`));
+    }
+    if (!resolved) {
+      resolved = true;
+      rejectCallback(err);
+    }
+  });
   serverInstance.listen(port, "127.0.0.1", () => {
     const address = serverInstance?.address();
     if (address && typeof address === "object") {
       actualPort = address.port;
     }
+    resolveReady(actualPort);
   });
   return {
     server: { stop: stopServer },
     promise: promise2,
+    ready,
     getPort: () => actualPort
   };
 }
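`createCallbackServer` now returns a `ready` promise that resolves with the bound port once `listen` succeeds and rejects early on `EADDRINUSE`, and the authentication timeout is kept in `timeoutId` so every exit path can cancel it. A hypothetical caller, to show the intended flow; `openBrowser` and `authUrl` are placeholders, not package APIs:

```ts
const { promise, ready } = createCallbackServer(0, expectedState);
const boundPort = await ready; // fails here, not after a hang, if the port is taken
// openBrowser(`${authUrl}?redirect_uri=http://127.0.0.1:${boundPort}/callback`);
const { code, state } = await promise; // settles when the OAuth redirect arrives
```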
@@ -24213,6 +24228,1667 @@ function createCallbackServer(port = 0, expectedState = null, timeoutMs = 300000
 // bin/postgres-ai.ts
 import { createInterface } from "readline";
 import * as childProcess from "child_process";
+
+// lib/checkup.ts
+import * as fs4 from "fs";
+import * as path4 from "path";
+
+// lib/metrics-embedded.ts
+var METRICS = {
+  settings: {
+    description: "This metric collects various PostgreSQL server settings and configurations. It provides insights into the server's configuration, including version, memory settings, and other important parameters. This metric is useful for monitoring server settings and ensuring optimal performance. Note: For lock_timeout and statement_timeout, we use reset_val instead of setting because pgwatch overrides these during metric collection, which would mask the actual configured values.",
+    sqls: {
+      11: `with base as ( /* pgwatch_generated */
+  select
+    name,
+    -- Use reset_val for lock_timeout/statement_timeout because pgwatch overrides them
+    -- during collection (lock_timeout=100ms, statement_timeout per-metric).
+    case
+      when name in ('lock_timeout', 'statement_timeout') then reset_val
+      else setting
+    end as effective_setting,
+    unit,
+    category,
+    vartype,
+    -- For lock_timeout/statement_timeout, compare reset_val with boot_val
+    -- since source becomes 'session' during collection.
+    case
+      when name in ('lock_timeout', 'statement_timeout') then (reset_val = boot_val)
+      else (source = 'default')
+    end as is_default_bool
+  from pg_settings
+), with_numeric as (
+  select
+    *,
+    case
+      when effective_setting ~ '^-?[0-9]+$' then effective_setting::bigint
+      else null
+    end as numeric_value
+  from base
+)
+select
+  (extract(epoch from now()) * 1e9)::int8 as epoch_ns,
+  current_database() as tag_datname,
+  name as tag_setting_name,
+  effective_setting as tag_setting_value,
+  unit as tag_unit,
+  category as tag_category,
+  vartype as tag_vartype,
+  numeric_value,
+  case
+    when numeric_value is null then null
+    when unit = '8kB' then numeric_value * 8192
+    when unit = 'kB' then numeric_value * 1024
+    when unit = 'MB' then numeric_value * 1024 * 1024
+    when unit = 'B' then numeric_value
+    when unit = 'ms' then numeric_value::numeric / 1000
+    when unit = 's' then numeric_value::numeric
+    when unit = 'min' then numeric_value::numeric * 60
+    else null
+  end as setting_normalized,
+  case unit
+    when '8kB' then 'bytes'
+    when 'kB' then 'bytes'
+    when 'MB' then 'bytes'
+    when 'B' then 'bytes'
+    when 'ms' then 'seconds'
+    when 's' then 'seconds'
+    when 'min' then 'seconds'
+    else null
+  end as unit_normalized,
+  case when is_default_bool then 1 else 0 end as is_default,
+  1 as configured
+from with_numeric`
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15
+  },
+  db_stats: {
+    description: "Retrieves key statistics from the PostgreSQL `pg_stat_database` view, providing insights into the current database's performance. It returns the number of backends, transaction commits and rollbacks, buffer reads and hits, tuple statistics, conflicts, temporary files and bytes, deadlocks, block read and write times, postmaster uptime, backup duration, recovery status, system identifier, and invalid indexes. This metric helps administrators monitor database activity and performance.",
+    sqls: {
+      11: `select /* pgwatch_generated */
+  (extract(epoch from now()) * 1e9)::int8 as epoch_ns,
+  current_database() as tag_datname,
+  numbackends,
+  xact_commit,
+  xact_rollback,
+  blks_read,
+  blks_hit,
+  tup_returned,
+  tup_fetched,
+  tup_inserted,
+  tup_updated,
+  tup_deleted,
+  conflicts,
+  temp_files,
+  temp_bytes,
+  deadlocks,
+  blk_read_time,
+  blk_write_time,
+  extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,
+  case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,
+  system_identifier::text as tag_sys_id,
+  (select count(*) from pg_index i
+    where not indisvalid
+    and not exists ( /* leave out ones that are being actively rebuilt */
+      select * from pg_locks l
+      join pg_stat_activity a using (pid)
+      where l.relation = i.indexrelid
+      and a.state = 'active'
+      and a.query ~* 'concurrently'
+  )) as invalid_indexes
+from
+  pg_stat_database, pg_control_system()
+where
+  datname = current_database()`,
+      12: `select /* pgwatch_generated */
+  (extract(epoch from now()) * 1e9)::int8 as epoch_ns,
+  current_database() as tag_datname,
+  numbackends,
+  xact_commit,
+  xact_rollback,
+  blks_read,
+  blks_hit,
+  tup_returned,
+  tup_fetched,
+  tup_inserted,
+  tup_updated,
+  tup_deleted,
+  conflicts,
+  temp_files,
+  temp_bytes,
+  deadlocks,
+  blk_read_time,
+  blk_write_time,
+  extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,
+  extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,
+  checksum_failures,
+  extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,
+  case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,
+  system_identifier::text as tag_sys_id,
+  (select count(*) from pg_index i
+    where not indisvalid
+    and not exists ( /* leave out ones that are being actively rebuilt */
+      select * from pg_locks l
+      join pg_stat_activity a using (pid)
+      where l.relation = i.indexrelid
+      and a.state = 'active'
+      and a.query ~* 'concurrently'
+  )) as invalid_indexes
+from
+  pg_stat_database, pg_control_system()
+where
+  datname = current_database()`,
+      14: `select /* pgwatch_generated */
+  (extract(epoch from now()) * 1e9)::int8 as epoch_ns,
+  current_database() as tag_datname,
+  numbackends,
+  xact_commit,
+  xact_rollback,
+  blks_read,
+  blks_hit,
+  tup_returned,
+  tup_fetched,
+  tup_inserted,
+  tup_updated,
+  tup_deleted,
+  conflicts,
+  temp_files,
+  temp_bytes,
+  deadlocks,
+  blk_read_time,
+  blk_write_time,
+  extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,
+  extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,
+  checksum_failures,
+  extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,
+  case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,
+  system_identifier::text as tag_sys_id,
+  session_time::int8,
+  active_time::int8,
+  idle_in_transaction_time::int8,
+  sessions,
+  sessions_abandoned,
+  sessions_fatal,
+  sessions_killed,
+  (select count(*) from pg_index i
+    where not indisvalid
+    and not exists ( /* leave out ones that are being actively rebuilt */
+      select * from pg_locks l
+      join pg_stat_activity a using (pid)
+      where l.relation = i.indexrelid
+      and a.state = 'active'
+      and a.query ~* 'concurrently'
+  )) as invalid_indexes
+from
+  pg_stat_database, pg_control_system()
+where
+  datname = current_database()`,
+      15: `select /* pgwatch_generated */
+  (extract(epoch from now()) * 1e9)::int8 as epoch_ns,
+  current_database() as tag_datname,
+  numbackends,
+  xact_commit,
+  xact_rollback,
+  blks_read,
+  blks_hit,
+  tup_returned,
+  tup_fetched,
+  tup_inserted,
+  tup_updated,
+  tup_deleted,
+  conflicts,
+  temp_files,
+  temp_bytes,
+  deadlocks,
+  blk_read_time,
+  blk_write_time,
+  extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,
+  checksum_failures,
+  extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,
+  case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,
+  system_identifier::text as tag_sys_id,
+  session_time::int8,
+  active_time::int8,
+  idle_in_transaction_time::int8,
+  sessions,
+  sessions_abandoned,
+  sessions_fatal,
+  sessions_killed,
+  (select count(*) from pg_index i
+    where not indisvalid
+    and not exists ( /* leave out ones that are being actively rebuilt */
+      select * from pg_locks l
+      join pg_stat_activity a using (pid)
+      where l.relation = i.indexrelid
+      and a.state = 'active'
+      and a.query ~* 'concurrently'
+  )) as invalid_indexes
+from
+  pg_stat_database, pg_control_system()
+where
+  datname = current_database()`
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15
+  },
+  db_size: {
+    description: "Retrieves the size of the current database and the size of the `pg_catalog` schema, providing insights into the storage usage of the database. It returns the size in bytes for both the current database and the catalog schema. This metric helps administrators monitor database size and storage consumption.",
+    sqls: {
+      11: `select /* pgwatch_generated */
+  (extract(epoch from now()) * 1e9)::int8 as epoch_ns,
+  current_database() as tag_datname,
+  pg_database_size(current_database()) as size_b,
+  (select sum(pg_total_relation_size(c.oid))::int8
+    from pg_class c join pg_namespace n on n.oid = c.relnamespace
+    where nspname = 'pg_catalog' and relkind = 'r'
+  ) as catalog_size_b`
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 300
+  },
+  pg_invalid_indexes: {
+    description: "This metric identifies invalid indexes in the database. It provides insights into the number of invalid indexes and their details. This metric helps administrators identify and fix invalid indexes to improve database performance.",
+    sqls: {
+      11: `with fk_indexes as ( /* pgwatch_generated */
+  select
+    schemaname as tag_schema_name,
+    (indexrelid::regclass)::text as tag_index_name,
+    (relid::regclass)::text as tag_table_name,
+    (confrelid::regclass)::text as tag_fk_table_ref,
+    array_to_string(indclass, ', ') as tag_opclasses
+  from
+    pg_stat_all_indexes
+    join pg_index using (indexrelid)
+    left join pg_constraint
+      on array_to_string(indkey, ',') = array_to_string(conkey, ',')
+      and schemaname = (connamespace::regnamespace)::text
+      and conrelid = relid
+      and contype = 'f'
+  where idx_scan = 0
+    and indisunique is false
+    and conkey is not null --conkey is not null then true else false end as is_fk_idx
+), data as (
+  select
+    pci.relname as tag_index_name,
+    pn.nspname as tag_schema_name,
+    pct.relname as tag_table_name,
+    quote_ident(pn.nspname) as tag_schema_name,
+    quote_ident(pci.relname) as tag_index_name,
+    quote_ident(pct.relname) as tag_table_name,
+    coalesce(nullif(quote_ident(pn.nspname), 'public') || '.', '') || quote_ident(pct.relname) as tag_relation_name,
+    pg_relation_size(pidx.indexrelid) index_size_bytes,
+    ((
+      select count(1)
+      from fk_indexes fi
+      where
+        fi.tag_fk_table_ref = pct.relname
+        and fi.tag_opclasses like (array_to_string(pidx.indclass, ', ') || '%')
+    ) > 0)::int as supports_fk
+  from pg_index pidx
+  join pg_class as pci on pci.oid = pidx.indexrelid
+  join pg_class as pct on pct.oid = pidx.indrelid
+  left join pg_namespace pn on pn.oid = pct.relnamespace
+  where pidx.indisvalid = false
+), data_total as (
+  select
+    sum(index_size_bytes) as index_size_bytes_sum
+  from data
+), num_data as (
+  select
+    row_number() over () num,
+    data.*
+  from data
+)
+select
+  (extract(epoch from now()) * 1e9)::int8 as epoch_ns,
+  current_database() as tag_datname,
+  num_data.*
+from num_data
+limit 1000;
+`
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15
+  },
+  unused_indexes: {
+    description: "This metric identifies unused indexes in the database. It provides insights into the number of unused indexes and their details. This metric helps administrators identify and fix unused indexes to improve database performance.",
+    sqls: {
+      11: `with fk_indexes as ( /* pgwatch_generated */
+  select
+    n.nspname as schema_name,
+    ci.relname as index_name,
+    cr.relname as table_name,
+    (confrelid::regclass)::text as fk_table_ref,
+    array_to_string(indclass, ', ') as opclasses
+  from pg_index i
+  join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
+  join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'
+  join pg_namespace n on n.oid = ci.relnamespace
+  join pg_constraint cn on cn.conrelid = cr.oid
+  left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid
+  where
+    contype = 'f'
+    and i.indisunique is false
+    and conkey is not null
+    and ci.relpages > 5
+    and si.idx_scan < 10
+), table_scans as (
+  select relid,
+    tables.idx_scan + tables.seq_scan as all_scans,
+    ( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del ) as writes,
+    pg_relation_size(relid) as table_size
+  from pg_stat_all_tables as tables
+  join pg_class c on c.oid = relid
+  where c.relpages > 5
+), indexes as (
+  select
+    i.indrelid,
+    i.indexrelid,
+    n.nspname as schema_name,
+    cr.relname as table_name,
+    ci.relname as index_name,
+    si.idx_scan,
+    pg_relation_size(i.indexrelid) as index_bytes,
+    ci.relpages,
+    (case when a.amname = 'btree' then true else false end) as idx_is_btree,
+    array_to_string(i.indclass, ', ') as opclasses
+  from pg_index i
+  join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
+  join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'
+  join pg_namespace n on n.oid = ci.relnamespace
+  join pg_am a on ci.relam = a.oid
+  left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid
+  where
+    i.indisunique = false
+    and i.indisvalid = true
+    and ci.relpages > 5
+), index_ratios as (
+  select
+    i.indexrelid as index_id,
+    i.schema_name,
+    i.table_name,
+    i.index_name,
+    idx_scan,
+    all_scans,
+    round(( case when all_scans = 0 then 0.0::numeric
+      else idx_scan::numeric/all_scans * 100 end), 2) as index_scan_pct,
+    writes,
+    round((case when writes = 0 then idx_scan::numeric else idx_scan::numeric/writes end), 2)
+      as scans_per_write,
+    index_bytes as index_size_bytes,
+    table_size as table_size_bytes,
+    i.relpages,
+    idx_is_btree,
+    i.opclasses,
+    (
+      select count(1)
+      from fk_indexes fi
+      where fi.fk_table_ref = i.table_name
+        and fi.schema_name = i.schema_name
+        and fi.opclasses like (i.opclasses || '%')
+    ) > 0 as supports_fk
+  from indexes i
+  join table_scans ts on ts.relid = i.indrelid
+)
+select
+  'Never Used Indexes' as tag_reason,
+  current_database() as tag_datname,
+  index_id,
+  schema_name as tag_schema_name,
+  table_name as tag_table_name,
+  index_name as tag_index_name,
+  pg_get_indexdef(index_id) as index_definition,
+  idx_scan,
+  all_scans,
+  index_scan_pct,
+  writes,
+  scans_per_write,
+  index_size_bytes,
+  table_size_bytes,
+  relpages,
+  idx_is_btree,
+  opclasses as tag_opclasses,
+  supports_fk
+from index_ratios
+where
+  idx_scan = 0
+  and idx_is_btree
+order by index_size_bytes desc
+limit 1000;
+`
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15
+  },
+  redundant_indexes: {
+    description: "This metric identifies redundant indexes that can potentially be dropped to save storage space and improve write performance. It analyzes index relationships and finds indexes that are covered by other indexes, considering column order, operator classes, and foreign key constraints. Uses the exact logic from tmp.sql with JSON aggregation and proper thresholds.",
+    sqls: {
+      11: `with fk_indexes as ( /* pgwatch_generated */
+  select
+    n.nspname as schema_name,
+    ci.relname as index_name,
+    cr.relname as table_name,
+    (confrelid::regclass)::text as fk_table_ref,
+    array_to_string(indclass, ', ') as opclasses
+  from pg_index i
+  join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
+  join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'
+  join pg_namespace n on n.oid = ci.relnamespace
+  join pg_constraint cn on cn.conrelid = cr.oid
+  left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid
+  where
+    contype = 'f'
+    and i.indisunique is false
+    and conkey is not null
+    and ci.relpages > 5
+    and si.idx_scan < 10
+),
+-- Redundant indexes
+index_data as (
+  select
+    *,
+    indkey::text as columns,
+    array_to_string(indclass, ', ') as opclasses
+  from pg_index i
+  join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
+  where indisvalid = true and ci.relpages > 5
+), redundant_indexes as (
+  select
+    i2.indexrelid as index_id,
+    tnsp.nspname as schema_name,
+    trel.relname as table_name,
+    pg_relation_size(trel.oid) as table_size_bytes,
+    irel.relname as index_name,
+    am1.amname as access_method,
+    (i1.indexrelid::regclass)::text as reason,
+    i1.indexrelid as reason_index_id,
+    pg_get_indexdef(i1.indexrelid) main_index_def,
+    pg_relation_size(i1.indexrelid) main_index_size_bytes,
+    pg_get_indexdef(i2.indexrelid) index_def,
+    pg_relation_size(i2.indexrelid) index_size_bytes,
+    s.idx_scan as index_usage,
+    quote_ident(tnsp.nspname) as formated_schema_name,
+    coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(irel.relname) as formated_index_name,
+    quote_ident(trel.relname) as formated_table_name,
+    coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(trel.relname) as formated_relation_name,
+    i2.opclasses
+  from (
+    select indrelid, indexrelid, opclasses, indclass, indexprs, indpred, indisprimary, indisunique, columns
+    from index_data
+    order by indexrelid
+  ) as i1
+  join index_data as i2 on (
+    i1.indrelid = i2.indrelid -- same table
+    and i1.indexrelid <> i2.indexrelid -- NOT same index
+  )
+  inner join pg_opclass op1 on i1.indclass[0] = op1.oid
+  inner join pg_opclass op2 on i2.indclass[0] = op2.oid
+  inner join pg_am am1 on op1.opcmethod = am1.oid
+  inner join pg_am am2 on op2.opcmethod = am2.oid
+  join pg_stat_all_indexes as s on s.indexrelid = i2.indexrelid
+  join pg_class as trel on trel.oid = i2.indrelid
+  join pg_namespace as tnsp on trel.relnamespace = tnsp.oid
+  join pg_class as irel on irel.oid = i2.indexrelid
+  where
+    not i2.indisprimary -- index 1 is not primary
+    and not i2.indisunique -- index 1 is not unique (unique indexes serve constraint purpose)
+    and am1.amname = am2.amname -- same access type
+    and i1.columns like (i2.columns || '%') -- index 2 includes all columns from index 1
+    and i1.opclasses like (i2.opclasses || '%')
+    -- index expressions is same
+    and pg_get_expr(i1.indexprs, i1.indrelid) is not distinct from pg_get_expr(i2.indexprs, i2.indrelid)
+    -- index predicates is same
+    and pg_get_expr(i1.indpred, i1.indrelid) is not distinct from pg_get_expr(i2.indpred, i2.indrelid)
+), redundant_indexes_fk as (
+  select
+    ri.*,
+    ((
+      select count(1)
+      from fk_indexes fi
+      where
+        fi.fk_table_ref = ri.table_name
+        and fi.opclasses like (ri.opclasses || '%')
+    ) > 0)::int as supports_fk
+  from redundant_indexes ri
+),
+-- Cut recursive links
+redundant_indexes_tmp_num as (
+  select row_number() over () num, rig.*
+  from redundant_indexes_fk rig
+), redundant_indexes_tmp_links as (
+  select
+    ri1.*,
+    ri2.num as r_num
+  from redundant_indexes_tmp_num ri1
+  left join redundant_indexes_tmp_num ri2 on ri2.reason_index_id = ri1.index_id and ri1.reason_index_id = ri2.index_id
+), redundant_indexes_tmp_cut as (
+  select
+    *
+  from redundant_indexes_tmp_links
+  where num < r_num or r_num is null
+), redundant_indexes_cut_grouped as (
+  select
+    distinct(num),
+    *
+  from redundant_indexes_tmp_cut
+  order by index_size_bytes desc
+), redundant_indexes_grouped as (
+  select
+    index_id,
+    schema_name as tag_schema_name,
+    table_name,
+    table_size_bytes,
+    index_name as tag_index_name,
+    access_method as tag_access_method,
+    string_agg(distinct reason, ', ') as tag_reason,
+    index_size_bytes,
+    index_usage,
+    index_def as index_definition,
+    formated_index_name as tag_index_name,
+    formated_schema_name as tag_schema_name,
+    formated_table_name as tag_table_name,
+    formated_relation_name as tag_relation_name,
+    supports_fk::int as supports_fk,
+    json_agg(
+      distinct jsonb_build_object(
+        'index_name', reason,
+        'index_definition', main_index_def,
+        'index_size_bytes', main_index_size_bytes
+      )
+    )::text as redundant_to_json
+  from redundant_indexes_cut_grouped
+  group by
+    index_id,
+    table_size_bytes,
+    schema_name,
+    table_name,
+    index_name,
+    access_method,
+    index_def,
+    index_size_bytes,
+    index_usage,
+    formated_index_name,
+    formated_schema_name,
+    formated_table_name,
+    formated_relation_name,
+    supports_fk
+  order by index_size_bytes desc
+)
+select * from redundant_indexes_grouped
+limit 1000;
+`
+    },
+    gauges: ["*"],
+    statement_timeout_seconds: 15
+  },
+  stats_reset: {
+    description: "This metric tracks when statistics were last reset at the database level. It provides visibility into the freshness of statistics data, which is essential for understanding the reliability of usage metrics. A recent reset time indicates that usage statistics may not reflect long-term patterns. Note that Postgres tracks stats resets at the database level, not per-index or per-table.",
+    sqls: {
+      11: `select /* pgwatch_generated */
+  datname as tag_database_name,
+  extract(epoch from stats_reset)::int as stats_reset_epoch,
+  extract(epoch from now() - stats_reset)::int as seconds_since_reset
+from pg_stat_database
+where datname = current_database()
+  and stats_reset is not null;
+`
+    },
+    gauges: ["stats_reset_epoch", "seconds_since_reset"],
+    statement_timeout_seconds: 15
+  }
+};
+
+// lib/metrics-loader.ts
+function getMetricSql(metricName, pgMajorVersion = 16) {
+  const metric = METRICS[metricName];
+  if (!metric) {
+    throw new Error(`Metric "${metricName}" not found. Available metrics: ${Object.keys(METRICS).join(", ")}`);
+  }
+  const availableVersions = Object.keys(metric.sqls).map((v) => parseInt(v, 10)).sort((a, b) => b - a);
+  const matchingVersion = availableVersions.find((v) => v <= pgMajorVersion);
+  if (matchingVersion === undefined) {
+    throw new Error(`No compatible SQL version for metric "${metricName}" with PostgreSQL ${pgMajorVersion}. ` + `Available versions: ${availableVersions.join(", ")}`);
+  }
+  return metric.sqls[matchingVersion];
+}
+var METRIC_NAMES = {
+  H001: "pg_invalid_indexes",
+  H002: "unused_indexes",
+  H004: "redundant_indexes",
+  settings: "settings",
+  dbStats: "db_stats",
+  dbSize: "db_size",
+  statsReset: "stats_reset"
+};
+function transformMetricRow(row) {
+  const result = {};
+  for (const [key, value] of Object.entries(row)) {
+    if (key === "epoch_ns" || key === "num" || key === "tag_datname") {
+      continue;
+    }
+    const newKey = key.startsWith("tag_") ? key.slice(4) : key;
+    result[newKey] = value;
+  }
+  return result;
+}
+
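`getMetricSql` (above) resolves the newest SQL variant whose version key does not exceed the server's major version. Expected results, with illustrative arguments:

```ts
// db_stats ships SQL keyed 11, 12, 14 and 15 (see METRICS above).
getMetricSql("db_stats", 13); // returns the SQL keyed 12 (largest key <= 13)
getMetricSql("db_stats", 16); // returns the SQL keyed 15
getMetricSql("db_stats", 10); // throws: no compatible SQL version
```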
+// lib/checkup.ts
+var __dirname = "/builds/postgres-ai/postgres_ai/cli/lib";
+var SECONDS_PER_DAY = 86400;
+var SECONDS_PER_HOUR = 3600;
+var SECONDS_PER_MINUTE = 60;
+function toBool(val) {
+  return val === true || val === 1 || val === "t" || val === "true";
+}
+function parseVersionNum(versionNum) {
+  if (!versionNum || versionNum.length < 6) {
+    return { major: "", minor: "" };
+  }
+  try {
+    const num = parseInt(versionNum, 10);
+    return {
+      major: Math.floor(num / 1e4).toString(),
+      minor: (num % 1e4).toString()
+    };
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[parseVersionNum] Warning: Failed to parse "${versionNum}": ${errorMsg}`);
+    return { major: "", minor: "" };
+  }
+}
+function formatBytes(bytes) {
+  if (bytes === 0)
+    return "0 B";
+  if (bytes < 0)
+    return `-${formatBytes(-bytes)}`;
+  if (!Number.isFinite(bytes))
+    return `${bytes} B`;
+  const units = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"];
+  const i2 = Math.min(Math.floor(Math.log(bytes) / Math.log(1024)), units.length - 1);
+  return `${(bytes / Math.pow(1024, i2)).toFixed(2)} ${units[i2]}`;
+}
+function formatSettingPrettyValue(settingNormalized, unitNormalized, rawValue) {
+  if (settingNormalized === null || unitNormalized === null) {
+    return rawValue;
+  }
+  if (unitNormalized === "bytes") {
+    return formatBytes(settingNormalized);
+  }
+  if (unitNormalized === "seconds") {
+    const MS_PER_SECOND = 1000;
+    if (settingNormalized < 1) {
+      return `${(settingNormalized * MS_PER_SECOND).toFixed(0)} ms`;
+    } else if (settingNormalized < SECONDS_PER_MINUTE) {
+      return `${settingNormalized} s`;
+    } else {
+      return `${(settingNormalized / SECONDS_PER_MINUTE).toFixed(1)} min`;
+    }
+  }
+  return rawValue;
+}
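Traced behavior of the formatting helpers above (illustrative inputs):

```ts
parseVersionNum("160004");                          // { major: "16", minor: "4" }
formatBytes(16777216);                              // "16.00 MiB"
formatSettingPrettyValue(0.1, "seconds", "100ms");  // "100 ms"
formatSettingPrettyValue(90, "seconds", "90s");     // "1.5 min"
formatSettingPrettyValue(null, null, "on");         // "on" (unnormalizable values pass through)
```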
+async function getPostgresVersion(client) {
+  const result = await client.query(`
+    select name, setting
+    from pg_settings
+    where name in ('server_version', 'server_version_num')
+  `);
+  let version3 = "";
+  let serverVersionNum = "";
+  for (const row of result.rows) {
+    if (row.name === "server_version") {
+      version3 = row.setting;
+    } else if (row.name === "server_version_num") {
+      serverVersionNum = row.setting;
+    }
+  }
+  const { major, minor } = parseVersionNum(serverVersionNum);
+  return {
+    version: version3,
+    server_version_num: serverVersionNum,
+    server_major_ver: major,
+    server_minor_ver: minor
+  };
+}
+async function getSettings(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.settings, pgMajorVersion);
+  const result = await client.query(sql);
+  const settings = {};
+  for (const row of result.rows) {
+    const name = row.tag_setting_name;
+    const settingValue = row.tag_setting_value;
+    const unit = row.tag_unit || "";
+    const category = row.tag_category || "";
+    const vartype = row.tag_vartype || "";
+    const settingNormalized = row.setting_normalized !== null ? parseFloat(row.setting_normalized) : null;
+    const unitNormalized = row.unit_normalized || null;
+    settings[name] = {
+      setting: settingValue,
+      unit,
+      category,
+      context: "",
+      vartype,
+      pretty_value: formatSettingPrettyValue(settingNormalized, unitNormalized, settingValue)
+    };
+  }
+  return settings;
+}
+async function getAlteredSettings(client, pgMajorVersion = 16) {
+  const sql = getMetricSql(METRIC_NAMES.settings, pgMajorVersion);
+  const result = await client.query(sql);
+  const settings = {};
+  for (const row of result.rows) {
+    if (!toBool(row.is_default)) {
+      const name = row.tag_setting_name;
+      const settingValue = row.tag_setting_value;
+      const unit = row.tag_unit || "";
+      const category = row.tag_category || "";
+      const settingNormalized = row.setting_normalized !== null ? parseFloat(row.setting_normalized) : null;
+      const unitNormalized = row.unit_normalized || null;
+      settings[name] = {
+        value: settingValue,
+        unit,
+        category,
+        pretty_value: formatSettingPrettyValue(settingNormalized, unitNormalized, settingValue)
+      };
+    }
+  }
+  return settings;
+}
+async function getDatabaseSizes(client) {
+  const result = await client.query(`
+    select
+      datname,
+      pg_database_size(datname) as size_bytes
+    from pg_database
+    where datistemplate = false
+    order by size_bytes desc
+  `);
+  const sizes = {};
+  for (const row of result.rows) {
+    sizes[row.datname] = parseInt(row.size_bytes, 10);
+  }
+  return sizes;
+}
+async function getClusterInfo(client, pgMajorVersion = 16) {
+  const info = {};
+  const dbStatsSql = getMetricSql(METRIC_NAMES.dbStats, pgMajorVersion);
+  const statsResult = await client.query(dbStatsSql);
+  if (statsResult.rows.length > 0) {
+    const stats = statsResult.rows[0];
+    info.total_connections = {
+      value: String(stats.numbackends || 0),
+      unit: "connections",
+      description: "Current database connections"
+    };
+    info.total_commits = {
+      value: String(stats.xact_commit || 0),
+      unit: "transactions",
+      description: "Total committed transactions"
+    };
+    info.total_rollbacks = {
+      value: String(stats.xact_rollback || 0),
+      unit: "transactions",
+      description: "Total rolled back transactions"
+    };
+    const blocksHit = parseInt(stats.blks_hit || "0", 10);
+    const blocksRead = parseInt(stats.blks_read || "0", 10);
+    const totalBlocks = blocksHit + blocksRead;
+    const cacheHitRatio = totalBlocks > 0 ? (blocksHit / totalBlocks * 100).toFixed(2) : "0.00";
+    info.cache_hit_ratio = {
+      value: cacheHitRatio,
+      unit: "%",
+      description: "Buffer cache hit ratio"
+    };
+    info.blocks_read = {
+      value: String(blocksRead),
+      unit: "blocks",
+      description: "Total disk blocks read"
+    };
+    info.blocks_hit = {
+      value: String(blocksHit),
|
+
unit: "blocks",
|
|
25051
|
+
description: "Total buffer cache hits"
|
|
25052
|
+
};
|
|
25053
|
+
info.tuples_returned = {
|
|
25054
|
+
value: String(stats.tup_returned || 0),
|
|
25055
|
+
unit: "rows",
|
|
25056
|
+
description: "Total rows returned by queries"
|
|
25057
|
+
};
|
|
25058
|
+
info.tuples_fetched = {
|
|
25059
|
+
value: String(stats.tup_fetched || 0),
|
|
25060
|
+
unit: "rows",
|
|
25061
|
+
description: "Total rows fetched by queries"
|
|
25062
|
+
};
|
|
25063
|
+
info.tuples_inserted = {
|
|
25064
|
+
value: String(stats.tup_inserted || 0),
|
|
25065
|
+
unit: "rows",
|
|
25066
|
+
description: "Total rows inserted"
|
|
25067
|
+
};
|
|
25068
|
+
info.tuples_updated = {
|
|
25069
|
+
value: String(stats.tup_updated || 0),
|
|
25070
|
+
unit: "rows",
|
|
25071
|
+
description: "Total rows updated"
|
|
25072
|
+
};
|
|
25073
|
+
info.tuples_deleted = {
|
|
25074
|
+
value: String(stats.tup_deleted || 0),
|
|
25075
|
+
unit: "rows",
|
|
25076
|
+
description: "Total rows deleted"
|
|
25077
|
+
};
|
|
25078
|
+
info.total_deadlocks = {
|
|
25079
|
+
value: String(stats.deadlocks || 0),
|
|
25080
|
+
unit: "deadlocks",
|
|
25081
|
+
description: "Total deadlocks detected"
|
|
25082
|
+
};
|
|
25083
|
+
info.temp_files_created = {
|
|
25084
|
+
value: String(stats.temp_files || 0),
|
|
25085
|
+
unit: "files",
|
|
25086
|
+
description: "Total temporary files created"
|
|
25087
|
+
};
|
|
25088
|
+
const tempBytes = parseInt(stats.temp_bytes || "0", 10);
|
|
25089
|
+
info.temp_bytes_written = {
|
|
25090
|
+
value: formatBytes(tempBytes),
|
|
25091
|
+
unit: "bytes",
|
|
25092
|
+
description: "Total temporary file bytes written"
|
|
25093
|
+
};
|
|
25094
|
+
if (stats.postmaster_uptime_s) {
|
|
25095
|
+
const uptimeSeconds = parseInt(stats.postmaster_uptime_s, 10);
|
|
25096
|
+
const days = Math.floor(uptimeSeconds / SECONDS_PER_DAY);
|
|
25097
|
+
const hours = Math.floor(uptimeSeconds % SECONDS_PER_DAY / SECONDS_PER_HOUR);
|
|
25098
|
+
const minutes = Math.floor(uptimeSeconds % SECONDS_PER_HOUR / SECONDS_PER_MINUTE);
|
|
25099
|
+
info.uptime = {
|
|
25100
|
+
value: `${days} days ${hours}:${String(minutes).padStart(2, "0")}:${String(uptimeSeconds % SECONDS_PER_MINUTE).padStart(2, "0")}`,
|
|
25101
|
+
unit: "interval",
|
|
25102
|
+
description: "Server uptime"
|
|
25103
|
+
};
|
|
25104
|
+
}
|
|
25105
|
+
}
|
|
25106
|
+
const connResult = await client.query(`
|
|
25107
|
+
select
|
|
25108
|
+
coalesce(state, 'null') as state,
|
|
25109
|
+
count(*) as count
|
|
25110
|
+
from pg_stat_activity
|
|
25111
|
+
group by state
|
|
25112
|
+
`);
|
|
25113
|
+
for (const row of connResult.rows) {
|
|
25114
|
+
const stateKey = `connections_${row.state.replace(/\s+/g, "_")}`;
|
|
25115
|
+
info[stateKey] = {
|
|
25116
|
+
value: String(row.count),
|
|
25117
|
+
unit: "connections",
|
|
25118
|
+
description: `Connections in '${row.state}' state`
|
|
25119
|
+
};
|
|
25120
|
+
}
|
|
25121
|
+
const uptimeResult = await client.query(`
|
|
25122
|
+
select
|
|
25123
|
+
pg_postmaster_start_time() as start_time,
|
|
25124
|
+
current_timestamp - pg_postmaster_start_time() as uptime
|
|
25125
|
+
`);
|
|
25126
|
+
if (uptimeResult.rows.length > 0) {
|
|
25127
|
+
const uptime = uptimeResult.rows[0];
|
|
25128
|
+
const startTime = uptime.start_time instanceof Date ? uptime.start_time.toISOString() : String(uptime.start_time);
|
|
25129
|
+
info.start_time = {
|
|
25130
|
+
value: startTime,
|
|
25131
|
+
unit: "timestamp",
|
|
25132
|
+
description: "PostgreSQL server start time"
|
|
25133
|
+
};
|
|
25134
|
+
if (!info.uptime) {
|
|
25135
|
+
info.uptime = {
|
|
25136
|
+
value: String(uptime.uptime),
|
|
25137
|
+
unit: "interval",
|
|
25138
|
+
description: "Server uptime"
|
|
25139
|
+
};
|
|
25140
|
+
}
|
|
25141
|
+
}
|
|
25142
|
+
return info;
|
|
25143
|
+
}
|
|
25144
|
+
async function getInvalidIndexes(client, pgMajorVersion = 16) {
|
|
25145
|
+
const sql = getMetricSql(METRIC_NAMES.H001, pgMajorVersion);
|
|
25146
|
+
const result = await client.query(sql);
|
|
25147
|
+
return result.rows.map((row) => {
|
|
25148
|
+
const transformed = transformMetricRow(row);
|
|
25149
|
+
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
25150
|
+
return {
|
|
25151
|
+
schema_name: String(transformed.schema_name || ""),
|
|
25152
|
+
table_name: String(transformed.table_name || ""),
|
|
25153
|
+
index_name: String(transformed.index_name || ""),
|
|
25154
|
+
relation_name: String(transformed.relation_name || ""),
|
|
25155
|
+
index_size_bytes: indexSizeBytes,
|
|
25156
|
+
index_size_pretty: formatBytes(indexSizeBytes),
|
|
25157
|
+
supports_fk: toBool(transformed.supports_fk)
|
|
25158
|
+
};
|
|
25159
|
+
});
|
|
25160
|
+
}
|
|
25161
|
+
async function getUnusedIndexes(client, pgMajorVersion = 16) {
|
|
25162
|
+
const sql = getMetricSql(METRIC_NAMES.H002, pgMajorVersion);
|
|
25163
|
+
const result = await client.query(sql);
|
|
25164
|
+
return result.rows.map((row) => {
|
|
25165
|
+
const transformed = transformMetricRow(row);
|
|
25166
|
+
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
25167
|
+
return {
|
|
25168
|
+
schema_name: String(transformed.schema_name || ""),
|
|
25169
|
+
table_name: String(transformed.table_name || ""),
|
|
25170
|
+
index_name: String(transformed.index_name || ""),
|
|
25171
|
+
index_definition: String(transformed.index_definition || ""),
|
|
25172
|
+
reason: String(transformed.reason || ""),
|
|
25173
|
+
idx_scan: parseInt(String(transformed.idx_scan || 0), 10),
|
|
25174
|
+
index_size_bytes: indexSizeBytes,
|
|
25175
|
+
idx_is_btree: toBool(transformed.idx_is_btree),
|
|
25176
|
+
supports_fk: toBool(transformed.supports_fk),
|
|
25177
|
+
index_size_pretty: formatBytes(indexSizeBytes)
|
|
25178
|
+
};
|
|
25179
|
+
});
|
|
25180
|
+
}
|
|
25181
|
+
async function getStatsReset(client, pgMajorVersion = 16) {
|
|
25182
|
+
const sql = getMetricSql(METRIC_NAMES.statsReset, pgMajorVersion);
|
|
25183
|
+
const result = await client.query(sql);
|
|
25184
|
+
const row = result.rows[0] || {};
|
|
25185
|
+
const statsResetEpoch = row.stats_reset_epoch ? parseFloat(row.stats_reset_epoch) : null;
|
|
25186
|
+
const secondsSinceReset = row.seconds_since_reset ? parseInt(row.seconds_since_reset, 10) : null;
|
|
25187
|
+
const statsResetTime = statsResetEpoch ? new Date(statsResetEpoch * 1000).toISOString() : null;
|
|
25188
|
+
const daysSinceReset = secondsSinceReset !== null ? Math.floor(secondsSinceReset / SECONDS_PER_DAY) : null;
|
|
25189
|
+
let postmasterStartupEpoch = null;
|
|
25190
|
+
let postmasterStartupTime = null;
|
|
25191
|
+
let postmasterStartupError;
|
|
25192
|
+
try {
|
|
25193
|
+
const pmResult = await client.query(`
|
|
25194
|
+
select
|
|
25195
|
+
extract(epoch from pg_postmaster_start_time()) as postmaster_startup_epoch,
|
|
25196
|
+
pg_postmaster_start_time()::text as postmaster_startup_time
|
|
25197
|
+
`);
|
|
25198
|
+
if (pmResult.rows.length > 0) {
|
|
25199
|
+
postmasterStartupEpoch = pmResult.rows[0].postmaster_startup_epoch ? parseFloat(pmResult.rows[0].postmaster_startup_epoch) : null;
|
|
25200
|
+
postmasterStartupTime = pmResult.rows[0].postmaster_startup_time || null;
|
|
25201
|
+
}
|
|
25202
|
+
} catch (err) {
|
|
25203
|
+
const errorMsg = err instanceof Error ? err.message : String(err);
|
|
25204
|
+
postmasterStartupError = `Failed to query postmaster start time: ${errorMsg}`;
|
|
25205
|
+
console.log(`[getStatsReset] Warning: ${postmasterStartupError}`);
|
|
25206
|
+
}
|
|
25207
|
+
const statsResult = {
|
|
25208
|
+
stats_reset_epoch: statsResetEpoch,
|
|
25209
|
+
stats_reset_time: statsResetTime,
|
|
25210
|
+
days_since_reset: daysSinceReset,
|
|
25211
|
+
postmaster_startup_epoch: postmasterStartupEpoch,
|
|
25212
|
+
postmaster_startup_time: postmasterStartupTime
|
|
25213
|
+
};
|
|
25214
|
+
if (postmasterStartupError) {
|
|
25215
|
+
statsResult.postmaster_startup_error = postmasterStartupError;
|
|
25216
|
+
}
|
|
25217
|
+
return statsResult;
|
|
25218
|
+
}
|
|
25219
|
+
async function getCurrentDatabaseInfo(client, pgMajorVersion = 16) {
|
|
25220
|
+
const sql = getMetricSql(METRIC_NAMES.dbSize, pgMajorVersion);
|
|
25221
|
+
const result = await client.query(sql);
|
|
25222
|
+
const row = result.rows[0] || {};
|
|
25223
|
+
return {
|
|
25224
|
+
datname: row.tag_datname || "postgres",
|
|
25225
|
+
size_bytes: parseInt(row.size_b || "0", 10)
|
|
25226
|
+
};
|
|
25227
|
+
}
|
|
25228
|
+
function isValidRedundantToItem(item) {
|
|
25229
|
+
return typeof item === "object" && item !== null && !Array.isArray(item);
|
|
25230
|
+
}
|
|
25231
|
+
async function getRedundantIndexes(client, pgMajorVersion = 16) {
|
|
25232
|
+
const sql = getMetricSql(METRIC_NAMES.H004, pgMajorVersion);
|
|
25233
|
+
const result = await client.query(sql);
|
|
25234
|
+
return result.rows.map((row) => {
|
|
25235
|
+
const transformed = transformMetricRow(row);
|
|
25236
|
+
const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
|
|
25237
|
+
const tableSizeBytes = parseInt(String(transformed.table_size_bytes || 0), 10);
|
|
25238
|
+
let redundantTo = [];
|
|
25239
|
+
let parseError;
|
|
25240
|
+
try {
|
|
25241
|
+
const jsonStr = String(transformed.redundant_to_json || "[]");
|
|
25242
|
+
const parsed = JSON.parse(jsonStr);
|
|
25243
|
+
if (Array.isArray(parsed)) {
|
|
25244
|
+
redundantTo = parsed.filter(isValidRedundantToItem).map((item) => {
|
|
25245
|
+
const sizeBytes = parseInt(String(item.index_size_bytes ?? 0), 10);
|
|
25246
|
+
return {
|
|
25247
|
+
index_name: String(item.index_name ?? ""),
|
|
25248
|
+
index_definition: String(item.index_definition ?? ""),
|
|
25249
|
+
index_size_bytes: sizeBytes,
|
|
25250
|
+
index_size_pretty: formatBytes(sizeBytes)
|
|
25251
|
+
};
|
|
25252
|
+
});
|
|
25253
|
+
}
|
|
25254
|
+
} catch (err) {
|
|
25255
|
+
const errorMsg = err instanceof Error ? err.message : String(err);
|
|
25256
|
+
const indexName = String(transformed.index_name || "unknown");
|
|
25257
|
+
parseError = `Failed to parse redundant_to_json: ${errorMsg}`;
|
|
25258
|
+
console.log(`[H004] Warning: ${parseError} for index "${indexName}"`);
|
|
25259
|
+
}
|
|
25260
|
+
const result2 = {
|
|
25261
|
+
schema_name: String(transformed.schema_name || ""),
|
|
25262
|
+
table_name: String(transformed.table_name || ""),
|
|
25263
|
+
index_name: String(transformed.index_name || ""),
|
|
25264
|
+
relation_name: String(transformed.relation_name || ""),
|
|
25265
|
+
access_method: String(transformed.access_method || ""),
|
|
25266
|
+
reason: String(transformed.reason || ""),
|
|
25267
|
+
index_size_bytes: indexSizeBytes,
|
|
25268
|
+
table_size_bytes: tableSizeBytes,
|
|
25269
|
+
index_usage: parseInt(String(transformed.index_usage || 0), 10),
|
|
25270
|
+
supports_fk: toBool(transformed.supports_fk),
|
|
25271
|
+
index_definition: String(transformed.index_definition || ""),
|
|
25272
|
+
index_size_pretty: formatBytes(indexSizeBytes),
|
|
25273
|
+
table_size_pretty: formatBytes(tableSizeBytes),
|
|
25274
|
+
redundant_to: redundantTo
|
|
25275
|
+
};
|
|
25276
|
+
if (parseError) {
|
|
25277
|
+
result2.redundant_to_parse_error = parseError;
|
|
25278
|
+
}
|
|
25279
|
+
return result2;
|
|
25280
|
+
});
|
|
25281
|
+
}
|
|
25282
|
+
function createBaseReport(checkId, checkTitle, nodeName) {
|
|
25283
|
+
const buildTs = resolveBuildTs();
|
|
25284
|
+
return {
|
|
25285
|
+
version: version || null,
|
|
25286
|
+
build_ts: buildTs,
|
|
25287
|
+
generation_mode: "express",
|
|
25288
|
+
checkId,
|
|
25289
|
+
checkTitle,
|
|
25290
|
+
timestamptz: new Date().toISOString(),
|
|
25291
|
+
nodes: {
|
|
25292
|
+
primary: nodeName,
|
|
25293
|
+
standbys: []
|
|
25294
|
+
},
|
|
25295
|
+
results: {}
|
|
25296
|
+
};
|
|
25297
|
+
}
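
Illustrative only: the report envelope produced by createBaseReport("A002", "Postgres major version", "node-01") looks roughly like this; version and build_ts come from the bundle's resolution logic, so the values shown are placeholders:

  {
    version: "0.14.0-dev.55",
    build_ts: "<resolved build timestamp>",
    generation_mode: "express",
    checkId: "A002",
    checkTitle: "Postgres major version",
    timestamptz: "<ISO timestamp>",
    nodes: { primary: "node-01", standbys: [] },
    results: {}
  }
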
+function readTextFileSafe(p) {
+  try {
+    const value = fs4.readFileSync(p, "utf8").trim();
+    return value || null;
+  } catch {
+    return null;
+  }
+}
+function resolveBuildTs() {
+  const envPath = process.env.PGAI_BUILD_TS_FILE;
+  const p = envPath && envPath.trim() ? envPath.trim() : "/BUILD_TS";
+  const fromFile = readTextFileSafe(p);
+  if (fromFile)
+    return fromFile;
+  try {
+    const pkgRoot = path4.resolve(__dirname, "..");
+    const fromPkgFile = readTextFileSafe(path4.join(pkgRoot, "BUILD_TS"));
+    if (fromPkgFile)
+      return fromPkgFile;
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.warn(`[resolveBuildTs] Warning: path resolution failed: ${errorMsg}`);
+  }
+  try {
+    const pkgJsonPath = path4.resolve(__dirname, "..", "package.json");
+    const st = fs4.statSync(pkgJsonPath);
+    return st.mtime.toISOString();
+  } catch (err) {
+    if (process.env.DEBUG) {
+      const errorMsg = err instanceof Error ? err.message : String(err);
+      console.log(`[resolveBuildTs] Could not stat package.json, using current time: ${errorMsg}`);
+    }
+    return new Date().toISOString();
+  }
+}
+async function generateVersionReport(client, nodeName, checkId, checkTitle) {
+  const report = createBaseReport(checkId, checkTitle, nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  report.results[nodeName] = { data: { version: postgresVersion } };
+  return report;
+}
+async function generateSettingsReport(client, nodeName, checkId, checkTitle, fetchSettings) {
+  const report = createBaseReport(checkId, checkTitle, nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const settings = await fetchSettings(client, pgMajorVersion);
+  report.results[nodeName] = { data: settings, postgres_version: postgresVersion };
+  return report;
+}
+async function generateIndexReport(client, nodeName, checkId, checkTitle, indexFieldName, fetchIndexes, extraFields) {
+  const report = createBaseReport(checkId, checkTitle, nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const indexes = await fetchIndexes(client, pgMajorVersion);
+  const { datname: dbName, size_bytes: dbSizeBytes } = await getCurrentDatabaseInfo(client, pgMajorVersion);
+  const totalCount = indexes.length;
+  const totalSizeBytes = indexes.reduce((sum, idx) => sum + idx.index_size_bytes, 0);
+  const dbEntry = {
+    [indexFieldName]: indexes,
+    total_count: totalCount,
+    total_size_bytes: totalSizeBytes,
+    total_size_pretty: formatBytes(totalSizeBytes),
+    database_size_bytes: dbSizeBytes,
+    database_size_pretty: formatBytes(dbSizeBytes)
+  };
+  if (extraFields) {
+    Object.assign(dbEntry, await extraFields(client, pgMajorVersion));
+  }
+  report.results[nodeName] = { data: { [dbName]: dbEntry }, postgres_version: postgresVersion };
+  return report;
+}
+var generateA002 = (client, nodeName = "node-01") => generateVersionReport(client, nodeName, "A002", "Postgres major version");
+var generateA003 = (client, nodeName = "node-01") => generateSettingsReport(client, nodeName, "A003", "Postgres settings", getSettings);
+async function generateA004(client, nodeName = "node-01") {
+  const report = createBaseReport("A004", "Cluster information", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  report.results[nodeName] = {
+    data: {
+      general_info: await getClusterInfo(client, pgMajorVersion),
+      database_sizes: await getDatabaseSizes(client)
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+var generateA007 = (client, nodeName = "node-01") => generateSettingsReport(client, nodeName, "A007", "Altered settings", getAlteredSettings);
+var generateA013 = (client, nodeName = "node-01") => generateVersionReport(client, nodeName, "A013", "Postgres minor version");
+var generateH001 = (client, nodeName = "node-01") => generateIndexReport(client, nodeName, "H001", "Invalid indexes", "invalid_indexes", getInvalidIndexes);
+var generateH002 = (client, nodeName = "node-01") => generateIndexReport(client, nodeName, "H002", "Unused indexes", "unused_indexes", getUnusedIndexes, async (c, v) => ({ stats_reset: await getStatsReset(c, v) }));
+var generateH004 = (client, nodeName = "node-01") => generateIndexReport(client, nodeName, "H004", "Redundant indexes", "redundant_indexes", getRedundantIndexes);
+async function generateD004(client, nodeName) {
+  const report = createBaseReport("D004", "pg_stat_statements and pg_stat_kcache settings", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const allSettings = await getSettings(client, pgMajorVersion);
+  const pgssSettings = {};
+  for (const [name, setting] of Object.entries(allSettings)) {
+    if (name.startsWith("pg_stat_statements") || name.startsWith("pg_stat_kcache")) {
+      pgssSettings[name] = setting;
+    }
+  }
+  let pgssAvailable = false;
+  let pgssMetricsCount = 0;
+  let pgssTotalCalls = 0;
+  let pgssError = null;
+  const pgssSampleQueries = [];
+  try {
+    const extCheck = await client.query("select 1 from pg_extension where extname = 'pg_stat_statements'");
+    if (extCheck.rows.length > 0) {
+      pgssAvailable = true;
+      const statsResult = await client.query(`
+        select count(*) as cnt, coalesce(sum(calls), 0) as total_calls
+        from pg_stat_statements
+      `);
+      pgssMetricsCount = parseInt(statsResult.rows[0]?.cnt || "0", 10);
+      pgssTotalCalls = parseInt(statsResult.rows[0]?.total_calls || "0", 10);
+      const sampleResult = await client.query(`
+        select
+          queryid::text as queryid,
+          coalesce(usename, 'unknown') as "user",
+          coalesce(datname, 'unknown') as database,
+          calls
+        from pg_stat_statements s
+        left join pg_database d on s.dbid = d.oid
+        left join pg_user u on s.userid = u.usesysid
+        order by calls desc
+        limit 5
+      `);
+      for (const row of sampleResult.rows) {
+        pgssSampleQueries.push({
+          queryid: row.queryid,
+          user: row.user,
+          database: row.database,
+          calls: parseInt(row.calls, 10)
+        });
+      }
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[D004] Error querying pg_stat_statements: ${errorMsg}`);
+    pgssError = errorMsg;
+  }
+  let kcacheAvailable = false;
+  let kcacheMetricsCount = 0;
+  let kcacheTotalExecTime = 0;
+  let kcacheTotalUserTime = 0;
+  let kcacheTotalSystemTime = 0;
+  let kcacheError = null;
+  const kcacheSampleQueries = [];
+  try {
+    const extCheck = await client.query("select 1 from pg_extension where extname = 'pg_stat_kcache'");
+    if (extCheck.rows.length > 0) {
+      kcacheAvailable = true;
+      const statsResult = await client.query(`
+        select
+          count(*) as cnt,
+          coalesce(sum(exec_user_time + exec_system_time), 0) as total_exec_time,
+          coalesce(sum(exec_user_time), 0) as total_user_time,
+          coalesce(sum(exec_system_time), 0) as total_system_time
+        from pg_stat_kcache
+      `);
+      kcacheMetricsCount = parseInt(statsResult.rows[0]?.cnt || "0", 10);
+      kcacheTotalExecTime = parseFloat(statsResult.rows[0]?.total_exec_time || "0");
+      kcacheTotalUserTime = parseFloat(statsResult.rows[0]?.total_user_time || "0");
+      kcacheTotalSystemTime = parseFloat(statsResult.rows[0]?.total_system_time || "0");
+      const sampleResult = await client.query(`
+        select
+          queryid::text as queryid,
+          coalesce(usename, 'unknown') as "user",
+          (exec_user_time + exec_system_time) as exec_total_time
+        from pg_stat_kcache k
+        left join pg_user u on k.userid = u.usesysid
+        order by (exec_user_time + exec_system_time) desc
+        limit 5
+      `);
+      for (const row of sampleResult.rows) {
+        kcacheSampleQueries.push({
+          queryid: row.queryid,
+          user: row.user,
+          exec_total_time: parseFloat(row.exec_total_time)
+        });
+      }
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[D004] Error querying pg_stat_kcache: ${errorMsg}`);
+    kcacheError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      settings: pgssSettings,
+      pg_stat_statements_status: {
+        extension_available: pgssAvailable,
+        metrics_count: pgssMetricsCount,
+        total_calls: pgssTotalCalls,
+        sample_queries: pgssSampleQueries,
+        ...pgssError && { error: pgssError }
+      },
+      pg_stat_kcache_status: {
+        extension_available: kcacheAvailable,
+        metrics_count: kcacheMetricsCount,
+        total_exec_time: kcacheTotalExecTime,
+        total_user_time: kcacheTotalUserTime,
+        total_system_time: kcacheTotalSystemTime,
+        sample_queries: kcacheSampleQueries,
+        ...kcacheError && { error: kcacheError }
+      }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+async function generateF001(client, nodeName) {
+  const report = createBaseReport("F001", "Autovacuum: current settings", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const allSettings = await getSettings(client, pgMajorVersion);
+  const autovacuumSettings = {};
+  for (const [name, setting] of Object.entries(allSettings)) {
+    if (name.includes("autovacuum") || name.includes("vacuum")) {
+      autovacuumSettings[name] = setting;
+    }
+  }
+  report.results[nodeName] = {
+    data: autovacuumSettings,
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+async function generateG001(client, nodeName) {
+  const report = createBaseReport("G001", "Memory-related settings", nodeName);
+  const postgresVersion = await getPostgresVersion(client);
+  const pgMajorVersion = parseInt(postgresVersion.server_major_ver, 10) || 16;
+  const allSettings = await getSettings(client, pgMajorVersion);
+  const memorySettingNames = [
+    "shared_buffers",
+    "work_mem",
+    "maintenance_work_mem",
+    "effective_cache_size",
+    "wal_buffers",
+    "temp_buffers",
+    "max_connections",
+    "autovacuum_work_mem",
+    "hash_mem_multiplier",
+    "logical_decoding_work_mem",
+    "max_stack_depth",
+    "max_prepared_transactions",
+    "max_locks_per_transaction",
+    "max_pred_locks_per_transaction"
+  ];
+  const memorySettings = {};
+  for (const name of memorySettingNames) {
+    if (allSettings[name]) {
+      memorySettings[name] = allSettings[name];
+    }
+  }
+  let memoryUsage = {};
+  let memoryError = null;
+  try {
+    const memQuery = await client.query(`
+      select
+        pg_size_bytes(current_setting('shared_buffers')) as shared_buffers_bytes,
+        pg_size_bytes(current_setting('wal_buffers')) as wal_buffers_bytes,
+        pg_size_bytes(current_setting('work_mem')) as work_mem_bytes,
+        pg_size_bytes(current_setting('maintenance_work_mem')) as maintenance_work_mem_bytes,
+        pg_size_bytes(current_setting('effective_cache_size')) as effective_cache_size_bytes,
+        current_setting('max_connections')::int as max_connections
+    `);
+    if (memQuery.rows.length > 0) {
+      const row = memQuery.rows[0];
+      const sharedBuffersBytes = parseInt(row.shared_buffers_bytes, 10);
+      const walBuffersBytes = parseInt(row.wal_buffers_bytes, 10);
+      const workMemBytes = parseInt(row.work_mem_bytes, 10);
+      const maintenanceWorkMemBytes = parseInt(row.maintenance_work_mem_bytes, 10);
+      const effectiveCacheSizeBytes = parseInt(row.effective_cache_size_bytes, 10);
+      const maxConnections = row.max_connections;
+      const sharedMemoryTotal = sharedBuffersBytes + walBuffersBytes;
+      const maxWorkMemUsage = workMemBytes * maxConnections;
+      memoryUsage = {
+        shared_buffers_bytes: sharedBuffersBytes,
+        shared_buffers_pretty: formatBytes(sharedBuffersBytes),
+        wal_buffers_bytes: walBuffersBytes,
+        wal_buffers_pretty: formatBytes(walBuffersBytes),
+        shared_memory_total_bytes: sharedMemoryTotal,
+        shared_memory_total_pretty: formatBytes(sharedMemoryTotal),
+        work_mem_per_connection_bytes: workMemBytes,
+        work_mem_per_connection_pretty: formatBytes(workMemBytes),
+        max_work_mem_usage_bytes: maxWorkMemUsage,
+        max_work_mem_usage_pretty: formatBytes(maxWorkMemUsage),
+        maintenance_work_mem_bytes: maintenanceWorkMemBytes,
+        maintenance_work_mem_pretty: formatBytes(maintenanceWorkMemBytes),
+        effective_cache_size_bytes: effectiveCacheSizeBytes,
+        effective_cache_size_pretty: formatBytes(effectiveCacheSizeBytes)
+      };
+    }
+  } catch (err) {
+    const errorMsg = err instanceof Error ? err.message : String(err);
+    console.log(`[G001] Error calculating memory usage: ${errorMsg}`);
+    memoryError = errorMsg;
+  }
+  report.results[nodeName] = {
+    data: {
+      settings: memorySettings,
+      analysis: {
+        estimated_total_memory_usage: memoryUsage,
+        ...memoryError && { error: memoryError }
+      }
+    },
+    postgres_version: postgresVersion
+  };
+  return report;
+}
+var REPORT_GENERATORS = {
+  A002: generateA002,
+  A003: generateA003,
+  A004: generateA004,
+  A007: generateA007,
+  A013: generateA013,
+  D004: generateD004,
+  F001: generateF001,
+  G001: generateG001,
+  H001: generateH001,
+  H002: generateH002,
+  H004: generateH004
+};
+var CHECK_INFO = {
+  A002: "Postgres major version",
+  A003: "Postgres settings",
+  A004: "Cluster information",
+  A007: "Altered settings",
+  A013: "Postgres minor version",
+  D004: "pg_stat_statements and pg_stat_kcache settings",
+  F001: "Autovacuum: current settings",
+  G001: "Memory-related settings",
+  H001: "Invalid indexes",
+  H002: "Unused indexes",
+  H004: "Redundant indexes"
+};
+async function generateAllReports(client, nodeName = "node-01", onProgress) {
+  const reports = {};
+  const entries = Object.entries(REPORT_GENERATORS);
+  const total = entries.length;
+  let index = 0;
+  for (const [checkId, generator] of entries) {
+    index += 1;
+    onProgress?.({
+      checkId,
+      checkTitle: CHECK_INFO[checkId] || checkId,
+      index,
+      total
+    });
+    reports[checkId] = await generator(client, nodeName);
+  }
+  return reports;
+}
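
Illustrative only: driving the generators above from caller code, assuming a connected node-postgres Client as used elsewhere in the bundle:

  const reports = await generateAllReports(client, "node-01", (p) => {
    console.log(`[${p.index}/${p.total}] ${p.checkId}: ${p.checkTitle}`);
  });
  // reports is keyed by check ID: { A002: {...}, A003: {...}, ..., H004: {...} }
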
+
+// lib/checkup-api.ts
+import * as https from "https";
+import { URL as URL3 } from "url";
+var DEFAULT_RETRY_CONFIG = {
+  maxAttempts: 3,
+  initialDelayMs: 1000,
+  maxDelayMs: 1e4,
+  backoffMultiplier: 2
+};
+function isRetryableError(err) {
+  if (err instanceof RpcError) {
+    return err.statusCode >= 500 && err.statusCode < 600;
+  }
+  if (typeof err === "object" && err !== null && "code" in err) {
+    const code = String(err.code);
+    if (["ECONNRESET", "ECONNREFUSED", "ENOTFOUND", "ETIMEDOUT"].includes(code)) {
+      return true;
+    }
+  }
+  if (err instanceof Error) {
+    const msg = err.message.toLowerCase();
+    return msg.includes("timeout") || msg.includes("timed out") || msg.includes("econnreset") || msg.includes("econnrefused") || msg.includes("enotfound") || msg.includes("socket hang up") || msg.includes("network");
+  }
+  return false;
+}
+async function withRetry(fn, config2 = {}, onRetry) {
+  const { maxAttempts, initialDelayMs, maxDelayMs, backoffMultiplier } = {
+    ...DEFAULT_RETRY_CONFIG,
+    ...config2
+  };
+  let lastError;
+  let delayMs = initialDelayMs;
+  for (let attempt = 1;attempt <= maxAttempts; attempt++) {
+    try {
+      return await fn();
+    } catch (err) {
+      lastError = err;
+      if (attempt === maxAttempts || !isRetryableError(err)) {
+        throw err;
+      }
+      if (onRetry) {
+        onRetry(attempt, err, delayMs);
+      }
+      await new Promise((resolve5) => setTimeout(resolve5, delayMs));
+      delayMs = Math.min(delayMs * backoffMultiplier, maxDelayMs);
+    }
+  }
+  throw lastError;
+}
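
Illustrative only: wrapping a flaky async call with withRetry and its defaults (3 attempts, 1 s initial delay, 2x backoff, capped at 10 s); fetchSomething is a hypothetical placeholder:

  const data = await withRetry(
    () => fetchSomething(), // hypothetical retryable async call
    { maxAttempts: 3 },
    (attempt, err, delayMs) => console.error(`attempt ${attempt} failed; retrying in ${delayMs}ms`)
  );
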
+
+class RpcError extends Error {
+  rpcName;
+  statusCode;
+  payloadText;
+  payloadJson;
+  constructor(params) {
+    const { rpcName, statusCode, payloadText, payloadJson } = params;
+    super(`RPC ${rpcName} failed: HTTP ${statusCode}`);
+    this.name = "RpcError";
+    this.rpcName = rpcName;
+    this.statusCode = statusCode;
+    this.payloadText = payloadText;
+    this.payloadJson = payloadJson;
+  }
+}
+function formatRpcErrorForDisplay(err) {
+  const lines = [];
+  lines.push(`Error: RPC ${err.rpcName} failed: HTTP ${err.statusCode}`);
+  const obj = err.payloadJson && typeof err.payloadJson === "object" ? err.payloadJson : null;
+  const details = obj && typeof obj.details === "string" ? obj.details : "";
+  const hint = obj && typeof obj.hint === "string" ? obj.hint : "";
+  const message = obj && typeof obj.message === "string" ? obj.message : "";
+  if (message)
+    lines.push(`Message: ${message}`);
+  if (details)
+    lines.push(`Details: ${details}`);
+  if (hint)
+    lines.push(`Hint: ${hint}`);
+  if (!message && !details && !hint) {
+    const t = (err.payloadText || "").trim();
+    if (t)
+      lines.push(t);
+  }
+  return lines;
+}
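
Illustrative only: formatting an RpcError whose JSON payload carries message/details/hint fields:

  formatRpcErrorForDisplay(new RpcError({
    rpcName: "checkup_report_create",
    statusCode: 401,
    payloadText: "",
    payloadJson: { message: "JWT expired", hint: "re-authenticate" }
  }));
  // -> ["Error: RPC checkup_report_create failed: HTTP 401",
  //     "Message: JWT expired",
  //     "Hint: re-authenticate"]
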
+function unwrapRpcResponse(parsed) {
+  if (Array.isArray(parsed)) {
+    if (parsed.length === 1)
+      return unwrapRpcResponse(parsed[0]);
+    return parsed;
+  }
+  if (parsed && typeof parsed === "object") {
+    const obj = parsed;
+    if (obj.result !== undefined)
+      return obj.result;
+  }
+  return parsed;
+}
+var HTTP_TIMEOUT_MS = 30000;
+async function postRpc(params) {
+  const { apiKey, apiBaseUrl, rpcName, bodyObj, timeoutMs = HTTP_TIMEOUT_MS } = params;
+  if (!apiKey)
+    throw new Error("API key is required");
+  const base = normalizeBaseUrl(apiBaseUrl);
+  const url = new URL3(`${base}/rpc/${rpcName}`);
+  const body = JSON.stringify(bodyObj);
+  const headers = {
+    "access-token": apiKey,
+    Prefer: "return=representation",
+    "Content-Type": "application/json",
+    "Content-Length": Buffer.byteLength(body).toString()
+  };
+  const controller = new AbortController;
+  let timeoutId = null;
+  let settled = false;
+  return new Promise((resolve5, reject) => {
+    const settledReject = (err) => {
+      if (settled)
+        return;
+      settled = true;
+      if (timeoutId)
+        clearTimeout(timeoutId);
+      reject(err);
+    };
+    const settledResolve = (value) => {
+      if (settled)
+        return;
+      settled = true;
+      if (timeoutId)
+        clearTimeout(timeoutId);
+      resolve5(value);
+    };
+    const req = https.request(url, {
+      method: "POST",
+      headers,
+      signal: controller.signal
+    }, (res) => {
+      if (timeoutId) {
+        clearTimeout(timeoutId);
+        timeoutId = null;
+      }
+      let data = "";
+      res.on("data", (chunk) => data += chunk);
+      res.on("end", () => {
+        if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
+          try {
+            const parsed = JSON.parse(data);
+            settledResolve(unwrapRpcResponse(parsed));
+          } catch {
+            settledReject(new Error(`Failed to parse RPC response: ${data}`));
+          }
+        } else {
+          const statusCode = res.statusCode || 0;
+          let payloadJson = null;
+          if (data) {
+            try {
+              payloadJson = JSON.parse(data);
+            } catch {
+              payloadJson = null;
+            }
+          }
+          settledReject(new RpcError({ rpcName, statusCode, payloadText: data, payloadJson }));
+        }
+      });
+      res.on("error", (err) => {
+        settledReject(err);
+      });
+    });
+    timeoutId = setTimeout(() => {
+      controller.abort();
+      req.destroy();
+      settledReject(new Error(`RPC ${rpcName} timed out after ${timeoutMs}ms (no response)`));
+    }, timeoutMs);
+    req.on("error", (err) => {
+      if (err.name === "AbortError" || err.code === "ABORT_ERR") {
+        settledReject(new Error(`RPC ${rpcName} timed out after ${timeoutMs}ms`));
+        return;
+      }
+      if (err.code === "ECONNREFUSED") {
+        settledReject(new Error(`RPC ${rpcName} failed: connection refused to ${url.host}`));
+      } else if (err.code === "ENOTFOUND") {
+        settledReject(new Error(`RPC ${rpcName} failed: DNS lookup failed for ${url.host}`));
+      } else if (err.code === "ECONNRESET") {
+        settledReject(new Error(`RPC ${rpcName} failed: connection reset by server`));
+      } else {
+        settledReject(err);
+      }
+    });
+    req.write(body);
+    req.end();
+  });
+}
+async function createCheckupReport(params) {
+  const { apiKey, apiBaseUrl, project, status } = params;
+  const bodyObj = {
+    access_token: apiKey,
+    project
+  };
+  if (status)
+    bodyObj.status = status;
+  const resp = await postRpc({
+    apiKey,
+    apiBaseUrl,
+    rpcName: "checkup_report_create",
+    bodyObj
+  });
+  const reportId = Number(resp?.report_id);
+  if (!Number.isFinite(reportId) || reportId <= 0) {
+    throw new Error(`Unexpected checkup_report_create response: ${JSON.stringify(resp)}`);
+  }
+  return { reportId };
+}
+async function uploadCheckupReportJson(params) {
+  const { apiKey, apiBaseUrl, reportId, filename, checkId, jsonText } = params;
+  const bodyObj = {
+    access_token: apiKey,
+    checkup_report_id: reportId,
+    filename,
+    check_id: checkId,
+    data: jsonText,
+    type: "json",
+    generate_issue: true
+  };
+  const resp = await postRpc({
+    apiKey,
+    apiBaseUrl,
+    rpcName: "checkup_report_file_post",
+    bodyObj
+  });
+  const chunkId = Number(resp?.report_chunck_id ?? resp?.report_chunk_id);
+  if (!Number.isFinite(chunkId) || chunkId <= 0) {
+    throw new Error(`Unexpected checkup_report_file_post response: ${JSON.stringify(resp)}`);
+  }
+  return { reportChunkId: chunkId };
+}
+
+// bin/postgres-ai.ts
 var rl = null;
 function getReadline() {
   if (!rl) {
@@ -24227,27 +25903,27 @@ function closeReadline() {
   }
 }
 async function execPromise(command) {
-  return new Promise((
+  return new Promise((resolve6, reject) => {
     childProcess.exec(command, (error2, stdout, stderr) => {
       if (error2) {
        const err = error2;
        err.code = error2.code ?? 1;
        reject(err);
      } else {
-
+        resolve6({ stdout, stderr });
      }
    });
  });
 }
 async function execFilePromise(file, args) {
-  return new Promise((
+  return new Promise((resolve6, reject) => {
     childProcess.execFile(file, args, (error2, stdout, stderr) => {
       if (error2) {
        const err = error2;
        err.code = error2.code ?? 1;
        reject(err);
      } else {
-
+        resolve6({ stdout, stderr });
      }
    });
  });
@@ -24288,17 +25964,181 @@ function spawn2(cmd, args, options) {
   };
 }
 async function question(prompt) {
-  return new Promise((
+  return new Promise((resolve6) => {
     getReadline().question(prompt, (answer) => {
-
+      resolve6(answer);
    });
  });
 }
+function expandHomePath(p) {
+  const s = (p || "").trim();
+  if (!s)
+    return s;
+  if (s === "~")
+    return os3.homedir();
+  if (s.startsWith("~/") || s.startsWith("~\\")) {
+    return path5.join(os3.homedir(), s.slice(2));
+  }
+  return s;
+}
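
Illustrative only: expandHomePath resolves a leading tilde against os.homedir() and leaves other paths untouched:

  expandHomePath("~");         // e.g. "/home/alice"
  expandHomePath("~/reports"); // e.g. "/home/alice/reports"
  expandHomePath("./reports"); // unchanged
  expandHomePath("  ");        // "" (input is trimmed)
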
|
|
25984
|
+
function createTtySpinner(enabled, initialText) {
|
|
25985
|
+
if (!enabled) {
|
|
25986
|
+
return {
|
|
25987
|
+
update: () => {},
|
|
25988
|
+
stop: () => {}
|
|
25989
|
+
};
|
|
25990
|
+
}
|
|
25991
|
+
const frames = ["|", "/", "-", "\\"];
|
|
25992
|
+
const startTs = Date.now();
|
|
25993
|
+
let text = initialText;
|
|
25994
|
+
let frameIdx = 0;
|
|
25995
|
+
let stopped = false;
|
|
25996
|
+
const render = () => {
|
|
25997
|
+
if (stopped)
|
|
25998
|
+
return;
|
|
25999
|
+
const elapsedSec = ((Date.now() - startTs) / 1000).toFixed(1);
|
|
26000
|
+
const frame = frames[frameIdx % frames.length] ?? frames[0] ?? "\u283F";
|
|
26001
|
+
frameIdx += 1;
|
|
26002
|
+
process.stdout.write(`\r\x1B[2K${frame} ${text} (${elapsedSec}s)`);
|
|
26003
|
+
};
|
|
26004
|
+
const timer = setInterval(render, 120);
|
|
26005
|
+
render();
|
|
26006
|
+
return {
|
|
26007
|
+
update: (t) => {
|
|
26008
|
+
text = t;
|
|
26009
|
+
render();
|
|
26010
|
+
},
|
|
26011
|
+
stop: (finalText) => {
|
|
26012
|
+
if (stopped)
|
|
26013
|
+
return;
|
|
26014
|
+
stopped = true;
|
|
26015
|
+
clearInterval(timer);
|
|
26016
|
+
process.stdout.write("\r\x1B[2K");
|
|
26017
|
+
if (finalText && finalText.trim()) {
|
|
26018
|
+
process.stdout.write(finalText);
|
|
26019
|
+
}
|
|
26020
|
+
process.stdout.write(`
|
|
26021
|
+
`);
|
|
26022
|
+
}
|
|
26023
|
+
};
|
|
26024
|
+
}
|
|
26025
|
+
function prepareOutputDirectory(outputOpt) {
|
|
26026
|
+
if (!outputOpt)
|
|
26027
|
+
return;
|
|
26028
|
+
const outputDir = expandHomePath(outputOpt);
|
|
26029
|
+
const outputPath = path5.isAbsolute(outputDir) ? outputDir : path5.resolve(process.cwd(), outputDir);
|
|
26030
|
+
if (!fs5.existsSync(outputPath)) {
|
|
26031
|
+
try {
|
|
26032
|
+
fs5.mkdirSync(outputPath, { recursive: true });
|
|
26033
|
+
} catch (e) {
|
|
26034
|
+
const errAny = e;
|
|
26035
|
+
const code = typeof errAny?.code === "string" ? errAny.code : "";
|
|
26036
|
+
const msg = errAny instanceof Error ? errAny.message : String(errAny);
|
|
26037
|
+
if (code === "EACCES" || code === "EPERM" || code === "ENOENT") {
|
|
26038
|
+
console.error(`Error: Failed to create output directory: ${outputPath}`);
|
|
26039
|
+
console.error(`Reason: ${msg}`);
|
|
26040
|
+
console.error("Tip: choose a writable path, e.g. --output ./reports or --output ~/reports");
|
|
26041
|
+
return null;
|
|
26042
|
+
}
|
|
26043
|
+
throw e;
|
|
26044
|
+
}
|
|
26045
|
+
}
|
|
26046
|
+
return outputPath;
|
|
26047
|
+
}
|
|
26048
|
+
function prepareUploadConfig(opts, rootOpts, shouldUpload, uploadExplicitlyRequested) {
|
|
26049
|
+
if (!shouldUpload)
|
|
26050
|
+
return;
|
|
26051
|
+
const { apiKey } = getConfig(rootOpts);
|
|
26052
|
+
if (!apiKey) {
|
|
26053
|
+
if (uploadExplicitlyRequested) {
|
|
26054
|
+
console.error("Error: API key is required for upload");
|
|
26055
|
+
console.error("Tip: run 'postgresai auth' or pass --api-key / set PGAI_API_KEY");
|
|
26056
|
+
return null;
|
|
26057
|
+
}
|
|
26058
|
+
return;
|
|
26059
|
+
}
|
|
26060
|
+
const cfg = readConfig();
|
|
26061
|
+
const { apiBaseUrl } = resolveBaseUrls2(rootOpts, cfg);
|
|
26062
|
+
let project = (opts.project || cfg.defaultProject || "").trim();
|
|
26063
|
+
let projectWasGenerated = false;
|
|
26064
|
+
if (!project) {
|
|
26065
|
+
project = `project_${crypto2.randomBytes(6).toString("hex")}`;
|
|
26066
|
+
projectWasGenerated = true;
|
|
26067
|
+
try {
|
|
26068
|
+
writeConfig({ defaultProject: project });
|
|
26069
|
+
} catch (e) {
|
|
26070
|
+
const message = e instanceof Error ? e.message : String(e);
|
|
26071
|
+
console.error(`Warning: Failed to save generated default project: ${message}`);
|
|
26072
|
+
}
|
|
26073
|
+
}
|
|
26074
|
+
return {
|
|
26075
|
+
config: { apiKey, apiBaseUrl, project },
|
|
26076
|
+
projectWasGenerated
|
|
26077
|
+
};
|
|
26078
|
+
}
|
|
26079
|
+
async function uploadCheckupReports(uploadCfg, reports, spinner, logUpload) {
|
|
26080
|
+
spinner.update("Creating remote checkup report");
|
|
26081
|
+
const created = await withRetry(() => createCheckupReport({
|
|
26082
|
+
apiKey: uploadCfg.apiKey,
|
|
26083
|
+
apiBaseUrl: uploadCfg.apiBaseUrl,
|
|
26084
|
+
project: uploadCfg.project
|
|
26085
|
+
}), { maxAttempts: 3 }, (attempt, err, delayMs) => {
|
|
26086
|
+
const errMsg = err instanceof Error ? err.message : String(err);
|
|
26087
|
+
logUpload(`[Retry ${attempt}/3] createCheckupReport failed: ${errMsg}, retrying in ${delayMs}ms...`);
|
|
26088
|
+
});
|
|
26089
|
+
const reportId = created.reportId;
|
|
26090
|
+
logUpload(`Created remote checkup report: ${reportId}`);
|
|
26091
|
+
const uploaded = [];
|
|
26092
|
+
for (const [checkId, report] of Object.entries(reports)) {
|
|
26093
|
+
spinner.update(`Uploading ${checkId}.json`);
|
|
26094
|
+
const jsonText = JSON.stringify(report, null, 2);
|
|
26095
|
+
const r = await withRetry(() => uploadCheckupReportJson({
|
|
26096
|
+
apiKey: uploadCfg.apiKey,
|
|
26097
|
+
apiBaseUrl: uploadCfg.apiBaseUrl,
|
|
26098
|
+
reportId,
|
|
26099
|
+
filename: `${checkId}.json`,
|
|
26100
|
+
checkId,
|
|
26101
|
+
jsonText
|
|
26102
|
+
}), { maxAttempts: 3 }, (attempt, err, delayMs) => {
|
|
26103
|
+
const errMsg = err instanceof Error ? err.message : String(err);
|
|
26104
|
+
logUpload(`[Retry ${attempt}/3] Upload ${checkId}.json failed: ${errMsg}, retrying in ${delayMs}ms...`);
|
|
26105
|
+
});
|
|
26106
|
+
uploaded.push({ checkId, filename: `${checkId}.json`, chunkId: r.reportChunkId });
|
|
26107
|
+
}
|
|
26108
|
+
logUpload("Upload completed");
|
|
26109
|
+
return { project: uploadCfg.project, reportId, uploaded };
|
|
26110
|
+
}
|
|
26111
|
+
function writeReportFiles(reports, outputPath) {
|
|
26112
|
+
for (const [checkId, report] of Object.entries(reports)) {
|
|
26113
|
+
const filePath = path5.join(outputPath, `${checkId}.json`);
|
|
26114
|
+
fs5.writeFileSync(filePath, JSON.stringify(report, null, 2), "utf8");
|
|
26115
|
+
console.log(`\u2713 ${checkId}: ${filePath}`);
|
|
26116
|
+
}
|
|
26117
|
+
}
|
|
26118
|
+
function printUploadSummary(summary, projectWasGenerated, useStderr) {
|
|
26119
|
+
const out = useStderr ? console.error : console.log;
|
|
26120
|
+
out(`
|
|
26121
|
+
Checkup report uploaded`);
|
|
26122
|
+
out(`======================
|
|
26123
|
+
`);
|
|
26124
|
+
if (projectWasGenerated) {
|
|
26125
|
+
out(`Project: ${summary.project} (generated and saved as default)`);
|
|
26126
|
+
} else {
|
|
26127
|
+
out(`Project: ${summary.project}`);
|
|
26128
|
+
}
|
|
26129
|
+
out(`Report ID: ${summary.reportId}`);
|
|
26130
|
+
out("View in Console: console.postgres.ai \u2192 Support \u2192 checkup reports");
|
|
26131
|
+
out("");
|
|
26132
|
+
out("Files:");
|
|
26133
|
+
for (const item of summary.uploaded) {
|
|
26134
|
+
out(`- ${item.checkId}: ${item.filename}`);
|
|
26135
|
+
}
|
|
26136
|
+
}
|
|
24297
26137
|
function getDefaultMonitoringProjectDir() {
|
|
24298
26138
|
const override = process.env.PGAI_PROJECT_DIR;
|
|
24299
26139
|
if (override && override.trim())
|
|
24300
26140
|
return override.trim();
|
|
24301
|
-
return
|
|
26141
|
+
return path5.join(getConfigDir(), "monitoring");
|
|
24302
26142
|
}
|
|
24303
26143
|
async function downloadText(url) {
|
|
24304
26144
|
const controller = new AbortController;
|
|
@@ -24315,12 +26155,12 @@ async function downloadText(url) {
|
|
|
24315
26155
|
}
|
|
24316
26156
|
async function ensureDefaultMonitoringProject() {
|
|
24317
26157
|
const projectDir = getDefaultMonitoringProjectDir();
|
|
24318
|
-
const composeFile =
|
|
24319
|
-
const instancesFile =
|
|
24320
|
-
if (!
|
|
24321
|
-
|
|
26158
|
+
const composeFile = path5.resolve(projectDir, "docker-compose.yml");
|
|
26159
|
+
const instancesFile = path5.resolve(projectDir, "instances.yml");
|
|
26160
|
+
if (!fs5.existsSync(projectDir)) {
|
|
26161
|
+
fs5.mkdirSync(projectDir, { recursive: true, mode: 448 });
|
|
24322
26162
|
}
|
|
24323
|
-
if (!
|
|
26163
|
+
if (!fs5.existsSync(composeFile)) {
|
|
24324
26164
|
const refs = [
|
|
24325
26165
|
process.env.PGAI_PROJECT_REF,
|
|
24326
26166
|
package_default.version,
|
|
@@ -24332,36 +26172,36 @@ async function ensureDefaultMonitoringProject() {
|
|
|
24332
26172
|
const url = `https://gitlab.com/postgres-ai/postgres_ai/-/raw/${encodeURIComponent(ref)}/docker-compose.yml`;
|
|
24333
26173
|
try {
|
|
24334
26174
|
const text = await downloadText(url);
|
|
24335
|
-
|
|
26175
|
+
fs5.writeFileSync(composeFile, text, { encoding: "utf8", mode: 384 });
|
|
24336
26176
|
break;
|
|
24337
26177
|
} catch (err) {
|
|
24338
26178
|
lastErr = err;
|
|
24339
26179
|
}
|
|
24340
26180
|
}
|
|
24341
|
-
if (!
|
|
26181
|
+
if (!fs5.existsSync(composeFile)) {
|
|
24342
26182
|
const msg = lastErr instanceof Error ? lastErr.message : String(lastErr);
|
|
24343
26183
|
throw new Error(`Failed to bootstrap docker-compose.yml: ${msg}`);
|
|
24344
26184
|
}
|
|
24345
26185
|
}
|
|
24346
|
-
if (!
|
|
26186
|
+
if (!fs5.existsSync(instancesFile)) {
|
|
24347
26187
|
const header = `# PostgreSQL instances to monitor
|
|
24348
26188
|
` + `# Add your instances using: pgai mon targets add <connection-string> <name>
|
|
24349
26189
|
|
|
24350
26190
|
`;
|
|
24351
|
-
|
|
26191
|
+
fs5.writeFileSync(instancesFile, header, { encoding: "utf8", mode: 384 });
|
|
24352
26192
|
}
|
|
24353
|
-
const pgwatchConfig =
|
|
24354
|
-
if (!
|
|
24355
|
-
|
|
26193
|
+
const pgwatchConfig = path5.resolve(projectDir, ".pgwatch-config");
|
|
26194
|
+
if (!fs5.existsSync(pgwatchConfig)) {
|
|
26195
|
+
fs5.writeFileSync(pgwatchConfig, "", { encoding: "utf8", mode: 384 });
|
|
24356
26196
|
}
|
|
24357
|
-
const envFile =
|
|
24358
|
-
if (!
|
|
26197
|
+
const envFile = path5.resolve(projectDir, ".env");
|
|
26198
|
+
if (!fs5.existsSync(envFile)) {
|
|
24359
26199
|
const envText = `PGAI_TAG=${package_default.version}
|
|
24360
26200
|
# PGAI_REGISTRY=registry.gitlab.com/postgres-ai/postgres_ai
|
|
24361
26201
|
`;
|
|
24362
|
-
|
|
26202
|
+
fs5.writeFileSync(envFile, envText, { encoding: "utf8", mode: 384 });
|
|
24363
26203
|
}
|
|
24364
|
-
return { fs:
|
|
26204
|
+
return { fs: fs5, path: path5, projectDir, composeFile, instancesFile };
|
|
24365
26205
|
}
|
|
24366
26206
|
function getConfig(opts) {
|
|
24367
26207
|
let apiKey = opts.apiKey || process.env.PGAI_API_KEY || "";
|
|
@@ -24393,6 +26233,16 @@ function printResult(result, json2) {
 }
 var program2 = new Command;
 program2.name("postgres-ai").description("PostgresAI CLI").version(package_default.version).option("--api-key <key>", "API key (overrides PGAI_API_KEY)").option("--api-base-url <url>", "API base URL for backend RPC (overrides PGAI_API_BASE_URL)").option("--ui-base-url <url>", "UI base URL for browser routes (overrides PGAI_UI_BASE_URL)");
+program2.command("set-default-project <project>").description("store default project for checkup uploads").action(async (project) => {
+  const value = (project || "").trim();
+  if (!value) {
+    console.error("Error: project is required");
+    process.exitCode = 1;
+    return;
+  }
+  writeConfig({ defaultProject: value });
+  console.log(`Default project saved: ${value}`);
+});
 program2.command("prepare-db [conn]").description("prepare database for monitoring: create monitoring user, required view(s), and grant permissions (idempotent)").option("--db-url <url>", "PostgreSQL connection URL (admin) to run the setup against (deprecated; pass it as positional arg)").option("-h, --host <host>", "PostgreSQL host (psql-like)").option("-p, --port <port>", "PostgreSQL port (psql-like)").option("-U, --username <username>", "PostgreSQL user (psql-like)").option("-d, --dbname <dbname>", "PostgreSQL database name (psql-like)").option("--admin-password <password>", "Admin connection password (otherwise uses PGPASSWORD if set)").option("--monitoring-user <name>", "Monitoring role name to create/update", DEFAULT_MONITORING_USER).option("--password <password>", "Monitoring role password (overrides PGAI_MON_PASSWORD)").option("--skip-optional-permissions", "Skip optional permissions (RDS/self-managed extras)", false).option("--verify", "Verify that monitoring role/permissions are in place (no changes)", false).option("--reset-password", "Reset monitoring role password only (no other changes)", false).option("--print-sql", "Print SQL plan and exit (no changes applied)", false).option("--print-password", "Print generated monitoring password (DANGEROUS in CI logs)", false).addHelpText("after", [
 "",
 "Examples:",
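The `set-default-project` command added here simply validates and persists `defaultProject` through `writeConfig`; the `checkup` command introduced in the next hunk reads it back when `--project` is omitted. Expected usage, mirroring the help-text examples further down:

    postgresai set-default-project my_project
    postgresai checkup postgresql://user:pass@host:5432/db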
@@ -24663,16 +26513,117 @@ program2.command("prepare-db [conn]").description("prepare database for monitori
   }
 }
 });
+program2.command("checkup [conn]").description("generate health check reports directly from PostgreSQL (express mode)").option("--check-id <id>", `specific check to run: ${Object.keys(CHECK_INFO).join(", ")}, or ALL`, "ALL").option("--node-name <name>", "node name for reports", "node-01").option("--output <path>", "output directory for JSON files").option("--[no-]upload", "upload JSON results to PostgresAI (default: enabled; requires API key)", undefined).option("--project <project>", "project name or ID for remote upload (used with --upload; defaults to config defaultProject; auto-generated on first run)").option("--json", "output JSON to stdout (implies --no-upload)").addHelpText("after", [
+  "",
+  "Available checks:",
+  ...Object.entries(CHECK_INFO).map(([id, title]) => ` ${id}: ${title}`),
+  "",
+  "Examples:",
+  " postgresai checkup postgresql://user:pass@host:5432/db",
+  " postgresai checkup postgresql://user:pass@host:5432/db --check-id A003",
+  " postgresai checkup postgresql://user:pass@host:5432/db --output ./reports",
+  " postgresai checkup postgresql://user:pass@host:5432/db --project my_project",
+  " postgresai set-default-project my_project",
+  " postgresai checkup postgresql://user:pass@host:5432/db",
+  " postgresai checkup postgresql://user:pass@host:5432/db --no-upload --json"
+].join(`
+`)).action(async (conn, opts, cmd) => {
+  if (!conn) {
+    cmd.outputHelp();
+    process.exitCode = 1;
+    return;
+  }
+  const shouldPrintJson = !!opts.json;
+  const uploadExplicitlyRequested = opts.upload === true;
+  const uploadExplicitlyDisabled = opts.upload === false || shouldPrintJson;
+  let shouldUpload = !uploadExplicitlyDisabled;
+  const outputPath = prepareOutputDirectory(opts.output);
+  if (outputPath === null) {
+    process.exitCode = 1;
+    return;
+  }
+  const rootOpts = program2.opts();
+  const uploadResult = prepareUploadConfig(opts, rootOpts, shouldUpload, uploadExplicitlyRequested);
+  if (uploadResult === null) {
+    process.exitCode = 1;
+    return;
+  }
+  const uploadCfg = uploadResult?.config;
+  const projectWasGenerated = uploadResult?.projectWasGenerated ?? false;
+  shouldUpload = !!uploadCfg;
+  const adminConn = resolveAdminConnection({
+    conn,
+    envPassword: process.env.PGPASSWORD
+  });
+  let client;
+  const spinnerEnabled = !!process.stdout.isTTY && shouldUpload;
+  const spinner = createTtySpinner(spinnerEnabled, "Connecting to Postgres");
+  try {
+    spinner.update("Connecting to Postgres");
+    const connResult = await connectWithSslFallback(Client, adminConn);
+    client = connResult.client;
+    let reports;
+    if (opts.checkId === "ALL") {
+      reports = await generateAllReports(client, opts.nodeName, (p) => {
+        spinner.update(`Running ${p.checkId}: ${p.checkTitle} (${p.index}/${p.total})`);
+      });
+    } else {
+      const checkId = opts.checkId.toUpperCase();
+      const generator = REPORT_GENERATORS[checkId];
+      if (!generator) {
+        spinner.stop();
+        console.error(`Unknown check ID: ${opts.checkId}`);
+        console.error(`Available: ${Object.keys(CHECK_INFO).join(", ")}, ALL`);
+        process.exitCode = 1;
+        return;
+      }
+      spinner.update(`Running ${checkId}: ${CHECK_INFO[checkId] || checkId}`);
+      reports = { [checkId]: await generator(client, opts.nodeName) };
+    }
+    let uploadSummary;
+    if (uploadCfg) {
+      const logUpload = (msg) => {
+        (shouldPrintJson ? console.error : console.log)(msg);
+      };
+      uploadSummary = await uploadCheckupReports(uploadCfg, reports, spinner, logUpload);
+    }
+    spinner.stop();
+    if (outputPath) {
+      writeReportFiles(reports, outputPath);
+    }
+    if (uploadSummary) {
+      printUploadSummary(uploadSummary, projectWasGenerated, shouldPrintJson);
+    }
+    if (shouldPrintJson || !shouldUpload && !opts.output) {
+      console.log(JSON.stringify(reports, null, 2));
+    }
+  } catch (error2) {
+    if (error2 instanceof RpcError) {
+      for (const line of formatRpcErrorForDisplay(error2)) {
+        console.error(line);
+      }
+    } else {
+      const message = error2 instanceof Error ? error2.message : String(error2);
+      console.error(`Error: ${message}`);
+    }
+    process.exitCode = 1;
+  } finally {
+    spinner.stop();
+    if (client) {
+      await client.end();
+    }
+  }
+});
 function resolvePaths() {
   const startDir = process.cwd();
   let currentDir = startDir;
   while (true) {
-    const composeFile =
-    if (
-    const instancesFile =
-    return { fs:
+    const composeFile = path5.resolve(currentDir, "docker-compose.yml");
+    if (fs5.existsSync(composeFile)) {
+      const instancesFile = path5.resolve(currentDir, "instances.yml");
+      return { fs: fs5, path: path5, projectDir: currentDir, composeFile, instancesFile };
     }
-    const parentDir =
+    const parentDir = path5.dirname(currentDir);
     if (parentDir === currentDir)
       break;
     currentDir = parentDir;
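Worth noting in the hunk above is the tri-state `--[no-]upload` flag: its default is `undefined`, so the action can distinguish an explicit `--upload` (`opts.upload === true`) from an explicit `--no-upload` (`opts.upload === false`) and from the flag being absent. A condensed sketch of that resolution, with names taken from the bundled code:

    // --json always disables upload; --no-upload disables it explicitly.
    const uploadExplicitlyDisabled = opts.upload === false || !!opts.json;
    let shouldUpload = !uploadExplicitlyDisabled;
    // prepareUploadConfig(...) can still return no config (e.g. no API key),
    // after which the effective value collapses to: shouldUpload = !!uploadCfg;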
@@ -24738,12 +26689,12 @@ async function runCompose(args) {
     return 1;
   }
   const env = { ...process.env };
-  const cfgPath =
-  if (
+  const cfgPath = path5.resolve(projectDir, ".pgwatch-config");
+  if (fs5.existsSync(cfgPath)) {
     try {
-      const stats =
+      const stats = fs5.statSync(cfgPath);
       if (!stats.isDirectory()) {
-        const content =
+        const content = fs5.readFileSync(cfgPath, "utf8");
         const match = content.match(/^grafana_password=([^\r\n]+)/m);
         if (match) {
           env.GF_SECURITY_ADMIN_PASSWORD = match[1].trim();
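`runCompose` now reads the Grafana admin password out of the project-local `.pgwatch-config` and exports it to the compose child process as `GF_SECURITY_ADMIN_PASSWORD`. Judging from the keys read and written across these hunks, `.pgwatch-config` is a plain key=value file along these lines (values are placeholders):

    api_key=<PostgresAI API key>
    grafana_password=<Grafana admin password>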
@@ -24751,13 +26702,13 @@ async function runCompose(args) {
       }
     } catch (err) {}
   }
-  return new Promise((
+  return new Promise((resolve6) => {
     const child = spawn2(cmd[0], [...cmd.slice(1), "-f", composeFile, ...args], {
       stdio: "inherit",
       env,
       cwd: projectDir
     });
-    child.on("close", (code) =>
+    child.on("close", (code) => resolve6(code || 0));
   });
 }
 program2.command("help", { isDefault: true }).description("show help").action(() => {
@@ -24775,17 +26726,31 @@ mon.command("local-install").description("install local monitoring stack (genera
   const { projectDir } = await resolveOrInitPaths();
   console.log(`Project directory: ${projectDir}
 `);
-  const envFile =
-
-
-
-
+  const envFile = path5.resolve(projectDir, ".env");
+  let existingTag = null;
+  let existingRegistry = null;
+  let existingPassword = null;
+  if (fs5.existsSync(envFile)) {
+    const existingEnv = fs5.readFileSync(envFile, "utf8");
+    const tagMatch = existingEnv.match(/^PGAI_TAG=(.+)$/m);
+    if (tagMatch)
+      existingTag = tagMatch[1].trim();
+    const registryMatch = existingEnv.match(/^PGAI_REGISTRY=(.+)$/m);
+    if (registryMatch)
+      existingRegistry = registryMatch[1].trim();
     const pwdMatch = existingEnv.match(/^GF_SECURITY_ADMIN_PASSWORD=(.+)$/m);
-    if (pwdMatch)
-
-  }
+    if (pwdMatch)
+      existingPassword = pwdMatch[1].trim();
   }
-
+  const imageTag = opts.tag || existingTag || package_default.version;
+  const envLines = [`PGAI_TAG=${imageTag}`];
+  if (existingRegistry) {
+    envLines.push(`PGAI_REGISTRY=${existingRegistry}`);
+  }
+  if (existingPassword) {
+    envLines.push(`GF_SECURITY_ADMIN_PASSWORD=${existingPassword}`);
+  }
+  fs5.writeFileSync(envFile, envLines.join(`
 `) + `
 `, { encoding: "utf8", mode: 384 });
   if (opts.tag) {
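The rewritten `local-install` step above no longer clobbers an existing `.env`: it re-reads `PGAI_TAG`, `PGAI_REGISTRY`, and `GF_SECURITY_ADMIN_PASSWORD` and writes them back, with an explicit `--tag` taking precedence over the preserved tag. A resulting `.env` might look like this (values illustrative):

    PGAI_TAG=0.14.0-dev.55
    PGAI_REGISTRY=registry.gitlab.com/postgres-ai/postgres_ai
    GF_SECURITY_ADMIN_PASSWORD=<preserved password>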
@@ -24821,7 +26786,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
   if (opts.apiKey) {
     console.log("Using API key provided via --api-key parameter");
     writeConfig({ apiKey: opts.apiKey });
-
+    fs5.writeFileSync(path5.resolve(projectDir, ".pgwatch-config"), `api_key=${opts.apiKey}
 `, {
       encoding: "utf8",
       mode: 384
@@ -24842,7 +26807,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
     const trimmedKey = inputApiKey.trim();
     if (trimmedKey) {
       writeConfig({ apiKey: trimmedKey });
-
+      fs5.writeFileSync(path5.resolve(projectDir, ".pgwatch-config"), `api_key=${trimmedKey}
 `, {
         encoding: "utf8",
         mode: 384
@@ -24879,7 +26844,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
 # Add your instances using: postgres-ai mon targets add
 
 `;
-
+    fs5.writeFileSync(instancesPath, emptyInstancesContent, "utf8");
     console.log(`Instances file: ${instancesPath}`);
     console.log(`Project directory: ${projectDir2}
 `);
@@ -24911,7 +26876,7 @@ Use demo mode without API key: postgres-ai mon local-install --demo`);
 node_name: ${instanceName}
 sink_type: ~sink_type~
 `;
-
+    fs5.appendFileSync(instancesPath, body, "utf8");
     console.log(`\u2713 Monitoring target '${instanceName}' added
 `);
     console.log("Testing connection to the added instance...");
@@ -24966,7 +26931,7 @@ You can provide either:`);
 node_name: ${instanceName}
 sink_type: ~sink_type~
 `;
-
+    fs5.appendFileSync(instancesPath, body, "utf8");
     console.log(`\u2713 Monitoring target '${instanceName}' added
 `);
     console.log("Testing connection to the added instance...");
@@ -25006,13 +26971,13 @@ You can provide either:`);
   console.log(`\u2713 Configuration updated
 `);
   console.log(opts.demo ? "Step 4: Configuring Grafana security..." : "Step 4: Configuring Grafana security...");
-  const cfgPath =
+  const cfgPath = path5.resolve(projectDir, ".pgwatch-config");
   let grafanaPassword = "";
   try {
-    if (
-    const stats =
+    if (fs5.existsSync(cfgPath)) {
+      const stats = fs5.statSync(cfgPath);
       if (!stats.isDirectory()) {
-        const content =
+        const content = fs5.readFileSync(cfgPath, "utf8");
         const match = content.match(/^grafana_password=([^\r\n]+)/m);
         if (match) {
           grafanaPassword = match[1].trim();
@@ -25025,15 +26990,15 @@ You can provide either:`);
 '`);
     grafanaPassword = password.trim();
     let configContent = "";
-    if (
-    const stats =
+    if (fs5.existsSync(cfgPath)) {
+      const stats = fs5.statSync(cfgPath);
       if (!stats.isDirectory()) {
-        configContent =
+        configContent = fs5.readFileSync(cfgPath, "utf8");
       }
     }
     const lines = configContent.split(/\r?\n/).filter((l) => !/^grafana_password=/.test(l));
     lines.push(`grafana_password=${grafanaPassword}`);
-
+    fs5.writeFileSync(cfgPath, lines.filter(Boolean).join(`
 `) + `
 `, "utf8");
   }
@@ -25147,7 +27112,7 @@ mon.command("health").description("health check for monitoring services").option
     if (attempt > 1) {
       console.log(`Retrying (attempt ${attempt}/${maxAttempts})...
 `);
-      await new Promise((
+      await new Promise((resolve6) => setTimeout(resolve6, 5000));
     }
     allHealthy = true;
     for (const service of services) {
@@ -25195,11 +27160,11 @@ mon.command("config").description("show monitoring services configuration").acti
   console.log(`Project Directory: ${projectDir}`);
   console.log(`Docker Compose File: ${composeFile}`);
   console.log(`Instances File: ${instancesFile}`);
-  if (
+  if (fs5.existsSync(instancesFile)) {
     console.log(`
 Instances configuration:
 `);
-    const text =
+    const text = fs5.readFileSync(instancesFile, "utf8");
     process.stdout.write(text);
     if (!/\n$/.test(text))
       console.log();
@@ -25214,8 +27179,8 @@ mon.command("update").description("update monitoring stack").action(async () =>
   console.log(`Updating PostgresAI monitoring stack...
 `);
   try {
-    const gitDir =
-    if (!
+    const gitDir = path5.resolve(process.cwd(), ".git");
+    if (!fs5.existsSync(gitDir)) {
       console.error("Not a git repository. Cannot update.");
       process.exitCode = 1;
       return;
@@ -25341,13 +27306,13 @@ mon.command("check").description("monitoring services system readiness check").a
 var targets = mon.command("targets").description("manage databases to monitor");
 targets.command("list").description("list monitoring target databases").action(async () => {
   const { instancesFile: instancesPath, projectDir } = await resolveOrInitPaths();
-  if (!
+  if (!fs5.existsSync(instancesPath)) {
     console.error(`instances.yml not found in ${projectDir}`);
     process.exitCode = 1;
     return;
   }
   try {
-    const content =
+    const content = fs5.readFileSync(instancesPath, "utf8");
     const instances = load(content);
     if (!instances || !Array.isArray(instances) || instances.length === 0) {
       console.log("No monitoring targets configured");
@@ -25396,8 +27361,8 @@ targets.command("add [connStr] [name]").description("add monitoring target datab
     const db = m[5];
     const instanceName = name && name.trim() ? name.trim() : `${host}-${db}`.replace(/[^a-zA-Z0-9-]/g, "-");
     try {
-      if (
-      const content2 =
+      if (fs5.existsSync(file)) {
+        const content2 = fs5.readFileSync(file, "utf8");
         const instances = load(content2) || [];
         if (Array.isArray(instances)) {
           const exists = instances.some((inst) => inst.name === instanceName);
@@ -25409,7 +27374,7 @@ targets.command("add [connStr] [name]").description("add monitoring target datab
         }
       }
     } catch (err) {
-      const content2 =
+      const content2 = fs5.existsSync(file) ? fs5.readFileSync(file, "utf8") : "";
       if (new RegExp(`^- name: ${instanceName}$`, "m").test(content2)) {
         console.error(`Monitoring target '${instanceName}' already exists`);
         process.exitCode = 1;
@@ -25428,20 +27393,20 @@ targets.command("add [connStr] [name]").description("add monitoring target datab
 node_name: ${instanceName}
 sink_type: ~sink_type~
 `;
-    const content =
-
+    const content = fs5.existsSync(file) ? fs5.readFileSync(file, "utf8") : "";
+    fs5.appendFileSync(file, (content && !/\n$/.test(content) ? `
 ` : "") + body, "utf8");
     console.log(`Monitoring target '${instanceName}' added`);
 });
 targets.command("remove <name>").description("remove monitoring target database").action(async (name) => {
   const { instancesFile: file } = await resolveOrInitPaths();
-  if (!
+  if (!fs5.existsSync(file)) {
     console.error("instances.yml not found");
     process.exitCode = 1;
     return;
   }
   try {
-    const content =
+    const content = fs5.readFileSync(file, "utf8");
     const instances = load(content);
     if (!instances || !Array.isArray(instances)) {
       console.error("Invalid instances.yml format");
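For orientation: the appended `body` above ends with the `node_name` and `sink_type` fields visible in this hunk, while the duplicate check matches on a leading `- name:` line; the connection fields sit in lines truncated out of this extract. An entry in `instances.yml` therefore has roughly this shape (connection details elided, `~sink_type~` shown as the literal placeholder from the template, name illustrative):

    - name: myhost-mydb
      ...
      node_name: myhost-mydb
      sink_type: ~sink_type~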
@@ -25454,7 +27419,7 @@ targets.command("remove <name>").description("remove monitoring target database"
       process.exitCode = 1;
       return;
     }
-
+    fs5.writeFileSync(file, dump(filtered), "utf8");
     console.log(`Monitoring target '${name}' removed`);
   } catch (err) {
     const message = err instanceof Error ? err.message : String(err);
@@ -25464,13 +27429,13 @@ targets.command("remove <name>").description("remove monitoring target database"
 });
 targets.command("test <name>").description("test monitoring target database connectivity").action(async (name) => {
   const { instancesFile: instancesPath } = await resolveOrInitPaths();
-  if (!
+  if (!fs5.existsSync(instancesPath)) {
     console.error("instances.yml not found");
     process.exitCode = 1;
     return;
   }
   try {
-    const content =
+    const content = fs5.readFileSync(instancesPath, "utf8");
     const instances = load(content);
     if (!instances || !Array.isArray(instances)) {
       console.error("Invalid instances.yml format");
@@ -25513,8 +27478,15 @@ auth.command("login", { isDefault: true }).description("authenticate via browser
     process.exitCode = 1;
     return;
   }
+  const existingConfig = readConfig();
+  const existingProject = existingConfig.defaultProject;
   writeConfig({ apiKey: trimmedKey });
+  deleteConfigKeys(["orgId"]);
   console.log(`API key saved to ${getConfigPath()}`);
+  if (existingProject) {
+    console.log(`Note: Your default project "${existingProject}" has been preserved.`);
+    console.log(` If this key belongs to a different account, use --project to specify a new one.`);
+  }
   return;
 }
 console.log(`Starting authentication flow...
@@ -25531,9 +27503,8 @@ auth.command("login", { isDefault: true }).description("authenticate via browser
   console.log("Starting local callback server...");
   const requestedPort = opts.port || 0;
   const callbackServer = createCallbackServer(requestedPort, params.state, 120000);
-
-  const
-  const redirectUri = `http://localhost:${actualPort}/callback`;
+  const actualPort = await callbackServer.ready;
+  const redirectUri = `http://127.0.0.1:${actualPort}/callback`;
   console.log(`Callback server listening on port ${actualPort}`);
   console.log("Initializing authentication session...");
   const initData = JSON.stringify({
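Two small but meaningful changes in this hunk: the callback port is now obtained by awaiting `callbackServer.ready`, and the redirect URI pins the IP literal `127.0.0.1` rather than the `localhost` hostname (which on some systems resolves to `::1` and can miss an IPv4-only listener). The redirect URI thus takes the form (port illustrative):

    http://127.0.0.1:51234/callback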
@@ -25582,7 +27553,7 @@ Please verify the --api-base-url parameter.`);
   process.exit(1);
   return;
 }
-const authUrl = `${uiBaseUrl}/cli/auth?state=${encodeURIComponent(params.state)}&code_challenge=${encodeURIComponent(params.codeChallenge)}&code_challenge_method=S256&redirect_uri=${encodeURIComponent(redirectUri)}`;
+const authUrl = `${uiBaseUrl}/cli/auth?state=${encodeURIComponent(params.state)}&code_challenge=${encodeURIComponent(params.codeChallenge)}&code_challenge_method=S256&redirect_uri=${encodeURIComponent(redirectUri)}&api_url=${encodeURIComponent(apiBaseUrl)}`;
 if (opts.debug) {
   console.log(`Debug: Auth URL: ${authUrl}`);
 }
@@ -25651,15 +27622,28 @@ Please verify the --api-base-url parameter.`);
   const result = JSON.parse(exchangeBody);
   const apiToken = result.api_token || result?.[0]?.result?.api_token;
   const orgId = result.org_id || result?.[0]?.result?.org_id;
+  const existingConfig = readConfig();
+  const existingOrgId = existingConfig.orgId;
+  const existingProject = existingConfig.defaultProject;
+  const orgChanged = existingOrgId && existingOrgId !== orgId;
   writeConfig({
     apiKey: apiToken,
     baseUrl: apiBaseUrl,
     orgId
   });
+  if (orgChanged && existingProject) {
+    deleteConfigKeys(["defaultProject"]);
+    console.log(`
+Note: Organization changed (${existingOrgId} \u2192 ${orgId}).`);
+    console.log(` Default project "${existingProject}" has been cleared.`);
+  }
   console.log(`
 Authentication successful!`);
   console.log(`API key saved to: ${getConfigPath()}`);
   console.log(`Organization ID: ${orgId}`);
+  if (!orgChanged && existingProject) {
+    console.log(`Default project: ${existingProject} (preserved)`);
+  }
   console.log(`
 You can now use the CLI without specifying an API key.`);
   process.exit(0);
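Taken together with the manual-key path earlier (`@@ -25513`), the default-project bookkeeping added in this release works out to:

    key pasted manually        -> orgId cleared, defaultProject preserved (with a hint to use --project)
    browser login, same orgId  -> defaultProject preserved and reported
    browser login, new orgId   -> defaultProject deleted, user notified of the org change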
@@ -25704,15 +27688,15 @@ To authenticate, run: pgai auth`);
 });
 auth.command("remove-key").description("remove API key").action(async () => {
   const newConfigPath = getConfigPath();
-  const hasNewConfig =
+  const hasNewConfig = fs5.existsSync(newConfigPath);
   let legacyPath;
   try {
     const { projectDir } = await resolveOrInitPaths();
-    legacyPath =
+    legacyPath = path5.resolve(projectDir, ".pgwatch-config");
   } catch {
-    legacyPath =
+    legacyPath = path5.resolve(process.cwd(), ".pgwatch-config");
   }
-  const hasLegacyConfig =
+  const hasLegacyConfig = fs5.existsSync(legacyPath) && fs5.statSync(legacyPath).isFile();
   if (!hasNewConfig && !hasLegacyConfig) {
     console.log("No API key configured");
     return;
@@ -25722,11 +27706,11 @@ auth.command("remove-key").description("remove API key").action(async () => {
   }
   if (hasLegacyConfig) {
     try {
-      const content =
+      const content = fs5.readFileSync(legacyPath, "utf8");
       const filtered = content.split(/\r?\n/).filter((l) => !/^api_key=/.test(l)).join(`
 `).replace(/\n+$/g, `
 `);
-
+      fs5.writeFileSync(legacyPath, filtered, "utf8");
     } catch (err) {
       console.warn(`Warning: Could not update legacy config: ${err instanceof Error ? err.message : String(err)}`);
     }
@@ -25737,7 +27721,7 @@ To authenticate again, run: pgai auth`);
 });
 mon.command("generate-grafana-password").description("generate Grafana password for monitoring services").action(async () => {
   const { projectDir } = await resolveOrInitPaths();
-  const cfgPath =
+  const cfgPath = path5.resolve(projectDir, ".pgwatch-config");
   try {
     const { stdout: password } = await execPromise(`openssl rand -base64 12 | tr -d '
 '`);
@@ -25748,17 +27732,17 @@ mon.command("generate-grafana-password").description("generate Grafana password
     return;
   }
   let configContent = "";
-  if (
-  const stats =
+  if (fs5.existsSync(cfgPath)) {
+    const stats = fs5.statSync(cfgPath);
     if (stats.isDirectory()) {
       console.error(".pgwatch-config is a directory, expected a file. Skipping read.");
     } else {
-      configContent =
+      configContent = fs5.readFileSync(cfgPath, "utf8");
     }
   }
   const lines = configContent.split(/\r?\n/).filter((l) => !/^grafana_password=/.test(l));
   lines.push(`grafana_password=${newPassword}`);
-
+  fs5.writeFileSync(cfgPath, lines.filter(Boolean).join(`
 `) + `
 `, "utf8");
   console.log("\u2713 New Grafana password generated and saved");
@@ -25780,19 +27764,19 @@ Note: This command requires 'openssl' to be installed`);
 });
 mon.command("show-grafana-credentials").description("show Grafana credentials for monitoring services").action(async () => {
   const { projectDir } = await resolveOrInitPaths();
-  const cfgPath =
-  if (!
+  const cfgPath = path5.resolve(projectDir, ".pgwatch-config");
+  if (!fs5.existsSync(cfgPath)) {
     console.error("Configuration file not found. Run 'postgres-ai mon local-install' first.");
     process.exitCode = 1;
     return;
   }
-  const stats =
+  const stats = fs5.statSync(cfgPath);
   if (stats.isDirectory()) {
     console.error(".pgwatch-config is a directory, expected a file. Cannot read credentials.");
     process.exitCode = 1;
     return;
   }
-  const content =
+  const content = fs5.readFileSync(cfgPath, "utf8");
   const lines = content.split(/\r?\n/);
   let password = "";
   for (const line of lines) {
@@ -25976,29 +27960,29 @@ mcp.command("install [client]").description("install MCP server configuration fo
   let configDir;
   switch (client) {
     case "cursor":
-      configPath =
-      configDir =
+      configPath = path5.join(homeDir, ".cursor", "mcp.json");
+      configDir = path5.dirname(configPath);
       break;
     case "windsurf":
-      configPath =
-      configDir =
+      configPath = path5.join(homeDir, ".windsurf", "mcp.json");
+      configDir = path5.dirname(configPath);
       break;
     case "codex":
-      configPath =
-      configDir =
+      configPath = path5.join(homeDir, ".codex", "mcp.json");
+      configDir = path5.dirname(configPath);
       break;
     default:
       console.error(`Configuration not implemented for: ${client}`);
       process.exitCode = 1;
       return;
   }
-  if (!
-
+  if (!fs5.existsSync(configDir)) {
+    fs5.mkdirSync(configDir, { recursive: true });
   }
   let config2 = { mcpServers: {} };
-  if (
+  if (fs5.existsSync(configPath)) {
     try {
-      const content =
+      const content = fs5.readFileSync(configPath, "utf8");
       config2 = JSON.parse(content);
       if (!config2.mcpServers) {
         config2.mcpServers = {};
@@ -26011,7 +27995,7 @@ mcp.command("install [client]").description("install MCP server configuration fo
       command: pgaiPath,
       args: ["mcp", "start"]
     };
-
+    fs5.writeFileSync(configPath, JSON.stringify(config2, null, 2), "utf8");
     console.log(`\u2713 PostgresAI MCP server configured for ${client}`);
     console.log(` Config file: ${configPath}`);
     console.log("");
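The final write above produces a standard MCP client configuration file. The server key under `mcpServers` is assigned just outside this hunk, so it is not visible here; assuming it is registered as "postgres-ai", the resulting mcp.json would look roughly like the following, with `command` coming from the resolved `pgaiPath`:

    {
      "mcpServers": {
        "postgres-ai": {
          "command": "/usr/local/bin/pgai",
          "args": ["mcp", "start"]
        }
      }
    }

Both the "postgres-ai" key and the binary path are assumptions for illustration.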