postgresai 0.14.0-dev.8 → 0.14.0-dev.81

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96) hide show
  1. package/README.md +161 -61
  2. package/bin/postgres-ai.ts +2596 -428
  3. package/bun.lock +258 -0
  4. package/bunfig.toml +20 -0
  5. package/dist/bin/postgres-ai.js +31277 -1575
  6. package/dist/sql/01.role.sql +16 -0
  7. package/dist/sql/02.extensions.sql +8 -0
  8. package/dist/sql/03.permissions.sql +38 -0
  9. package/dist/sql/04.optional_rds.sql +6 -0
  10. package/dist/sql/05.optional_self_managed.sql +8 -0
  11. package/dist/sql/06.helpers.sql +439 -0
  12. package/dist/sql/sql/01.role.sql +16 -0
  13. package/dist/sql/sql/02.extensions.sql +8 -0
  14. package/dist/sql/sql/03.permissions.sql +38 -0
  15. package/dist/sql/sql/04.optional_rds.sql +6 -0
  16. package/dist/sql/sql/05.optional_self_managed.sql +8 -0
  17. package/dist/sql/sql/06.helpers.sql +439 -0
  18. package/dist/sql/sql/uninit/01.helpers.sql +5 -0
  19. package/dist/sql/sql/uninit/02.permissions.sql +30 -0
  20. package/dist/sql/sql/uninit/03.role.sql +27 -0
  21. package/dist/sql/uninit/01.helpers.sql +5 -0
  22. package/dist/sql/uninit/02.permissions.sql +30 -0
  23. package/dist/sql/uninit/03.role.sql +27 -0
  24. package/lib/auth-server.ts +124 -106
  25. package/lib/checkup-api.ts +386 -0
  26. package/lib/checkup-dictionary.ts +113 -0
  27. package/lib/checkup.ts +1512 -0
  28. package/lib/config.ts +6 -3
  29. package/lib/init.ts +655 -189
  30. package/lib/issues.ts +848 -193
  31. package/lib/mcp-server.ts +391 -91
  32. package/lib/metrics-loader.ts +127 -0
  33. package/lib/supabase.ts +824 -0
  34. package/lib/util.ts +61 -0
  35. package/package.json +22 -10
  36. package/packages/postgres-ai/README.md +26 -0
  37. package/packages/postgres-ai/bin/postgres-ai.js +27 -0
  38. package/packages/postgres-ai/package.json +27 -0
  39. package/scripts/embed-checkup-dictionary.ts +106 -0
  40. package/scripts/embed-metrics.ts +154 -0
  41. package/sql/01.role.sql +16 -0
  42. package/sql/02.extensions.sql +8 -0
  43. package/sql/03.permissions.sql +38 -0
  44. package/sql/04.optional_rds.sql +6 -0
  45. package/sql/05.optional_self_managed.sql +8 -0
  46. package/sql/06.helpers.sql +439 -0
  47. package/sql/uninit/01.helpers.sql +5 -0
  48. package/sql/uninit/02.permissions.sql +30 -0
  49. package/sql/uninit/03.role.sql +27 -0
  50. package/test/auth.test.ts +258 -0
  51. package/test/checkup.integration.test.ts +321 -0
  52. package/test/checkup.test.ts +1116 -0
  53. package/test/config-consistency.test.ts +36 -0
  54. package/test/init.integration.test.ts +508 -0
  55. package/test/init.test.ts +916 -0
  56. package/test/issues.cli.test.ts +538 -0
  57. package/test/issues.test.ts +456 -0
  58. package/test/mcp-server.test.ts +1527 -0
  59. package/test/schema-validation.test.ts +81 -0
  60. package/test/supabase.test.ts +568 -0
  61. package/test/test-utils.ts +128 -0
  62. package/tsconfig.json +12 -20
  63. package/dist/bin/postgres-ai.d.ts +0 -3
  64. package/dist/bin/postgres-ai.d.ts.map +0 -1
  65. package/dist/bin/postgres-ai.js.map +0 -1
  66. package/dist/lib/auth-server.d.ts +0 -31
  67. package/dist/lib/auth-server.d.ts.map +0 -1
  68. package/dist/lib/auth-server.js +0 -263
  69. package/dist/lib/auth-server.js.map +0 -1
  70. package/dist/lib/config.d.ts +0 -45
  71. package/dist/lib/config.d.ts.map +0 -1
  72. package/dist/lib/config.js +0 -181
  73. package/dist/lib/config.js.map +0 -1
  74. package/dist/lib/init.d.ts +0 -64
  75. package/dist/lib/init.d.ts.map +0 -1
  76. package/dist/lib/init.js +0 -399
  77. package/dist/lib/init.js.map +0 -1
  78. package/dist/lib/issues.d.ts +0 -75
  79. package/dist/lib/issues.d.ts.map +0 -1
  80. package/dist/lib/issues.js +0 -336
  81. package/dist/lib/issues.js.map +0 -1
  82. package/dist/lib/mcp-server.d.ts +0 -9
  83. package/dist/lib/mcp-server.d.ts.map +0 -1
  84. package/dist/lib/mcp-server.js +0 -168
  85. package/dist/lib/mcp-server.js.map +0 -1
  86. package/dist/lib/pkce.d.ts +0 -32
  87. package/dist/lib/pkce.d.ts.map +0 -1
  88. package/dist/lib/pkce.js +0 -101
  89. package/dist/lib/pkce.js.map +0 -1
  90. package/dist/lib/util.d.ts +0 -27
  91. package/dist/lib/util.d.ts.map +0 -1
  92. package/dist/lib/util.js +0 -46
  93. package/dist/lib/util.js.map +0 -1
  94. package/dist/package.json +0 -46
  95. package/test/init.integration.test.cjs +0 -269
  96. package/test/init.test.cjs +0 -76
@@ -0,0 +1,1116 @@
1
+ import { describe, test, expect } from "bun:test";
2
+ import { resolve } from "path";
3
+
4
+ // Import from source directly since we're using Bun
5
+ import * as checkup from "../lib/checkup";
6
+ import * as api from "../lib/checkup-api";
7
+ import { createMockClient } from "./test-utils";
8
+
9
+
10
+ function runCli(args: string[], env: Record<string, string> = {}) {
11
+ const cliPath = resolve(import.meta.dir, "..", "bin", "postgres-ai.ts");
12
+ const bunBin = typeof process.execPath === "string" && process.execPath.length > 0 ? process.execPath : "bun";
13
+ const result = Bun.spawnSync([bunBin, cliPath, ...args], {
14
+ env: { ...process.env, ...env },
15
+ });
16
+ return {
17
+ status: result.exitCode,
18
+ stdout: new TextDecoder().decode(result.stdout),
19
+ stderr: new TextDecoder().decode(result.stderr),
20
+ };
21
+ }
22
+
23
+ // Unit tests for parseVersionNum
24
+ describe("parseVersionNum", () => {
25
+ test("parses PG 16.3 version number", () => {
26
+ const result = checkup.parseVersionNum("160003");
27
+ expect(result.major).toBe("16");
28
+ expect(result.minor).toBe("3");
29
+ });
30
+
31
+ test("parses PG 15.7 version number", () => {
32
+ const result = checkup.parseVersionNum("150007");
33
+ expect(result.major).toBe("15");
34
+ expect(result.minor).toBe("7");
35
+ });
36
+
37
+ test("parses PG 14.12 version number", () => {
38
+ const result = checkup.parseVersionNum("140012");
39
+ expect(result.major).toBe("14");
40
+ expect(result.minor).toBe("12");
41
+ });
42
+
43
+ test("handles empty string", () => {
44
+ const result = checkup.parseVersionNum("");
45
+ expect(result.major).toBe("");
46
+ expect(result.minor).toBe("");
47
+ });
48
+
49
+ test("handles null/undefined", () => {
50
+ const result = checkup.parseVersionNum(null as any);
51
+ expect(result.major).toBe("");
52
+ expect(result.minor).toBe("");
53
+ });
54
+
55
+ test("handles short string", () => {
56
+ const result = checkup.parseVersionNum("123");
57
+ expect(result.major).toBe("");
58
+ expect(result.minor).toBe("");
59
+ });
60
+ });
61
+
62
+ // Unit tests for createBaseReport
63
+ describe("createBaseReport", () => {
64
+ test("creates correct structure", () => {
65
+ const report = checkup.createBaseReport("A002", "Postgres major version", "test-node");
66
+
67
+ expect(report.checkId).toBe("A002");
68
+ expect(report.checkTitle).toBe("Postgres major version");
69
+ expect(typeof report.version).toBe("string");
70
+ expect(report.version!.length).toBeGreaterThan(0);
71
+ expect(typeof report.build_ts).toBe("string");
72
+ expect(report.nodes.primary).toBe("test-node");
73
+ expect(report.nodes.standbys).toEqual([]);
74
+ expect(report.results).toEqual({});
75
+ expect(typeof report.timestamptz).toBe("string");
76
+ // Verify timestamp is ISO format
77
+ expect(new Date(report.timestamptz).toISOString()).toBe(report.timestamptz);
78
+ });
79
+
80
+ test("uses provided node name", () => {
81
+ const report = checkup.createBaseReport("A003", "Postgres settings", "my-custom-node");
82
+ expect(report.nodes.primary).toBe("my-custom-node");
83
+ });
84
+ });
85
+
86
+ // Tests for CHECK_INFO
87
+ describe("CHECK_INFO and REPORT_GENERATORS", () => {
88
+ // Express-mode checks that have generators
89
+ const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D001", "D004", "F001", "G001", "G003", "H001", "H002", "H004"];
90
+
91
+ test("CHECK_INFO contains all express-mode checks", () => {
92
+ for (const checkId of expressCheckIds) {
93
+ expect(checkup.CHECK_INFO[checkId]).toBeDefined();
94
+ expect(typeof checkup.CHECK_INFO[checkId]).toBe("string");
95
+ expect(checkup.CHECK_INFO[checkId].length).toBeGreaterThan(0);
96
+ }
97
+ });
98
+
99
+ test("CHECK_INFO titles are loaded from embedded dictionary", () => {
100
+ // Verify a few known titles match the API dictionary
101
+ // These are canonical titles from postgres.ai/api/general/checkup_dictionary
102
+ expect(checkup.CHECK_INFO["A002"]).toBe("Postgres major version");
103
+ expect(checkup.CHECK_INFO["H001"]).toBe("Invalid indexes");
104
+ expect(checkup.CHECK_INFO["H002"]).toBe("Unused indexes");
105
+ });
106
+
107
+ test("REPORT_GENERATORS has function for each check", () => {
108
+ for (const checkId of expressCheckIds) {
109
+ expect(typeof checkup.REPORT_GENERATORS[checkId]).toBe("function");
110
+ }
111
+ });
112
+
113
+ test("REPORT_GENERATORS and CHECK_INFO have same keys", () => {
114
+ const generatorKeys = Object.keys(checkup.REPORT_GENERATORS).sort();
115
+ const infoKeys = Object.keys(checkup.CHECK_INFO).sort();
116
+ expect(generatorKeys).toEqual(infoKeys);
117
+ });
118
+ });
119
+
120
+ // Tests for formatBytes
121
+ describe("formatBytes", () => {
122
+ test("formats zero bytes", () => {
123
+ expect(checkup.formatBytes(0)).toBe("0 B");
124
+ });
125
+
126
+ test("formats bytes", () => {
127
+ expect(checkup.formatBytes(500)).toBe("500.00 B");
128
+ });
129
+
130
+ test("formats kibibytes", () => {
131
+ expect(checkup.formatBytes(1024)).toBe("1.00 KiB");
132
+ expect(checkup.formatBytes(1536)).toBe("1.50 KiB");
133
+ });
134
+
135
+ test("formats mebibytes", () => {
136
+ expect(checkup.formatBytes(1048576)).toBe("1.00 MiB");
137
+ });
138
+
139
+ test("formats gibibytes", () => {
140
+ expect(checkup.formatBytes(1073741824)).toBe("1.00 GiB");
141
+ });
142
+
143
+ test("handles negative bytes", () => {
144
+ expect(checkup.formatBytes(-1024)).toBe("-1.00 KiB");
145
+ expect(checkup.formatBytes(-1048576)).toBe("-1.00 MiB");
146
+ });
147
+
148
+ test("handles edge cases", () => {
149
+ expect(checkup.formatBytes(NaN)).toBe("NaN B");
150
+ expect(checkup.formatBytes(Infinity)).toBe("Infinity B");
151
+ });
152
+ });
153
+
154
+ // Mock client tests for report generators
155
+ describe("Report generators with mock client", () => {
156
+ test("getPostgresVersion extracts version info", async () => {
157
+ const mockClient = createMockClient({
158
+ versionRows: [
159
+ { name: "server_version", setting: "16.3" },
160
+ { name: "server_version_num", setting: "160003" },
161
+ ],
162
+ });
163
+
164
+ const version = await checkup.getPostgresVersion(mockClient as any);
165
+ expect(version.version).toBe("16.3");
166
+ expect(version.server_version_num).toBe("160003");
167
+ expect(version.server_major_ver).toBe("16");
168
+ expect(version.server_minor_ver).toBe("3");
169
+ });
170
+
171
+ test("getSettings transforms rows to keyed object", async () => {
172
+ const mockClient = createMockClient({
173
+ settingsRows: [
174
+ {
175
+ tag_setting_name: "shared_buffers",
176
+ tag_setting_value: "16384",
177
+ tag_unit: "8kB",
178
+ tag_category: "Resource Usage / Memory",
179
+ tag_vartype: "integer",
180
+ is_default: 1,
181
+ setting_normalized: "134217728", // 16384 * 8192
182
+ unit_normalized: "bytes",
183
+ },
184
+ {
185
+ tag_setting_name: "work_mem",
186
+ tag_setting_value: "4096",
187
+ tag_unit: "kB",
188
+ tag_category: "Resource Usage / Memory",
189
+ tag_vartype: "integer",
190
+ is_default: 1,
191
+ setting_normalized: "4194304", // 4096 * 1024
192
+ unit_normalized: "bytes",
193
+ },
194
+ ],
195
+ });
196
+
197
+ const settings = await checkup.getSettings(mockClient as any);
198
+ expect("shared_buffers" in settings).toBe(true);
199
+ expect("work_mem" in settings).toBe(true);
200
+ expect(settings.shared_buffers.setting).toBe("16384");
201
+ expect(settings.shared_buffers.unit).toBe("8kB");
202
+ // pretty_value is now computed from setting_normalized
203
+ expect(settings.shared_buffers.pretty_value).toBe("128.00 MiB");
204
+ expect(settings.work_mem.pretty_value).toBe("4.00 MiB");
205
+ });
206
+
207
+ test("generateA002 creates report with version data", async () => {
208
+ const mockClient = createMockClient({
209
+ versionRows: [
210
+ { name: "server_version", setting: "16.3" },
211
+ { name: "server_version_num", setting: "160003" },
212
+ ],
213
+ });
214
+
215
+ const report = await checkup.generateA002(mockClient as any, "test-node");
216
+ expect(report.checkId).toBe("A002");
217
+ expect(report.checkTitle).toBe("Postgres major version");
218
+ expect(report.nodes.primary).toBe("test-node");
219
+ expect("test-node" in report.results).toBe(true);
220
+ expect("version" in report.results["test-node"].data).toBe(true);
221
+ expect(report.results["test-node"].data.version.version).toBe("16.3");
222
+ });
223
+
224
+ test("generateA003 creates report with settings and version", async () => {
225
+ const mockClient = createMockClient({
226
+ versionRows: [
227
+ { name: "server_version", setting: "16.3" },
228
+ { name: "server_version_num", setting: "160003" },
229
+ ],
230
+ settingsRows: [
231
+ {
232
+ tag_setting_name: "shared_buffers",
233
+ tag_setting_value: "16384",
234
+ tag_unit: "8kB",
235
+ tag_category: "Resource Usage / Memory",
236
+ tag_vartype: "integer",
237
+ is_default: 1,
238
+ setting_normalized: "134217728",
239
+ unit_normalized: "bytes",
240
+ },
241
+ ],
242
+ });
243
+
244
+ const report = await checkup.generateA003(mockClient as any, "test-node");
245
+ expect(report.checkId).toBe("A003");
246
+ expect(report.checkTitle).toBe("Postgres settings");
247
+ expect("test-node" in report.results).toBe(true);
248
+ expect("shared_buffers" in report.results["test-node"].data).toBe(true);
249
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
250
+ expect(report.results["test-node"].postgres_version!.version).toBe("16.3");
251
+ });
252
+
253
+ test("generateA013 creates report with minor version data", async () => {
254
+ const mockClient = createMockClient({
255
+ versionRows: [
256
+ { name: "server_version", setting: "16.3" },
257
+ { name: "server_version_num", setting: "160003" },
258
+ ],
259
+ });
260
+
261
+ const report = await checkup.generateA013(mockClient as any, "test-node");
262
+ expect(report.checkId).toBe("A013");
263
+ expect(report.checkTitle).toBe("Postgres minor version");
264
+ expect(report.nodes.primary).toBe("test-node");
265
+ expect("test-node" in report.results).toBe(true);
266
+ expect("version" in report.results["test-node"].data).toBe(true);
267
+ expect(report.results["test-node"].data.version.server_minor_ver).toBe("3");
268
+ });
269
+
270
+ test("generateAllReports returns reports for all checks", async () => {
271
+ const mockClient = createMockClient({
272
+ versionRows: [
273
+ { name: "server_version", setting: "16.3" },
274
+ { name: "server_version_num", setting: "160003" },
275
+ ],
276
+ settingsRows: [
277
+ {
278
+ tag_setting_name: "shared_buffers",
279
+ tag_setting_value: "16384",
280
+ tag_unit: "8kB",
281
+ tag_category: "Resource Usage / Memory",
282
+ tag_vartype: "integer",
283
+ is_default: 0, // Non-default for A007
284
+ setting_normalized: "134217728",
285
+ unit_normalized: "bytes",
286
+ },
287
+ ],
288
+ databaseSizesRows: [{ datname: "postgres", size_bytes: "1073741824" }],
289
+ dbStatsRows: [{
290
+ numbackends: 5,
291
+ xact_commit: 100,
292
+ xact_rollback: 1,
293
+ blks_read: 1000,
294
+ blks_hit: 9000,
295
+ tup_returned: 500,
296
+ tup_fetched: 400,
297
+ tup_inserted: 50,
298
+ tup_updated: 30,
299
+ tup_deleted: 10,
300
+ deadlocks: 0,
301
+ temp_files: 0,
302
+ temp_bytes: 0,
303
+ postmaster_uptime_s: 864000
304
+ }],
305
+ connectionStatesRows: [{ state: "active", count: 2 }, { state: "idle", count: 3 }],
306
+ uptimeRows: [{ start_time: new Date("2024-01-01T00:00:00Z"), uptime: "10 days" }],
307
+ invalidIndexesRows: [],
308
+ unusedIndexesRows: [],
309
+ redundantIndexesRows: [],
310
+ sensitiveColumnsRows: [],
311
+ }
312
+ );
313
+
314
+ const reports = await checkup.generateAllReports(mockClient as any, "test-node");
315
+ expect("A002" in reports).toBe(true);
316
+ expect("A003" in reports).toBe(true);
317
+ expect("A004" in reports).toBe(true);
318
+ expect("A007" in reports).toBe(true);
319
+ expect("A013" in reports).toBe(true);
320
+ expect("H001" in reports).toBe(true);
321
+ expect("H002" in reports).toBe(true);
322
+ expect("H004" in reports).toBe(true);
323
+ // S001 is only available in Python reporter, not in CLI express mode
324
+ expect(reports.A002.checkId).toBe("A002");
325
+ expect(reports.A003.checkId).toBe("A003");
326
+ expect(reports.A004.checkId).toBe("A004");
327
+ expect(reports.A007.checkId).toBe("A007");
328
+ expect(reports.A013.checkId).toBe("A013");
329
+ expect(reports.H001.checkId).toBe("H001");
330
+ expect(reports.H002.checkId).toBe("H002");
331
+ expect(reports.H004.checkId).toBe("H004");
332
+ });
333
+ });
334
+
335
+ // Tests for A007 (Altered settings)
336
+ describe("A007 - Altered settings", () => {
337
+ test("getAlteredSettings returns non-default settings", async () => {
338
+ const mockClient = createMockClient({
339
+ settingsRows: [
340
+ { tag_setting_name: "shared_buffers", tag_setting_value: "256MB", tag_unit: "", tag_category: "Resource Usage / Memory", tag_vartype: "string", is_default: 0, setting_normalized: null, unit_normalized: null },
341
+ { tag_setting_name: "work_mem", tag_setting_value: "64MB", tag_unit: "", tag_category: "Resource Usage / Memory", tag_vartype: "string", is_default: 0, setting_normalized: null, unit_normalized: null },
342
+ { tag_setting_name: "default_setting", tag_setting_value: "on", tag_unit: "", tag_category: "Other", tag_vartype: "bool", is_default: 1, setting_normalized: null, unit_normalized: null },
343
+ ],
344
+ });
345
+
346
+ const settings = await checkup.getAlteredSettings(mockClient as any);
347
+ expect("shared_buffers" in settings).toBe(true);
348
+ expect("work_mem" in settings).toBe(true);
349
+ expect("default_setting" in settings).toBe(false); // Should be filtered out
350
+ expect(settings.shared_buffers.value).toBe("256MB");
351
+ expect(settings.work_mem.value).toBe("64MB");
352
+ });
353
+
354
+ test("generateA007 creates report with altered settings", async () => {
355
+ const mockClient = createMockClient({
356
+ versionRows: [
357
+ { name: "server_version", setting: "16.3" },
358
+ { name: "server_version_num", setting: "160003" },
359
+ ],
360
+ settingsRows: [
361
+ { tag_setting_name: "max_connections", tag_setting_value: "200", tag_unit: "", tag_category: "Connections and Authentication", tag_vartype: "integer", is_default: 0, setting_normalized: null, unit_normalized: null },
362
+ ],
363
+ }
364
+ );
365
+
366
+ const report = await checkup.generateA007(mockClient as any, "test-node");
367
+ expect(report.checkId).toBe("A007");
368
+ expect(report.checkTitle).toBe("Altered settings");
369
+ expect(report.nodes.primary).toBe("test-node");
370
+ expect("test-node" in report.results).toBe(true);
371
+ expect("max_connections" in report.results["test-node"].data).toBe(true);
372
+ expect(report.results["test-node"].data.max_connections.value).toBe("200");
373
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
374
+ });
375
+ });
376
+
377
+ // Tests for A004 (Cluster information)
378
+ describe("A004 - Cluster information", () => {
379
+ test("getDatabaseSizes returns database sizes", async () => {
380
+ const mockClient = createMockClient({
381
+ databaseSizesRows: [
382
+ { datname: "postgres", size_bytes: "1073741824" },
383
+ { datname: "mydb", size_bytes: "536870912" },
384
+ ],
385
+ });
386
+
387
+ const sizes = await checkup.getDatabaseSizes(mockClient as any);
388
+ expect("postgres" in sizes).toBe(true);
389
+ expect("mydb" in sizes).toBe(true);
390
+ expect(sizes.postgres).toBe(1073741824);
391
+ expect(sizes.mydb).toBe(536870912);
392
+ });
393
+
394
+ test("getClusterInfo returns cluster metrics", async () => {
395
+ const mockClient = createMockClient({
396
+ dbStatsRows: [{
397
+ numbackends: 10,
398
+ xact_commit: 1000,
399
+ xact_rollback: 5,
400
+ blks_read: 500,
401
+ blks_hit: 9500,
402
+ tup_returned: 5000,
403
+ tup_fetched: 4000,
404
+ tup_inserted: 100,
405
+ tup_updated: 50,
406
+ tup_deleted: 25,
407
+ deadlocks: 0,
408
+ temp_files: 2,
409
+ temp_bytes: 1048576,
410
+ postmaster_uptime_s: 2592000, // 30 days
411
+ }],
412
+ connectionStatesRows: [
413
+ { state: "active", count: 3 },
414
+ { state: "idle", count: 7 },
415
+ ],
416
+ uptimeRows: [{
417
+ start_time: new Date("2024-01-01T00:00:00Z"),
418
+ uptime: "30 days",
419
+ }],
420
+ });
421
+
422
+ const info = await checkup.getClusterInfo(mockClient as any);
423
+ expect("total_connections" in info).toBe(true);
424
+ expect("cache_hit_ratio" in info).toBe(true);
425
+ expect("connections_active" in info).toBe(true);
426
+ expect("connections_idle" in info).toBe(true);
427
+ expect("start_time" in info).toBe(true);
428
+ expect(info.total_connections.value).toBe("10");
429
+ expect(info.cache_hit_ratio.value).toBe("95.00");
430
+ expect(info.connections_active.value).toBe("3");
431
+ });
432
+
433
+ test("generateA004 creates report with cluster info and database sizes", async () => {
434
+ const mockClient = createMockClient({
435
+ versionRows: [
436
+ { name: "server_version", setting: "16.3" },
437
+ { name: "server_version_num", setting: "160003" },
438
+ ],
439
+ databaseSizesRows: [
440
+ { datname: "postgres", size_bytes: "1073741824" },
441
+ ],
442
+ dbStatsRows: [{
443
+ numbackends: 5,
444
+ xact_commit: 100,
445
+ xact_rollback: 1,
446
+ blks_read: 100,
447
+ blks_hit: 900,
448
+ tup_returned: 500,
449
+ tup_fetched: 400,
450
+ tup_inserted: 50,
451
+ tup_updated: 30,
452
+ tup_deleted: 10,
453
+ deadlocks: 0,
454
+ temp_files: 0,
455
+ temp_bytes: 0,
456
+ postmaster_uptime_s: 864000,
457
+ }],
458
+ connectionStatesRows: [{ state: "active", count: 2 }],
459
+ uptimeRows: [{ start_time: new Date("2024-01-01T00:00:00Z"), uptime: "10 days" }],
460
+ }
461
+ );
462
+
463
+ const report = await checkup.generateA004(mockClient as any, "test-node");
464
+ expect(report.checkId).toBe("A004");
465
+ expect(report.checkTitle).toBe("Cluster information");
466
+ expect(report.nodes.primary).toBe("test-node");
467
+ expect("test-node" in report.results).toBe(true);
468
+
469
+ const data = report.results["test-node"].data;
470
+ expect("general_info" in data).toBe(true);
471
+ expect("database_sizes" in data).toBe(true);
472
+ expect("total_connections" in data.general_info).toBe(true);
473
+ expect("postgres" in data.database_sizes).toBe(true);
474
+ expect(data.database_sizes.postgres).toBe(1073741824);
475
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
476
+ });
477
+ });
478
+
479
+ // Tests for H001 (Invalid indexes)
480
+ describe("H001 - Invalid indexes", () => {
481
+ test("getInvalidIndexes returns invalid indexes", async () => {
482
+ const mockClient = createMockClient({
483
+ invalidIndexesRows: [
484
+ { schema_name: "public", table_name: "users", index_name: "users_email_idx", relation_name: "users", index_size_bytes: "1048576", index_definition: "CREATE INDEX users_email_idx ON public.users USING btree (email)", supports_fk: false },
485
+ ],
486
+ });
487
+
488
+ const indexes = await checkup.getInvalidIndexes(mockClient as any);
489
+ expect(indexes.length).toBe(1);
490
+ expect(indexes[0].schema_name).toBe("public");
491
+ expect(indexes[0].table_name).toBe("users");
492
+ expect(indexes[0].index_name).toBe("users_email_idx");
493
+ expect(indexes[0].index_size_bytes).toBe(1048576);
494
+ expect(indexes[0].index_size_pretty).toBeTruthy();
495
+ expect(indexes[0].index_definition).toMatch(/^CREATE INDEX/);
496
+ expect(indexes[0].relation_name).toBe("users");
497
+ expect(indexes[0].supports_fk).toBe(false);
498
+ });
499
+
500
+ test("generateH001 creates report with invalid indexes", async () => {
501
+ const mockClient = createMockClient({
502
+ versionRows: [
503
+ { name: "server_version", setting: "16.3" },
504
+ { name: "server_version_num", setting: "160003" },
505
+ ],
506
+ invalidIndexesRows: [
507
+ { schema_name: "public", table_name: "orders", index_name: "orders_status_idx", relation_name: "orders", index_size_bytes: "2097152", index_definition: "CREATE INDEX orders_status_idx ON public.orders USING btree (status)", supports_fk: false },
508
+ ],
509
+ }
510
+ );
511
+
512
+ const report = await checkup.generateH001(mockClient as any, "test-node");
513
+ expect(report.checkId).toBe("H001");
514
+ expect(report.checkTitle).toBe("Invalid indexes");
515
+ expect("test-node" in report.results).toBe(true);
516
+
517
+ // Data is now keyed by database name
518
+ const data = report.results["test-node"].data;
519
+ expect("testdb" in data).toBe(true);
520
+ const dbData = data["testdb"] as any;
521
+ expect(dbData.invalid_indexes).toBeTruthy();
522
+ expect(dbData.total_count).toBe(1);
523
+ expect(dbData.total_size_bytes).toBe(2097152);
524
+ expect(dbData.total_size_pretty).toBeTruthy();
525
+ expect(dbData.database_size_bytes).toBeTruthy();
526
+ expect(dbData.database_size_pretty).toBeTruthy();
527
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
528
+ });
529
+
530
+ test("getInvalidIndexes returns decision tree fields including valid_duplicate_definition", async () => {
531
+ const mockClient = createMockClient({
532
+ invalidIndexesRows: [
533
+ {
534
+ schema_name: "public",
535
+ table_name: "users",
536
+ index_name: "users_email_idx_invalid",
537
+ relation_name: "users",
538
+ index_size_bytes: "1048576",
539
+ index_definition: "CREATE INDEX users_email_idx_invalid ON public.users USING btree (email)",
540
+ supports_fk: false,
541
+ is_pk: false,
542
+ is_unique: false,
543
+ constraint_name: null,
544
+ table_row_estimate: "5000",
545
+ has_valid_duplicate: true,
546
+ valid_index_name: "users_email_idx",
547
+ valid_index_definition: "CREATE INDEX users_email_idx ON public.users USING btree (email)",
548
+ },
549
+ ],
550
+ });
551
+
552
+ const indexes = await checkup.getInvalidIndexes(mockClient as any);
553
+ expect(indexes.length).toBe(1);
554
+ expect(indexes[0].is_pk).toBe(false);
555
+ expect(indexes[0].is_unique).toBe(false);
556
+ expect(indexes[0].constraint_name).toBeNull();
557
+ expect(indexes[0].table_row_estimate).toBe(5000);
558
+ expect(indexes[0].has_valid_duplicate).toBe(true);
559
+ expect(indexes[0].valid_duplicate_name).toBe("users_email_idx");
560
+ expect(indexes[0].valid_duplicate_definition).toBe("CREATE INDEX users_email_idx ON public.users USING btree (email)");
561
+ });
562
+
563
+ test("getInvalidIndexes handles has_valid_duplicate: false with null values", async () => {
564
+ const mockClient = createMockClient({
565
+ invalidIndexesRows: [
566
+ {
567
+ schema_name: "public",
568
+ table_name: "orders",
569
+ index_name: "orders_status_idx_invalid",
570
+ relation_name: "orders",
571
+ index_size_bytes: "524288",
572
+ index_definition: "CREATE INDEX orders_status_idx_invalid ON public.orders USING btree (status)",
573
+ supports_fk: false,
574
+ is_pk: false,
575
+ is_unique: false,
576
+ constraint_name: null,
577
+ table_row_estimate: "100000",
578
+ has_valid_duplicate: false,
579
+ valid_index_name: null,
580
+ valid_index_definition: null,
581
+ },
582
+ ],
583
+ });
584
+
585
+ const indexes = await checkup.getInvalidIndexes(mockClient as any);
586
+ expect(indexes.length).toBe(1);
587
+ expect(indexes[0].has_valid_duplicate).toBe(false);
588
+ expect(indexes[0].valid_duplicate_name).toBeNull();
589
+ expect(indexes[0].valid_duplicate_definition).toBeNull();
590
+ });
591
+
592
+ test("getInvalidIndexes handles is_pk: true with constraint", async () => {
593
+ const mockClient = createMockClient({
594
+ invalidIndexesRows: [
595
+ {
596
+ schema_name: "public",
597
+ table_name: "accounts",
598
+ index_name: "accounts_pkey_invalid",
599
+ relation_name: "accounts",
600
+ index_size_bytes: "262144",
601
+ index_definition: "CREATE UNIQUE INDEX accounts_pkey_invalid ON public.accounts USING btree (id)",
602
+ supports_fk: true,
603
+ is_pk: true,
604
+ is_unique: true,
605
+ constraint_name: "accounts_pkey",
606
+ table_row_estimate: "500",
607
+ has_valid_duplicate: false,
608
+ valid_index_name: null,
609
+ valid_index_definition: null,
610
+ },
611
+ ],
612
+ });
613
+
614
+ const indexes = await checkup.getInvalidIndexes(mockClient as any);
615
+ expect(indexes.length).toBe(1);
616
+ expect(indexes[0].is_pk).toBe(true);
617
+ expect(indexes[0].is_unique).toBe(true);
618
+ expect(indexes[0].constraint_name).toBe("accounts_pkey");
619
+ expect(indexes[0].supports_fk).toBe(true);
620
+ });
621
+
622
+ test("getInvalidIndexes handles is_unique: true without PK", async () => {
623
+ const mockClient = createMockClient({
624
+ invalidIndexesRows: [
625
+ {
626
+ schema_name: "public",
627
+ table_name: "users",
628
+ index_name: "users_email_unique_invalid",
629
+ relation_name: "users",
630
+ index_size_bytes: "131072",
631
+ index_definition: "CREATE UNIQUE INDEX users_email_unique_invalid ON public.users USING btree (email)",
632
+ supports_fk: false,
633
+ is_pk: false,
634
+ is_unique: true,
635
+ constraint_name: "users_email_unique",
636
+ table_row_estimate: "25000",
637
+ has_valid_duplicate: true,
638
+ valid_index_name: "users_email_unique_idx",
639
+ valid_index_definition: "CREATE UNIQUE INDEX users_email_unique_idx ON public.users USING btree (email)",
640
+ },
641
+ ],
642
+ });
643
+
644
+ const indexes = await checkup.getInvalidIndexes(mockClient as any);
645
+ expect(indexes.length).toBe(1);
646
+ expect(indexes[0].is_pk).toBe(false);
647
+ expect(indexes[0].is_unique).toBe(true);
648
+ expect(indexes[0].constraint_name).toBe("users_email_unique");
649
+ expect(indexes[0].has_valid_duplicate).toBe(true);
650
+ });
651
+ // Top-level structure tests removed - covered by schema-validation.test.ts
652
+ });
653
+
654
+ // Tests for H001 decision tree recommendation logic
655
+ describe("H001 - Decision tree recommendations", () => {
656
+ // Helper to create a minimal InvalidIndex for testing
657
+ const createTestIndex = (overrides: Partial<checkup.InvalidIndex> = {}): checkup.InvalidIndex => ({
658
+ schema_name: "public",
659
+ table_name: "test_table",
660
+ index_name: "test_idx",
661
+ relation_name: "public.test_table",
662
+ index_size_bytes: 1024,
663
+ index_size_pretty: "1 KiB",
664
+ index_definition: "CREATE INDEX test_idx ON public.test_table USING btree (col)",
665
+ supports_fk: false,
666
+ is_pk: false,
667
+ is_unique: false,
668
+ constraint_name: null,
669
+ table_row_estimate: 100000, // Large table by default
670
+ has_valid_duplicate: false,
671
+ valid_duplicate_name: null,
672
+ valid_duplicate_definition: null,
673
+ ...overrides,
674
+ });
675
+
676
+ test("returns DROP when has_valid_duplicate is true", () => {
677
+ const index = createTestIndex({ has_valid_duplicate: true, valid_duplicate_name: "existing_idx" });
678
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
679
+ });
680
+
681
+ test("returns DROP even when is_pk is true if has_valid_duplicate is true", () => {
682
+ // has_valid_duplicate takes precedence over is_pk
683
+ const index = createTestIndex({
684
+ has_valid_duplicate: true,
685
+ is_pk: true,
686
+ is_unique: true,
687
+ });
688
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
689
+ });
690
+
691
+ test("returns RECREATE when is_pk is true and no valid duplicate", () => {
692
+ const index = createTestIndex({
693
+ is_pk: true,
694
+ is_unique: true,
695
+ constraint_name: "test_pkey",
696
+ });
697
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
698
+ });
699
+
700
+ test("returns RECREATE when is_unique is true (non-PK) and no valid duplicate", () => {
701
+ const index = createTestIndex({
702
+ is_unique: true,
703
+ constraint_name: "test_unique",
704
+ });
705
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
706
+ });
707
+
708
+ test("returns RECREATE for small table (< 10K rows) without valid duplicate", () => {
709
+ const index = createTestIndex({ table_row_estimate: 5000 });
710
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
711
+ });
712
+
713
+ test("returns RECREATE for table at threshold boundary (9999 rows)", () => {
714
+ const index = createTestIndex({ table_row_estimate: 9999 });
715
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
716
+ });
717
+
718
+ test("returns UNCERTAIN for large table (>= 10K rows) at threshold boundary", () => {
719
+ const index = createTestIndex({ table_row_estimate: 10000 });
720
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("UNCERTAIN");
721
+ });
722
+
723
+ test("returns UNCERTAIN for large table without valid duplicate or constraint", () => {
724
+ const index = createTestIndex({ table_row_estimate: 1000000 });
725
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("UNCERTAIN");
726
+ });
727
+
728
+ test("returns UNCERTAIN for empty table (0 rows) with no valid duplicate - edge case", () => {
729
+ // Empty table should be RECREATE (< 10K threshold)
730
+ const index = createTestIndex({ table_row_estimate: 0 });
731
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
732
+ });
733
+
734
+ test("decision tree priority: has_valid_duplicate > is_pk > small_table", () => {
735
+ // Even with PK and small table, has_valid_duplicate should win
736
+ const index = createTestIndex({
737
+ has_valid_duplicate: true,
738
+ is_pk: true,
739
+ is_unique: true,
740
+ table_row_estimate: 100,
741
+ });
742
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
743
+ });
744
+
745
+ test("decision tree priority: is_pk > small_table", () => {
746
+ // is_pk should return RECREATE regardless of table size
747
+ const index = createTestIndex({
748
+ is_pk: true,
749
+ is_unique: true,
750
+ table_row_estimate: 1000000, // Large table
751
+ });
752
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
753
+ });
754
+ });
755
+
756
+ // Tests for H002 (Unused indexes)
757
+ describe("H002 - Unused indexes", () => {
758
+ test("getUnusedIndexes returns unused indexes", async () => {
759
+ const mockClient = createMockClient({
760
+ unusedIndexesRows: [
761
+ {
762
+ schema_name: "public",
763
+ table_name: "products",
764
+ index_name: "products_old_idx",
765
+ index_definition: "CREATE INDEX products_old_idx ON public.products USING btree (old_column)",
766
+ reason: "Never Used Indexes",
767
+ index_size_bytes: "4194304",
768
+ idx_scan: "0",
769
+ idx_is_btree: true,
770
+ supports_fk: false,
771
+ },
772
+ ],
773
+ });
774
+
775
+ const indexes = await checkup.getUnusedIndexes(mockClient as any);
776
+ expect(indexes.length).toBe(1);
777
+ expect(indexes[0].schema_name).toBe("public");
778
+ expect(indexes[0].index_name).toBe("products_old_idx");
779
+ expect(indexes[0].index_size_bytes).toBe(4194304);
780
+ expect(indexes[0].idx_scan).toBe(0);
781
+ expect(indexes[0].supports_fk).toBe(false);
782
+ expect(indexes[0].index_definition).toBeTruthy();
783
+ expect(indexes[0].idx_is_btree).toBe(true);
784
+ });
785
+
786
+ test("generateH002 creates report with unused indexes", async () => {
787
+ const mockClient = createMockClient({
788
+ versionRows: [
789
+ { name: "server_version", setting: "16.3" },
790
+ { name: "server_version_num", setting: "160003" },
791
+ ],
792
+ unusedIndexesRows: [
793
+ {
794
+ schema_name: "public",
795
+ table_name: "logs",
796
+ index_name: "logs_created_idx",
797
+ index_definition: "CREATE INDEX logs_created_idx ON public.logs USING btree (created_at)",
798
+ reason: "Never Used Indexes",
799
+ index_size_bytes: "8388608",
800
+ idx_scan: "0",
801
+ idx_is_btree: true,
802
+ supports_fk: false,
803
+ },
804
+ ],
805
+ }
806
+ );
807
+
808
+ const report = await checkup.generateH002(mockClient as any, "test-node");
809
+ expect(report.checkId).toBe("H002");
810
+ expect(report.checkTitle).toBe("Unused indexes");
811
+ expect("test-node" in report.results).toBe(true);
812
+
813
+ // Data is now keyed by database name
814
+ const data = report.results["test-node"].data;
815
+ expect("testdb" in data).toBe(true);
816
+ const dbData = data["testdb"] as any;
817
+ expect(dbData.unused_indexes).toBeTruthy();
818
+ expect(dbData.total_count).toBe(1);
819
+ expect(dbData.total_size_bytes).toBe(8388608);
820
+ expect(dbData.total_size_pretty).toBeTruthy();
821
+ expect(dbData.stats_reset).toBeTruthy();
822
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
823
+ });
824
+ // Top-level structure tests removed - covered by schema-validation.test.ts
825
+ });
826
+
827
+ // Tests for H004 (Redundant indexes)
828
+ describe("H004 - Redundant indexes", () => {
829
+ test("getRedundantIndexes returns redundant indexes", async () => {
830
+ const mockClient = createMockClient({
831
+ redundantIndexesRows: [
832
+ {
833
+ schema_name: "public",
834
+ table_name: "orders",
835
+ index_name: "orders_user_id_idx",
836
+ relation_name: "orders",
837
+ access_method: "btree",
838
+ reason: "public.orders_user_id_created_idx",
839
+ index_size_bytes: "2097152",
840
+ table_size_bytes: "16777216",
841
+ index_usage: "0",
842
+ supports_fk: false,
843
+ index_definition: "CREATE INDEX orders_user_id_idx ON public.orders USING btree (user_id)",
844
+ redundant_to_json: JSON.stringify([
845
+ { index_name: "public.orders_user_id_created_idx", index_definition: "CREATE INDEX orders_user_id_created_idx ON public.orders USING btree (user_id, created_at)", index_size_bytes: 1048576 }
846
+ ]),
847
+ },
848
+ ],
849
+ });
850
+
851
+ const indexes = await checkup.getRedundantIndexes(mockClient as any);
852
+ expect(indexes.length).toBe(1);
853
+ expect(indexes[0].schema_name).toBe("public");
854
+ expect(indexes[0].index_name).toBe("orders_user_id_idx");
855
+ expect(indexes[0].reason).toBe("public.orders_user_id_created_idx");
856
+ expect(indexes[0].index_size_bytes).toBe(2097152);
857
+ expect(indexes[0].supports_fk).toBe(false);
858
+ expect(indexes[0].index_definition).toBeTruthy();
859
+ expect(indexes[0].relation_name).toBe("orders");
860
+ // Verify redundant_to is populated with definitions and sizes
861
+ expect(indexes[0].redundant_to).toBeInstanceOf(Array);
862
+ expect(indexes[0].redundant_to.length).toBe(1);
863
+ expect(indexes[0].redundant_to[0].index_name).toBe("public.orders_user_id_created_idx");
864
+ expect(indexes[0].redundant_to[0].index_definition).toContain("CREATE INDEX");
865
+ expect(indexes[0].redundant_to[0].index_size_bytes).toBe(1048576);
866
+ expect(indexes[0].redundant_to[0].index_size_pretty).toBe("1.00 MiB");
867
+ });
868
+
869
+ test("generateH004 creates report with redundant indexes", async () => {
870
+ const mockClient = createMockClient({
871
+ versionRows: [
872
+ { name: "server_version", setting: "16.3" },
873
+ { name: "server_version_num", setting: "160003" },
874
+ ],
875
+ redundantIndexesRows: [
876
+ {
877
+ schema_name: "public",
878
+ table_name: "products",
879
+ index_name: "products_category_idx",
880
+ relation_name: "products",
881
+ access_method: "btree",
882
+ reason: "public.products_category_name_idx",
883
+ index_size_bytes: "4194304",
884
+ table_size_bytes: "33554432",
885
+ index_usage: "5",
886
+ supports_fk: false,
887
+ index_definition: "CREATE INDEX products_category_idx ON public.products USING btree (category)",
888
+ redundant_to_json: JSON.stringify([
889
+ { index_name: "public.products_category_name_idx", index_definition: "CREATE INDEX products_category_name_idx ON public.products USING btree (category, name)", index_size_bytes: 2097152 }
890
+ ]),
891
+ },
892
+ ],
893
+ }
894
+ );
895
+
896
+ const report = await checkup.generateH004(mockClient as any, "test-node");
897
+ expect(report.checkId).toBe("H004");
898
+ expect(report.checkTitle).toBe("Redundant indexes");
899
+ expect("test-node" in report.results).toBe(true);
900
+
901
+ // Data is now keyed by database name
902
+ const data = report.results["test-node"].data;
903
+ expect("testdb" in data).toBe(true);
904
+ const dbData = data["testdb"] as any;
905
+ expect(dbData.redundant_indexes).toBeTruthy();
906
+ expect(dbData.total_count).toBe(1);
907
+ expect(dbData.total_size_bytes).toBe(4194304);
908
+ expect(dbData.total_size_pretty).toBeTruthy();
909
+ expect(dbData.database_size_bytes).toBeTruthy();
910
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
911
+ });
912
+ // Top-level structure tests removed - covered by schema-validation.test.ts
913
+ });
914
+
915
+ // CLI tests
916
+ describe("CLI tests", () => {
917
+ test("checkup command exists and shows help", () => {
918
+ const r = runCli(["checkup", "--help"]);
919
+ expect(r.status).toBe(0);
920
+ expect(r.stdout).toMatch(/express mode/i);
921
+ expect(r.stdout).toMatch(/--check-id/);
922
+ expect(r.stdout).toMatch(/--node-name/);
923
+ expect(r.stdout).toMatch(/--output/);
924
+ expect(r.stdout).toMatch(/upload/);
925
+ expect(r.stdout).toMatch(/--json/);
926
+ });
927
+
928
+ test("checkup --help shows available check IDs", () => {
929
+ const r = runCli(["checkup", "--help"]);
930
+ expect(r.status).toBe(0);
931
+ expect(r.stdout).toMatch(/A002/);
932
+ expect(r.stdout).toMatch(/A003/);
933
+ expect(r.stdout).toMatch(/A004/);
934
+ expect(r.stdout).toMatch(/A007/);
935
+ expect(r.stdout).toMatch(/A013/);
936
+ expect(r.stdout).toMatch(/H001/);
937
+ expect(r.stdout).toMatch(/H002/);
938
+ expect(r.stdout).toMatch(/H004/);
939
+ });
940
+
941
+ test("checkup without connection shows help", () => {
942
+ const r = runCli(["checkup"]);
943
+ expect(r.status).not.toBe(0);
944
+ // Should show full help (options + examples), like `checkup --help`
945
+ expect(r.stdout).toMatch(/generate health check reports/i);
946
+ expect(r.stdout).toMatch(/--check-id/);
947
+ expect(r.stdout).toMatch(/available checks/i);
948
+ expect(r.stdout).toMatch(/A002/);
949
+ });
950
+ });
951
+
952
+ // Tests for checkup-api module
953
+ describe("checkup-api", () => {
954
+ test("formatRpcErrorForDisplay formats details/hint nicely", () => {
955
+ const err = new api.RpcError({
956
+ rpcName: "checkup_report_file_post",
957
+ statusCode: 402,
958
+ payloadText: JSON.stringify({
959
+ hint: "Start an express checkup subscription for the organization or contact support.",
960
+ details: "Checkup report uploads require an active checkup subscription",
961
+ }),
962
+ payloadJson: {
963
+ hint: "Start an express checkup subscription for the organization or contact support.",
964
+ details: "Checkup report uploads require an active checkup subscription.",
965
+ },
966
+ });
967
+ const lines = api.formatRpcErrorForDisplay(err);
968
+ const text = lines.join("\n");
969
+ expect(text).toMatch(/RPC checkup_report_file_post failed: HTTP 402/);
970
+ expect(text).toMatch(/Details:/);
971
+ expect(text).toMatch(/Hint:/);
972
+ });
973
+
974
+ test("withRetry succeeds on first attempt", async () => {
975
+ let attempts = 0;
976
+ const result = await api.withRetry(async () => {
977
+ attempts++;
978
+ return "success";
979
+ });
980
+ expect(result).toBe("success");
981
+ expect(attempts).toBe(1);
982
+ });
983
+
984
+ test("withRetry retries on retryable errors and succeeds", async () => {
985
+ let attempts = 0;
986
+ const result = await api.withRetry(
987
+ async () => {
988
+ attempts++;
989
+ if (attempts < 3) {
990
+ throw new Error("connection timeout");
991
+ }
992
+ return "success after retry";
993
+ },
994
+ { maxAttempts: 3, initialDelayMs: 10 }
995
+ );
996
+ expect(result).toBe("success after retry");
997
+ expect(attempts).toBe(3);
998
+ });
999
+
1000
+ test("withRetry calls onRetry callback", async () => {
1001
+ let attempts = 0;
1002
+ const retryLogs: string[] = [];
1003
+ await api.withRetry(
1004
+ async () => {
1005
+ attempts++;
1006
+ if (attempts < 2) {
1007
+ throw new Error("socket hang up");
1008
+ }
1009
+ return "ok";
1010
+ },
1011
+ { maxAttempts: 3, initialDelayMs: 10 },
1012
+ (attempt, err, delayMs) => {
1013
+ retryLogs.push(`attempt ${attempt}, delay ${delayMs}ms`);
1014
+ }
1015
+ );
1016
+ expect(retryLogs.length).toBe(1);
1017
+ expect(retryLogs[0]).toMatch(/attempt 1/);
1018
+ });
1019
+
1020
+ test("withRetry does not retry on non-retryable errors", async () => {
1021
+ let attempts = 0;
1022
+ try {
1023
+ await api.withRetry(
1024
+ async () => {
1025
+ attempts++;
1026
+ throw new Error("invalid input");
1027
+ },
1028
+ { maxAttempts: 3, initialDelayMs: 10 }
1029
+ );
1030
+ } catch (err) {
1031
+ expect((err as Error).message).toBe("invalid input");
1032
+ }
1033
+ expect(attempts).toBe(1);
1034
+ });
1035
+
1036
+ test("withRetry does not retry on 4xx RpcError", async () => {
1037
+ let attempts = 0;
1038
+ try {
1039
+ await api.withRetry(
1040
+ async () => {
1041
+ attempts++;
1042
+ throw new api.RpcError({
1043
+ rpcName: "test",
1044
+ statusCode: 400,
1045
+ payloadText: "bad request",
1046
+ payloadJson: null,
1047
+ });
1048
+ },
1049
+ { maxAttempts: 3, initialDelayMs: 10 }
1050
+ );
1051
+ } catch (err) {
1052
+ expect(err).toBeInstanceOf(api.RpcError);
1053
+ }
1054
+ expect(attempts).toBe(1);
1055
+ });
1056
+
1057
+ test("withRetry retries on 5xx RpcError", async () => {
1058
+ let attempts = 0;
1059
+ try {
1060
+ await api.withRetry(
1061
+ async () => {
1062
+ attempts++;
1063
+ throw new api.RpcError({
1064
+ rpcName: "test",
1065
+ statusCode: 503,
1066
+ payloadText: "service unavailable",
1067
+ payloadJson: null,
1068
+ });
1069
+ },
1070
+ { maxAttempts: 2, initialDelayMs: 10 }
1071
+ );
1072
+ } catch (err) {
1073
+ expect(err).toBeInstanceOf(api.RpcError);
1074
+ }
1075
+ expect(attempts).toBe(2);
1076
+ });
1077
+
1078
+ test("withRetry retries on timeout errors", async () => {
1079
+ // Tests that timeout-like error messages are considered retryable
1080
+ let attempts = 0;
1081
+ try {
1082
+ await api.withRetry(
1083
+ async () => {
1084
+ attempts++;
1085
+ throw new Error("RPC test timed out after 30000ms (no response)");
1086
+ },
1087
+ { maxAttempts: 3, initialDelayMs: 10 }
1088
+ );
1089
+ } catch (err) {
1090
+ expect(err).toBeInstanceOf(Error);
1091
+ expect((err as Error).message).toContain("timed out");
1092
+ }
1093
+ expect(attempts).toBe(3); // Should retry on timeout
1094
+ });
1095
+
1096
+ test("withRetry retries on ECONNRESET errors", async () => {
1097
+ // Tests that connection reset errors are considered retryable
1098
+ let attempts = 0;
1099
+ try {
1100
+ await api.withRetry(
1101
+ async () => {
1102
+ attempts++;
1103
+ const err = new Error("connection reset") as Error & { code: string };
1104
+ err.code = "ECONNRESET";
1105
+ throw err;
1106
+ },
1107
+ { maxAttempts: 2, initialDelayMs: 10 }
1108
+ );
1109
+ } catch (err) {
1110
+ expect(err).toBeInstanceOf(Error);
1111
+ }
1112
+ expect(attempts).toBe(2); // Should retry on ECONNRESET
1113
+ });
1114
+ });
1115
+
1116
+