postgresai 0.14.0-dev.7 → 0.14.0-dev.71

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/README.md +161 -61
  2. package/bin/postgres-ai.ts +1982 -404
  3. package/bun.lock +258 -0
  4. package/bunfig.toml +20 -0
  5. package/dist/bin/postgres-ai.js +29395 -1576
  6. package/dist/sql/01.role.sql +16 -0
  7. package/dist/sql/02.permissions.sql +37 -0
  8. package/dist/sql/03.optional_rds.sql +6 -0
  9. package/dist/sql/04.optional_self_managed.sql +8 -0
  10. package/dist/sql/05.helpers.sql +439 -0
  11. package/dist/sql/sql/01.role.sql +16 -0
  12. package/dist/sql/sql/02.permissions.sql +37 -0
  13. package/dist/sql/sql/03.optional_rds.sql +6 -0
  14. package/dist/sql/sql/04.optional_self_managed.sql +8 -0
  15. package/dist/sql/sql/05.helpers.sql +439 -0
  16. package/lib/auth-server.ts +124 -106
  17. package/lib/checkup-api.ts +386 -0
  18. package/lib/checkup.ts +1396 -0
  19. package/lib/config.ts +6 -3
  20. package/lib/init.ts +568 -155
  21. package/lib/issues.ts +400 -191
  22. package/lib/mcp-server.ts +213 -90
  23. package/lib/metrics-embedded.ts +79 -0
  24. package/lib/metrics-loader.ts +127 -0
  25. package/lib/supabase.ts +769 -0
  26. package/lib/util.ts +61 -0
  27. package/package.json +20 -10
  28. package/packages/postgres-ai/README.md +26 -0
  29. package/packages/postgres-ai/bin/postgres-ai.js +27 -0
  30. package/packages/postgres-ai/package.json +27 -0
  31. package/scripts/embed-metrics.ts +154 -0
  32. package/sql/01.role.sql +16 -0
  33. package/sql/02.permissions.sql +37 -0
  34. package/sql/03.optional_rds.sql +6 -0
  35. package/sql/04.optional_self_managed.sql +8 -0
  36. package/sql/05.helpers.sql +439 -0
  37. package/test/auth.test.ts +258 -0
  38. package/test/checkup.integration.test.ts +321 -0
  39. package/test/checkup.test.ts +1117 -0
  40. package/test/config-consistency.test.ts +36 -0
  41. package/test/init.integration.test.ts +500 -0
  42. package/test/init.test.ts +682 -0
  43. package/test/issues.cli.test.ts +314 -0
  44. package/test/issues.test.ts +456 -0
  45. package/test/mcp-server.test.ts +988 -0
  46. package/test/schema-validation.test.ts +81 -0
  47. package/test/supabase.test.ts +568 -0
  48. package/test/test-utils.ts +128 -0
  49. package/tsconfig.json +12 -20
  50. package/dist/bin/postgres-ai.d.ts +0 -3
  51. package/dist/bin/postgres-ai.d.ts.map +0 -1
  52. package/dist/bin/postgres-ai.js.map +0 -1
  53. package/dist/lib/auth-server.d.ts +0 -31
  54. package/dist/lib/auth-server.d.ts.map +0 -1
  55. package/dist/lib/auth-server.js +0 -263
  56. package/dist/lib/auth-server.js.map +0 -1
  57. package/dist/lib/config.d.ts +0 -45
  58. package/dist/lib/config.d.ts.map +0 -1
  59. package/dist/lib/config.js +0 -181
  60. package/dist/lib/config.js.map +0 -1
  61. package/dist/lib/init.d.ts +0 -61
  62. package/dist/lib/init.d.ts.map +0 -1
  63. package/dist/lib/init.js +0 -359
  64. package/dist/lib/init.js.map +0 -1
  65. package/dist/lib/issues.d.ts +0 -75
  66. package/dist/lib/issues.d.ts.map +0 -1
  67. package/dist/lib/issues.js +0 -336
  68. package/dist/lib/issues.js.map +0 -1
  69. package/dist/lib/mcp-server.d.ts +0 -9
  70. package/dist/lib/mcp-server.d.ts.map +0 -1
  71. package/dist/lib/mcp-server.js +0 -168
  72. package/dist/lib/mcp-server.js.map +0 -1
  73. package/dist/lib/pkce.d.ts +0 -32
  74. package/dist/lib/pkce.d.ts.map +0 -1
  75. package/dist/lib/pkce.js +0 -101
  76. package/dist/lib/pkce.js.map +0 -1
  77. package/dist/lib/util.d.ts +0 -27
  78. package/dist/lib/util.d.ts.map +0 -1
  79. package/dist/lib/util.js +0 -46
  80. package/dist/lib/util.js.map +0 -1
  81. package/dist/package.json +0 -46
  82. package/test/init.integration.test.cjs +0 -269
  83. package/test/init.test.cjs +0 -69
@@ -0,0 +1,1117 @@
1
+ import { describe, test, expect } from "bun:test";
2
+ import { resolve } from "path";
3
+
4
+ // Import from source directly since we're using Bun
5
+ import * as checkup from "../lib/checkup";
6
+ import * as api from "../lib/checkup-api";
7
+ import { createMockClient } from "./test-utils";
8
+
9
+
10
+ function runCli(args: string[], env: Record<string, string> = {}) {
11
+ const cliPath = resolve(import.meta.dir, "..", "bin", "postgres-ai.ts");
12
+ const bunBin = typeof process.execPath === "string" && process.execPath.length > 0 ? process.execPath : "bun";
13
+ const result = Bun.spawnSync([bunBin, cliPath, ...args], {
14
+ env: { ...process.env, ...env },
15
+ });
16
+ return {
17
+ status: result.exitCode,
18
+ stdout: new TextDecoder().decode(result.stdout),
19
+ stderr: new TextDecoder().decode(result.stderr),
20
+ };
21
+ }
22
+
23
+ // Unit tests for parseVersionNum
24
+ describe("parseVersionNum", () => {
25
+ test("parses PG 16.3 version number", () => {
26
+ const result = checkup.parseVersionNum("160003");
27
+ expect(result.major).toBe("16");
28
+ expect(result.minor).toBe("3");
29
+ });
30
+
31
+ test("parses PG 15.7 version number", () => {
32
+ const result = checkup.parseVersionNum("150007");
33
+ expect(result.major).toBe("15");
34
+ expect(result.minor).toBe("7");
35
+ });
36
+
37
+ test("parses PG 14.12 version number", () => {
38
+ const result = checkup.parseVersionNum("140012");
39
+ expect(result.major).toBe("14");
40
+ expect(result.minor).toBe("12");
41
+ });
42
+
43
+ test("handles empty string", () => {
44
+ const result = checkup.parseVersionNum("");
45
+ expect(result.major).toBe("");
46
+ expect(result.minor).toBe("");
47
+ });
48
+
49
+ test("handles null/undefined", () => {
50
+ const result = checkup.parseVersionNum(null as any);
51
+ expect(result.major).toBe("");
52
+ expect(result.minor).toBe("");
53
+ });
54
+
55
+ test("handles short string", () => {
56
+ const result = checkup.parseVersionNum("123");
57
+ expect(result.major).toBe("");
58
+ expect(result.minor).toBe("");
59
+ });
60
+ });
61
+
62
+ // Unit tests for createBaseReport
63
+ describe("createBaseReport", () => {
64
+ test("creates correct structure", () => {
65
+ const report = checkup.createBaseReport("A002", "Postgres major version", "test-node");
66
+
67
+ expect(report.checkId).toBe("A002");
68
+ expect(report.checkTitle).toBe("Postgres major version");
69
+ expect(typeof report.version).toBe("string");
70
+ expect(report.version!.length).toBeGreaterThan(0);
71
+ expect(typeof report.build_ts).toBe("string");
72
+ expect(report.nodes.primary).toBe("test-node");
73
+ expect(report.nodes.standbys).toEqual([]);
74
+ expect(report.results).toEqual({});
75
+ expect(typeof report.timestamptz).toBe("string");
76
+ // Verify timestamp is ISO format
77
+ expect(new Date(report.timestamptz).toISOString()).toBe(report.timestamptz);
78
+ });
79
+
80
+ test("uses provided node name", () => {
81
+ const report = checkup.createBaseReport("A003", "Postgres settings", "my-custom-node");
82
+ expect(report.nodes.primary).toBe("my-custom-node");
83
+ });
84
+ });
85
+
86
+ // Tests for CHECK_INFO
87
+ describe("CHECK_INFO and REPORT_GENERATORS", () => {
88
+ const expectedChecks: Record<string, string> = {
89
+ A002: "Postgres major version",
90
+ A003: "Postgres settings",
91
+ A004: "Cluster information",
92
+ A007: "Altered settings",
93
+ A013: "Postgres minor version",
94
+ D004: "pg_stat_statements and pg_stat_kcache settings",
95
+ F001: "Autovacuum: current settings",
96
+ G001: "Memory-related settings",
97
+ H001: "Invalid indexes",
98
+ H002: "Unused indexes",
99
+ H004: "Redundant indexes",
100
+ };
101
+
102
+ test("CHECK_INFO contains all expected checks with correct descriptions", () => {
103
+ for (const [checkId, description] of Object.entries(expectedChecks)) {
104
+ expect(checkup.CHECK_INFO[checkId]).toBe(description);
105
+ }
106
+ });
107
+
108
+ test("REPORT_GENERATORS has function for each check", () => {
109
+ for (const checkId of Object.keys(expectedChecks)) {
110
+ expect(typeof checkup.REPORT_GENERATORS[checkId]).toBe("function");
111
+ }
112
+ });
113
+
114
+ test("REPORT_GENERATORS and CHECK_INFO have same keys", () => {
115
+ const generatorKeys = Object.keys(checkup.REPORT_GENERATORS).sort();
116
+ const infoKeys = Object.keys(checkup.CHECK_INFO).sort();
117
+ expect(generatorKeys).toEqual(infoKeys);
118
+ });
119
+ });
120
+
121
+ // Tests for formatBytes
122
+ describe("formatBytes", () => {
123
+ test("formats zero bytes", () => {
124
+ expect(checkup.formatBytes(0)).toBe("0 B");
125
+ });
126
+
127
+ test("formats bytes", () => {
128
+ expect(checkup.formatBytes(500)).toBe("500.00 B");
129
+ });
130
+
131
+ test("formats kibibytes", () => {
132
+ expect(checkup.formatBytes(1024)).toBe("1.00 KiB");
133
+ expect(checkup.formatBytes(1536)).toBe("1.50 KiB");
134
+ });
135
+
136
+ test("formats mebibytes", () => {
137
+ expect(checkup.formatBytes(1048576)).toBe("1.00 MiB");
138
+ });
139
+
140
+ test("formats gibibytes", () => {
141
+ expect(checkup.formatBytes(1073741824)).toBe("1.00 GiB");
142
+ });
143
+
144
+ test("handles negative bytes", () => {
145
+ expect(checkup.formatBytes(-1024)).toBe("-1.00 KiB");
146
+ expect(checkup.formatBytes(-1048576)).toBe("-1.00 MiB");
147
+ });
148
+
149
+ test("handles edge cases", () => {
150
+ expect(checkup.formatBytes(NaN)).toBe("NaN B");
151
+ expect(checkup.formatBytes(Infinity)).toBe("Infinity B");
152
+ });
153
+ });
154
+
155
+ // Mock client tests for report generators
156
+ describe("Report generators with mock client", () => {
157
+ test("getPostgresVersion extracts version info", async () => {
158
+ const mockClient = createMockClient({
159
+ versionRows: [
160
+ { name: "server_version", setting: "16.3" },
161
+ { name: "server_version_num", setting: "160003" },
162
+ ],
163
+ });
164
+
165
+ const version = await checkup.getPostgresVersion(mockClient as any);
166
+ expect(version.version).toBe("16.3");
167
+ expect(version.server_version_num).toBe("160003");
168
+ expect(version.server_major_ver).toBe("16");
169
+ expect(version.server_minor_ver).toBe("3");
170
+ });
171
+
172
+ test("getSettings transforms rows to keyed object", async () => {
173
+ const mockClient = createMockClient({
174
+ settingsRows: [
175
+ {
176
+ tag_setting_name: "shared_buffers",
177
+ tag_setting_value: "16384",
178
+ tag_unit: "8kB",
179
+ tag_category: "Resource Usage / Memory",
180
+ tag_vartype: "integer",
181
+ is_default: 1,
182
+ setting_normalized: "134217728", // 16384 * 8192
183
+ unit_normalized: "bytes",
184
+ },
185
+ {
186
+ tag_setting_name: "work_mem",
187
+ tag_setting_value: "4096",
188
+ tag_unit: "kB",
189
+ tag_category: "Resource Usage / Memory",
190
+ tag_vartype: "integer",
191
+ is_default: 1,
192
+ setting_normalized: "4194304", // 4096 * 1024
193
+ unit_normalized: "bytes",
194
+ },
195
+ ],
196
+ });
197
+
198
+ const settings = await checkup.getSettings(mockClient as any);
199
+ expect("shared_buffers" in settings).toBe(true);
200
+ expect("work_mem" in settings).toBe(true);
201
+ expect(settings.shared_buffers.setting).toBe("16384");
202
+ expect(settings.shared_buffers.unit).toBe("8kB");
203
+ // pretty_value is now computed from setting_normalized
204
+ expect(settings.shared_buffers.pretty_value).toBe("128.00 MiB");
205
+ expect(settings.work_mem.pretty_value).toBe("4.00 MiB");
206
+ });
207
+
208
+ test("generateA002 creates report with version data", async () => {
209
+ const mockClient = createMockClient({
210
+ versionRows: [
211
+ { name: "server_version", setting: "16.3" },
212
+ { name: "server_version_num", setting: "160003" },
213
+ ],
214
+ });
215
+
216
+ const report = await checkup.generateA002(mockClient as any, "test-node");
217
+ expect(report.checkId).toBe("A002");
218
+ expect(report.checkTitle).toBe("Postgres major version");
219
+ expect(report.nodes.primary).toBe("test-node");
220
+ expect("test-node" in report.results).toBe(true);
221
+ expect("version" in report.results["test-node"].data).toBe(true);
222
+ expect(report.results["test-node"].data.version.version).toBe("16.3");
223
+ });
224
+
225
+ test("generateA003 creates report with settings and version", async () => {
226
+ const mockClient = createMockClient({
227
+ versionRows: [
228
+ { name: "server_version", setting: "16.3" },
229
+ { name: "server_version_num", setting: "160003" },
230
+ ],
231
+ settingsRows: [
232
+ {
233
+ tag_setting_name: "shared_buffers",
234
+ tag_setting_value: "16384",
235
+ tag_unit: "8kB",
236
+ tag_category: "Resource Usage / Memory",
237
+ tag_vartype: "integer",
238
+ is_default: 1,
239
+ setting_normalized: "134217728",
240
+ unit_normalized: "bytes",
241
+ },
242
+ ],
243
+ });
244
+
245
+ const report = await checkup.generateA003(mockClient as any, "test-node");
246
+ expect(report.checkId).toBe("A003");
247
+ expect(report.checkTitle).toBe("Postgres settings");
248
+ expect("test-node" in report.results).toBe(true);
249
+ expect("shared_buffers" in report.results["test-node"].data).toBe(true);
250
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
251
+ expect(report.results["test-node"].postgres_version!.version).toBe("16.3");
252
+ });
253
+
254
+ test("generateA013 creates report with minor version data", async () => {
255
+ const mockClient = createMockClient({
256
+ versionRows: [
257
+ { name: "server_version", setting: "16.3" },
258
+ { name: "server_version_num", setting: "160003" },
259
+ ],
260
+ });
261
+
262
+ const report = await checkup.generateA013(mockClient as any, "test-node");
263
+ expect(report.checkId).toBe("A013");
264
+ expect(report.checkTitle).toBe("Postgres minor version");
265
+ expect(report.nodes.primary).toBe("test-node");
266
+ expect("test-node" in report.results).toBe(true);
267
+ expect("version" in report.results["test-node"].data).toBe(true);
268
+ expect(report.results["test-node"].data.version.server_minor_ver).toBe("3");
269
+ });
270
+
271
+ test("generateAllReports returns reports for all checks", async () => {
272
+ const mockClient = createMockClient({
273
+ versionRows: [
274
+ { name: "server_version", setting: "16.3" },
275
+ { name: "server_version_num", setting: "160003" },
276
+ ],
277
+ settingsRows: [
278
+ {
279
+ tag_setting_name: "shared_buffers",
280
+ tag_setting_value: "16384",
281
+ tag_unit: "8kB",
282
+ tag_category: "Resource Usage / Memory",
283
+ tag_vartype: "integer",
284
+ is_default: 0, // Non-default for A007
285
+ setting_normalized: "134217728",
286
+ unit_normalized: "bytes",
287
+ },
288
+ ],
289
+ databaseSizesRows: [{ datname: "postgres", size_bytes: "1073741824" }],
290
+ dbStatsRows: [{
291
+ numbackends: 5,
292
+ xact_commit: 100,
293
+ xact_rollback: 1,
294
+ blks_read: 1000,
295
+ blks_hit: 9000,
296
+ tup_returned: 500,
297
+ tup_fetched: 400,
298
+ tup_inserted: 50,
299
+ tup_updated: 30,
300
+ tup_deleted: 10,
301
+ deadlocks: 0,
302
+ temp_files: 0,
303
+ temp_bytes: 0,
304
+ postmaster_uptime_s: 864000
305
+ }],
306
+ connectionStatesRows: [{ state: "active", count: 2 }, { state: "idle", count: 3 }],
307
+ uptimeRows: [{ start_time: new Date("2024-01-01T00:00:00Z"), uptime: "10 days" }],
308
+ invalidIndexesRows: [],
309
+ unusedIndexesRows: [],
310
+ redundantIndexesRows: [],
311
+ sensitiveColumnsRows: [],
312
+ }
313
+ );
314
+
315
+ const reports = await checkup.generateAllReports(mockClient as any, "test-node");
316
+ expect("A002" in reports).toBe(true);
317
+ expect("A003" in reports).toBe(true);
318
+ expect("A004" in reports).toBe(true);
319
+ expect("A007" in reports).toBe(true);
320
+ expect("A013" in reports).toBe(true);
321
+ expect("H001" in reports).toBe(true);
322
+ expect("H002" in reports).toBe(true);
323
+ expect("H004" in reports).toBe(true);
324
+ // S001 is only available in Python reporter, not in CLI express mode
325
+ expect(reports.A002.checkId).toBe("A002");
326
+ expect(reports.A003.checkId).toBe("A003");
327
+ expect(reports.A004.checkId).toBe("A004");
328
+ expect(reports.A007.checkId).toBe("A007");
329
+ expect(reports.A013.checkId).toBe("A013");
330
+ expect(reports.H001.checkId).toBe("H001");
331
+ expect(reports.H002.checkId).toBe("H002");
332
+ expect(reports.H004.checkId).toBe("H004");
333
+ });
334
+ });
335
+
336
+ // Tests for A007 (Altered settings)
337
+ describe("A007 - Altered settings", () => {
338
+ test("getAlteredSettings returns non-default settings", async () => {
339
+ const mockClient = createMockClient({
340
+ settingsRows: [
341
+ { tag_setting_name: "shared_buffers", tag_setting_value: "256MB", tag_unit: "", tag_category: "Resource Usage / Memory", tag_vartype: "string", is_default: 0, setting_normalized: null, unit_normalized: null },
342
+ { tag_setting_name: "work_mem", tag_setting_value: "64MB", tag_unit: "", tag_category: "Resource Usage / Memory", tag_vartype: "string", is_default: 0, setting_normalized: null, unit_normalized: null },
343
+ { tag_setting_name: "default_setting", tag_setting_value: "on", tag_unit: "", tag_category: "Other", tag_vartype: "bool", is_default: 1, setting_normalized: null, unit_normalized: null },
344
+ ],
345
+ });
346
+
347
+ const settings = await checkup.getAlteredSettings(mockClient as any);
348
+ expect("shared_buffers" in settings).toBe(true);
349
+ expect("work_mem" in settings).toBe(true);
350
+ expect("default_setting" in settings).toBe(false); // Should be filtered out
351
+ expect(settings.shared_buffers.value).toBe("256MB");
352
+ expect(settings.work_mem.value).toBe("64MB");
353
+ });
354
+
355
+ test("generateA007 creates report with altered settings", async () => {
356
+ const mockClient = createMockClient({
357
+ versionRows: [
358
+ { name: "server_version", setting: "16.3" },
359
+ { name: "server_version_num", setting: "160003" },
360
+ ],
361
+ settingsRows: [
362
+ { tag_setting_name: "max_connections", tag_setting_value: "200", tag_unit: "", tag_category: "Connections and Authentication", tag_vartype: "integer", is_default: 0, setting_normalized: null, unit_normalized: null },
363
+ ],
364
+ }
365
+ );
366
+
367
+ const report = await checkup.generateA007(mockClient as any, "test-node");
368
+ expect(report.checkId).toBe("A007");
369
+ expect(report.checkTitle).toBe("Altered settings");
370
+ expect(report.nodes.primary).toBe("test-node");
371
+ expect("test-node" in report.results).toBe(true);
372
+ expect("max_connections" in report.results["test-node"].data).toBe(true);
373
+ expect(report.results["test-node"].data.max_connections.value).toBe("200");
374
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
375
+ });
376
+ });
377
+
378
+ // Tests for A004 (Cluster information)
379
+ describe("A004 - Cluster information", () => {
380
+ test("getDatabaseSizes returns database sizes", async () => {
381
+ const mockClient = createMockClient({
382
+ databaseSizesRows: [
383
+ { datname: "postgres", size_bytes: "1073741824" },
384
+ { datname: "mydb", size_bytes: "536870912" },
385
+ ],
386
+ });
387
+
388
+ const sizes = await checkup.getDatabaseSizes(mockClient as any);
389
+ expect("postgres" in sizes).toBe(true);
390
+ expect("mydb" in sizes).toBe(true);
391
+ expect(sizes.postgres).toBe(1073741824);
392
+ expect(sizes.mydb).toBe(536870912);
393
+ });
394
+
395
+ test("getClusterInfo returns cluster metrics", async () => {
396
+ const mockClient = createMockClient({
397
+ dbStatsRows: [{
398
+ numbackends: 10,
399
+ xact_commit: 1000,
400
+ xact_rollback: 5,
401
+ blks_read: 500,
402
+ blks_hit: 9500,
403
+ tup_returned: 5000,
404
+ tup_fetched: 4000,
405
+ tup_inserted: 100,
406
+ tup_updated: 50,
407
+ tup_deleted: 25,
408
+ deadlocks: 0,
409
+ temp_files: 2,
410
+ temp_bytes: 1048576,
411
+ postmaster_uptime_s: 2592000, // 30 days
412
+ }],
413
+ connectionStatesRows: [
414
+ { state: "active", count: 3 },
415
+ { state: "idle", count: 7 },
416
+ ],
417
+ uptimeRows: [{
418
+ start_time: new Date("2024-01-01T00:00:00Z"),
419
+ uptime: "30 days",
420
+ }],
421
+ });
422
+
423
+ const info = await checkup.getClusterInfo(mockClient as any);
424
+ expect("total_connections" in info).toBe(true);
425
+ expect("cache_hit_ratio" in info).toBe(true);
426
+ expect("connections_active" in info).toBe(true);
427
+ expect("connections_idle" in info).toBe(true);
428
+ expect("start_time" in info).toBe(true);
429
+ expect(info.total_connections.value).toBe("10");
430
+ expect(info.cache_hit_ratio.value).toBe("95.00");
431
+ expect(info.connections_active.value).toBe("3");
432
+ });
433
+
434
+ test("generateA004 creates report with cluster info and database sizes", async () => {
435
+ const mockClient = createMockClient({
436
+ versionRows: [
437
+ { name: "server_version", setting: "16.3" },
438
+ { name: "server_version_num", setting: "160003" },
439
+ ],
440
+ databaseSizesRows: [
441
+ { datname: "postgres", size_bytes: "1073741824" },
442
+ ],
443
+ dbStatsRows: [{
444
+ numbackends: 5,
445
+ xact_commit: 100,
446
+ xact_rollback: 1,
447
+ blks_read: 100,
448
+ blks_hit: 900,
449
+ tup_returned: 500,
450
+ tup_fetched: 400,
451
+ tup_inserted: 50,
452
+ tup_updated: 30,
453
+ tup_deleted: 10,
454
+ deadlocks: 0,
455
+ temp_files: 0,
456
+ temp_bytes: 0,
457
+ postmaster_uptime_s: 864000,
458
+ }],
459
+ connectionStatesRows: [{ state: "active", count: 2 }],
460
+ uptimeRows: [{ start_time: new Date("2024-01-01T00:00:00Z"), uptime: "10 days" }],
461
+ }
462
+ );
463
+
464
+ const report = await checkup.generateA004(mockClient as any, "test-node");
465
+ expect(report.checkId).toBe("A004");
466
+ expect(report.checkTitle).toBe("Cluster information");
467
+ expect(report.nodes.primary).toBe("test-node");
468
+ expect("test-node" in report.results).toBe(true);
469
+
470
+ const data = report.results["test-node"].data;
471
+ expect("general_info" in data).toBe(true);
472
+ expect("database_sizes" in data).toBe(true);
473
+ expect("total_connections" in data.general_info).toBe(true);
474
+ expect("postgres" in data.database_sizes).toBe(true);
475
+ expect(data.database_sizes.postgres).toBe(1073741824);
476
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
477
+ });
478
+ });
479
+
480
+ // Tests for H001 (Invalid indexes)
481
+ describe("H001 - Invalid indexes", () => {
482
+ test("getInvalidIndexes returns invalid indexes", async () => {
483
+ const mockClient = createMockClient({
484
+ invalidIndexesRows: [
485
+ { schema_name: "public", table_name: "users", index_name: "users_email_idx", relation_name: "users", index_size_bytes: "1048576", index_definition: "CREATE INDEX users_email_idx ON public.users USING btree (email)", supports_fk: false },
486
+ ],
487
+ });
488
+
489
+ const indexes = await checkup.getInvalidIndexes(mockClient as any);
490
+ expect(indexes.length).toBe(1);
491
+ expect(indexes[0].schema_name).toBe("public");
492
+ expect(indexes[0].table_name).toBe("users");
493
+ expect(indexes[0].index_name).toBe("users_email_idx");
494
+ expect(indexes[0].index_size_bytes).toBe(1048576);
495
+ expect(indexes[0].index_size_pretty).toBeTruthy();
496
+ expect(indexes[0].index_definition).toMatch(/^CREATE INDEX/);
497
+ expect(indexes[0].relation_name).toBe("users");
498
+ expect(indexes[0].supports_fk).toBe(false);
499
+ });
500
+
501
+ test("generateH001 creates report with invalid indexes", async () => {
502
+ const mockClient = createMockClient({
503
+ versionRows: [
504
+ { name: "server_version", setting: "16.3" },
505
+ { name: "server_version_num", setting: "160003" },
506
+ ],
507
+ invalidIndexesRows: [
508
+ { schema_name: "public", table_name: "orders", index_name: "orders_status_idx", relation_name: "orders", index_size_bytes: "2097152", index_definition: "CREATE INDEX orders_status_idx ON public.orders USING btree (status)", supports_fk: false },
509
+ ],
510
+ }
511
+ );
512
+
513
+ const report = await checkup.generateH001(mockClient as any, "test-node");
514
+ expect(report.checkId).toBe("H001");
515
+ expect(report.checkTitle).toBe("Invalid indexes");
516
+ expect("test-node" in report.results).toBe(true);
517
+
518
+ // Data is now keyed by database name
519
+ const data = report.results["test-node"].data;
520
+ expect("testdb" in data).toBe(true);
521
+ const dbData = data["testdb"] as any;
522
+ expect(dbData.invalid_indexes).toBeTruthy();
523
+ expect(dbData.total_count).toBe(1);
524
+ expect(dbData.total_size_bytes).toBe(2097152);
525
+ expect(dbData.total_size_pretty).toBeTruthy();
526
+ expect(dbData.database_size_bytes).toBeTruthy();
527
+ expect(dbData.database_size_pretty).toBeTruthy();
528
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
529
+ });
530
+
531
+ test("getInvalidIndexes returns decision tree fields including valid_duplicate_definition", async () => {
532
+ const mockClient = createMockClient({
533
+ invalidIndexesRows: [
534
+ {
535
+ schema_name: "public",
536
+ table_name: "users",
537
+ index_name: "users_email_idx_invalid",
538
+ relation_name: "users",
539
+ index_size_bytes: "1048576",
540
+ index_definition: "CREATE INDEX users_email_idx_invalid ON public.users USING btree (email)",
541
+ supports_fk: false,
542
+ is_pk: false,
543
+ is_unique: false,
544
+ constraint_name: null,
545
+ table_row_estimate: "5000",
546
+ has_valid_duplicate: true,
547
+ valid_index_name: "users_email_idx",
548
+ valid_index_definition: "CREATE INDEX users_email_idx ON public.users USING btree (email)",
549
+ },
550
+ ],
551
+ });
552
+
553
+ const indexes = await checkup.getInvalidIndexes(mockClient as any);
554
+ expect(indexes.length).toBe(1);
555
+ expect(indexes[0].is_pk).toBe(false);
556
+ expect(indexes[0].is_unique).toBe(false);
557
+ expect(indexes[0].constraint_name).toBeNull();
558
+ expect(indexes[0].table_row_estimate).toBe(5000);
559
+ expect(indexes[0].has_valid_duplicate).toBe(true);
560
+ expect(indexes[0].valid_duplicate_name).toBe("users_email_idx");
561
+ expect(indexes[0].valid_duplicate_definition).toBe("CREATE INDEX users_email_idx ON public.users USING btree (email)");
562
+ });
563
+
564
+ test("getInvalidIndexes handles has_valid_duplicate: false with null values", async () => {
565
+ const mockClient = createMockClient({
566
+ invalidIndexesRows: [
567
+ {
568
+ schema_name: "public",
569
+ table_name: "orders",
570
+ index_name: "orders_status_idx_invalid",
571
+ relation_name: "orders",
572
+ index_size_bytes: "524288",
573
+ index_definition: "CREATE INDEX orders_status_idx_invalid ON public.orders USING btree (status)",
574
+ supports_fk: false,
575
+ is_pk: false,
576
+ is_unique: false,
577
+ constraint_name: null,
578
+ table_row_estimate: "100000",
579
+ has_valid_duplicate: false,
580
+ valid_index_name: null,
581
+ valid_index_definition: null,
582
+ },
583
+ ],
584
+ });
585
+
586
+ const indexes = await checkup.getInvalidIndexes(mockClient as Client);
587
+ expect(indexes.length).toBe(1);
588
+ expect(indexes[0].has_valid_duplicate).toBe(false);
589
+ expect(indexes[0].valid_duplicate_name).toBeNull();
590
+ expect(indexes[0].valid_duplicate_definition).toBeNull();
591
+ });
592
+
593
+ test("getInvalidIndexes handles is_pk: true with constraint", async () => {
594
+ const mockClient = createMockClient({
595
+ invalidIndexesRows: [
596
+ {
597
+ schema_name: "public",
598
+ table_name: "accounts",
599
+ index_name: "accounts_pkey_invalid",
600
+ relation_name: "accounts",
601
+ index_size_bytes: "262144",
602
+ index_definition: "CREATE UNIQUE INDEX accounts_pkey_invalid ON public.accounts USING btree (id)",
603
+ supports_fk: true,
604
+ is_pk: true,
605
+ is_unique: true,
606
+ constraint_name: "accounts_pkey",
607
+ table_row_estimate: "500",
608
+ has_valid_duplicate: false,
609
+ valid_index_name: null,
610
+ valid_index_definition: null,
611
+ },
612
+ ],
613
+ });
614
+
615
+ const indexes = await checkup.getInvalidIndexes(mockClient as Client);
616
+ expect(indexes.length).toBe(1);
617
+ expect(indexes[0].is_pk).toBe(true);
618
+ expect(indexes[0].is_unique).toBe(true);
619
+ expect(indexes[0].constraint_name).toBe("accounts_pkey");
620
+ expect(indexes[0].supports_fk).toBe(true);
621
+ });
622
+
623
+ test("getInvalidIndexes handles is_unique: true without PK", async () => {
624
+ const mockClient = createMockClient({
625
+ invalidIndexesRows: [
626
+ {
627
+ schema_name: "public",
628
+ table_name: "users",
629
+ index_name: "users_email_unique_invalid",
630
+ relation_name: "users",
631
+ index_size_bytes: "131072",
632
+ index_definition: "CREATE UNIQUE INDEX users_email_unique_invalid ON public.users USING btree (email)",
633
+ supports_fk: false,
634
+ is_pk: false,
635
+ is_unique: true,
636
+ constraint_name: "users_email_unique",
637
+ table_row_estimate: "25000",
638
+ has_valid_duplicate: true,
639
+ valid_index_name: "users_email_unique_idx",
640
+ valid_index_definition: "CREATE UNIQUE INDEX users_email_unique_idx ON public.users USING btree (email)",
641
+ },
642
+ ],
643
+ });
644
+
645
+ const indexes = await checkup.getInvalidIndexes(mockClient as Client);
646
+ expect(indexes.length).toBe(1);
647
+ expect(indexes[0].is_pk).toBe(false);
648
+ expect(indexes[0].is_unique).toBe(true);
649
+ expect(indexes[0].constraint_name).toBe("users_email_unique");
650
+ expect(indexes[0].has_valid_duplicate).toBe(true);
651
+ });
652
+ // Top-level structure tests removed - covered by schema-validation.test.ts
653
+ });
654
+
655
+ // Tests for H001 decision tree recommendation logic
656
+ describe("H001 - Decision tree recommendations", () => {
657
+ // Helper to create a minimal InvalidIndex for testing
658
+ const createTestIndex = (overrides: Partial<checkup.InvalidIndex> = {}): checkup.InvalidIndex => ({
659
+ schema_name: "public",
660
+ table_name: "test_table",
661
+ index_name: "test_idx",
662
+ relation_name: "public.test_table",
663
+ index_size_bytes: 1024,
664
+ index_size_pretty: "1 KiB",
665
+ index_definition: "CREATE INDEX test_idx ON public.test_table USING btree (col)",
666
+ supports_fk: false,
667
+ is_pk: false,
668
+ is_unique: false,
669
+ constraint_name: null,
670
+ table_row_estimate: 100000, // Large table by default
671
+ has_valid_duplicate: false,
672
+ valid_duplicate_name: null,
673
+ valid_duplicate_definition: null,
674
+ ...overrides,
675
+ });
676
+
677
+ test("returns DROP when has_valid_duplicate is true", () => {
678
+ const index = createTestIndex({ has_valid_duplicate: true, valid_duplicate_name: "existing_idx" });
679
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
680
+ });
681
+
682
+ test("returns DROP even when is_pk is true if has_valid_duplicate is true", () => {
683
+ // has_valid_duplicate takes precedence over is_pk
684
+ const index = createTestIndex({
685
+ has_valid_duplicate: true,
686
+ is_pk: true,
687
+ is_unique: true,
688
+ });
689
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
690
+ });
691
+
692
+ test("returns RECREATE when is_pk is true and no valid duplicate", () => {
693
+ const index = createTestIndex({
694
+ is_pk: true,
695
+ is_unique: true,
696
+ constraint_name: "test_pkey",
697
+ });
698
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
699
+ });
700
+
701
+ test("returns RECREATE when is_unique is true (non-PK) and no valid duplicate", () => {
702
+ const index = createTestIndex({
703
+ is_unique: true,
704
+ constraint_name: "test_unique",
705
+ });
706
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
707
+ });
708
+
709
+ test("returns RECREATE for small table (< 10K rows) without valid duplicate", () => {
710
+ const index = createTestIndex({ table_row_estimate: 5000 });
711
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
712
+ });
713
+
714
+ test("returns RECREATE for table at threshold boundary (9999 rows)", () => {
715
+ const index = createTestIndex({ table_row_estimate: 9999 });
716
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
717
+ });
718
+
719
+ test("returns UNCERTAIN for large table (>= 10K rows) at threshold boundary", () => {
720
+ const index = createTestIndex({ table_row_estimate: 10000 });
721
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("UNCERTAIN");
722
+ });
723
+
724
+ test("returns UNCERTAIN for large table without valid duplicate or constraint", () => {
725
+ const index = createTestIndex({ table_row_estimate: 1000000 });
726
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("UNCERTAIN");
727
+ });
728
+
729
+ test("returns UNCERTAIN for empty table (0 rows) with no valid duplicate - edge case", () => {
730
+ // Empty table should be RECREATE (< 10K threshold)
731
+ const index = createTestIndex({ table_row_estimate: 0 });
732
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
733
+ });
734
+
735
+ test("decision tree priority: has_valid_duplicate > is_pk > small_table", () => {
736
+ // Even with PK and small table, has_valid_duplicate should win
737
+ const index = createTestIndex({
738
+ has_valid_duplicate: true,
739
+ is_pk: true,
740
+ is_unique: true,
741
+ table_row_estimate: 100,
742
+ });
743
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
744
+ });
745
+
746
+ test("decision tree priority: is_pk > small_table", () => {
747
+ // is_pk should return RECREATE regardless of table size
748
+ const index = createTestIndex({
749
+ is_pk: true,
750
+ is_unique: true,
751
+ table_row_estimate: 1000000, // Large table
752
+ });
753
+ expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
754
+ });
755
+ });
756
+
757
+ // Tests for H002 (Unused indexes)
758
+ describe("H002 - Unused indexes", () => {
759
+ test("getUnusedIndexes returns unused indexes", async () => {
760
+ const mockClient = createMockClient({
761
+ unusedIndexesRows: [
762
+ {
763
+ schema_name: "public",
764
+ table_name: "products",
765
+ index_name: "products_old_idx",
766
+ index_definition: "CREATE INDEX products_old_idx ON public.products USING btree (old_column)",
767
+ reason: "Never Used Indexes",
768
+ index_size_bytes: "4194304",
769
+ idx_scan: "0",
770
+ idx_is_btree: true,
771
+ supports_fk: false,
772
+ },
773
+ ],
774
+ });
775
+
776
+ const indexes = await checkup.getUnusedIndexes(mockClient as any);
777
+ expect(indexes.length).toBe(1);
778
+ expect(indexes[0].schema_name).toBe("public");
779
+ expect(indexes[0].index_name).toBe("products_old_idx");
780
+ expect(indexes[0].index_size_bytes).toBe(4194304);
781
+ expect(indexes[0].idx_scan).toBe(0);
782
+ expect(indexes[0].supports_fk).toBe(false);
783
+ expect(indexes[0].index_definition).toBeTruthy();
784
+ expect(indexes[0].idx_is_btree).toBe(true);
785
+ });
786
+
787
+ test("generateH002 creates report with unused indexes", async () => {
788
+ const mockClient = createMockClient({
789
+ versionRows: [
790
+ { name: "server_version", setting: "16.3" },
791
+ { name: "server_version_num", setting: "160003" },
792
+ ],
793
+ unusedIndexesRows: [
794
+ {
795
+ schema_name: "public",
796
+ table_name: "logs",
797
+ index_name: "logs_created_idx",
798
+ index_definition: "CREATE INDEX logs_created_idx ON public.logs USING btree (created_at)",
799
+ reason: "Never Used Indexes",
800
+ index_size_bytes: "8388608",
801
+ idx_scan: "0",
802
+ idx_is_btree: true,
803
+ supports_fk: false,
804
+ },
805
+ ],
806
+ }
807
+ );
808
+
809
+ const report = await checkup.generateH002(mockClient as any, "test-node");
810
+ expect(report.checkId).toBe("H002");
811
+ expect(report.checkTitle).toBe("Unused indexes");
812
+ expect("test-node" in report.results).toBe(true);
813
+
814
+ // Data is now keyed by database name
815
+ const data = report.results["test-node"].data;
816
+ expect("testdb" in data).toBe(true);
817
+ const dbData = data["testdb"] as any;
818
+ expect(dbData.unused_indexes).toBeTruthy();
819
+ expect(dbData.total_count).toBe(1);
820
+ expect(dbData.total_size_bytes).toBe(8388608);
821
+ expect(dbData.total_size_pretty).toBeTruthy();
822
+ expect(dbData.stats_reset).toBeTruthy();
823
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
824
+ });
825
+ // Top-level structure tests removed - covered by schema-validation.test.ts
826
+ });
827
+
828
+ // Tests for H004 (Redundant indexes)
829
+ describe("H004 - Redundant indexes", () => {
830
+ test("getRedundantIndexes returns redundant indexes", async () => {
831
+ const mockClient = createMockClient({
832
+ redundantIndexesRows: [
833
+ {
834
+ schema_name: "public",
835
+ table_name: "orders",
836
+ index_name: "orders_user_id_idx",
837
+ relation_name: "orders",
838
+ access_method: "btree",
839
+ reason: "public.orders_user_id_created_idx",
840
+ index_size_bytes: "2097152",
841
+ table_size_bytes: "16777216",
842
+ index_usage: "0",
843
+ supports_fk: false,
844
+ index_definition: "CREATE INDEX orders_user_id_idx ON public.orders USING btree (user_id)",
845
+ redundant_to_json: JSON.stringify([
846
+ { index_name: "public.orders_user_id_created_idx", index_definition: "CREATE INDEX orders_user_id_created_idx ON public.orders USING btree (user_id, created_at)", index_size_bytes: 1048576 }
847
+ ]),
848
+ },
849
+ ],
850
+ });
851
+
852
+ const indexes = await checkup.getRedundantIndexes(mockClient as any);
853
+ expect(indexes.length).toBe(1);
854
+ expect(indexes[0].schema_name).toBe("public");
855
+ expect(indexes[0].index_name).toBe("orders_user_id_idx");
856
+ expect(indexes[0].reason).toBe("public.orders_user_id_created_idx");
857
+ expect(indexes[0].index_size_bytes).toBe(2097152);
858
+ expect(indexes[0].supports_fk).toBe(false);
859
+ expect(indexes[0].index_definition).toBeTruthy();
860
+ expect(indexes[0].relation_name).toBe("orders");
861
+ // Verify redundant_to is populated with definitions and sizes
862
+ expect(indexes[0].redundant_to).toBeInstanceOf(Array);
863
+ expect(indexes[0].redundant_to.length).toBe(1);
864
+ expect(indexes[0].redundant_to[0].index_name).toBe("public.orders_user_id_created_idx");
865
+ expect(indexes[0].redundant_to[0].index_definition).toContain("CREATE INDEX");
866
+ expect(indexes[0].redundant_to[0].index_size_bytes).toBe(1048576);
867
+ expect(indexes[0].redundant_to[0].index_size_pretty).toBe("1.00 MiB");
868
+ });
869
+
870
+ test("generateH004 creates report with redundant indexes", async () => {
871
+ const mockClient = createMockClient({
872
+ versionRows: [
873
+ { name: "server_version", setting: "16.3" },
874
+ { name: "server_version_num", setting: "160003" },
875
+ ],
876
+ redundantIndexesRows: [
877
+ {
878
+ schema_name: "public",
879
+ table_name: "products",
880
+ index_name: "products_category_idx",
881
+ relation_name: "products",
882
+ access_method: "btree",
883
+ reason: "public.products_category_name_idx",
884
+ index_size_bytes: "4194304",
885
+ table_size_bytes: "33554432",
886
+ index_usage: "5",
887
+ supports_fk: false,
888
+ index_definition: "CREATE INDEX products_category_idx ON public.products USING btree (category)",
889
+ redundant_to_json: JSON.stringify([
890
+ { index_name: "public.products_category_name_idx", index_definition: "CREATE INDEX products_category_name_idx ON public.products USING btree (category, name)", index_size_bytes: 2097152 }
891
+ ]),
892
+ },
893
+ ],
894
+ }
895
+ );
896
+
897
+ const report = await checkup.generateH004(mockClient as any, "test-node");
898
+ expect(report.checkId).toBe("H004");
899
+ expect(report.checkTitle).toBe("Redundant indexes");
900
+ expect("test-node" in report.results).toBe(true);
901
+
902
+ // Data is now keyed by database name
903
+ const data = report.results["test-node"].data;
904
+ expect("testdb" in data).toBe(true);
905
+ const dbData = data["testdb"] as any;
906
+ expect(dbData.redundant_indexes).toBeTruthy();
907
+ expect(dbData.total_count).toBe(1);
908
+ expect(dbData.total_size_bytes).toBe(4194304);
909
+ expect(dbData.total_size_pretty).toBeTruthy();
910
+ expect(dbData.database_size_bytes).toBeTruthy();
911
+ expect(report.results["test-node"].postgres_version).toBeTruthy();
912
+ });
913
+ // Top-level structure tests removed - covered by schema-validation.test.ts
914
+ });
915
+
916
+ // CLI tests
917
+ describe("CLI tests", () => {
918
+ test("checkup command exists and shows help", () => {
919
+ const r = runCli(["checkup", "--help"]);
920
+ expect(r.status).toBe(0);
921
+ expect(r.stdout).toMatch(/express mode/i);
922
+ expect(r.stdout).toMatch(/--check-id/);
923
+ expect(r.stdout).toMatch(/--node-name/);
924
+ expect(r.stdout).toMatch(/--output/);
925
+ expect(r.stdout).toMatch(/upload/);
926
+ expect(r.stdout).toMatch(/--json/);
927
+ });
928
+
929
+ test("checkup --help shows available check IDs", () => {
930
+ const r = runCli(["checkup", "--help"]);
931
+ expect(r.status).toBe(0);
932
+ expect(r.stdout).toMatch(/A002/);
933
+ expect(r.stdout).toMatch(/A003/);
934
+ expect(r.stdout).toMatch(/A004/);
935
+ expect(r.stdout).toMatch(/A007/);
936
+ expect(r.stdout).toMatch(/A013/);
937
+ expect(r.stdout).toMatch(/H001/);
938
+ expect(r.stdout).toMatch(/H002/);
939
+ expect(r.stdout).toMatch(/H004/);
940
+ });
941
+
942
+ test("checkup without connection shows help", () => {
943
+ const r = runCli(["checkup"]);
944
+ expect(r.status).not.toBe(0);
945
+ // Should show full help (options + examples), like `checkup --help`
946
+ expect(r.stdout).toMatch(/generate health check reports/i);
947
+ expect(r.stdout).toMatch(/--check-id/);
948
+ expect(r.stdout).toMatch(/available checks/i);
949
+ expect(r.stdout).toMatch(/A002/);
950
+ });
951
+ });
952
+
953
+ // Tests for checkup-api module
954
+ describe("checkup-api", () => {
955
+ test("formatRpcErrorForDisplay formats details/hint nicely", () => {
956
+ const err = new api.RpcError({
957
+ rpcName: "checkup_report_file_post",
958
+ statusCode: 402,
959
+ payloadText: JSON.stringify({
960
+ hint: "Start an express checkup subscription for the organization or contact support.",
961
+ details: "Checkup report uploads require an active checkup subscription",
962
+ }),
963
+ payloadJson: {
964
+ hint: "Start an express checkup subscription for the organization or contact support.",
965
+ details: "Checkup report uploads require an active checkup subscription.",
966
+ },
967
+ });
968
+ const lines = api.formatRpcErrorForDisplay(err);
969
+ const text = lines.join("\n");
970
+ expect(text).toMatch(/RPC checkup_report_file_post failed: HTTP 402/);
971
+ expect(text).toMatch(/Details:/);
972
+ expect(text).toMatch(/Hint:/);
973
+ });
974
+
975
+ test("withRetry succeeds on first attempt", async () => {
976
+ let attempts = 0;
977
+ const result = await api.withRetry(async () => {
978
+ attempts++;
979
+ return "success";
980
+ });
981
+ expect(result).toBe("success");
982
+ expect(attempts).toBe(1);
983
+ });
984
+
985
+ test("withRetry retries on retryable errors and succeeds", async () => {
986
+ let attempts = 0;
987
+ const result = await api.withRetry(
988
+ async () => {
989
+ attempts++;
990
+ if (attempts < 3) {
991
+ throw new Error("connection timeout");
992
+ }
993
+ return "success after retry";
994
+ },
995
+ { maxAttempts: 3, initialDelayMs: 10 }
996
+ );
997
+ expect(result).toBe("success after retry");
998
+ expect(attempts).toBe(3);
999
+ });
1000
+
1001
+ test("withRetry calls onRetry callback", async () => {
1002
+ let attempts = 0;
1003
+ const retryLogs: string[] = [];
1004
+ await api.withRetry(
1005
+ async () => {
1006
+ attempts++;
1007
+ if (attempts < 2) {
1008
+ throw new Error("socket hang up");
1009
+ }
1010
+ return "ok";
1011
+ },
1012
+ { maxAttempts: 3, initialDelayMs: 10 },
1013
+ (attempt, err, delayMs) => {
1014
+ retryLogs.push(`attempt ${attempt}, delay ${delayMs}ms`);
1015
+ }
1016
+ );
1017
+ expect(retryLogs.length).toBe(1);
1018
+ expect(retryLogs[0]).toMatch(/attempt 1/);
1019
+ });
1020
+
1021
+ test("withRetry does not retry on non-retryable errors", async () => {
1022
+ let attempts = 0;
1023
+ try {
1024
+ await api.withRetry(
1025
+ async () => {
1026
+ attempts++;
1027
+ throw new Error("invalid input");
1028
+ },
1029
+ { maxAttempts: 3, initialDelayMs: 10 }
1030
+ );
1031
+ } catch (err) {
1032
+ expect((err as Error).message).toBe("invalid input");
1033
+ }
1034
+ expect(attempts).toBe(1);
1035
+ });
1036
+
1037
+ test("withRetry does not retry on 4xx RpcError", async () => {
1038
+ let attempts = 0;
1039
+ try {
1040
+ await api.withRetry(
1041
+ async () => {
1042
+ attempts++;
1043
+ throw new api.RpcError({
1044
+ rpcName: "test",
1045
+ statusCode: 400,
1046
+ payloadText: "bad request",
1047
+ payloadJson: null,
1048
+ });
1049
+ },
1050
+ { maxAttempts: 3, initialDelayMs: 10 }
1051
+ );
1052
+ } catch (err) {
1053
+ expect(err).toBeInstanceOf(api.RpcError);
1054
+ }
1055
+ expect(attempts).toBe(1);
1056
+ });
1057
+
1058
+ test("withRetry retries on 5xx RpcError", async () => {
1059
+ let attempts = 0;
1060
+ try {
1061
+ await api.withRetry(
1062
+ async () => {
1063
+ attempts++;
1064
+ throw new api.RpcError({
1065
+ rpcName: "test",
1066
+ statusCode: 503,
1067
+ payloadText: "service unavailable",
1068
+ payloadJson: null,
1069
+ });
1070
+ },
1071
+ { maxAttempts: 2, initialDelayMs: 10 }
1072
+ );
1073
+ } catch (err) {
1074
+ expect(err).toBeInstanceOf(api.RpcError);
1075
+ }
1076
+ expect(attempts).toBe(2);
1077
+ });
1078
+
1079
+ test("withRetry retries on timeout errors", async () => {
1080
+ // Tests that timeout-like error messages are considered retryable
1081
+ let attempts = 0;
1082
+ try {
1083
+ await api.withRetry(
1084
+ async () => {
1085
+ attempts++;
1086
+ throw new Error("RPC test timed out after 30000ms (no response)");
1087
+ },
1088
+ { maxAttempts: 3, initialDelayMs: 10 }
1089
+ );
1090
+ } catch (err) {
1091
+ expect(err).toBeInstanceOf(Error);
1092
+ expect((err as Error).message).toContain("timed out");
1093
+ }
1094
+ expect(attempts).toBe(3); // Should retry on timeout
1095
+ });
1096
+
1097
+ test("withRetry retries on ECONNRESET errors", async () => {
1098
+ // Tests that connection reset errors are considered retryable
1099
+ let attempts = 0;
1100
+ try {
1101
+ await api.withRetry(
1102
+ async () => {
1103
+ attempts++;
1104
+ const err = new Error("connection reset") as Error & { code: string };
1105
+ err.code = "ECONNRESET";
1106
+ throw err;
1107
+ },
1108
+ { maxAttempts: 2, initialDelayMs: 10 }
1109
+ );
1110
+ } catch (err) {
1111
+ expect(err).toBeInstanceOf(Error);
1112
+ }
1113
+ expect(attempts).toBe(2); // Should retry on ECONNRESET
1114
+ });
1115
+ });
1116
+
1117
+