postgresai 0.14.0-dev.75 → 0.14.0-dev.77

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/bin/postgres-ai.ts +312 -6
  2. package/dist/bin/postgres-ai.js +916 -40
  3. package/dist/sql/02.extensions.sql +8 -0
  4. package/dist/sql/{02.permissions.sql → 03.permissions.sql} +1 -0
  5. package/dist/sql/sql/02.extensions.sql +8 -0
  6. package/dist/sql/sql/{02.permissions.sql → 03.permissions.sql} +1 -0
  7. package/dist/sql/sql/uninit/01.helpers.sql +5 -0
  8. package/dist/sql/sql/uninit/02.permissions.sql +30 -0
  9. package/dist/sql/sql/uninit/03.role.sql +27 -0
  10. package/dist/sql/uninit/01.helpers.sql +5 -0
  11. package/dist/sql/uninit/02.permissions.sql +30 -0
  12. package/dist/sql/uninit/03.role.sql +27 -0
  13. package/lib/checkup-dictionary.ts +113 -0
  14. package/lib/checkup.ts +21 -14
  15. package/lib/init.ts +109 -8
  16. package/package.json +9 -7
  17. package/scripts/embed-checkup-dictionary.ts +106 -0
  18. package/sql/02.extensions.sql +8 -0
  19. package/sql/{02.permissions.sql → 03.permissions.sql} +1 -0
  20. package/sql/uninit/01.helpers.sql +5 -0
  21. package/sql/uninit/02.permissions.sql +30 -0
  22. package/sql/uninit/03.role.sql +27 -0
  23. package/test/checkup.test.ts +17 -18
  24. package/test/init.test.ts +245 -11
  25. package/lib/metrics-embedded.ts +0 -79
  26. /package/dist/sql/{03.optional_rds.sql → 04.optional_rds.sql} +0 -0
  27. /package/dist/sql/{04.optional_self_managed.sql → 05.optional_self_managed.sql} +0 -0
  28. /package/dist/sql/{05.helpers.sql → 06.helpers.sql} +0 -0
  29. /package/dist/sql/sql/{03.optional_rds.sql → 04.optional_rds.sql} +0 -0
  30. /package/dist/sql/sql/{04.optional_self_managed.sql → 05.optional_self_managed.sql} +0 -0
  31. /package/dist/sql/sql/{05.helpers.sql → 06.helpers.sql} +0 -0
  32. /package/sql/{03.optional_rds.sql → 04.optional_rds.sql} +0 -0
  33. /package/sql/{04.optional_self_managed.sql → 05.optional_self_managed.sql} +0 -0
  34. /package/sql/{05.helpers.sql → 06.helpers.sql} +0 -0
@@ -0,0 +1,30 @@
+-- Revoke permissions and drop objects created by prepare-db (template-filled by cli/lib/init.ts)
+
+-- Drop the postgres_ai.pg_statistic view
+drop view if exists postgres_ai.pg_statistic;
+
+-- Drop the postgres_ai schema (CASCADE to handle any remaining objects)
+drop schema if exists postgres_ai cascade;
+
+-- Revoke permissions from the monitoring role
+-- Use a DO block to handle the case where the role doesn't exist
+do $$ begin
+  revoke pg_monitor from {{ROLE_IDENT}};
+exception when undefined_object then
+  null; -- Role doesn't exist, nothing to revoke
+end $$;
+
+do $$ begin
+  revoke select on pg_catalog.pg_index from {{ROLE_IDENT}};
+exception when undefined_object then
+  null; -- Role doesn't exist
+end $$;
+
+do $$ begin
+  revoke connect on database {{DB_IDENT}} from {{ROLE_IDENT}};
+exception when undefined_object then
+  null; -- Role doesn't exist
+end $$;
+
+-- Note: USAGE on public is typically granted by default; we don't revoke it
+-- to avoid breaking other applications that may rely on it.
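The `{{ROLE_IDENT}}`, `{{DB_IDENT}}`, and `{{ROLE_LITERAL}}` placeholders are filled in by cli/lib/init.ts (per the header comment), and the tests added later in this diff pin down the quoting rules: identifiers are double-quoted with embedded `"` doubled, the role literal is single-quoted for use with `format('%I', ...)`, and null bytes are rejected. A minimal sketch of that template fill, with hypothetical helper names (the actual functions in init.ts may be structured differently):

```ts
// Hypothetical helpers illustrating the quoting the uninit templates need;
// the real implementation lives in cli/lib/init.ts and may differ.
function quoteIdent(name: string): string {
  if (name.includes("\0")) throw new Error("Identifier cannot contain null bytes");
  return '"' + name.replaceAll('"', '""') + '"'; // fills {{ROLE_IDENT}} / {{DB_IDENT}}
}

function quoteLiteral(value: string): string {
  if (value.includes("\0")) throw new Error("Identifier cannot contain null bytes");
  return "'" + value.replaceAll("'", "''") + "'"; // fills {{ROLE_LITERAL}} for format('%I', ...)
}

function fillTemplate(sql: string, role: string, db: string): string {
  return sql
    .replaceAll("{{ROLE_IDENT}}", quoteIdent(role))
    .replaceAll("{{DB_IDENT}}", quoteIdent(db))
    .replaceAll("{{ROLE_LITERAL}}", quoteLiteral(role));
}
```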
@@ -0,0 +1,27 @@
+-- Drop the monitoring role created by prepare-db (template-filled by cli/lib/init.ts)
+-- This must run after revoking all permissions from the role.
+
+-- Use a DO block to handle the case where the role doesn't exist
+do $$ begin
+  -- Reassign owned objects to current user before dropping
+  -- This handles any objects that might have been created by the role
+  begin
+    execute format('reassign owned by %I to current_user', {{ROLE_LITERAL}});
+  exception when undefined_object then
+    null; -- Role doesn't exist, nothing to reassign
+  end;
+
+  -- Drop owned objects (in case reassign didn't work for some objects)
+  begin
+    execute format('drop owned by %I', {{ROLE_LITERAL}});
+  exception when undefined_object then
+    null; -- Role doesn't exist
+  end;
+
+  -- Drop the role
+  begin
+    execute format('drop role %I', {{ROLE_LITERAL}});
+  exception when undefined_object then
+    null; -- Role doesn't exist, that's fine
+  end;
+end $$;
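These two templates, together with uninit/01.helpers.sql, are assembled into an ordered plan. Below is a sketch of that assembly consistent with the buildUninitPlan tests added further down in this diff; `renderTemplate` is a hypothetical stand-in for however lib/init.ts actually loads and fills the uninit/*.sql files:

```ts
// Sketch only: step names and skip rules are taken from the tests in
// package/test/init.test.ts; renderTemplate is hypothetical.
declare function renderTemplate(
  path: string,
  opts: { database: string; monitoringUser: string },
): Promise<string>;

interface UninitStep { name: string; sql: string }

async function buildUninitPlanSketch(opts: {
  database: string;
  monitoringUser: string;
  dropRole: boolean;
  provider?: string;
}): Promise<{ steps: UninitStep[] }> {
  const steps: UninitStep[] = [
    { name: "01.drop_helpers", sql: await renderTemplate("uninit/01.helpers.sql", opts) },
    { name: "02.revoke_permissions", sql: await renderTemplate("uninit/02.permissions.sql", opts) },
  ];
  // Role management isn't available on Supabase, so the drop-role step is
  // skipped there even when dropRole is requested (mirroring prepare-db,
  // which skips 01.role for that provider).
  if (opts.dropRole && opts.provider !== "supabase") {
    steps.push({ name: "03.drop_role", sql: await renderTemplate("uninit/03.role.sql", opts) });
  }
  return { steps };
}
```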
@@ -85,28 +85,27 @@ describe("createBaseReport", () => {
85
85
 
86
86
  // Tests for CHECK_INFO
87
87
  describe("CHECK_INFO and REPORT_GENERATORS", () => {
88
- const expectedChecks: Record<string, string> = {
89
- A002: "Postgres major version",
90
- A003: "Postgres settings",
91
- A004: "Cluster information",
92
- A007: "Altered settings",
93
- A013: "Postgres minor version",
94
- D004: "pg_stat_statements and pg_stat_kcache settings",
95
- F001: "Autovacuum: current settings",
96
- G001: "Memory-related settings",
97
- H001: "Invalid indexes",
98
- H002: "Unused indexes",
99
- H004: "Redundant indexes",
100
- };
101
-
102
- test("CHECK_INFO contains all expected checks with correct descriptions", () => {
103
- for (const [checkId, description] of Object.entries(expectedChecks)) {
104
- expect(checkup.CHECK_INFO[checkId]).toBe(description);
88
+ // Express-mode checks that have generators
89
+ const expressCheckIds = ["A002", "A003", "A004", "A007", "A013", "D004", "F001", "G001", "H001", "H002", "H004"];
90
+
91
+ test("CHECK_INFO contains all express-mode checks", () => {
92
+ for (const checkId of expressCheckIds) {
93
+ expect(checkup.CHECK_INFO[checkId]).toBeDefined();
94
+ expect(typeof checkup.CHECK_INFO[checkId]).toBe("string");
95
+ expect(checkup.CHECK_INFO[checkId].length).toBeGreaterThan(0);
105
96
  }
106
97
  });
107
98
 
99
+ test("CHECK_INFO titles are loaded from embedded dictionary", () => {
100
+ // Verify a few known titles match the API dictionary
101
+ // These are canonical titles from postgres.ai/api/general/checkup_dictionary
102
+ expect(checkup.CHECK_INFO["A002"]).toBe("Postgres major version");
103
+ expect(checkup.CHECK_INFO["H001"]).toBe("Invalid indexes");
104
+ expect(checkup.CHECK_INFO["H002"]).toBe("Unused indexes");
105
+ });
106
+
108
107
  test("REPORT_GENERATORS has function for each check", () => {
109
- for (const checkId of Object.keys(expectedChecks)) {
108
+ for (const checkId of expressCheckIds) {
110
109
  expect(typeof checkup.REPORT_GENERATORS[checkId]).toBe("function");
111
110
  }
112
111
  });
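The removed hard-coded titles are now sourced from an embedded dictionary: the file list above adds lib/checkup-dictionary.ts plus a scripts/embed-checkup-dictionary.ts generator, and the test comment names postgres.ai/api/general/checkup_dictionary as the canonical source. A rough sketch of what such a generator could look like; the endpoint path comes from the test comment, while the response shape and output path are assumptions:

```ts
// Assumed entry shape; the real generator is scripts/embed-checkup-dictionary.ts.
interface DictionaryEntry {
  id: string;    // e.g. "A002"
  title: string; // e.g. "Postgres major version"
}

async function embedCheckupDictionary(): Promise<void> {
  const res = await fetch("https://postgres.ai/api/general/checkup_dictionary");
  if (!res.ok) throw new Error(`dictionary fetch failed: ${res.status}`);
  const entries = (await res.json()) as DictionaryEntry[];
  const checkInfo = Object.fromEntries(entries.map((e) => [e.id, e.title]));
  const source =
    "// AUTO-GENERATED FILE - DO NOT EDIT\n" +
    `export const CHECK_INFO: Record<string, string> = ${JSON.stringify(checkInfo, null, 2)};\n`;
  await Bun.write("lib/checkup-dictionary.ts", source);
}
```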
package/test/init.test.ts CHANGED
@@ -89,7 +89,7 @@ describe("init module", () => {
     expect(roleStep.sql).toMatch(/create\s+user\s+"user ""with"" quotes ✓"/i);
     expect(roleStep.sql).toMatch(/alter\s+user\s+"user ""with"" quotes ✓"/i);
 
-    const permStep = plan.steps.find((s: { name: string }) => s.name === "02.permissions");
+    const permStep = plan.steps.find((s: { name: string }) => s.name === "03.permissions");
     expect(permStep).toBeTruthy();
     expect(permStep.sql).toMatch(/grant connect on database "db name ""with"" quotes ✓" to "user ""with"" quotes ✓"/i);
   });
@@ -161,7 +161,7 @@ describe("init module", () => {
       provider: "supabase",
     });
     expect(plan.steps.some((s) => s.name === "01.role")).toBe(false);
-    expect(plan.steps.some((s) => s.name === "02.permissions")).toBe(true);
+    expect(plan.steps.some((s) => s.name === "03.permissions")).toBe(true);
   });
 
   test("buildInitPlan removes ALTER USER for supabase provider", async () => {
@@ -172,7 +172,7 @@ describe("init module", () => {
       includeOptionalPermissions: false,
       provider: "supabase",
     });
-    const permStep = plan.steps.find((s) => s.name === "02.permissions");
+    const permStep = plan.steps.find((s) => s.name === "03.permissions");
     expect(permStep).toBeDefined();
     expect(permStep!.sql.toLowerCase()).not.toMatch(/alter user/);
   });
@@ -390,7 +390,7 @@ describe("init module", () => {
       includeOptionalPermissions: false,
       provider: "supabase",
     });
-    const permStep = plan.steps.find((s) => s.name === "02.permissions");
+    const permStep = plan.steps.find((s) => s.name === "03.permissions");
     expect(permStep).toBeDefined();
     // Should have removed ALTER USER but kept comments
     expect(permStep!.sql.toLowerCase()).not.toMatch(/^\s*alter\s+user/m);
@@ -423,6 +423,179 @@ describe("init module", () => {
     const redacted = init.redactPasswordsInSql(step.sql);
     expect(redacted).toMatch(/password '<redacted>'/i);
   });
+
+  // Tests for buildUninitPlan
+  test("buildUninitPlan generates correct steps with dropRole=true", async () => {
+    const plan = await init.buildUninitPlan({
+      database: "mydb",
+      monitoringUser: DEFAULT_MONITORING_USER,
+      dropRole: true,
+    });
+
+    expect(plan.database).toBe("mydb");
+    expect(plan.monitoringUser).toBe(DEFAULT_MONITORING_USER);
+    expect(plan.dropRole).toBe(true);
+    expect(plan.steps.length).toBe(3);
+    expect(plan.steps.map((s) => s.name)).toEqual([
+      "01.drop_helpers",
+      "02.revoke_permissions",
+      "03.drop_role",
+    ]);
+  });
+
+  test("buildUninitPlan skips role drop when dropRole=false", async () => {
+    const plan = await init.buildUninitPlan({
+      database: "mydb",
+      monitoringUser: DEFAULT_MONITORING_USER,
+      dropRole: false,
+    });
+
+    expect(plan.dropRole).toBe(false);
+    expect(plan.steps.length).toBe(2);
+    expect(plan.steps.map((s) => s.name)).toEqual([
+      "01.drop_helpers",
+      "02.revoke_permissions",
+    ]);
+  });
+
+  test("buildUninitPlan skips role drop for supabase provider", async () => {
+    const plan = await init.buildUninitPlan({
+      database: "mydb",
+      monitoringUser: DEFAULT_MONITORING_USER,
+      dropRole: true,
+      provider: "supabase",
+    });
+
+    // Even with dropRole=true, supabase provider skips role operations
+    expect(plan.steps.length).toBe(2);
+    expect(plan.steps.some((s) => s.name === "03.drop_role")).toBe(false);
+  });
+
+  test("buildUninitPlan handles special characters in identifiers", async () => {
+    const monitoringUser = 'user "with" quotes';
+    const database = 'db "name"';
+    const plan = await init.buildUninitPlan({
+      database,
+      monitoringUser,
+      dropRole: true,
+    });
+
+    // Check that identifiers are properly quoted in SQL
+    const dropHelpersStep = plan.steps.find((s) => s.name === "01.drop_helpers");
+    expect(dropHelpersStep).toBeTruthy();
+
+    const revokeStep = plan.steps.find((s) => s.name === "02.revoke_permissions");
+    expect(revokeStep).toBeTruthy();
+    expect(revokeStep!.sql).toContain('"user ""with"" quotes"');
+    expect(revokeStep!.sql).toContain('"db ""name"""');
+
+    const dropRoleStep = plan.steps.find((s) => s.name === "03.drop_role");
+    expect(dropRoleStep).toBeTruthy();
+    // Uses ROLE_LITERAL (single-quoted) for format('%I', ...) in dynamic SQL
+    expect(dropRoleStep!.sql).toContain("'user \"with\" quotes'");
+  });
+
+  test("buildUninitPlan rejects identifiers with null bytes", async () => {
+    await expect(
+      init.buildUninitPlan({
+        database: "mydb",
+        monitoringUser: "bad\0user",
+        dropRole: true,
+      })
+    ).rejects.toThrow(/Identifier cannot contain null bytes/);
+  });
+
+  test("applyUninitPlan continues on errors and reports them", async () => {
+    const plan = {
+      monitoringUser: DEFAULT_MONITORING_USER,
+      database: "mydb",
+      dropRole: true,
+      steps: [
+        { name: "01.drop_helpers", sql: "drop function if exists postgres_ai.test()" },
+        { name: "02.revoke_permissions", sql: "select 1/0" }, // Will fail
+        { name: "03.drop_role", sql: "select 1" },
+      ],
+    };
+
+    const calls: string[] = [];
+    const client = {
+      query: async (sql: string) => {
+        calls.push(sql);
+        if (sql === "begin;") return { rowCount: 1 };
+        if (sql === "commit;") return { rowCount: 1 };
+        if (sql === "rollback;") return { rowCount: 1 };
+        if (sql.includes("1/0")) throw new Error("division by zero");
+        return { rowCount: 1 };
+      },
+    };
+
+    const result = await init.applyUninitPlan({ client: client as any, plan: plan as any });
+
+    // Should have applied steps 1 and 3, with step 2 in errors
+    expect(result.applied).toContain("01.drop_helpers");
+    expect(result.applied).toContain("03.drop_role");
+    expect(result.applied).not.toContain("02.revoke_permissions");
+    expect(result.errors.length).toBe(1);
+    expect(result.errors[0]).toMatch(/02\.revoke_permissions.*division by zero/);
+  });
+
+  test("buildInitPlan includes 02.extensions step with pg_stat_statements", async () => {
+    const plan = await init.buildInitPlan({
+      database: "mydb",
+      monitoringUser: DEFAULT_MONITORING_USER,
+      monitoringPassword: "pw",
+      includeOptionalPermissions: false,
+    });
+
+    const extStep = plan.steps.find((s) => s.name === "02.extensions");
+    expect(extStep).toBeTruthy();
+    // Should create pg_stat_statements with IF NOT EXISTS
+    expect(extStep!.sql).toMatch(/create extension if not exists pg_stat_statements/i);
+  });
+
+  test("buildInitPlan creates extensions before permissions", async () => {
+    const plan = await init.buildInitPlan({
+      database: "mydb",
+      monitoringUser: DEFAULT_MONITORING_USER,
+      monitoringPassword: "pw",
+      includeOptionalPermissions: false,
+    });
+
+    const stepNames = plan.steps.map((s) => s.name);
+    const extIndex = stepNames.indexOf("02.extensions");
+    const permIndex = stepNames.indexOf("03.permissions");
+    expect(extIndex).toBeGreaterThanOrEqual(0);
+    expect(permIndex).toBeGreaterThanOrEqual(0);
+    // Extensions should come before permissions
+    expect(extIndex).toBeLessThan(permIndex);
+  });
+
+  test("buildInitPlan uses IF NOT EXISTS for postgres_ai schema (idempotent)", async () => {
+    const plan = await init.buildInitPlan({
+      database: "mydb",
+      monitoringUser: DEFAULT_MONITORING_USER,
+      monitoringPassword: "pw",
+      includeOptionalPermissions: false,
+    });
+
+    const permStep = plan.steps.find((s) => s.name === "03.permissions");
+    expect(permStep).toBeTruthy();
+    // Should use IF NOT EXISTS for idempotent behavior
+    expect(permStep!.sql).toMatch(/create schema if not exists postgres_ai/i);
+  });
+
+  test("buildUninitPlan does NOT drop pg_stat_statements extension", async () => {
+    const plan = await init.buildUninitPlan({
+      database: "mydb",
+      monitoringUser: DEFAULT_MONITORING_USER,
+      dropRole: true,
+    });
+
+    // Check all steps - none should drop pg_stat_statements
+    for (const step of plan.steps) {
+      expect(step.sql.toLowerCase()).not.toMatch(/drop extension.*pg_stat_statements/);
+    }
+  });
 });
 
 describe("CLI commands", () => {
@@ -446,8 +619,9 @@ describe("CLI commands", () => {
     expect(r.stdout).toMatch(/provider: supabase/);
     // Should not have 01.role step
     expect(r.stdout).not.toMatch(/-- 01\.role/);
-    // Should have 02.permissions step
-    expect(r.stdout).toMatch(/-- 02\.permissions/);
+    // Should have 02.extensions and 03.permissions steps
+    expect(r.stdout).toMatch(/-- 02\.extensions/);
+    expect(r.stdout).toMatch(/-- 03\.permissions/);
   });
 
   test("cli: prepare-db warns about unknown provider", () => {
@@ -571,11 +745,66 @@ describe("CLI commands", () => {
     expect(r.status).not.toBe(0);
     expect(r.stderr).toMatch(/Cannot use --api-key with --demo mode/);
   });
+
+  // Tests for unprepare-db command
+  test("cli: unprepare-db with missing connection prints help/options", () => {
+    const r = runCli(["unprepare-db"]);
+    expect(r.status).not.toBe(0);
+    expect(r.stderr).toMatch(/--print-sql/);
+    expect(r.stderr).toMatch(/--monitoring-user/);
+  });
+
+  test("cli: unprepare-db --print-sql works without connection (offline mode)", () => {
+    const r = runCli(["unprepare-db", "--print-sql", "-d", "mydb"]);
+    expect(r.status).toBe(0);
+    expect(r.stdout).toMatch(/SQL plan \(offline; not connected\)/);
+    expect(r.stdout).toMatch(/drop schema if exists postgres_ai/i);
+  });
+
+  test("cli: unprepare-db --print-sql with --keep-role skips role drop", () => {
+    const r = runCli(["unprepare-db", "--print-sql", "-d", "mydb", "--keep-role"]);
+    expect(r.status).toBe(0);
+    expect(r.stdout).toMatch(/drop role: false/);
+    // Should not have 03.drop_role step
+    expect(r.stdout).not.toMatch(/-- 03\.drop_role/);
+    // Should have 01 and 02 steps
+    expect(r.stdout).toMatch(/-- 01\.drop_helpers/);
+    expect(r.stdout).toMatch(/-- 02\.revoke_permissions/);
+  });
+
+  test("cli: unprepare-db --print-sql with --provider supabase skips role step", () => {
+    const r = runCli(["unprepare-db", "--print-sql", "-d", "mydb", "--provider", "supabase"]);
+    expect(r.status).toBe(0);
+    expect(r.stdout).toMatch(/provider: supabase/);
+    // Should not have 03.drop_role step
+    expect(r.stdout).not.toMatch(/-- 03\.drop_role/);
+  });
+
+  test("cli: unprepare-db command exists and shows help", () => {
+    const r = runCli(["unprepare-db", "--help"]);
+    expect(r.status).toBe(0);
+    expect(r.stdout).toMatch(/--keep-role/);
+    expect(r.stdout).toMatch(/--print-sql/);
+    expect(r.stdout).toMatch(/--force/);
+  });
 });
 
-describe("imageTag priority behavior", () => {
+// Check if Docker is available for imageTag tests
+function isDockerAvailable(): boolean {
+  try {
+    const result = Bun.spawnSync(["docker", "info"], { timeout: 5000 });
+    return result.exitCode === 0;
+  } catch {
+    return false;
+  }
+}
+
+const dockerAvailable = isDockerAvailable();
+
+describe.skipIf(!dockerAvailable)("imageTag priority behavior", () => {
   // Tests for the imageTag priority: --tag flag > PGAI_TAG env var > pkg.version
   // This verifies the fix that prevents stale .env PGAI_TAG from being used
+  // These tests require Docker and spawn subprocesses so need longer timeout
 
   let tempDir: string;
 
@@ -598,11 +827,13 @@ describe("imageTag priority behavior", () => {
     fs.writeFileSync(resolve(testDir, "docker-compose.yml"), "version: '3'\nservices: {}\n");
 
     // Run from the test directory (so resolvePaths finds docker-compose.yml)
+    // Note: Command may hang on Docker check in CI without Docker, so we use a timeout
     const cliPath = resolve(import.meta.dir, "..", "bin", "postgres-ai.ts");
     const bunBin = typeof process.execPath === "string" && process.execPath.length > 0 ? process.execPath : "bun";
     const result = Bun.spawnSync([bunBin, cliPath, "mon", "local-install", "--db-url", "postgresql://u:p@h:5432/d", "--yes"], {
       env: { ...process.env, PGAI_TAG: undefined },
       cwd: testDir,
+      timeout: 30000, // Kill subprocess after 30s if it hangs on Docker
     });
 
     // Read the .env that was written
@@ -612,7 +843,7 @@ describe("imageTag priority behavior", () => {
     expect(envContent).not.toMatch(/PGAI_TAG=beta/);
     // It should contain the CLI version (0.0.0-dev.0 in dev)
     expect(envContent).toMatch(/PGAI_TAG=\d+\.\d+\.\d+|PGAI_TAG=0\.0\.0-dev/);
-  });
+  }, 60000);
 
   test("--tag flag takes priority over pkg.version", () => {
     const testDir = resolve(tempDir, "tag-flag-test");
@@ -624,6 +855,7 @@ describe("imageTag priority behavior", () => {
     const result = Bun.spawnSync([bunBin, cliPath, "mon", "local-install", "--tag", "v1.2.3-custom", "--db-url", "postgresql://u:p@h:5432/d", "--yes"], {
       env: { ...process.env, PGAI_TAG: undefined },
       cwd: testDir,
+      timeout: 30000,
     });
 
     const envContent = fs.readFileSync(resolve(testDir, ".env"), "utf8");
@@ -632,7 +864,7 @@ describe("imageTag priority behavior", () => {
     // Verify stdout confirms the tag being used
     const stdout = new TextDecoder().decode(result.stdout);
    expect(stdout).toMatch(/Using image tag: v1\.2\.3-custom/);
-  });
+  }, 60000);
 
   test("PGAI_TAG env var is intentionally ignored (Bun auto-loads .env)", () => {
     // Note: We do NOT use process.env.PGAI_TAG because Bun auto-loads .env files,
@@ -647,13 +879,14 @@ describe("imageTag priority behavior", () => {
     const result = Bun.spawnSync([bunBin, cliPath, "mon", "local-install", "--db-url", "postgresql://u:p@h:5432/d", "--yes"], {
       env: { ...process.env, PGAI_TAG: "v2.0.0-from-env" },
       cwd: testDir,
+      timeout: 30000,
     });
 
     const envContent = fs.readFileSync(resolve(testDir, ".env"), "utf8");
     // PGAI_TAG env var should be IGNORED - uses pkg.version instead
     expect(envContent).not.toMatch(/PGAI_TAG=v2\.0\.0-from-env/);
     expect(envContent).toMatch(/PGAI_TAG=\d+\.\d+\.\d+|PGAI_TAG=0\.0\.0-dev/);
-  });
+  }, 60000);
 
   test("existing registry and password are preserved while tag is updated", () => {
     const testDir = resolve(tempDir, "preserve-test");
@@ -668,6 +901,7 @@ describe("imageTag priority behavior", () => {
     const result = Bun.spawnSync([bunBin, cliPath, "mon", "local-install", "--db-url", "postgresql://u:p@h:5432/d", "--yes"], {
       env: { ...process.env, PGAI_TAG: undefined },
      cwd: testDir,
+      timeout: 30000,
     });
 
     const envContent = fs.readFileSync(resolve(testDir, ".env"), "utf8");
@@ -678,5 +912,5 @@ describe("imageTag priority behavior", () => {
     // But registry and password should be preserved
     expect(envContent).toMatch(/PGAI_REGISTRY=my\.registry\.com/);
     expect(envContent).toMatch(/GF_SECURITY_ADMIN_PASSWORD=secret123/);
-  });
+  }, 60000);
 });
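Taken together, the imageTag tests pin down a simple resolution rule: an explicit --tag flag wins, otherwise the CLI's own package version is used, and PGAI_TAG from the environment is deliberately ignored because Bun auto-loads .env files, which previously let a stale PGAI_TAG written by an earlier install leak through. Reduced to a sketch (the actual logic lives in the CLI sources; exact location assumed):

```ts
// Sketch of the tag-resolution rule the tests above assert.
function resolveImageTag(tagFlag: string | undefined, pkgVersion: string): string {
  // Deliberately no process.env.PGAI_TAG fallback: Bun auto-loads .env,
  // so a stale PGAI_TAG there would otherwise override the CLI version.
  return tagFlag ?? pkgVersion;
}
```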
package/lib/metrics-embedded.ts DELETED
@@ -1,79 +0,0 @@
-// AUTO-GENERATED FILE - DO NOT EDIT
-// Generated from config/pgwatch-prometheus/metrics.yml by scripts/embed-metrics.ts
-// Generated at: 2026-01-12T23:17:14.698Z
-
-/**
- * Metric definition from metrics.yml
- */
-export interface MetricDefinition {
-  description?: string;
-  sqls: Record<number, string>; // PG major version -> SQL query
-  gauges?: string[];
-  statement_timeout_seconds?: number;
-}
-
-/**
- * Embedded metrics for express mode reports.
- * Only includes metrics required for CLI checkup reports.
- */
-export const METRICS: Record<string, MetricDefinition> = {
-  "settings": {
-    description: "This metric collects various PostgreSQL server settings and configurations. It provides insights into the server's configuration, including version, memory settings, and other important parameters. This metric is useful for monitoring server settings and ensuring optimal performance. Note: For lock_timeout and statement_timeout, we use reset_val instead of setting because pgwatch overrides these during metric collection, which would mask the actual configured values.",
-    sqls: {
-      11: "with base as ( /* pgwatch_generated */\n select\n name,\n -- Use reset_val for lock_timeout/statement_timeout because pgwatch overrides them\n -- during collection (lock_timeout=100ms, statement_timeout per-metric).\n case\n when name in ('lock_timeout', 'statement_timeout') then reset_val\n else setting\n end as effective_setting,\n unit,\n category,\n vartype,\n -- For lock_timeout/statement_timeout, compare reset_val with boot_val\n -- since source becomes 'session' during collection.\n case\n when name in ('lock_timeout', 'statement_timeout') then (reset_val = boot_val)\n else (source = 'default')\n end as is_default_bool\n from pg_settings\n), with_numeric as (\n select\n *,\n case\n when effective_setting ~ '^-?[0-9]+$' then effective_setting::bigint\n else null\n end as numeric_value\n from base\n)\nselect\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n name as tag_setting_name,\n effective_setting as tag_setting_value,\n unit as tag_unit,\n category as tag_category,\n vartype as tag_vartype,\n numeric_value,\n case\n when numeric_value is null then null\n when unit = '8kB' then numeric_value * 8192\n when unit = 'kB' then numeric_value * 1024\n when unit = 'MB' then numeric_value * 1024 * 1024\n when unit = 'B' then numeric_value\n when unit = 'ms' then numeric_value::numeric / 1000\n when unit = 's' then numeric_value::numeric\n when unit = 'min' then numeric_value::numeric * 60\n else null\n end as setting_normalized,\n case unit\n when '8kB' then 'bytes'\n when 'kB' then 'bytes'\n when 'MB' then 'bytes'\n when 'B' then 'bytes'\n when 'ms' then 'seconds'\n when 's' then 'seconds'\n when 'min' then 'seconds'\n else null\n end as unit_normalized,\n case when is_default_bool then 1 else 0 end as is_default,\n 1 as configured\nfrom with_numeric",
-    },
-    gauges: ["*"],
-    statement_timeout_seconds: 15,
-  },
-  "db_stats": {
-    description: "Retrieves key statistics from the PostgreSQL `pg_stat_database` view, providing insights into the current database's performance. It returns the number of backends, transaction commits and rollbacks, buffer reads and hits, tuple statistics, conflicts, temporary files and bytes, deadlocks, block read and write times, postmaster uptime, backup duration, recovery status, system identifier, and invalid indexes. This metric helps administrators monitor database activity and performance.",
-    sqls: {
-      11: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
-      12: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
-      14: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n extract(epoch from (now() - pg_backup_start_time()))::int8 as backup_duration_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n session_time::int8,\n active_time::int8,\n idle_in_transaction_time::int8,\n sessions,\n sessions_abandoned,\n sessions_fatal,\n sessions_killed,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
-      15: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n numbackends,\n xact_commit,\n xact_rollback,\n blks_read,\n blks_hit,\n tup_returned,\n tup_fetched,\n tup_inserted,\n tup_updated,\n tup_deleted,\n conflicts,\n temp_files,\n temp_bytes,\n deadlocks,\n blk_read_time,\n blk_write_time,\n extract(epoch from (now() - pg_postmaster_start_time()))::int8 as postmaster_uptime_s,\n checksum_failures,\n extract(epoch from (now() - checksum_last_failure))::int8 as checksum_last_failure_s,\n case when pg_is_in_recovery() then 1 else 0 end as in_recovery_int,\n system_identifier::text as tag_sys_id,\n session_time::int8,\n active_time::int8,\n idle_in_transaction_time::int8,\n sessions,\n sessions_abandoned,\n sessions_fatal,\n sessions_killed,\n (select count(*) from pg_index i\n where not indisvalid\n and not exists ( /* leave out ones that are being actively rebuilt */\n select * from pg_locks l\n join pg_stat_activity a using (pid)\n where l.relation = i.indexrelid\n and a.state = 'active'\n and a.query ~* 'concurrently'\n )) as invalid_indexes\nfrom\n pg_stat_database, pg_control_system()\nwhere\n datname = current_database()",
-    },
-    gauges: ["*"],
-    statement_timeout_seconds: 15,
-  },
-  "db_size": {
-    description: "Retrieves the size of the current database and the size of the `pg_catalog` schema, providing insights into the storage usage of the database. It returns the size in bytes for both the current database and the catalog schema. This metric helps administrators monitor database size and storage consumption.",
-    sqls: {
-      11: "select /* pgwatch_generated */\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n pg_database_size(current_database()) as size_b,\n (select sum(pg_total_relation_size(c.oid))::int8\n from pg_class c join pg_namespace n on n.oid = c.relnamespace\n where nspname = 'pg_catalog' and relkind = 'r'\n ) as catalog_size_b",
-    },
-    gauges: ["*"],
-    statement_timeout_seconds: 300,
-  },
-  "pg_invalid_indexes": {
-    description: "This metric identifies invalid indexes in the database with decision tree data for remediation. It provides insights into whether to DROP (if duplicate exists), RECREATE (if backs constraint), or flag as UNCERTAIN (if additional RCA is needed to check query plans). Decision tree: 1) Valid duplicate exists -> DROP, 2) Backs PK/UNIQUE constraint -> RECREATE, 3) Table < 10K rows -> RECREATE (small tables rebuild quickly, typically under 1 second), 4) Otherwise -> UNCERTAIN (need query plan analysis to assess impact).",
-    sqls: {
-      11: "with fk_indexes as ( /* pgwatch_generated */\n select\n schemaname as schema_name,\n indexrelid,\n (indexrelid::regclass)::text as index_name,\n (relid::regclass)::text as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_stat_all_indexes\n join pg_index using (indexrelid)\n left join pg_constraint\n on array_to_string(indkey, ',') = array_to_string(conkey, ',')\n and schemaname = (connamespace::regnamespace)::text\n and conrelid = relid\n and contype = 'f'\n where idx_scan = 0\n and indisunique is false\n and conkey is not null\n),\n-- Find valid indexes that could be duplicates (same table, same columns)\nvalid_duplicates as (\n select\n inv.indexrelid as invalid_indexrelid,\n val.indexrelid as valid_indexrelid,\n (val.indexrelid::regclass)::text as valid_index_name,\n pg_get_indexdef(val.indexrelid) as valid_index_definition\n from pg_index inv\n join pg_index val on inv.indrelid = val.indrelid -- same table\n and inv.indkey = val.indkey -- same columns (in same order)\n and inv.indexrelid != val.indexrelid -- different index\n and val.indisvalid = true -- valid index\n where inv.indisvalid = false\n),\ndata as (\n select\n pci.relname as tag_index_name,\n pn.nspname as tag_schema_name,\n pct.relname as tag_table_name,\n coalesce(nullif(quote_ident(pn.nspname), 'public') || '.', '') || quote_ident(pct.relname) as tag_relation_name,\n pg_get_indexdef(pidx.indexrelid) as index_definition,\n pg_relation_size(pidx.indexrelid) as index_size_bytes,\n -- Constraint info\n pidx.indisprimary as is_pk,\n pidx.indisunique as is_unique,\n con.conname as constraint_name,\n -- Table row estimate\n pct.reltuples::bigint as table_row_estimate,\n -- Valid duplicate check\n (vd.valid_indexrelid is not null) as has_valid_duplicate,\n vd.valid_index_name,\n vd.valid_index_definition,\n -- FK support check\n ((\n select count(1)\n from fk_indexes fi\n where fi.fk_table_ref = pct.relname\n and fi.opclasses like (array_to_string(pidx.indclass, ', ') || '%')\n ) > 0)::int as supports_fk\n from pg_index pidx\n join pg_class pci on pci.oid = pidx.indexrelid\n join pg_class pct on pct.oid = pidx.indrelid\n left join pg_namespace pn on pn.oid = pct.relnamespace\n left join pg_constraint con on con.conindid = pidx.indexrelid\n left join valid_duplicates vd on vd.invalid_indexrelid = pidx.indexrelid\n where pidx.indisvalid = false\n),\nnum_data as (\n select\n row_number() over () as num,\n data.*\n from data\n)\nselect\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n num_data.*\nfrom num_data\nlimit 1000;\n",
-    },
-    gauges: ["*"],
-    statement_timeout_seconds: 15,
-  },
-  "unused_indexes": {
-    description: "This metric identifies unused indexes in the database. It provides insights into the number of unused indexes and their details. This metric helps administrators identify and fix unused indexes to improve database performance.",
-    sqls: {
-      11: "with fk_indexes as ( /* pgwatch_generated */\n select\n n.nspname as schema_name,\n ci.relname as index_name,\n cr.relname as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_constraint cn on cn.conrelid = cr.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n contype = 'f'\n and i.indisunique is false\n and conkey is not null\n and ci.relpages > 5\n and si.idx_scan < 10\n), table_scans as (\n select relid,\n tables.idx_scan + tables.seq_scan as all_scans,\n ( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del ) as writes,\n pg_relation_size(relid) as table_size\n from pg_stat_all_tables as tables\n join pg_class c on c.oid = relid\n where c.relpages > 5\n), indexes as (\n select\n i.indrelid,\n i.indexrelid,\n n.nspname as schema_name,\n cr.relname as table_name,\n ci.relname as index_name,\n si.idx_scan,\n pg_relation_size(i.indexrelid) as index_bytes,\n ci.relpages,\n (case when a.amname = 'btree' then true else false end) as idx_is_btree,\n array_to_string(i.indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_am a on ci.relam = a.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n i.indisunique = false\n and i.indisvalid = true\n and ci.relpages > 5\n), index_ratios as (\n select\n i.indexrelid as index_id,\n i.schema_name,\n i.table_name,\n i.index_name,\n idx_scan,\n all_scans,\n round(( case when all_scans = 0 then 0.0::numeric\n else idx_scan::numeric/all_scans * 100 end), 2) as index_scan_pct,\n writes,\n round((case when writes = 0 then idx_scan::numeric else idx_scan::numeric/writes end), 2)\n as scans_per_write,\n index_bytes as index_size_bytes,\n table_size as table_size_bytes,\n i.relpages,\n idx_is_btree,\n i.opclasses,\n (\n select count(1)\n from fk_indexes fi\n where fi.fk_table_ref = i.table_name\n and fi.schema_name = i.schema_name\n and fi.opclasses like (i.opclasses || '%')\n ) > 0 as supports_fk\n from indexes i\n join table_scans ts on ts.relid = i.indrelid\n)\nselect\n 'Never Used Indexes' as tag_reason,\n current_database() as tag_datname,\n index_id,\n schema_name as tag_schema_name,\n table_name as tag_table_name,\n index_name as tag_index_name,\n pg_get_indexdef(index_id) as index_definition,\n idx_scan,\n all_scans,\n index_scan_pct,\n writes,\n scans_per_write,\n index_size_bytes,\n table_size_bytes,\n relpages,\n idx_is_btree,\n opclasses as tag_opclasses,\n supports_fk\nfrom index_ratios\nwhere\n idx_scan = 0\n and idx_is_btree\norder by index_size_bytes desc\nlimit 1000;\n",
-    },
-    gauges: ["*"],
-    statement_timeout_seconds: 15,
-  },
-  "redundant_indexes": {
-    description: "This metric identifies redundant indexes that can potentially be dropped to save storage space and improve write performance. It analyzes index relationships and finds indexes that are covered by other indexes, considering column order, operator classes, and foreign key constraints. Uses the exact logic from tmp.sql with JSON aggregation and proper thresholds.",
-    sqls: {
-      11: "with fk_indexes as ( /* pgwatch_generated */\n select\n n.nspname as schema_name,\n ci.relname as index_name,\n cr.relname as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'\n join pg_namespace n on n.oid = ci.relnamespace\n join pg_constraint cn on cn.conrelid = cr.oid\n left join pg_stat_all_indexes as si on si.indexrelid = i.indexrelid\n where\n contype = 'f'\n and i.indisunique is false\n and conkey is not null\n and ci.relpages > 5\n and si.idx_scan < 10\n),\n-- Redundant indexes\nindex_data as (\n select\n *,\n indkey::text as columns,\n array_to_string(indclass, ', ') as opclasses\n from pg_index i\n join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'\n where indisvalid = true and ci.relpages > 5\n), redundant_indexes as (\n select\n i2.indexrelid as index_id,\n tnsp.nspname as schema_name,\n trel.relname as table_name,\n pg_relation_size(trel.oid) as table_size_bytes,\n irel.relname as index_name,\n am1.amname as access_method,\n (i1.indexrelid::regclass)::text as reason,\n i1.indexrelid as reason_index_id,\n pg_get_indexdef(i1.indexrelid) main_index_def,\n pg_relation_size(i1.indexrelid) main_index_size_bytes,\n pg_get_indexdef(i2.indexrelid) index_def,\n pg_relation_size(i2.indexrelid) index_size_bytes,\n s.idx_scan as index_usage,\n quote_ident(tnsp.nspname) as formated_schema_name,\n coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(irel.relname) as formated_index_name,\n quote_ident(trel.relname) as formated_table_name,\n coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(trel.relname) as formated_relation_name,\n i2.opclasses\n from (\n select indrelid, indexrelid, opclasses, indclass, indexprs, indpred, indisprimary, indisunique, columns\n from index_data\n order by indexrelid\n ) as i1\n join index_data as i2 on (\n i1.indrelid = i2.indrelid -- same table\n and i1.indexrelid <> i2.indexrelid -- NOT same index\n )\n inner join pg_opclass op1 on i1.indclass[0] = op1.oid\n inner join pg_opclass op2 on i2.indclass[0] = op2.oid\n inner join pg_am am1 on op1.opcmethod = am1.oid\n inner join pg_am am2 on op2.opcmethod = am2.oid\n join pg_stat_all_indexes as s on s.indexrelid = i2.indexrelid\n join pg_class as trel on trel.oid = i2.indrelid\n join pg_namespace as tnsp on trel.relnamespace = tnsp.oid\n join pg_class as irel on irel.oid = i2.indexrelid\n where\n not i2.indisprimary -- index 1 is not primary\n and not i2.indisunique -- index 1 is not unique (unique indexes serve constraint purpose)\n and am1.amname = am2.amname -- same access type\n and i1.columns like (i2.columns || '%') -- index 2 includes all columns from index 1\n and i1.opclasses like (i2.opclasses || '%')\n -- index expressions is same\n and pg_get_expr(i1.indexprs, i1.indrelid) is not distinct from pg_get_expr(i2.indexprs, i2.indrelid)\n -- index predicates is same\n and pg_get_expr(i1.indpred, i1.indrelid) is not distinct from pg_get_expr(i2.indpred, i2.indrelid)\n), redundant_indexes_fk as (\n select\n ri.*,\n ((\n select count(1)\n from fk_indexes fi\n where\n fi.fk_table_ref = ri.table_name\n and fi.opclasses like (ri.opclasses || '%')\n ) > 0)::int as supports_fk\n from redundant_indexes ri\n),\n-- Cut recursive links\nredundant_indexes_tmp_num as (\n select row_number() over () num, rig.*\n from redundant_indexes_fk rig\n), redundant_indexes_tmp_links as (\n select\n ri1.*,\n ri2.num as r_num\n from redundant_indexes_tmp_num ri1\n left join redundant_indexes_tmp_num ri2 on ri2.reason_index_id = ri1.index_id and ri1.reason_index_id = ri2.index_id\n), redundant_indexes_tmp_cut as (\n select\n *\n from redundant_indexes_tmp_links\n where num < r_num or r_num is null\n), redundant_indexes_cut_grouped as (\n select\n distinct(num),\n *\n from redundant_indexes_tmp_cut\n order by index_size_bytes desc\n), redundant_indexes_grouped as (\n select\n index_id,\n schema_name as tag_schema_name,\n table_name,\n table_size_bytes,\n index_name as tag_index_name,\n access_method as tag_access_method,\n string_agg(distinct reason, ', ') as tag_reason,\n index_size_bytes,\n index_usage,\n index_def as index_definition,\n formated_index_name as tag_index_name,\n formated_schema_name as tag_schema_name,\n formated_table_name as tag_table_name,\n formated_relation_name as tag_relation_name,\n supports_fk::int as supports_fk,\n json_agg(\n distinct jsonb_build_object(\n 'index_name', reason,\n 'index_definition', main_index_def,\n 'index_size_bytes', main_index_size_bytes\n )\n )::text as redundant_to_json\n from redundant_indexes_cut_grouped\n group by\n index_id,\n table_size_bytes,\n schema_name,\n table_name,\n index_name,\n access_method,\n index_def,\n index_size_bytes,\n index_usage,\n formated_index_name,\n formated_schema_name,\n formated_table_name,\n formated_relation_name,\n supports_fk\n order by index_size_bytes desc\n)\nselect * from redundant_indexes_grouped\nlimit 1000;\n",
-    },
-    gauges: ["*"],
-    statement_timeout_seconds: 15,
-  },
-  "stats_reset": {
-    description: "This metric tracks when statistics were last reset at the database level. It provides visibility into the freshness of statistics data, which is essential for understanding the reliability of usage metrics. A recent reset time indicates that usage statistics may not reflect long-term patterns. Note that Postgres tracks stats resets at the database level, not per-index or per-table.",
-    sqls: {
-      11: "select /* pgwatch_generated */\n datname as tag_database_name,\n extract(epoch from stats_reset)::int as stats_reset_epoch,\n extract(epoch from now() - stats_reset)::int as seconds_since_reset\nfrom pg_stat_database\nwhere datname = current_database()\n and stats_reset is not null;\n",
-    },
-    gauges: ["stats_reset_epoch","seconds_since_reset"],
-    statement_timeout_seconds: 15,
-  },
-};