postgresai 0.14.0-dev.67 → 0.14.0-dev.69

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1204,25 +1204,25 @@ mon
  // Update .env with custom tag if provided
  const envFile = path.resolve(projectDir, ".env");

- // Build .env content, preserving important existing values
- // Read existing .env first to preserve CI/custom settings
- let existingTag: string | null = null;
+ // Build .env content, preserving important existing values (registry, password)
+ // Note: PGAI_TAG is intentionally NOT preserved - the CLI version should always match Docker images
  let existingRegistry: string | null = null;
  let existingPassword: string | null = null;

  if (fs.existsSync(envFile)) {
    const existingEnv = fs.readFileSync(envFile, "utf8");
-   // Extract existing values
-   const tagMatch = existingEnv.match(/^PGAI_TAG=(.+)$/m);
-   if (tagMatch) existingTag = tagMatch[1].trim();
+   // Extract existing values (except tag - always use CLI version)
    const registryMatch = existingEnv.match(/^PGAI_REGISTRY=(.+)$/m);
    if (registryMatch) existingRegistry = registryMatch[1].trim();
    const pwdMatch = existingEnv.match(/^GF_SECURITY_ADMIN_PASSWORD=(.+)$/m);
    if (pwdMatch) existingPassword = pwdMatch[1].trim();
  }

- // Priority: CLI --tag flag > PGAI_TAG env var > existing .env > package version
- const imageTag = opts.tag || process.env.PGAI_TAG || existingTag || pkg.version;
+ // Priority: CLI --tag flag > package version
+ // Note: We intentionally do NOT use process.env.PGAI_TAG here because Bun auto-loads .env files,
+ // which would cause stale .env values to override the CLI version. The CLI version should always
+ // match the Docker images. Users can override with --tag if needed.
+ const imageTag = opts.tag || pkg.version;

  const envLines: string[] = [`PGAI_TAG=${imageTag}`];
  if (existingRegistry) {
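
Distilled into a standalone sketch, the new behavior of this hunk looks like the following (an illustration based on the diff above, not the package's actual function): the tag is always recomputed from --tag or the CLI's own version, and only the registry and Grafana password are read back from an existing .env.

// Minimal sketch of the rewrite logic above (illustrative, not the shipped code).
import * as fs from "fs";
import * as path from "path";

function rewriteEnv(projectDir: string, tagFlag: string | undefined, pkgVersion: string): void {
  const envFile = path.resolve(projectDir, ".env");
  let registry: string | null = null;
  let password: string | null = null;

  if (fs.existsSync(envFile)) {
    const existing = fs.readFileSync(envFile, "utf8");
    // PGAI_TAG is deliberately not read back, so stale tags cannot survive a rewrite.
    registry = existing.match(/^PGAI_REGISTRY=(.+)$/m)?.[1].trim() ?? null;
    password = existing.match(/^GF_SECURITY_ADMIN_PASSWORD=(.+)$/m)?.[1].trim() ?? null;
  }

  const lines = [`PGAI_TAG=${tagFlag || pkgVersion}`];
  if (registry) lines.push(`PGAI_REGISTRY=${registry}`);
  if (password) lines.push(`GF_SECURITY_ADMIN_PASSWORD=${password}`);
  fs.writeFileSync(envFile, lines.join("\n") + "\n");
}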
@@ -1550,6 +1550,16 @@ const MONITORING_CONTAINERS = [
  "postgres-reports",
  ];

+ /**
+  * Network cleanup constants.
+  * Docker Compose creates a default network named "{project}_default".
+  * In CI environments, network cleanup can fail if containers are slow to disconnect.
+  */
+ const COMPOSE_PROJECT_NAME = "postgres_ai";
+ const DOCKER_NETWORK_NAME = `${COMPOSE_PROJECT_NAME}_default`;
+ /** Delay before retrying network cleanup (allows container network disconnections to complete) */
+ const NETWORK_CLEANUP_DELAY_MS = 2000;
+
  /** Remove orphaned containers that docker compose down might miss */
  async function removeOrphanedContainers(): Promise<void> {
    for (const container of MONITORING_CONTAINERS) {
@@ -1565,7 +1575,33 @@ mon
    .command("stop")
    .description("stop monitoring services")
    .action(async () => {
-     const code = await runCompose(["down"]);
+     // Multi-stage cleanup strategy for reliable shutdown in CI environments:
+     // Stage 1: Standard compose down with orphan removal
+     // Stage 2: Force remove any orphaned containers, then retry compose down
+     // Stage 3: Force remove the Docker network directly
+     // This handles edge cases where containers are slow to disconnect from networks.
+     let code = await runCompose(["down", "--remove-orphans"]);
+
+     // Stage 2: If initial cleanup fails, try removing orphaned containers first
+     if (code !== 0) {
+       await removeOrphanedContainers();
+       // Wait a moment for container network disconnections to complete
+       await new Promise(resolve => setTimeout(resolve, NETWORK_CLEANUP_DELAY_MS));
+       // Retry compose down
+       code = await runCompose(["down", "--remove-orphans"]);
+     }
+
+     // Final cleanup: force remove the network if it still exists
+     if (code !== 0) {
+       try {
+         await execFilePromise("docker", ["network", "rm", DOCKER_NETWORK_NAME]);
+         // Network removal succeeded - cleanup is complete
+         code = 0;
+       } catch {
+         // Network doesn't exist or couldn't be removed, ignore
+       }
+     }
+
      if (code !== 0) process.exitCode = code;
    });

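The control flow added here is worth seeing on its own: run compose down with orphan removal; if that fails, force-remove known containers, wait, and retry; if it still fails, remove the network by name and treat that as success. A standalone sketch of the same shape, parameterized over the package's helpers from the diff (an illustration of the pattern, not the shipped code):

// Sketch of the three-stage shutdown used by `stop`.
async function stopWithFallbacks(
  runCompose: (args: string[]) => Promise<number>,            // exit code, 0 = success
  removeOrphanedContainers: () => Promise<void>,
  removeNetwork: () => Promise<void>,                          // throws if removal fails
  delayMs: number,
): Promise<number> {
  let code = await runCompose(["down", "--remove-orphans"]);   // stage 1
  if (code !== 0) {
    await removeOrphanedContainers();                          // stage 2
    await new Promise((resolve) => setTimeout(resolve, delayMs));
    code = await runCompose(["down", "--remove-orphans"]);
  }
  if (code !== 0) {
    try {
      await removeNetwork();                                   // stage 3
      code = 0;                                                // network gone => treat as success
    } catch {
      // nothing left to clean up; surface the original failure code
    }
  }
  return code;
}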
@@ -13064,7 +13064,7 @@ var {
  // package.json
  var package_default = {
    name: "postgresai",
-   version: "0.14.0-dev.67",
+   version: "0.14.0-dev.69",
    description: "postgres_ai CLI",
    license: "Apache-2.0",
    private: false,
@@ -15887,7 +15887,7 @@ var Result = import_lib.default.Result;
  var TypeOverrides = import_lib.default.TypeOverrides;
  var defaults = import_lib.default.defaults;
  // package.json
- var version = "0.14.0-dev.67";
+ var version = "0.14.0-dev.69";
  var package_default2 = {
    name: "postgresai",
    version,
@@ -24958,17 +24958,17 @@ where
    statement_timeout_seconds: 300
  },
  pg_invalid_indexes: {
-   description: "This metric identifies invalid indexes in the database. It provides insights into the number of invalid indexes and their details. This metric helps administrators identify and fix invalid indexes to improve database performance.",
+   description: "This metric identifies invalid indexes in the database with decision tree data for remediation. It provides insights into whether to DROP (if duplicate exists), RECREATE (if backs constraint), or flag as UNCERTAIN (if additional RCA is needed to check query plans). Decision tree: 1) Valid duplicate exists -> DROP, 2) Backs PK/UNIQUE constraint -> RECREATE, 3) Table < 10K rows -> RECREATE (small tables rebuild quickly, typically under 1 second), 4) Otherwise -> UNCERTAIN (need query plan analysis to assess impact).",
    sqls: {
      11: `with fk_indexes as ( /* pgwatch_generated */
        select
-         schemaname as tag_schema_name,
-         (indexrelid::regclass)::text as tag_index_name,
-         (relid::regclass)::text as tag_table_name,
-         (confrelid::regclass)::text as tag_fk_table_ref,
-         array_to_string(indclass, ', ') as tag_opclasses
-       from
-         pg_stat_all_indexes
+         schemaname as schema_name,
+         indexrelid,
+         (indexrelid::regclass)::text as index_name,
+         (relid::regclass)::text as table_name,
+         (confrelid::regclass)::text as fk_table_ref,
+         array_to_string(indclass, ', ') as opclasses
+       from pg_stat_all_indexes
        join pg_index using (indexrelid)
        left join pg_constraint
          on array_to_string(indkey, ',') = array_to_string(conkey, ',')
@@ -24977,37 +24977,58 @@ where
          and contype = 'f'
        where idx_scan = 0
          and indisunique is false
-         and conkey is not null --conkey is not null then true else false end as is_fk_idx
-     ), data as (
+         and conkey is not null
+     ),
+     -- Find valid indexes that could be duplicates (same table, same columns)
+     valid_duplicates as (
+       select
+         inv.indexrelid as invalid_indexrelid,
+         val.indexrelid as valid_indexrelid,
+         (val.indexrelid::regclass)::text as valid_index_name,
+         pg_get_indexdef(val.indexrelid) as valid_index_definition
+       from pg_index inv
+       join pg_index val on inv.indrelid = val.indrelid -- same table
+         and inv.indkey = val.indkey -- same columns (in same order)
+         and inv.indexrelid != val.indexrelid -- different index
+         and val.indisvalid = true -- valid index
+       where inv.indisvalid = false
+     ),
+     data as (
        select
          pci.relname as tag_index_name,
          pn.nspname as tag_schema_name,
          pct.relname as tag_table_name,
-         quote_ident(pn.nspname) as tag_schema_name,
-         quote_ident(pci.relname) as tag_index_name,
-         quote_ident(pct.relname) as tag_table_name,
          coalesce(nullif(quote_ident(pn.nspname), 'public') || '.', '') || quote_ident(pct.relname) as tag_relation_name,
          pg_get_indexdef(pidx.indexrelid) as index_definition,
-         pg_relation_size(pidx.indexrelid) index_size_bytes,
+         pg_relation_size(pidx.indexrelid) as index_size_bytes,
+         -- Constraint info
+         pidx.indisprimary as is_pk,
+         pidx.indisunique as is_unique,
+         con.conname as constraint_name,
+         -- Table row estimate
+         pct.reltuples::bigint as table_row_estimate,
+         -- Valid duplicate check
+         (vd.valid_indexrelid is not null) as has_valid_duplicate,
+         vd.valid_index_name,
+         vd.valid_index_definition,
+         -- FK support check
          ((
            select count(1)
            from fk_indexes fi
-           where
-             fi.tag_fk_table_ref = pct.relname
-             and fi.tag_opclasses like (array_to_string(pidx.indclass, ', ') || '%')
+           where fi.fk_table_ref = pct.relname
+             and fi.opclasses like (array_to_string(pidx.indclass, ', ') || '%')
          ) > 0)::int as supports_fk
        from pg_index pidx
-       join pg_class as pci on pci.oid = pidx.indexrelid
-       join pg_class as pct on pct.oid = pidx.indrelid
+       join pg_class pci on pci.oid = pidx.indexrelid
+       join pg_class pct on pct.oid = pidx.indrelid
        left join pg_namespace pn on pn.oid = pct.relnamespace
+       left join pg_constraint con on con.conindid = pidx.indexrelid
+       left join valid_duplicates vd on vd.invalid_indexrelid = pidx.indexrelid
        where pidx.indisvalid = false
-     ), data_total as (
-       select
-         sum(index_size_bytes) as index_size_bytes_sum
-       from data
-     ), num_data as (
+     ),
+     num_data as (
        select
-         row_number() over () num,
+         row_number() over () as num,
          data.*
        from data
      )
@@ -25626,7 +25647,14 @@ async function getInvalidIndexes(client, pgMajorVersion = 16) {
        index_size_bytes: indexSizeBytes,
        index_size_pretty: formatBytes(indexSizeBytes),
        index_definition: String(transformed.index_definition || ""),
-       supports_fk: toBool(transformed.supports_fk)
+       supports_fk: toBool(transformed.supports_fk),
+       is_pk: toBool(transformed.is_pk),
+       is_unique: toBool(transformed.is_unique),
+       constraint_name: transformed.constraint_name ? String(transformed.constraint_name) : null,
+       table_row_estimate: parseInt(String(transformed.table_row_estimate || 0), 10),
+       has_valid_duplicate: toBool(transformed.has_valid_duplicate),
+       valid_duplicate_name: transformed.valid_index_name ? String(transformed.valid_index_name) : null,
+       valid_duplicate_definition: transformed.valid_index_definition ? String(transformed.valid_index_definition) : null
      };
    });
  }
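
toBool itself is not part of this diff, but it has to normalize mixed shapes: supports_fk comes back as an int (the SQL casts it with ::int), while is_pk and has_valid_duplicate are SQL booleans that a driver may deliver as true/false or as "t"/"f" strings. A plausible implementation (a hypothetical sketch, not the package's actual helper):

// Hypothetical sketch - the real toBool is defined elsewhere in the bundle.
function toBool(value: unknown): boolean {
  if (typeof value === "boolean") return value;
  if (typeof value === "number") return value !== 0;
  if (typeof value === "string") return value === "t" || value === "true" || value === "1";
  return false; // null, undefined, and anything unexpected count as false
}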
@@ -27213,14 +27241,10 @@ mon.command("local-install").description("install local monitoring stack (genera
    console.log(`Project directory: ${projectDir}
  `);
    const envFile = path5.resolve(projectDir, ".env");
-   let existingTag = null;
    let existingRegistry = null;
    let existingPassword = null;
    if (fs5.existsSync(envFile)) {
      const existingEnv = fs5.readFileSync(envFile, "utf8");
-     const tagMatch = existingEnv.match(/^PGAI_TAG=(.+)$/m);
-     if (tagMatch)
-       existingTag = tagMatch[1].trim();
      const registryMatch = existingEnv.match(/^PGAI_REGISTRY=(.+)$/m);
      if (registryMatch)
        existingRegistry = registryMatch[1].trim();
@@ -27228,7 +27252,7 @@ mon.command("local-install").description("install local monitoring stack (genera
      if (pwdMatch)
        existingPassword = pwdMatch[1].trim();
    }
-   const imageTag = opts.tag || process.env.PGAI_TAG || existingTag || package_default.version;
+   const imageTag = opts.tag || package_default.version;
    const envLines = [`PGAI_TAG=${imageTag}`];
    if (existingRegistry) {
      envLines.push(`PGAI_REGISTRY=${existingRegistry}`);
@@ -27565,6 +27589,9 @@ var MONITORING_CONTAINERS = [
    "sources-generator",
    "postgres-reports"
  ];
+ var COMPOSE_PROJECT_NAME = "postgres_ai";
+ var DOCKER_NETWORK_NAME = `${COMPOSE_PROJECT_NAME}_default`;
+ var NETWORK_CLEANUP_DELAY_MS = 2000;
  async function removeOrphanedContainers() {
    for (const container of MONITORING_CONTAINERS) {
      try {
@@ -27573,7 +27600,18 @@ async function removeOrphanedContainers() {
      }
    }
  mon.command("stop").description("stop monitoring services").action(async () => {
-   const code = await runCompose(["down"]);
+   let code = await runCompose(["down", "--remove-orphans"]);
+   if (code !== 0) {
+     await removeOrphanedContainers();
+     await new Promise((resolve6) => setTimeout(resolve6, NETWORK_CLEANUP_DELAY_MS));
+     code = await runCompose(["down", "--remove-orphans"]);
+   }
+   if (code !== 0) {
+     try {
+       await execFilePromise("docker", ["network", "rm", DOCKER_NETWORK_NAME]);
+       code = 0;
+     } catch {}
+   }
    if (code !== 0)
      process.exitCode = code;
  });
package/lib/checkup.ts CHANGED
@@ -109,6 +109,12 @@ export interface ClusterMetric {

  /**
   * Invalid index entry (H001) - matches H001.schema.json invalidIndex
+  *
+  * Decision tree for remediation recommendations:
+  * 1. has_valid_duplicate=true → DROP (valid duplicate exists, safe to remove)
+  * 2. is_pk=true or is_unique=true → RECREATE (backs a constraint, must restore)
+  * 3. table_row_estimate < 10000 → RECREATE (small table, quick rebuild)
+  * 4. Otherwise → UNCERTAIN (needs manual analysis of query plans)
   */
  export interface InvalidIndex {
    schema_name: string;
@@ -117,9 +123,61 @@ export interface InvalidIndex {
    relation_name: string;
    index_size_bytes: number;
    index_size_pretty: string;
-   /** Full CREATE INDEX statement from pg_get_indexdef(), useful for DROP/CREATE migrations */
+   /** Full CREATE INDEX statement from pg_get_indexdef() - useful for DROP/RECREATE migrations */
    index_definition: string;
    supports_fk: boolean;
+   /** True if this index backs a PRIMARY KEY constraint */
+   is_pk: boolean;
+   /** True if this is a UNIQUE index (includes PK indexes) */
+   is_unique: boolean;
+   /** Name of the constraint this index backs, or null if none */
+   constraint_name: string | null;
+   /** Estimated row count of the table from pg_class.reltuples */
+   table_row_estimate: number;
+   /** True if there is a valid index on the same column(s) */
+   has_valid_duplicate: boolean;
+   /** Name of the valid duplicate index if one exists */
+   valid_duplicate_name: string | null;
+   /** Full CREATE INDEX statement of the valid duplicate index */
+   valid_duplicate_definition: string | null;
+ }
+
+ /** Recommendation for handling an invalid index */
+ export type InvalidIndexRecommendation = "DROP" | "RECREATE" | "UNCERTAIN";
+
+ /** Threshold for considering a table "small" (quick to rebuild) */
+ const SMALL_TABLE_ROW_THRESHOLD = 10000;
+
+ /**
+  * Compute remediation recommendation for an invalid index using decision tree.
+  *
+  * Decision tree logic:
+  * 1. If has_valid_duplicate is true → DROP (valid duplicate exists, safe to remove)
+  * 2. If is_pk or is_unique is true → RECREATE (backs a constraint, must restore)
+  * 3. If table_row_estimate < 10000 → RECREATE (small table, quick rebuild)
+  * 4. Otherwise → UNCERTAIN (needs manual analysis of query plans)
+  *
+  * @param index - Invalid index with observation data
+  * @returns Recommendation: "DROP", "RECREATE", or "UNCERTAIN"
+  */
+ export function getInvalidIndexRecommendation(index: InvalidIndex): InvalidIndexRecommendation {
+   // 1. Valid duplicate exists - safe to drop
+   if (index.has_valid_duplicate) {
+     return "DROP";
+   }
+
+   // 2. Backs a constraint - must recreate
+   if (index.is_pk || index.is_unique) {
+     return "RECREATE";
+   }
+
+   // 3. Small table - quick to recreate
+   if (index.table_row_estimate < SMALL_TABLE_ROW_THRESHOLD) {
+     return "RECREATE";
+   }
+
+   // 4. Large table without clear path - needs manual analysis
+   return "UNCERTAIN";
  }

  /**
@@ -564,11 +622,11 @@ export async function getClusterInfo(client: Client, pgMajorVersion: number = 16

  /**
   * Get invalid indexes from the database (H001).
-  * Invalid indexes are indexes that failed to build (e.g., due to CONCURRENTLY failure).
+  * Invalid indexes have indisvalid = false, typically from failed CREATE INDEX CONCURRENTLY.
   *
   * @param client - Connected PostgreSQL client
   * @param pgMajorVersion - PostgreSQL major version (default: 16)
-  * @returns Array of invalid index entries with size and FK support info
+  * @returns Array of invalid index entries with observation data for decision tree analysis
   */
  export async function getInvalidIndexes(client: Client, pgMajorVersion: number = 16): Promise<InvalidIndex[]> {
    const sql = getMetricSql(METRIC_NAMES.H001, pgMajorVersion);
@@ -576,6 +634,7 @@ export async function getInvalidIndexes(client: Client, pgMajorVersion: number =
    return result.rows.map((row) => {
      const transformed = transformMetricRow(row);
      const indexSizeBytes = parseInt(String(transformed.index_size_bytes || 0), 10);
+
      return {
        schema_name: String(transformed.schema_name || ""),
        table_name: String(transformed.table_name || ""),
@@ -585,6 +644,13 @@ export async function getInvalidIndexes(client: Client, pgMajorVersion: number =
        index_size_pretty: formatBytes(indexSizeBytes),
        index_definition: String(transformed.index_definition || ""),
        supports_fk: toBool(transformed.supports_fk),
+       is_pk: toBool(transformed.is_pk),
+       is_unique: toBool(transformed.is_unique),
+       constraint_name: transformed.constraint_name ? String(transformed.constraint_name) : null,
+       table_row_estimate: parseInt(String(transformed.table_row_estimate || 0), 10),
+       has_valid_duplicate: toBool(transformed.has_valid_duplicate),
+       valid_duplicate_name: transformed.valid_index_name ? String(transformed.valid_index_name) : null,
+       valid_duplicate_definition: transformed.valid_index_definition ? String(transformed.valid_index_definition) : null,
      };
    });
  }
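
With the extra observation fields mapped, rows from getInvalidIndexes can be fed straight into the new getInvalidIndexRecommendation helper. A usage sketch, assuming the module resolves as ./checkup; the field values are illustrative:

import { getInvalidIndexRecommendation, type InvalidIndex } from "./checkup";

const idx: InvalidIndex = {
  schema_name: "public",
  table_name: "users",
  index_name: "users_email_idx_ccnew",
  relation_name: "public.users",
  index_size_bytes: 1048576,
  index_size_pretty: "1 MiB",
  index_definition: "CREATE INDEX users_email_idx_ccnew ON public.users USING btree (email)",
  supports_fk: false,
  is_pk: false,
  is_unique: false,
  constraint_name: null,
  table_row_estimate: 250000,       // large table, so rule 3 would not fire
  has_valid_duplicate: true,        // rule 1 fires first
  valid_duplicate_name: "users_email_idx",
  valid_duplicate_definition: "CREATE INDEX users_email_idx ON public.users USING btree (email)",
};

console.log(getInvalidIndexRecommendation(idx)); // "DROP" - a valid duplicate already exists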
@@ -1,6 +1,6 @@
  // AUTO-GENERATED FILE - DO NOT EDIT
  // Generated from config/pgwatch-prometheus/metrics.yml by scripts/embed-metrics.ts
- // Generated at: 2025-12-30T04:11:39.278Z
+ // Generated at: 2026-01-06T14:31:10.733Z

  /**
   * Metric definition from metrics.yml
@@ -45,9 +45,9 @@ export const METRICS: Record<string, MetricDefinition> = {
    statement_timeout_seconds: 300,
  },
  "pg_invalid_indexes": {
-   description: "This metric identifies invalid indexes in the database. It provides insights into the number of invalid indexes and their details. This metric helps administrators identify and fix invalid indexes to improve database performance.",
+   description: "This metric identifies invalid indexes in the database with decision tree data for remediation. It provides insights into whether to DROP (if duplicate exists), RECREATE (if backs constraint), or flag as UNCERTAIN (if additional RCA is needed to check query plans). Decision tree: 1) Valid duplicate exists -> DROP, 2) Backs PK/UNIQUE constraint -> RECREATE, 3) Table < 10K rows -> RECREATE (small tables rebuild quickly, typically under 1 second), 4) Otherwise -> UNCERTAIN (need query plan analysis to assess impact).",
    sqls: {
-     11: "with fk_indexes as ( /* pgwatch_generated */\n select\n schemaname as tag_schema_name,\n (indexrelid::regclass)::text as tag_index_name,\n (relid::regclass)::text as tag_table_name,\n (confrelid::regclass)::text as tag_fk_table_ref,\n array_to_string(indclass, ', ') as tag_opclasses\n from\n pg_stat_all_indexes\n join pg_index using (indexrelid)\n left join pg_constraint\n on array_to_string(indkey, ',') = array_to_string(conkey, ',')\n and schemaname = (connamespace::regnamespace)::text\n and conrelid = relid\n and contype = 'f'\n where idx_scan = 0\n and indisunique is false\n and conkey is not null --conkey is not null then true else false end as is_fk_idx\n), data as (\n select\n pci.relname as tag_index_name,\n pn.nspname as tag_schema_name,\n pct.relname as tag_table_name,\n quote_ident(pn.nspname) as tag_schema_name,\n quote_ident(pci.relname) as tag_index_name,\n quote_ident(pct.relname) as tag_table_name,\n coalesce(nullif(quote_ident(pn.nspname), 'public') || '.', '') || quote_ident(pct.relname) as tag_relation_name,\n pg_get_indexdef(pidx.indexrelid) as index_definition,\n pg_relation_size(pidx.indexrelid) index_size_bytes,\n ((\n select count(1)\n from fk_indexes fi\n where\n fi.tag_fk_table_ref = pct.relname\n and fi.tag_opclasses like (array_to_string(pidx.indclass, ', ') || '%')\n ) > 0)::int as supports_fk\n from pg_index pidx\n join pg_class as pci on pci.oid = pidx.indexrelid\n join pg_class as pct on pct.oid = pidx.indrelid\n left join pg_namespace pn on pn.oid = pct.relnamespace\n where pidx.indisvalid = false\n), data_total as (\n select\n sum(index_size_bytes) as index_size_bytes_sum\n from data\n), num_data as (\n select\n row_number() over () num,\n data.*\n from data\n)\nselect\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n num_data.*\nfrom num_data\nlimit 1000;\n",
+     11: "with fk_indexes as ( /* pgwatch_generated */\n select\n schemaname as schema_name,\n indexrelid,\n (indexrelid::regclass)::text as index_name,\n (relid::regclass)::text as table_name,\n (confrelid::regclass)::text as fk_table_ref,\n array_to_string(indclass, ', ') as opclasses\n from pg_stat_all_indexes\n join pg_index using (indexrelid)\n left join pg_constraint\n on array_to_string(indkey, ',') = array_to_string(conkey, ',')\n and schemaname = (connamespace::regnamespace)::text\n and conrelid = relid\n and contype = 'f'\n where idx_scan = 0\n and indisunique is false\n and conkey is not null\n),\n-- Find valid indexes that could be duplicates (same table, same columns)\nvalid_duplicates as (\n select\n inv.indexrelid as invalid_indexrelid,\n val.indexrelid as valid_indexrelid,\n (val.indexrelid::regclass)::text as valid_index_name,\n pg_get_indexdef(val.indexrelid) as valid_index_definition\n from pg_index inv\n join pg_index val on inv.indrelid = val.indrelid -- same table\n and inv.indkey = val.indkey -- same columns (in same order)\n and inv.indexrelid != val.indexrelid -- different index\n and val.indisvalid = true -- valid index\n where inv.indisvalid = false\n),\ndata as (\n select\n pci.relname as tag_index_name,\n pn.nspname as tag_schema_name,\n pct.relname as tag_table_name,\n coalesce(nullif(quote_ident(pn.nspname), 'public') || '.', '') || quote_ident(pct.relname) as tag_relation_name,\n pg_get_indexdef(pidx.indexrelid) as index_definition,\n pg_relation_size(pidx.indexrelid) as index_size_bytes,\n -- Constraint info\n pidx.indisprimary as is_pk,\n pidx.indisunique as is_unique,\n con.conname as constraint_name,\n -- Table row estimate\n pct.reltuples::bigint as table_row_estimate,\n -- Valid duplicate check\n (vd.valid_indexrelid is not null) as has_valid_duplicate,\n vd.valid_index_name,\n vd.valid_index_definition,\n -- FK support check\n ((\n select count(1)\n from fk_indexes fi\n where fi.fk_table_ref = pct.relname\n and fi.opclasses like (array_to_string(pidx.indclass, ', ') || '%')\n ) > 0)::int as supports_fk\n from pg_index pidx\n join pg_class pci on pci.oid = pidx.indexrelid\n join pg_class pct on pct.oid = pidx.indrelid\n left join pg_namespace pn on pn.oid = pct.relnamespace\n left join pg_constraint con on con.conindid = pidx.indexrelid\n left join valid_duplicates vd on vd.invalid_indexrelid = pidx.indexrelid\n where pidx.indisvalid = false\n),\nnum_data as (\n select\n row_number() over () as num,\n data.*\n from data\n)\nselect\n (extract(epoch from now()) * 1e9)::int8 as epoch_ns,\n current_database() as tag_datname,\n num_data.*\nfrom num_data\nlimit 1000;\n",
    },
    gauges: ["*"],
    statement_timeout_seconds: 15,
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "postgresai",
-   "version": "0.14.0-dev.67",
+   "version": "0.14.0-dev.69",
    "description": "postgres_ai CLI",
    "license": "Apache-2.0",
    "private": false,
@@ -287,27 +287,28 @@ describe("Report generators with mock client", () => {
        },
      ],
      databaseSizesRows: [{ datname: "postgres", size_bytes: "1073741824" }],
-     dbStatsRows: [{
-       numbackends: 5,
-       xact_commit: 100,
-       xact_rollback: 1,
-       blks_read: 1000,
-       blks_hit: 9000,
-       tup_returned: 500,
-       tup_fetched: 400,
-       tup_inserted: 50,
-       tup_updated: 30,
-       tup_deleted: 10,
-       deadlocks: 0,
-       temp_files: 0,
+     dbStatsRows: [{
+       numbackends: 5,
+       xact_commit: 100,
+       xact_rollback: 1,
+       blks_read: 1000,
+       blks_hit: 9000,
+       tup_returned: 500,
+       tup_fetched: 400,
+       tup_inserted: 50,
+       tup_updated: 30,
+       tup_deleted: 10,
+       deadlocks: 0,
+       temp_files: 0,
        temp_bytes: 0,
-       postmaster_uptime_s: 864000
+       postmaster_uptime_s: 864000
      }],
      connectionStatesRows: [{ state: "active", count: 2 }, { state: "idle", count: 3 }],
      uptimeRows: [{ start_time: new Date("2024-01-01T00:00:00Z"), uptime: "10 days" }],
      invalidIndexesRows: [],
      unusedIndexesRows: [],
      redundantIndexesRows: [],
+     sensitiveColumnsRows: [],
    }
  );

@@ -320,6 +321,7 @@ describe("Report generators with mock client", () => {
    expect("H001" in reports).toBe(true);
    expect("H002" in reports).toBe(true);
    expect("H004" in reports).toBe(true);
+   // S001 is only available in Python reporter, not in CLI express mode
    expect(reports.A002.checkId).toBe("A002");
    expect(reports.A003.checkId).toBe("A003");
    expect(reports.A004.checkId).toBe("A004");
@@ -525,9 +527,233 @@ describe("H001 - Invalid indexes", () => {
    expect(dbData.database_size_pretty).toBeTruthy();
    expect(report.results["test-node"].postgres_version).toBeTruthy();
  });
+
+ test("getInvalidIndexes returns decision tree fields including valid_duplicate_definition", async () => {
+   const mockClient = createMockClient({
+     invalidIndexesRows: [
+       {
+         schema_name: "public",
+         table_name: "users",
+         index_name: "users_email_idx_invalid",
+         relation_name: "users",
+         index_size_bytes: "1048576",
+         index_definition: "CREATE INDEX users_email_idx_invalid ON public.users USING btree (email)",
+         supports_fk: false,
+         is_pk: false,
+         is_unique: false,
+         constraint_name: null,
+         table_row_estimate: "5000",
+         has_valid_duplicate: true,
+         valid_index_name: "users_email_idx",
+         valid_index_definition: "CREATE INDEX users_email_idx ON public.users USING btree (email)",
+       },
+     ],
+   });
+
+   const indexes = await checkup.getInvalidIndexes(mockClient as any);
+   expect(indexes.length).toBe(1);
+   expect(indexes[0].is_pk).toBe(false);
+   expect(indexes[0].is_unique).toBe(false);
+   expect(indexes[0].constraint_name).toBeNull();
+   expect(indexes[0].table_row_estimate).toBe(5000);
+   expect(indexes[0].has_valid_duplicate).toBe(true);
+   expect(indexes[0].valid_duplicate_name).toBe("users_email_idx");
+   expect(indexes[0].valid_duplicate_definition).toBe("CREATE INDEX users_email_idx ON public.users USING btree (email)");
+ });
+
+ test("getInvalidIndexes handles has_valid_duplicate: false with null values", async () => {
+   const mockClient = createMockClient({
+     invalidIndexesRows: [
+       {
+         schema_name: "public",
+         table_name: "orders",
+         index_name: "orders_status_idx_invalid",
+         relation_name: "orders",
+         index_size_bytes: "524288",
+         index_definition: "CREATE INDEX orders_status_idx_invalid ON public.orders USING btree (status)",
+         supports_fk: false,
+         is_pk: false,
+         is_unique: false,
+         constraint_name: null,
+         table_row_estimate: "100000",
+         has_valid_duplicate: false,
+         valid_index_name: null,
+         valid_index_definition: null,
+       },
+     ],
+   });
+
+   const indexes = await checkup.getInvalidIndexes(mockClient as Client);
+   expect(indexes.length).toBe(1);
+   expect(indexes[0].has_valid_duplicate).toBe(false);
+   expect(indexes[0].valid_duplicate_name).toBeNull();
+   expect(indexes[0].valid_duplicate_definition).toBeNull();
+ });
+
+ test("getInvalidIndexes handles is_pk: true with constraint", async () => {
+   const mockClient = createMockClient({
+     invalidIndexesRows: [
+       {
+         schema_name: "public",
+         table_name: "accounts",
+         index_name: "accounts_pkey_invalid",
+         relation_name: "accounts",
+         index_size_bytes: "262144",
+         index_definition: "CREATE UNIQUE INDEX accounts_pkey_invalid ON public.accounts USING btree (id)",
+         supports_fk: true,
+         is_pk: true,
+         is_unique: true,
+         constraint_name: "accounts_pkey",
+         table_row_estimate: "500",
+         has_valid_duplicate: false,
+         valid_index_name: null,
+         valid_index_definition: null,
+       },
+     ],
+   });
+
+   const indexes = await checkup.getInvalidIndexes(mockClient as Client);
+   expect(indexes.length).toBe(1);
+   expect(indexes[0].is_pk).toBe(true);
+   expect(indexes[0].is_unique).toBe(true);
+   expect(indexes[0].constraint_name).toBe("accounts_pkey");
+   expect(indexes[0].supports_fk).toBe(true);
+ });
+
+ test("getInvalidIndexes handles is_unique: true without PK", async () => {
+   const mockClient = createMockClient({
+     invalidIndexesRows: [
+       {
+         schema_name: "public",
+         table_name: "users",
+         index_name: "users_email_unique_invalid",
+         relation_name: "users",
+         index_size_bytes: "131072",
+         index_definition: "CREATE UNIQUE INDEX users_email_unique_invalid ON public.users USING btree (email)",
+         supports_fk: false,
+         is_pk: false,
+         is_unique: true,
+         constraint_name: "users_email_unique",
+         table_row_estimate: "25000",
+         has_valid_duplicate: true,
+         valid_index_name: "users_email_unique_idx",
+         valid_index_definition: "CREATE UNIQUE INDEX users_email_unique_idx ON public.users USING btree (email)",
+       },
+     ],
+   });
+
+   const indexes = await checkup.getInvalidIndexes(mockClient as Client);
+   expect(indexes.length).toBe(1);
+   expect(indexes[0].is_pk).toBe(false);
+   expect(indexes[0].is_unique).toBe(true);
+   expect(indexes[0].constraint_name).toBe("users_email_unique");
+   expect(indexes[0].has_valid_duplicate).toBe(true);
+ });
  // Top-level structure tests removed - covered by schema-validation.test.ts
  });

+ // Tests for H001 decision tree recommendation logic
+ describe("H001 - Decision tree recommendations", () => {
+   // Helper to create a minimal InvalidIndex for testing
+   const createTestIndex = (overrides: Partial<checkup.InvalidIndex> = {}): checkup.InvalidIndex => ({
+     schema_name: "public",
+     table_name: "test_table",
+     index_name: "test_idx",
+     relation_name: "public.test_table",
+     index_size_bytes: 1024,
+     index_size_pretty: "1 KiB",
+     index_definition: "CREATE INDEX test_idx ON public.test_table USING btree (col)",
+     supports_fk: false,
+     is_pk: false,
+     is_unique: false,
+     constraint_name: null,
+     table_row_estimate: 100000, // Large table by default
+     has_valid_duplicate: false,
+     valid_duplicate_name: null,
+     valid_duplicate_definition: null,
+     ...overrides,
+   });
+
+   test("returns DROP when has_valid_duplicate is true", () => {
+     const index = createTestIndex({ has_valid_duplicate: true, valid_duplicate_name: "existing_idx" });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
+   });
+
+   test("returns DROP even when is_pk is true if has_valid_duplicate is true", () => {
+     // has_valid_duplicate takes precedence over is_pk
+     const index = createTestIndex({
+       has_valid_duplicate: true,
+       is_pk: true,
+       is_unique: true,
+     });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
+   });
+
+   test("returns RECREATE when is_pk is true and no valid duplicate", () => {
+     const index = createTestIndex({
+       is_pk: true,
+       is_unique: true,
+       constraint_name: "test_pkey",
+     });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+   });
+
+   test("returns RECREATE when is_unique is true (non-PK) and no valid duplicate", () => {
+     const index = createTestIndex({
+       is_unique: true,
+       constraint_name: "test_unique",
+     });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+   });
+
+   test("returns RECREATE for small table (< 10K rows) without valid duplicate", () => {
+     const index = createTestIndex({ table_row_estimate: 5000 });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+   });
+
+   test("returns RECREATE for table at threshold boundary (9999 rows)", () => {
+     const index = createTestIndex({ table_row_estimate: 9999 });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+   });
+
+   test("returns UNCERTAIN for large table (>= 10K rows) at threshold boundary", () => {
+     const index = createTestIndex({ table_row_estimate: 10000 });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("UNCERTAIN");
+   });
+
+   test("returns UNCERTAIN for large table without valid duplicate or constraint", () => {
+     const index = createTestIndex({ table_row_estimate: 1000000 });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("UNCERTAIN");
+   });
+
+   test("returns RECREATE for empty table (0 rows) with no valid duplicate - edge case", () => {
+     // Empty table should be RECREATE (< 10K threshold)
+     const index = createTestIndex({ table_row_estimate: 0 });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+   });
+
+   test("decision tree priority: has_valid_duplicate > is_pk > small_table", () => {
+     // Even with PK and small table, has_valid_duplicate should win
+     const index = createTestIndex({
+       has_valid_duplicate: true,
+       is_pk: true,
+       is_unique: true,
+       table_row_estimate: 100,
+     });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
+   });
+
+   test("decision tree priority: is_pk > small_table", () => {
+     // is_pk should return RECREATE regardless of table size
+     const index = createTestIndex({
+       is_pk: true,
+       is_unique: true,
+       table_row_estimate: 1000000, // Large table
+     });
+     expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+   });
+ });
+
  // Tests for H002 (Unused indexes)
  describe("H002 - Unused indexes", () => {
    test("getUnusedIndexes returns unused indexes", async () => {
@@ -399,6 +399,7 @@ describe.skipIf(skipTests)("integration: prepare-db", () => {
    }
  });

+ // 60s timeout for PostgreSQL startup + multiple SQL queries in slow CI
  test("explain_generic validates input and prevents SQL injection", async () => {
    pg = await createTempPostgres();

@@ -495,5 +496,5 @@ describe.skipIf(skipTests)("integration: prepare-db", () => {
    } finally {
      await pg.cleanup();
    }
- });
+ }, { timeout: 60000 });
  });
package/test/init.test.ts CHANGED
@@ -1,5 +1,7 @@
1
- import { describe, test, expect, beforeAll } from "bun:test";
1
+ import { describe, test, expect, beforeAll, afterAll } from "bun:test";
2
2
  import { resolve } from "path";
3
+ import * as fs from "fs";
4
+ import * as os from "os";
3
5
 
4
6
  // Import from source directly since we're using Bun
5
7
  import * as init from "../lib/init";
@@ -415,3 +417,111 @@ describe("CLI commands", () => {
    expect(r.stderr).toMatch(/Cannot use --api-key with --demo mode/);
  });
  });
+
+ describe("imageTag priority behavior", () => {
+   // Tests for the imageTag priority: --tag flag > pkg.version (PGAI_TAG env var is intentionally ignored)
+   // This verifies the fix that prevents stale .env PGAI_TAG from being used
+
+   let tempDir: string;
+
+   beforeAll(() => {
+     tempDir = fs.mkdtempSync(resolve(os.tmpdir(), "pgai-test-"));
+   });
+
+   afterAll(() => {
+     if (tempDir && fs.existsSync(tempDir)) {
+       fs.rmSync(tempDir, { recursive: true, force: true });
+     }
+   });
+
+   test("stale .env PGAI_TAG is NOT used - CLI version takes precedence", () => {
+     // Create a stale .env with an old tag value
+     const testDir = resolve(tempDir, "stale-tag-test");
+     fs.mkdirSync(testDir, { recursive: true });
+     fs.writeFileSync(resolve(testDir, ".env"), "PGAI_TAG=beta\n");
+     // Create minimal docker-compose.yml so resolvePaths() finds it
+     fs.writeFileSync(resolve(testDir, "docker-compose.yml"), "version: '3'\nservices: {}\n");
+
+     // Run from the test directory (so resolvePaths finds docker-compose.yml)
+     const cliPath = resolve(import.meta.dir, "..", "bin", "postgres-ai.ts");
+     const bunBin = typeof process.execPath === "string" && process.execPath.length > 0 ? process.execPath : "bun";
+     const result = Bun.spawnSync([bunBin, cliPath, "mon", "local-install", "--db-url", "postgresql://u:p@h:5432/d", "--yes"], {
+       env: { ...process.env, PGAI_TAG: undefined },
+       cwd: testDir,
+     });
+
+     // Read the .env that was written
+     const envContent = fs.readFileSync(resolve(testDir, ".env"), "utf8");
+
+     // The .env should NOT contain the stale "beta" tag - it should use pkg.version
+     expect(envContent).not.toMatch(/PGAI_TAG=beta/);
+     // It should contain the CLI version (0.0.0-dev.0 in dev)
+     expect(envContent).toMatch(/PGAI_TAG=\d+\.\d+\.\d+|PGAI_TAG=0\.0\.0-dev/);
+   });
+
+   test("--tag flag takes priority over pkg.version", () => {
+     const testDir = resolve(tempDir, "tag-flag-test");
+     fs.mkdirSync(testDir, { recursive: true });
+     fs.writeFileSync(resolve(testDir, "docker-compose.yml"), "version: '3'\nservices: {}\n");
+
+     const cliPath = resolve(import.meta.dir, "..", "bin", "postgres-ai.ts");
+     const bunBin = typeof process.execPath === "string" && process.execPath.length > 0 ? process.execPath : "bun";
+     const result = Bun.spawnSync([bunBin, cliPath, "mon", "local-install", "--tag", "v1.2.3-custom", "--db-url", "postgresql://u:p@h:5432/d", "--yes"], {
+       env: { ...process.env, PGAI_TAG: undefined },
+       cwd: testDir,
+     });
+
+     const envContent = fs.readFileSync(resolve(testDir, ".env"), "utf8");
+     expect(envContent).toMatch(/PGAI_TAG=v1\.2\.3-custom/);
+
+     // Verify stdout confirms the tag being used
+     const stdout = new TextDecoder().decode(result.stdout);
+     expect(stdout).toMatch(/Using image tag: v1\.2\.3-custom/);
+   });
+
+   test("PGAI_TAG env var is intentionally ignored (Bun auto-loads .env)", () => {
+     // Note: We do NOT use process.env.PGAI_TAG because Bun auto-loads .env files,
+     // which would cause stale .env values to pollute the environment.
+     // Users should use --tag flag to override, not env vars.
+     const testDir = resolve(tempDir, "env-var-ignored-test");
+     fs.mkdirSync(testDir, { recursive: true });
+     fs.writeFileSync(resolve(testDir, "docker-compose.yml"), "version: '3'\nservices: {}\n");
+
+     const cliPath = resolve(import.meta.dir, "..", "bin", "postgres-ai.ts");
+     const bunBin = typeof process.execPath === "string" && process.execPath.length > 0 ? process.execPath : "bun";
+     const result = Bun.spawnSync([bunBin, cliPath, "mon", "local-install", "--db-url", "postgresql://u:p@h:5432/d", "--yes"], {
+       env: { ...process.env, PGAI_TAG: "v2.0.0-from-env" },
+       cwd: testDir,
+     });
+
+     const envContent = fs.readFileSync(resolve(testDir, ".env"), "utf8");
+     // PGAI_TAG env var should be IGNORED - uses pkg.version instead
+     expect(envContent).not.toMatch(/PGAI_TAG=v2\.0\.0-from-env/);
+     expect(envContent).toMatch(/PGAI_TAG=\d+\.\d+\.\d+|PGAI_TAG=0\.0\.0-dev/);
+   });
+
+   test("existing registry and password are preserved while tag is updated", () => {
+     const testDir = resolve(tempDir, "preserve-test");
+     fs.mkdirSync(testDir, { recursive: true });
+     // Create .env with stale tag but valid registry and password
+     fs.writeFileSync(resolve(testDir, ".env"),
+       "PGAI_TAG=stale-tag\nPGAI_REGISTRY=my.registry.com\nGF_SECURITY_ADMIN_PASSWORD=secret123\n");
+     fs.writeFileSync(resolve(testDir, "docker-compose.yml"), "version: '3'\nservices: {}\n");
+
+     const cliPath = resolve(import.meta.dir, "..", "bin", "postgres-ai.ts");
+     const bunBin = typeof process.execPath === "string" && process.execPath.length > 0 ? process.execPath : "bun";
+     const result = Bun.spawnSync([bunBin, cliPath, "mon", "local-install", "--db-url", "postgresql://u:p@h:5432/d", "--yes"], {
+       env: { ...process.env, PGAI_TAG: undefined },
+       cwd: testDir,
+     });
+
+     const envContent = fs.readFileSync(resolve(testDir, ".env"), "utf8");
+
+     // Tag should be updated (not stale-tag)
+     expect(envContent).not.toMatch(/PGAI_TAG=stale-tag/);
+
+     // But registry and password should be preserved
+     expect(envContent).toMatch(/PGAI_REGISTRY=my\.registry\.com/);
+     expect(envContent).toMatch(/GF_SECURITY_ADMIN_PASSWORD=secret123/);
+   });
+ });
@@ -15,6 +15,7 @@ export interface MockClientOptions {
    invalidIndexesRows?: any[];
    unusedIndexesRows?: any[];
    redundantIndexesRows?: any[];
+   sensitiveColumnsRows?: any[];
  }

  const DEFAULT_VERSION_ROWS = [
  const DEFAULT_VERSION_ROWS = [
@@ -45,6 +46,7 @@ export function createMockClient(options: MockClientOptions = {}) {
45
46
  invalidIndexesRows = [],
46
47
  unusedIndexesRows = [],
47
48
  redundantIndexesRows = [],
49
+ sensitiveColumnsRows = [],
48
50
  } = options;
49
51
 
50
52
  return {
@@ -116,6 +118,10 @@ export function createMockClient(options: MockClientOptions = {}) {
        max_connections: 100,
      }] };
    }
+   // S001: Sensitive columns query (from metrics.yml)
+   if (sql.includes("information_schema.columns") && sql.includes("tag_schema_name") && sql.includes("tag_column_name")) {
+     return { rows: sensitiveColumnsRows };
+   }
    throw new Error(`Unexpected query: ${sql}`);
  },
  };
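
With the option threaded through, a test can now stub the S001 query like any other fixture. A usage sketch (the row shape is illustrative; the matcher above only checks that the SQL mentions information_schema.columns, tag_schema_name, and tag_column_name):

const client = createMockClient({
  sensitiveColumnsRows: [
    // Illustrative row; field names follow the tag_* convention the matcher looks for.
    { tag_schema_name: "public", tag_table_name: "users", tag_column_name: "email" },
  ],
});
// Any query containing the three matched substrings now resolves to the stubbed
// rows instead of hitting the final "Unexpected query" throw.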