postgresai 0.14.0-beta.3 → 0.14.0-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +45 -45
- package/bin/postgres-ai.ts +946 -336
- package/bun.lock +258 -0
- package/bunfig.toml +11 -0
- package/dist/bin/postgres-ai.js +27868 -1771
- package/lib/auth-server.ts +124 -106
- package/lib/checkup-api.ts +386 -0
- package/lib/checkup.ts +1327 -0
- package/lib/config.ts +3 -0
- package/lib/init.ts +282 -78
- package/lib/issues.ts +86 -195
- package/lib/mcp-server.ts +6 -17
- package/lib/metrics-embedded.ts +79 -0
- package/lib/metrics-loader.ts +127 -0
- package/lib/util.ts +61 -0
- package/package.json +18 -10
- package/packages/postgres-ai/README.md +26 -0
- package/packages/postgres-ai/bin/postgres-ai.js +27 -0
- package/packages/postgres-ai/package.json +27 -0
- package/scripts/embed-metrics.ts +154 -0
- package/sql/02.permissions.sql +9 -5
- package/sql/05.helpers.sql +415 -0
- package/test/checkup.integration.test.ts +273 -0
- package/test/checkup.test.ts +890 -0
- package/test/init.integration.test.ts +399 -0
- package/test/init.test.ts +345 -0
- package/test/schema-validation.test.ts +81 -0
- package/test/test-utils.ts +122 -0
- package/tsconfig.json +12 -20
- package/dist/bin/postgres-ai.d.ts +0 -3
- package/dist/bin/postgres-ai.d.ts.map +0 -1
- package/dist/bin/postgres-ai.js.map +0 -1
- package/dist/lib/auth-server.d.ts +0 -31
- package/dist/lib/auth-server.d.ts.map +0 -1
- package/dist/lib/auth-server.js +0 -263
- package/dist/lib/auth-server.js.map +0 -1
- package/dist/lib/config.d.ts +0 -45
- package/dist/lib/config.d.ts.map +0 -1
- package/dist/lib/config.js +0 -181
- package/dist/lib/config.js.map +0 -1
- package/dist/lib/init.d.ts +0 -75
- package/dist/lib/init.d.ts.map +0 -1
- package/dist/lib/init.js +0 -482
- package/dist/lib/init.js.map +0 -1
- package/dist/lib/issues.d.ts +0 -75
- package/dist/lib/issues.d.ts.map +0 -1
- package/dist/lib/issues.js +0 -336
- package/dist/lib/issues.js.map +0 -1
- package/dist/lib/mcp-server.d.ts +0 -9
- package/dist/lib/mcp-server.d.ts.map +0 -1
- package/dist/lib/mcp-server.js +0 -168
- package/dist/lib/mcp-server.js.map +0 -1
- package/dist/lib/pkce.d.ts +0 -32
- package/dist/lib/pkce.d.ts.map +0 -1
- package/dist/lib/pkce.js +0 -101
- package/dist/lib/pkce.js.map +0 -1
- package/dist/lib/util.d.ts +0 -27
- package/dist/lib/util.d.ts.map +0 -1
- package/dist/lib/util.js +0 -46
- package/dist/lib/util.js.map +0 -1
- package/dist/package.json +0 -46
- package/test/init.integration.test.cjs +0 -382
- package/test/init.test.cjs +0 -323
|
@@ -0,0 +1,415 @@
|
|
|
1
|
+
-- Helper functions for postgres_ai monitoring user (template-filled by cli/lib/init.ts)
-- These functions use SECURITY DEFINER to allow the monitoring user to perform
-- operations they don't have direct permissions for.

/*
 * explain_generic
 *
 * Function to get generic explain plans with optional HypoPG index testing.
 * Requires: PostgreSQL 16+ (for generic_plan option), HypoPG extension (optional).
 *
 * Usage examples:
 *   -- Basic generic plan
 *   select postgres_ai.explain_generic('select * from users where id = $1');
 *
 *   -- JSON format
 *   select postgres_ai.explain_generic('select * from users where id = $1', 'json');
 *
 *   -- Test a hypothetical index
 *   select postgres_ai.explain_generic(
 *       'select * from users where email = $1',
 *       'text',
 *       'create index on users (email)'
 *   );
 */
create or replace function postgres_ai.explain_generic(
    in query text,
    in format text default 'text',
    in hypopg_index text default null,
    out result text
)
language plpgsql
security definer
set search_path = pg_catalog, public
as $$
declare
    v_line record;               -- one EXPLAIN output row ("QUERY PLAN" column)
    v_lines text[] := '{}';
    v_hypo_result record;        -- (indexrelid, indexname) from hypopg_create_index()
    v_format text := lower(coalesce(format, 'text'));
    v_version int;
begin
    -- The generic_plan EXPLAIN option only exists on PostgreSQL 16+.
    select current_setting('server_version_num')::int into v_version;

    if v_version < 160000 then
        raise exception 'generic_plan requires PostgreSQL 16+, current version: %',
            current_setting('server_version');
    end if;

    -- Fail fast on unsupported formats instead of silently producing text
    -- output (previously any value other than 'json' fell through to text,
    -- hiding typos such as 'jsn').
    if v_format not in ('text', 'json') then
        raise exception 'unsupported format: % (expected ''text'' or ''json'')', format;
    end if;

    -- Check if HypoPG extension is available before creating a hypothetical index.
    if hypopg_index is not null then
        if not exists (select 1 from pg_extension where extname = 'hypopg') then
            raise exception 'HypoPG extension is required for hypothetical index testing but is not installed';
        end if;

        select * into v_hypo_result from hypopg_create_index(hypopg_index);
        raise notice 'Created hypothetical index: % (oid: %)',
            v_hypo_result.indexname, v_hypo_result.indexrelid;
    end if;

    -- NOTE(review): `query` and `hypopg_index` are interpolated into dynamic SQL
    -- on purpose (EXPLAIN cannot take bind parameters for the statement text).
    -- Because this function is SECURITY DEFINER, EXECUTE access must only be
    -- granted to the trusted monitoring role below.
    -- Output is preserved exactly as EXPLAIN returns it.
    begin
        if v_format = 'json' then
            execute 'explain (verbose, settings, generic_plan, format json) ' || query
                into result;
        else
            for v_line in execute 'explain (verbose, settings, generic_plan) ' || query loop
                v_lines := array_append(v_lines, v_line."QUERY PLAN");
            end loop;
            result := array_to_string(v_lines, e'\n');
        end if;
    exception when others then
        -- Clean up hypothetical index before re-raising.
        if hypopg_index is not null then
            perform hypopg_reset();
        end if;
        raise;
    end;

    -- Clean up hypothetical index on the success path.
    if hypopg_index is not null then
        perform hypopg_reset();
    end if;
end;
$$;

comment on function postgres_ai.explain_generic(text, text, text) is
'Returns generic EXPLAIN plan with optional HypoPG index testing (requires PG16+)';

-- Grant execute to the monitoring user
grant execute on function postgres_ai.explain_generic(text, text, text) to {{ROLE_IDENT}};
|
|
100
|
+
|
|
101
|
+
/*
 * table_describe
 *
 * Collects comprehensive information about a table for LLM analysis.
 * Returns a compact text format with:
 * - Table metadata (type, size estimates)
 * - Columns (name, type, nullable, default)
 * - Indexes
 * - Constraints (PK, FK, unique, check)
 * - Maintenance stats (vacuum/analyze times)
 *
 * Usage:
 *   select postgres_ai.table_describe('public.users');
 *   select postgres_ai.table_describe('my_table'); -- uses search_path
 */
create or replace function postgres_ai.table_describe(
    in table_name text,
    out result text
)
language plpgsql
security definer
set search_path = pg_catalog, public
as $$
declare
    v_oid oid;
    v_schema text;
    v_table text;
    v_relkind char;
    v_relpages int;
    v_reltuples float;
    v_lines text[] := '{}';      -- accumulated output lines, joined at the end
    v_line text;
    v_rec record;
    -- Per-section item counter (indexes, constraints, referencing FKs,
    -- partitions). Reset before each section; replaces the previous
    -- v_constraint_count, which was misleadingly reused for non-constraint
    -- counts and supplemented by a redundant pg_index EXISTS re-query.
    v_item_count int := 0;
begin
    -- Resolve table name to OID (handles schema-qualified and search_path).
    -- regclass resolution also quotes/validates the identifier safely.
    v_oid := table_name::regclass::oid;

    -- Get basic table info
    select
        n.nspname,
        c.relname,
        c.relkind,
        c.relpages,
        c.reltuples
    into v_schema, v_table, v_relkind, v_relpages, v_reltuples
    from pg_class c
    join pg_namespace n on n.oid = c.relnamespace
    where c.oid = v_oid;

    -- Validate object type - only tables, views, and materialized views are supported
    if v_relkind not in ('r', 'p', 'v', 'm', 'f') then
        raise exception 'table_describe does not support % (relkind=%)',
            case v_relkind
                when 'i' then 'indexes'
                when 'I' then 'partitioned indexes'
                when 'S' then 'sequences'
                when 't' then 'TOAST tables'
                when 'c' then 'composite types'
                else format('objects of type "%s"', v_relkind)
            end,
            v_relkind;
    end if;

    -- Header
    v_lines := array_append(v_lines, format('Table: %I.%I', v_schema, v_table));
    v_lines := array_append(v_lines, format('Type: %s | relpages: %s | reltuples: %s',
        case v_relkind
            when 'r' then 'table'
            when 'p' then 'partitioned table'
            when 'v' then 'view'
            when 'm' then 'materialized view'
            when 'f' then 'foreign table'
        end,
        v_relpages,
        -- reltuples = -1 means "never analyzed"; keep it visibly distinct
        case when v_reltuples < 0 then '-1' else v_reltuples::bigint::text end
    ));

    -- Vacuum/analyze stats (only for tables and materialized views, not views)
    if v_relkind in ('r', 'p', 'm', 'f') then
        select
            format('Vacuum: %s (auto: %s) | Analyze: %s (auto: %s)',
                coalesce(to_char(last_vacuum at time zone 'UTC', 'YYYY-MM-DD HH24:MI:SS UTC'), 'never'),
                coalesce(to_char(last_autovacuum at time zone 'UTC', 'YYYY-MM-DD HH24:MI:SS UTC'), 'never'),
                coalesce(to_char(last_analyze at time zone 'UTC', 'YYYY-MM-DD HH24:MI:SS UTC'), 'never'),
                coalesce(to_char(last_autoanalyze at time zone 'UTC', 'YYYY-MM-DD HH24:MI:SS UTC'), 'never')
            )
        into v_line
        from pg_stat_all_tables
        where relid = v_oid;

        if v_line is not null then
            v_lines := array_append(v_lines, v_line);
        end if;
    end if;

    v_lines := array_append(v_lines, '');

    -- Columns
    v_lines := array_append(v_lines, 'Columns:');
    for v_rec in
        select
            a.attname,
            format_type(a.atttypid, a.atttypmod) as data_type,
            a.attnotnull,
            (select pg_get_expr(d.adbin, d.adrelid, true)
                from pg_attrdef d
                where d.adrelid = a.attrelid and d.adnum = a.attnum and a.atthasdef) as default_val,
            a.attidentity,
            a.attgenerated
        from pg_attribute a
        where a.attrelid = v_oid
            and a.attnum > 0          -- skip system columns
            and not a.attisdropped
        order by a.attnum
    loop
        v_line := format('  %s %s', v_rec.attname, v_rec.data_type);

        if v_rec.attnotnull then
            v_line := v_line || ' NOT NULL';
        end if;

        -- Identity / generated columns take precedence over plain defaults;
        -- for generated columns default_val holds the generation expression.
        if v_rec.attidentity = 'a' then
            v_line := v_line || ' GENERATED ALWAYS AS IDENTITY';
        elsif v_rec.attidentity = 'd' then
            v_line := v_line || ' GENERATED BY DEFAULT AS IDENTITY';
        elsif v_rec.attgenerated = 's' then
            v_line := v_line || format(' GENERATED ALWAYS AS (%s) STORED', v_rec.default_val);
        elsif v_rec.default_val is not null then
            v_line := v_line || format(' DEFAULT %s', v_rec.default_val);
        end if;

        v_lines := array_append(v_lines, v_line);
    end loop;

    -- View definition (for views and materialized views)
    if v_relkind in ('v', 'm') then
        v_lines := array_append(v_lines, '');
        v_lines := array_append(v_lines, 'Definition:');
        v_line := pg_get_viewdef(v_oid, true);
        if v_line is not null then
            -- Indent the view definition
            v_line := '  ' || replace(v_line, e'\n', e'\n  ');
            v_lines := array_append(v_lines, v_line);
        end if;
    end if;

    -- Indexes (tables, partitioned tables, and materialized views can have indexes)
    if v_relkind in ('r', 'p', 'm') then
        v_lines := array_append(v_lines, '');
        v_lines := array_append(v_lines, 'Indexes:');
        v_item_count := 0;

        for v_rec in
            select
                i.relname as index_name,
                pg_get_indexdef(i.oid) as index_def,
                ix.indisprimary,
                ix.indisunique
            from pg_index ix
            join pg_class i on i.oid = ix.indexrelid
            where ix.indrelid = v_oid
            order by ix.indisprimary desc, ix.indisunique desc, i.relname
        loop
            v_item_count := v_item_count + 1;
            v_line := '  ';
            if v_rec.indisprimary then
                v_line := v_line || 'PRIMARY KEY: ';
            elsif v_rec.indisunique then
                v_line := v_line || 'UNIQUE: ';
            else
                v_line := v_line || 'INDEX: ';
            end if;
            -- Extract just the column part from index definition
            v_line := v_line || v_rec.index_name || ' ' ||
                regexp_replace(v_rec.index_def, '^CREATE.*INDEX.*ON.*USING\s+\w+\s*', '');
            v_lines := array_append(v_lines, v_line);
        end loop;

        -- Count from the loop instead of a second pg_index scan (the previous
        -- EXISTS re-query was redundant and inconsistent with other sections).
        if v_item_count = 0 then
            v_lines := array_append(v_lines, '  (none)');
        end if;
    end if;

    -- Constraints (only tables can have constraints)
    if v_relkind in ('r', 'p', 'f') then
        v_lines := array_append(v_lines, '');
        v_lines := array_append(v_lines, 'Constraints:');
        v_item_count := 0;

        for v_rec in
            select
                conname,
                contype,
                pg_get_constraintdef(oid, true) as condef
            from pg_constraint
            where conrelid = v_oid
                and contype != 'p' -- skip primary key (shown with indexes)
            order by
                case contype when 'f' then 1 when 'u' then 2 when 'c' then 3 else 4 end,
                conname
        loop
            v_item_count := v_item_count + 1;
            v_line := '  ';
            case v_rec.contype
                when 'f' then v_line := v_line || 'FK: ';
                when 'u' then v_line := v_line || 'UNIQUE: ';
                when 'c' then v_line := v_line || 'CHECK: ';
                else v_line := v_line || v_rec.contype || ': ';
            end case;
            v_line := v_line || v_rec.conname || ' ' || v_rec.condef;
            v_lines := array_append(v_lines, v_line);
        end loop;

        if v_item_count = 0 then
            v_lines := array_append(v_lines, '  (none)');
        end if;

        -- Foreign keys referencing this table
        v_lines := array_append(v_lines, '');
        v_lines := array_append(v_lines, 'Referenced by:');
        v_item_count := 0;

        for v_rec in
            select
                conname,
                conrelid::regclass::text as from_table,
                pg_get_constraintdef(oid, true) as condef
            from pg_constraint
            where confrelid = v_oid
                and contype = 'f'
            order by conrelid::regclass::text, conname
        loop
            v_item_count := v_item_count + 1;
            v_lines := array_append(v_lines, format('  %s.%s %s',
                v_rec.from_table, v_rec.conname, v_rec.condef));
        end loop;

        if v_item_count = 0 then
            v_lines := array_append(v_lines, '  (none)');
        end if;
    end if;

    -- Partition info (if partitioned table or partition)
    if v_relkind = 'p' then
        -- This is a partitioned table - show partition key and partitions
        v_lines := array_append(v_lines, '');
        v_lines := array_append(v_lines, 'Partitioning:');

        select format('  %s BY %s',
            case partstrat
                when 'r' then 'RANGE'
                when 'l' then 'LIST'
                when 'h' then 'HASH'
                else partstrat
            end,
            pg_get_partkeydef(v_oid)
        )
        into v_line
        from pg_partitioned_table
        where partrelid = v_oid;

        if v_line is not null then
            v_lines := array_append(v_lines, v_line);
        end if;

        -- List partitions
        v_item_count := 0;
        for v_rec in
            select
                c.oid::regclass::text as partition_name,
                pg_get_expr(c.relpartbound, c.oid) as partition_bound,
                c.relpages,
                c.reltuples
            from pg_inherits i
            join pg_class c on c.oid = i.inhrelid
            where i.inhparent = v_oid
            order by c.oid::regclass::text
        loop
            v_item_count := v_item_count + 1;
            v_lines := array_append(v_lines, format('  %s: %s (relpages: %s, reltuples: %s)',
                v_rec.partition_name, v_rec.partition_bound,
                v_rec.relpages,
                case when v_rec.reltuples < 0 then '-1' else v_rec.reltuples::bigint::text end
            ));
        end loop;

        v_lines := array_append(v_lines, format('  Total partitions: %s', v_item_count));

    elsif exists (select 1 from pg_inherits where inhrelid = v_oid) then
        -- This is a partition - show parent and bound
        v_lines := array_append(v_lines, '');
        v_lines := array_append(v_lines, 'Partition of:');

        select format('  %s FOR VALUES %s',
            i.inhparent::regclass::text,
            pg_get_expr(c.relpartbound, c.oid)
        )
        into v_line
        from pg_inherits i
        join pg_class c on c.oid = i.inhrelid
        where i.inhrelid = v_oid;

        if v_line is not null then
            v_lines := array_append(v_lines, v_line);
        end if;
    end if;

    result := array_to_string(v_lines, e'\n');
end;
$$;

comment on function postgres_ai.table_describe(text) is
'Returns comprehensive table information in compact text format for LLM analysis';

grant execute on function postgres_ai.table_describe(text) to {{ROLE_IDENT}};
|
|
414
|
+
|
|
415
|
+
|
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Integration tests for checkup command (express mode)
|
|
3
|
+
* Validates that CLI-generated reports match JSON schemas used by the Python reporter.
|
|
4
|
+
* This ensures compatibility between "express" and "full" (monitoring) modes.
|
|
5
|
+
*/
|
|
6
|
+
import { describe, test, expect, afterAll, beforeAll } from "bun:test";
|
|
7
|
+
import * as fs from "fs";
|
|
8
|
+
import * as os from "os";
|
|
9
|
+
import * as path from "path";
|
|
10
|
+
import * as net from "net";
|
|
11
|
+
import { Client } from "pg";
|
|
12
|
+
import { resolve } from "path";
|
|
13
|
+
import { readFileSync } from "fs";
|
|
14
|
+
import Ajv2020 from "ajv/dist/2020";
|
|
15
|
+
|
|
16
|
+
import * as checkup from "../lib/checkup";
|
|
17
|
+
|
|
18
|
+
const ajv = new Ajv2020({ allErrors: true, strict: false });
|
|
19
|
+
const schemasDir = resolve(import.meta.dir, "../../reporter/schemas");
|
|
20
|
+
|
|
21
|
+
function findOnPath(cmd: string): string | null {
|
|
22
|
+
const result = Bun.spawnSync(["sh", "-c", `command -v ${cmd}`]);
|
|
23
|
+
if (result.exitCode === 0) {
|
|
24
|
+
return new TextDecoder().decode(result.stdout).trim();
|
|
25
|
+
}
|
|
26
|
+
return null;
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
function findPgBin(cmd: string): string | null {
|
|
30
|
+
const p = findOnPath(cmd);
|
|
31
|
+
if (p) return p;
|
|
32
|
+
const probe = Bun.spawnSync([
|
|
33
|
+
"sh",
|
|
34
|
+
"-c",
|
|
35
|
+
`ls -1 /usr/lib/postgresql/*/bin/${cmd} 2>/dev/null | head -n 1 || true`,
|
|
36
|
+
]);
|
|
37
|
+
const out = new TextDecoder().decode(probe.stdout).trim();
|
|
38
|
+
if (out) return out;
|
|
39
|
+
return null;
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
function havePostgresBinaries(): boolean {
|
|
43
|
+
return !!(findPgBin("initdb") && findPgBin("postgres"));
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
function isRunningAsRoot(): boolean {
|
|
47
|
+
return process.getuid?.() === 0;
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
async function getFreePort(): Promise<number> {
|
|
51
|
+
return new Promise((resolve, reject) => {
|
|
52
|
+
const srv = net.createServer();
|
|
53
|
+
srv.listen(0, "127.0.0.1", () => {
|
|
54
|
+
const addr = srv.address() as net.AddressInfo;
|
|
55
|
+
srv.close((err) => {
|
|
56
|
+
if (err) return reject(err);
|
|
57
|
+
resolve(addr.port);
|
|
58
|
+
});
|
|
59
|
+
});
|
|
60
|
+
srv.on("error", reject);
|
|
61
|
+
});
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
async function waitFor<T>(
|
|
65
|
+
fn: () => Promise<T>,
|
|
66
|
+
{ timeoutMs = 10000, intervalMs = 100 } = {}
|
|
67
|
+
): Promise<T> {
|
|
68
|
+
const start = Date.now();
|
|
69
|
+
while (true) {
|
|
70
|
+
try {
|
|
71
|
+
return await fn();
|
|
72
|
+
} catch (e) {
|
|
73
|
+
if (Date.now() - start > timeoutMs) throw e;
|
|
74
|
+
await new Promise((r) => setTimeout(r, intervalMs));
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
/** Handle for a throwaway PostgreSQL instance created by createTempPostgres(). */
interface TempPostgres {
  // TCP port the server listens on (127.0.0.1)
  port: number;
  // Directory holding the unix-domain socket
  socketDir: string;
  // Stop the server (SIGTERM, then SIGKILL) and remove the temp directory
  cleanup: () => Promise<void>;
  // Open a pg Client; database defaults to "postgres"
  connect: (database?: string) => Promise<Client>;
}
|
|
85
|
+
|
|
86
|
+
/**
 * Spin up a throwaway PostgreSQL cluster in a temp directory:
 * initdb with trust auth, start the server on a free port + unix socket,
 * wait until it accepts connections, and return a handle with
 * connect/cleanup helpers. Caller must invoke cleanup().
 */
async function createTempPostgres(): Promise<TempPostgres> {
  const tmpRoot = fs.mkdtempSync(path.join(os.tmpdir(), "postgresai-checkup-"));
  const dataDir = path.join(tmpRoot, "data");
  const socketDir = path.join(tmpRoot, "sock");
  fs.mkdirSync(socketDir, { recursive: true });

  const initdb = findPgBin("initdb");
  const postgresBin = findPgBin("postgres");
  if (!initdb || !postgresBin) {
    throw new Error("PostgreSQL binaries not found");
  }

  // Create the cluster; trust auth so tests need no password handling.
  const init = Bun.spawnSync([initdb, "-D", dataDir, "-U", "postgres", "-A", "trust"]);
  if (init.exitCode !== 0) {
    // initdb sometimes reports errors on stdout; prefer stderr when present.
    throw new Error(new TextDecoder().decode(init.stderr) || new TextDecoder().decode(init.stdout));
  }

  // Allow unix-socket connections from any local user.
  const hbaPath = path.join(dataDir, "pg_hba.conf");
  fs.appendFileSync(hbaPath, "\nlocal all all trust\n", "utf8");

  const port = await getFreePort();
  const postgresProc = Bun.spawn(
    [postgresBin, "-D", dataDir, "-k", socketDir, "-h", "127.0.0.1", "-p", String(port)],
    { stdio: ["ignore", "pipe", "pipe"] }
  );

  // Graceful shutdown: SIGTERM, wait up to 5s, escalate to SIGKILL,
  // then remove the whole temp tree.
  const cleanup = async () => {
    postgresProc.kill("SIGTERM");
    try {
      await waitFor(
        async () => {
          if (postgresProc.exitCode === null) throw new Error("still running");
        },
        { timeoutMs: 5000, intervalMs: 100 }
      );
    } catch {
      postgresProc.kill("SIGKILL");
    }
    fs.rmSync(tmpRoot, { recursive: true, force: true });
  };

  // Connect over the unix socket (host = socket directory for node-postgres).
  const connect = async (database = "postgres"): Promise<Client> => {
    const c = new Client({ host: socketDir, port, user: "postgres", database });
    await c.connect();
    return c;
  };

  // Wait for Postgres to start accepting connections.
  await waitFor(async () => {
    const c = await connect();
    await c.end();
  });

  return { port, socketDir, cleanup, connect };
}
|
|
141
|
+
|
|
142
|
+
function validateAgainstSchema(report: any, checkId: string): void {
|
|
143
|
+
const schemaPath = resolve(schemasDir, `${checkId}.schema.json`);
|
|
144
|
+
if (!fs.existsSync(schemaPath)) {
|
|
145
|
+
throw new Error(`Schema not found: ${schemaPath}`);
|
|
146
|
+
}
|
|
147
|
+
const schema = JSON.parse(readFileSync(schemaPath, "utf8"));
|
|
148
|
+
const validate = ajv.compile(schema);
|
|
149
|
+
const valid = validate(report);
|
|
150
|
+
if (!valid) {
|
|
151
|
+
const errors = validate.errors?.map(e => `${e.instancePath}: ${e.message}`).join(", ");
|
|
152
|
+
throw new Error(`${checkId} schema validation failed: ${errors}`);
|
|
153
|
+
}
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
// Skip tests if PostgreSQL binaries are not available
|
|
157
|
+
const skipReason = !havePostgresBinaries()
|
|
158
|
+
? "PostgreSQL binaries not available"
|
|
159
|
+
: isRunningAsRoot()
|
|
160
|
+
? "Cannot run as root (PostgreSQL refuses)"
|
|
161
|
+
: null;
|
|
162
|
+
|
|
163
|
+
// In CI, warn if integration tests are being skipped (helps catch configuration issues)
|
|
164
|
+
const isCI = process.env.CI === "true" || process.env.GITLAB_CI === "true";
|
|
165
|
+
if (skipReason && isCI) {
|
|
166
|
+
console.warn(`[CI WARNING] Integration tests skipped: ${skipReason}`);
|
|
167
|
+
console.warn("This may indicate a CI configuration issue - PostgreSQL binaries should be available.");
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
// End-to-end suite: run every express-mode report generator against a real
// (throwaway) PostgreSQL instance and validate output against the JSON
// schemas shared with the Python reporter.
describe.skipIf(!!skipReason)("checkup integration: express mode schema compatibility", () => {
  let pg: TempPostgres;
  let client: Client;

  beforeAll(async () => {
    pg = await createTempPostgres();
    client = await pg.connect();
  });

  afterAll(async () => {
    if (client) await client.end();
    if (pg) await pg.cleanup();
  });

  // Test all checks supported by express mode
  const expressChecks = Object.keys(checkup.CHECK_INFO);

  for (const checkId of expressChecks) {
    test(`${checkId} report validates against shared schema`, async () => {
      const generator = checkup.REPORT_GENERATORS[checkId];
      expect(generator).toBeDefined();

      const report = await generator(client, "test-node");

      // Validate basic report structure (matching schema requirements)
      expect(report).toHaveProperty("checkId", checkId);
      expect(report).toHaveProperty("checkTitle");
      expect(report).toHaveProperty("timestamptz");
      expect(report).toHaveProperty("nodes");
      expect(report).toHaveProperty("results");
      expect(report.results).toHaveProperty("test-node");

      // Validate against JSON schema (same schema used by Python reporter)
      validateAgainstSchema(report, checkId);
    });
  }

  test("generateAllReports produces valid reports for all checks", async () => {
    const reports = await checkup.generateAllReports(client, "test-node");

    // One report per supported check, each conforming to its schema.
    expect(Object.keys(reports).length).toBe(expressChecks.length);

    for (const [checkId, report] of Object.entries(reports)) {
      validateAgainstSchema(report, checkId);
    }
  });

  test("report structure matches Python reporter format", async () => {
    // Generate A003 (settings) report and verify structure matches what Python produces
    const report = await checkup.generateA003(client, "test-node");

    // Check required fields match Python reporter output structure (per schema)
    expect(report).toHaveProperty("checkId", "A003");
    expect(report).toHaveProperty("checkTitle", "Postgres settings");
    expect(report).toHaveProperty("timestamptz");
    expect(report).toHaveProperty("nodes");
    expect(report.nodes).toHaveProperty("primary");
    expect(report.nodes).toHaveProperty("standbys");
    expect(report).toHaveProperty("results");

    // Results should have node-specific data
    const nodeResult = report.results["test-node"];
    expect(nodeResult).toHaveProperty("data");

    // A003 should have settings as keyed object
    expect(typeof nodeResult.data).toBe("object");

    // Check postgres_version if present (optional per schema)
    if (nodeResult.postgres_version) {
      expect(nodeResult.postgres_version).toHaveProperty("version");
      expect(nodeResult.postgres_version).toHaveProperty("server_version_num");
      expect(nodeResult.postgres_version).toHaveProperty("server_major_ver");
      expect(nodeResult.postgres_version).toHaveProperty("server_minor_ver");
    }
  });

  test("H001 (invalid indexes) has correct data structure", async () => {
    const report = await checkup.generateH001(client, "test-node");
    validateAgainstSchema(report, "H001");

    const nodeResult = report.results["test-node"];
    expect(nodeResult).toHaveProperty("data");
    // data should be an object with indexes (may be empty on fresh DB)
    expect(typeof nodeResult.data).toBe("object");
  });

  test("H002 (unused indexes) has correct data structure", async () => {
    const report = await checkup.generateH002(client, "test-node");
    validateAgainstSchema(report, "H002");

    const nodeResult = report.results["test-node"];
    expect(nodeResult).toHaveProperty("data");
    expect(typeof nodeResult.data).toBe("object");
  });

  test("H004 (redundant indexes) has correct data structure", async () => {
    const report = await checkup.generateH004(client, "test-node");
    validateAgainstSchema(report, "H004");

    const nodeResult = report.results["test-node"];
    expect(nodeResult).toHaveProperty("data");
    expect(typeof nodeResult.data).toBe("object");
  });
});
|