postgresai 0.14.0-beta.12 → 0.14.0-beta.13
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their respective public registries.
- package/README.md +32 -0
- package/bin/postgres-ai.ts +928 -170
- package/dist/bin/postgres-ai.js +2095 -335
- package/lib/checkup.ts +69 -3
- package/lib/init.ts +76 -19
- package/lib/issues.ts +453 -7
- package/lib/mcp-server.ts +180 -3
- package/lib/metrics-embedded.ts +3 -3
- package/lib/supabase.ts +824 -0
- package/package.json +1 -1
- package/test/checkup.test.ts +240 -14
- package/test/config-consistency.test.ts +36 -0
- package/test/init.integration.test.ts +80 -71
- package/test/init.test.ts +266 -1
- package/test/issues.cli.test.ts +224 -0
- package/test/mcp-server.test.ts +551 -12
- package/test/supabase.test.ts +568 -0
- package/test/test-utils.ts +6 -0
package/package.json
CHANGED
package/test/checkup.test.ts
CHANGED
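The new tests below drive the report generators through a `createMockClient` helper from `package/test/test-utils.ts` (which gains 6 lines in this release). The helper's real shape is not shown in this diff; the following is only a hypothetical sketch, inferred from how the tests call it, of a pg-`Client`-like object that serves canned rows keyed by query:

```ts
// Hypothetical sketch only: the real helper lives in package/test/test-utils.ts
// and may be shaped differently. Row keys mirror the options the tests pass in.
type Row = Record<string, unknown>;

interface MockData {
  databaseSizesRows?: Row[];
  dbStatsRows?: Row[];
  invalidIndexesRows?: Row[];
  sensitiveColumnsRows?: Row[]; // new key exercised by this release's tests
}

// Returns an object castable to pg's Client whose query() dispatches on
// recognizable fragments of the SQL text and returns the canned rows.
function createMockClient(data: MockData) {
  return {
    async query(sql: string): Promise<{ rows: Row[] }> {
      if (sql.includes("pg_stat_database")) return { rows: data.dbStatsRows ?? [] };
      if (sql.includes("indisvalid")) return { rows: data.invalidIndexesRows ?? [] };
      return { rows: [] };
    },
  };
}
```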
@@ -287,27 +287,28 @@ describe("Report generators with mock client", () => {
       },
     ],
     databaseSizesRows: [{ datname: "postgres", size_bytes: "1073741824" }],
-    dbStatsRows: [{
-      numbackends: 5,
-      xact_commit: 100,
-      xact_rollback: 1,
-      blks_read: 1000,
-      blks_hit: 9000,
-      tup_returned: 500,
-      tup_fetched: 400,
-      tup_inserted: 50,
-      tup_updated: 30,
-      tup_deleted: 10,
-      deadlocks: 0,
-      temp_files: 0,
+    dbStatsRows: [{
+      numbackends: 5,
+      xact_commit: 100,
+      xact_rollback: 1,
+      blks_read: 1000,
+      blks_hit: 9000,
+      tup_returned: 500,
+      tup_fetched: 400,
+      tup_inserted: 50,
+      tup_updated: 30,
+      tup_deleted: 10,
+      deadlocks: 0,
+      temp_files: 0,
       temp_bytes: 0,
-      postmaster_uptime_s: 864000
+      postmaster_uptime_s: 864000
     }],
     connectionStatesRows: [{ state: "active", count: 2 }, { state: "idle", count: 3 }],
     uptimeRows: [{ start_time: new Date("2024-01-01T00:00:00Z"), uptime: "10 days" }],
     invalidIndexesRows: [],
     unusedIndexesRows: [],
     redundantIndexesRows: [],
+    sensitiveColumnsRows: [],
   }
 );

@@ -320,6 +321,7 @@ describe("Report generators with mock client", () => {
   expect("H001" in reports).toBe(true);
   expect("H002" in reports).toBe(true);
   expect("H004" in reports).toBe(true);
+  // S001 is only available in Python reporter, not in CLI express mode
   expect(reports.A002.checkId).toBe("A002");
   expect(reports.A003.checkId).toBe("A003");
   expect(reports.A004.checkId).toBe("A004");
@@ -525,9 +527,233 @@ describe("H001 - Invalid indexes", () => {
     expect(dbData.database_size_pretty).toBeTruthy();
     expect(report.results["test-node"].postgres_version).toBeTruthy();
   });
+
+  test("getInvalidIndexes returns decision tree fields including valid_duplicate_definition", async () => {
+    const mockClient = createMockClient({
+      invalidIndexesRows: [
+        {
+          schema_name: "public",
+          table_name: "users",
+          index_name: "users_email_idx_invalid",
+          relation_name: "users",
+          index_size_bytes: "1048576",
+          index_definition: "CREATE INDEX users_email_idx_invalid ON public.users USING btree (email)",
+          supports_fk: false,
+          is_pk: false,
+          is_unique: false,
+          constraint_name: null,
+          table_row_estimate: "5000",
+          has_valid_duplicate: true,
+          valid_index_name: "users_email_idx",
+          valid_index_definition: "CREATE INDEX users_email_idx ON public.users USING btree (email)",
+        },
+      ],
+    });
+
+    const indexes = await checkup.getInvalidIndexes(mockClient as any);
+    expect(indexes.length).toBe(1);
+    expect(indexes[0].is_pk).toBe(false);
+    expect(indexes[0].is_unique).toBe(false);
+    expect(indexes[0].constraint_name).toBeNull();
+    expect(indexes[0].table_row_estimate).toBe(5000);
+    expect(indexes[0].has_valid_duplicate).toBe(true);
+    expect(indexes[0].valid_duplicate_name).toBe("users_email_idx");
+    expect(indexes[0].valid_duplicate_definition).toBe("CREATE INDEX users_email_idx ON public.users USING btree (email)");
+  });
+
+  test("getInvalidIndexes handles has_valid_duplicate: false with null values", async () => {
+    const mockClient = createMockClient({
+      invalidIndexesRows: [
+        {
+          schema_name: "public",
+          table_name: "orders",
+          index_name: "orders_status_idx_invalid",
+          relation_name: "orders",
+          index_size_bytes: "524288",
+          index_definition: "CREATE INDEX orders_status_idx_invalid ON public.orders USING btree (status)",
+          supports_fk: false,
+          is_pk: false,
+          is_unique: false,
+          constraint_name: null,
+          table_row_estimate: "100000",
+          has_valid_duplicate: false,
+          valid_index_name: null,
+          valid_index_definition: null,
+        },
+      ],
+    });
+
+    const indexes = await checkup.getInvalidIndexes(mockClient as Client);
+    expect(indexes.length).toBe(1);
+    expect(indexes[0].has_valid_duplicate).toBe(false);
+    expect(indexes[0].valid_duplicate_name).toBeNull();
+    expect(indexes[0].valid_duplicate_definition).toBeNull();
+  });
+
+  test("getInvalidIndexes handles is_pk: true with constraint", async () => {
+    const mockClient = createMockClient({
+      invalidIndexesRows: [
+        {
+          schema_name: "public",
+          table_name: "accounts",
+          index_name: "accounts_pkey_invalid",
+          relation_name: "accounts",
+          index_size_bytes: "262144",
+          index_definition: "CREATE UNIQUE INDEX accounts_pkey_invalid ON public.accounts USING btree (id)",
+          supports_fk: true,
+          is_pk: true,
+          is_unique: true,
+          constraint_name: "accounts_pkey",
+          table_row_estimate: "500",
+          has_valid_duplicate: false,
+          valid_index_name: null,
+          valid_index_definition: null,
+        },
+      ],
+    });
+
+    const indexes = await checkup.getInvalidIndexes(mockClient as Client);
+    expect(indexes.length).toBe(1);
+    expect(indexes[0].is_pk).toBe(true);
+    expect(indexes[0].is_unique).toBe(true);
+    expect(indexes[0].constraint_name).toBe("accounts_pkey");
+    expect(indexes[0].supports_fk).toBe(true);
+  });
+
+  test("getInvalidIndexes handles is_unique: true without PK", async () => {
+    const mockClient = createMockClient({
+      invalidIndexesRows: [
+        {
+          schema_name: "public",
+          table_name: "users",
+          index_name: "users_email_unique_invalid",
+          relation_name: "users",
+          index_size_bytes: "131072",
+          index_definition: "CREATE UNIQUE INDEX users_email_unique_invalid ON public.users USING btree (email)",
+          supports_fk: false,
+          is_pk: false,
+          is_unique: true,
+          constraint_name: "users_email_unique",
+          table_row_estimate: "25000",
+          has_valid_duplicate: true,
+          valid_index_name: "users_email_unique_idx",
+          valid_index_definition: "CREATE UNIQUE INDEX users_email_unique_idx ON public.users USING btree (email)",
+        },
+      ],
+    });
+
+    const indexes = await checkup.getInvalidIndexes(mockClient as Client);
+    expect(indexes.length).toBe(1);
+    expect(indexes[0].is_pk).toBe(false);
+    expect(indexes[0].is_unique).toBe(true);
+    expect(indexes[0].constraint_name).toBe("users_email_unique");
+    expect(indexes[0].has_valid_duplicate).toBe(true);
+  });
   // Top-level structure tests removed - covered by schema-validation.test.ts
 });

+// Tests for H001 decision tree recommendation logic
+describe("H001 - Decision tree recommendations", () => {
+  // Helper to create a minimal InvalidIndex for testing
+  const createTestIndex = (overrides: Partial<checkup.InvalidIndex> = {}): checkup.InvalidIndex => ({
+    schema_name: "public",
+    table_name: "test_table",
+    index_name: "test_idx",
+    relation_name: "public.test_table",
+    index_size_bytes: 1024,
+    index_size_pretty: "1 KiB",
+    index_definition: "CREATE INDEX test_idx ON public.test_table USING btree (col)",
+    supports_fk: false,
+    is_pk: false,
+    is_unique: false,
+    constraint_name: null,
+    table_row_estimate: 100000, // Large table by default
+    has_valid_duplicate: false,
+    valid_duplicate_name: null,
+    valid_duplicate_definition: null,
+    ...overrides,
+  });
+
+  test("returns DROP when has_valid_duplicate is true", () => {
+    const index = createTestIndex({ has_valid_duplicate: true, valid_duplicate_name: "existing_idx" });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
+  });
+
+  test("returns DROP even when is_pk is true if has_valid_duplicate is true", () => {
+    // has_valid_duplicate takes precedence over is_pk
+    const index = createTestIndex({
+      has_valid_duplicate: true,
+      is_pk: true,
+      is_unique: true,
+    });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
+  });
+
+  test("returns RECREATE when is_pk is true and no valid duplicate", () => {
+    const index = createTestIndex({
+      is_pk: true,
+      is_unique: true,
+      constraint_name: "test_pkey",
+    });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+  });
+
+  test("returns RECREATE when is_unique is true (non-PK) and no valid duplicate", () => {
+    const index = createTestIndex({
+      is_unique: true,
+      constraint_name: "test_unique",
+    });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+  });
+
+  test("returns RECREATE for small table (< 10K rows) without valid duplicate", () => {
+    const index = createTestIndex({ table_row_estimate: 5000 });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+  });
+
+  test("returns RECREATE for table at threshold boundary (9999 rows)", () => {
+    const index = createTestIndex({ table_row_estimate: 9999 });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+  });
+
+  test("returns UNCERTAIN for large table (>= 10K rows) at threshold boundary", () => {
+    const index = createTestIndex({ table_row_estimate: 10000 });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("UNCERTAIN");
+  });
+
+  test("returns UNCERTAIN for large table without valid duplicate or constraint", () => {
+    const index = createTestIndex({ table_row_estimate: 1000000 });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("UNCERTAIN");
+  });
+
+  test("returns RECREATE for empty table (0 rows) with no valid duplicate - edge case", () => {
+    // Empty table should be RECREATE (< 10K threshold)
+    const index = createTestIndex({ table_row_estimate: 0 });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+  });
+
+  test("decision tree priority: has_valid_duplicate > is_pk > small_table", () => {
+    // Even with PK and small table, has_valid_duplicate should win
+    const index = createTestIndex({
+      has_valid_duplicate: true,
+      is_pk: true,
+      is_unique: true,
+      table_row_estimate: 100,
+    });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("DROP");
+  });
+
+  test("decision tree priority: is_pk > small_table", () => {
+    // is_pk should return RECREATE regardless of table size
+    const index = createTestIndex({
+      is_pk: true,
+      is_unique: true,
+      table_row_estimate: 1000000, // Large table
+    });
+    expect(checkup.getInvalidIndexRecommendation(index)).toBe("RECREATE");
+  });
+});
+
 // Tests for H002 (Unused indexes)
 describe("H002 - Unused indexes", () => {
   test("getUnusedIndexes returns unused indexes", async () => {
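Read together, the new decision-tree tests pin down a strict priority order: an existing valid duplicate always means DROP, a PK or unique constraint means RECREATE, a small table (under 10,000 estimated rows) means RECREATE, and anything else is UNCERTAIN. For orientation, here is a minimal sketch of logic consistent with those assertions; the names `recommendInvalidIndex`, `InvalidIndexLike`, and `SMALL_TABLE_ROWS` are illustrative, and the package's actual `getInvalidIndexRecommendation` in `lib/checkup.ts` may differ:

```ts
// Hypothetical sketch, not the package's source: a recommendation function
// consistent with every assertion in the decision-tree tests above.
type Recommendation = "DROP" | "RECREATE" | "UNCERTAIN";

interface InvalidIndexLike {
  has_valid_duplicate: boolean;
  is_pk: boolean;
  is_unique: boolean;
  table_row_estimate: number;
}

const SMALL_TABLE_ROWS = 10_000; // tests probe the 9999 vs 10000 boundary

function recommendInvalidIndex(idx: InvalidIndexLike): Recommendation {
  // 1. A valid duplicate already exists: the invalid index is safe to DROP.
  //    This takes precedence over everything, including PK-backing indexes.
  if (idx.has_valid_duplicate) return "DROP";
  // 2. Backs a PK or unique constraint: it must be rebuilt, not dropped.
  if (idx.is_pk || idx.is_unique) return "RECREATE";
  // 3. Small table: rebuilding is cheap, so RECREATE.
  if (idx.table_row_estimate < SMALL_TABLE_ROWS) return "RECREATE";
  // 4. Large table, no duplicate, no constraint: leave it to human judgment.
  return "UNCERTAIN";
}
```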
package/test/config-consistency.test.ts
ADDED
@@ -0,0 +1,36 @@
+/**
+ * Tests that config files are consistent with what the CLI expects.
+ * Catches schema mismatches like pg_statistic in wrong schema.
+ */
+import { describe, test, expect } from "bun:test";
+import { readFileSync } from "fs";
+import { resolve } from "path";
+
+const configDir = resolve(import.meta.dir, "../../config");
+
+describe("Config consistency", () => {
+  test("target-db/init.sql creates pg_statistic in postgres_ai schema", () => {
+    const initSql = readFileSync(resolve(configDir, "target-db/init.sql"), "utf8");
+
+    // Must create postgres_ai schema
+    expect(initSql).toMatch(/create\s+schema\s+if\s+not\s+exists\s+postgres_ai/i);
+
+    // Must create view in postgres_ai schema, not public
+    expect(initSql).toMatch(/create\s+or\s+replace\s+view\s+postgres_ai\.pg_statistic/i);
+    expect(initSql).not.toMatch(/create\s+or\s+replace\s+view\s+public\.pg_statistic/i);
+
+    // Must grant on postgres_ai.pg_statistic
+    expect(initSql).toMatch(/grant\s+select\s+on\s+postgres_ai\.pg_statistic/i);
+  });
+
+  test("pgwatch metrics.yml uses postgres_ai.pg_statistic", () => {
+    const metricsYml = readFileSync(
+      resolve(configDir, "pgwatch-prometheus/metrics.yml"),
+      "utf8"
+    );
+
+    // Should reference postgres_ai.pg_statistic, not public.pg_statistic
+    expect(metricsYml).not.toMatch(/public\.pg_statistic/);
+    expect(metricsYml).toMatch(/postgres_ai\.pg_statistic/);
+  });
+});
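These checks only assert regex patterns against the shipped config files, so it is easy to sanity-check them in isolation. The snippet below feeds the patterns a minimal `init.sql` fragment that would satisfy them; `sampleInitSql` and the `postgres_ai_mon` grantee are illustrative assumptions (the role name does appear elsewhere in this release's tests), and the real `config/target-db/init.sql` is certainly fuller:

```ts
// Illustrative only: a minimal init.sql fragment that the regex assertions
// above would accept. The shipped file may define the view differently.
const sampleInitSql = `
create schema if not exists postgres_ai;
create or replace view postgres_ai.pg_statistic as
  select * from pg_catalog.pg_statistic;
grant select on postgres_ai.pg_statistic to postgres_ai_mon;
`;

// Each pattern from the test, applied to the fragment:
console.log(/create\s+schema\s+if\s+not\s+exists\s+postgres_ai/i.test(sampleInitSql));          // true
console.log(/create\s+or\s+replace\s+view\s+postgres_ai\.pg_statistic/i.test(sampleInitSql));   // true
console.log(/create\s+or\s+replace\s+view\s+public\.pg_statistic/i.test(sampleInitSql));        // false
console.log(/grant\s+select\s+on\s+postgres_ai\.pg_statistic/i.test(sampleInitSql));            // true
```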
package/test/init.integration.test.ts
CHANGED
@@ -241,70 +241,76 @@ describe.skipIf(skipTests)("integration: prepare-db", () => {
     }
   });

-  test(
-
+  test(
+    "fixes slightly-off permissions idempotently",
+    async () => {
+      pg = await createTempPostgres();

-
-
-
-
-
-
-
-
-
-
+      try {
+        // Create monitoring role with wrong password, no grants.
+        {
+          const c = new Client({ connectionString: pg.adminUri });
+          await c.connect();
+          await c.query(
+            "do $$ begin if not exists (select 1 from pg_roles where rolname='postgres_ai_mon') then create role postgres_ai_mon login password 'wrong'; end if; end $$;"
+          );
+          await c.end();
+        }

-
-
-
-
-
+        // Run init (should grant everything).
+        {
+          const r = runCliInit([pg.adminUri, "--password", "correctpw", "--skip-optional-permissions"]);
+          expect(r.status).toBe(0);
+        }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        // Verify privileges.
+        {
+          const c = new Client({ connectionString: pg.adminUri });
+          await c.connect();
+          const dbOk = await c.query(
+            "select has_database_privilege('postgres_ai_mon', current_database(), 'CONNECT') as ok"
+          );
+          expect(dbOk.rows[0].ok).toBe(true);
+          const roleOk = await c.query("select pg_has_role('postgres_ai_mon', 'pg_monitor', 'member') as ok");
+          expect(roleOk.rows[0].ok).toBe(true);
+          const idxOk = await c.query(
+            "select has_table_privilege('postgres_ai_mon', 'pg_catalog.pg_index', 'SELECT') as ok"
+          );
+          expect(idxOk.rows[0].ok).toBe(true);
+          const viewOk = await c.query(
+            "select has_table_privilege('postgres_ai_mon', 'postgres_ai.pg_statistic', 'SELECT') as ok"
+          );
+          expect(viewOk.rows[0].ok).toBe(true);
+          const sp = await c.query("select rolconfig from pg_roles where rolname='postgres_ai_mon'");
+          expect(Array.isArray(sp.rows[0].rolconfig)).toBe(true);
+          expect(sp.rows[0].rolconfig.some((v: string) => String(v).includes("search_path="))).toBe(true);
+          await c.end();
+        }

-
-
-
-
+        // Run init again (idempotent).
+        {
+          const r = runCliInit([pg.adminUri, "--password", "correctpw", "--skip-optional-permissions"]);
+          expect(r.status).toBe(0);
+        }
+      } finally {
+        await pg.cleanup();
       }
-  }
-
-
-});
+    },
+    { timeout: 15000 }
+  );

-  test(
-
+  test(
+    "reports nicely when lacking permissions",
+    async () => {
+      pg = await createTempPostgres();

-
-
-
-
-
-
-
+      try {
+        // Create limited user that can connect but cannot create roles / grant.
+        const limitedPw = "limitedpw";
+        {
+          const c = new Client({ connectionString: pg.adminUri });
+          await c.connect();
+          await c.query(`do $$ begin
   if not exists (select 1 from pg_roles where rolname='limited') then
     begin
       create role limited login password ${sqlLiteral(limitedPw)};
@@ -313,20 +319,22 @@ describe.skipIf(skipTests)("integration: prepare-db", () => {
     end;
   end if;
 end $$;`);
-
-
-
+          await c.query("grant connect on database testdb to limited");
+          await c.end();
+        }

-
-
-
-
-
-
-
-
-
-
+        const limitedUri = `postgresql://limited:${limitedPw}@127.0.0.1:${pg.port}/testdb`;
+        const r = runCliInit([limitedUri, "--password", "monpw", "--skip-optional-permissions"]);
+        expect(r.status).not.toBe(0);
+        expect(r.stderr).toMatch(/Error: prepare-db:/);
+        expect(r.stderr).toMatch(/Failed at step "/);
+        expect(r.stderr).toMatch(/Fix: connect as a superuser/i);
+      } finally {
+        await pg.cleanup();
+      }
+    },
+    { timeout: 15000 }
+  );

   test(
     "--verify returns 0 when ok and non-zero when missing",
@@ -399,6 +407,7 @@ describe.skipIf(skipTests)("integration: prepare-db", () => {
     }
   });

+  // 60s timeout for PostgreSQL startup + multiple SQL queries in slow CI
   test("explain_generic validates input and prevents SQL injection", async () => {
     pg = await createTempPostgres();

@@ -495,5 +504,5 @@ describe.skipIf(skipTests)("integration: prepare-db", () => {
     } finally {
       await pg.cleanup();
     }
-  });
+  }, { timeout: 60000 });
 });
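The recurring change across these integration tests is the per-test timeout: each slow test now passes an options object as the third argument to bun:test's `test()`, either in the multi-argument form (`{ timeout: 15000 }`) or appended after the callback (`{ timeout: 60000 }`). A minimal sketch of the pattern, with an illustrative test name and a placeholder async body standing in for the container-spawning work:

```ts
import { test, expect } from "bun:test";

// Per-test timeout pattern used above: the third argument to test() carries
// an options object; timeout is in milliseconds and overrides the runner's
// default (5 s at the time of writing).
test(
  "slow operation finishes within the extended budget",
  async () => {
    // Placeholder for slow work such as starting a temporary PostgreSQL.
    const value = await new Promise<string>((resolve) =>
      setTimeout(() => resolve("done"), 100)
    );
    expect(value).toBe("done");
  },
  { timeout: 15000 } // fail this test only after 15 s
);
```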