@neverinfamous/postgres-mcp 1.2.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (293)
  1. package/README.md +202 -148
  2. package/dist/__tests__/benchmarks/codemode.bench.d.ts +10 -0
  3. package/dist/__tests__/benchmarks/codemode.bench.d.ts.map +1 -0
  4. package/dist/__tests__/benchmarks/codemode.bench.js +159 -0
  5. package/dist/__tests__/benchmarks/codemode.bench.js.map +1 -0
  6. package/dist/__tests__/benchmarks/connection-pool.bench.d.ts +10 -0
  7. package/dist/__tests__/benchmarks/connection-pool.bench.d.ts.map +1 -0
  8. package/dist/__tests__/benchmarks/connection-pool.bench.js +123 -0
  9. package/dist/__tests__/benchmarks/connection-pool.bench.js.map +1 -0
  10. package/dist/__tests__/benchmarks/handler-dispatch.bench.d.ts +11 -0
  11. package/dist/__tests__/benchmarks/handler-dispatch.bench.d.ts.map +1 -0
  12. package/dist/__tests__/benchmarks/handler-dispatch.bench.js +199 -0
  13. package/dist/__tests__/benchmarks/handler-dispatch.bench.js.map +1 -0
  14. package/dist/__tests__/benchmarks/logger-sanitization.bench.d.ts +15 -0
  15. package/dist/__tests__/benchmarks/logger-sanitization.bench.d.ts.map +1 -0
  16. package/dist/__tests__/benchmarks/logger-sanitization.bench.js +155 -0
  17. package/dist/__tests__/benchmarks/logger-sanitization.bench.js.map +1 -0
  18. package/dist/__tests__/benchmarks/resource-prompts.bench.d.ts +10 -0
  19. package/dist/__tests__/benchmarks/resource-prompts.bench.d.ts.map +1 -0
  20. package/dist/__tests__/benchmarks/resource-prompts.bench.js +181 -0
  21. package/dist/__tests__/benchmarks/resource-prompts.bench.js.map +1 -0
  22. package/dist/__tests__/benchmarks/schema-parsing.bench.d.ts +11 -0
  23. package/dist/__tests__/benchmarks/schema-parsing.bench.d.ts.map +1 -0
  24. package/dist/__tests__/benchmarks/schema-parsing.bench.js +209 -0
  25. package/dist/__tests__/benchmarks/schema-parsing.bench.js.map +1 -0
  26. package/dist/__tests__/benchmarks/tool-filtering.bench.d.ts +9 -0
  27. package/dist/__tests__/benchmarks/tool-filtering.bench.d.ts.map +1 -0
  28. package/dist/__tests__/benchmarks/tool-filtering.bench.js +83 -0
  29. package/dist/__tests__/benchmarks/tool-filtering.bench.js.map +1 -0
  30. package/dist/__tests__/benchmarks/transport-auth.bench.d.ts +10 -0
  31. package/dist/__tests__/benchmarks/transport-auth.bench.d.ts.map +1 -0
  32. package/dist/__tests__/benchmarks/transport-auth.bench.js +128 -0
  33. package/dist/__tests__/benchmarks/transport-auth.bench.js.map +1 -0
  34. package/dist/__tests__/benchmarks/utilities.bench.d.ts +10 -0
  35. package/dist/__tests__/benchmarks/utilities.bench.d.ts.map +1 -0
  36. package/dist/__tests__/benchmarks/utilities.bench.js +164 -0
  37. package/dist/__tests__/benchmarks/utilities.bench.js.map +1 -0
  38. package/dist/adapters/DatabaseAdapter.d.ts.map +1 -1
  39. package/dist/adapters/DatabaseAdapter.js +12 -0
  40. package/dist/adapters/DatabaseAdapter.js.map +1 -1
  41. package/dist/adapters/postgresql/PostgresAdapter.d.ts.map +1 -1
  42. package/dist/adapters/postgresql/PostgresAdapter.js +56 -3
  43. package/dist/adapters/postgresql/PostgresAdapter.js.map +1 -1
  44. package/dist/adapters/postgresql/prompts/ltree.js +2 -2
  45. package/dist/adapters/postgresql/prompts/ltree.js.map +1 -1
  46. package/dist/adapters/postgresql/schemas/admin.d.ts +10 -5
  47. package/dist/adapters/postgresql/schemas/admin.d.ts.map +1 -1
  48. package/dist/adapters/postgresql/schemas/admin.js +10 -5
  49. package/dist/adapters/postgresql/schemas/admin.js.map +1 -1
  50. package/dist/adapters/postgresql/schemas/backup.d.ts +45 -27
  51. package/dist/adapters/postgresql/schemas/backup.d.ts.map +1 -1
  52. package/dist/adapters/postgresql/schemas/backup.js +64 -26
  53. package/dist/adapters/postgresql/schemas/backup.js.map +1 -1
  54. package/dist/adapters/postgresql/schemas/core.d.ts +53 -19
  55. package/dist/adapters/postgresql/schemas/core.d.ts.map +1 -1
  56. package/dist/adapters/postgresql/schemas/core.js +61 -17
  57. package/dist/adapters/postgresql/schemas/core.js.map +1 -1
  58. package/dist/adapters/postgresql/schemas/cron.d.ts +51 -32
  59. package/dist/adapters/postgresql/schemas/cron.d.ts.map +1 -1
  60. package/dist/adapters/postgresql/schemas/cron.js +64 -44
  61. package/dist/adapters/postgresql/schemas/cron.js.map +1 -1
  62. package/dist/adapters/postgresql/schemas/extensions.d.ts +224 -110
  63. package/dist/adapters/postgresql/schemas/extensions.d.ts.map +1 -1
  64. package/dist/adapters/postgresql/schemas/extensions.js +245 -96
  65. package/dist/adapters/postgresql/schemas/extensions.js.map +1 -1
  66. package/dist/adapters/postgresql/schemas/index.d.ts +7 -6
  67. package/dist/adapters/postgresql/schemas/index.d.ts.map +1 -1
  68. package/dist/adapters/postgresql/schemas/index.js +16 -8
  69. package/dist/adapters/postgresql/schemas/index.js.map +1 -1
  70. package/dist/adapters/postgresql/schemas/introspection.d.ts +445 -0
  71. package/dist/adapters/postgresql/schemas/introspection.d.ts.map +1 -0
  72. package/dist/adapters/postgresql/schemas/introspection.js +478 -0
  73. package/dist/adapters/postgresql/schemas/introspection.js.map +1 -0
  74. package/dist/adapters/postgresql/schemas/jsonb.d.ts +102 -42
  75. package/dist/adapters/postgresql/schemas/jsonb.d.ts.map +1 -1
  76. package/dist/adapters/postgresql/schemas/jsonb.js +125 -30
  77. package/dist/adapters/postgresql/schemas/jsonb.js.map +1 -1
  78. package/dist/adapters/postgresql/schemas/monitoring.d.ts +69 -36
  79. package/dist/adapters/postgresql/schemas/monitoring.d.ts.map +1 -1
  80. package/dist/adapters/postgresql/schemas/monitoring.js +98 -40
  81. package/dist/adapters/postgresql/schemas/monitoring.js.map +1 -1
  82. package/dist/adapters/postgresql/schemas/partitioning.d.ts +21 -24
  83. package/dist/adapters/postgresql/schemas/partitioning.d.ts.map +1 -1
  84. package/dist/adapters/postgresql/schemas/partitioning.js +26 -14
  85. package/dist/adapters/postgresql/schemas/partitioning.js.map +1 -1
  86. package/dist/adapters/postgresql/schemas/partman.d.ts +69 -0
  87. package/dist/adapters/postgresql/schemas/partman.d.ts.map +1 -1
  88. package/dist/adapters/postgresql/schemas/partman.js +46 -33
  89. package/dist/adapters/postgresql/schemas/partman.js.map +1 -1
  90. package/dist/adapters/postgresql/schemas/performance.d.ts +97 -49
  91. package/dist/adapters/postgresql/schemas/performance.d.ts.map +1 -1
  92. package/dist/adapters/postgresql/schemas/performance.js +139 -34
  93. package/dist/adapters/postgresql/schemas/performance.js.map +1 -1
  94. package/dist/adapters/postgresql/schemas/postgis.d.ts +20 -0
  95. package/dist/adapters/postgresql/schemas/postgis.d.ts.map +1 -1
  96. package/dist/adapters/postgresql/schemas/postgis.js +40 -0
  97. package/dist/adapters/postgresql/schemas/postgis.js.map +1 -1
  98. package/dist/adapters/postgresql/schemas/schema-mgmt.d.ts +50 -30
  99. package/dist/adapters/postgresql/schemas/schema-mgmt.d.ts.map +1 -1
  100. package/dist/adapters/postgresql/schemas/schema-mgmt.js +105 -33
  101. package/dist/adapters/postgresql/schemas/schema-mgmt.js.map +1 -1
  102. package/dist/adapters/postgresql/schemas/stats.d.ts +33 -20
  103. package/dist/adapters/postgresql/schemas/stats.d.ts.map +1 -1
  104. package/dist/adapters/postgresql/schemas/stats.js +36 -20
  105. package/dist/adapters/postgresql/schemas/stats.js.map +1 -1
  106. package/dist/adapters/postgresql/schemas/text-search.d.ts +34 -19
  107. package/dist/adapters/postgresql/schemas/text-search.d.ts.map +1 -1
  108. package/dist/adapters/postgresql/schemas/text-search.js +52 -13
  109. package/dist/adapters/postgresql/schemas/text-search.js.map +1 -1
  110. package/dist/adapters/postgresql/tools/admin.d.ts.map +1 -1
  111. package/dist/adapters/postgresql/tools/admin.js +272 -186
  112. package/dist/adapters/postgresql/tools/admin.js.map +1 -1
  113. package/dist/adapters/postgresql/tools/backup/dump.d.ts.map +1 -1
  114. package/dist/adapters/postgresql/tools/backup/dump.js +376 -350
  115. package/dist/adapters/postgresql/tools/backup/dump.js.map +1 -1
  116. package/dist/adapters/postgresql/tools/citext.d.ts.map +1 -1
  117. package/dist/adapters/postgresql/tools/citext.js +333 -243
  118. package/dist/adapters/postgresql/tools/citext.js.map +1 -1
  119. package/dist/adapters/postgresql/tools/codemode/index.d.ts.map +1 -1
  120. package/dist/adapters/postgresql/tools/codemode/index.js +2 -11
  121. package/dist/adapters/postgresql/tools/codemode/index.js.map +1 -1
  122. package/dist/adapters/postgresql/tools/core/convenience.d.ts +9 -1
  123. package/dist/adapters/postgresql/tools/core/convenience.d.ts.map +1 -1
  124. package/dist/adapters/postgresql/tools/core/convenience.js +101 -19
  125. package/dist/adapters/postgresql/tools/core/convenience.js.map +1 -1
  126. package/dist/adapters/postgresql/tools/core/error-helpers.d.ts +48 -0
  127. package/dist/adapters/postgresql/tools/core/error-helpers.d.ts.map +1 -0
  128. package/dist/adapters/postgresql/tools/core/error-helpers.js +256 -0
  129. package/dist/adapters/postgresql/tools/core/error-helpers.js.map +1 -0
  130. package/dist/adapters/postgresql/tools/core/health.d.ts.map +1 -1
  131. package/dist/adapters/postgresql/tools/core/health.js +18 -4
  132. package/dist/adapters/postgresql/tools/core/health.js.map +1 -1
  133. package/dist/adapters/postgresql/tools/core/indexes.d.ts.map +1 -1
  134. package/dist/adapters/postgresql/tools/core/indexes.js +48 -6
  135. package/dist/adapters/postgresql/tools/core/indexes.js.map +1 -1
  136. package/dist/adapters/postgresql/tools/core/objects.d.ts.map +1 -1
  137. package/dist/adapters/postgresql/tools/core/objects.js +104 -85
  138. package/dist/adapters/postgresql/tools/core/objects.js.map +1 -1
  139. package/dist/adapters/postgresql/tools/core/query.d.ts.map +1 -1
  140. package/dist/adapters/postgresql/tools/core/query.js +100 -42
  141. package/dist/adapters/postgresql/tools/core/query.js.map +1 -1
  142. package/dist/adapters/postgresql/tools/core/schemas.d.ts +51 -25
  143. package/dist/adapters/postgresql/tools/core/schemas.d.ts.map +1 -1
  144. package/dist/adapters/postgresql/tools/core/schemas.js +51 -25
  145. package/dist/adapters/postgresql/tools/core/schemas.js.map +1 -1
  146. package/dist/adapters/postgresql/tools/core/tables.d.ts.map +1 -1
  147. package/dist/adapters/postgresql/tools/core/tables.js +72 -32
  148. package/dist/adapters/postgresql/tools/core/tables.js.map +1 -1
  149. package/dist/adapters/postgresql/tools/cron.d.ts.map +1 -1
  150. package/dist/adapters/postgresql/tools/cron.js +333 -206
  151. package/dist/adapters/postgresql/tools/cron.js.map +1 -1
  152. package/dist/adapters/postgresql/tools/introspection.d.ts +15 -0
  153. package/dist/adapters/postgresql/tools/introspection.d.ts.map +1 -0
  154. package/dist/adapters/postgresql/tools/introspection.js +1682 -0
  155. package/dist/adapters/postgresql/tools/introspection.js.map +1 -0
  156. package/dist/adapters/postgresql/tools/jsonb/advanced.d.ts.map +1 -1
  157. package/dist/adapters/postgresql/tools/jsonb/advanced.js +394 -297
  158. package/dist/adapters/postgresql/tools/jsonb/advanced.js.map +1 -1
  159. package/dist/adapters/postgresql/tools/jsonb/basic.d.ts.map +1 -1
  160. package/dist/adapters/postgresql/tools/jsonb/basic.js +686 -398
  161. package/dist/adapters/postgresql/tools/jsonb/basic.js.map +1 -1
  162. package/dist/adapters/postgresql/tools/kcache.d.ts.map +1 -1
  163. package/dist/adapters/postgresql/tools/kcache.js +278 -246
  164. package/dist/adapters/postgresql/tools/kcache.js.map +1 -1
  165. package/dist/adapters/postgresql/tools/ltree.d.ts.map +1 -1
  166. package/dist/adapters/postgresql/tools/ltree.js +137 -38
  167. package/dist/adapters/postgresql/tools/ltree.js.map +1 -1
  168. package/dist/adapters/postgresql/tools/monitoring.d.ts.map +1 -1
  169. package/dist/adapters/postgresql/tools/monitoring.js +86 -55
  170. package/dist/adapters/postgresql/tools/monitoring.js.map +1 -1
  171. package/dist/adapters/postgresql/tools/partitioning.d.ts.map +1 -1
  172. package/dist/adapters/postgresql/tools/partitioning.js +79 -15
  173. package/dist/adapters/postgresql/tools/partitioning.js.map +1 -1
  174. package/dist/adapters/postgresql/tools/partman/management.d.ts.map +1 -1
  175. package/dist/adapters/postgresql/tools/partman/management.js +43 -56
  176. package/dist/adapters/postgresql/tools/partman/management.js.map +1 -1
  177. package/dist/adapters/postgresql/tools/partman/operations.d.ts.map +1 -1
  178. package/dist/adapters/postgresql/tools/partman/operations.js +137 -24
  179. package/dist/adapters/postgresql/tools/partman/operations.js.map +1 -1
  180. package/dist/adapters/postgresql/tools/performance/analysis.d.ts.map +1 -1
  181. package/dist/adapters/postgresql/tools/performance/analysis.js +276 -165
  182. package/dist/adapters/postgresql/tools/performance/analysis.js.map +1 -1
  183. package/dist/adapters/postgresql/tools/performance/explain.d.ts.map +1 -1
  184. package/dist/adapters/postgresql/tools/performance/explain.js +61 -21
  185. package/dist/adapters/postgresql/tools/performance/explain.js.map +1 -1
  186. package/dist/adapters/postgresql/tools/performance/monitoring.d.ts.map +1 -1
  187. package/dist/adapters/postgresql/tools/performance/monitoring.js +52 -12
  188. package/dist/adapters/postgresql/tools/performance/monitoring.js.map +1 -1
  189. package/dist/adapters/postgresql/tools/performance/optimization.d.ts.map +1 -1
  190. package/dist/adapters/postgresql/tools/performance/optimization.js +92 -81
  191. package/dist/adapters/postgresql/tools/performance/optimization.js.map +1 -1
  192. package/dist/adapters/postgresql/tools/performance/stats.d.ts.map +1 -1
  193. package/dist/adapters/postgresql/tools/performance/stats.js +182 -60
  194. package/dist/adapters/postgresql/tools/performance/stats.js.map +1 -1
  195. package/dist/adapters/postgresql/tools/pgcrypto.d.ts.map +1 -1
  196. package/dist/adapters/postgresql/tools/pgcrypto.js +277 -102
  197. package/dist/adapters/postgresql/tools/pgcrypto.js.map +1 -1
  198. package/dist/adapters/postgresql/tools/postgis/advanced.d.ts.map +1 -1
  199. package/dist/adapters/postgresql/tools/postgis/advanced.js +298 -230
  200. package/dist/adapters/postgresql/tools/postgis/advanced.js.map +1 -1
  201. package/dist/adapters/postgresql/tools/postgis/basic.d.ts.map +1 -1
  202. package/dist/adapters/postgresql/tools/postgis/basic.js +370 -251
  203. package/dist/adapters/postgresql/tools/postgis/basic.js.map +1 -1
  204. package/dist/adapters/postgresql/tools/postgis/standalone.d.ts.map +1 -1
  205. package/dist/adapters/postgresql/tools/postgis/standalone.js +135 -51
  206. package/dist/adapters/postgresql/tools/postgis/standalone.js.map +1 -1
  207. package/dist/adapters/postgresql/tools/schema.d.ts.map +1 -1
  208. package/dist/adapters/postgresql/tools/schema.js +580 -233
  209. package/dist/adapters/postgresql/tools/schema.js.map +1 -1
  210. package/dist/adapters/postgresql/tools/stats/advanced.d.ts.map +1 -1
  211. package/dist/adapters/postgresql/tools/stats/advanced.js +567 -506
  212. package/dist/adapters/postgresql/tools/stats/advanced.js.map +1 -1
  213. package/dist/adapters/postgresql/tools/stats/basic.d.ts.map +1 -1
  214. package/dist/adapters/postgresql/tools/stats/basic.js +340 -316
  215. package/dist/adapters/postgresql/tools/stats/basic.js.map +1 -1
  216. package/dist/adapters/postgresql/tools/text.d.ts.map +1 -1
  217. package/dist/adapters/postgresql/tools/text.js +690 -337
  218. package/dist/adapters/postgresql/tools/text.js.map +1 -1
  219. package/dist/adapters/postgresql/tools/transactions.d.ts.map +1 -1
  220. package/dist/adapters/postgresql/tools/transactions.js +157 -50
  221. package/dist/adapters/postgresql/tools/transactions.js.map +1 -1
  222. package/dist/adapters/postgresql/tools/vector/advanced.d.ts.map +1 -1
  223. package/dist/adapters/postgresql/tools/vector/advanced.js +18 -0
  224. package/dist/adapters/postgresql/tools/vector/advanced.js.map +1 -1
  225. package/dist/adapters/postgresql/tools/vector/basic.d.ts.map +1 -1
  226. package/dist/adapters/postgresql/tools/vector/basic.js +100 -53
  227. package/dist/adapters/postgresql/tools/vector/basic.js.map +1 -1
  228. package/dist/auth/auth-context.d.ts +28 -0
  229. package/dist/auth/auth-context.d.ts.map +1 -0
  230. package/dist/auth/auth-context.js +37 -0
  231. package/dist/auth/auth-context.js.map +1 -0
  232. package/dist/auth/scope-map.d.ts +20 -0
  233. package/dist/auth/scope-map.d.ts.map +1 -0
  234. package/dist/auth/scope-map.js +40 -0
  235. package/dist/auth/scope-map.js.map +1 -0
  236. package/dist/auth/scopes.d.ts.map +1 -1
  237. package/dist/auth/scopes.js +2 -0
  238. package/dist/auth/scopes.js.map +1 -1
  239. package/dist/cli.js +1 -1
  240. package/dist/cli.js.map +1 -1
  241. package/dist/codemode/api.d.ts +1 -0
  242. package/dist/codemode/api.d.ts.map +1 -1
  243. package/dist/codemode/api.js +35 -1
  244. package/dist/codemode/api.js.map +1 -1
  245. package/dist/codemode/index.d.ts +0 -2
  246. package/dist/codemode/index.d.ts.map +1 -1
  247. package/dist/codemode/index.js +0 -4
  248. package/dist/codemode/index.js.map +1 -1
  249. package/dist/codemode/sandbox.d.ts +14 -1
  250. package/dist/codemode/sandbox.d.ts.map +1 -1
  251. package/dist/codemode/sandbox.js +58 -19
  252. package/dist/codemode/sandbox.js.map +1 -1
  253. package/dist/codemode/types.d.ts.map +1 -1
  254. package/dist/codemode/types.js +3 -0
  255. package/dist/codemode/types.js.map +1 -1
  256. package/dist/constants/ServerInstructions.d.ts +5 -1
  257. package/dist/constants/ServerInstructions.d.ts.map +1 -1
  258. package/dist/constants/ServerInstructions.js +117 -31
  259. package/dist/constants/ServerInstructions.js.map +1 -1
  260. package/dist/filtering/ToolConstants.d.ts +22 -19
  261. package/dist/filtering/ToolConstants.d.ts.map +1 -1
  262. package/dist/filtering/ToolConstants.js +48 -37
  263. package/dist/filtering/ToolConstants.js.map +1 -1
  264. package/dist/filtering/ToolFilter.d.ts.map +1 -1
  265. package/dist/filtering/ToolFilter.js +10 -13
  266. package/dist/filtering/ToolFilter.js.map +1 -1
  267. package/dist/pool/ConnectionPool.js +1 -1
  268. package/dist/pool/ConnectionPool.js.map +1 -1
  269. package/dist/transports/http.d.ts +1 -0
  270. package/dist/transports/http.d.ts.map +1 -1
  271. package/dist/transports/http.js +75 -21
  272. package/dist/transports/http.js.map +1 -1
  273. package/dist/types/filtering.d.ts +2 -2
  274. package/dist/types/filtering.d.ts.map +1 -1
  275. package/dist/utils/icons.d.ts.map +1 -1
  276. package/dist/utils/icons.js +5 -0
  277. package/dist/utils/icons.js.map +1 -1
  278. package/dist/utils/where-clause.d.ts.map +1 -1
  279. package/dist/utils/where-clause.js +24 -0
  280. package/dist/utils/where-clause.js.map +1 -1
  281. package/package.json +20 -13
  282. package/dist/codemode/sandbox-factory.d.ts +0 -72
  283. package/dist/codemode/sandbox-factory.d.ts.map +0 -1
  284. package/dist/codemode/sandbox-factory.js +0 -88
  285. package/dist/codemode/sandbox-factory.js.map +0 -1
  286. package/dist/codemode/worker-sandbox.d.ts +0 -82
  287. package/dist/codemode/worker-sandbox.d.ts.map +0 -1
  288. package/dist/codemode/worker-sandbox.js +0 -244
  289. package/dist/codemode/worker-sandbox.js.map +0 -1
  290. package/dist/codemode/worker-script.d.ts +0 -8
  291. package/dist/codemode/worker-script.d.ts.map +0 -1
  292. package/dist/codemode/worker-script.js +0 -113
  293. package/dist/codemode/worker-script.js.map +0 -1
@@ -0,0 +1,1682 @@
1
+ /**
2
+ * PostgreSQL Introspection Tools
3
+ *
4
+ * Agent-optimized database analysis tools for dependency graphs,
5
+ * cascade simulation, schema snapshots, migration risk analysis,
6
+ * and schema version tracking.
7
+ * 11 tools total (6 read-only + 5 migration tracking).
8
+ */
9
+ import { createHash } from "node:crypto";
10
+ import { readOnly, write, destructive } from "../../../utils/annotations.js";
11
+ import { getToolIcons } from "../../../utils/icons.js";
12
+ import { DependencyGraphSchemaBase, DependencyGraphSchema, TopologicalSortSchemaBase, TopologicalSortSchema, CascadeSimulatorSchemaBase, CascadeSimulatorSchema, SchemaSnapshotSchemaBase, SchemaSnapshotSchema, ConstraintAnalysisSchemaBase, ConstraintAnalysisSchema, MigrationRisksSchemaBase, MigrationRisksSchema, MigrationInitSchemaBase, MigrationInitSchema, MigrationRecordSchemaBase, MigrationRecordSchema, MigrationApplySchemaBase, MigrationApplySchema, MigrationRollbackSchemaBase, MigrationRollbackSchema, MigrationHistorySchemaBase, MigrationHistorySchema, MigrationStatusSchemaBase, MigrationStatusSchema,
13
+ // Output schemas
14
+ DependencyGraphOutputSchema, TopologicalSortOutputSchema, CascadeSimulatorOutputSchema, SchemaSnapshotOutputSchema, ConstraintAnalysisOutputSchema, MigrationRisksOutputSchema, MigrationInitOutputSchema, MigrationRecordOutputSchema, MigrationApplyOutputSchema, MigrationRollbackOutputSchema, MigrationHistoryOutputSchema, MigrationStatusOutputSchema, } from "../schemas/index.js";
15
+ // =============================================================================
16
+ // Shared queries
17
+ // =============================================================================
18
/**
 * Fetch all foreign key relationships across user schemas.
 *
 * Column lists are returned in constraint key order (conkey/confkey position),
 * so from_columns[i] always pairs with to_columns[i] for composite keys.
 * (The previous array_agg(DISTINCT ... ORDER BY attname) ordered each side
 * alphabetically and independently, which mis-paired composite-key columns.)
 *
 * @param adapter - database adapter exposing executeQuery(sql, params)
 * @param schemaFilter - optional schema name; restricts results to one schema
 * @param excludeExtensionSchemas - unless explicitly false (and no schema
 *   filter is given), well-known extension schemas are skipped
 * @returns array of FK descriptors with constraint name, endpoints, columns,
 *   and ON DELETE / ON UPDATE actions
 */
async function fetchForeignKeys(adapter, schemaFilter, excludeExtensionSchemas) {
    const params = [];
    let schemaClause = "";
    if (schemaFilter) {
        params.push(schemaFilter);
        schemaClause = `AND src_ns.nspname = $${String(params.length)}`;
    }
    const extensionSchemaExclude = !schemaFilter && excludeExtensionSchemas !== false
        ? "AND src_ns.nspname NOT IN ('cron', 'topology', 'tiger', 'tiger_data')"
        : "";
    // unnest ... WITH ORDINALITY preserves the key position so composite-key
    // columns stay positionally aligned between the two sides.
    const result = await adapter.executeQuery(`SELECT
        c.conname AS constraint_name,
        src_ns.nspname AS from_schema,
        src_t.relname AS from_table,
        (SELECT array_agg(a.attname ORDER BY k.ord)
           FROM unnest(c.conkey) WITH ORDINALITY AS k(attnum, ord)
           JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = k.attnum
        ) AS from_columns,
        ref_ns.nspname AS to_schema,
        ref_t.relname AS to_table,
        (SELECT array_agg(a.attname ORDER BY k.ord)
           FROM unnest(c.confkey) WITH ORDINALITY AS k(attnum, ord)
           JOIN pg_attribute a ON a.attrelid = c.confrelid AND a.attnum = k.attnum
        ) AS to_columns,
        CASE c.confdeltype
          WHEN 'a' THEN 'NO ACTION' WHEN 'r' THEN 'RESTRICT'
          WHEN 'c' THEN 'CASCADE' WHEN 'n' THEN 'SET NULL'
          WHEN 'd' THEN 'SET DEFAULT'
        END AS on_delete,
        CASE c.confupdtype
          WHEN 'a' THEN 'NO ACTION' WHEN 'r' THEN 'RESTRICT'
          WHEN 'c' THEN 'CASCADE' WHEN 'n' THEN 'SET NULL'
          WHEN 'd' THEN 'SET DEFAULT'
        END AS on_update
      FROM pg_constraint c
      JOIN pg_class src_t ON src_t.oid = c.conrelid
      JOIN pg_namespace src_ns ON src_ns.oid = src_t.relnamespace
      JOIN pg_class ref_t ON ref_t.oid = c.confrelid
      JOIN pg_namespace ref_ns ON ref_ns.oid = ref_t.relnamespace
      WHERE c.contype = 'f'
        AND src_ns.nspname NOT IN ('pg_catalog', 'information_schema')
        AND src_ns.nspname !~ '^pg_toast'
        ${extensionSchemaExclude}
        ${schemaClause}
      ORDER BY src_ns.nspname, src_t.relname, c.conname`, params.length > 0 ? params : undefined);
    return (result.rows ?? []).map((row) => ({
        constraintName: row["constraint_name"],
        fromSchema: row["from_schema"],
        fromTable: row["from_table"],
        fromColumns: parseArrayColumn(row["from_columns"]),
        toSchema: row["to_schema"],
        toTable: row["to_table"],
        toColumns: parseArrayColumn(row["to_columns"]),
        onDelete: row["on_delete"],
        onUpdate: row["on_update"],
    }));
}
76
/**
 * Fetch all user tables (ordinary and partitioned) with row counts and sizes.
 *
 * @param adapter - database adapter exposing executeQuery(sql, params)
 * @param schemaFilter - optional schema name; restricts results to one schema
 * @param excludeExtensionSchemas - unless explicitly false (and no schema
 *   filter is given), well-known extension schemas are skipped
 * @returns array of { schema, table, rowCount, sizeBytes }
 */
async function fetchTableNodes(adapter, schemaFilter, excludeExtensionSchemas) {
    const queryParams = schemaFilter ? [schemaFilter] : [];
    const schemaSql = schemaFilter
        ? `AND n.nspname = $${String(queryParams.length)}`
        : "";
    // Extension-owned schemas are only excluded when no explicit filter is set.
    const extensionSql = !schemaFilter && excludeExtensionSchemas !== false
        ? "AND n.nspname NOT IN ('cron', 'topology', 'tiger', 'tiger_data')"
        : "";
    // reltuples is -1 for never-analyzed tables; fall back to live-tuple stats.
    const result = await adapter.executeQuery(`SELECT
        n.nspname AS schema,
        c.relname AS table_name,
        CASE WHEN c.reltuples = -1 THEN COALESCE(s.n_live_tup, 0)
             ELSE c.reltuples END::bigint AS row_count,
        pg_table_size(c.oid) AS size_bytes
      FROM pg_class c
      JOIN pg_namespace n ON n.oid = c.relnamespace
      LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
      WHERE c.relkind IN ('r', 'p')
        AND n.nspname NOT IN ('pg_catalog', 'information_schema')
        AND n.nspname !~ '^pg_toast'
        ${extensionSql}
        ${schemaSql}
      ORDER BY n.nspname, c.relname`, queryParams.length > 0 ? queryParams : undefined);
    const tables = [];
    for (const row of result.rows ?? []) {
        tables.push({
            schema: row["schema"],
            table: row["table_name"],
            rowCount: Number(row["row_count"]) || 0,
            sizeBytes: Number(row["size_bytes"]) || 0,
        });
    }
    return tables;
}
111
/**
 * Parse a PostgreSQL array column value.
 *
 * Drivers may hand back either a native JS array or the textual form
 * `{a,b,"quoted item"}`; anything else yields an empty array.
 *
 * @param value - raw column value from the driver
 * @returns array of element strings
 */
function parseArrayColumn(value) {
    if (Array.isArray(value)) {
        return value;
    }
    if (typeof value !== "string") {
        return [];
    }
    // Strip the outer braces, then split on commas and unquote each element.
    const inner = value.replace(/^{|}$/g, "");
    if (inner === "") {
        return [];
    }
    return inner.split(",").map((part) => part.trim().replace(/^"|"$/g, ""));
}
125
/**
 * Build the schema-qualified table name, e.g. "public.users".
 *
 * @param schema - schema name
 * @param table - table name
 * @returns `${schema}.${table}`
 */
function qualifiedName(schema, table) {
    return [schema, table].join(".");
}
131
+ // =============================================================================
132
+ // Graph algorithms
133
+ // =============================================================================
134
/**
 * Detect circular dependencies in a directed graph via depth-first search.
 *
 * @param adjacency - Map of node -> array of neighbor nodes
 * @returns array of cycles; each cycle is a node path that starts and ends
 *   on the same node (e.g. ["a", "b", "a"])
 */
function detectCycles(adjacency) {
    const found = [];
    const done = new Set();     // fully explored nodes
    const onPath = new Set();   // nodes on the current DFS path
    const path = [];
    const visit = (node) => {
        if (onPath.has(node)) {
            // Back edge: the current path re-entered `node`, so the slice of
            // the path starting at `node` (plus `node` again) forms a cycle.
            const start = path.indexOf(node);
            if (start !== -1) {
                found.push([...path.slice(start), node]);
            }
            return;
        }
        if (done.has(node)) {
            return;
        }
        done.add(node);
        onPath.add(node);
        path.push(node);
        const neighbors = adjacency.get(node) ?? [];
        for (const next of neighbors) {
            visit(next);
        }
        path.pop();
        onPath.delete(node);
    };
    for (const start of adjacency.keys()) {
        visit(start);
    }
    return found;
}
167
/**
 * Topologically sort a directed graph using Kahn's algorithm.
 *
 * Ties are broken alphabetically (the ready queue is kept sorted), so the
 * output is deterministic for a given graph.
 *
 * @param adjacency - Map of node -> array of neighbor nodes
 * @param allNodes - Set of every node that must appear in the ordering
 * @returns sorted node array, or null when the graph contains a cycle
 */
function topologicalSort(adjacency, allNodes) {
    // In-degree of every node, counting parallel edges individually.
    const incoming = new Map([...allNodes].map((node) => [node, 0]));
    for (const targets of adjacency.values()) {
        for (const target of targets) {
            incoming.set(target, (incoming.get(target) ?? 0) + 1);
        }
    }
    // Seed the queue with dependency-free nodes, alphabetically ordered.
    const ready = [...incoming.entries()]
        .filter(([, degree]) => degree === 0)
        .map(([node]) => node)
        .sort();
    const order = [];
    while (ready.length > 0) {
        const current = ready.shift();
        if (current === undefined) {
            break;
        }
        order.push(current);
        for (const target of adjacency.get(current) ?? []) {
            const remaining = (incoming.get(target) ?? 1) - 1;
            incoming.set(target, remaining);
            if (remaining === 0) {
                // Keep the queue sorted: insert before the first larger entry.
                let at = 0;
                while (at < ready.length && ready[at] <= target) {
                    at += 1;
                }
                ready.splice(at, 0, target);
            }
        }
    }
    // If some nodes were never emitted, a cycle blocked them.
    return order.length === allNodes.size ? order : null;
}
213
/**
 * Calculate the maximum path depth reachable from the given root nodes.
 *
 * Each root is explored with its own visited set (so cycles terminate),
 * while the best-known depth per node is shared across roots.
 *
 * @param adjacency - Map of node -> array of neighbor nodes
 * @param roots - starting nodes (depth 0)
 * @returns longest depth observed, or 0 when roots is empty
 */
function calculateMaxDepth(adjacency, roots) {
    if (roots.length === 0) {
        return 0;
    }
    let deepest = 0;
    const bestDepth = new Map();
    const walk = (node, depth, seen) => {
        if (seen.has(node)) {
            return;
        }
        seen.add(node);
        const known = bestDepth.get(node) ?? -1;
        if (depth > known) {
            bestDepth.set(node, depth);
            if (depth > deepest) {
                deepest = depth;
            }
        }
        const neighbors = adjacency.get(node) ?? [];
        for (const next of neighbors) {
            walk(next, depth + 1, seen);
        }
    };
    for (const root of roots) {
        walk(root, 0, new Set());
    }
    return deepest;
}
240
+ // =============================================================================
241
+ // Tool factory functions
242
+ // =============================================================================
243
/**
 * Get all introspection tools (6 read-only analysis + 6 migration tracking),
 * each bound to the given database adapter.
 *
 * @param adapter - database adapter shared by every tool handler
 * @returns array of tool definitions
 */
export function getIntrospectionTools(adapter) {
    const factories = [
        createDependencyGraphTool,
        createTopologicalSortTool,
        createCascadeSimulatorTool,
        createSchemaSnapshotTool,
        createConstraintAnalysisTool,
        createMigrationRisksTool,
        createMigrationInitTool,
        createMigrationRecordTool,
        createMigrationApplyTool,
        createMigrationRollbackTool,
        createMigrationHistoryTool,
        createMigrationStatusTool,
    ];
    return factories.map((build) => build(adapter));
}
262
+ // =============================================================================
263
+ // pg_dependency_graph
264
+ // =============================================================================
265
/**
 * Build the `pg_dependency_graph` tool definition.
 *
 * The handler fetches all foreign keys (and optionally table row counts /
 * sizes), builds a directed dependency graph, and returns nodes, edges,
 * detected circular dependencies, and summary stats.
 *
 * @param adapter - database adapter passed through to the shared fetchers
 * @returns tool definition object (name, schemas, annotations, handler)
 */
function createDependencyGraphTool(adapter) {
    return {
        name: "pg_dependency_graph",
        description: "Get the full foreign key dependency graph with cascade paths, row counts, circular dependency detection, and severity assessment. Agent-optimized structured output.",
        group: "introspection",
        inputSchema: DependencyGraphSchemaBase,
        outputSchema: DependencyGraphOutputSchema,
        annotations: readOnly("Dependency Graph"),
        icons: getToolIcons("introspection", readOnly("Dependency Graph")),
        handler: async (params, _context) => {
            const parsed = DependencyGraphSchema.parse(params);
            // Row counts are included unless explicitly disabled.
            const includeRowCounts = parsed.includeRowCounts !== false;
            const excludeExt = parsed.excludeExtensionSchemas;
            // FKs and table stats are independent queries; run them in parallel.
            const [fks, tables] = await Promise.all([
                fetchForeignKeys(adapter, parsed.schema, excludeExt),
                includeRowCounts
                    ? fetchTableNodes(adapter, parsed.schema, excludeExt)
                    : Promise.resolve([]),
            ]);
            const tableMap = new Map(tables.map((t) => [qualifiedName(t.schema, t.table), t]));
            // Build adjacency list (from → to, meaning "from" depends on "to")
            const adjacency = new Map();
            const allNodes = new Set();
            // Ensure all tables are in the node set even if they have no FKs
            for (const t of tables) {
                allNodes.add(qualifiedName(t.schema, t.table));
            }
            for (const fk of fks) {
                const from = qualifiedName(fk.fromSchema, fk.fromTable);
                const to = qualifiedName(fk.toSchema, fk.toTable);
                allNodes.add(from);
                allNodes.add(to);
                const existing = adjacency.get(from) ?? [];
                existing.push(to);
                adjacency.set(from, existing);
            }
            // Find root tables (no dependencies) and leaf tables (no dependents)
            const dependents = new Set();
            for (const [, neighbors] of adjacency) {
                for (const n of neighbors) {
                    dependents.add(n);
                }
            }
            const rootTables = [...allNodes]
                .filter((n) => !adjacency.has(n) || (adjacency.get(n)?.length ?? 0) === 0)
                .sort();
            const leafTables = [...allNodes].filter((n) => !dependents.has(n)).sort();
            // Detect cycles
            const cycles = detectCycles(adjacency);
            // Depth is measured starting from the leaf tables (no dependents),
            // following FK edges toward the referenced tables.
            const maxDepth = calculateMaxDepth(adjacency, leafTables);
            // Build nodes (sorted for deterministic output); row counts are
            // attached only when requested and when stats exist for the table.
            const nodes = [...allNodes].sort().map((name) => {
                const info = tableMap.get(name);
                // NOTE(review): split(".") assumes neither schema nor table
                // name itself contains a dot — verify for quoted identifiers.
                const parts = name.split(".");
                return {
                    table: parts[1] ?? name,
                    schema: parts[0] ?? "public",
                    ...(includeRowCounts && info
                        ? { rowCount: info.rowCount, sizeBytes: info.sizeBytes }
                        : {}),
                };
            });
            // Build edges; FK columns are paired positionally (from[i] → to[i]).
            const edges = fks.map((fk) => ({
                from: qualifiedName(fk.fromSchema, fk.fromTable),
                to: qualifiedName(fk.toSchema, fk.toTable),
                constraint: fk.constraintName,
                columns: fk.fromColumns.map((col, i) => ({
                    from: col,
                    to: fk.toColumns[i] ?? col,
                })),
                onDelete: fk.onDelete,
                onUpdate: fk.onUpdate,
            }));
            // Add hint for nonexistent/empty schema
            const hint = parsed.schema !== undefined && allNodes.size === 0
                ? `Schema '${parsed.schema}' returned no tables. Verify the schema exists with pg_list_schemas.`
                : undefined;
            return {
                nodes,
                edges,
                circularDependencies: cycles,
                stats: {
                    totalTables: allNodes.size,
                    totalRelationships: fks.length,
                    maxDepth,
                    rootTables,
                    leafTables,
                },
                // Spread keeps `hint` out of the payload entirely when unset.
                ...(hint !== undefined && { hint }),
            };
        },
    };
}
359
// =============================================================================
// pg_topological_sort
// =============================================================================
/**
 * Build the pg_topological_sort tool definition.
 *
 * The handler orders tables by their foreign-key dependencies so DDL can run
 * safely: 'create' direction emits referenced tables before referencing ones
 * (for CREATE TABLE), 'drop' direction emits the reverse (for DROP TABLE).
 * Each entry carries its dependency level and direct dependencies. When the
 * graph is cyclic no strict order exists; the handler falls back to
 * alphabetical order and reports the detected cycles.
 *
 * Fix: the two independent catalog fetches were awaited sequentially; they
 * now run in parallel via Promise.all, consistent with
 * createDependencyGraphTool.
 *
 * @param adapter - database adapter used to run the introspection queries
 * @returns the tool definition consumed by the tool registry
 */
function createTopologicalSortTool(adapter) {
    return {
        name: "pg_topological_sort",
        description: "Get tables in safe DDL execution order. 'create' direction: dependencies first (for CREATE TABLE). 'drop' direction: dependents first (for DROP TABLE).",
        group: "introspection",
        inputSchema: TopologicalSortSchemaBase,
        outputSchema: TopologicalSortOutputSchema,
        annotations: readOnly("Topological Sort"),
        icons: getToolIcons("introspection", readOnly("Topological Sort")),
        handler: async (params, _context) => {
            const parsed = TopologicalSortSchema.parse(params);
            const direction = parsed.direction ?? "create";
            const excludeExt = parsed.excludeExtensionSchemas;
            // The two fetches are independent — run them concurrently.
            const [fks, tables] = await Promise.all([
                fetchForeignKeys(adapter, parsed.schema, excludeExt),
                fetchTableNodes(adapter, parsed.schema, excludeExt),
            ]);
            // Build adjacency: A depends on B means A→B
            // For "create" order, we need B before A (dependencies first)
            // For "drop" order, we need A before B (dependents first)
            const adjacency = new Map();
            const allNodes = new Set();
            for (const t of tables) {
                allNodes.add(qualifiedName(t.schema, t.table));
            }
            for (const fk of fks) {
                const from = qualifiedName(fk.fromSchema, fk.fromTable);
                const to = qualifiedName(fk.toSchema, fk.toTable);
                allNodes.add(from);
                allNodes.add(to);
            }
            // Build dependency graph: from→to means "from depends on to"
            const dependsOn = new Map();
            for (const fk of fks) {
                const from = qualifiedName(fk.fromSchema, fk.fromTable);
                const to = qualifiedName(fk.toSchema, fk.toTable);
                if (from === to)
                    continue; // Self-references don't affect ordering
                const deps = dependsOn.get(from) ?? new Set();
                deps.add(to);
                dependsOn.set(from, deps);
            }
            // For create order: edge from dependency → dependent (process deps first)
            // For drop order: edge from dependent → dependency (process dependents first)
            for (const fk of fks) {
                const from = qualifiedName(fk.fromSchema, fk.fromTable);
                const to = qualifiedName(fk.toSchema, fk.toTable);
                if (from === to)
                    continue; // Self-references don't affect ordering
                if (direction === "create") {
                    const existing = adjacency.get(to) ?? [];
                    existing.push(from);
                    adjacency.set(to, existing);
                }
                else {
                    const existing = adjacency.get(from) ?? [];
                    existing.push(to);
                    adjacency.set(from, existing);
                }
            }
            const sorted = topologicalSort(adjacency, allNodes);
            // Only pay for cycle detection when the sort actually failed.
            const cycles = sorted === null ? detectCycles(adjacency) : [];
            // Compute level (depth in the dependency graph)
            // Always use create-order traversal for consistent levels regardless of direction
            const levelMap = new Map();
            if (sorted) {
                // For create direction, sorted is already in dependency order.
                // For drop direction, we need create-order to compute levels correctly.
                let createOrder;
                if (direction === "create") {
                    createOrder = sorted;
                }
                else {
                    // Build create-direction adjacency and sort
                    const createAdj = new Map();
                    for (const fk of fks) {
                        const from = qualifiedName(fk.fromSchema, fk.fromTable);
                        const to = qualifiedName(fk.toSchema, fk.toTable);
                        if (from === to)
                            continue;
                        const existing = createAdj.get(to) ?? [];
                        existing.push(from);
                        createAdj.set(to, existing);
                    }
                    createOrder =
                        topologicalSort(createAdj, allNodes) ?? [...allNodes].sort();
                }
                // Level = 1 + max level of direct dependencies (0 for roots).
                for (const node of createOrder) {
                    const deps = dependsOn.get(node);
                    if (!deps || deps.size === 0) {
                        levelMap.set(node, 0);
                    }
                    else {
                        let maxParentLevel = 0;
                        for (const dep of deps) {
                            const parentLevel = levelMap.get(dep) ?? 0;
                            if (parentLevel >= maxParentLevel) {
                                maxParentLevel = parentLevel + 1;
                            }
                        }
                        levelMap.set(node, maxParentLevel);
                    }
                }
            }
            // Cyclic graphs fall back to alphabetical order (levels default to 0).
            const order = (sorted ?? [...allNodes].sort()).map((name) => {
                const parts = name.split(".");
                return {
                    table: parts[1] ?? name,
                    schema: parts[0] ?? "public",
                    level: levelMap.get(name) ?? 0,
                    dependencies: [...(dependsOn.get(name) ?? [])].sort(),
                };
            });
            // Add hint for nonexistent/empty schema
            const hint = parsed.schema !== undefined && allNodes.size === 0
                ? `Schema '${parsed.schema}' returned no tables. Verify the schema exists with pg_list_schemas.`
                : undefined;
            return {
                order,
                direction,
                hasCycles: sorted === null,
                ...(cycles.length > 0 ? { cycles } : {}),
                ...(hint !== undefined && { hint }),
            };
        },
    };
}
487
// =============================================================================
// pg_cascade_simulator
// =============================================================================
/**
 * Build the pg_cascade_simulator tool definition.
 *
 * The handler walks the reverse foreign-key graph (which tables reference the
 * target) with a breadth-first search and reports every table a DELETE, DROP,
 * or TRUNCATE on the source would touch, the FK action involved, and an
 * overall severity assessment.
 *
 * Fixes over the previous revision:
 * - the three FK-action branches built byte-identical `affected.push(...)`
 *   payloads; classification is now computed once and the entry pushed once
 * - BFS uses an index cursor instead of Array.prototype.shift() (O(1) per
 *   dequeue instead of O(n)), removing the unreachable `undefined` guard
 *
 * @param adapter - database adapter used to run the introspection queries
 * @returns the tool definition consumed by the tool registry
 */
function createCascadeSimulatorTool(adapter) {
    return {
        name: "pg_cascade_simulator",
        description: "Simulate the impact of DELETE, DROP, or TRUNCATE on a table. Returns affected tables, estimated row counts, cascade paths, and severity assessment.",
        group: "introspection",
        inputSchema: CascadeSimulatorSchemaBase,
        outputSchema: CascadeSimulatorOutputSchema,
        annotations: readOnly("Cascade Simulator"),
        icons: getToolIcons("introspection", readOnly("Cascade Simulator")),
        handler: async (params, _context) => {
            const parsed = CascadeSimulatorSchema.parse(params);
            const schema = parsed.schema ?? "public";
            const operation = parsed.operation ?? "DELETE";
            const sourceQName = qualifiedName(schema, parsed.table);
            // Cascade simulator must include ALL schemas for accurate cascade path tracing
            const [fks, tables] = await Promise.all([
                fetchForeignKeys(adapter, undefined, false),
                fetchTableNodes(adapter, undefined, false),
            ]);
            const tableMap = new Map(tables.map((t) => [qualifiedName(t.schema, t.table), t]));
            // Unknown source table: return an empty, low-severity result with an
            // actionable error instead of throwing.
            if (!tableMap.has(sourceQName)) {
                return {
                    sourceTable: sourceQName,
                    operation,
                    affectedTables: [],
                    severity: "low",
                    stats: {
                        totalTablesAffected: 0,
                        cascadeActions: 0,
                        blockingActions: 0,
                        setNullActions: 0,
                        maxDepth: 0,
                    },
                    error: `Table '${sourceQName}' not found. Use pg_list_tables to verify.`,
                };
            }
            // Build reverse adjacency: for each table, find what references it
            // (which tables have FKs pointing TO this table)
            const referencedBy = new Map();
            for (const fk of fks) {
                const to = qualifiedName(fk.toSchema, fk.toTable);
                const existing = referencedBy.get(to) ?? [];
                existing.push(fk);
                referencedBy.set(to, existing);
            }
            const affected = [];
            const visited = new Set([sourceQName]);
            const queue = [{ tableName: sourceQName, path: [sourceQName], depth: 0 }];
            let cascadeActions = 0;
            let blockingActions = 0;
            let setNullActions = 0;
            // BFS with an index cursor — the queue is append-only.
            for (let head = 0; head < queue.length; head++) {
                const current = queue[head];
                for (const ref of referencedBy.get(current.tableName) ?? []) {
                    const refQName = qualifiedName(ref.fromSchema, ref.fromTable);
                    if (visited.has(refQName))
                        continue;
                    visited.add(refQName);
                    // DELETE honors the declared FK action; DROP/TRUNCATE are
                    // simulated as force-cascading regardless of it.
                    const action = operation === "DELETE" ? ref.onDelete : "CASCADE";
                    const isCascade = action === "CASCADE";
                    const isBlocking = action === "RESTRICT" || action === "NO ACTION";
                    const isSetNull = action === "SET NULL" || action === "SET DEFAULT";
                    if (!isCascade && !isBlocking && !isSetNull) {
                        // Unrecognized action: marked visited but not reported
                        // (parity with the previous revision).
                        continue;
                    }
                    if (isCascade) {
                        cascadeActions++;
                    }
                    else if (isBlocking) {
                        blockingActions++;
                    }
                    else {
                        setNullActions++;
                    }
                    const path = [...current.path, refQName];
                    const depth = current.depth + 1;
                    affected.push({
                        table: ref.fromTable,
                        schema: ref.fromSchema,
                        action,
                        estimatedRows: tableMap.get(refQName)?.rowCount,
                        path,
                        depth,
                    });
                    // Only CASCADE removes rows in the child and therefore
                    // propagates further; blocking and SET NULL/DEFAULT stop here.
                    if (isCascade) {
                        queue.push({ tableName: refQName, path, depth });
                    }
                }
            }
            const maxDepth = affected.reduce((max, a) => Math.max(max, a.depth), 0);
            // Severity assessment
            let severity;
            if (blockingActions > 0) {
                severity = "critical"; // Operation will fail
            }
            else if (operation !== "DELETE" && cascadeActions > 0) {
                severity = "critical"; // DROP/TRUNCATE force-cascades everything
            }
            else if (cascadeActions > 5 || maxDepth > 3) {
                severity = "high";
            }
            else if (cascadeActions > 0) {
                severity = "medium";
            }
            else {
                severity = "low";
            }
            return {
                sourceTable: sourceQName,
                operation,
                affectedTables: affected,
                severity,
                stats: {
                    totalTablesAffected: affected.length,
                    cascadeActions,
                    blockingActions,
                    setNullActions,
                    maxDepth,
                },
            };
        },
    };
}
630
// =============================================================================
// pg_schema_snapshot
// =============================================================================
/**
 * Build the pg_schema_snapshot tool definition.
 *
 * The handler collects the requested catalog sections (tables, views,
 * indexes, constraints, functions, triggers, sequences, custom types,
 * extensions) into a single `snapshot` object plus per-section counts in
 * `stats`. An absent or empty `sections` parameter means "all sections".
 * Each section runs its own catalog query, sharing the schema filter and
 * the system/extension exclusion SQL fragments built up front.
 *
 * @param adapter - database adapter used to run the catalog queries
 * @returns the tool definition consumed by the tool registry
 */
function createSchemaSnapshotTool(adapter) {
    return {
        name: "pg_schema_snapshot",
        description: "Get a complete schema snapshot in a single agent-optimized JSON structure. Includes tables, columns, types, constraints, indexes, triggers, sequences, and extensions.",
        group: "introspection",
        inputSchema: SchemaSnapshotSchemaBase,
        outputSchema: SchemaSnapshotOutputSchema,
        annotations: readOnly("Schema Snapshot"),
        icons: getToolIcons("introspection", readOnly("Schema Snapshot")),
        handler: async (params, _context) => {
            const parsed = SchemaSnapshotSchema.parse(params);
            // Empty/absent section list selects every section.
            const includeAll = !parsed.sections || parsed.sections.length === 0;
            const sections = new Set(parsed.sections ?? []);
            const snapshot = {};
            const stats = {
                tables: 0,
                views: 0,
                indexes: 0,
                constraints: 0,
                functions: 0,
                triggers: 0,
                sequences: 0,
                customTypes: 0,
                extensions: 0,
            };
            // SQL fragment hiding system schemas unless includeSystem is set.
            const schemaExclude = parsed.includeSystem
                ? ""
                : "AND n.nspname NOT IN ('pg_catalog', 'information_schema') AND n.nspname !~ '^pg_toast'";
            // Well-known extension schemas are hidden only for unfiltered,
            // non-system snapshots (and unless explicitly disabled).
            const extensionSchemaExclude = !parsed.schema &&
                !parsed.includeSystem &&
                parsed.excludeExtensionSchemas !== false
                ? "AND n.nspname NOT IN ('cron', 'topology', 'tiger', 'tiger_data')"
                : ""; 
            // Exclude extension-owned objects (e.g. spatial_ref_sys, part_config) from public schema
            const extOwnedActive = !parsed.includeSystem && parsed.excludeExtensionSchemas !== false;
            // pg_depend deptype 'e' marks objects that belong to an extension.
            const extOwnedClause = (oidExpr) => extOwnedActive
                ? `AND NOT EXISTS (SELECT 1 FROM pg_depend dep WHERE dep.objid = ${oidExpr} AND dep.deptype = 'e')`
                : "";
            const schemaParams = [];
            let schemaWhere = "";
            if (parsed.schema) {
                schemaParams.push(parsed.schema);
                schemaWhere = `AND n.nspname = $${String(schemaParams.length)}`;
            }
            // Tables + columns (or compact mode without columns)
            if (includeAll || sections.has("tables")) {
                const columnsSubquery = parsed.compact
                    ? ""
                    : `,
          (SELECT json_agg(json_build_object(
            'name', a.attname,
            'type', pg_catalog.format_type(a.atttypid, a.atttypmod),
            'nullable', NOT a.attnotnull,
            'default', pg_get_expr(d.adbin, d.adrelid),
            'primaryKey', COALESCE((SELECT true FROM pg_constraint pk
              WHERE pk.conrelid = a.attrelid AND a.attnum = ANY(pk.conkey)
              AND pk.contype = 'p'), false)
          ) ORDER BY a.attnum)
          FROM pg_attribute a
          LEFT JOIN pg_attrdef d ON (a.attrelid, a.attnum) = (d.adrelid, d.adnum)
          WHERE a.attrelid = c.oid AND a.attnum > 0 AND NOT a.attisdropped
          ) AS columns`;
                // reltuples = -1 means "never analyzed"; fall back to live tuples.
                const tablesResult = await adapter.executeQuery(`SELECT
          n.nspname AS schema, c.relname AS name,
          CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned_table' END AS type,
          CASE WHEN c.reltuples = -1 THEN COALESCE(s.n_live_tup, 0) ELSE c.reltuples END::bigint AS row_count,
          pg_table_size(c.oid) AS size_bytes,
          obj_description(c.oid, 'pg_class') AS comment${columnsSubquery}
          FROM pg_class c
          JOIN pg_namespace n ON n.oid = c.relnamespace
          LEFT JOIN pg_stat_user_tables s ON s.relid = c.oid
          WHERE c.relkind IN ('r', 'p')
          ${schemaExclude} ${extensionSchemaExclude} ${extOwnedClause("c.oid")} ${schemaWhere}
          ORDER BY n.nspname, c.relname`, schemaParams.length > 0 ? schemaParams : undefined);
                snapshot["tables"] = tablesResult.rows ?? [];
                stats.tables = tablesResult.rows?.length ?? 0;
            }
            // Views
            if (includeAll || sections.has("views")) {
                const viewsResult = await adapter.executeQuery(`SELECT
          n.nspname AS schema, c.relname AS name,
          CASE c.relkind WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized_view' END AS type,
          pg_get_viewdef(c.oid, true) AS definition
          FROM pg_class c
          JOIN pg_namespace n ON n.oid = c.relnamespace
          WHERE c.relkind IN ('v', 'm')
          ${schemaExclude} ${extensionSchemaExclude} ${extOwnedClause("c.oid")} ${schemaWhere}
          ORDER BY n.nspname, c.relname`, schemaParams.length > 0 ? schemaParams : undefined);
                snapshot["views"] = viewsResult.rows ?? [];
                stats.views = viewsResult.rows?.length ?? 0;
            }
            // Indexes
            if (includeAll || sections.has("indexes")) {
                const indexesResult = await adapter.executeQuery(`SELECT
          i.relname AS name, t.relname AS table_name, n.nspname AS schema,
          am.amname AS type, ix.indisunique AS is_unique,
          pg_get_indexdef(ix.indexrelid) AS definition,
          pg_relation_size(i.oid) AS size_bytes
          FROM pg_index ix
          JOIN pg_class t ON t.oid = ix.indrelid
          JOIN pg_class i ON i.oid = ix.indexrelid
          JOIN pg_namespace n ON n.oid = t.relnamespace
          JOIN pg_am am ON am.oid = i.relam
          WHERE ${parsed.includeSystem ? "true" : "n.nspname NOT IN ('pg_catalog', 'information_schema') AND n.nspname !~ '^pg_toast'"}
          ${extensionSchemaExclude} ${extOwnedClause("t.oid")} ${schemaWhere}
          ORDER BY n.nspname, t.relname, i.relname`, schemaParams.length > 0 ? schemaParams : undefined);
                snapshot["indexes"] = indexesResult.rows ?? [];
                stats.indexes = indexesResult.rows?.length ?? 0;
            }
            // Constraints
            if (includeAll || sections.has("constraints")) {
                const constraintsResult = await adapter.executeQuery(`SELECT
          c.conname AS name, t.relname AS table_name, n.nspname AS schema,
          CASE c.contype WHEN 'p' THEN 'primary_key' WHEN 'f' THEN 'foreign_key'
            WHEN 'u' THEN 'unique' WHEN 'c' THEN 'check' WHEN 'x' THEN 'exclusion' END AS type,
          pg_get_constraintdef(c.oid) AS definition
          FROM pg_constraint c
          JOIN pg_class t ON t.oid = c.conrelid
          JOIN pg_namespace n ON n.oid = t.relnamespace
          WHERE ${parsed.includeSystem ? "true" : "n.nspname NOT IN ('pg_catalog', 'information_schema')"}
          ${extensionSchemaExclude} ${extOwnedClause("t.oid")} ${schemaWhere}
          ORDER BY n.nspname, t.relname, c.conname`, schemaParams.length > 0 ? schemaParams : undefined);
                snapshot["constraints"] = constraintsResult.rows ?? [];
                stats.constraints = constraintsResult.rows?.length ?? 0;
            }
            // Functions
            if (includeAll || sections.has("functions")) {
                const functionsResult = await adapter.executeQuery(`SELECT
          n.nspname AS schema, p.proname AS name,
          pg_get_function_arguments(p.oid) AS arguments,
          pg_get_function_result(p.oid) AS return_type,
          l.lanname AS language, p.provolatile AS volatility
          FROM pg_proc p
          JOIN pg_namespace n ON n.oid = p.pronamespace
          JOIN pg_language l ON l.oid = p.prolang
          WHERE ${parsed.includeSystem ? "true" : "n.nspname NOT IN ('pg_catalog', 'information_schema')"}
          ${extensionSchemaExclude} ${extOwnedClause("p.oid")} ${schemaWhere}
          ORDER BY n.nspname, p.proname`, schemaParams.length > 0 ? schemaParams : undefined);
                snapshot["functions"] = functionsResult.rows ?? [];
                stats.functions = functionsResult.rows?.length ?? 0;
            }
            // Triggers
            if (includeAll || sections.has("triggers")) {
                // tgtype is a bitmask: 2=BEFORE, 64=INSTEAD OF, 4/8/16/32 = event bits.
                const triggersResult = await adapter.executeQuery(`SELECT
          t.tgname AS name, c.relname AS table_name, n.nspname AS schema,
          CASE WHEN t.tgtype & 2 = 2 THEN 'BEFORE' WHEN t.tgtype & 64 = 64 THEN 'INSTEAD OF' ELSE 'AFTER' END AS timing,
          array_remove(ARRAY[
            CASE WHEN t.tgtype & 4 = 4 THEN 'INSERT' END,
            CASE WHEN t.tgtype & 8 = 8 THEN 'DELETE' END,
            CASE WHEN t.tgtype & 16 = 16 THEN 'UPDATE' END,
            CASE WHEN t.tgtype & 32 = 32 THEN 'TRUNCATE' END
          ], NULL) AS events,
          p.proname AS function_name
          FROM pg_trigger t
          JOIN pg_class c ON c.oid = t.tgrelid
          JOIN pg_namespace n ON n.oid = c.relnamespace
          JOIN pg_proc p ON p.oid = t.tgfoid
          WHERE NOT t.tgisinternal
          ${schemaExclude} ${extensionSchemaExclude} ${extOwnedClause("c.oid")} ${schemaWhere}
          ORDER BY n.nspname, c.relname, t.tgname`, schemaParams.length > 0 ? schemaParams : undefined);
                snapshot["triggers"] = triggersResult.rows ?? [];
                stats.triggers = triggersResult.rows?.length ?? 0;
            }
            // Sequences
            if (includeAll || sections.has("sequences")) {
                // owned_by resolves the column the sequence serves via pg_depend ('a' = auto).
                const seqResult = await adapter.executeQuery(`SELECT
          n.nspname AS schema, c.relname AS name,
          (SELECT tc.relname || '.' || a.attname
            FROM pg_depend d
            JOIN pg_class tc ON tc.oid = d.refobjid
            JOIN pg_attribute a ON a.attrelid = tc.oid AND a.attnum = d.refobjsubid
            WHERE d.objid = c.oid AND d.classid = 'pg_class'::regclass AND d.deptype = 'a'
            LIMIT 1) AS owned_by
          FROM pg_class c
          JOIN pg_namespace n ON n.oid = c.relnamespace
          WHERE c.relkind = 'S'
          ${schemaExclude} ${extensionSchemaExclude} ${extOwnedClause("c.oid")} ${schemaWhere}
          ORDER BY n.nspname, c.relname`, schemaParams.length > 0 ? schemaParams : undefined);
                snapshot["sequences"] = seqResult.rows ?? [];
                stats.sequences = seqResult.rows?.length ?? 0;
            }
            // Custom types
            if (includeAll || sections.has("types")) {
                const typesResult = await adapter.executeQuery(`SELECT
          n.nspname AS schema, t.typname AS name,
          CASE t.typtype WHEN 'e' THEN 'enum' WHEN 'c' THEN 'composite' WHEN 'd' THEN 'domain' WHEN 'r' THEN 'range' END AS type,
          CASE WHEN t.typtype = 'e' THEN
            (SELECT json_agg(e.enumlabel ORDER BY e.enumsortorder) FROM pg_enum e WHERE e.enumtypid = t.oid)
          END AS values
          FROM pg_type t
          JOIN pg_namespace n ON n.oid = t.typnamespace
          WHERE t.typtype IN ('e', 'c', 'd', 'r')
          AND n.nspname NOT IN ('pg_catalog', 'information_schema')
          ${extensionSchemaExclude} ${extOwnedClause("t.oid")} ${schemaWhere}
          ORDER BY n.nspname, t.typname`, schemaParams.length > 0 ? schemaParams : undefined);
                snapshot["types"] = typesResult.rows ?? [];
                stats.customTypes = typesResult.rows?.length ?? 0;
            }
            // Extensions (skip when schema filter is active — extensions are global objects)
            if ((includeAll || sections.has("extensions")) && !parsed.schema) {
                const extResult = await adapter.executeQuery(`SELECT extname AS name, extversion AS version,
          n.nspname AS schema
          FROM pg_extension e
          JOIN pg_namespace n ON n.oid = e.extnamespace
          ORDER BY e.extname`);
                snapshot["extensions"] = extResult.rows ?? [];
                stats.extensions = extResult.rows?.length ?? 0;
            }
            // Add hint for nonexistent/empty schema
            const allEmpty = Object.values(stats).every((v) => v === 0);
            const hint = parsed.schema !== undefined && allEmpty
                ? `Schema '${parsed.schema}' returned no tables. Verify the schema exists with pg_list_schemas.`
                : undefined;
            return {
                snapshot,
                stats,
                generatedAt: new Date().toISOString(),
                ...(parsed.compact && { compact: true }),
                ...(hint !== undefined && { hint }),
            };
        },
    };
}
856
// =============================================================================
// pg_constraint_analysis
// =============================================================================
/**
 * Build the pg_constraint_analysis tool definition.
 *
 * The handler runs the selected constraint checks (missing_pk, unindexed_fk,
 * missing_not_null — all of them when `checks` is absent/empty) and returns a
 * findings list plus counts grouped by type and severity.
 *
 * NOTE(review): the description also advertises "redundant indexes" and
 * "missing foreign keys" checks, but no such checks exist in this function —
 * confirm whether they are implemented elsewhere or the description overstates.
 *
 * @param adapter - database adapter used to run the catalog queries
 * @returns the tool definition consumed by the tool registry
 */
function createConstraintAnalysisTool(adapter) {
    return {
        name: "pg_constraint_analysis",
        description: "Analyze all constraints for issues: redundant indexes, missing foreign keys, missing NOT NULL, missing primary keys, and unindexed foreign keys.",
        group: "introspection",
        inputSchema: ConstraintAnalysisSchemaBase,
        outputSchema: ConstraintAnalysisOutputSchema,
        annotations: readOnly("Constraint Analysis"),
        icons: getToolIcons("introspection", readOnly("Constraint Analysis")),
        handler: async (params, _context) => {
            const parsed = ConstraintAnalysisSchema.parse(params);
            // Absent/empty check list runs every check.
            const runAll = !parsed.checks || parsed.checks.length === 0;
            const checks = new Set(parsed.checks ?? []);
            const findings = [];
            // Positional parameters shared by all section queries; the WHERE
            // fragments reference them by index ($1, $2).
            const schemaParams = [];
            let schemaWhere = "";
            let tableWhere = "";
            if (parsed.schema) {
                schemaParams.push(parsed.schema);
                schemaWhere = `AND n.nspname = $${String(schemaParams.length)}`;
            }
            if (parsed.table) {
                schemaParams.push(parsed.table);
                tableWhere = `AND c.relname = $${String(schemaParams.length)}`;
            }
            const extensionSchemaExclude = !parsed.schema &&
                !parsed.table &&
                parsed.excludeExtensionSchemas !== false
                ? "AND n.nspname NOT IN ('cron', 'topology', 'tiger', 'tiger_data')"
                : "";
            // Check: Tables without primary keys
            if (runAll || checks.has("missing_pk")) {
                const result = await adapter.executeQuery(`SELECT n.nspname AS schema, c.relname AS table_name
          FROM pg_class c
          JOIN pg_namespace n ON n.oid = c.relnamespace
          WHERE c.relkind IN ('r', 'p')
          AND n.nspname NOT IN ('pg_catalog', 'information_schema')
          AND n.nspname !~ '^pg_toast'
          AND NOT EXISTS (
            SELECT 1 FROM pg_constraint pk
            WHERE pk.conrelid = c.oid AND pk.contype = 'p'
          )
          ${extensionSchemaExclude} ${schemaWhere} ${tableWhere}
          ORDER BY n.nspname, c.relname`, schemaParams.length > 0 ? schemaParams : undefined);
                for (const row of result.rows ?? []) {
                    findings.push({
                        type: "missing_pk",
                        severity: "error",
                        table: qualifiedName(row["schema"], row["table_name"]),
                        description: "Table has no primary key",
                        suggestion: "Add a primary key column (e.g., id SERIAL PRIMARY KEY) for data integrity and efficient lookups",
                    });
                }
            }
            // Check: Unindexed foreign keys
            if (runAll || checks.has("unindexed_fk")) {
                // An FK is "indexed" when some index's leading columns contain the
                // FK columns (conkey <@ indkey).
                const result = await adapter.executeQuery(`SELECT
          n.nspname AS schema, t.relname AS table_name,
          c.conname AS constraint_name,
          array_agg(a.attname ORDER BY x.ordinality) AS columns
          FROM pg_constraint c
          JOIN pg_class t ON t.oid = c.conrelid
          JOIN pg_namespace n ON n.oid = t.relnamespace
          CROSS JOIN LATERAL unnest(c.conkey) WITH ORDINALITY AS x(attnum, ordinality)
          JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = x.attnum
          WHERE c.contype = 'f'
          AND n.nspname NOT IN ('pg_catalog', 'information_schema')
          ${extensionSchemaExclude}
          AND NOT EXISTS (
            SELECT 1 FROM pg_index ix
            WHERE ix.indrelid = t.oid
            AND c.conkey <@ ix.indkey::smallint[]
          )
          ${schemaWhere} ${tableWhere.replace("c.relname", "t.relname")}
          GROUP BY n.nspname, t.relname, c.conname
          ORDER BY n.nspname, t.relname`, schemaParams.length > 0 ? schemaParams : undefined);
                // tableWhere was built against alias "c" (pg_class); this query
                // aliases the table as "t", hence the textual replace above.
                for (const row of result.rows ?? []) {
                    const cols = parseArrayColumn(row["columns"]);
                    findings.push({
                        type: "unindexed_fk",
                        severity: "warning",
                        table: qualifiedName(row["schema"], row["table_name"]),
                        description: `Foreign key '${row["constraint_name"]}' on column(s) [${cols.join(", ")}] has no supporting index`,
                        suggestion: `CREATE INDEX ON ${qualifiedName(row["schema"], row["table_name"])} (${cols.join(", ")})`,
                    });
                }
            }
            // Check: Tables with columns that likely should have NOT NULL
            if (runAll || checks.has("missing_not_null")) {
                // Heuristic: only a curated list of conventionally-NOT NULL column
                // names is inspected; PK columns are skipped (already NOT NULL).
                const result = await adapter.executeQuery(`SELECT
          n.nspname AS schema, c.relname AS table_name,
          a.attname AS column_name, pg_catalog.format_type(a.atttypid, a.atttypmod) AS type
          FROM pg_attribute a
          JOIN pg_class c ON c.oid = a.attrelid
          JOIN pg_namespace n ON n.oid = c.relnamespace
          WHERE c.relkind IN ('r', 'p')
          AND a.attnum > 0 AND NOT a.attisdropped AND a.attnotnull = false
          AND n.nspname NOT IN ('pg_catalog', 'information_schema')
          AND n.nspname !~ '^pg_toast'
          ${extensionSchemaExclude}
          AND a.attname IN ('id', 'uuid', 'email', 'name', 'created_at', 'updated_at', 'status', 'type')
          AND NOT EXISTS (SELECT 1 FROM pg_constraint pk WHERE pk.conrelid = c.oid AND a.attnum = ANY(pk.conkey) AND pk.contype = 'p')
          ${schemaWhere} ${tableWhere}
          ORDER BY n.nspname, c.relname, a.attname`, schemaParams.length > 0 ? schemaParams : undefined);
                for (const row of result.rows ?? []) {
                    findings.push({
                        type: "missing_not_null",
                        severity: "info",
                        table: qualifiedName(row["schema"], row["table_name"]),
                        description: `Column '${row["column_name"]}' (${row["type"]}) is nullable but commonly expected to be NOT NULL`,
                        suggestion: `ALTER TABLE ${qualifiedName(row["schema"], row["table_name"])} ALTER COLUMN "${row["column_name"]}" SET NOT NULL`,
                    });
                }
            }
            // Build summary
            const byType = {};
            const bySeverity = {};
            for (const f of findings) {
                byType[f.type] = (byType[f.type] ?? 0) + 1;
                bySeverity[f.severity] = (bySeverity[f.severity] ?? 0) + 1;
            }
            // Add hint for nonexistent table
            const hint = parsed.table !== undefined && findings.length === 0
                ? `No findings for table '${parsed.schema ? parsed.schema + "." : "public."}${parsed.table}'. Verify the table exists with pg_list_tables.`
                : undefined;
            return {
                findings,
                summary: {
                    totalFindings: findings.length,
                    byType,
                    bySeverity,
                },
                ...(hint !== undefined && { hint }),
            };
        },
    };
}
996
+ // =============================================================================
997
+ // pg_migration_risks
998
+ // =============================================================================
999
+ /** DDL patterns and their associated risks */
1000
+ const DDL_RISK_PATTERNS = [
1001
+ {
1002
+ pattern: /\bDROP\s+TABLE\b/i,
1003
+ category: "data_loss",
1004
+ riskLevel: "critical",
1005
+ description: "DROP TABLE permanently deletes the table and all its data",
1006
+ mitigation: "Back up the table first (pg_dump_table), verify no active references",
1007
+ requiresDowntime: false,
1008
+ lockImpact: "ACCESS EXCLUSIVE on the table",
1009
+ },
1010
+ {
1011
+ pattern: /\bTRUNCATE\b/i,
1012
+ category: "data_loss",
1013
+ riskLevel: "critical",
1014
+ description: "TRUNCATE removes all rows from the table",
1015
+ mitigation: "Verify you intend to delete all data, check CASCADE effects",
1016
+ requiresDowntime: false,
1017
+ lockImpact: "ACCESS EXCLUSIVE on the table",
1018
+ },
1019
+ {
1020
+ pattern: /\bDROP\s+COLUMN\b/i,
1021
+ category: "data_loss",
1022
+ riskLevel: "high",
1023
+ description: "DROP COLUMN permanently removes the column and its data",
1024
+ mitigation: "Back up the column data first, verify no application dependencies",
1025
+ requiresDowntime: false,
1026
+ lockImpact: "ACCESS EXCLUSIVE on the table",
1027
+ },
1028
+ {
1029
+ pattern: /\bALTER\s+(?:TABLE|COLUMN)\b.*\bSET\s+NOT\s+NULL\b/i,
1030
+ category: "constraint",
1031
+ riskLevel: "high",
1032
+ description: "Adding NOT NULL requires a full table scan to verify no NULL values exist",
1033
+ mitigation: "First check for NULLs: SELECT COUNT(*) FROM table WHERE column IS NULL",
1034
+ requiresDowntime: false,
1035
+ lockImpact: "ACCESS EXCLUSIVE during verification scan",
1036
+ },
1037
+ {
1038
+ pattern: /\bALTER\s+TABLE\b.*\bADD\s+(?:CONSTRAINT\b.*\b)?FOREIGN\s+KEY\b/i,
1039
+ category: "constraint",
1040
+ riskLevel: "medium",
1041
+ description: "Adding a foreign key requires validating all existing rows",
1042
+ mitigation: "Use NOT VALID to skip validation, then VALIDATE CONSTRAINT separately",
1043
+ requiresDowntime: false,
1044
+ lockImpact: "SHARE ROW EXCLUSIVE on both tables",
1045
+ },
1046
+ {
1047
+ pattern: /\bALTER\s+TABLE\b.*\bADD\s+COLUMN\b/i,
1048
+ category: "schema_change",
1049
+ riskLevel: "low",
1050
+ description: "Adding a nullable column without a default is a metadata-only change",
1051
+ requiresDowntime: false,
1052
+ lockImpact: "ACCESS EXCLUSIVE (very brief)",
1053
+ },
1054
+ {
1055
+ pattern: /\bALTER\s+TABLE\b.*\bADD\s+COLUMN\b.*\bDEFAULT\b/i,
1056
+ category: "schema_change",
1057
+ riskLevel: "medium",
1058
+ description: "Adding a column with a volatile DEFAULT may require rewriting all rows (PG < 11) or is metadata-only (PG >= 11)",
1059
+ mitigation: "On PG >= 11, this is usually fast. On older versions, consider adding without default then updating",
1060
+ requiresDowntime: false,
1061
+ lockImpact: "ACCESS EXCLUSIVE (metadata-only on PG >= 11)",
1062
+ },
1063
+ {
1064
+ pattern: /\bALTER\s+TABLE\b.*\bALTER\s+COLUMN\b.*\bTYPE\b/i,
1065
+ category: "schema_change",
1066
+ riskLevel: "high",
1067
+ description: "Changing column type requires rewriting the entire table",
1068
+ mitigation: "Consider creating a new column, migrating data, then dropping the old one",
1069
+ requiresDowntime: true,
1070
+ lockImpact: "ACCESS EXCLUSIVE for the entire rewrite",
1071
+ },
1072
+ {
1073
+ pattern: /\bCREATE\s+INDEX\b(?!\s+CONCURRENTLY)/i,
1074
+ category: "locking",
1075
+ riskLevel: "high",
1076
+ description: "CREATE INDEX (non-concurrent) blocks writes to the table for the entire build duration",
1077
+ mitigation: "Use CREATE INDEX CONCURRENTLY to avoid blocking writes",
1078
+ requiresDowntime: false,
1079
+ lockImpact: "SHARE lock on the table (blocks INSERT/UPDATE/DELETE)",
1080
+ },
1081
+ {
1082
+ pattern: /\bCREATE\s+INDEX\s+CONCURRENTLY\b/i,
1083
+ category: "locking",
1084
+ riskLevel: "low",
1085
+ description: "CREATE INDEX CONCURRENTLY allows concurrent writes but takes longer",
1086
+ requiresDowntime: false,
1087
+ lockImpact: "No blocking locks (uses ShareUpdateExclusiveLock)",
1088
+ },
1089
+ {
1090
+ pattern: /\bDROP\s+INDEX\b(?!\s+CONCURRENTLY)/i,
1091
+ category: "locking",
1092
+ riskLevel: "medium",
1093
+ description: "DROP INDEX blocks writes briefly. May degrade query performance",
1094
+ mitigation: "Use DROP INDEX CONCURRENTLY in production, verify no critical queries depend on it",
1095
+ requiresDowntime: false,
1096
+ lockImpact: "ACCESS EXCLUSIVE (brief)",
1097
+ },
1098
+ {
1099
+ pattern: /\bRENAME\s+(?:TABLE|COLUMN|TO)\b/i,
1100
+ category: "breaking_change",
1101
+ riskLevel: "high",
1102
+ description: "Renaming a table or column will break any application queries referencing the old name",
1103
+ mitigation: "Create a view with the old name pointing to the new name for backward compatibility",
1104
+ requiresDowntime: false,
1105
+ lockImpact: "ACCESS EXCLUSIVE (brief)",
1106
+ },
1107
+ {
1108
+ pattern: /\bDROP\s+SCHEMA\b.*\bCASCADE\b/i,
1109
+ category: "data_loss",
1110
+ riskLevel: "critical",
1111
+ description: "DROP SCHEMA CASCADE deletes the schema and ALL objects within it",
1112
+ mitigation: "List all objects in the schema first, verify intent, and back up critical data",
1113
+ requiresDowntime: false,
1114
+ lockImpact: "ACCESS EXCLUSIVE on all objects in the schema",
1115
+ },
1116
+ ];
1117
/**
 * Build the pg_migration_risks tool: a read-only, pre-flight analyzer that
 * pattern-matches proposed DDL statements against DDL_RISK_PATTERNS and
 * summarizes the combined risk (data loss, locking, downtime, lock impact).
 *
 * No SQL is executed; analysis is purely regex-based over the input.
 *
 * @param adapter - Database adapter; currently unused (kept for future checks
 *                  such as verifying table existence).
 * @returns Tool definition consumed by the MCP tool registry.
 */
function createMigrationRisksTool(adapter) {
    // Consistency fix: every sibling factory builds its annotations object
    // once and shares it between `annotations` and `icons`; this one called
    // readOnly("Migration Risks") twice. Hoist to a single shared const.
    const annotations = readOnly("Migration Risks");
    return {
        name: "pg_migration_risks",
        description: "Analyze proposed DDL statements for risks: data loss, lock contention, constraint violations, and breaking changes. Pre-flight check before executing migrations.",
        group: "introspection",
        inputSchema: MigrationRisksSchemaBase,
        outputSchema: MigrationRisksOutputSchema,
        annotations,
        icons: getToolIcons("introspection", annotations),
        handler: (params, _context) => Promise.resolve().then(() => {
            // adapter is available for future enhancements (e.g., checking table existence)
            void adapter;
            const parsed = MigrationRisksSchema.parse(params);
            const risks = [];
            let requiresDowntime = false;
            let highestRiskLevel = "low";
            const lockImpacts = new Set();
            // Severity ranking used to track the single highest risk level
            // observed across all pattern matches.
            const riskOrder = { low: 0, medium: 1, high: 2, critical: 3 };
            for (let i = 0; i < parsed.statements.length; i++) {
                const stmt = parsed.statements[i] ?? "";
                // One statement may match several patterns (e.g. ADD COLUMN and
                // ADD COLUMN ... DEFAULT); every match is reported separately.
                for (const pattern of DDL_RISK_PATTERNS) {
                    if (pattern.pattern.test(stmt)) {
                        risks.push({
                            // Truncate long statements so the report stays compact.
                            statement: stmt.length > 200 ? stmt.slice(0, 200) + "..." : stmt,
                            statementIndex: i,
                            riskLevel: pattern.riskLevel,
                            category: pattern.category,
                            description: pattern.description,
                            mitigation: pattern.mitigation,
                        });
                        if (pattern.requiresDowntime) {
                            requiresDowntime = true;
                        }
                        if (riskOrder[pattern.riskLevel] > riskOrder[highestRiskLevel]) {
                            highestRiskLevel = pattern.riskLevel;
                        }
                        lockImpacts.add(pattern.lockImpact);
                    }
                }
            }
            return {
                risks,
                summary: {
                    totalStatements: parsed.statements.length,
                    totalRisks: risks.length,
                    highestRisk: highestRiskLevel,
                    requiresDowntime,
                    // Set deduplicates identical lock descriptions across matches.
                    estimatedLockImpact: lockImpacts.size > 0 ? [...lockImpacts].join("; ") : "None",
                },
            };
        }),
    };
}
1170
// =============================================================================
// Migration tracking — shared helpers
// =============================================================================
// Name of the table that records applied migrations. Referenced (unqualified,
// i.e. resolved in the default/public schema) by all pg_migration_* tools.
const TRACKING_TABLE = "_mcp_schema_versions";
// Idempotent DDL for the tracking table (CREATE TABLE IF NOT EXISTS).
// `migration_hash` holds the SHA-256 hex digest of `migration_sql` (see
// hashMigrationSql) and drives duplicate detection; `status` is constrained
// to the three lifecycle states used by the tools below.
const CREATE_TRACKING_TABLE_SQL = `
CREATE TABLE IF NOT EXISTS ${TRACKING_TABLE} (
    id SERIAL PRIMARY KEY,
    version VARCHAR(50) NOT NULL,
    description TEXT,
    applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    applied_by VARCHAR(255),
    migration_hash VARCHAR(64) NOT NULL,
    migration_sql TEXT NOT NULL,
    source_system VARCHAR(50),
    rollback_sql TEXT,
    status VARCHAR(20) NOT NULL DEFAULT 'applied',
    CONSTRAINT valid_status CHECK (status IN ('applied', 'rolled_back', 'failed'))
)`;
1188
/**
 * Make sure the migration tracking table exists in the public schema,
 * creating it on demand.
 *
 * @param {object} adapter - Database adapter exposing `executeQuery(sql, params?)`.
 * @returns {Promise<boolean>} true when this call created the table,
 *   false when it already existed.
 */
async function ensureTrackingTable(adapter) {
    const existsResult = await adapter.executeQuery(`SELECT EXISTS (
    SELECT 1 FROM pg_tables
    WHERE schemaname = 'public' AND tablename = $1
  ) AS "table_exists"`, [TRACKING_TABLE]);
    const rows = existsResult.rows ?? [];
    const alreadyThere = rows[0]?.["table_exists"] === true;
    if (alreadyThere) {
        return false;
    }
    await adapter.executeQuery(CREATE_TRACKING_TABLE_SQL);
    return true;
}
1204
/**
 * Compute the SHA-256 hex digest of a migration's SQL text.
 * Serves as the idempotency key for duplicate-migration detection.
 *
 * @param {string} sql - Raw migration SQL.
 * @returns {string} 64-character lowercase hex digest.
 */
function hashMigrationSql(sql) {
    const hasher = createHash("sha256");
    hasher.update(sql);
    return hasher.digest("hex");
}
1207
/**
 * Map a raw _mcp_schema_versions row (snake_case columns) onto the
 * camelCase record shape the migration tools return.
 *
 * Nullable columns are normalized to explicit null; `applied_at` is
 * rendered as an ISO-8601 string (Date instances are converted, anything
 * nullish becomes the empty string, other values pass through unchanged).
 *
 * @param {object} row - A result row from the tracking table.
 * @returns {object} Normalized migration record.
 */
function formatRecord(row) {
    const toIsoTimestamp = (value) => {
        if (value instanceof Date) {
            return value.toISOString();
        }
        return value ?? "";
    };
    return {
        id: row["id"],
        version: row["version"],
        description: row["description"] ?? null,
        appliedAt: toIsoTimestamp(row["applied_at"]),
        appliedBy: row["applied_by"] ?? null,
        migrationHash: row["migration_hash"],
        sourceSystem: row["source_system"] ?? null,
        status: row["status"],
    };
}
1223
// =============================================================================
// pg_migration_init
// =============================================================================
/**
 * Build the pg_migration_init tool.
 *
 * Creates (or verifies) the `_mcp_schema_versions` tracking table in the
 * requested schema and reports how many migration records it already holds.
 * Idempotent: repeated calls are safe.
 *
 * @param adapter - Database adapter exposing `executeQuery(sql, params?)`.
 * @returns Tool definition consumed by the MCP tool registry.
 */
function createMigrationInitTool(adapter) {
    const annotations = write("Initialize migration tracking");
    return {
        name: "pg_migration_init",
        description: "Initialize or verify the schema version tracking table (_mcp_schema_versions). " +
            "Idempotent — safe to call repeatedly. Returns current tracking state.",
        group: "introspection",
        inputSchema: MigrationInitSchemaBase,
        outputSchema: MigrationInitOutputSchema,
        annotations,
        icons: getToolIcons("introspection", annotations),
        handler: async (params, _context) => {
            const parsed = MigrationInitSchema.parse(params);
            const schemaName = parsed.schema ?? "public";
            const usesPublicSchema = schemaName === "public";
            // Schema-qualify the table name when targeting a non-public schema.
            // NOTE(review): schemaName is interpolated into SQL unescaped —
            // presumably constrained by the input schema; confirm upstream validation.
            const qualifiedTable = usesPublicSchema
                ? TRACKING_TABLE
                : `${schemaName}.${TRACKING_TABLE}`;
            const createSql = usesPublicSchema
                ? CREATE_TRACKING_TABLE_SQL
                : CREATE_TRACKING_TABLE_SQL.replace(TRACKING_TABLE, `${schemaName}.${TRACKING_TABLE}`);
            // Probe pg_tables first so we can report whether this call
            // actually created the table.
            const existsResult = await adapter.executeQuery(`SELECT EXISTS (
    SELECT 1 FROM pg_tables
    WHERE schemaname = $1 AND tablename = $2
  ) AS "table_exists"`, [schemaName, TRACKING_TABLE]);
            const alreadyExists = (existsResult.rows ?? [])[0]?.["table_exists"] === true;
            if (!alreadyExists) {
                await adapter.executeQuery(createSql);
            }
            const countResult = await adapter.executeQuery(`SELECT COUNT(*)::int AS count FROM ${qualifiedTable}`);
            const existingRecords = (countResult.rows ?? [])[0]?.["count"] ?? 0;
            return {
                success: true,
                tableCreated: !alreadyExists,
                tableName: qualifiedTable,
                existingRecords,
            };
        },
    };
}
1268
// =============================================================================
// pg_migration_record
// =============================================================================
/**
 * Build the pg_migration_record tool.
 *
 * Records a migration that was applied elsewhere: it does NOT execute the
 * migration SQL (pg_migration_apply does that). Auto-creates the tracking
 * table, hashes the SQL for idempotency, rejects duplicate hashes, and
 * returns the inserted record.
 *
 * NOTE(review): this tool always targets the public-schema tracking table
 * (ensureTrackingTable / unqualified TRACKING_TABLE), while pg_migration_init
 * can create the table in another schema — confirm that asymmetry is intended.
 *
 * @param adapter - Database adapter exposing `executeQuery(sql, params?)`.
 * @returns Tool definition consumed by the MCP tool registry.
 */
function createMigrationRecordTool(adapter) {
    const annotations = write("Record migration");
    return {
        name: "pg_migration_record",
        description: "Record a migration in the schema version tracking table. " +
            "Auto-provisions the tracking table on first use. " +
            "Computes SHA-256 hash for idempotency detection.",
        group: "introspection",
        inputSchema: MigrationRecordSchemaBase,
        outputSchema: MigrationRecordOutputSchema,
        annotations,
        icons: getToolIcons("introspection", annotations),
        handler: async (params, _context) => {
            let parsed;
            try {
                parsed = MigrationRecordSchema.parse(params);
            }
            catch (error) {
                // Zod-style validation failures (objects carrying an `issues`
                // array) become a structured error result; anything else is
                // unexpected and is rethrown.
                if (error !== null &&
                    typeof error === "object" &&
                    "issues" in error &&
                    Array.isArray(error.issues)) {
                    const issues = error.issues;
                    const messages = issues.map((i) => i.message).join("; ");
                    return {
                        success: false,
                        error: `Validation error: ${messages}`,
                    };
                }
                throw error;
            }
            await ensureTrackingTable(adapter);
            const migrationHash = hashMigrationSql(parsed.migrationSql);
            // Check for duplicate hash: an identical, still-'applied' migration
            // must not be recorded twice. Rolled-back/failed entries don't block.
            const dupCheck = await adapter.executeQuery(`SELECT id, version, status FROM ${TRACKING_TABLE}
    WHERE migration_hash = $1 AND status = 'applied'`, [migrationHash]);
            const dupRows = dupCheck.rows ?? [];
            if (dupRows.length > 0) {
                const dup = dupRows[0] ?? {};
                const dupId = dup["id"];
                const dupVersion = dup["version"];
                return {
                    success: false,
                    error: `Duplicate migration detected: version "${dupVersion}" (id: ${String(dupId)}) has the same SQL hash. ` +
                        `Use a different migration SQL or roll back the existing one first.`,
                };
            }
            // Insert the record; `status` defaults to 'applied' per the table DDL.
            const result = await adapter.executeQuery(`INSERT INTO ${TRACKING_TABLE}
    (version, description, applied_by, migration_hash, migration_sql, source_system, rollback_sql)
    VALUES ($1, $2, $3, $4, $5, $6, $7)
    RETURNING *`, [
                parsed.version,
                parsed.description ?? null,
                parsed.appliedBy ?? null,
                migrationHash,
                parsed.migrationSql,
                parsed.sourceSystem ?? null,
                parsed.rollbackSql ?? null,
            ]);
            const resultRows = result.rows ?? [];
            if (resultRows.length === 0) {
                // Defensive: RETURNING * should always yield the inserted row.
                return {
                    success: false,
                    error: "Failed to insert migration record.",
                };
            }
            const row = resultRows[0] ?? {};
            return {
                success: true,
                record: formatRecord(row),
            };
        },
    };
}
1345
// =============================================================================
// pg_migration_apply
// =============================================================================
/**
 * Build the pg_migration_apply tool.
 *
 * Executes the migration SQL and inserts the tracking record inside one
 * BEGIN/COMMIT transaction. On error the transaction is rolled back and a
 * best-effort 'failed' entry is written outside the transaction.
 *
 * NOTE(review): BEGIN/COMMIT/ROLLBACK are issued as separate executeQuery
 * calls — this assumes the adapter runs the whole handler on a single
 * connection/session (a pooled adapter could route them to different
 * connections, breaking atomicity). Confirm the adapter's contract.
 *
 * @param adapter - Database adapter exposing `executeQuery(sql, params?)`.
 * @returns Tool definition consumed by the MCP tool registry.
 */
function createMigrationApplyTool(adapter) {
    const annotations = destructive("Apply migration");
    return {
        name: "pg_migration_apply",
        description: "Execute migration SQL and record it atomically in a single transaction. " +
            "Auto-provisions the tracking table on first use. " +
            "On failure, rolls back and records a 'failed' entry. " +
            "Use pg_migration_record instead if you only need to log an already-applied migration.",
        group: "introspection",
        inputSchema: MigrationApplySchemaBase,
        outputSchema: MigrationApplyOutputSchema,
        annotations,
        icons: getToolIcons("introspection", annotations),
        handler: async (params, _context) => {
            let parsed;
            try {
                parsed = MigrationApplySchema.parse(params);
            }
            catch (error) {
                // Zod-style validation failures (objects carrying an `issues`
                // array) become a structured error result; anything else is
                // unexpected and is rethrown.
                if (error !== null &&
                    typeof error === "object" &&
                    "issues" in error &&
                    Array.isArray(error.issues)) {
                    const issues = error.issues;
                    const messages = issues.map((i) => i.message).join("; ");
                    return {
                        success: false,
                        error: `Validation error: ${messages}`,
                    };
                }
                throw error;
            }
            await ensureTrackingTable(adapter);
            const migrationHash = hashMigrationSql(parsed.migrationSql);
            // Check for duplicate hash: refuse to re-apply SQL that is already
            // recorded as 'applied'.
            const dupCheck = await adapter.executeQuery(`SELECT id, version, status FROM ${TRACKING_TABLE}
    WHERE migration_hash = $1 AND status = 'applied'`, [migrationHash]);
            const dupRows = dupCheck.rows ?? [];
            if (dupRows.length > 0) {
                const dup = dupRows[0] ?? {};
                const dupId = dup["id"];
                const dupVersion = dup["version"];
                return {
                    success: false,
                    error: `Duplicate migration detected: version "${dupVersion}" (id: ${String(dupId)}) has the same SQL hash. ` +
                        `Use a different migration SQL or roll back the existing one first.`,
                };
            }
            // Execute migration SQL and record atomically
            try {
                await adapter.executeQuery("BEGIN");
                // Execute the migration SQL
                await adapter.executeQuery(parsed.migrationSql);
                // Record in tracking table
                const result = await adapter.executeQuery(`INSERT INTO ${TRACKING_TABLE}
    (version, description, applied_by, migration_hash, migration_sql, source_system, rollback_sql)
    VALUES ($1, $2, $3, $4, $5, $6, $7)
    RETURNING *`, [
                    parsed.version,
                    parsed.description ?? null,
                    parsed.appliedBy ?? null,
                    migrationHash,
                    parsed.migrationSql,
                    parsed.sourceSystem ?? null,
                    parsed.rollbackSql ?? null,
                ]);
                await adapter.executeQuery("COMMIT");
                const resultRows = result.rows ?? [];
                if (resultRows.length === 0) {
                    // Defensive: RETURNING * should always yield the inserted row.
                    return {
                        success: false,
                        error: "Migration was applied but failed to insert tracking record.",
                    };
                }
                const row = resultRows[0] ?? {};
                return {
                    success: true,
                    record: formatRecord(row),
                };
            }
            catch (err) {
                // Roll back the entire transaction (migration SQL + INSERT)
                await adapter.executeQuery("ROLLBACK");
                const message = err instanceof Error ? err.message : "Unknown error";
                // Record a 'failed' entry outside the rolled-back transaction
                try {
                    await adapter.executeQuery(`INSERT INTO ${TRACKING_TABLE}
    (version, description, applied_by, migration_hash, migration_sql, source_system, rollback_sql, status)
    VALUES ($1, $2, $3, $4, $5, $6, $7, 'failed')`, [
                        parsed.version,
                        parsed.description ?? null,
                        parsed.appliedBy ?? null,
                        migrationHash,
                        parsed.migrationSql,
                        parsed.sourceSystem ?? null,
                        parsed.rollbackSql ?? null,
                    ]);
                }
                catch {
                    // Best-effort: if we can't record the failure, still return the error
                }
                return {
                    success: false,
                    error: `Migration "${parsed.version}" failed: ${message}. Transaction was rolled back.`,
                };
            }
        },
    };
}
1457
// =============================================================================
// pg_migration_rollback
// =============================================================================
/**
 * Build the pg_migration_rollback tool.
 *
 * Locates a tracked migration by id or version (most recent id wins when
 * matching by version), validates it is eligible (not already rolled back,
 * has stored rollback SQL), then either previews (dryRun) or executes the
 * rollback SQL and flips status to 'rolled_back' in one transaction.
 *
 * NOTE(review): BEGIN/COMMIT/ROLLBACK are separate executeQuery calls —
 * assumes the adapter keeps the handler on one connection/session; confirm.
 *
 * @param adapter - Database adapter exposing `executeQuery(sql, params?)`.
 * @returns Tool definition consumed by the MCP tool registry.
 */
function createMigrationRollbackTool(adapter) {
    const annotations = destructive("Roll back migration");
    return {
        name: "pg_migration_rollback",
        description: "Roll back a specific migration by ID or version. " +
            "Executes the stored rollback_sql in a transaction and updates status to 'rolled_back'. " +
            "Use dryRun: true to preview the rollback SQL without executing.",
        group: "introspection",
        inputSchema: MigrationRollbackSchemaBase,
        outputSchema: MigrationRollbackOutputSchema,
        annotations,
        icons: getToolIcons("introspection", annotations),
        handler: async (params, _context) => {
            const parsed = MigrationRollbackSchema.parse(params);
            await ensureTrackingTable(adapter);
            if (parsed.id === undefined && parsed.version === undefined) {
                return {
                    success: false,
                    error: "Either 'id' or 'version' is required to identify the migration to roll back.",
                };
            }
            // Find the migration. When both id and version are supplied, id wins.
            // ORDER BY id DESC LIMIT 1 picks the most recent record for a version.
            const whereClause = parsed.id !== undefined ? "id = $1" : "version = $1";
            const whereValue = parsed.id ?? parsed.version;
            const findResult = await adapter.executeQuery(`SELECT * FROM ${TRACKING_TABLE} WHERE ${whereClause} ORDER BY id DESC LIMIT 1`, [whereValue]);
            const findRows = findResult.rows ?? [];
            if (findRows.length === 0) {
                const identifier = parsed.id !== undefined
                    ? `id ${String(parsed.id)}`
                    : `version "${parsed.version ?? ""}"`;
                return {
                    success: false,
                    error: `Migration not found: ${identifier}`,
                };
            }
            const row = findRows[0] ?? {};
            const rowId = row["id"];
            const rowVersion = row["version"];
            const rowStatus = row["status"];
            const rollbackSql = row["rollback_sql"] ?? null;
            // Guard: cannot roll back twice.
            if (rowStatus === "rolled_back") {
                return {
                    success: false,
                    error: `Migration "${rowVersion}" (id: ${String(rowId)}) has already been rolled back.`,
                };
            }
            // Guard: nothing to execute if no rollback SQL was stored.
            if (rollbackSql === null) {
                return {
                    success: false,
                    error: `Migration "${rowVersion}" (id: ${String(rowId)}) has no rollback SQL stored. Manual rollback required.`,
                };
            }
            // Preview mode: return the SQL without executing anything.
            if (parsed.dryRun === true) {
                return {
                    success: true,
                    dryRun: true,
                    rollbackSql,
                    record: formatRecord(row),
                };
            }
            // Execute rollback in a transaction: rollback SQL + status update
            // either both take effect or neither does.
            try {
                await adapter.executeQuery("BEGIN");
                await adapter.executeQuery(rollbackSql);
                await adapter.executeQuery(`UPDATE ${TRACKING_TABLE} SET status = 'rolled_back' WHERE id = $1`, [rowId]);
                await adapter.executeQuery("COMMIT");
                return {
                    success: true,
                    dryRun: false,
                    rollbackSql,
                    record: {
                        // Reflect the status change without re-querying the row.
                        ...formatRecord(row),
                        status: "rolled_back",
                    },
                };
            }
            catch (err) {
                await adapter.executeQuery("ROLLBACK");
                const message = err instanceof Error ? err.message : "Unknown error";
                return {
                    success: false,
                    error: `Rollback failed for migration "${rowVersion}" (id: ${String(rowId)}): ${message}. Transaction was rolled back.`,
                };
            }
        },
    };
}
1547
// =============================================================================
// pg_migration_history
// =============================================================================
/**
 * Build the pg_migration_history tool: paginated, filterable listing of the
 * migration tracking table (newest first). Filters on status and/or source
 * system are combined with AND; migration_sql is excluded from the page
 * query to keep payloads small.
 *
 * NOTE(review): the query computes `has_rollback` but formatRecord does not
 * surface it — confirm whether the output schema should expose that flag.
 *
 * @param adapter - Database adapter exposing `executeQuery(sql, params?)`.
 * @returns Tool definition consumed by the MCP tool registry.
 */
function createMigrationHistoryTool(adapter) {
    const annotations = readOnly("Migration history");
    return {
        name: "pg_migration_history",
        description: "Query migration history with optional filtering by status and source system. " +
            "Returns paginated results ordered by applied_at descending.",
        group: "introspection",
        inputSchema: MigrationHistorySchemaBase,
        outputSchema: MigrationHistoryOutputSchema,
        annotations,
        icons: getToolIcons("introspection", annotations),
        handler: async (params, _context) => {
            const parsed = MigrationHistorySchema.parse(params);
            await ensureTrackingTable(adapter);
            const limit = parsed.limit ?? 50;
            const offset = parsed.offset ?? 0;
            // Build dynamic WHERE clause with positional parameters.
            const conditions = [];
            const values = [];
            let paramIdx = 1;
            if (parsed.status != null) {
                conditions.push(`status = $${String(paramIdx)}`);
                paramIdx++;
                values.push(parsed.status);
            }
            if (parsed.sourceSystem != null) {
                conditions.push(`source_system = $${String(paramIdx)}`);
                paramIdx++;
                values.push(parsed.sourceSystem);
            }
            const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
            // Total matching rows, ignoring pagination.
            const countResult = await adapter.executeQuery(`SELECT COUNT(*)::int AS count FROM ${TRACKING_TABLE} ${whereClause}`, values.length > 0 ? values : undefined);
            const countRow = (countResult.rows ?? [])[0];
            const total = countRow?.["count"] ?? 0;
            // Get page of results (exclude migration_sql for payload efficiency).
            const limitIdx = String(paramIdx);
            paramIdx++;
            const offsetIdx = String(paramIdx);
            // FIX: "id DESC" tiebreaker makes pagination deterministic. With
            // only "applied_at DESC", rows sharing a timestamp have no defined
            // order, so they could repeat or be skipped across pages.
            const dataResult = await adapter.executeQuery(`SELECT id, version, description, applied_at, applied_by,
    migration_hash, source_system, rollback_sql IS NOT NULL AS has_rollback, status
    FROM ${TRACKING_TABLE}
    ${whereClause}
    ORDER BY applied_at DESC, id DESC
    LIMIT $${limitIdx} OFFSET $${offsetIdx}`, [...values, limit, offset]);
            const records = (dataResult.rows ?? []).map(formatRecord);
            return {
                records,
                total,
                limit,
                offset,
            };
        },
    };
}
1605
// =============================================================================
// pg_migration_status
// =============================================================================
/**
 * Build the pg_migration_status tool.
 *
 * Reports whether the tracking table exists in the requested schema and, if
 * so, aggregate counts per status, the most recently applied version, and
 * the distinct source systems seen.
 *
 * NOTE(review): like pg_migration_init, this tool honors a non-public
 * schema, but record/apply/rollback/history always use the public-schema
 * table — confirm that asymmetry is intended.
 *
 * @param adapter - Database adapter exposing `executeQuery(sql, params?)`.
 * @returns Tool definition consumed by the MCP tool registry.
 */
function createMigrationStatusTool(adapter) {
    const annotations = readOnly("Migration status");
    return {
        name: "pg_migration_status",
        description: "Get current migration tracking status: latest version, counts by status, " +
            "and list of source systems. Returns initialized: false if tracking table doesn't exist.",
        group: "introspection",
        inputSchema: MigrationStatusSchemaBase,
        outputSchema: MigrationStatusOutputSchema,
        annotations,
        icons: getToolIcons("introspection", annotations),
        handler: async (params, _context) => {
            const parsed = MigrationStatusSchema.parse(params);
            const targetSchema = parsed.schema ?? "public";
            // Check if tracking table exists
            const check = await adapter.executeQuery(`SELECT EXISTS (
    SELECT 1 FROM pg_tables
    WHERE schemaname = $1 AND tablename = $2
  ) AS "table_exists"`, [targetSchema, TRACKING_TABLE]);
            const firstRow = (check.rows ?? [])[0];
            const tableExists = firstRow?.["table_exists"] === true;
            if (!tableExists) {
                // Not initialized: return an empty, zeroed status payload.
                return {
                    initialized: false,
                    latestVersion: null,
                    latestAppliedAt: null,
                    counts: { total: 0, applied: 0, rolledBack: 0, failed: 0 },
                    sourceSystems: [],
                };
            }
            const qualifiedTable = targetSchema === "public"
                ? TRACKING_TABLE
                : `${targetSchema}.${TRACKING_TABLE}`;
            // Get aggregate status (one pass via FILTERed COUNTs).
            const statsResult = await adapter.executeQuery(`SELECT
    COUNT(*)::int AS total,
    COUNT(*) FILTER (WHERE status = 'applied')::int AS applied,
    COUNT(*) FILTER (WHERE status = 'rolled_back')::int AS rolled_back,
    COUNT(*) FILTER (WHERE status = 'failed')::int AS failed
    FROM ${qualifiedTable}`);
            const statsRow = (statsResult.rows ?? [])[0] ?? {};
            // Get latest applied migration
            const latestResult = await adapter.executeQuery(`SELECT version, applied_at FROM ${qualifiedTable}
    WHERE status = 'applied'
    ORDER BY applied_at DESC LIMIT 1`);
            const latestRow = (latestResult.rows ?? [])[0];
            // Get distinct source systems
            const systemsResult = await adapter.executeQuery(`SELECT DISTINCT source_system FROM ${qualifiedTable}
    WHERE source_system IS NOT NULL
    ORDER BY source_system`);
            const sourceSystems = (systemsResult.rows ?? []).map((r) => r["source_system"]);
            let latestAppliedAt = null;
            if (latestRow != null) {
                const appliedAt = latestRow["applied_at"];
                // Normalize to ISO-8601 (same convention as formatRecord).
                latestAppliedAt =
                    appliedAt instanceof Date
                        ? appliedAt.toISOString()
                        : (appliedAt ?? "");
            }
            return {
                initialized: true,
                latestVersion: latestRow != null ? latestRow["version"] : null,
                latestAppliedAt,
                counts: {
                    total: statsRow["total"],
                    applied: statsRow["applied"],
                    rolledBack: statsRow["rolled_back"],
                    failed: statsRow["failed"],
                },
                sourceSystems,
            };
        },
    };
}
1682
+ //# sourceMappingURL=introspection.js.map