@neverinfamous/postgres-mcp 1.1.0 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (243):
  1. package/README.md +95 -81
  2. package/dist/__tests__/mocks/adapter.d.ts.map +1 -1
  3. package/dist/__tests__/mocks/adapter.js +0 -1
  4. package/dist/__tests__/mocks/adapter.js.map +1 -1
  5. package/dist/__tests__/mocks/pool.d.ts.map +1 -1
  6. package/dist/__tests__/mocks/pool.js +0 -1
  7. package/dist/__tests__/mocks/pool.js.map +1 -1
  8. package/dist/adapters/DatabaseAdapter.js +1 -1
  9. package/dist/adapters/DatabaseAdapter.js.map +1 -1
  10. package/dist/adapters/postgresql/PostgresAdapter.d.ts.map +1 -1
  11. package/dist/adapters/postgresql/PostgresAdapter.js +78 -8
  12. package/dist/adapters/postgresql/PostgresAdapter.js.map +1 -1
  13. package/dist/adapters/postgresql/prompts/backup.d.ts.map +1 -1
  14. package/dist/adapters/postgresql/prompts/backup.js +2 -3
  15. package/dist/adapters/postgresql/prompts/backup.js.map +1 -1
  16. package/dist/adapters/postgresql/prompts/citext.d.ts.map +1 -1
  17. package/dist/adapters/postgresql/prompts/citext.js +3 -4
  18. package/dist/adapters/postgresql/prompts/citext.js.map +1 -1
  19. package/dist/adapters/postgresql/prompts/extensionSetup.d.ts.map +1 -1
  20. package/dist/adapters/postgresql/prompts/extensionSetup.js +2 -3
  21. package/dist/adapters/postgresql/prompts/extensionSetup.js.map +1 -1
  22. package/dist/adapters/postgresql/prompts/health.d.ts.map +1 -1
  23. package/dist/adapters/postgresql/prompts/health.js +2 -3
  24. package/dist/adapters/postgresql/prompts/health.js.map +1 -1
  25. package/dist/adapters/postgresql/prompts/index.js +20 -27
  26. package/dist/adapters/postgresql/prompts/index.js.map +1 -1
  27. package/dist/adapters/postgresql/prompts/indexTuning.d.ts.map +1 -1
  28. package/dist/adapters/postgresql/prompts/indexTuning.js +2 -3
  29. package/dist/adapters/postgresql/prompts/indexTuning.js.map +1 -1
  30. package/dist/adapters/postgresql/prompts/kcache.d.ts.map +1 -1
  31. package/dist/adapters/postgresql/prompts/kcache.js +3 -4
  32. package/dist/adapters/postgresql/prompts/kcache.js.map +1 -1
  33. package/dist/adapters/postgresql/prompts/ltree.d.ts.map +1 -1
  34. package/dist/adapters/postgresql/prompts/ltree.js +5 -6
  35. package/dist/adapters/postgresql/prompts/ltree.js.map +1 -1
  36. package/dist/adapters/postgresql/prompts/partman.d.ts.map +1 -1
  37. package/dist/adapters/postgresql/prompts/partman.js +2 -3
  38. package/dist/adapters/postgresql/prompts/partman.js.map +1 -1
  39. package/dist/adapters/postgresql/prompts/pgcron.d.ts.map +1 -1
  40. package/dist/adapters/postgresql/prompts/pgcron.js +2 -3
  41. package/dist/adapters/postgresql/prompts/pgcron.js.map +1 -1
  42. package/dist/adapters/postgresql/prompts/pgcrypto.d.ts.map +1 -1
  43. package/dist/adapters/postgresql/prompts/pgcrypto.js +3 -4
  44. package/dist/adapters/postgresql/prompts/pgcrypto.js.map +1 -1
  45. package/dist/adapters/postgresql/prompts/pgvector.d.ts.map +1 -1
  46. package/dist/adapters/postgresql/prompts/pgvector.js +3 -4
  47. package/dist/adapters/postgresql/prompts/pgvector.js.map +1 -1
  48. package/dist/adapters/postgresql/prompts/postgis.d.ts.map +1 -1
  49. package/dist/adapters/postgresql/prompts/postgis.js +2 -3
  50. package/dist/adapters/postgresql/prompts/postgis.js.map +1 -1
  51. package/dist/adapters/postgresql/schemas/admin.d.ts +10 -5
  52. package/dist/adapters/postgresql/schemas/admin.d.ts.map +1 -1
  53. package/dist/adapters/postgresql/schemas/admin.js +10 -5
  54. package/dist/adapters/postgresql/schemas/admin.js.map +1 -1
  55. package/dist/adapters/postgresql/schemas/backup.d.ts +8 -4
  56. package/dist/adapters/postgresql/schemas/backup.d.ts.map +1 -1
  57. package/dist/adapters/postgresql/schemas/backup.js +11 -4
  58. package/dist/adapters/postgresql/schemas/backup.js.map +1 -1
  59. package/dist/adapters/postgresql/schemas/core.d.ts +54 -19
  60. package/dist/adapters/postgresql/schemas/core.d.ts.map +1 -1
  61. package/dist/adapters/postgresql/schemas/core.js +65 -17
  62. package/dist/adapters/postgresql/schemas/core.js.map +1 -1
  63. package/dist/adapters/postgresql/schemas/cron.d.ts +51 -32
  64. package/dist/adapters/postgresql/schemas/cron.d.ts.map +1 -1
  65. package/dist/adapters/postgresql/schemas/cron.js +64 -44
  66. package/dist/adapters/postgresql/schemas/cron.js.map +1 -1
  67. package/dist/adapters/postgresql/schemas/extensions.d.ts +168 -73
  68. package/dist/adapters/postgresql/schemas/extensions.d.ts.map +1 -1
  69. package/dist/adapters/postgresql/schemas/extensions.js +179 -62
  70. package/dist/adapters/postgresql/schemas/extensions.js.map +1 -1
  71. package/dist/adapters/postgresql/schemas/index.d.ts +5 -5
  72. package/dist/adapters/postgresql/schemas/index.d.ts.map +1 -1
  73. package/dist/adapters/postgresql/schemas/index.js +9 -7
  74. package/dist/adapters/postgresql/schemas/index.js.map +1 -1
  75. package/dist/adapters/postgresql/schemas/jsonb.d.ts +94 -42
  76. package/dist/adapters/postgresql/schemas/jsonb.d.ts.map +1 -1
  77. package/dist/adapters/postgresql/schemas/jsonb.js +101 -30
  78. package/dist/adapters/postgresql/schemas/jsonb.js.map +1 -1
  79. package/dist/adapters/postgresql/schemas/monitoring.d.ts +28 -11
  80. package/dist/adapters/postgresql/schemas/monitoring.d.ts.map +1 -1
  81. package/dist/adapters/postgresql/schemas/monitoring.js +49 -24
  82. package/dist/adapters/postgresql/schemas/monitoring.js.map +1 -1
  83. package/dist/adapters/postgresql/schemas/partitioning.d.ts +15 -11
  84. package/dist/adapters/postgresql/schemas/partitioning.d.ts.map +1 -1
  85. package/dist/adapters/postgresql/schemas/partitioning.js +17 -13
  86. package/dist/adapters/postgresql/schemas/partitioning.js.map +1 -1
  87. package/dist/adapters/postgresql/schemas/performance.d.ts +62 -31
  88. package/dist/adapters/postgresql/schemas/performance.d.ts.map +1 -1
  89. package/dist/adapters/postgresql/schemas/performance.js +86 -24
  90. package/dist/adapters/postgresql/schemas/performance.js.map +1 -1
  91. package/dist/adapters/postgresql/schemas/postgis.d.ts +20 -0
  92. package/dist/adapters/postgresql/schemas/postgis.d.ts.map +1 -1
  93. package/dist/adapters/postgresql/schemas/postgis.js +20 -3
  94. package/dist/adapters/postgresql/schemas/postgis.js.map +1 -1
  95. package/dist/adapters/postgresql/schemas/schema-mgmt.d.ts +35 -23
  96. package/dist/adapters/postgresql/schemas/schema-mgmt.d.ts.map +1 -1
  97. package/dist/adapters/postgresql/schemas/schema-mgmt.js +69 -26
  98. package/dist/adapters/postgresql/schemas/schema-mgmt.js.map +1 -1
  99. package/dist/adapters/postgresql/schemas/stats.d.ts +33 -20
  100. package/dist/adapters/postgresql/schemas/stats.d.ts.map +1 -1
  101. package/dist/adapters/postgresql/schemas/stats.js +36 -20
  102. package/dist/adapters/postgresql/schemas/stats.js.map +1 -1
  103. package/dist/adapters/postgresql/schemas/text-search.d.ts +8 -5
  104. package/dist/adapters/postgresql/schemas/text-search.d.ts.map +1 -1
  105. package/dist/adapters/postgresql/schemas/text-search.js +15 -5
  106. package/dist/adapters/postgresql/schemas/text-search.js.map +1 -1
  107. package/dist/adapters/postgresql/tools/admin.d.ts.map +1 -1
  108. package/dist/adapters/postgresql/tools/admin.js +211 -140
  109. package/dist/adapters/postgresql/tools/admin.js.map +1 -1
  110. package/dist/adapters/postgresql/tools/backup/dump.d.ts.map +1 -1
  111. package/dist/adapters/postgresql/tools/backup/dump.js +410 -387
  112. package/dist/adapters/postgresql/tools/backup/dump.js.map +1 -1
  113. package/dist/adapters/postgresql/tools/backup/planning.d.ts.map +1 -1
  114. package/dist/adapters/postgresql/tools/backup/planning.js +175 -172
  115. package/dist/adapters/postgresql/tools/backup/planning.js.map +1 -1
  116. package/dist/adapters/postgresql/tools/citext.d.ts.map +1 -1
  117. package/dist/adapters/postgresql/tools/citext.js +221 -163
  118. package/dist/adapters/postgresql/tools/citext.js.map +1 -1
  119. package/dist/adapters/postgresql/tools/core/convenience.d.ts +9 -1
  120. package/dist/adapters/postgresql/tools/core/convenience.d.ts.map +1 -1
  121. package/dist/adapters/postgresql/tools/core/convenience.js +96 -9
  122. package/dist/adapters/postgresql/tools/core/convenience.js.map +1 -1
  123. package/dist/adapters/postgresql/tools/core/error-helpers.d.ts +48 -0
  124. package/dist/adapters/postgresql/tools/core/error-helpers.d.ts.map +1 -0
  125. package/dist/adapters/postgresql/tools/core/error-helpers.js +256 -0
  126. package/dist/adapters/postgresql/tools/core/error-helpers.js.map +1 -0
  127. package/dist/adapters/postgresql/tools/core/health.d.ts.map +1 -1
  128. package/dist/adapters/postgresql/tools/core/health.js +23 -6
  129. package/dist/adapters/postgresql/tools/core/health.js.map +1 -1
  130. package/dist/adapters/postgresql/tools/core/indexes.d.ts.map +1 -1
  131. package/dist/adapters/postgresql/tools/core/indexes.js +45 -4
  132. package/dist/adapters/postgresql/tools/core/indexes.js.map +1 -1
  133. package/dist/adapters/postgresql/tools/core/objects.d.ts.map +1 -1
  134. package/dist/adapters/postgresql/tools/core/objects.js +104 -85
  135. package/dist/adapters/postgresql/tools/core/objects.js.map +1 -1
  136. package/dist/adapters/postgresql/tools/core/query.d.ts.map +1 -1
  137. package/dist/adapters/postgresql/tools/core/query.js +100 -42
  138. package/dist/adapters/postgresql/tools/core/query.js.map +1 -1
  139. package/dist/adapters/postgresql/tools/core/schemas.d.ts +52 -25
  140. package/dist/adapters/postgresql/tools/core/schemas.d.ts.map +1 -1
  141. package/dist/adapters/postgresql/tools/core/schemas.js +55 -25
  142. package/dist/adapters/postgresql/tools/core/schemas.js.map +1 -1
  143. package/dist/adapters/postgresql/tools/core/tables.d.ts.map +1 -1
  144. package/dist/adapters/postgresql/tools/core/tables.js +74 -30
  145. package/dist/adapters/postgresql/tools/core/tables.js.map +1 -1
  146. package/dist/adapters/postgresql/tools/cron.d.ts.map +1 -1
  147. package/dist/adapters/postgresql/tools/cron.js +274 -179
  148. package/dist/adapters/postgresql/tools/cron.js.map +1 -1
  149. package/dist/adapters/postgresql/tools/jsonb/advanced.d.ts.map +1 -1
  150. package/dist/adapters/postgresql/tools/jsonb/advanced.js +372 -284
  151. package/dist/adapters/postgresql/tools/jsonb/advanced.js.map +1 -1
  152. package/dist/adapters/postgresql/tools/jsonb/basic.d.ts.map +1 -1
  153. package/dist/adapters/postgresql/tools/jsonb/basic.js +617 -398
  154. package/dist/adapters/postgresql/tools/jsonb/basic.js.map +1 -1
  155. package/dist/adapters/postgresql/tools/kcache.d.ts.map +1 -1
  156. package/dist/adapters/postgresql/tools/kcache.js +282 -220
  157. package/dist/adapters/postgresql/tools/kcache.js.map +1 -1
  158. package/dist/adapters/postgresql/tools/ltree.d.ts.map +1 -1
  159. package/dist/adapters/postgresql/tools/ltree.js +126 -35
  160. package/dist/adapters/postgresql/tools/ltree.js.map +1 -1
  161. package/dist/adapters/postgresql/tools/monitoring.d.ts.map +1 -1
  162. package/dist/adapters/postgresql/tools/monitoring.js +59 -40
  163. package/dist/adapters/postgresql/tools/monitoring.js.map +1 -1
  164. package/dist/adapters/postgresql/tools/partitioning.d.ts.map +1 -1
  165. package/dist/adapters/postgresql/tools/partitioning.js +150 -15
  166. package/dist/adapters/postgresql/tools/partitioning.js.map +1 -1
  167. package/dist/adapters/postgresql/tools/partman/management.d.ts.map +1 -1
  168. package/dist/adapters/postgresql/tools/partman/management.js +12 -5
  169. package/dist/adapters/postgresql/tools/partman/management.js.map +1 -1
  170. package/dist/adapters/postgresql/tools/partman/operations.d.ts.map +1 -1
  171. package/dist/adapters/postgresql/tools/partman/operations.js +135 -22
  172. package/dist/adapters/postgresql/tools/partman/operations.js.map +1 -1
  173. package/dist/adapters/postgresql/tools/performance/analysis.d.ts.map +1 -1
  174. package/dist/adapters/postgresql/tools/performance/analysis.js +264 -160
  175. package/dist/adapters/postgresql/tools/performance/analysis.js.map +1 -1
  176. package/dist/adapters/postgresql/tools/performance/explain.d.ts.map +1 -1
  177. package/dist/adapters/postgresql/tools/performance/explain.js +61 -21
  178. package/dist/adapters/postgresql/tools/performance/explain.js.map +1 -1
  179. package/dist/adapters/postgresql/tools/performance/monitoring.d.ts.map +1 -1
  180. package/dist/adapters/postgresql/tools/performance/monitoring.js +44 -7
  181. package/dist/adapters/postgresql/tools/performance/monitoring.js.map +1 -1
  182. package/dist/adapters/postgresql/tools/performance/optimization.d.ts.map +1 -1
  183. package/dist/adapters/postgresql/tools/performance/optimization.js +92 -81
  184. package/dist/adapters/postgresql/tools/performance/optimization.js.map +1 -1
  185. package/dist/adapters/postgresql/tools/performance/stats.d.ts.map +1 -1
  186. package/dist/adapters/postgresql/tools/performance/stats.js +128 -37
  187. package/dist/adapters/postgresql/tools/performance/stats.js.map +1 -1
  188. package/dist/adapters/postgresql/tools/pgcrypto.d.ts.map +1 -1
  189. package/dist/adapters/postgresql/tools/pgcrypto.js +242 -87
  190. package/dist/adapters/postgresql/tools/pgcrypto.js.map +1 -1
  191. package/dist/adapters/postgresql/tools/postgis/advanced.d.ts.map +1 -1
  192. package/dist/adapters/postgresql/tools/postgis/advanced.js +293 -201
  193. package/dist/adapters/postgresql/tools/postgis/advanced.js.map +1 -1
  194. package/dist/adapters/postgresql/tools/postgis/basic.d.ts.map +1 -1
  195. package/dist/adapters/postgresql/tools/postgis/basic.js +359 -249
  196. package/dist/adapters/postgresql/tools/postgis/basic.js.map +1 -1
  197. package/dist/adapters/postgresql/tools/postgis/standalone.d.ts.map +1 -1
  198. package/dist/adapters/postgresql/tools/postgis/standalone.js +135 -51
  199. package/dist/adapters/postgresql/tools/postgis/standalone.js.map +1 -1
  200. package/dist/adapters/postgresql/tools/schema.d.ts.map +1 -1
  201. package/dist/adapters/postgresql/tools/schema.js +515 -226
  202. package/dist/adapters/postgresql/tools/schema.js.map +1 -1
  203. package/dist/adapters/postgresql/tools/stats/advanced.d.ts.map +1 -1
  204. package/dist/adapters/postgresql/tools/stats/advanced.js +515 -476
  205. package/dist/adapters/postgresql/tools/stats/advanced.js.map +1 -1
  206. package/dist/adapters/postgresql/tools/stats/basic.d.ts.map +1 -1
  207. package/dist/adapters/postgresql/tools/stats/basic.js +302 -293
  208. package/dist/adapters/postgresql/tools/stats/basic.js.map +1 -1
  209. package/dist/adapters/postgresql/tools/text.d.ts.map +1 -1
  210. package/dist/adapters/postgresql/tools/text.js +398 -220
  211. package/dist/adapters/postgresql/tools/text.js.map +1 -1
  212. package/dist/adapters/postgresql/tools/transactions.d.ts.map +1 -1
  213. package/dist/adapters/postgresql/tools/transactions.js +157 -50
  214. package/dist/adapters/postgresql/tools/transactions.js.map +1 -1
  215. package/dist/adapters/postgresql/tools/vector/advanced.d.ts.map +1 -1
  216. package/dist/adapters/postgresql/tools/vector/advanced.js +70 -38
  217. package/dist/adapters/postgresql/tools/vector/advanced.js.map +1 -1
  218. package/dist/adapters/postgresql/tools/vector/basic.d.ts +8 -0
  219. package/dist/adapters/postgresql/tools/vector/basic.d.ts.map +1 -1
  220. package/dist/adapters/postgresql/tools/vector/basic.js +194 -82
  221. package/dist/adapters/postgresql/tools/vector/basic.js.map +1 -1
  222. package/dist/cli/args.d.ts +2 -0
  223. package/dist/cli/args.d.ts.map +1 -1
  224. package/dist/cli/args.js +15 -0
  225. package/dist/cli/args.js.map +1 -1
  226. package/dist/cli.js +7 -6
  227. package/dist/cli.js.map +1 -1
  228. package/dist/codemode/api.d.ts.map +1 -1
  229. package/dist/codemode/api.js +4 -3
  230. package/dist/codemode/api.js.map +1 -1
  231. package/dist/constants/ServerInstructions.d.ts +1 -1
  232. package/dist/constants/ServerInstructions.d.ts.map +1 -1
  233. package/dist/constants/ServerInstructions.js +76 -34
  234. package/dist/constants/ServerInstructions.js.map +1 -1
  235. package/dist/filtering/ToolConstants.d.ts +29 -13
  236. package/dist/filtering/ToolConstants.d.ts.map +1 -1
  237. package/dist/filtering/ToolConstants.js +44 -27
  238. package/dist/filtering/ToolConstants.js.map +1 -1
  239. package/dist/utils/logger.js +2 -2
  240. package/dist/utils/logger.js.map +1 -1
  241. package/dist/utils/progress-utils.js +1 -1
  242. package/dist/utils/progress-utils.js.map +1 -1
  243. package/package.json +13 -9
@@ -9,5 +9,5 @@
9
9
  * - Removed redundant alias documentation (already in tool schemas)
10
10
  * - Semantic tagging for high-signal guidance
11
11
  */
12
- export declare const SERVER_INSTRUCTIONS = "# postgres-mcp Code Mode\n\n## \u26A0\uFE0F Critical Gotchas\n\n1. **Transactions**: `pg.transactions.execute({statements: [{sql: \"...\"}]})` auto-commits on success, auto-rollbacks on error. To join existing transaction: `{transactionId: txId, statements: [...]}` (no auto-commit, caller controls)\n2. **pg_write_query**: \u26D4 Throws for SELECT\u2014use `pg_read_query` for SELECT statements\n3. **pg_upsert/pg_create_table**: `schema.table` format auto-parses (e.g., `'myschema.users'` \u2192 schema: 'myschema', table: 'users')\n4. **pg_create_table columns**: `notNull`, `defaultValue` (string literals auto-quoted; numbers/booleans auto-coerced; `now()` \u2192 `CURRENT_TIMESTAMP`), `check`, `references` (object or string `\"table(column)\"` syntax)\n5. **pg_create_table constraints**: `constraints` array only accepts `{type: 'unique'|'check'}`. Primary keys: use `column.primaryKey` or top-level `primaryKey: ['col1', 'col2']`\n6. **pg_create_index expression**: Columns can be expressions like `LOWER(name)` or `UPPER(email)`\u2014auto-detected. \u26A0\uFE0F Cast syntax (`::`) requires raw SQL via `pg_write_query`\n7. **pg_list_objects type**: Use `type` (singular string) or `types` (array). Auto-converts: `{type: 'table'}` \u2261 `{types: ['table']}`\n8. **pg_object_details**: Accepts: `name`, `objectName`, `object`, or `table`. Use `type`/`objectType` for type hint (supports: table, view, materialized_view, partitioned_table, function, sequence, index)\n9. **pg_exists optional WHERE**: `where`/`condition`/`filter` is optional. Without it, checks if table has any rows\n10. **pg_describe_table**: Returns columns, foreignKeys, primaryKey\u2014use `pg_get_indexes` separately for index details\n11. **pg_vector_insert updateExisting**: Uses direct UPDATE (avoids NOT NULL constraint issues vs INSERT mode)\n12. **pg_get_indexes without table**: Returns ALL database indexes (potentially large). 
Use `table` param for specific table\n13. **pg_upsert/pg_batch_insert RETURNING**: `returning` param must be array of column names: `[\"id\", \"name\"]`. \u26D4 `\"*\"` wildcard not supported\n14. **Small tables**: Optimizer correctly uses Seq Scan for <1000 rows\u2014this is expected behavior\n\n## \uD83D\uDD04 Response Structures\n\n| Tool | Returns | Notes |\n|------|---------|-------|\n| `pg_read_query` | `{rows, rowCount, fields?}` | `fields` contains column metadata (name, dataTypeID) |\n| `pg_write_query` | `{rowsAffected, affectedRows, rows?}` | `rows` only with RETURNING clause. \u26D4 Throws for SELECT |\n| `pg_upsert` | `{operation, rowsAffected, rowCount, rows?}` | `operation: 'insert'|'update'`. `rows` only with RETURNING clause |\n| `pg_batch_insert` | `{rowsAffected, affectedRows, insertedCount, rows?}` | Empty objects use DEFAULT VALUES. \u26A0\uFE0F BIGINT > 2^53 loses precision |\n| `pg_count` | `{count: N}` | Use `params` for placeholders: `where: 'id=$1', params: [5]`. DISTINCT: use `pg_read_query` |\n| `pg_exists` | `{exists: bool, mode, hint?}` | `params` for placeholders. `mode: 'filtered'|'any_rows'` |\n| `pg_get_indexes` | `{indexes, count, totalCount?}` | Default `limit: 100` without `table`. Use `schema`/`limit` to filter. Index objects have `name`, `type`, `columns` |\n| `pg_list_objects` | `{objects, count, totalCount, byType}` | Use `limit` to cap results, `type`/`types` to filter |\n| `pg_object_details` | `{name, schema, type, returnType?, ...}` | Functions: `returnType` alias. Views/Mat. views: `definition` |\n| `pg_analyze_db_health` | `{cacheHitRatio: {ratio, heap, index, status}}` | `ratio` = primary numeric %. `bloat` available |\n| `pg_describe_table` | `{columns, indexes, constraints, foreignKeys}` | Columns include `notNull` (alias for `!nullable`), `foreignKey`. `constraints` includes PK, UNIQUE, CHECK, NOT NULL. 
\u26A0\uFE0F `rowCount: -1` = no statistics (run ANALYZE) |\n| `pg_analyze_query_indexes` | `{plan, issues, recommendations}` | `verbosity`: 'summary' (default) or 'full'. Summary mode returns condensed plan |\n| `pg_list_tables` | `{tables, count}` | Use `schema` to filter, `limit` to cap results |\n| List operations | `{items, count}` | Access via `result.tables`, `result.views`, etc. |\n| `pg_jsonb_agg groupBy` | `{result: [{group_key, items}], count, grouped: true}` | Without groupBy: `{result: [...], count, grouped: false}` |\n| `pg_vector_aggregate` | `{average_vector, count}` or `{groups: [{group_key, average_vector, count}]}` | Without/with `groupBy` |\n\n## API Mapping\n\n`pg_group_action` \u2192 `pg.group.action()` (group prefixes dropped: `pg_jsonb_extract` \u2192 `pg.jsonb.extract()`)\n\n**Top-Level Core Aliases**: All starter tools available directly: `pg.readQuery()`, `pg.writeQuery()`, `pg.listTables()`, `pg.describeTable()`, `pg.createTable()`, `pg.dropTable()`, `pg.count()`, `pg.exists()`, `pg.upsert()`, `pg.batchInsert()`, `pg.truncate()`, `pg.createIndex()`, `pg.dropIndex()`, `pg.getIndexes()`, `pg.listObjects()`, `pg.objectDetails()`, `pg.listExtensions()`, `pg.analyzeDbHealth()`, `pg.analyzeQueryIndexes()`, `pg.analyzeWorkloadIndexes()`\n\n**Positional args work**: `readQuery(\"SELECT...\")`, `exists(\"users\", \"id=1\")`, `createIndex(\"users\", [\"email\"])`\n\n**Discovery**: `pg.help()` returns `{group: methods[]}` mapping (e.g., `{core: ['readQuery', ...], jsonb: [...]}`). `pg.core.help()`, `pg.jsonb.help()` for group-specific methods.\n\n## Format Auto-Resolution\n\n- **Schema.Table**: `'public.users'` auto-parses to `{schema: 'public', table: 'users'}`\n- **JSONB Paths**: Both `'a.b.c'` (string) and `['a','b','c']` (array) work. 
Use array for literal dots: `[\"key.with.dots\"]`\n- **Aliases**: Common parameter variations resolve automatically (e.g., `query`/`sql`, `table`/`tableName`)\n\n---\n\n## Vector Tools\n\n\u26A0\uFE0F **Large Vectors**: Direct MCP tool calls may truncate vectors >256 dimensions due to JSON-RPC message size limits. For vectors \u2265256 dimensions (e.g., OpenAI 1536-dim, local 384-dim), use Code Mode: `await pg.vector.search({table, column, vector, limit})`\n\n- `pg_vector_search`: Supports `schema.table` format (auto-parsed). Returns `{results: [...], count, metric}`. Use `select: [\"id\", \"name\"]` to include identifying columns. Without select, only returns distance. `filter` = `where`. \u26A0\uFE0F Vectors read from DB are strings\u2014parse before passing: `vec.replace(/^\\[|\\]$/g, '').split(',').map(Number)`\n- `pg_vector_insert`: Supports `schema.table` format (auto-parsed). Use `updateExisting` + `conflictColumn` + `conflictValue` for UPDATE mode. `additionalColumns` is applied in both INSERT and UPDATE modes\n- `pg_vector_batch_insert`: `vectors` expects `[{vector: [...], data?: {...}}]` objects, not raw arrays\n- `pg_vector_normalize`: Returns `{normalized: [...], magnitude: N}`. Note: `magnitude` is the **original** vector length (not 1)\n- `pg_vector_aggregate`: Supports `schema.table` format (auto-parsed). \u26D4 Validates column is vector type. Returns `{average_vector: {preview, dimensions, truncated}, count}` or `{groups: [{group_key, average_vector, count}]}` with groupBy. \u26A0\uFE0F `groupBy` only supports simple column names (not expressions)\n- `pg_vector_dimension_reduce`: Direct mode returns `{reduced: [...], originalDimensions, targetDimensions}`. Table mode returns `{rows: [{id, original_dimensions, reduced}], processedCount, summarized}`. Default `summarize: true` in table mode returns compact `{preview, dimensions, truncated}` format. 
Use `summarize: false` for full vectors\n- `pg_vector_distance`: Calculate distance between two vectors. `metric`: 'l2' (default), 'cosine', 'inner_product'. Returns `{distance, metric}`\n- `pg_vector_cluster`: `clusters` = `k`. Returns centroids with `{preview, dimensions, truncated}` format for large vectors (>10 dims)\u2014use `pg_vector_distance` to assign rows\n- `pg_vector_create_index`: Use `type` (or alias `method`) with values 'ivfflat' or 'hnsw'. IVFFlat: `lists` param. HNSW: `m`, `efConstruction` params\n- `pg_vector_performance`: Auto-generates testVector from first row if omitted. Returns `testVectorSource: 'auto-generated'|'user-provided'`\n- `pg_vector_validate`: Returns `{valid: bool, vectorDimensions}`. Empty vector `[]` returns `{valid: true, vectorDimensions: 0}`\n- \u26D4 `pg_vector_embed`: Demo only (hash-based). Use OpenAI/Cohere for production.\n- `pg_hybrid_search`: Supports `schema.table` format (auto-parsed). Combines vector similarity and full-text search with weighted scoring. Code mode alias: `pg.hybridSearch()` \u2192 `pg.vector.hybridSearch()`\n- \uD83D\uDCDD **Error Handling**: Vector tools return `{success: false, error: \"...\", suggestion: \"...\"}` for validation/semantic errors (dimension mismatch, non-vector column, table not found). Check `success` field before processing results.\n\n## JSONB Tools\n\n- `pg_jsonb_extract`: Returns null if path doesn't exist\n- `pg_jsonb_insert`: Index -1 inserts BEFORE last element; use `insertAfter: true` to append. \u26A0\uFE0F Use array format `[-1]` not string `\"[-1]\"` for negative indices\n- `pg_jsonb_set`: `createMissing=true` creates full nested paths; initializes NULL columns to `{}`. Empty path (`''` or `[]`) replaces entire column value\n- `pg_jsonb_strip_nulls`: \u26A0\uFE0F Requires `where`/`filter` clause\u2014write operations must be targeted. Use `preview: true` to see changes first\n- `pg_jsonb_agg`: Supports AS aliases in select: `[\"id\", \"metadata->>'name' AS name\"]`. 
\u26A0\uFE0F `->>` returns text\u2014use `->` to preserve JSON types\n- `pg_jsonb_object`: Use `data`, `object`, or `pairs` parameter: `{data: {name: \"John\", age: 30}}`. Returns `{object: {...}}`\n- `pg_jsonb_normalize`: `flatten` doesn't descend into arrays; `keys` returns text (use `pairs` for JSON types)\n- `pg_jsonb_stats`: Returns column-level statistics. `topKeysLimit` controls key count (default: 20). \u26A0\uFE0F `typeDistribution` null type = SQL NULL columns (entire column NULL, not JSON `null` literal). Use `sqlNullCount` for explicit count\n- \u26D4 **Object-only tools**: `diff`, `merge`, `keys`, `indexSuggest`, `securityScan`, `stats`\u2014topKeys require JSONB objects, throw descriptive errors for arrays\n- \u26D4 **Array-only tools**: `insert`\u2014requires JSONB arrays, throws errors for objects\n- \uD83D\uDCDD `normalize` modes: `pairs`/`keys`/`flatten` for objects; `array` for arrays\n\n**Top-Level Aliases**: `pg.jsonbExtract()`, `pg.jsonbSet()`, `pg.jsonbInsert()`, `pg.jsonbDelete()`, `pg.jsonbContains()`, `pg.jsonbPathQuery()`, `pg.jsonbAgg()`, `pg.jsonbObject()`, `pg.jsonbArray()`, `pg.jsonbKeys()`, `pg.jsonbStripNulls()`, `pg.jsonbTypeof()`, `pg.jsonbValidatePath()`, `pg.jsonbMerge()`, `pg.jsonbNormalize()`, `pg.jsonbDiff()`, `pg.jsonbIndexSuggest()`, `pg.jsonbSecurityScan()`, `pg.jsonbStats()`\n\n\n## Stats Tools\n\n- All stats tools support `schema.table` format (auto-parsed, embedded schema takes priority over explicit `schema` param)\n- `timeSeries`: Both `timeColumn` (must be timestamp/date) and `valueColumn` (must be numeric) are validated upfront with clear error messages. Aliases: `time`\u2192`timeColumn`, `value`\u2192`valueColumn`. `interval` accepts: `second`, `minute`, `hour`, `day`, `week`, `month`, `year` (keywords, PostgreSQL format, or plurals). Default `limit: 100` time buckets. Use `limit: 0` for no limit. Returns `truncated` and `totalCount` indicators when default limit is applied. 
**groupBy payloads**: Default `groupLimit: 20` groups. Returns `truncated` + `totalGroupCount` when groups are limited. Use `groupLimit: 0` for all groups\n- `correlation`: Use `column1`/`column2` or aliases `x`/`y` for column names\n- `distribution`: Returns `skewness`, `kurtosis` (excess). `buckets` must be > 0. **groupBy payloads**: Default `groupLimit: 20` groups (prevents large payloads with many histogram buckets per group). Returns `truncated` + `totalGroupCount` when groups are limited. Use `groupLimit: 0` for all groups\n- `sampling`: Defaults to `random` method with 20 rows (optimized for LLM context). `sampleSize` always takes precedence over `percentage`. \u26A0\uFE0F `percentage` param only works with `bernoulli`/`system` methods\u2014ignored for default `random` method. Default limit of 100 rows applied to `bernoulli`/`system` with `percentage` to prevent large payloads. Returns `truncated` and `totalSampled` when TABLESAMPLE returns more rows than limit\n- `percentiles`: Accepts 0-1 or 0-100 (auto-normalized). \u26A0\uFE0F Use consistent scale\u2014mixing (e.g., `[0.1, 50]`) produces unexpected keys and returns a `warning` field explaining the issue. Empty array \u2192 defaults [0.25, 0.5, 0.75]\n- `hypothesis`: Returns nested `results` object containing `pValue` (two-tailed), `testStatistic`, `interpretation`, `sampleMean`, `sampleStdDev`. Access via `hyp.results.pValue`. Use `populationStdDev` for z-test, otherwise defaults to t-test\n- `regression`: Use `xColumn`/`yColumn`, aliases `x`/`y`, or `column1`/`column2` (for consistency with correlation). Returns nested `regression` object containing `slope`, `intercept`, `rSquared`, `equation`, `avgX`, `avgY`, `sampleSize`. 
Access via `reg.regression.slope`\n- \u26A0\uFE0F WARNING: `sampling` with `system` method unreliable for small tables\u2014use `bernoulli` or `random`\n\n**Top-Level Aliases**: `pg.descriptive()`, `pg.percentiles()`, `pg.correlation()`, `pg.regression()`, `pg.timeSeries()`, `pg.distribution()`, `pg.hypothesis()`, `pg.sampling()`\n\n## Performance Tools\n\nCore (20 methods): `explain()`, `explainAnalyze()`, `explainBuffers()`, `indexStats()`, `tableStats()`, `statStatements()`, `statActivity()`, `locks()`, `bloatCheck()`, `cacheHitRatio()`, `seqScanTables()`, `indexRecommendations()`, `queryPlanCompare()`, `baseline()`, `connectionPoolOptimize()`, `partitionStrategySuggest()`, `unusedIndexes()`, `duplicateIndexes()`, `vacuumStats()`, `queryPlanStats()`\n\nWrappers (3): `blockingQueries()`\u2192`locks({showBlocked:true})`, `longRunningQueries({ seconds | minDuration }?)` filters by duration (returns `statActivity` format), `analyzeTable({ table })` runs ANALYZE (accepts `schema.table` format)\n\n- `explain({ sql, format?, params? })`: Supports `format: 'text'|'json'|'yaml'|'xml'`. Default: text. Use `params: [value]` for `$1, $2` placeholders\n- `explainAnalyze({ sql, format?, params? })`: Same format/params options as explain\n- `explainBuffers({ sql, params? })`: Always returns JSON format (includes buffer statistics)\n- `indexRecommendations({ sql?, params? })`: Pass `params: [value]` for parameterized queries (e.g., `sql: 'SELECT * FROM orders WHERE id = $1', params: [5]`)\n- `queryPlanCompare({ query1, query2, params1?, params2? })`: Compare two query plans. 
Use `params1`/`params2` for parameterized queries\n- `partitionStrategySuggest({ table })`: Accepts `schema.table` format (auto-parsed) or separate `table` + `schema` params\n- \u26A0\uFE0F **Data Type Awareness**: Query literals must match column types exactly\u2014`WHERE sensor_id = 1` (integer), not `'sensor_1'` (string)\n\nAliases: `cacheStats`\u2192`cacheHitRatio`, `queryStats`\u2192`statStatements`, `activity`\u2192`statActivity`, `vacuum`\u2192`vacuumStats`, `indexUsage`\u2192`indexStats`, `bloatEstimate`/`bloat`\u2192`bloatCheck`, `runningQueries`\u2192`longRunningQueries`\n\n\uD83D\uDCE6 **AI-Optimized Payloads**: Tools return limited results by default to reduce context size:\n- `indexStats({ limit? })`: Default 50 rows. Returns `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `tableStats({ limit? })`: Default 50 rows. Returns `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `vacuumStats({ limit? })`: Default 50 rows. Same truncation indicators. Use `limit: 0` for all\n- `statStatements({ limit?, orderBy? })`: Default 20 rows. Returns `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `unusedIndexes({ limit?, summary? })`: Default 20 rows. Use `summary: true` for aggregated stats by schema\n- `queryPlanStats({ limit?, truncateQuery? })`: Default 20 rows, queries truncated to 100 chars. Use `truncateQuery: 0` for full text\n\n\uD83D\uDCCD **Code Mode Note**: `pg_performance_baseline` \u2192 `pg.performance.baseline()` (not `performanceBaseline`). 
`indexRecommendations` accepts `query` alias for `sql`\n\n**Top-Level Aliases**: `pg.explain()`, `pg.explainAnalyze()`, `pg.cacheHitRatio()`, `pg.indexStats()`, `pg.tableStats()`, `pg.indexRecommendations()`, `pg.bloatCheck()`, `pg.vacuumStats()`, `pg.unusedIndexes()`, `pg.duplicateIndexes()`, `pg.seqScanTables()`\n\n## Monitoring Tools\n\nCore: `databaseSize()`, `tableSizes()`, `connectionStats()`, `showSettings()`, `capacityPlanning()`, `uptime()`, `serverVersion()`, `recoveryStatus()`, `replicationStatus()`, `resourceUsageAnalyze()`, `alertThresholdSet()`\n\n- `databaseSize()`: Returns `{bytes: number, size: string}`. Optional `database` param for specific db\n- `tableSizes({ limit?, schema? })`: Default limit 50. Returns `{tables: [...], count, truncated?, totalCount?}`. `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `connectionStats()`: Returns `{byDatabaseAndState, totalConnections: number, maxConnections: number}`\n- `showSettings({ setting?, limit? })`: Default limit 50 when no pattern. Returns `{settings: [...], count, truncated?, totalCount?}`. Accepts `pattern`, `setting`, or `name`. Exact names auto-match; `%` for LIKE patterns\n- `capacityPlanning({days: 90})`: `days` = `projectionDays`. Returns `{current, growth, projection, recommendations}` with numeric fields. 
\u26D4 Negative days rejected\n- `uptime()`: Returns `{start_time: string, uptime: {days, hours, minutes, seconds, milliseconds}}`\n- `serverVersion()`: Returns `{full_version: string, version: string, version_num: number}`\n- `recoveryStatus()`: Returns `{in_recovery: boolean, last_replay_timestamp: string|null}`\n- `replicationStatus()`: Returns `{role: 'primary'|'replica', replicas: [...]}` for primary, or `{role: 'replica', replay_lag, ...}` for replica\n- `resourceUsageAnalyze()`: Returns `{backgroundWriter, checkpoints, connectionDistribution, bufferUsage, activity, analysis}` with all counts as numbers\n- `alertThresholdSet({metric?: 'connection_usage'})`: Returns recommended thresholds. \u26D4 Invalid metric throws validation error. Valid metrics: connection_usage, cache_hit_ratio, replication_lag, dead_tuples, long_running_queries, lock_wait_time\n\n\uD83D\uDCE6 **AI-Optimized Payloads**: Tools return limited results by default to reduce context size:\n- `tableSizes({ limit? })`: Default 50 rows. Returns `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `showSettings({ limit? })`: Default 50 rows when no pattern specified. 
Use `limit: 0` for all or specify a pattern\n\nAliases: `tables`\u2192`tableSizes`, `connections`\u2192`connectionStats`, `settings`/`config`\u2192`showSettings`, `alerts`/`thresholds`\u2192`alertThresholdSet`\n\n**Top-Level Aliases**: `pg.databaseSize()`, `pg.tableSizes()`, `pg.connectionStats()`, `pg.serverVersion()`, `pg.uptime()`, `pg.showSettings()`, `pg.recoveryStatus()`, `pg.replicationStatus()`, `pg.capacityPlanning()`, `pg.resourceUsageAnalyze()`, `pg.alertThresholdSet()`\n\n## Admin Tools\n\nCore: `vacuum()`, `vacuumAnalyze()`, `analyze()`, `reindex()`, `cluster()`, `setConfig()`, `reloadConf()`, `resetStats()`, `cancelBackend()`, `terminateBackend()`\n\n- All admin tools support `schema.table` format (auto-parsed, embedded schema takes priority over explicit `schema` param)\n- `vacuum({ table?, full?, analyze?, verbose? })`: Without `table`, vacuums ALL tables. `verbose` output goes to PostgreSQL server logs\n- `reindex({ target, name?, concurrently? })`: Targets: 'table', 'index', 'schema', 'database'. `database` target defaults to current db when `name` omitted\n- `cluster()`: Without args, re-clusters all previously-clustered tables. With args, requires BOTH `table` AND `index`\n- `setConfig({ name, value, isLocal? 
})`: `isLocal: true` applies only to current transaction\n- `cancelBackend({ pid })`: Graceful query cancellation\u2014returns `{success: false}` for invalid PID (no error thrown)\n- `terminateBackend({ pid })`: Forceful connection termination\u2014use with caution\n\nAliases: `tableName`\u2192`table`, `indexName`\u2192`index`, `param`/`setting`\u2192`name`, `processId`\u2192`pid`\n\n**Top-Level Aliases**: `pg.vacuum()`, `pg.vacuumAnalyze()`, `pg.analyze()`, `pg.reindex()`, `pg.cluster()`, `pg.setConfig()`, `pg.reloadConf()`, `pg.resetStats()`, `pg.cancelBackend()`, `pg.terminateBackend()`\n\n## Backup Tools\n\nCore: `dumpTable()`, `dumpSchema()`, `copyExport()`, `copyImport()`, `createBackupPlan()`, `restoreCommand()`, `physical()`, `restoreValidate()`, `scheduleOptimize()`\n\nResponse Structures:\n- `dumpTable`: `{ddl, type, note, insertStatements?}` \u2014 `insertStatements` only with `includeData: true` (separate field from `ddl`)\n- `copyExport`: `{data, rowCount, truncated?, limit?}` \u2014 `data` contains CSV/text content. `truncated: true` + `limit` when rows returned equals applied limit (indicating more rows likely exist)\n- `copyImport`: `{command, stdinCommand, notes}` \u2014 Both file and stdin COPY commands\n- `createBackupPlan`: `{strategy: {fullBackup, walArchiving}, estimates}`\n- `restoreCommand`: `{command, warnings?, notes}` \u2014 Warnings when `database` omitted\n- `restoreValidate`: `{validationSteps: [{step, name, command?, commands?, note?}], recommendations}` \u2014 Note: `note` field only for pg_dump default type\n- `physical`: `{command, notes, requirements}`\n- `scheduleOptimize`: `{analysis, recommendation, commands}`\n\n\uD83D\uDCE6 **AI-Optimized Payloads**: `copyExport` limits results to 500 rows by default to prevent large payloads. Use `limit: 0` for all rows, or specify a custom limit.\n\n- `pg_copy_export`: Use `query`/`sql` OR `table`. \u26A0\uFE0F If both provided, `query` takes precedence with warning. 
Supports `schema.table` format (auto-parsed, takes priority over `schema` param). Format: `csv` (default, comma-delimited), `text` (tab-delimited). Both formats support `header: true` (default). \u26D4 `binary` not supported via MCP\u2014use `pg_dump_schema` for binary exports. Default `limit: 500` (use `0` for all rows). Optional `delimiter` to customize\n- `pg_dump_table`: Returns `ddl` + `insertStatements` when `includeData: true`. Supports sequences (`type: 'sequence'`), views (`type: 'view'`), and partitioned tables (`type: 'partitioned_table'` with `PARTITION BY` clause). **PRIMARY KEYS, INDEXES, CONSTRAINTS NOT included**\u2014use `pg_get_indexes`/`pg_get_constraints`. Supports `schema.table` format\n- `pg_dump_schema`: Generates pg_dump command. Optional `schema`, `table`, `filename`\n- `pg_copy_import`: Generates COPY FROM command. Supports `schema.table` format (auto-parsed, takes priority over `schema` param). `columns` array, `filePath`, `format`, `header`, `delimiter`\n- `pg_restore_command`: Include `database` parameter for complete command. Optional `schemaOnly`, `dataOnly`\n- `pg_create_backup_plan`: Generates backup strategy with cron schedule. `frequency`: 'hourly'|'daily'|'weekly', `retention` count\n- `pg_backup_physical`: Generates pg_basebackup command. `format`: 'plain'|'tar', `checkpoint`: 'fast'|'spread', `compress`: 0-9\n- `pg_restore_validate`: Generates validation commands. 
`backupType`: 'pg_dump' (default)|'pg_basebackup'\n- `pg_backup_schedule_optimize`: Analyzes database activity patterns and recommends optimal backup schedule\n\n**Top-Level Aliases**: `pg.dumpTable()`, `pg.dumpSchema()`, `pg.copyExport()`, `pg.copyImport()`, `pg.createBackupPlan()`, `pg.restoreCommand()`, `pg.restoreValidate()`, `pg.physical()`, `pg.backupPhysical()`, `pg.scheduleOptimize()`, `pg.backupScheduleOptimize()`\n\n## Text Tools\n\nDefaults: `threshold`=0.3 (use 0.1-0.2 for partial), `maxDistance`=3 (use 5+ for longer strings)\n\n- All text tools support `schema.table` format (auto-parsed, embedded schema takes priority over explicit `schema` param)\n- `pg_text_search`: Supports both `column` (singular string) and `columns` (array). Either is valid\u2014`column` auto-converts to array\n- `pg_trigram_similarity` vs `pg_similarity_search`: Both use pg_trgm. First filters by threshold; second uses set_limit() with %\n- `pg_fuzzy_match`: Levenshtein returns distance (lower=better). Soundex/metaphone return phonetic codes (exact match only). \u26D4 Invalid `method` values throw error with valid options\n- `pg_text_normalize`: Removes accents only (unaccent). Does NOT lowercase/trim\n- \uD83D\uDCCD **Table vs Standalone**: `normalize`, `sentiment`, `toVector`, `toQuery`, `searchConfig` are standalone (text input only). 
For phonetic matching: use `pg_fuzzy_match` with `method: 'soundex'|'metaphone'` (direct MCP), or `pg.text.soundex()`/`pg.text.metaphone()` (Code Mode convenience wrappers that call fuzzyMatch internally)\n\n**Top-Level Aliases**: `pg.textSearch()`, `pg.textRank()`, `pg.textHeadline()`, `pg.textNormalize()`, `pg.textSentiment()`, `pg.textToVector()`, `pg.textToQuery()`, `pg.textSearchConfig()`, `pg.textTrigramSimilarity()`, `pg.textFuzzyMatch()`, `pg.textLikeSearch()`, `pg.textRegexpMatch()`, `pg.textCreateFtsIndex()`\n\n\n## Schema Tools\n\nCore: `listSchemas()`, `createSchema()`, `dropSchema()`, `listViews()`, `createView()`, `dropView()`, `listSequences()`, `createSequence()`, `dropSequence()`, `listFunctions()`, `listTriggers()`, `listConstraints()`\n\nResponse Structures:\n- `listSchemas()`: `{schemas: string[], count}`\n- `listViews({ includeMaterialized?, truncateDefinition?, limit? })`: `{views: [{schema, name, type, definition, definitionTruncated?}], count, hasMatViews, truncatedDefinitions?, truncated, note?}`. Default `limit: 50` (use `0` for all). Default `truncateDefinition: 500` chars (use `0` for full definitions). `truncated` always included (`true`/`false`)\n- `listSequences({ schema? })`: `{sequences: [{schema, name, owned_by}], count}`. Note: `owned_by` omits `public.` prefix for sequences in public schema (e.g., `users.id` not `public.users.id`)\n- `listFunctions({ schema?, limit?, exclude? })`: `{functions: [{schema, name, arguments, returns, language, volatility}], count, limit, note?}`\n- `listTriggers({ schema?, table? })`: `{triggers: [{schema, table_name, name, timing, events, function_name, enabled}], count}`\n- `listConstraints({ schema?, table?, type? })`: `{constraints: [{schema, table_name, name, type, definition}], count}`. 
Type codes: `p`=primary_key, `f`=foreign_key, `u`=unique, `c`=check\n- `dropSchema/dropView/dropSequence`: All return `{existed: true/false}` to indicate if object existed before drop\n- `createSchema/createSequence` (with `ifNotExists`) and `createView` (with `orReplace`): Return `{alreadyExisted: true/false}` to indicate if object existed before creation\n\n- `pg_create_view`: Supports `schema.name` format (auto-parsed). Use `orReplace: true` for CREATE OR REPLACE. `checkOption`: 'cascaded', 'local', 'none'. \u26D4 OR REPLACE can add new columns but cannot rename/remove existing ones\u2014PostgreSQL limitation\n- `pg_create_sequence`: Supports `schema.name` format. Parameters: `start`, `increment`, `minValue`, `maxValue`, `cache`, `cycle`, `ownedBy`, `ifNotExists`\n- `pg_list_functions`: Default limit=500. Use `schema: 'public'`, `limit: 2000`, or `exclude: ['postgis']` to filter. \u26A0\uFE0F `exclude` filters by **schema name** AND extension-owned functions. Note: Aggressive `exclude` may return 0 results if all functions belong to excluded extensions\n\n**Discovery**: `pg.schema.help()` returns `{methods: string[], examples: string[]}` object with available methods and usage examples\n\n\n## Partitioning Tools\n\n- `pg_create_partitioned_table`: `partitionBy` case-insensitive. Supports `schema.table` format for `name` (auto-parsed). `primaryKey` accepts array (e.g., `['id', 'event_date']`). \u26D4 `primaryKey`/`unique` must include partition key\u2014throws validation error otherwise\n- `pg_create_partition`: Use `parent`/`table`/`parentTable`. `forValues` is a raw SQL string: `\"FROM ('2024-01-01') TO ('2024-07-01')\"`, `\"IN ('US', 'CA')\"`, `\"WITH (MODULUS 4, REMAINDER 0)\"`. For DEFAULT partition, use `isDefault: true`. Supports `schema.table` format for `parent` (auto-parsed)\n- `pg_attach_partition`/`pg_detach_partition`: Support `schema.table` format for `parent` and `partition` (auto-parsed). 
For DEFAULT partition, use `isDefault: true` or `forValues: \"DEFAULT\"`\n- `pg_list_partitions`: Default `limit: 50` (use `0` for all). Returns `{partitions, count, truncated, totalCount?}`. Uses `bounds` field (consistent with `pg_partition_info`)\n- `pg_partition_info`: Returns `{tableInfo, partitions, totalSizeBytes}`. Uses `bounds` field\n- Both list/info tools support `schema.table` format (auto-parsed) and accept `table`, `parent`, `parentTable`, or `name` aliases\n- \uD83D\uDCCD Code Mode: `pg.partitioning.create()` = `createPartition`, NOT `createPartitionedTable`\n\n## pg_partman Tools\n\n- `pg_partman_create_parent`: Interval uses PostgreSQL syntax ('1 day', '1 month') NOT keywords ('daily'). `startPartition` accepts 'now' shorthand for current date. Required params: `parentTable`/`table`, `controlColumn`/`control`/`column`, `interval`\n- `pg_partman_run_maintenance`: Without `parentTable`, maintains ALL partition sets. Returns `partial: true` when some tables are skipped. `orphaned` object groups orphaned configs with `count`, `tables`, and cleanup `hint`. `errors` array for other failures\n- `pg_partman_show_config`: Default `limit: 50` (use `0` for all). Returns `truncated` + `totalCount` when limited. `orphaned` flag per config. Supports `schema.table` or plain table name (auto-prefixes `public.`)\n- `pg_partman_show_partitions`: Default `limit: 50` (use `0` for all). Returns `truncated` + `totalCount` when limited. `parentTable` required. Supports `schema.table` format (auto-parsed)\n- `pg_partman_check_default`/`partition_data`: `parentTable` required. Supports `schema.table` format (auto-parsed)\n- `pg_partman_set_retention`: \u26A0\uFE0F **CAUTION: Default is DROP** \u2014 `retentionKeepTable: false` (default) = DROP partitions, `true` = detach only (safer). Pass `retention: null` to disable retention\n- `pg_partman_undo_partition`: `targetTable` MUST exist before calling. 
Requires both `parentTable` and `targetTable`/`target`\n- `pg_partman_analyze_partition_health`: Default `limit: 50` (use `0` for all). Returns `truncated` + `totalCount` when limited. `summary.overallHealth`: 'healthy'|'warnings'|'issues_found'\n- \uD83D\uDCDD **Schema Resolution**: All partman tools auto-prefix `public.` when no schema specified in `parentTable`\n- \uD83D\uDCDD **Aliases**: `parentTable` accepts `table`, `parent`, `name`. `controlColumn` accepts `control`, `column`. `targetTable` accepts `target`\n\n## pg_stat_kcache Tools\n\nCore: `createExtension()`, `queryStats()`, `topCpu()`, `topIo()`, `databaseStats()`, `resourceAnalysis()`, `reset()`\n\n- `pg_kcache_query_stats`: Default `limit: 50` (use `0` for all). Returns `truncated` + `totalCount` when limited. `orderBy`: 'total_time' (default), 'cpu_time', 'reads', 'writes'. `queryPreviewLength`: chars for query preview (default: 100, max: 500, 0 for full). \u26D4 'calls' NOT valid for orderBy\u2014use `minCalls` param\n- `pg_kcache_resource_analysis`: Default `limit: 50` (use `0` for all). Returns `truncated` + `totalCount` when limited. `minCalls`, `queryPreviewLength` supported. Classifies queries as 'CPU-bound', 'I/O-bound', or 'Balanced'\n- `pg_kcache_top_cpu`: Top CPU-consuming queries. `limit` param (default: 10)\n- `pg_kcache_top_io`: `type`/`ioType` (alias): 'reads', 'writes', 'both' (default). `limit` param (default: 10)\n- `pg_kcache_database_stats`: Aggregated CPU/IO stats per database\n- `pg_kcache_reset`: Resets pg_stat_kcache AND pg_stat_statements statistics\n\n## citext Tools\n\nCore: `createExtension()`, `convertColumn()`, `listColumns()`, `analyzeCandidates()`, `compare()`, `schemaAdvisor()`\n\n- `pg_citext_create_extension`: Enable citext extension (idempotent). Returns `{success, message, usage}`\n- `pg_citext_convert_column`: Supports `schema.table` format (auto-parsed). 
\u26D4 Only allows text-based columns (text, varchar, character varying)\u2014non-text columns return `{success: false, error, allowedTypes, suggestion}`. When views depend on column, returns `{success: false, dependentViews, hint}`\u2014drop/recreate views manually. `col` alias for `column`. Returns `{previousType}` showing original type\n- `pg_citext_list_columns`: Default `limit: 100` (use `0` for all). Returns `{columns: [{table_schema, table_name, column_name, is_nullable, column_default}], count, totalCount, truncated}`. Optional `schema`, `limit` filters\n- `pg_citext_analyze_candidates`: Default `limit: 50` (use `0` for all). Default `excludeSystemSchemas: true` filters out extension schemas (cron, topology, partman, tiger) when no `schema`/`table` filter specified\u2014use `excludeSystemSchemas: false` to include all. Returns `truncated: true` + `totalCount` when results are limited. Scans tables for TEXT/VARCHAR columns matching common patterns (email, username, name, etc.). Optional `schema`, `table`, `limit`, `excludeSystemSchemas`, `patterns` filters. Returns `{candidates, count, totalCount, truncated, summary: {highConfidence, mediumConfidence}, recommendation, patternsUsed, excludedSchemas?}`\n- `pg_citext_compare`: Test case-insensitive comparison. Returns `{value1, value2, citextEqual, textEqual, lowerEqual, extensionInstalled}`\n- `pg_citext_schema_advisor`: Supports `schema.table` format (auto-parsed). Analyzes specific table. Returns `{table, recommendations: [{column, currentType, previousType?, recommendation, confidence, reason}], summary, nextSteps}`. `tableName` alias for `table`. 
Already-citext columns include `previousType: \"text or varchar (converted)\"`\n\n**Discovery**: `pg.citext.help()` returns `{methods, methodAliases, examples}` object\n\n## ltree Tools\n\nCore: `createExtension()`, `query()`, `match()`, `subpath()`, `lca()`, `listColumns()`, `convertColumn()`, `createIndex()`\n\n- `pg_ltree_create_extension`: Enable ltree extension (idempotent). Returns `{success, message}`\n- `pg_ltree_query`: Query hierarchical relationships. Supports `schema.table` format (auto-parsed). `mode`/`type`: 'ancestors', 'descendants' (default), 'exact'. Returns `{results, count, path, mode, isPattern}`. \u26A0\uFE0F Validates column is ltree type\u2014returns clear error for non-ltree columns\n- `pg_ltree_match`: Match paths using lquery pattern syntax (`*`, `*{1,2}`, `*.label.*`). Supports `schema.table` format. `pattern`/`lquery`/`query` aliases. Returns `{results, count, pattern}`\n- `pg_ltree_subpath`: Extract portion of ltree path. `offset`/`start`/`from` and `length`/`len` aliases. Negative `offset` counts from end. \u26A0\uFE0F Returns `{success: false, error, pathDepth}` for invalid offset (validated before PostgreSQL call)\n- `pg_ltree_lca`: Find longest common ancestor of multiple paths. Requires `paths` array (min 2). Returns `{longestCommonAncestor, hasCommonAncestor: bool, paths}`\n- `pg_ltree_list_columns`: List all ltree columns in database. Optional `schema` filter. Returns `{columns: [{table_schema, table_name, column_name, is_nullable, column_default}], count}`\n- `pg_ltree_convert_column`: Convert TEXT column to ltree. Supports `schema.table` format. `col` alias for `column`. Returns `{previousType}`. \u26A0\uFE0F When views depend on column, returns `{success: false, dependentViews, hint}`\u2014drop/recreate views manually\n- `pg_ltree_create_index`: Create GiST index on ltree column. Supports `schema.table` format. Auto-generates index name if `indexName` omitted. 
Returns `{indexName, indexType: 'gist', alreadyExists?}`\n\n**Discovery**: `pg.ltree.help()` returns `{methods, aliases, examples}` object. Top-level aliases available: `pg.ltreeQuery()`, `pg.ltreeMatch()`, etc.\n\n## PostGIS Tools\n\n**Geometry Creation:**\n- `pg_geocode`: Create point geometry from lat/lng. Returns `{geojson, wkt}`. \u26A0\uFE0F Validates bounds: lat \u00B190\u00B0, lng \u00B1180\u00B0\n- `pg_geometry_column`: Add geometry column to table. `ifNotExists` returns `{alreadyExists: true}`\n- `pg_spatial_index`: Create GiST spatial index. Auto-generates name if not provided. `ifNotExists` supported\n\n**Spatial Queries:**\n- `pg_distance`: Find geometries within distance from point. Returns `{results, count}` with `distance_meters`. \u26A0\uFE0F Validates point bounds\n- `pg_bounding_box`: Find geometries within lat/lng bounding box. Use `select` array for specific columns\n- `pg_intersection`: Find geometries intersecting a WKT/GeoJSON geometry. Auto-detects SRID from column\n- `pg_point_in_polygon`: Check if point is within table polygons. Returns `{containingPolygons, count}`. \u26A0\uFE0F Validates point bounds\n\n**Geometry Operations (Table-based):**\n- `pg_buffer`: Create buffer zone around table geometries. Default limit: 50 rows. Default simplify: 10m (set `simplify: 0` to disable). Returns `truncated: true` + `totalCount` when results are truncated. Use `limit: 0` for all rows\n- `pg_geo_transform`: Transform table geometries between SRIDs. Default limit: 50 rows. Returns `truncated: true` + `totalCount` when results are truncated. Use `limit: 0` for all rows. `fromSrid`/`sourceSrid` and `toSrid`/`targetSrid` aliases\n- `pg_geo_cluster`: Spatial clustering (DBSCAN/K-Means). K-Means: If `numClusters` exceeds row count, automatically clamps to available rows with `warning` field. 
DBSCAN: Returns contextual `hints` array explaining parameter effects (e.g., \"All points formed single cluster\u2014decrease eps\") and `parameterGuide` explaining eps/minPoints trade-offs\n\n**Geometry Operations (Standalone WKT/GeoJSON):**\n- `pg_geometry_buffer`: Create buffer around WKT/GeoJSON. Returns `{buffer_geojson, buffer_wkt, distance_meters}`. Optional `simplify` param (meters) reduces polygon complexity\u2014returns `simplified`, `simplifyTolerance` when applied. \u26A0\uFE0F Returns `warning` if simplify tolerance is too high and geometry collapses to null\n- `pg_geometry_transform`: Transform WKT/GeoJSON between SRIDs. Returns `{transformed_geojson, transformed_wkt, fromSrid, toSrid}`\n- `pg_geometry_intersection`: Compute intersection of two geometries. Returns `{intersects, intersection_geojson, intersection_area_sqm}`. Normalizes SRID (4326) automatically\u2014safe to mix GeoJSON and WKT\n\n**Administration:**\n- `pg_postgis_create_extension`: Enable PostGIS extension (idempotent)\n- `pg_geo_index_optimize`: Analyze spatial indexes. Without `table` param, analyzes all spatial indexes\n\n**Code Mode Aliases:** `pg.postgis.addColumn()` \u2192 `geometryColumn`, `pg.postgis.indexOptimize()` \u2192 `geoIndexOptimize`. Note: `pg.{group}.help()` returns `{methods, aliases, examples}`\n\n## Cron Tools (pg_cron)\n\nCore: `createExtension()`, `schedule()`, `scheduleInDatabase()`, `unschedule()`, `alterJob()`, `listJobs()`, `jobRunDetails()`, `cleanupHistory()`\n\n- `pg_cron_schedule`: Schedule a cron job. `schedule` supports standard cron (`0 5 * * *`) or interval (`1 second` to `59 seconds`). \u26A0\uFE0F Interval syntax only works for 1-59 seconds\u2014for 60+ seconds, use cron syntax (e.g., `* * * * *` for every minute). Use `name`/`jobName` for identification. `command`/`sql`/`query` aliases supported. 
Note: pg_cron allows duplicate job names; use unique names to avoid confusion when unscheduling\n- `pg_cron_schedule_in_database`: Schedule job in specific database. `database`/`db` aliases. Optional `username`, `active` params\n- `pg_cron_unschedule`: Remove job by `jobId` or `jobName`. If both provided, `jobName` takes precedence (with warning)\n- `pg_cron_alter_job`: Modify existing job. Can change `schedule`, `command`, `database`, `username`, `active`. \u26D4 Non-existent jobId throws error\n- `pg_cron_list_jobs`: List all jobs. Default `limit: 50` (use `0` for all). Optional `active` boolean filter. Returns `truncated` + `totalCount` when limited. Returns `hint` when jobs have no name\n- `pg_cron_job_run_details`: View execution history. Default `limit: 50`. Optional `jobId`, `status` ('running'|'succeeded'|'failed') filters. Returns `truncated` + `totalCount` when limited. Returns `summary` with counts\n- `pg_cron_cleanup_history`: Delete old run records. `olderThanDays`/`days` param (default: 7). Optional `jobId` to target specific job\n- `pg_cron_create_extension`: Enable pg_cron extension (idempotent). Requires superuser\n\n**Discovery**: `pg.cron.help()` returns `{methods, aliases, examples}` object\n\n## pgcrypto Tools\n\nCore: `createExtension()`, `hash()`, `hmac()`, `encrypt()`, `decrypt()`, `genRandomUuid()`, `genRandomBytes()`, `genSalt()`, `crypt()`\n\n- `pg_pgcrypto_create_extension`: Enable pgcrypto extension (idempotent). Returns `{success, message}`\n- `pg_pgcrypto_hash`: Hash data using digest algorithms. `algorithm`: 'md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'. `encoding`: 'hex' (default), 'base64'. Returns `{hash, algorithm, encoding, inputLength}`\n- `pg_pgcrypto_hmac`: HMAC authentication. Same algorithms as hash. Returns `{hmac, algorithm, encoding}`. `key` param for secret\n- `pg_pgcrypto_encrypt`: PGP symmetric encryption. `data` + `password`/`key` (aliases). Optional `options` for cipher config (e.g., 'cipher-algo=aes256'). 
Returns `{encrypted, encoding: 'base64'}`\n- `pg_pgcrypto_decrypt`: Decrypt PGP-encrypted data. `encryptedData`/`data` + `password`/`key` (aliases). Returns `{decrypted, verified}`. \u26D4 Throws on wrong key/corrupt data\n- `pg_pgcrypto_gen_random_uuid`: Generate UUID v4. Optional `count` (1-100, default 1). Returns `{uuid, uuids, count}` (`uuid` convenience property for single requests)\n- `pg_pgcrypto_gen_random_bytes`: Generate random bytes. `length` (1-1024). `encoding`: 'hex' (default), 'base64'. Returns `{randomBytes, length, encoding}`\n- `pg_pgcrypto_gen_salt`: Generate salt for crypt(). `type`: 'bf' (bcrypt, recommended), 'md5', 'xdes', 'des'. Optional `iterations` for bf (4-31) or xdes. Returns `{salt, type}`\n- `pg_pgcrypto_crypt`: Hash password with salt. Use stored hash as salt for verification. Returns `{hash, algorithm}`. Verification: `crypt(password, storedHash).hash === storedHash`\n\n**Password Workflow**: 1) `genSalt({type:'bf', iterations:10})` \u2192 2) `crypt({password, salt})` \u2192 store hash \u2192 3) Verify: `crypt({password, salt: storedHash})` and compare hashes\n\n**Top-Level Aliases**: `pg.pgcryptoHash()`, `pg.pgcryptoEncrypt()`, `pg.pgcryptoDecrypt()`, `pg.pgcryptoGenRandomUuid()`, etc.\n\n**Discovery**: `pg.pgcrypto.help()` returns `{methods, aliases, examples}` object\n\n## Code Mode Sandbox\n\nNo `setTimeout`, `setInterval`, `fetch`, or network access. Use `pg.core.readQuery()` for data access.\n\n\uD83D\uDCCA **Metrics Note**: `memoryUsedMb` measures heap delta (end - start). Negative values indicate memory freed during execution (e.g., GC ran).\n\n## Transactions\n\nCore: `begin()`, `commit()`, `rollback()`, `savepoint()`, `rollbackTo()`, `release()`, `execute()`\n\n**Transaction Lifecycle:**\n- `pg_transaction_begin`: Start new transaction. Returns `{transactionId, isolationLevel, message}`. Use `transactionId` for subsequent operations\n- `pg_transaction_commit`: Commit transaction, making all changes permanent. 
`transactionId`/`tx`/`txId` aliases\n- `pg_transaction_rollback`: Rollback transaction, discarding all changes. `transactionId`/`tx`/`txId` aliases\n\n**Savepoints:**\n- `pg_transaction_savepoint`: Create savepoint within transaction. `name`/`savepoint` + `transactionId`/`tx`/`txId`\n- `pg_transaction_rollback_to`: Rollback to savepoint, restoring database state to when the savepoint was created. \u26A0\uFE0F Undoes ALL work (data changes AND savepoints) created after the target savepoint\n- `pg_transaction_release`: Release savepoint, keeping all changes since it was created. `name`/`savepoint` aliases\n\n**Atomic Execution:**\n- `pg_transaction_execute`: Execute multiple statements atomically. Two modes:\n - **Auto-commit**: Without `transactionId`\u2014auto-commits on success, auto-rollbacks on any error\n - **Join existing**: With `transactionId`/`tx`/`txId`\u2014no auto-commit, caller controls via commit/rollback\n- `statements`: Array of `{sql: \"...\", params?: [...]}` objects. \u26A0\uFE0F Each object MUST have `sql` key\n- `isolationLevel`: Optional isolation level for new transactions ('READ COMMITTED', 'REPEATABLE READ', 'SERIALIZABLE')\n\n**Response Structures:**\n- `begin`: `{transactionId, isolationLevel: 'READ COMMITTED', message}`\n- `commit/rollback`: `{success, transactionId, message}`\n- `savepoint/release/rollbackTo`: `{success, transactionId, savepoint, message}`\n- `execute`: `{success, statementsExecuted, results: [{sql, rowsAffected, rowCount, rows?}], transactionId?}`\n\n**Discovery**: `pg.transactions.help()` returns `{methods, methodAliases, examples}`";
12
+ export declare const SERVER_INSTRUCTIONS = "# postgres-mcp Code Mode\n\n## \u26A0\uFE0F Critical Gotchas\n\n1. **Transactions**: `pg.transactions.execute({statements: [{sql: \"...\"}]})` auto-commits on success, auto-rollbacks on error. To join existing transaction: `{transactionId: txId, statements: [...]}` (no auto-commit, caller controls)\n2. **pg_write_query**: \u26D4 Throws for SELECT\u2014use `pg_read_query` for SELECT statements\n3. **pg_upsert/pg_create_table**: `schema.table` format auto-parses (e.g., `'myschema.users'` \u2192 schema: 'myschema', table: 'users')\n4. **pg_create_table columns**: `notNull`, `defaultValue` (string literals auto-quoted; numbers/booleans auto-coerced; `now()` \u2192 `CURRENT_TIMESTAMP`), `check`, `references` (object or string `\"table(column)\"` syntax)\n5. **pg_create_table constraints**: `constraints` array only accepts `{type: 'unique'|'check'}`. Primary keys: use `column.primaryKey` or top-level `primaryKey: ['col1', 'col2']`\n6. **pg_create_index expression**: Columns can be expressions like `LOWER(name)` or `UPPER(email)`\u2014auto-detected. \u26A0\uFE0F Cast syntax (`::`) requires raw SQL via `pg_write_query`\n7. **pg_list_objects type**: Use `type` (singular string) or `types` (array). Auto-converts: `{type: 'table'}` \u2261 `{types: ['table']}`\n8. **pg_object_details**: Accepts: `name`, `objectName`, `object`, or `table`. Use `type`/`objectType` for type hint (supports: table, view, materialized_view, partitioned_table, function, sequence, index)\n9. **pg_exists optional WHERE**: `where`/`condition`/`filter` is optional. Without it, checks if table has any rows\n10. **pg_describe_table**: Returns columns, foreignKeys, primaryKey, indexes, constraints. For listing ALL database indexes (not table-specific), use `pg_get_indexes` without `table` param\n11. **pg_vector_insert updateExisting**: Uses direct UPDATE (avoids NOT NULL constraint issues vs INSERT mode)\n12. 
**pg_get_indexes without table**: Returns ALL database indexes (potentially large). Use `table` param for specific table\n13. **pg_upsert/pg_batch_insert RETURNING**: `returning` param must be array of column names: `[\"id\", \"name\"]`. \u26D4 `\"*\"` wildcard not supported\n14. **Small tables**: Optimizer correctly uses Seq Scan for <1000 rows\u2014this is expected behavior\n\n## \uD83D\uDD04 Response Structures\n\n| Tool | Returns | Notes |\n|------|---------|-------|\n| `pg_read_query` | `{rows, rowCount, fields?}` | `fields` contains column metadata (name, dataTypeID) |\n| `pg_write_query` | `{rowsAffected, affectedRows, rows?}` | `rows` only with RETURNING clause. DDL statements return `rowsAffected: 0`. \u26D4 Throws for SELECT |\n| `pg_upsert` | `{success, operation, rowsAffected, rowCount, rows?}` | `operation: 'insert'|'update'`. `rows` only with RETURNING clause |\n| `pg_batch_insert` | `{success, rowsAffected, affectedRows, insertedCount, rowCount, rows?}` | Empty objects use DEFAULT VALUES. \u26A0\uFE0F BIGINT > 2^53 loses precision |\n| `pg_create_table` | `{success, table, sql, compositePrimaryKey?}` | `table` = schema-qualified name. `compositePrimaryKey` only when composite PK used |\n| `pg_drop_table` | `{success, dropped, existed}` | `existed` indicates whether table was present before drop |\n| `pg_create_index` | `{success, index, indexName, table, sql, ifNotExists?, alreadyExists?, message?}` | `alreadyExists`/`message` only with `ifNotExists: true` when index pre-exists |\n| `pg_drop_index` | `{success, index, existed, sql}` | `existed` indicates whether index was present before drop |\n| `pg_truncate` | `{success, table, cascade, restartIdentity}` | `cascade`/`restartIdentity` reflect the options used |\n| `pg_count` | `{count: N}` | Use `params` for placeholders: `where: 'id=$1', params: [5]`. DISTINCT: use `pg_read_query` |\n| `pg_exists` | `{exists: bool, mode, hint?}` | `params` for placeholders. 
`mode: 'filtered'|'any_rows'` |\n| `pg_get_indexes` | `{indexes, count, totalCount?}` | Default `limit: 100` without `table`. Use `schema`/`limit` to filter. Index objects have `name`, `type`, `columns` |\n| `pg_list_objects` | `{objects, count, totalCount, byType}` | Use `limit` to cap results, `type`/`types` to filter |\n| `pg_object_details` | `{name, schema, type, returnType?, ...}` | Functions: `returnType` alias. Views/Mat. views: `definition`. Tables: equivalent to `pg_describe_table` (columns, primaryKey, indexes, constraints, foreignKeys) |\n| `pg_analyze_db_health` | `{cacheHitRatio, databaseSize, tableStats, unusedIndexes, tablesNeedingVacuum, connections, bloat, isReplica, overallScore, overallStatus}` | `cacheHitRatio`: `{ratio, heap, index, status}`. `overallStatus`: `healthy|needs_attention|critical`. Optional sections via `includeIndexes`, `includeVacuum`, `includeConnections` |\n| `pg_describe_table` | `{name, schema, type, owner, rowCount, columns, primaryKey, indexes, constraints, foreignKeys}` | Columns include `notNull` (alias for `!nullable`), `foreignKey`. `constraints` includes PK, UNIQUE, CHECK, NOT NULL. \u26A0\uFE0F `rowCount: -1` = stale/missing statistics (run ANALYZE on the table). Small tables (<~50 rows) may show -1 until first ANALYZE |\n| `pg_analyze_query_indexes` | `{plan, issues, recommendations}` | `verbosity`: 'summary' (default) or 'full'. Summary mode returns condensed plan |\n| `pg_list_tables` | `{tables, count, totalCount, truncated?, hint?}` | Use `schema` to filter, `limit` to cap results, `exclude` to hide extension schemas (e.g., `['cron', 'topology', 'partman']`) |\n| List operations | `{items, count}` | Access via `result.tables`, `result.views`, etc. 
|\n| `pg_jsonb_agg groupBy` | `{result: [{group_key, items}], count, grouped: true}` | Without groupBy: `{result: [...], count, grouped: false}` |\n| `pg_vector_aggregate` | `{average_vector, count}` or `{groups: [{group_key, average_vector, count}]}` | Without/with `groupBy` |\n| `pg_index_stats` | `{indexes, count, truncated?, totalCount?}` | Default 50 rows. Use `limit: 0` for all |\n| `pg_table_stats` | `{tables, count, truncated?, totalCount?}` | Default 50 rows. Use `limit: 0` for all |\n| `pg_vacuum_stats` | `{tables, count, truncated?, totalCount?}` | Default 50 rows. Use `limit: 0` for all |\n| `pg_stat_statements` | `{statements, count, truncated?, totalCount?}` | Default 20 rows. `orderBy` supported |\n| `pg_query_plan_stats` | `{queryPlanStats, count, truncated?, totalCount?}` | Default 20 rows. `truncateQuery: 0` for full text |\n| `pg_stat_activity` | `{connections, count}` | `includeIdle: true` to include idle connections |\n| `pg_locks` | `{locks}` | `showBlocked: true` switches to blocked/blocking pid format |\n| `pg_bloat_check` | `{tables, count}` | Tables with `live_tuples`, `dead_tuples`, `dead_pct` |\n| `pg_cache_hit_ratio` | `{heap_read, heap_hit, cache_hit_ratio}` | All fields nullable (0 tables = null). Flat response, differs from `pg_analyze_db_health.cacheHitRatio` |\n| `pg_seq_scan_tables` | `{tables, count, minScans, hint, truncated?, totalCount?}` | Default 50 rows. `minScans` default: 10 |\n| `pg_connection_pool_optimize` | `{current, config, waitEvents, recommendations}` | No params needed |\n| `pg_performance_baseline` | `{name, timestamp, metrics}` | `metrics`: `cache`, `tables`, `indexes`, `connections`, `databaseSize` |\n| `pg_duplicate_indexes` | `{duplicateIndexes, count, hint, truncated?, totalCount?}` | Default 50 rows. 
`duplicate_type`: EXACT_DUPLICATE, OVERLAPPING, SUBSET |\n| `pg_query_plan_compare` | `{query1, query2, analysis, fullPlans}` | `analysis.costDifference` + `recommendation` |\n| `pg_unused_indexes` | `{unusedIndexes, count, hint, truncated?, totalCount?}` | Default 20 rows. `summary: true` \u2192 `{summary, bySchema, totalCount}` |\n\n## API Mapping\n\n`pg_group_action` \u2192 `pg.group.action()` (group prefixes dropped: `pg_jsonb_extract` \u2192 `pg.jsonb.extract()`)\n\n**Top-Level Core Aliases**: All starter tools available directly: `pg.readQuery()`, `pg.writeQuery()`, `pg.listTables()`, `pg.describeTable()`, `pg.createTable()`, `pg.dropTable()`, `pg.count()`, `pg.exists()`, `pg.upsert()`, `pg.batchInsert()`, `pg.truncate()`, `pg.createIndex()`, `pg.dropIndex()`, `pg.getIndexes()`, `pg.listObjects()`, `pg.objectDetails()`, `pg.listExtensions()`, `pg.analyzeDbHealth()`, `pg.analyzeQueryIndexes()`, `pg.analyzeWorkloadIndexes()`\n\n**Positional args work**: `readQuery(\"SELECT...\")`, `exists(\"users\", \"id=1\")`, `createIndex(\"users\", [\"email\"])`\n\n**Discovery**: `pg.help()` returns `{group: methods[]}` mapping (e.g., `{core: ['readQuery', ...], jsonb: [...]}`). `pg.core.help()`, `pg.jsonb.help()` for group-specific methods.\n\n## Format Auto-Resolution\n\n- **Schema.Table**: `'public.users'` auto-parses to `{schema: 'public', table: 'users'}`\n- **JSONB Paths**: Both `'a.b.c'` (string) and `['a','b','c']` (array) work. Use array for literal dots: `[\"key.with.dots\"]`\n- **Aliases**: Common parameter variations resolve automatically (e.g., `query`/`sql`, `table`/`tableName`)\n\n---\n\n## Vector Tools\n\n\u26A0\uFE0F **Large Vectors**: Direct MCP tool calls may truncate vectors >256 dimensions due to JSON-RPC message size limits. For vectors \u2265256 dimensions (e.g., OpenAI 1536-dim, local 384-dim), use Code Mode: `await pg.vector.search({table, column, vector, limit})`\n\n- `pg_vector_search`: Supports `schema.table` format (auto-parsed). 
Returns `{results: [...], count, metric}`. Use `select: [\"id\", \"name\"]` to include identifying columns. Without select, only returns distance. `filter` = `where`. \u26A0\uFE0F Vectors read from DB are strings\u2014parse before passing: `vec.replace(/^\\[|\\]$/g, '').split(',').map(Number)`\n- `pg_vector_insert`: Supports `schema.table` format (auto-parsed). Use `updateExisting` + `conflictColumn` + `conflictValue` for UPDATE mode. `additionalColumns` is applied in both INSERT and UPDATE modes\n- `pg_vector_batch_insert`: `vectors` expects `[{vector: [...], data?: {...}}]` objects, not raw arrays\n- `pg_vector_normalize`: Returns `{normalized: [...], magnitude: N}`. Note: `magnitude` is the **original** vector length (not 1)\n- `pg_vector_aggregate`: Supports `schema.table` format (auto-parsed). \u26D4 Validates column is vector type. Returns `{average_vector: {preview, dimensions, truncated}, count}` or `{groups: [{group_key, average_vector, count}]}` with groupBy. \u26A0\uFE0F `groupBy` only supports simple column names (not expressions)\n- `pg_vector_dimension_reduce`: Direct mode returns `{reduced: [...], originalDimensions, targetDimensions}`. Table mode returns `{rows: [{id, original_dimensions, reduced}], processedCount, summarized}`. Default `summarize: true` in table mode returns compact `{preview, dimensions, truncated}` format. Use `summarize: false` for full vectors\n- `pg_vector_distance`: Calculate distance between two vectors. `metric`: 'l2' (default), 'cosine', 'inner_product'. Returns `{distance, metric}`\n- `pg_vector_cluster`: `clusters` = `k`. Returns centroids with `{preview, dimensions, truncated}` format for large vectors (>10 dims)\u2014use `pg_vector_distance` to assign rows\n- `pg_vector_create_index`: Use `type` (or alias `method`) with values 'ivfflat' or 'hnsw'. IVFFlat: `lists` param. HNSW: `m`, `efConstruction` params\n- `pg_vector_performance`: Auto-generates testVector from first row if omitted. 
Returns `testVectorSource: 'auto-generated from first row'|'user-provided'`\n- `pg_vector_validate`: Returns `{valid: bool, vectorDimensions}`. Empty vector `[]` returns `{valid: true, vectorDimensions: 0}`\n- \u26D4 `pg_vector_embed`: Demo only (hash-based). Use OpenAI/Cohere for production.\n- `pg_hybrid_search`: Supports `schema.table` format (auto-parsed). Combines vector similarity and full-text search with weighted scoring. `textColumn` auto-detects type: uses tsvector columns directly, wraps text columns with `to_tsvector()`. Code mode alias: `pg.hybridSearch()` \u2192 `pg.vector.hybridSearch()`\n- \uD83D\uDCDD **Error Handling**: Vector tools return `{success: false, error: \"...\", suggestion: \"...\"}` for validation/semantic errors (dimension mismatch, non-vector column, table not found). Check `success` field before processing results.\n\n## JSONB Tools\n\n- `pg_jsonb_extract`: Returns null if path doesn't exist\n- `pg_jsonb_insert`: Index -1 inserts BEFORE last element; use `insertAfter: true` to append. \u26A0\uFE0F Use array format `[-1]` not string `\"[-1]\"` for negative indices\n- `pg_jsonb_set`: `createMissing=true` creates full nested paths; initializes NULL columns to `{}`. Empty path (`''` or `[]`) replaces entire column value\n- `pg_jsonb_strip_nulls`: \u26A0\uFE0F Requires `where`/`filter` clause\u2014write operations must be targeted. Use `preview: true` to see changes first\n- `pg_jsonb_agg`: Supports AS aliases in select: `[\"id\", \"metadata->>'name' AS name\"]`. \u26A0\uFE0F `->>` returns text\u2014use `->` to preserve JSON types\n- `pg_jsonb_object`: Use `data`, `object`, or `pairs` parameter: `{data: {name: \"John\", age: 30}}`. Returns `{object: {...}}`\n- `pg_jsonb_normalize`: `flatten` doesn't descend into arrays; `keys` returns text (use `pairs` for JSON types)\n- `pg_jsonb_stats`: Returns column-level statistics. `topKeysLimit` controls key count (default: 20). 
\u26A0\uFE0F `typeDistribution` null type = SQL NULL columns (entire column NULL, not JSON `null` literal). Use `sqlNullCount` for explicit count\n- \u26D4 **Object-only tools**: `diff`, `merge`, `keys`, `indexSuggest`, `securityScan`, `stats`\u2014topKeys require JSONB objects, throw descriptive errors for arrays\n- \u26D4 **Array-only tools**: `insert`\u2014requires JSONB arrays, throws errors for objects\n- \uD83D\uDCDD `normalize` modes: `pairs`/`keys`/`flatten` for objects; `array` for arrays\n\n**Top-Level Aliases**: `pg.jsonbExtract()`, `pg.jsonbSet()`, `pg.jsonbInsert()`, `pg.jsonbDelete()`, `pg.jsonbContains()`, `pg.jsonbPathQuery()`, `pg.jsonbAgg()`, `pg.jsonbObject()`, `pg.jsonbArray()`, `pg.jsonbKeys()`, `pg.jsonbStripNulls()`, `pg.jsonbTypeof()`, `pg.jsonbValidatePath()`, `pg.jsonbMerge()`, `pg.jsonbNormalize()`, `pg.jsonbDiff()`, `pg.jsonbIndexSuggest()`, `pg.jsonbSecurityScan()`, `pg.jsonbStats()`\n\n## Stats Tools\n\n- All stats tools support `schema.table` format (auto-parsed, embedded schema takes priority over explicit `schema` param)\n- `timeSeries`: Both `timeColumn` (must be timestamp/date) and `valueColumn` (must be numeric) are validated upfront with clear error messages. Aliases: `time`\u2192`timeColumn`, `value`\u2192`valueColumn`. `interval` accepts: `second`, `minute`, `hour`, `day`, `week`, `month`, `year` (keywords, PostgreSQL format, or plurals). Default `limit: 100` time buckets. Use `limit: 0` for no limit. Returns `truncated` and `totalCount` indicators when default limit is applied. **groupBy payloads**: Default `groupLimit: 20` groups. Returns `truncated` + `totalGroupCount` when groups are limited. Use `groupLimit: 0` for all groups\n- `correlation`: Use `column1`/`column2` or aliases `x`/`y` for column names\n- `distribution`: Returns `skewness`, `kurtosis` (excess). `buckets` must be > 0. **groupBy payloads**: Default `groupLimit: 20` groups (prevents large payloads with many histogram buckets per group). 
Returns `truncated` + `totalGroupCount` when groups are limited. Use `groupLimit: 0` for all groups\n- `sampling`: Defaults to `random` method with 20 rows (optimized for LLM context). `sampleSize` always takes precedence over `percentage`. \u26A0\uFE0F `percentage` param only works with `bernoulli`/`system` methods\u2014ignored for default `random` method. Default limit of 100 rows applied to `bernoulli`/`system` with `percentage` to prevent large payloads. Returns `truncated` and `totalSampled` when TABLESAMPLE returns more rows than limit\n- `percentiles`: Accepts 0-1 or 0-100 (auto-normalized). \u26A0\uFE0F Use consistent scale\u2014mixing (e.g., `[0.1, 50]`) produces unexpected keys and returns a `warning` field explaining the issue. Empty array \u2192 defaults [0.25, 0.5, 0.75]\n- `hypothesis`: Returns nested `results` object containing `pValue` (two-tailed), `testStatistic`, `interpretation`, `sampleMean`, `sampleStdDev`. Access via `hyp.results.pValue`. Use `populationStdDev` for z-test, otherwise defaults to t-test\n- `regression`: Use `xColumn`/`yColumn`, aliases `x`/`y`, or `column1`/`column2` (for consistency with correlation). Returns nested `regression` object containing `slope`, `intercept`, `rSquared`, `equation`, `avgX`, `avgY`, `sampleSize`. Access via `reg.regression.slope`\n- \u26A0\uFE0F WARNING: `sampling` with `system` method unreliable for small tables\u2014use `bernoulli` or `random`\n\n**Top-Level Aliases**: `pg.descriptive()`, `pg.percentiles()`, `pg.correlation()`, `pg.regression()`, `pg.timeSeries()`, `pg.distribution()`, `pg.hypothesis()`, `pg.sampling()`\n\n## Text Tools\n\n- `pg_text_search`/`pg_text_rank`: Column must be `text` type\u2014pre-built `tsvector` columns are **not** supported (wrap with `to_tsvector()` fails on tsvector input). Use `pg_read_query` with raw FTS SQL for tsvector columns\n- `pg_create_fts_index`: Returns `{success, index, config, skipped}`. `skipped: true` = index already existed (IF NOT EXISTS). 
`ifNotExists` defaults to `true`\n\n## Performance Tools\n\nCore (20 methods): `explain()`, `explainAnalyze()`, `explainBuffers()`, `indexStats()`, `tableStats()`, `statStatements()`, `statActivity()`, `locks()`, `bloatCheck()`, `cacheHitRatio()`, `seqScanTables()`, `indexRecommendations()`, `queryPlanCompare()`, `baseline()`, `connectionPoolOptimize()`, `partitionStrategySuggest()`, `unusedIndexes()`, `duplicateIndexes()`, `vacuumStats()`, `queryPlanStats()`\n\nWrappers (3): `blockingQueries()`\u2192`locks({showBlocked:true})`, `longRunningQueries({ seconds | minDuration }?)` filters by duration (returns `{longRunningQueries, count, threshold}`), `analyzeTable({ table })` runs ANALYZE (accepts `schema.table` format)\n\n- `explain({ sql, format?, params? })`: Supports `format: 'text'|'json'|'yaml'|'xml'`. Default: text. Use `params: [value]` for `$1, $2` placeholders\n- `explainAnalyze({ sql, format?, params? })`: Same format/params options as explain\n- `explainBuffers({ sql, params? })`: Always returns JSON format (includes buffer statistics)\n- `indexRecommendations({ sql?, params? })`: Pass `params: [value]` for parameterized queries (e.g., `sql: 'SELECT * FROM orders WHERE id = $1', params: [5]`)\n- `queryPlanCompare({ query1, query2, params1?, params2? })`: Compare two query plans. 
Use `params1`/`params2` for parameterized queries\n- `partitionStrategySuggest({ table })`: Accepts `schema.table` format (auto-parsed) or separate `table` + `schema` params\n- \u26A0\uFE0F **Data Type Awareness**: Query literals must match column types exactly\u2014`WHERE sensor_id = 1` (integer), not `'sensor_1'` (string)\n\nAliases: `cacheStats`\u2192`cacheHitRatio`, `queryStats`\u2192`statStatements`, `activity`\u2192`statActivity`, `vacuum`\u2192`vacuumStats`, `indexUsage`\u2192`indexStats`, `bloatEstimate`/`bloat`\u2192`bloatCheck`, `runningQueries`\u2192`longRunningQueries`\n\n\uD83D\uDCE6 **AI-Optimized Payloads**: Tools return limited results by default to reduce context size:\n- `indexStats({ limit? })`: Default 50 rows. Returns `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `tableStats({ limit? })`: Default 50 rows. Returns `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `vacuumStats({ limit? })`: Default 50 rows. Same truncation indicators. Use `limit: 0` for all\n- `statStatements({ limit?, orderBy? })`: Default 20 rows. Returns `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `unusedIndexes({ limit?, summary? })`: Default 20 rows. Use `summary: true` for aggregated stats by schema\n- `queryPlanStats({ limit?, truncateQuery? })`: Default 20 rows, queries truncated to 100 chars. Use `truncateQuery: 0` for full text\n\n\uD83D\uDCCD **Code Mode Note**: `pg_performance_baseline` \u2192 `pg.performance.baseline()` (not `performanceBaseline`). 
`indexRecommendations` accepts `query` alias for `sql`\n\n**Top-Level Aliases**: `pg.explain()`, `pg.explainAnalyze()`, `pg.cacheHitRatio()`, `pg.indexStats()`, `pg.tableStats()`, `pg.indexRecommendations()`, `pg.bloatCheck()`, `pg.vacuumStats()`, `pg.unusedIndexes()`, `pg.duplicateIndexes()`, `pg.seqScanTables()`\n\n## Monitoring Tools\n\nCore: `databaseSize()`, `tableSizes()`, `connectionStats()`, `showSettings()`, `capacityPlanning()`, `uptime()`, `serverVersion()`, `recoveryStatus()`, `replicationStatus()`, `resourceUsageAnalyze()`, `alertThresholdSet()`\n\n- `databaseSize()`: Returns `{bytes: number, size: string}`. Optional `database` param for specific db\n- `tableSizes({ limit?, schema? })`: Default limit 50. Returns `{tables: [...], count, truncated?, totalCount?}`. `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `connectionStats()`: Returns `{byDatabaseAndState, totalConnections: number, maxConnections: number}`\n- `showSettings({ setting?, limit? })`: Default limit 50 when no pattern. Returns `{settings: [...], count, truncated?, totalCount?}`. Accepts `pattern`, `setting`, or `name`. Exact names auto-match; `%` for LIKE patterns\n- `capacityPlanning({days: 90})`: `days` = `projectionDays`. Returns `{current, growth, projection, recommendations}` with numeric fields. 
\u26D4 Negative days rejected\n- `uptime()`: Returns `{start_time: string, uptime: {days, hours, minutes, seconds, milliseconds}}`\n- `serverVersion()`: Returns `{full_version: string, version: string, version_num: number}`\n- `recoveryStatus()`: Returns `{in_recovery: boolean, last_replay_timestamp: string|null}`\n- `replicationStatus()`: Returns `{role: 'primary'|'replica', replicas: [...]}` for primary, or `{role: 'replica', replay_lag, ...}` for replica\n- `resourceUsageAnalyze()`: Returns `{backgroundWriter, checkpoints, connectionDistribution, bufferUsage, activity, analysis}` with all counts as numbers\n- `alertThresholdSet({metric?: 'connection_usage'})`: Returns recommended thresholds. Invalid metric returns `{success: false, error: \"...\"}`. Valid metrics: connection_usage, cache_hit_ratio, replication_lag, dead_tuples, long_running_queries, lock_wait_time\n\n\uD83D\uDCE6 **AI-Optimized Payloads**: Tools return limited results by default to reduce context size:\n- `tableSizes({ limit? })`: Default 50 rows. Returns `truncated: true` + `totalCount` when limited. Use `limit: 0` for all\n- `showSettings({ limit? })`: Default 50 rows when no pattern specified. 
Use `limit: 0` for all or specify a pattern\n\nAliases: `tables`\u2192`tableSizes`, `connections`\u2192`connectionStats`, `settings`/`config`\u2192`showSettings`, `alerts`/`thresholds`\u2192`alertThresholdSet`\n\n**Top-Level Aliases**: `pg.databaseSize()`, `pg.tableSizes()`, `pg.connectionStats()`, `pg.serverVersion()`, `pg.uptime()`, `pg.showSettings()`, `pg.recoveryStatus()`, `pg.replicationStatus()`, `pg.capacityPlanning()`, `pg.resourceUsageAnalyze()`, `pg.alertThresholdSet()`\n\n## Admin Tools\n\nCore: `vacuum()`, `vacuumAnalyze()`, `analyze()`, `reindex()`, `cluster()`, `setConfig()`, `reloadConf()`, `resetStats()`, `cancelBackend()`, `terminateBackend()`\n\n- All admin tools support `schema.table` format (auto-parsed, embedded schema takes priority over explicit `schema` param)\n- `vacuum({ table?, full?, analyze?, verbose? })`: Without `table`, vacuums ALL tables. `verbose` output goes to PostgreSQL server logs\n- `reindex({ target, name?, concurrently? })`: Targets: 'table', 'index', 'schema', 'database'. `database` target defaults to current db when `name` omitted\n- `cluster()`: Without args, re-clusters all previously-clustered tables. With args, requires BOTH `table` AND `index`\n- `setConfig({ name, value, isLocal? 
})`: `isLocal: true` applies only to current transaction\n- `cancelBackend({ pid })`: Graceful query cancellation\u2014returns `{success: false}` for invalid PID (no error thrown)\n- `terminateBackend({ pid })`: Forceful connection termination\u2014use with caution\n\nAliases: `tableName`\u2192`table`, `indexName`\u2192`index`, `param`/`setting`\u2192`name`, `processId`\u2192`pid`\n\n**Top-Level Aliases**: `pg.vacuum()`, `pg.vacuumAnalyze()`, `pg.analyze()`, `pg.reindex()`, `pg.cluster()`, `pg.setConfig()`, `pg.reloadConf()`, `pg.resetStats()`, `pg.cancelBackend()`, `pg.terminateBackend()`\n\n**Discovery**: `pg.admin.help()` returns `{methods, methodAliases, examples}` object\n\n**Response structures**:\n- `vacuum()` / `vacuumAnalyze()`: `{success, message, table?, schema?, hint?}` (hint present when verbose: true)\n- `analyze()`: `{success, message, table?, schema?, columns?}`\n- `reindex()`: `{success, message}`\n- `cluster()`: `{success, message, table?, index?}` (table/index present for table-specific cluster)\n- `setConfig()`: `{success, message, parameter, value}`\n- `reloadConf()` / `resetStats()`: `{success, message}`\n- `cancelBackend()` / `terminateBackend()`: `{success, message}`\n\n## Backup Tools\n\nCore: `dumpTable()`, `dumpSchema()`, `copyExport()`, `copyImport()`, `createBackupPlan()`, `restoreCommand()`, `physical()`, `restoreValidate()`, `scheduleOptimize()`\n\nResponse Structures:\n- `dumpTable`: `{ddl, type, note, insertStatements?}` \u2014 `insertStatements` only with `includeData: true` (separate field from `ddl`)\n- `copyExport`: `{data, rowCount, truncated?, limit?}` \u2014 `data` contains CSV/text content. 
`truncated: true` + `limit` when rows returned equals applied limit (indicating more rows likely exist)\n- `copyImport`: `{command, stdinCommand, notes}` \u2014 Both file and stdin COPY commands\n- `createBackupPlan`: `{strategy: {fullBackup, walArchiving}, estimates}`\n- `restoreCommand`: `{command, warnings?, notes}` \u2014 Warnings when `database` omitted\n- `restoreValidate`: `{note?, validationSteps: [{step, name, command?, commands?, note?}], recommendations}` \u2014 Top-level `note` when `backupType` omitted (defaults to pg_dump). Step-level `note` for non-command steps\n- `physical`: `{command, notes, requirements}`\n- `scheduleOptimize`: `{analysis, recommendation, commands}`\n\n\uD83D\uDCE6 **AI-Optimized Payloads**: `copyExport` limits results to 500 rows by default to prevent large payloads. Use `limit: 0` for all rows, or specify a custom limit.\n\n- `pg_copy_export`: Use `query`/`sql` OR `table`. \u26A0\uFE0F If both provided, `query` takes precedence with warning. Supports `schema.table` format (auto-parsed, takes priority over `schema` param). Format: `csv` (default, comma-delimited), `text` (tab-delimited). Both formats support `header: true` (default). \u26D4 `binary` not supported via MCP\u2014use `pg_dump_schema` for binary exports. Default `limit: 500` (use `0` for all rows). Optional `delimiter` to customize\n- `pg_dump_table`: Returns `ddl` + `insertStatements` when `includeData: true`. Supports sequences (`type: 'sequence'`), views (`type: 'view'`), and partitioned tables (`type: 'partitioned_table'` with `PARTITION BY` clause). **PRIMARY KEYS, INDEXES, CONSTRAINTS NOT included**\u2014use `pg_get_indexes`/`pg_get_constraints`. Supports `schema.table` format\n- `pg_dump_schema`: Generates pg_dump command. Optional `schema`, `table`, `filename`\n- `pg_copy_import`: Generates COPY FROM command. Supports `schema.table` format (auto-parsed, takes priority over `schema` param). 
`columns` array, `filePath`, `format`, `header`, `delimiter`\n- `pg_restore_command`: Include `database` parameter for complete command. Optional `schemaOnly`, `dataOnly`\n- `pg_create_backup_plan`: Generates backup strategy with cron schedule. `frequency`: 'hourly'|'daily'|'weekly', `retention` count\n- `pg_backup_physical`: Generates pg_basebackup command. `format`: 'plain'|'tar' (default: 'tar'), `checkpoint`: 'fast'|'spread', `compress`: 0-9\n- `pg_restore_validate`: Generates validation commands. `backupType`: 'pg_dump' (default)|'pg_basebackup'\n- `pg_backup_schedule_optimize`: Analyzes database activity patterns and recommends optimal backup schedule\n\n**Top-Level Aliases**: `pg.dumpTable()`, `pg.dumpSchema()`, `pg.copyExport()`, `pg.copyImport()`, `pg.createBackupPlan()`, `pg.restoreCommand()`, `pg.restoreValidate()`, `pg.physical()`, `pg.backupPhysical()`, `pg.scheduleOptimize()`, `pg.backupScheduleOptimize()`\n\n## Text Tools\n\nDefaults: `threshold`=0.3 (use 0.1-0.2 for partial), `maxDistance`=3 (use 5+ for longer strings)\n\n- All text tools support `schema.table` format (auto-parsed, embedded schema takes priority over explicit `schema` param)\n- `pg_text_search`: Supports both `column` (singular string) and `columns` (array). Either is valid\u2014`column` auto-converts to array\n- `pg_trigram_similarity`, `pg_fuzzy_match`, `pg_regexp_match`, `pg_like_search`: All default to 100 results to prevent large payloads. Use `limit: 0` for all rows\n- `pg_fuzzy_match`: Levenshtein returns distance (lower=better). Soundex/metaphone return phonetic codes (exact match only). \u26D4 Invalid `method` values throw error with valid options\n- `pg_text_normalize`: Removes accents only (unaccent). Does NOT lowercase/trim\n- \uD83D\uDCCD **Table vs Standalone**: `normalize`, `sentiment`, `toVector`, `toQuery`, `searchConfig` are standalone (text input only). 
For phonetic matching: use `pg_fuzzy_match` with `method: 'soundex'|'metaphone'` (direct MCP), or `pg.text.soundex()`/`pg.text.metaphone()` (Code Mode convenience wrappers that call fuzzyMatch internally)\n\n**Top-Level Aliases**: `pg.textSearch()`, `pg.textRank()`, `pg.textHeadline()`, `pg.textNormalize()`, `pg.textSentiment()`, `pg.textToVector()`, `pg.textToQuery()`, `pg.textSearchConfig()`, `pg.textTrigramSimilarity()`, `pg.textFuzzyMatch()`, `pg.textLikeSearch()`, `pg.textRegexpMatch()`, `pg.textCreateFtsIndex()`\n\n## Schema Tools\n\nCore: `listSchemas()`, `createSchema()`, `dropSchema()`, `listViews()`, `createView()`, `dropView()`, `listSequences()`, `createSequence()`, `dropSequence()`, `listFunctions()`, `listTriggers()`, `listConstraints()`\n\nResponse Structures:\n- `listSchemas()`: `{schemas: string[], count}`\n- `listViews({ includeMaterialized?, truncateDefinition?, limit? })`: `{views: [{schema, name, type, definition, definitionTruncated?}], count, hasMatViews, truncatedDefinitions?, truncated, note?}`. Default `limit: 50` (use `0` for all). Default `truncateDefinition: 500` chars (use `0` for full definitions). `truncated` always included (`true`/`false`)\n- `listSequences({ schema? })`: `{sequences: [{schema, name, owned_by}], count}`. Note: `owned_by` omits `public.` prefix for sequences in public schema (e.g., `users.id` not `public.users.id`)\n- `listFunctions({ schema?, limit?, exclude? })`: `{functions: [{schema, name, arguments, returns, language, volatility}], count, limit}`\n- `listTriggers({ schema?, table? })`: `{triggers: [{schema, table_name, name, timing, events, function_name, enabled}], count}`\n- `listConstraints({ schema?, table?, type? })`: `{constraints: [{schema, table_name, name, type, definition}], count}`. 
Type codes: `p`=primary_key, `f`=foreign_key, `u`=unique, `c`=check\n- `dropSchema/dropView/dropSequence`: All return `{existed: true/false}` to indicate if object existed before drop\n- `createSchema/createSequence` (with `ifNotExists`) and `createView` (with `orReplace`): Return `{alreadyExisted: true/false}` when the flag is set. Without `ifNotExists`/`orReplace`, the field is omitted\n\n- `pg_create_view`: Supports `schema.name` format (auto-parsed). Use `orReplace: true` for CREATE OR REPLACE. `checkOption`: 'cascaded', 'local', 'none'. \u26D4 OR REPLACE can add new columns but cannot rename/remove existing ones\u2014PostgreSQL limitation\n- `pg_create_sequence`: Supports `schema.name` format. Parameters: `start`, `increment`, `minValue`, `maxValue`, `cache`, `cycle`, `ownedBy`, `ifNotExists`\n- `pg_list_functions`: Default limit=500. Use `schema: 'public'`, `limit: 2000`, or `exclude: ['postgis', 'pg_trgm', 'ltree', 'citext', 'fuzzystrmatch', 'pg_stat_statements', 'hypopg', 'unaccent', 'pg_stat_kcache', 'pgcrypto', 'partman', 'vector', 'topology']` to filter. \u26A0\uFE0F `exclude` filters by **schema name** AND extension-owned functions. The `language` filter does NOT exclude extension functions\u2014use `exclude` alongside `language` for clean results. Note: Aggressive `exclude` may return 0 results if all functions belong to excluded extensions\n\n**Discovery**: `pg.schema.help()` returns `{methods, methodAliases, examples}` object\n\n## Partitioning Tools\n\n- `pg_create_partitioned_table`: `partitionBy` case-insensitive. Supports `schema.table` format for `name` (auto-parsed). `primaryKey` accepts array (e.g., `['id', 'event_date']`). \u26D4 `primaryKey`/`unique` must include partition key\u2014throws validation error otherwise\n- `pg_create_partition`: Use `parent`/`table`/`parentTable`. `forValues` is a raw SQL string: `\"FROM ('2024-01-01') TO ('2024-07-01')\"`, `\"IN ('US', 'CA')\"`, `\"WITH (MODULUS 4, REMAINDER 0)\"`. 
For DEFAULT partition, use `isDefault: true`. Supports `schema.table` format for `parent` (auto-parsed)\n- `pg_attach_partition`/`pg_detach_partition`: Support `schema.table` format for `parent` and `partition` (auto-parsed). For DEFAULT partition, use `isDefault: true` or `forValues: \"DEFAULT\"`\n- `pg_list_partitions`: Default `limit: 50` (use `0` for all). Returns `{partitions, count, truncated, totalCount?}`. Uses `bounds` field (consistent with `pg_partition_info`)\n- `pg_partition_info`: Returns `{tableInfo, partitions, totalSizeBytes}`. Uses `bounds` field\n- Both list/info tools support `schema.table` format (auto-parsed) and accept `table`, `parent`, `parentTable`, or `name` aliases\n- Response structures: `pg_create_partitioned_table` \u2192 `{success, table, partitionBy, partitionKey, primaryKey?}`. `pg_create_partition` \u2192 `{success, partition, parent, bounds, subpartitionBy?, subpartitionKey?}`. `pg_attach_partition` \u2192 `{success, parent, partition, bounds}`. `pg_detach_partition` \u2192 `{success, parent, partition}`\n- \u26A0\uFE0F Sub-partitioning: `subpartitionBy`/`subpartitionKey` on `pg_create_partition` makes a partition itself partitionable. The parent's `primaryKey` must include the sub-partition key column (PostgreSQL constraint)\n- \uD83D\uDCCD Code Mode: `pg.partitioning.create()` = `createPartition`, NOT `createPartitionedTable`\n\n## pg_partman Tools\n\n- `pg_partman_create_parent`: Interval uses PostgreSQL syntax ('1 day', '1 month') NOT keywords ('daily'). `startPartition` accepts 'now' shorthand for current date. Required params: `parentTable`/`table`, `controlColumn`/`control`/`column`, `interval`\n- `pg_partman_run_maintenance`: Without `parentTable`, maintains ALL partition sets. Returns `partial: true` when some tables are skipped. `orphaned` object groups orphaned configs with `count`, `tables`, and cleanup `hint`. `errors` array for other failures\n- `pg_partman_show_config`: Default `limit: 50` (use `0` for all). 
Returns `truncated` + `totalCount` when limited. `orphaned` flag per config. Supports `schema.table` or plain table name (auto-prefixes `public.`)\n- `pg_partman_show_partitions`: Default `limit: 50` (use `0` for all). Returns `truncated` + `totalCount` when limited. `parentTable` required. Supports `schema.table` format (auto-parsed)\n- `pg_partman_check_default`/`partition_data`: `parentTable` required. Supports `schema.table` format (auto-parsed)\n- `pg_partman_set_retention`: \u26A0\uFE0F **CAUTION: Default is DROP** \u2014 `retentionKeepTable: false` (default) = DROP partitions, `true` = detach only (safer). Pass `retention: null` to disable retention\n- `pg_partman_undo_partition`: `targetTable` MUST exist before calling. Requires both `parentTable` and `targetTable`/`target`. \u26A0\uFE0F Parent table and child partitions remain after undo\u2014use `DROP TABLE parent CASCADE` to clean up\n- `pg_partman_analyze_partition_health`: Default `limit: 50` (use `0` for all). Returns `truncated` + `totalCount` when limited. `summary.overallHealth`: 'healthy'|'warnings'|'issues_found'\n- \uD83D\uDCDD **Schema Resolution**: All partman tools auto-prefix `public.` when no schema specified in `parentTable`\n- \uD83D\uDCDD **Aliases**: `parentTable` accepts `table`, `parent`, `name`. `controlColumn` accepts `control`, `column`. `targetTable` accepts `target`\n\n## pg_stat_kcache Tools\n\nCore: `createExtension()`, `queryStats()`, `topCpu()`, `topIo()`, `databaseStats()`, `resourceAnalysis()`, `reset()`\n\n- `pg_kcache_query_stats`: Default `limit: 20` (use `0` for all). Returns `truncated` + `totalCount` when limited. `orderBy`: 'total_time' (default), 'cpu_time', 'reads', 'writes'. `queryPreviewLength`: chars for query preview (default: 100, max: 500, 0 for full). \u26D4 'calls' NOT valid for orderBy\u2014use `minCalls` param\n- `pg_kcache_resource_analysis`: Default `limit: 20` (use `0` for all). Returns `truncated` + `totalCount` when limited. 
`minCalls`, `queryPreviewLength` supported. Classifies queries as 'CPU-bound', 'I/O-bound', or 'Balanced'\n- `pg_kcache_top_cpu`: Top CPU-consuming queries. `limit` param (default: 10). `queryPreviewLength`: chars for query preview (default: 100, max: 500, 0 for full). Returns `truncated` + `totalCount` when limited\n- `pg_kcache_top_io`: `type`/`ioType` (alias): 'reads', 'writes', 'both' (default). `limit` param (default: 10). `queryPreviewLength`: chars for query preview (default: 100, max: 500, 0 for full). Returns `truncated` + `totalCount` when limited\n- `pg_kcache_database_stats`: Aggregated CPU/IO stats per database. Optional `database` param to filter specific db\n- `pg_kcache_reset`: Resets pg_stat_kcache AND pg_stat_statements statistics\n\n## citext Tools\n\nCore: `createExtension()`, `convertColumn()`, `listColumns()`, `analyzeCandidates()`, `compare()`, `schemaAdvisor()`\n\n- `pg_citext_create_extension`: Enable citext extension (idempotent). Returns `{success, message, usage}`\n- `pg_citext_convert_column`: Supports `schema.table` format (auto-parsed). \u26D4 Only allows text-based columns (text, varchar, character varying)\u2014non-text columns return `{success: false, error, allowedTypes, suggestion}`. When views depend on column, returns `{success: false, dependentViews, hint}`\u2014drop/recreate views manually. `col` alias for `column`. Returns `{previousType}` showing original type\n- `pg_citext_list_columns`: Default `limit: 100` (use `0` for all). Returns `{columns: [{table_schema, table_name, column_name, is_nullable, column_default}], count, totalCount, truncated}`. Optional `schema`, `limit` filters\n- `pg_citext_analyze_candidates`: Default `limit: 50` (use `0` for all). Default `excludeSystemSchemas: true` filters out extension schemas (cron, topology, partman, tiger) when no `schema`/`table` filter specified\u2014use `excludeSystemSchemas: false` to include all. Returns `truncated: true` + `totalCount` when results are limited. 
Scans tables for TEXT/VARCHAR columns matching common patterns (email, username, name, etc.). Optional `schema`, `table`, `limit`, `excludeSystemSchemas`, `patterns` filters. Returns `{candidates, count, totalCount, truncated, summary: {highConfidence, mediumConfidence}, recommendation, patternsUsed, excludedSchemas?}`\n- `pg_citext_compare`: Test case-insensitive comparison. Returns `{value1, value2, citextEqual, textEqual, lowerEqual, extensionInstalled}`\n- `pg_citext_schema_advisor`: Supports `schema.table` format (auto-parsed). Analyzes specific table. Returns `{table, recommendations: [{column, currentType, previousType?, recommendation, confidence, reason}], summary, nextSteps}`. `tableName` alias for `table`. Already-citext columns include `previousType: \"text or varchar (converted)\"`\n\n**Discovery**: `pg.citext.help()` returns `{methods, methodAliases, examples}` object\n\n## ltree Tools\n\nCore: `createExtension()`, `query()`, `match()`, `subpath()`, `lca()`, `listColumns()`, `convertColumn()`, `createIndex()`\n\n- `pg_ltree_create_extension`: Enable ltree extension (idempotent). Returns `{success, message}`\n- `pg_ltree_query`: Query hierarchical relationships. Supports `schema.table` format (auto-parsed). `mode`/`type`: 'ancestors', 'descendants' (default), 'exact'. Returns `{results, count, path, mode, isPattern}`. \u26A0\uFE0F Validates column is ltree type\u2014returns clear error for non-ltree columns\n- `pg_ltree_match`: Match paths using lquery pattern syntax (`*`, `*{1,2}`, `*.label.*`). Supports `schema.table` format. `pattern`/`lquery`/`query` aliases. Returns `{results, count, pattern}`\n- `pg_ltree_subpath`: Extract portion of ltree path. `offset`/`start`/`from` and `length`/`len` aliases. Negative `offset` counts from end. \u26A0\uFE0F Returns `{success: false, error, pathDepth}` for invalid offset (validated before PostgreSQL call)\n- `pg_ltree_lca`: Find longest common ancestor of multiple paths. Requires `paths` array (min 2). 
Returns `{longestCommonAncestor, hasCommonAncestor: bool, paths}`\n- `pg_ltree_list_columns`: List all ltree columns in database. Optional `schema` filter. Returns `{columns: [{table_schema, table_name, column_name, is_nullable, column_default}], count}`\n- `pg_ltree_convert_column`: Convert TEXT column to ltree. Supports `schema.table` format. `col` alias for `column`. Returns `{previousType}`. \u26A0\uFE0F When views depend on column, returns `{success: false, dependentViews, hint}`\u2014drop/recreate views manually\n- `pg_ltree_create_index`: Create GiST index on ltree column. Supports `schema.table` format. Auto-generates index name if `indexName` omitted. Returns `{indexName, indexType: 'gist', alreadyExists?}`\n\n**Discovery**: `pg.ltree.help()` returns `{methods, methodAliases, examples}` object. Top-level aliases available: `pg.ltreeQuery()`, `pg.ltreeMatch()`, etc.\n\n## PostGIS Tools\n\n**Geometry Creation:**\n- `pg_geocode`: Create point geometry from lat/lng. Returns `{geojson, wkt}`. \u26A0\uFE0F Validates bounds: lat \u00B190\u00B0, lng \u00B1180\u00B0\n- `pg_geometry_column`: Add geometry column to table. `ifNotExists` returns `{alreadyExists: true}`\n- `pg_spatial_index`: Create GiST spatial index. Auto-generates name if not provided. `ifNotExists` supported\n\n**Spatial Queries:**\n- `pg_distance`: Find geometries within distance from point. Returns `{results, count}` with `distance_meters`. \u26A0\uFE0F Validates point bounds\n- `pg_bounding_box`: Find geometries within lat/lng bounding box. Use `select` array for specific columns\n- `pg_intersection`: Find geometries intersecting a WKT/GeoJSON geometry. Auto-detects SRID from column\n- `pg_point_in_polygon`: Check if point is within table polygons. Returns `{containingPolygons, count}`. \u26A0\uFE0F Validates point bounds\n\n**Geometry Operations (Table-based):**\n- `pg_buffer`: Create buffer zone around table geometries. Default limit: 50 rows. 
Default simplify: 10m (set `simplify: 0` to disable). Returns `truncated: true` + `totalCount` when results are truncated. Use `limit: 0` for all rows\n- `pg_geo_transform`: Transform table geometries between SRIDs. Default limit: 50 rows. Returns `truncated: true` + `totalCount` when results are truncated. Use `limit: 0` for all rows. Auto-detects `fromSrid` from column metadata if not provided (returns `autoDetectedSrid: true`). `fromSrid`/`sourceSrid` and `toSrid`/`targetSrid` aliases\n- `pg_geo_cluster`: Spatial clustering (DBSCAN/K-Means). K-Means: If `numClusters` exceeds row count, automatically clamps to available rows with `warning` field. DBSCAN: Returns contextual `hints` array explaining parameter effects (e.g., \"All points formed single cluster\u2014decrease eps\") and `parameterGuide` explaining eps/minPoints trade-offs\n\n**Geometry Operations (Standalone WKT/GeoJSON):**\n- `pg_geometry_buffer`: Create buffer around WKT/GeoJSON. Returns `{buffer_geojson, buffer_wkt, distance_meters}`. Optional `simplify` param (meters) reduces polygon complexity\u2014returns `simplified`, `simplifyTolerance` when applied. \u26A0\uFE0F Returns `warning` if simplify tolerance is too high and geometry collapses to null\n- `pg_geometry_transform`: Transform WKT/GeoJSON between SRIDs. Returns `{transformed_geojson, transformed_wkt, fromSrid, toSrid}`\n- `pg_geometry_intersection`: Compute intersection of two geometries. Returns `{intersects, intersection_geojson, intersection_area_sqm}`. Normalizes SRID (4326) automatically\u2014safe to mix GeoJSON and WKT\n\n**Administration:**\n- `pg_postgis_create_extension`: Enable PostGIS extension (idempotent)\n- `pg_geo_index_optimize`: Analyze spatial indexes. 
Without `table` param, analyzes all spatial indexes\n\n**Code Mode Aliases:** `pg.postgis.addColumn()` \u2192 `geometryColumn`, `pg.postgis.indexOptimize()` \u2192 `geoIndexOptimize`, `pg.postgis.geoCluster()` \u2192 `pg_geo_cluster`, `pg.postgis.geoTransform()` \u2192 `pg_geo_transform`. Note: `pg.{group}.help()` returns `{methods, methodAliases, examples}`\n\n## Cron Tools (pg_cron)\n\nCore: `createExtension()`, `schedule()`, `scheduleInDatabase()`, `unschedule()`, `alterJob()`, `listJobs()`, `jobRunDetails()`, `cleanupHistory()`\n\n- `pg_cron_schedule`: Schedule a cron job. `schedule` supports standard cron (`0 5 * * *`) or interval (`1 second` to `59 seconds`). \u26A0\uFE0F Interval syntax only works for 1-59 seconds\u2014for 60+ seconds, use cron syntax (e.g., `* * * * *` for every minute). Use `name`/`jobName` for identification. `command`/`sql`/`query` aliases supported. Note: pg_cron allows duplicate job names; use unique names to avoid confusion when unscheduling\n- `pg_cron_schedule_in_database`: Schedule job in specific database. `database`/`db` aliases. Optional `username`, `active` params\n- `pg_cron_unschedule`: Remove job by `jobId` or `jobName`. If both provided, `jobName` takes precedence (with warning)\n- `pg_cron_alter_job`: Modify existing job. Can change `schedule`, `command`, `database`, `username`, `active`. \u26D4 Non-existent jobId returns error\n- `pg_cron_list_jobs`: List all jobs. Default `limit: 50` (use `0` for all). Optional `active` boolean filter. Returns `truncated` + `totalCount` when limited. Returns `hint` when jobs have no name\n- `pg_cron_job_run_details`: View execution history. Default `limit: 50`. Optional `jobId`, `status` ('running'|'succeeded'|'failed') filters. Returns `truncated` + `totalCount` when limited. Returns `summary` with counts\n- `pg_cron_cleanup_history`: Delete old run records. `olderThanDays`/`days` param (default: 7). 
Optional `jobId` to target specific job\n- `pg_cron_create_extension`: Enable pg_cron extension (idempotent). Requires superuser\n\n**Discovery**: `pg.cron.help()` returns `{methods, methodAliases, examples}` object\n\n## pgcrypto Tools\n\nCore: `createExtension()`, `hash()`, `hmac()`, `encrypt()`, `decrypt()`, `genRandomUuid()`, `genRandomBytes()`, `genSalt()`, `crypt()`\n\n- `pg_pgcrypto_create_extension`: Enable pgcrypto extension (idempotent). Returns `{success, message}`\n- `pg_pgcrypto_hash`: Hash data using digest algorithms. `algorithm`: 'md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'. `encoding`: 'hex' (default), 'base64'. Returns `{hash, algorithm, encoding, inputLength}`\n- `pg_pgcrypto_hmac`: HMAC authentication. Same algorithms as hash. Returns `{hmac, algorithm, encoding}`. `key` param for secret\n- `pg_pgcrypto_encrypt`: PGP symmetric encryption. `data` + `password`/`key` (aliases). Optional `options` for cipher config (e.g., 'cipher-algo=aes256'). Returns `{encrypted, encoding: 'base64'}`\n- `pg_pgcrypto_decrypt`: Decrypt PGP-encrypted data. `encryptedData`/`data` + `password`/`key` (aliases). Returns `{decrypted, verified}`. \u26D4 Throws on wrong key/corrupt data\n- `pg_pgcrypto_gen_random_uuid`: Generate UUID v4. Optional `count` (1-100, default 1). Returns `{uuid, uuids, count}` (`uuid` convenience property for single requests)\n- `pg_pgcrypto_gen_random_bytes`: Generate random bytes. `length` (1-1024). `encoding`: 'hex' (default), 'base64'. Returns `{randomBytes, length, encoding}`\n- `pg_pgcrypto_gen_salt`: Generate salt for crypt(). `type`: 'bf' (bcrypt, recommended), 'md5', 'xdes', 'des'. Optional `iterations` for bf (4-31) or xdes. Returns `{salt, type}`\n- `pg_pgcrypto_crypt`: Hash password with salt. Use stored hash as salt for verification. Returns `{hash, algorithm}`. 
Verification: `crypt(password, storedHash).hash === storedHash`\n\n**Password Workflow**: 1) `genSalt({type:'bf', iterations:10})` \u2192 2) `crypt({password, salt})` \u2192 store hash \u2192 3) Verify: `crypt({password, salt: storedHash})` and compare hashes\n\n**Top-Level Aliases**: `pg.pgcryptoHash()`, `pg.pgcryptoEncrypt()`, `pg.pgcryptoDecrypt()`, `pg.pgcryptoGenRandomUuid()`, etc.\n\n**Discovery**: `pg.pgcrypto.help()` returns `{methods, methodAliases, examples}` object\n\n## Code Mode Sandbox\n\nNo `setTimeout`, `setInterval`, `fetch`, or network access. Use `pg.core.readQuery()` for data access.\n\n\uD83D\uDCCA **Metrics Note**: `memoryUsedMb` measures heap delta (end - start). Negative values indicate memory freed during execution (e.g., GC ran).\n\n## Transactions\n\nCore: `begin()`, `commit()`, `rollback()`, `savepoint()`, `rollbackTo()`, `release()`, `execute()`\n\n**Transaction Lifecycle:**\n- `pg_transaction_begin`: Start new transaction. Returns `{transactionId, isolationLevel, message}`. Use `transactionId` for subsequent operations\n- `pg_transaction_commit`: Commit transaction, making all changes permanent. `transactionId`/`tx`/`txId` aliases\n- `pg_transaction_rollback`: Rollback transaction, discarding all changes. `transactionId`/`tx`/`txId` aliases\n\n**Savepoints:**\n- `pg_transaction_savepoint`: Create savepoint within transaction. `name`/`savepoint` + `transactionId`/`tx`/`txId`\n- `pg_transaction_rollback_to`: Rollback to savepoint, restoring database state to when the savepoint was created. \u26A0\uFE0F Undoes ALL work (data changes AND savepoints) created after the target savepoint\n- `pg_transaction_release`: Release savepoint, keeping all changes since it was created. `name`/`savepoint` aliases\n\n**Atomic Execution:**\n- `pg_transaction_execute`: Execute multiple statements atomically. 
Two modes:\n - **Auto-commit**: Without `transactionId`\u2014auto-commits on success, auto-rollbacks on any error\n - **Join existing**: With `transactionId`/`tx`/`txId`\u2014no auto-commit, caller controls via commit/rollback\n- `statements`: Array of `{sql: \"...\", params?: [...]}` objects. \u26A0\uFE0F Each object MUST have `sql` key\n- `isolationLevel`: Optional isolation level for new transactions ('READ COMMITTED', 'REPEATABLE READ', 'SERIALIZABLE')\n- Supports SELECT statements inside `statements`\u2014results include `rows` in the response for mixed read/write workflows\n\n**Aborted Transaction State:**\n- \u26A0\uFE0F If any statement in a transaction fails, PostgreSQL puts the transaction into an **aborted state**\n- In aborted state, only `ROLLBACK` or `ROLLBACK TO SAVEPOINT` commands are accepted\u2014all other commands will error\n- Use `pg_transaction_rollback` to end the transaction, or `pg_transaction_rollback_to` to recover to a savepoint\n- `pg_transaction_commit` on an aborted transaction will detect the state and report it (not silently rollback)\n\n**Response Structures:**\n- `begin`: `{transactionId, isolationLevel: 'READ COMMITTED', message}`\n- `commit/rollback`: `{success, transactionId, message}`\n- `savepoint/release/rollbackTo`: `{success, transactionId, savepoint, message}`\n- `execute`: `{success, statementsExecuted, results: [{sql, rowsAffected, rowCount, rows?}], transactionId?}`\n\n**Discovery**: `pg.transactions.help()` returns `{methods, methodAliases, examples}`";
13
13
  //# sourceMappingURL=ServerInstructions.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"ServerInstructions.d.ts","sourceRoot":"","sources":["../../src/constants/ServerInstructions.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AACH,eAAO,MAAM,mBAAmB,sn4CA8YyD,CAAC"}
1
+ {"version":3,"file":"ServerInstructions.d.ts","sourceRoot":"","sources":["../../src/constants/ServerInstructions.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AACH,eAAO,MAAM,mBAAmB,+llDAwbyD,CAAC"}
@@ -22,7 +22,7 @@ export const SERVER_INSTRUCTIONS = `# postgres-mcp Code Mode
22
22
  7. **pg_list_objects type**: Use \`type\` (singular string) or \`types\` (array). Auto-converts: \`{type: 'table'}\` ≡ \`{types: ['table']}\`
23
23
  8. **pg_object_details**: Accepts: \`name\`, \`objectName\`, \`object\`, or \`table\`. Use \`type\`/\`objectType\` for type hint (supports: table, view, materialized_view, partitioned_table, function, sequence, index)
24
24
  9. **pg_exists optional WHERE**: \`where\`/\`condition\`/\`filter\` is optional. Without it, checks if table has any rows
25
- 10. **pg_describe_table**: Returns columns, foreignKeys, primaryKeyuse \`pg_get_indexes\` separately for index details
25
+ 10. **pg_describe_table**: Returns columns, foreignKeys, primaryKey, indexes, constraints. For listing ALL database indexes (not table-specific), use \`pg_get_indexes\` without \`table\` param
26
26
  11. **pg_vector_insert updateExisting**: Uses direct UPDATE (avoids NOT NULL constraint issues vs INSERT mode)
27
27
  12. **pg_get_indexes without table**: Returns ALL database indexes (potentially large). Use \`table\` param for specific table
28
28
  13. **pg_upsert/pg_batch_insert RETURNING**: \`returning\` param must be array of column names: \`["id", "name"]\`. ⛔ \`"*"\` wildcard not supported
@@ -33,21 +33,41 @@ export const SERVER_INSTRUCTIONS = `# postgres-mcp Code Mode
33
33
  | Tool | Returns | Notes |
34
34
  |------|---------|-------|
35
35
  | \`pg_read_query\` | \`{rows, rowCount, fields?}\` | \`fields\` contains column metadata (name, dataTypeID) |
36
- | \`pg_write_query\` | \`{rowsAffected, affectedRows, rows?}\` | \`rows\` only with RETURNING clause. ⛔ Throws for SELECT |
37
- | \`pg_upsert\` | \`{operation, rowsAffected, rowCount, rows?}\` | \`operation: 'insert'|'update'\`. \`rows\` only with RETURNING clause |
38
- | \`pg_batch_insert\` | \`{rowsAffected, affectedRows, insertedCount, rows?}\` | Empty objects use DEFAULT VALUES. ⚠️ BIGINT > 2^53 loses precision |
36
+ | \`pg_write_query\` | \`{rowsAffected, affectedRows, rows?}\` | \`rows\` only with RETURNING clause. DDL statements return \`rowsAffected: 0\`. ⛔ Throws for SELECT |
37
+ | \`pg_upsert\` | \`{success, operation, rowsAffected, rowCount, rows?}\` | \`operation: 'insert'|'update'\`. \`rows\` only with RETURNING clause |
38
+ | \`pg_batch_insert\` | \`{success, rowsAffected, affectedRows, insertedCount, rowCount, rows?}\` | Empty objects use DEFAULT VALUES. ⚠️ BIGINT > 2^53 loses precision |
39
+ | \`pg_create_table\` | \`{success, table, sql, compositePrimaryKey?}\` | \`table\` = schema-qualified name. \`compositePrimaryKey\` only when composite PK used |
40
+ | \`pg_drop_table\` | \`{success, dropped, existed}\` | \`existed\` indicates whether table was present before drop |
41
+ | \`pg_create_index\` | \`{success, index, indexName, table, sql, ifNotExists?, alreadyExists?, message?}\` | \`alreadyExists\`/\`message\` only with \`ifNotExists: true\` when index pre-exists |
42
+ | \`pg_drop_index\` | \`{success, index, existed, sql}\` | \`existed\` indicates whether index was present before drop |
43
+ | \`pg_truncate\` | \`{success, table, cascade, restartIdentity}\` | \`cascade\`/\`restartIdentity\` reflect the options used |
39
44
  | \`pg_count\` | \`{count: N}\` | Use \`params\` for placeholders: \`where: 'id=$1', params: [5]\`. DISTINCT: use \`pg_read_query\` |
40
45
  | \`pg_exists\` | \`{exists: bool, mode, hint?}\` | \`params\` for placeholders. \`mode: 'filtered'|'any_rows'\` |
41
46
  | \`pg_get_indexes\` | \`{indexes, count, totalCount?}\` | Default \`limit: 100\` without \`table\`. Use \`schema\`/\`limit\` to filter. Index objects have \`name\`, \`type\`, \`columns\` |
42
47
  | \`pg_list_objects\` | \`{objects, count, totalCount, byType}\` | Use \`limit\` to cap results, \`type\`/\`types\` to filter |
43
- | \`pg_object_details\` | \`{name, schema, type, returnType?, ...}\` | Functions: \`returnType\` alias. Views/Mat. views: \`definition\` |
44
- | \`pg_analyze_db_health\` | \`{cacheHitRatio: {ratio, heap, index, status}}\` | \`ratio\` = primary numeric %. \`bloat\` available |
45
- | \`pg_describe_table\` | \`{columns, indexes, constraints, foreignKeys}\` | Columns include \`notNull\` (alias for \`!nullable\`), \`foreignKey\`. \`constraints\` includes PK, UNIQUE, CHECK, NOT NULL. ⚠️ \`rowCount: -1\` = no statistics (run ANALYZE) |
48
+ | \`pg_object_details\` | \`{name, schema, type, returnType?, ...}\` | Functions: \`returnType\` alias. Views/Mat. views: \`definition\`. Tables: equivalent to \`pg_describe_table\` (columns, primaryKey, indexes, constraints, foreignKeys) |
49
+ | \`pg_analyze_db_health\` | \`{cacheHitRatio, databaseSize, tableStats, unusedIndexes, tablesNeedingVacuum, connections, bloat, isReplica, overallScore, overallStatus}\` | \`cacheHitRatio\`: \`{ratio, heap, index, status}\`. \`overallStatus\`: \`healthy|needs_attention|critical\`. Optional sections via \`includeIndexes\`, \`includeVacuum\`, \`includeConnections\` |
50
+ | \`pg_describe_table\` | \`{name, schema, type, owner, rowCount, columns, primaryKey, indexes, constraints, foreignKeys}\` | Columns include \`notNull\` (alias for \`!nullable\`), \`foreignKey\`. \`constraints\` includes PK, UNIQUE, CHECK, NOT NULL. ⚠️ \`rowCount: -1\` = stale/missing statistics (run ANALYZE on the table). Small tables (<~50 rows) may show -1 until first ANALYZE |
46
51
  | \`pg_analyze_query_indexes\` | \`{plan, issues, recommendations}\` | \`verbosity\`: 'summary' (default) or 'full'. Summary mode returns condensed plan |
47
- | \`pg_list_tables\` | \`{tables, count}\` | Use \`schema\` to filter, \`limit\` to cap results |
52
+ | \`pg_list_tables\` | \`{tables, count, totalCount, truncated?, hint?}\` | Use \`schema\` to filter, \`limit\` to cap results, \`exclude\` to hide extension schemas (e.g., \`['cron', 'topology', 'partman']\`) |
48
53
  | List operations | \`{items, count}\` | Access via \`result.tables\`, \`result.views\`, etc. |
49
54
  | \`pg_jsonb_agg groupBy\` | \`{result: [{group_key, items}], count, grouped: true}\` | Without groupBy: \`{result: [...], count, grouped: false}\` |
50
55
  | \`pg_vector_aggregate\` | \`{average_vector, count}\` or \`{groups: [{group_key, average_vector, count}]}\` | Without/with \`groupBy\` |
56
+ | \`pg_index_stats\` | \`{indexes, count, truncated?, totalCount?}\` | Default 50 rows. Use \`limit: 0\` for all |
57
+ | \`pg_table_stats\` | \`{tables, count, truncated?, totalCount?}\` | Default 50 rows. Use \`limit: 0\` for all |
58
+ | \`pg_vacuum_stats\` | \`{tables, count, truncated?, totalCount?}\` | Default 50 rows. Use \`limit: 0\` for all |
59
+ | \`pg_stat_statements\` | \`{statements, count, truncated?, totalCount?}\` | Default 20 rows. \`orderBy\` supported |
60
+ | \`pg_query_plan_stats\` | \`{queryPlanStats, count, truncated?, totalCount?}\` | Default 20 rows. \`truncateQuery: 0\` for full text |
61
+ | \`pg_stat_activity\` | \`{connections, count}\` | \`includeIdle: true\` to include idle connections |
62
+ | \`pg_locks\` | \`{locks}\` | \`showBlocked: true\` switches to blocked/blocking pid format |
63
+ | \`pg_bloat_check\` | \`{tables, count}\` | Tables with \`live_tuples\`, \`dead_tuples\`, \`dead_pct\` |
64
+ | \`pg_cache_hit_ratio\` | \`{heap_read, heap_hit, cache_hit_ratio}\` | All fields nullable (0 tables = null). Flat response, differs from \`pg_analyze_db_health.cacheHitRatio\` |
65
+ | \`pg_seq_scan_tables\` | \`{tables, count, minScans, hint, truncated?, totalCount?}\` | Default 50 rows. \`minScans\` default: 10 |
66
+ | \`pg_connection_pool_optimize\` | \`{current, config, waitEvents, recommendations}\` | No params needed |
67
+ | \`pg_performance_baseline\` | \`{name, timestamp, metrics}\` | \`metrics\`: \`cache\`, \`tables\`, \`indexes\`, \`connections\`, \`databaseSize\` |
68
+ | \`pg_duplicate_indexes\` | \`{duplicateIndexes, count, hint, truncated?, totalCount?}\` | Default 50 rows. \`duplicate_type\`: EXACT_DUPLICATE, OVERLAPPING, SUBSET |
69
+ | \`pg_query_plan_compare\` | \`{query1, query2, analysis, fullPlans}\` | \`analysis.costDifference\` + \`recommendation\` |
70
+ | \`pg_unused_indexes\` | \`{unusedIndexes, count, hint, truncated?, totalCount?}\` | Default 20 rows. \`summary: true\` → \`{summary, bySchema, totalCount}\` |
51
71
 
52
72
  ## API Mapping
53
73
 
@@ -80,10 +100,10 @@ export const SERVER_INSTRUCTIONS = `# postgres-mcp Code Mode
80
100
  - \`pg_vector_distance\`: Calculate distance between two vectors. \`metric\`: 'l2' (default), 'cosine', 'inner_product'. Returns \`{distance, metric}\`
81
101
  - \`pg_vector_cluster\`: \`clusters\` = \`k\`. Returns centroids with \`{preview, dimensions, truncated}\` format for large vectors (>10 dims)—use \`pg_vector_distance\` to assign rows
82
102
  - \`pg_vector_create_index\`: Use \`type\` (or alias \`method\`) with values 'ivfflat' or 'hnsw'. IVFFlat: \`lists\` param. HNSW: \`m\`, \`efConstruction\` params
83
- - \`pg_vector_performance\`: Auto-generates testVector from first row if omitted. Returns \`testVectorSource: 'auto-generated'|'user-provided'\`
103
+ - \`pg_vector_performance\`: Auto-generates testVector from first row if omitted. Returns \`testVectorSource: 'auto-generated from first row'|'user-provided'\`
84
104
  - \`pg_vector_validate\`: Returns \`{valid: bool, vectorDimensions}\`. Empty vector \`[]\` returns \`{valid: true, vectorDimensions: 0}\`
85
105
  - ⛔ \`pg_vector_embed\`: Demo only (hash-based). Use OpenAI/Cohere for production.
86
- - \`pg_hybrid_search\`: Supports \`schema.table\` format (auto-parsed). Combines vector similarity and full-text search with weighted scoring. Code mode alias: \`pg.hybridSearch()\` → \`pg.vector.hybridSearch()\`
106
+ - \`pg_hybrid_search\`: Supports \`schema.table\` format (auto-parsed). Combines vector similarity and full-text search with weighted scoring. \`textColumn\` auto-detects type: uses tsvector columns directly, wraps text columns with \`to_tsvector()\`. Code mode alias: \`pg.hybridSearch()\` → \`pg.vector.hybridSearch()\`
87
107
  - 📝 **Error Handling**: Vector tools return \`{success: false, error: "...", suggestion: "..."}\` for validation/semantic errors (dimension mismatch, non-vector column, table not found). Check \`success\` field before processing results.
88
108
 
89
109
  ## JSONB Tools
@@ -102,7 +122,6 @@ export const SERVER_INSTRUCTIONS = `# postgres-mcp Code Mode
102
122
 
103
123
  **Top-Level Aliases**: \`pg.jsonbExtract()\`, \`pg.jsonbSet()\`, \`pg.jsonbInsert()\`, \`pg.jsonbDelete()\`, \`pg.jsonbContains()\`, \`pg.jsonbPathQuery()\`, \`pg.jsonbAgg()\`, \`pg.jsonbObject()\`, \`pg.jsonbArray()\`, \`pg.jsonbKeys()\`, \`pg.jsonbStripNulls()\`, \`pg.jsonbTypeof()\`, \`pg.jsonbValidatePath()\`, \`pg.jsonbMerge()\`, \`pg.jsonbNormalize()\`, \`pg.jsonbDiff()\`, \`pg.jsonbIndexSuggest()\`, \`pg.jsonbSecurityScan()\`, \`pg.jsonbStats()\`
104
124
 
105
-
106
125
  ## Stats Tools
107
126
 
108
127
  - All stats tools support \`schema.table\` format (auto-parsed, embedded schema takes priority over explicit \`schema\` param)
@@ -117,11 +136,16 @@ export const SERVER_INSTRUCTIONS = `# postgres-mcp Code Mode
117
136
 
118
137
  **Top-Level Aliases**: \`pg.descriptive()\`, \`pg.percentiles()\`, \`pg.correlation()\`, \`pg.regression()\`, \`pg.timeSeries()\`, \`pg.distribution()\`, \`pg.hypothesis()\`, \`pg.sampling()\`
119
138
 
139
+ ## Text Tools
140
+
141
+ - \`pg_text_search\`/\`pg_text_rank\`: Column must be \`text\` type—pre-built \`tsvector\` columns are **not** supported (wrap with \`to_tsvector()\` fails on tsvector input). Use \`pg_read_query\` with raw FTS SQL for tsvector columns
142
+ - \`pg_create_fts_index\`: Returns \`{success, index, config, skipped}\`. \`skipped: true\` = index already existed (IF NOT EXISTS). \`ifNotExists\` defaults to \`true\`
143
+
120
144
  ## Performance Tools
121
145
 
122
146
  Core (20 methods): \`explain()\`, \`explainAnalyze()\`, \`explainBuffers()\`, \`indexStats()\`, \`tableStats()\`, \`statStatements()\`, \`statActivity()\`, \`locks()\`, \`bloatCheck()\`, \`cacheHitRatio()\`, \`seqScanTables()\`, \`indexRecommendations()\`, \`queryPlanCompare()\`, \`baseline()\`, \`connectionPoolOptimize()\`, \`partitionStrategySuggest()\`, \`unusedIndexes()\`, \`duplicateIndexes()\`, \`vacuumStats()\`, \`queryPlanStats()\`
123
147
 
124
- Wrappers (3): \`blockingQueries()\`→\`locks({showBlocked:true})\`, \`longRunningQueries({ seconds | minDuration }?)\` filters by duration (returns \`statActivity\` format), \`analyzeTable({ table })\` runs ANALYZE (accepts \`schema.table\` format)
148
+ Wrappers (3): \`blockingQueries()\`→\`locks({showBlocked:true})\`, \`longRunningQueries({ seconds | minDuration }?)\` filters by duration (returns \`{longRunningQueries, count, threshold}\`), \`analyzeTable({ table })\` runs ANALYZE (accepts \`schema.table\` format)
125
149
 
126
150
  - \`explain({ sql, format?, params? })\`: Supports \`format: 'text'|'json'|'yaml'|'xml'\`. Default: text. Use \`params: [value]\` for \`$1, $2\` placeholders
127
151
  - \`explainAnalyze({ sql, format?, params? })\`: Same format/params options as explain
@@ -159,7 +183,7 @@ Core: \`databaseSize()\`, \`tableSizes()\`, \`connectionStats()\`, \`showSetting
159
183
  - \`recoveryStatus()\`: Returns \`{in_recovery: boolean, last_replay_timestamp: string|null}\`
160
184
  - \`replicationStatus()\`: Returns \`{role: 'primary'|'replica', replicas: [...]}\` for primary, or \`{role: 'replica', replay_lag, ...}\` for replica
161
185
  - \`resourceUsageAnalyze()\`: Returns \`{backgroundWriter, checkpoints, connectionDistribution, bufferUsage, activity, analysis}\` with all counts as numbers
162
- - \`alertThresholdSet({metric?: 'connection_usage'})\`: Returns recommended thresholds. Invalid metric throws validation error. Valid metrics: connection_usage, cache_hit_ratio, replication_lag, dead_tuples, long_running_queries, lock_wait_time
186
+ - \`alertThresholdSet({metric?: 'connection_usage'})\`: Returns recommended thresholds. Invalid metric returns \`{success: false, error: "..."}\`. Valid metrics: connection_usage, cache_hit_ratio, replication_lag, dead_tuples, long_running_queries, lock_wait_time
163
187
 
164
188
  📦 **AI-Optimized Payloads**: Tools return limited results by default to reduce context size:
165
189
  - \`tableSizes({ limit? })\`: Default 50 rows. Returns \`truncated: true\` + \`totalCount\` when limited. Use \`limit: 0\` for all
@@ -185,6 +209,17 @@ Aliases: \`tableName\`→\`table\`, \`indexName\`→\`index\`, \`param\`/\`setti
185
209
 
186
210
  **Top-Level Aliases**: \`pg.vacuum()\`, \`pg.vacuumAnalyze()\`, \`pg.analyze()\`, \`pg.reindex()\`, \`pg.cluster()\`, \`pg.setConfig()\`, \`pg.reloadConf()\`, \`pg.resetStats()\`, \`pg.cancelBackend()\`, \`pg.terminateBackend()\`
187
211
 
212
+ **Discovery**: \`pg.admin.help()\` returns \`{methods, methodAliases, examples}\` object
213
+
214
+ **Response structures**:
215
+ - \`vacuum()\` / \`vacuumAnalyze()\`: \`{success, message, table?, schema?, hint?}\` (hint present when verbose: true)
216
+ - \`analyze()\`: \`{success, message, table?, schema?, columns?}\`
217
+ - \`reindex()\`: \`{success, message}\`
218
+ - \`cluster()\`: \`{success, message, table?, index?}\` (table/index present for table-specific cluster)
219
+ - \`setConfig()\`: \`{success, message, parameter, value}\`
220
+ - \`reloadConf()\` / \`resetStats()\`: \`{success, message}\`
221
+ - \`cancelBackend()\` / \`terminateBackend()\`: \`{success, message}\`
222
+
188
223
  ## Backup Tools
189
224
 
190
225
  Core: \`dumpTable()\`, \`dumpSchema()\`, \`copyExport()\`, \`copyImport()\`, \`createBackupPlan()\`, \`restoreCommand()\`, \`physical()\`, \`restoreValidate()\`, \`scheduleOptimize()\`
@@ -195,7 +230,7 @@ Response Structures:
195
230
  - \`copyImport\`: \`{command, stdinCommand, notes}\` — Both file and stdin COPY commands
196
231
  - \`createBackupPlan\`: \`{strategy: {fullBackup, walArchiving}, estimates}\`
197
232
  - \`restoreCommand\`: \`{command, warnings?, notes}\` — Warnings when \`database\` omitted
198
- - \`restoreValidate\`: \`{validationSteps: [{step, name, command?, commands?, note?}], recommendations}\` — Note: \`note\` field only for pg_dump default type
233
+ - \`restoreValidate\`: \`{note?, validationSteps: [{step, name, command?, commands?, note?}], recommendations}\` — Top-level \`note\` when \`backupType\` omitted (defaults to pg_dump). Step-level \`note\` for non-command steps
199
234
  - \`physical\`: \`{command, notes, requirements}\`
200
235
  - \`scheduleOptimize\`: \`{analysis, recommendation, commands}\`
201
236
 
@@ -207,7 +242,7 @@ Response Structures:
207
242
  - \`pg_copy_import\`: Generates COPY FROM command. Supports \`schema.table\` format (auto-parsed, takes priority over \`schema\` param). \`columns\` array, \`filePath\`, \`format\`, \`header\`, \`delimiter\`
208
243
  - \`pg_restore_command\`: Include \`database\` parameter for complete command. Optional \`schemaOnly\`, \`dataOnly\`
209
244
  - \`pg_create_backup_plan\`: Generates backup strategy with cron schedule. \`frequency\`: 'hourly'|'daily'|'weekly', \`retention\` count
210
- - \`pg_backup_physical\`: Generates pg_basebackup command. \`format\`: 'plain'|'tar', \`checkpoint\`: 'fast'|'spread', \`compress\`: 0-9
245
+ - \`pg_backup_physical\`: Generates pg_basebackup command. \`format\`: 'plain'|'tar' (default: 'tar'), \`checkpoint\`: 'fast'|'spread', \`compress\`: 0-9
211
246
  - \`pg_restore_validate\`: Generates validation commands. \`backupType\`: 'pg_dump' (default)|'pg_basebackup'
212
247
  - \`pg_backup_schedule_optimize\`: Analyzes database activity patterns and recommends optimal backup schedule
213
248
 
@@ -219,14 +254,13 @@ Defaults: \`threshold\`=0.3 (use 0.1-0.2 for partial), \`maxDistance\`=3 (use 5+
219
254
 
220
255
  - All text tools support \`schema.table\` format (auto-parsed, embedded schema takes priority over explicit \`schema\` param)
221
256
  - \`pg_text_search\`: Supports both \`column\` (singular string) and \`columns\` (array). Either is valid—\`column\` auto-converts to array
222
- - \`pg_trigram_similarity\` vs \`pg_similarity_search\`: Both use pg_trgm. First filters by threshold; second uses set_limit() with %
257
+ - \`pg_trigram_similarity\`, \`pg_fuzzy_match\`, \`pg_regexp_match\`, \`pg_like_search\`: All default to 100 results to prevent large payloads. Use \`limit: 0\` for all rows
223
258
  - \`pg_fuzzy_match\`: Levenshtein returns distance (lower=better). Soundex/metaphone return phonetic codes (exact match only). ⛔ Invalid \`method\` values throw error with valid options
224
259
  - \`pg_text_normalize\`: Removes accents only (unaccent). Does NOT lowercase/trim
225
260
  - 📍 **Table vs Standalone**: \`normalize\`, \`sentiment\`, \`toVector\`, \`toQuery\`, \`searchConfig\` are standalone (text input only). For phonetic matching: use \`pg_fuzzy_match\` with \`method: 'soundex'|'metaphone'\` (direct MCP), or \`pg.text.soundex()\`/\`pg.text.metaphone()\` (Code Mode convenience wrappers that call fuzzyMatch internally)
226
261
 
227
262
  **Top-Level Aliases**: \`pg.textSearch()\`, \`pg.textRank()\`, \`pg.textHeadline()\`, \`pg.textNormalize()\`, \`pg.textSentiment()\`, \`pg.textToVector()\`, \`pg.textToQuery()\`, \`pg.textSearchConfig()\`, \`pg.textTrigramSimilarity()\`, \`pg.textFuzzyMatch()\`, \`pg.textLikeSearch()\`, \`pg.textRegexpMatch()\`, \`pg.textCreateFtsIndex()\`
228
263
 
229
-
230
264
  ## Schema Tools
231
265
 
232
266
  Core: \`listSchemas()\`, \`createSchema()\`, \`dropSchema()\`, \`listViews()\`, \`createView()\`, \`dropView()\`, \`listSequences()\`, \`createSequence()\`, \`dropSequence()\`, \`listFunctions()\`, \`listTriggers()\`, \`listConstraints()\`
@@ -235,18 +269,17 @@ Response Structures:
235
269
  - \`listSchemas()\`: \`{schemas: string[], count}\`
236
270
  - \`listViews({ includeMaterialized?, truncateDefinition?, limit? })\`: \`{views: [{schema, name, type, definition, definitionTruncated?}], count, hasMatViews, truncatedDefinitions?, truncated, note?}\`. Default \`limit: 50\` (use \`0\` for all). Default \`truncateDefinition: 500\` chars (use \`0\` for full definitions). \`truncated\` always included (\`true\`/\`false\`)
237
271
  - \`listSequences({ schema? })\`: \`{sequences: [{schema, name, owned_by}], count}\`. Note: \`owned_by\` omits \`public.\` prefix for sequences in public schema (e.g., \`users.id\` not \`public.users.id\`)
238
- - \`listFunctions({ schema?, limit?, exclude? })\`: \`{functions: [{schema, name, arguments, returns, language, volatility}], count, limit, note?}\`
272
+ - \`listFunctions({ schema?, limit?, exclude? })\`: \`{functions: [{schema, name, arguments, returns, language, volatility}], count, limit}\`
239
273
  - \`listTriggers({ schema?, table? })\`: \`{triggers: [{schema, table_name, name, timing, events, function_name, enabled}], count}\`
240
274
  - \`listConstraints({ schema?, table?, type? })\`: \`{constraints: [{schema, table_name, name, type, definition}], count}\`. Type codes: \`p\`=primary_key, \`f\`=foreign_key, \`u\`=unique, \`c\`=check
241
275
  - \`dropSchema/dropView/dropSequence\`: All return \`{existed: true/false}\` to indicate if object existed before drop
242
- - \`createSchema/createSequence\` (with \`ifNotExists\`) and \`createView\` (with \`orReplace\`): Return \`{alreadyExisted: true/false}\` to indicate if object existed before creation
276
+ - \`createSchema/createSequence\` (with \`ifNotExists\`) and \`createView\` (with \`orReplace\`): Return \`{alreadyExisted: true/false}\` when the flag is set. Without \`ifNotExists\`/\`orReplace\`, the field is omitted
243
277
 
244
278
  - \`pg_create_view\`: Supports \`schema.name\` format (auto-parsed). Use \`orReplace: true\` for CREATE OR REPLACE. \`checkOption\`: 'cascaded', 'local', 'none'. ⛔ OR REPLACE can add new columns but cannot rename/remove existing ones—PostgreSQL limitation
245
279
  - \`pg_create_sequence\`: Supports \`schema.name\` format. Parameters: \`start\`, \`increment\`, \`minValue\`, \`maxValue\`, \`cache\`, \`cycle\`, \`ownedBy\`, \`ifNotExists\`
246
- - \`pg_list_functions\`: Default limit=500. Use \`schema: 'public'\`, \`limit: 2000\`, or \`exclude: ['postgis']\` to filter. ⚠️ \`exclude\` filters by **schema name** AND extension-owned functions. Note: Aggressive \`exclude\` may return 0 results if all functions belong to excluded extensions
247
-
248
- **Discovery**: \`pg.schema.help()\` returns \`{methods: string[], examples: string[]}\` object with available methods and usage examples
280
+ - \`pg_list_functions\`: Default limit=500. Use \`schema: 'public'\`, \`limit: 2000\`, or \`exclude: ['postgis', 'pg_trgm', 'ltree', 'citext', 'fuzzystrmatch', 'pg_stat_statements', 'hypopg', 'unaccent', 'pg_stat_kcache', 'pgcrypto', 'partman', 'vector', 'topology']\` to filter. ⚠️ \`exclude\` filters by **schema name** AND extension-owned functions. The \`language\` filter does NOT exclude extension functions—use \`exclude\` alongside \`language\` for clean results. Note: Aggressive \`exclude\` may return 0 results if all functions belong to excluded extensions
249
281
 
282
+ **Discovery**: \`pg.schema.help()\` returns \`{methods, methodAliases, examples}\` object
250
283
 
251
284
  ## Partitioning Tools
252
285
 
@@ -256,6 +289,8 @@ Response Structures:
256
289
  - \`pg_list_partitions\`: Default \`limit: 50\` (use \`0\` for all). Returns \`{partitions, count, truncated, totalCount?}\`. Uses \`bounds\` field (consistent with \`pg_partition_info\`)
257
290
  - \`pg_partition_info\`: Returns \`{tableInfo, partitions, totalSizeBytes}\`. Uses \`bounds\` field
258
291
  - Both list/info tools support \`schema.table\` format (auto-parsed) and accept \`table\`, \`parent\`, \`parentTable\`, or \`name\` aliases
292
+ - Response structures: \`pg_create_partitioned_table\` → \`{success, table, partitionBy, partitionKey, primaryKey?}\`. \`pg_create_partition\` → \`{success, partition, parent, bounds, subpartitionBy?, subpartitionKey?}\`. \`pg_attach_partition\` → \`{success, parent, partition, bounds}\`. \`pg_detach_partition\` → \`{success, parent, partition}\`
293
+ - ⚠️ Sub-partitioning: \`subpartitionBy\`/\`subpartitionKey\` on \`pg_create_partition\` makes a partition itself partitionable. The parent's \`primaryKey\` must include the sub-partition key column (PostgreSQL constraint)
259
294
  - 📍 Code Mode: \`pg.partitioning.create()\` = \`createPartition\`, NOT \`createPartitionedTable\`
260
295
 
261
296
  ## pg_partman Tools
@@ -266,7 +301,7 @@ Response Structures:
266
301
  - \`pg_partman_show_partitions\`: Default \`limit: 50\` (use \`0\` for all). Returns \`truncated\` + \`totalCount\` when limited. \`parentTable\` required. Supports \`schema.table\` format (auto-parsed)
267
302
  - \`pg_partman_check_default\`/\`partition_data\`: \`parentTable\` required. Supports \`schema.table\` format (auto-parsed)
268
303
  - \`pg_partman_set_retention\`: ⚠️ **CAUTION: Default is DROP** — \`retentionKeepTable: false\` (default) = DROP partitions, \`true\` = detach only (safer). Pass \`retention: null\` to disable retention
269
- - \`pg_partman_undo_partition\`: \`targetTable\` MUST exist before calling. Requires both \`parentTable\` and \`targetTable\`/\`target\`
304
+ - \`pg_partman_undo_partition\`: \`targetTable\` MUST exist before calling. Requires both \`parentTable\` and \`targetTable\`/\`target\`. ⚠️ Parent table and child partitions remain after undo—use \`DROP TABLE parent CASCADE\` to clean up
270
305
  - \`pg_partman_analyze_partition_health\`: Default \`limit: 50\` (use \`0\` for all). Returns \`truncated\` + \`totalCount\` when limited. \`summary.overallHealth\`: 'healthy'|'warnings'|'issues_found'
271
306
  - 📝 **Schema Resolution**: All partman tools auto-prefix \`public.\` when no schema specified in \`parentTable\`
272
307
  - 📝 **Aliases**: \`parentTable\` accepts \`table\`, \`parent\`, \`name\`. \`controlColumn\` accepts \`control\`, \`column\`. \`targetTable\` accepts \`target\`
@@ -275,11 +310,11 @@ Response Structures:
275
310
 
276
311
  Core: \`createExtension()\`, \`queryStats()\`, \`topCpu()\`, \`topIo()\`, \`databaseStats()\`, \`resourceAnalysis()\`, \`reset()\`
277
312
 
278
- - \`pg_kcache_query_stats\`: Default \`limit: 50\` (use \`0\` for all). Returns \`truncated\` + \`totalCount\` when limited. \`orderBy\`: 'total_time' (default), 'cpu_time', 'reads', 'writes'. \`queryPreviewLength\`: chars for query preview (default: 100, max: 500, 0 for full). ⛔ 'calls' NOT valid for orderBy—use \`minCalls\` param
279
- - \`pg_kcache_resource_analysis\`: Default \`limit: 50\` (use \`0\` for all). Returns \`truncated\` + \`totalCount\` when limited. \`minCalls\`, \`queryPreviewLength\` supported. Classifies queries as 'CPU-bound', 'I/O-bound', or 'Balanced'
280
- - \`pg_kcache_top_cpu\`: Top CPU-consuming queries. \`limit\` param (default: 10)
281
- - \`pg_kcache_top_io\`: \`type\`/\`ioType\` (alias): 'reads', 'writes', 'both' (default). \`limit\` param (default: 10)
282
- - \`pg_kcache_database_stats\`: Aggregated CPU/IO stats per database
313
+ - \`pg_kcache_query_stats\`: Default \`limit: 20\` (use \`0\` for all). Returns \`truncated\` + \`totalCount\` when limited. \`orderBy\`: 'total_time' (default), 'cpu_time', 'reads', 'writes'. \`queryPreviewLength\`: chars for query preview (default: 100, max: 500, 0 for full). ⛔ 'calls' NOT valid for orderBy—use \`minCalls\` param
314
+ - \`pg_kcache_resource_analysis\`: Default \`limit: 20\` (use \`0\` for all). Returns \`truncated\` + \`totalCount\` when limited. \`minCalls\`, \`queryPreviewLength\` supported. Classifies queries as 'CPU-bound', 'I/O-bound', or 'Balanced'
315
+ - \`pg_kcache_top_cpu\`: Top CPU-consuming queries. \`limit\` param (default: 10). \`queryPreviewLength\`: chars for query preview (default: 100, max: 500, 0 for full). Returns \`truncated\` + \`totalCount\` when limited
316
+ - \`pg_kcache_top_io\`: \`type\`/\`ioType\` (alias): 'reads', 'writes', 'both' (default). \`limit\` param (default: 10). \`queryPreviewLength\`: chars for query preview (default: 100, max: 500, 0 for full). Returns \`truncated\` + \`totalCount\` when limited
317
+ - \`pg_kcache_database_stats\`: Aggregated CPU/IO stats per database. Optional \`database\` param to filter specific db
283
318
  - \`pg_kcache_reset\`: Resets pg_stat_kcache AND pg_stat_statements statistics
284
319
 
285
320
  ## citext Tools
@@ -308,7 +343,7 @@ Core: \`createExtension()\`, \`query()\`, \`match()\`, \`subpath()\`, \`lca()\`,
308
343
  - \`pg_ltree_convert_column\`: Convert TEXT column to ltree. Supports \`schema.table\` format. \`col\` alias for \`column\`. Returns \`{previousType}\`. ⚠️ When views depend on column, returns \`{success: false, dependentViews, hint}\`—drop/recreate views manually
309
344
  - \`pg_ltree_create_index\`: Create GiST index on ltree column. Supports \`schema.table\` format. Auto-generates index name if \`indexName\` omitted. Returns \`{indexName, indexType: 'gist', alreadyExists?}\`
310
345
 
311
- **Discovery**: \`pg.ltree.help()\` returns \`{methods, aliases, examples}\` object. Top-level aliases available: \`pg.ltreeQuery()\`, \`pg.ltreeMatch()\`, etc.
346
+ **Discovery**: \`pg.ltree.help()\` returns \`{methods, methodAliases, examples}\` object. Top-level aliases available: \`pg.ltreeQuery()\`, \`pg.ltreeMatch()\`, etc.
312
347
 
313
348
  ## PostGIS Tools
314
349
 
@@ -325,7 +360,7 @@ Core: \`createExtension()\`, \`query()\`, \`match()\`, \`subpath()\`, \`lca()\`,
325
360
 
326
361
  **Geometry Operations (Table-based):**
327
362
  - \`pg_buffer\`: Create buffer zone around table geometries. Default limit: 50 rows. Default simplify: 10m (set \`simplify: 0\` to disable). Returns \`truncated: true\` + \`totalCount\` when results are truncated. Use \`limit: 0\` for all rows
328
- - \`pg_geo_transform\`: Transform table geometries between SRIDs. Default limit: 50 rows. Returns \`truncated: true\` + \`totalCount\` when results are truncated. Use \`limit: 0\` for all rows. \`fromSrid\`/\`sourceSrid\` and \`toSrid\`/\`targetSrid\` aliases
363
+ - \`pg_geo_transform\`: Transform table geometries between SRIDs. Default limit: 50 rows. Returns \`truncated: true\` + \`totalCount\` when results are truncated. Use \`limit: 0\` for all rows. Auto-detects \`fromSrid\` from column metadata if not provided (returns \`autoDetectedSrid: true\`). \`fromSrid\`/\`sourceSrid\` and \`toSrid\`/\`targetSrid\` aliases
329
364
  - \`pg_geo_cluster\`: Spatial clustering (DBSCAN/K-Means). K-Means: If \`numClusters\` exceeds row count, automatically clamps to available rows with \`warning\` field. DBSCAN: Returns contextual \`hints\` array explaining parameter effects (e.g., "All points formed single cluster—decrease eps") and \`parameterGuide\` explaining eps/minPoints trade-offs
330
365
 
331
366
  **Geometry Operations (Standalone WKT/GeoJSON):**
@@ -337,7 +372,7 @@ Core: \`createExtension()\`, \`query()\`, \`match()\`, \`subpath()\`, \`lca()\`,
337
372
  - \`pg_postgis_create_extension\`: Enable PostGIS extension (idempotent)
338
373
  - \`pg_geo_index_optimize\`: Analyze spatial indexes. Without \`table\` param, analyzes all spatial indexes
339
374
 
340
- **Code Mode Aliases:** \`pg.postgis.addColumn()\` → \`geometryColumn\`, \`pg.postgis.indexOptimize()\` → \`geoIndexOptimize\`. Note: \`pg.{group}.help()\` returns \`{methods, aliases, examples}\`
375
+ **Code Mode Aliases:** \`pg.postgis.addColumn()\` → \`geometryColumn\`, \`pg.postgis.indexOptimize()\` → \`geoIndexOptimize\`, \`pg.postgis.geoCluster()\` → \`pg_geo_cluster\`, \`pg.postgis.geoTransform()\` → \`pg_geo_transform\`. Note: \`pg.{group}.help()\` returns \`{methods, methodAliases, examples}\`
341
376
 
342
377
  ## Cron Tools (pg_cron)
343
378
 
@@ -346,13 +381,13 @@ Core: \`createExtension()\`, \`schedule()\`, \`scheduleInDatabase()\`, \`unsched
346
381
  - \`pg_cron_schedule\`: Schedule a cron job. \`schedule\` supports standard cron (\`0 5 * * *\`) or interval (\`1 second\` to \`59 seconds\`). ⚠️ Interval syntax only works for 1-59 seconds—for 60+ seconds, use cron syntax (e.g., \`* * * * *\` for every minute). Use \`name\`/\`jobName\` for identification. \`command\`/\`sql\`/\`query\` aliases supported. Note: pg_cron allows duplicate job names; use unique names to avoid confusion when unscheduling
347
382
  - \`pg_cron_schedule_in_database\`: Schedule job in specific database. \`database\`/\`db\` aliases. Optional \`username\`, \`active\` params
348
383
  - \`pg_cron_unschedule\`: Remove job by \`jobId\` or \`jobName\`. If both provided, \`jobName\` takes precedence (with warning)
349
- - \`pg_cron_alter_job\`: Modify existing job. Can change \`schedule\`, \`command\`, \`database\`, \`username\`, \`active\`. ⛔ Non-existent jobId throws error
384
+ - \`pg_cron_alter_job\`: Modify existing job. Can change \`schedule\`, \`command\`, \`database\`, \`username\`, \`active\`. ⛔ Non-existent jobId returns error
350
385
  - \`pg_cron_list_jobs\`: List all jobs. Default \`limit: 50\` (use \`0\` for all). Optional \`active\` boolean filter. Returns \`truncated\` + \`totalCount\` when limited. Returns \`hint\` when jobs have no name
351
386
  - \`pg_cron_job_run_details\`: View execution history. Default \`limit: 50\`. Optional \`jobId\`, \`status\` ('running'|'succeeded'|'failed') filters. Returns \`truncated\` + \`totalCount\` when limited. Returns \`summary\` with counts
352
387
  - \`pg_cron_cleanup_history\`: Delete old run records. \`olderThanDays\`/\`days\` param (default: 7). Optional \`jobId\` to target specific job
353
388
  - \`pg_cron_create_extension\`: Enable pg_cron extension (idempotent). Requires superuser
354
389
 
355
- **Discovery**: \`pg.cron.help()\` returns \`{methods, aliases, examples}\` object
390
+ **Discovery**: \`pg.cron.help()\` returns \`{methods, methodAliases, examples}\` object
356
391
 
357
392
  ## pgcrypto Tools
358
393
 
@@ -372,7 +407,7 @@ Core: \`createExtension()\`, \`hash()\`, \`hmac()\`, \`encrypt()\`, \`decrypt()\
372
407
 
373
408
  **Top-Level Aliases**: \`pg.pgcryptoHash()\`, \`pg.pgcryptoEncrypt()\`, \`pg.pgcryptoDecrypt()\`, \`pg.pgcryptoGenRandomUuid()\`, etc.
374
409
 
375
- **Discovery**: \`pg.pgcrypto.help()\` returns \`{methods, aliases, examples}\` object
410
+ **Discovery**: \`pg.pgcrypto.help()\` returns \`{methods, methodAliases, examples}\` object
376
411
 
377
412
  ## Code Mode Sandbox
378
413
 
@@ -400,6 +435,13 @@ Core: \`begin()\`, \`commit()\`, \`rollback()\`, \`savepoint()\`, \`rollbackTo()
400
435
  - **Join existing**: With \`transactionId\`/\`tx\`/\`txId\`—no auto-commit, caller controls via commit/rollback
401
436
  - \`statements\`: Array of \`{sql: "...", params?: [...]}\` objects. ⚠️ Each object MUST have \`sql\` key
402
437
  - \`isolationLevel\`: Optional isolation level for new transactions ('READ COMMITTED', 'REPEATABLE READ', 'SERIALIZABLE')
438
+ - Supports SELECT statements inside \`statements\`—results include \`rows\` in the response for mixed read/write workflows
439
+
440
+ **Aborted Transaction State:**
441
+ - ⚠️ If any statement in a transaction fails, PostgreSQL puts the transaction into an **aborted state**
442
+ - In aborted state, only \`ROLLBACK\` or \`ROLLBACK TO SAVEPOINT\` commands are accepted—all other commands will error
443
+ - Use \`pg_transaction_rollback\` to end the transaction, or \`pg_transaction_rollback_to\` to recover to a savepoint
444
+ - \`pg_transaction_commit\` on an aborted transaction will detect the state and report it (not silently rollback)
403
445
 
404
446
  **Response Structures:**
405
447
  - \`begin\`: \`{transactionId, isolationLevel: 'READ COMMITTED', message}\`
@@ -1 +1 @@
1
- {"version":3,"file":"ServerInstructions.js","sourceRoot":"","sources":["../../src/constants/ServerInstructions.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AACH,MAAM,CAAC,MAAM,mBAAmB,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;yFA8YsD,CAAC"}
1
+ {"version":3,"file":"ServerInstructions.js","sourceRoot":"","sources":["../../src/constants/ServerInstructions.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AACH,MAAM,CAAC,MAAM,mBAAmB,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;yFAwbsD,CAAC"}