@centrali-io/centrali-mcp 4.4.6 → 4.4.8-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -4,7 +4,7 @@ MCP (Model Context Protocol) server for the Centrali platform. Lets AI assistant
4
4
 
5
5
  > **Full documentation:** [docs.centrali.io](https://docs.centrali.io) — SDK guide, API reference, compute functions, orchestrations, and more.
6
6
 
7
- Built on `@centrali-io/centrali-sdk` v4.4.6. Authenticates via service account client credentials.
7
+ Built on `@centrali-io/centrali-sdk` v4.4.7. Authenticates via service account client credentials.
8
8
 
9
9
  ## Setup
10
10
 
@@ -110,6 +110,7 @@ After connecting, call `describe_centrali` first — it returns the full capabil
110
110
  | `get_compute_job_status` | Check async job status by job ID (poll after invoke_trigger) |
111
111
  | `list_allowed_domains` | List allowed domains for outbound HTTP |
112
112
  | `add_allowed_domain` | Add a domain to the allowlist |
113
+ | `invoke_endpoint` | Call a sync compute endpoint by path (returns response inline) |
113
114
  | `remove_allowed_domain` | Remove a domain from the allowlist |
114
115
 
115
116
  ### Smart Queries
package/dist/index.js CHANGED
@@ -51,7 +51,7 @@ function main() {
51
51
  version: "1.0.0",
52
52
  });
53
53
  // Register all tools
54
- (0, structures_js_1.registerCollectionTools)(server, sdk);
54
+ (0, structures_js_1.registerCollectionTools)(server, sdk, baseUrl, workspaceId);
55
55
  (0, structures_js_1.registerStructureTools)(server, sdk);
56
56
  (0, records_js_1.registerRecordTools)(server, sdk);
57
57
  (0, search_js_1.registerSearchTools)(server, sdk);
@@ -37,10 +37,10 @@ function ensureToken(sdk) {
37
37
  */
38
38
  function createIamClient(sdk, centraliUrl, workspaceId) {
39
39
  const url = new URL(centraliUrl);
40
- const hostname = url.hostname.startsWith("api.")
40
+ const hostname = url.hostname.startsWith("auth.")
41
41
  ? url.hostname
42
- : `api.${url.hostname}`;
43
- const baseURL = `${url.protocol}//${hostname}/iam/workspace/${workspaceId}/api/v1/external-auth-providers`;
42
+ : `auth.${url.hostname.replace(/^api\./, '')}`;
43
+ const baseURL = `${url.protocol}//${hostname}/workspace/${workspaceId}/api/v1/external-auth-providers`;
44
44
  const client = axios_1.default.create({ baseURL });
45
45
  client.interceptors.request.use((config) => __awaiter(this, void 0, void 0, function* () {
46
46
  const token = yield ensureToken(sdk);
@@ -82,9 +82,9 @@ function registerComputeTools(server, sdk, centraliUrl, workspaceId) {
82
82
  };
83
83
  }
84
84
  }));
85
- server.tool("list_triggers", "List function triggers in the workspace. Triggers define how and when compute functions are executed (on-demand, event-driven, scheduled, http-trigger).", {
85
+ server.tool("list_triggers", "List function triggers in the workspace. Triggers define how and when compute functions are executed (on-demand, event-driven, scheduled, http-trigger, endpoint).", {
86
86
  executionType: zod_1.z
87
- .enum(["on-demand", "event-driven", "scheduled", "http-trigger"])
87
+ .enum(["on-demand", "event-driven", "scheduled", "http-trigger", "endpoint"])
88
88
  .optional()
89
89
  .describe("Filter by trigger execution type"),
90
90
  page: zod_1.z.number().optional().describe("Page number"),
@@ -394,13 +394,13 @@ function registerComputeTools(server, sdk, centraliUrl, workspaceId) {
394
394
  name: zod_1.z.string().describe("Display name for the trigger"),
395
395
  functionId: zod_1.z.string().describe("The compute function ID (UUID) to execute"),
396
396
  executionType: zod_1.z
397
- .enum(["on-demand", "event-driven", "scheduled", "http-trigger"])
398
- .describe("How the trigger fires: on-demand (manual), event-driven (data events), scheduled (cron), or http-trigger (external HTTP POST)"),
397
+ .enum(["on-demand", "event-driven", "scheduled", "http-trigger", "endpoint"])
398
+ .describe("How the trigger fires: on-demand (manual), event-driven (data events), scheduled (cron), http-trigger (external HTTP POST), or endpoint (synchronous HTTP API — returns response inline)"),
399
399
  description: zod_1.z.string().optional().describe("Optional description"),
400
400
  triggerMetadata: zod_1.z
401
401
  .record(zod_1.z.string(), zod_1.z.any())
402
402
  .optional()
403
- .describe("Type-specific configuration. For event-driven: { eventType, recordSlug } where eventType is record_created | record_updated | record_deleted. For scheduled: { scheduleType, cronExpression, timezone }. For http-trigger: auto-generated URL."),
403
+ .describe("Type-specific configuration. For event-driven: { eventType, recordSlug }. For scheduled: { scheduleType, cronExpression, timezone }. For http-trigger: auto-generated URL. For endpoint: { path, allowedMethods?, timeoutMs?, auth? } where path is URL-safe (e.g., 'create-order'), allowedMethods defaults to ['POST'], timeoutMs 1000-30000 (default 30000), auth is { mode: 'bearer'|'public'|'apiKey'|'hmac' }."),
404
404
  enabled: zod_1.z.boolean().optional().describe("Whether the trigger is enabled (default: true)"),
405
405
  }, (_a) => __awaiter(this, [_a], void 0, function* ({ name, functionId, executionType, description, triggerMetadata, enabled }) {
406
406
  try {
@@ -616,6 +616,54 @@ function registerComputeTools(server, sdk, centraliUrl, workspaceId) {
616
616
  };
617
617
  }
618
618
  }));
619
+ // ── Endpoint Trigger (Sync Execution) ─────────────────────────────
620
+ server.tool("invoke_endpoint", "Invoke a compute endpoint trigger by path. The function executes synchronously — Centrali waits for the function to complete and returns its output directly in the response. No polling needed. Max execution time: 30 seconds (configurable via triggerMetadata.timeoutMs, range 1–30s). If the function exceeds the timeout, returns 504. Endpoint triggers must be created first with executionType='endpoint'. Use this for real-time API responses; use invoke_trigger for long-running background work that doesn't need an immediate response.", {
621
+ path: zod_1.z.string().describe("The endpoint path (e.g., 'create-order', 'webhook/shipments'). This is set in the trigger's triggerMetadata.path."),
622
+ method: zod_1.z.enum(["GET", "POST", "PUT", "DELETE", "PATCH"]).optional().describe("HTTP method (default: POST). Must be in the trigger's allowedMethods."),
623
+ payload: zod_1.z.record(zod_1.z.string(), zod_1.z.any()).optional().describe("Request body payload (sent as JSON)"),
624
+ headers: zod_1.z.record(zod_1.z.string(), zod_1.z.string()).optional().describe("Additional headers (e.g., X-API-Key for apiKey auth)"),
625
+ }, (_a) => __awaiter(this, [_a], void 0, function* ({ path, method, payload, headers: extraHeaders }) {
626
+ try {
627
+ const token = yield ensureToken(sdk);
628
+ const url = new URL(centraliUrl);
629
+ const hostname = url.hostname.startsWith("api.")
630
+ ? url.hostname
631
+ : `api.${url.hostname}`;
632
+ const apiUrl = `${url.protocol}//${hostname}/data/workspace/${workspaceId}/api/v1/endpoints/${path}`;
633
+ const reqHeaders = {};
634
+ if (token)
635
+ reqHeaders.Authorization = `Bearer ${token}`;
636
+ if (extraHeaders)
637
+ Object.assign(reqHeaders, extraHeaders);
638
+ const httpMethod = (method || "POST").toLowerCase();
639
+ const result = yield (0, axios_1.default)({
640
+ method: httpMethod,
641
+ url: apiUrl,
642
+ data: ["get", "delete"].includes(httpMethod) ? undefined : (payload || {}),
643
+ headers: reqHeaders,
644
+ validateStatus: () => true, // Don't throw on non-2xx — return the function's response as-is
645
+ });
646
+ return {
647
+ content: [{
648
+ type: "text",
649
+ text: JSON.stringify({
650
+ status: result.status,
651
+ headers: {
652
+ "content-type": result.headers["content-type"],
653
+ "x-execution-id": result.headers["x-execution-id"],
654
+ },
655
+ body: result.data,
656
+ }, null, 2),
657
+ }],
658
+ };
659
+ }
660
+ catch (error) {
661
+ return {
662
+ content: [{ type: "text", text: formatError(error, `invoking endpoint '${path}'`) }],
663
+ isError: true,
664
+ };
665
+ }
666
+ }));
619
667
  // ── Allowed Domains tools ──────────────────────────────────────────
620
668
  server.tool("list_allowed_domains", "List all allowed domains for compute function HTTP requests. Functions can only call external APIs on domains in this allowlist.", {}, () => __awaiter(this, void 0, void 0, function* () {
621
669
  try {
@@ -85,6 +85,7 @@ function registerDescribeTools(server) {
85
85
  "get_function_run",
86
86
  "list_function_runs",
87
87
  "get_compute_job_status",
88
+ "invoke_endpoint",
88
89
  ],
89
90
  },
90
91
  smart_queries: {
@@ -220,6 +221,16 @@ function registerDescribeTools(server) {
220
221
  ],
221
222
  },
222
223
  },
224
+ naming_guide: {
225
+ description: "Different tools use different parameter names for the same concept (collection identifier). This is a historical naming drift — all of these refer to the same thing.",
226
+ aliases: {
227
+ recordSlug: "Used by record tools (query_records, create_record, etc.). This is the collection's URL-safe slug, e.g., 'orders'.",
228
+ structureSlug: "Used by validation and insights tools. Same value as recordSlug.",
229
+ collections: "Used by search_records. Same value as recordSlug. Accepts a string or array of strings.",
230
+ structureIds: "Used by generate_starter_pages. This is the collection UUID (not the slug). Get it from list_collections → id field.",
231
+ },
232
+ rule: "When a tool asks for recordSlug, structureSlug, or collections — use the collection's slug (e.g., 'orders'). When a tool asks for structureIds — use the collection's UUID.",
233
+ },
223
234
  workflow: "Typical workflow: 1) Define collections → 2) Create records → 3) Write compute functions → 4) Wire orchestrations → 5) Build pages to surface data → 6) Publish pages for end users. When building an app, also: 7) Create a service account → 8) Grant least-privilege permissions via remediation → 9) Create publishable keys for the frontend.",
224
235
  app_credential_setup: {
225
236
  description: "When building an app that uses Centrali, you need credentials. The flow depends on whether the code runs server-side or client-side.",
@@ -522,13 +533,29 @@ function registerDescribeTools(server) {
522
533
  description: "Triggered by an external HTTP POST to a generated webhook URL.",
523
534
  config: "Each http-trigger gets a unique URL to share with external services",
524
535
  },
536
+ "endpoint": {
537
+ description: "Turns a compute function into a custom API endpoint. Unlike all other trigger types (which are async and return a job ID you must poll), endpoint triggers WAIT for the function to complete and return its output directly in the HTTP response. Max execution time: 30 seconds.",
538
+ when_to_use: "Use endpoint triggers when the caller needs the function's output immediately (REST APIs, form handlers, webhook responders, data calculations). Use on-demand/invoke_trigger for long-running work (>30s) or fire-and-forget background jobs.",
539
+ config: {
540
+ path: "string — URL-safe path (e.g., 'create-order', 'webhook/shipments'). Must be unique per workspace.",
541
+ allowedMethods: "string[] — HTTP methods to accept (default: ['POST']). Options: GET, POST, PUT, DELETE, PATCH.",
542
+ timeoutMs: "number — execution timeout 1000-30000ms (default: 30000). Function MUST complete within this window or the request returns 504 Gateway Timeout.",
543
+ auth: "{ mode: 'bearer'|'public'|'apiKey'|'hmac' } — authentication mode. bearer=IAM token, public=no auth, apiKey=X-API-Key header (auto-generated), hmac=X-Signature header (auto-generated signing secret).",
544
+ },
545
+ invocation: "Call invoke_endpoint with the path. Response comes back inline — no polling needed.",
546
+ example_use_cases: [
547
+ "Build a REST API endpoint backed by a compute function",
548
+ "Create a webhook receiver that processes and responds synchronously",
549
+ "Expose a function as an HTTP service for external integrations",
550
+ ],
551
+ },
525
552
  },
526
553
  trigger_shape: {
527
554
  id: "UUID",
528
555
  name: "string",
529
556
  description: "string | null",
530
557
  functionId: "UUID — the compute function to execute",
531
- executionType: "'on-demand' | 'event-driven' | 'scheduled' | 'http-trigger'",
558
+ executionType: "'on-demand' | 'event-driven' | 'scheduled' | 'http-trigger' | 'endpoint'",
532
559
  triggerMetadata: "object — type-specific configuration (event, cron, params, etc.)",
533
560
  enabled: "boolean — whether the trigger is active (default: true)",
534
561
  workspaceSlug: "string",
@@ -575,6 +602,7 @@ function registerDescribeTools(server) {
575
602
  triggerMetadata_examples: {
576
603
  "event-driven": { eventType: "record_created", recordSlug: "orders" },
577
604
  scheduled: { scheduleType: "cron", cronExpression: "0 9 * * *", timezone: "America/New_York" },
605
+ endpoint: { path: "create-order", allowedMethods: ["POST"], timeoutMs: 10000, auth: { mode: "bearer" } },
578
606
  },
579
607
  },
580
608
  update_trigger: {
@@ -969,7 +997,7 @@ function registerDescribeTools(server) {
969
997
  id: "UUID",
970
998
  structureSlug: "string — the collection where the anomaly was detected",
971
999
  type: "string — anomaly type (e.g., 'spike', 'drop', 'outlier', 'pattern_break')",
972
- severity: "'critical' | 'high' | 'medium' | 'low'",
1000
+ severity: "'info' | 'warning' | 'critical'",
973
1001
  status: "'active' | 'acknowledged' | 'dismissed'",
974
1002
  title: "string — human-readable summary",
975
1003
  description: "string — detailed explanation of the anomaly",
@@ -986,12 +1014,12 @@ function registerDescribeTools(server) {
986
1014
  totalActive: "number",
987
1015
  totalAcknowledged: "number",
988
1016
  totalDismissed: "number",
989
- bySeverity: "{ critical: n, high: n, medium: n, low: n }",
1017
+ bySeverity: "{ info: n, warning: n, critical: n }",
990
1018
  },
991
1019
  tips: [
992
1020
  "Use trigger_anomaly_analysis to scan a specific collection on-demand",
993
1021
  "Use get_insights_summary for a quick overview before diving into individual insights",
994
- "Filter by severity='critical' to focus on the most important issues first",
1022
+ "Filter by severity='critical' to focus on the most important issues first. Severity levels: info (noteworthy), warning (needs attention), critical (requires action).",
995
1023
  "Acknowledged insights are still visible — use dismiss for false positives",
996
1024
  ],
997
1025
  }, null, 2),
@@ -1108,6 +1136,12 @@ function registerDescribeTools(server) {
1108
1136
  "4_publish": "publish_page — makes the page accessible at its runtime URL",
1109
1137
  "5_iterate": "Repeat steps 2-4 to update. Each publish creates a new version.",
1110
1138
  unpublish: "unpublish_page — removes the page from its runtime URL (keeps the definition)",
1139
+ important_behavior: {
1140
+ description: "Publishing does NOT consume or delete the draft. After publishing, the page has BOTH an activePublication (the live version) and a currentDraft (a new working draft for future edits). This is intentional — it allows iterating on changes without affecting the live page.",
1141
+ example: "After publishing version 1, get_page shows: activePublication.versionNumber=1 (live) and currentDraft.versionNumber=2 (editable). The draft is not stale — it's the starting point for the next publish cycle.",
1142
+ agent_tip: "Do not treat the draft as an error or leftover. The normal state of a published page is: one active publication + one working draft. Edit the draft, validate, publish again to create version 3, etc.",
1143
+ },
1144
+ eventual_consistency_note: "After save_page_draft or set_navigation, there may be a brief delay before the data is visible via get_page_draft or get_navigation. If you get a 'not found' immediately after writing, wait 1-2 seconds and retry.",
1111
1145
  },
1112
1146
  access_modes: {
1113
1147
  public: "Anyone can view — no authentication required",
@@ -46,7 +46,7 @@ function registerInsightTools(server, sdk) {
46
46
  .optional()
47
47
  .describe("Filter by insight status"),
48
48
  severity: zod_1.z
49
- .enum(["critical", "high", "medium", "low"])
49
+ .enum(["info", "warning", "critical"])
50
50
  .optional()
51
51
  .describe("Filter by severity level"),
52
52
  }, (_a) => __awaiter(this, [_a], void 0, function* ({ structureSlug, status, severity }) {
@@ -37,10 +37,10 @@ function ensureToken(sdk) {
37
37
  */
38
38
  function createIamClient(sdk, centraliUrl, workspaceId, baseSuffix) {
39
39
  const url = new URL(centraliUrl);
40
- const hostname = url.hostname.startsWith("api.")
40
+ const hostname = url.hostname.startsWith("auth.")
41
41
  ? url.hostname
42
- : `api.${url.hostname}`;
43
- const baseURL = `${url.protocol}//${hostname}/iam/workspace/${workspaceId}/api/v1/${baseSuffix}`;
42
+ : `auth.${url.hostname.replace(/^api\./, '')}`;
43
+ const baseURL = `${url.protocol}//${hostname}/workspace/${workspaceId}/api/v1/${baseSuffix}`;
44
44
  const client = axios_1.default.create({ baseURL });
45
45
  client.interceptors.request.use((config) => __awaiter(this, void 0, void 0, function* () {
46
46
  const token = yield ensureToken(sdk);
@@ -1,4 +1,4 @@
1
1
  import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
2
2
  import { CentraliSDK } from "@centrali-io/centrali-sdk";
3
3
  export declare function registerStructureTools(server: McpServer, sdk: CentraliSDK): void;
4
- export declare function registerCollectionTools(server: McpServer, sdk: CentraliSDK): void;
4
+ export declare function registerCollectionTools(server: McpServer, sdk: CentraliSDK, centraliUrl: string, workspaceId: string): void;
@@ -8,18 +8,39 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
8
8
  step((generator = generator.apply(thisArg, _arguments || [])).next());
9
9
  });
10
10
  };
11
+ var __importDefault = (this && this.__importDefault) || function (mod) {
12
+ return (mod && mod.__esModule) ? mod : { "default": mod };
13
+ };
11
14
  Object.defineProperty(exports, "__esModule", { value: true });
12
15
  exports.registerStructureTools = registerStructureTools;
13
16
  exports.registerCollectionTools = registerCollectionTools;
17
+ const axios_1 = __importDefault(require("axios"));
14
18
  const zod_1 = require("zod");
15
19
  function formatError(error, context) {
16
- var _a, _b;
20
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
17
21
  if (error && typeof error === 'object') {
18
22
  const e = error;
23
+ if ((_a = e.response) === null || _a === void 0 ? void 0 : _a.data) {
24
+ const d = e.response.data;
25
+ const code = (_f = (_e = (_c = (_b = d.code) !== null && _b !== void 0 ? _b : d.errorCode) !== null && _c !== void 0 ? _c : (_d = d.error) === null || _d === void 0 ? void 0 : _d.code) !== null && _e !== void 0 ? _e : e.response.status) !== null && _f !== void 0 ? _f : "ERROR";
26
+ const message = (_j = (_g = d.message) !== null && _g !== void 0 ? _g : (_h = d.error) === null || _h === void 0 ? void 0 : _h.message) !== null && _j !== void 0 ? _j : JSON.stringify(d);
27
+ let msg = `Error ${context}: [${code}] ${message}`;
28
+ // Include extra context from error response (e.g. classification details)
29
+ if (d.classification) {
30
+ msg += `\nClassification: ${JSON.stringify(d.classification)}`;
31
+ }
32
+ if (d.criticalChanges) {
33
+ msg += `\nCritical changes: ${JSON.stringify(d.criticalChanges)}`;
34
+ }
35
+ if (d.suggestion) {
36
+ msg += `\nSuggestion: ${d.suggestion}`;
37
+ }
38
+ return msg;
39
+ }
19
40
  if ('message' in e) {
20
41
  let msg = `Error ${context}`;
21
42
  if ('code' in e || 'status' in e) {
22
- msg += `: [${(_b = (_a = e.code) !== null && _a !== void 0 ? _a : e.status) !== null && _b !== void 0 ? _b : 'ERROR'}] ${e.message}`;
43
+ msg += `: [${(_l = (_k = e.code) !== null && _k !== void 0 ? _k : e.status) !== null && _l !== void 0 ? _l : 'ERROR'}] ${e.message}`;
23
44
  }
24
45
  else {
25
46
  msg += `: ${e.message}`;
@@ -34,6 +55,58 @@ function formatError(error, context) {
34
55
  }
35
56
  return `Error ${context}: ${error instanceof Error ? error.message : String(error)}`;
36
57
  }
58
+ /**
59
+ * Ensures the SDK has a valid token.
60
+ */
61
+ function ensureToken(sdk) {
62
+ return __awaiter(this, void 0, void 0, function* () {
63
+ let token = sdk.getToken();
64
+ if (token)
65
+ return token;
66
+ try {
67
+ yield sdk.functions.list({ limit: 1 });
68
+ }
69
+ catch ( /* token refresh side effect */_a) { /* token refresh side effect */ }
70
+ return sdk.getToken();
71
+ });
72
+ }
73
+ /**
74
+ * Creates an axios instance pointing at the data service collections API.
75
+ */
76
+ function createDataClient(sdk, centraliUrl, workspaceId, pathSuffix) {
77
+ const url = new URL(centraliUrl);
78
+ const hostname = url.hostname.startsWith("api.")
79
+ ? url.hostname
80
+ : `api.${url.hostname}`;
81
+ const baseURL = `${url.protocol}//${hostname}/data/workspace/${workspaceId}/api/v1/${pathSuffix}`;
82
+ const client = axios_1.default.create({ baseURL });
83
+ client.interceptors.request.use((config) => __awaiter(this, void 0, void 0, function* () {
84
+ const token = yield ensureToken(sdk);
85
+ if (token) {
86
+ config.headers.Authorization = `Bearer ${token}`;
87
+ }
88
+ return config;
89
+ }));
90
+ client.interceptors.response.use((response) => response, (error) => __awaiter(this, void 0, void 0, function* () {
91
+ var _a, _b;
92
+ const originalRequest = error.config;
93
+ const isAuthError = ((_a = error.response) === null || _a === void 0 ? void 0 : _a.status) === 401 || ((_b = error.response) === null || _b === void 0 ? void 0 : _b.status) === 403;
94
+ if (isAuthError && !originalRequest._hasRetried) {
95
+ originalRequest._hasRetried = true;
96
+ try {
97
+ yield sdk.functions.list({ limit: 1 });
98
+ }
99
+ catch ( /* token refresh side effect */_c) { /* token refresh side effect */ }
100
+ const token = sdk.getToken();
101
+ if (token) {
102
+ originalRequest.headers.Authorization = `Bearer ${token}`;
103
+ return client.request(originalRequest);
104
+ }
105
+ }
106
+ return Promise.reject(error);
107
+ }));
108
+ return client;
109
+ }
37
110
  function registerStructureTools(server, sdk) {
38
111
  server.tool("list_structures", "[DEPRECATED: use list_collections instead] List all data structures (schemas) in the Centrali workspace. Returns name, slug, description, and property definitions for each structure.", {
39
112
  page: zod_1.z.number().optional().describe("Page number (1-indexed)"),
@@ -85,7 +158,8 @@ function registerStructureTools(server, sdk) {
85
158
  }
86
159
  }));
87
160
  }
88
- function registerCollectionTools(server, sdk) {
161
+ function registerCollectionTools(server, sdk, centraliUrl, workspaceId) {
162
+ const getCollectionsClient = () => createDataClient(sdk, centraliUrl, workspaceId, "collections");
89
163
  server.tool("list_collections", "List all data collections (schemas) in the Centrali workspace. Returns name, slug, description, and property definitions for each collection.", {
90
164
  page: zod_1.z.number().optional().describe("Page number (1-indexed)"),
91
165
  limit: zod_1.z.number().optional().describe("Results per page"),
@@ -181,14 +255,132 @@ function registerCollectionTools(server, sdk) {
181
255
  };
182
256
  }
183
257
  }));
184
- server.tool("update_collection", "Update an existing collection by ID. Only include the fields you want to change.", {
258
+ // ── Schema Update Workflow ─────────────────────────────────────────
259
+ //
260
+ // Updating a collection schema is a multi-step process because changes
261
+ // may require migrating existing records. The workflow is:
262
+ //
263
+ // 1. analyze_collection_update — classify changes, get suggested fixes
264
+ // 2. preview_collection_migration — (optional) test fixes on sample records
265
+ // 3. update_collection — apply the update, with migration plan if needed
266
+ // 4. get_collection_upgrade_progress — poll async migration status
267
+ //
268
+ // For simple changes (adding optional fields, renaming, metadata-only),
269
+ // update_collection handles everything automatically. For breaking or
270
+ // critical changes, analyze first to understand what's needed.
271
+ server.tool("analyze_collection_update", `Analyze proposed schema changes BEFORE applying them. Returns whether migration is needed, what changes are breaking/critical, suggested fixes, and estimated duration.
272
+
273
+ WHEN TO USE: Call this before update_collection when you are changing properties (adding required fields, changing types, renaming fields, deleting fields, or toggling isSecret). This tells you exactly what will happen and what migration plan is needed.
274
+
275
+ CHANGE CATEGORIES:
276
+ - Non-breaking (no migration): adding optional fields, metadata changes → update_collection handles directly
277
+ - Breaking (migration, skippable on non-strict): field renames, deletions, type changes, constraint changes → can skip on schemaless/auto-evolving collections
278
+ - Critical (migration, NEVER skippable): adding required fields without defaults, toggling isSecret → must provide a migration plan with fixes
279
+
280
+ RESPONSE FIELDS:
281
+ - needsMigration: whether existing records need to be updated
282
+ - canSkip: whether migration can be skipped (only false for critical changes or strict schemas with breaking changes)
283
+ - recordCount: number of existing records affected
284
+ - classification: detailed breakdown of all changes by category
285
+ - suggestedFixes: auto-generated fixes you can use in update_collection's migrationPlan
286
+ - estimatedDuration: 'instant' | 'seconds' | 'minutes' | 'long'`, {
287
+ collectionId: zod_1.z.string().describe("The collection ID (UUID) to analyze"),
288
+ properties: zod_1.z
289
+ .array(zod_1.z.record(zod_1.z.string(), zod_1.z.any()))
290
+ .describe("The COMPLETE new properties array (all fields, not just changed ones). Must include existing property IDs for fields being kept/modified."),
291
+ }, (_a) => __awaiter(this, [_a], void 0, function* ({ collectionId, properties }) {
292
+ try {
293
+ const client = getCollectionsClient();
294
+ const result = yield client.post(`/${collectionId}/analyze-update`, { properties });
295
+ const data = result.data;
296
+ // Build a human-readable summary for the LLM
297
+ const lines = [];
298
+ lines.push(`## Analysis for collection ${collectionId}`);
299
+ lines.push(`- Records affected: ${data.recordCount}`);
300
+ lines.push(`- Needs migration: ${data.needsMigration}`);
301
+ lines.push(`- Can skip migration: ${data.canSkip}`);
302
+ lines.push(`- Estimated duration: ${data.estimatedDuration}`);
303
+ if (!data.needsMigration) {
304
+ lines.push(`\n✅ No migration needed. You can call update_collection directly.`);
305
+ }
306
+ else if (data.canSkip) {
307
+ lines.push(`\n⚠️ Migration can be skipped. Call update_collection without a migrationPlan to skip, or provide one for a clean migration.`);
308
+ }
309
+ else {
310
+ lines.push(`\n🔴 Migration REQUIRED and cannot be skipped. You must call update_collection with a migrationPlan.`);
311
+ lines.push(`Use mode 'auto' to apply suggested fixes, or mode 'manual' with custom fixes.`);
312
+ }
313
+ if (data.suggestedFixes && data.suggestedFixes.length > 0) {
314
+ lines.push(`\n### Suggested fixes (use these in migrationPlan.fixes or mode 'auto'):`);
315
+ }
316
+ lines.push(`\n### Full analysis:`);
317
+ lines.push(JSON.stringify(data, null, 2));
318
+ return {
319
+ content: [{ type: "text", text: lines.join("\n") }],
320
+ };
321
+ }
322
+ catch (error) {
323
+ return {
324
+ content: [{ type: "text", text: formatError(error, `analyzing update for collection '${collectionId}'`) }],
325
+ isError: true,
326
+ };
327
+ }
328
+ }));
329
+ server.tool("preview_collection_migration", `Preview the effect of migration fixes on sample records BEFORE applying them. Shows before/after snapshots so you can verify fixes are correct.
330
+
331
+ WHEN TO USE: After analyze_collection_update returns suggestedFixes, call this to see how those fixes would transform actual records. Useful for validating type conversions, default values, and expressions before committing.`, {
332
+ collectionId: zod_1.z.string().describe("The collection ID (UUID)"),
333
+ fixes: zod_1.z
334
+ .array(zod_1.z.record(zod_1.z.string(), zod_1.z.any()))
335
+ .describe("Array of migration fixes to preview (from analyze_collection_update suggestedFixes or custom fixes)"),
336
+ properties: zod_1.z
337
+ .array(zod_1.z.record(zod_1.z.string(), zod_1.z.any()))
338
+ .describe("The COMPLETE new properties array (same as passed to analyze_collection_update)"),
339
+ }, (_a) => __awaiter(this, [_a], void 0, function* ({ collectionId, fixes, properties }) {
340
+ try {
341
+ const client = getCollectionsClient();
342
+ const result = yield client.post(`/${collectionId}/preview-fixes`, { fixes, properties });
343
+ return {
344
+ content: [{ type: "text", text: JSON.stringify(result.data, null, 2) }],
345
+ };
346
+ }
347
+ catch (error) {
348
+ return {
349
+ content: [{ type: "text", text: formatError(error, `previewing fixes for collection '${collectionId}'`) }],
350
+ isError: true,
351
+ };
352
+ }
353
+ }));
354
+ server.tool("update_collection", `Update a collection's schema using the safe upgrade endpoint. This handles both simple updates and complex migrations.
355
+
356
+ HOW IT WORKS:
357
+ 1. If your changes don't require migration (adding optional fields, metadata changes) → completes immediately
358
+ 2. If migration is required but you don't provide a migrationPlan → returns an error telling you what's needed
359
+ 3. If migration is required and you provide migrationPlan → starts the migration (may be async for large collections)
360
+
361
+ RECOMMENDED WORKFLOW:
362
+ 1. Call analyze_collection_update first to understand what your changes require
363
+ 2. If needsMigration=false → call this tool without migrationPlan
364
+ 3. If needsMigration=true and canSkip=true → call this tool without migrationPlan to skip migration
365
+ 4. If needsMigration=true and canSkip=false → call this tool WITH migrationPlan
366
+ 5. If status='in_progress' → poll with get_collection_upgrade_progress
367
+
368
+ MIGRATION PLAN:
369
+ - mode 'auto': Uses system-generated fixes from analyze_collection_update (handles renames, type conversions, deletions automatically)
370
+ - mode 'manual': Provide your own fixes array for full control over how records are transformed
371
+
372
+ FIX FORMAT (for manual mode):
373
+ Each fix is an object with: { fieldName, fixType, value?, expression?, applyToAll?, filterConditions? }
374
+ - fixType: 'static_value' (set a fixed value), 'expression' (JS expression with 'record' variable), 'auto' (system handles it)
375
+ - For new required fields: provide a default value or expression
376
+ - For type changes: provide a conversion expression`, {
185
377
  collectionId: zod_1.z.string().describe("The collection ID (UUID) to update"),
186
378
  name: zod_1.z.string().optional().describe("Updated display name"),
187
379
  description: zod_1.z.string().optional().describe("Updated description"),
188
380
  properties: zod_1.z
189
381
  .array(zod_1.z.record(zod_1.z.string(), zod_1.z.any()))
190
382
  .optional()
191
- .describe("Updated array of property definitions (replaces existing properties)"),
383
+ .describe("The COMPLETE new properties array. Must include ALL fields (existing + new). Include property 'id' for existing fields being kept/modified."),
192
384
  enableVersioning: zod_1.z.boolean().optional().describe("Enable or disable record versioning"),
193
385
  tags: zod_1.z.array(zod_1.z.string()).optional().describe("Updated tags"),
194
386
  defaultTtlSeconds: zod_1.z
@@ -196,36 +388,83 @@ function registerCollectionTools(server, sdk) {
196
388
  .nullable()
197
389
  .optional()
198
390
  .describe("Default TTL in seconds for new records. Set to null to clear."),
199
- }, (_a) => __awaiter(this, [_a], void 0, function* ({ collectionId, name, description, properties, enableVersioning, tags, defaultTtlSeconds }) {
391
+ migrationPlan: zod_1.z
392
+ .object({
393
+ mode: zod_1.z.enum(["auto", "manual"]).describe("'auto' uses suggested fixes, 'manual' uses your custom fixes array"),
394
+ fixes: zod_1.z
395
+ .array(zod_1.z.record(zod_1.z.string(), zod_1.z.any()))
396
+ .optional()
397
+ .describe("Required for 'manual' mode. Array of fix objects specifying how to transform records."),
398
+ })
399
+ .optional()
400
+ .describe("Migration plan for breaking/critical changes. Omit to skip migration (only works if canSkip=true from analyze). Required when canSkip=false."),
401
+ }, (_a) => __awaiter(this, [_a], void 0, function* ({ collectionId, name, description, properties, enableVersioning, tags, defaultTtlSeconds, migrationPlan }) {
200
402
  try {
201
- const input = {};
403
+ const client = getCollectionsClient();
404
+ const body = {};
202
405
  if (name !== undefined)
203
- input.name = name;
406
+ body.name = name;
204
407
  if (description !== undefined)
205
- input.description = description;
408
+ body.description = description;
206
409
  if (properties !== undefined)
207
- input.properties = properties;
410
+ body.properties = properties;
208
411
  if (enableVersioning !== undefined)
209
- input.enableVersioning = enableVersioning;
412
+ body.enableVersioning = enableVersioning;
210
413
  if (tags !== undefined)
211
- input.tags = tags;
414
+ body.tags = tags;
212
415
  if (defaultTtlSeconds !== undefined)
213
- input.defaultTtlSeconds = defaultTtlSeconds;
214
- const result = yield sdk.collections.update(collectionId, input);
416
+ body.defaultTtlSeconds = defaultTtlSeconds;
417
+ if (migrationPlan !== undefined)
418
+ body.migrationPlan = migrationPlan;
419
+ const result = yield client.post(`/${collectionId}/upgrade`, body);
420
+ const data = result.data;
421
+ if (data.status === 'completed') {
422
+ return {
423
+ content: [{
424
+ type: "text",
425
+ text: `Collection updated successfully.\n\n${JSON.stringify(data.structure, null, 2)}`,
426
+ }],
427
+ };
428
+ }
429
+ if (data.status === 'in_progress') {
430
+ return {
431
+ content: [{
432
+ type: "text",
433
+ text: [
434
+ `Migration started (async). Records are being updated in the background.`,
435
+ `- Job ID: ${data.jobId}`,
436
+ `- Progress URL: ${data.progressUrl}`,
437
+ ``,
438
+ `Call get_collection_upgrade_progress with collectionId='${collectionId}' and jobId='${data.jobId}' to check status.`,
439
+ ].join("\n"),
440
+ }],
441
+ };
442
+ }
215
443
  return {
216
- content: [
217
- { type: "text", text: JSON.stringify(result.data, null, 2) },
218
- ],
444
+ content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
219
445
  };
220
446
  }
221
447
  catch (error) {
222
448
  return {
223
- content: [
224
- {
225
- type: "text",
226
- text: formatError(error, `updating collection '${collectionId}'`),
227
- },
228
- ],
449
+ content: [{ type: "text", text: formatError(error, `updating collection '${collectionId}'`) }],
450
+ isError: true,
451
+ };
452
+ }
453
+ }));
454
+ server.tool("get_collection_upgrade_progress", "Check the progress of an async collection migration/upgrade job. Call this after update_collection returns status='in_progress'.", {
455
+ collectionId: zod_1.z.string().describe("The collection ID (UUID)"),
456
+ jobId: zod_1.z.string().describe("The job ID returned by update_collection"),
457
+ }, (_a) => __awaiter(this, [_a], void 0, function* ({ collectionId, jobId }) {
458
+ try {
459
+ const client = getCollectionsClient();
460
+ const result = yield client.get(`/${collectionId}/upgrade/${jobId}/progress`);
461
+ return {
462
+ content: [{ type: "text", text: JSON.stringify(result.data, null, 2) }],
463
+ };
464
+ }
465
+ catch (error) {
466
+ return {
467
+ content: [{ type: "text", text: formatError(error, `getting upgrade progress for '${collectionId}'`) }],
229
468
  isError: true,
230
469
  };
231
470
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@centrali-io/centrali-mcp",
3
- "version": "4.4.6",
3
+ "version": "4.4.8-rc.0",
4
4
  "description": "Centrali MCP Server - AI assistant integration for Centrali workspaces",
5
5
  "main": "dist/index.js",
6
6
  "type": "commonjs",
@@ -25,7 +25,7 @@
25
25
  "author": "Blueinit",
26
26
  "license": "ISC",
27
27
  "dependencies": {
28
- "@centrali-io/centrali-sdk": "^4.4.6",
28
+ "@centrali-io/centrali-sdk": "^4.4.7",
29
29
  "@modelcontextprotocol/sdk": "^1.12.1"
30
30
  },
31
31
  "devDependencies": {
package/src/index.ts CHANGED
@@ -45,7 +45,7 @@ async function main() {
45
45
  });
46
46
 
47
47
  // Register all tools
48
- registerCollectionTools(server, sdk);
48
+ registerCollectionTools(server, sdk, baseUrl, workspaceId);
49
49
  registerStructureTools(server, sdk);
50
50
  registerRecordTools(server, sdk);
51
51
  registerSearchTools(server, sdk);
@@ -22,10 +22,10 @@ async function ensureToken(sdk: CentraliSDK): Promise<string | null> {
22
22
  */
23
23
  function createIamClient(sdk: CentraliSDK, centraliUrl: string, workspaceId: string): AxiosInstance {
24
24
  const url = new URL(centraliUrl);
25
- const hostname = url.hostname.startsWith("api.")
25
+ const hostname = url.hostname.startsWith("auth.")
26
26
  ? url.hostname
27
- : `api.${url.hostname}`;
28
- const baseURL = `${url.protocol}//${hostname}/iam/workspace/${workspaceId}/api/v1/external-auth-providers`;
27
+ : `auth.${url.hostname.replace(/^api\./, '')}`;
28
+ const baseURL = `${url.protocol}//${hostname}/workspace/${workspaceId}/api/v1/external-auth-providers`;
29
29
 
30
30
  const client = axios.create({ baseURL });
31
31
 
@@ -74,10 +74,10 @@ export function registerComputeTools(server: McpServer, sdk: CentraliSDK, centra
74
74
 
75
75
  server.tool(
76
76
  "list_triggers",
77
- "List function triggers in the workspace. Triggers define how and when compute functions are executed (on-demand, event-driven, scheduled, http-trigger).",
77
+ "List function triggers in the workspace. Triggers define how and when compute functions are executed (on-demand, event-driven, scheduled, http-trigger, endpoint).",
78
78
  {
79
79
  executionType: z
80
- .enum(["on-demand", "event-driven", "scheduled", "http-trigger"])
80
+ .enum(["on-demand", "event-driven", "scheduled", "http-trigger", "endpoint"])
81
81
  .optional()
82
82
  .describe("Filter by trigger execution type"),
83
83
  page: z.number().optional().describe("Page number"),
@@ -437,13 +437,13 @@ export function registerComputeTools(server: McpServer, sdk: CentraliSDK, centra
437
437
  name: z.string().describe("Display name for the trigger"),
438
438
  functionId: z.string().describe("The compute function ID (UUID) to execute"),
439
439
  executionType: z
440
- .enum(["on-demand", "event-driven", "scheduled", "http-trigger"])
441
- .describe("How the trigger fires: on-demand (manual), event-driven (data events), scheduled (cron), or http-trigger (external HTTP POST)"),
440
+ .enum(["on-demand", "event-driven", "scheduled", "http-trigger", "endpoint"])
441
+ .describe("How the trigger fires: on-demand (manual), event-driven (data events), scheduled (cron), http-trigger (external HTTP POST), or endpoint (synchronous HTTP API — returns response inline)"),
442
442
  description: z.string().optional().describe("Optional description"),
443
443
  triggerMetadata: z
444
444
  .record(z.string(), z.any())
445
445
  .optional()
446
- .describe("Type-specific configuration. For event-driven: { eventType, recordSlug } where eventType is record_created | record_updated | record_deleted. For scheduled: { scheduleType, cronExpression, timezone }. For http-trigger: auto-generated URL."),
446
+ .describe("Type-specific configuration. For event-driven: { eventType, recordSlug }. For scheduled: { scheduleType, cronExpression, timezone }. For http-trigger: auto-generated URL. For endpoint: { path, allowedMethods?, timeoutMs?, auth? } where path is URL-safe (e.g., 'create-order'), allowedMethods defaults to ['POST'], timeoutMs 1000-30000 (default 30000), auth is { mode: 'bearer'|'public'|'apiKey'|'hmac' }."),
447
447
  enabled: z.boolean().optional().describe("Whether the trigger is enabled (default: true)"),
448
448
  },
449
449
  async ({ name, functionId, executionType, description, triggerMetadata, enabled }) => {
@@ -687,6 +687,61 @@ export function registerComputeTools(server: McpServer, sdk: CentraliSDK, centra
687
687
  }
688
688
  );
689
689
 
690
+ // ── Endpoint Trigger (Sync Execution) ─────────────────────────────
691
+
692
+ server.tool(
693
+ "invoke_endpoint",
694
+ "Invoke a compute endpoint trigger by path. The function executes synchronously — Centrali waits for the function to complete and returns its output directly in the response. No polling needed. Max execution time: 30 seconds (configurable via triggerMetadata.timeoutMs, range 1–30s). If the function exceeds the timeout, returns 504. Endpoint triggers must be created first with executionType='endpoint'. Use this for real-time API responses; use invoke_trigger for long-running background work that doesn't need an immediate response.",
695
+ {
696
+ path: z.string().describe("The endpoint path (e.g., 'create-order', 'webhook/shipments'). This is set in the trigger's triggerMetadata.path."),
697
+ method: z.enum(["GET", "POST", "PUT", "DELETE", "PATCH"]).optional().describe("HTTP method (default: POST). Must be in the trigger's allowedMethods."),
698
+ payload: z.record(z.string(), z.any()).optional().describe("Request body payload (sent as JSON)"),
699
+ headers: z.record(z.string(), z.string()).optional().describe("Additional headers (e.g., X-API-Key for apiKey auth)"),
700
+ },
701
+ async ({ path, method, payload, headers: extraHeaders }) => {
702
+ try {
703
+ const token = await ensureToken(sdk);
704
+ const url = new URL(centraliUrl);
705
+ const hostname = url.hostname.startsWith("api.")
706
+ ? url.hostname
707
+ : `api.${url.hostname}`;
708
+ const apiUrl = `${url.protocol}//${hostname}/data/workspace/${workspaceId}/api/v1/endpoints/${path}`;
709
+
710
+ const reqHeaders: Record<string, string> = {};
711
+ if (token) reqHeaders.Authorization = `Bearer ${token}`;
712
+ if (extraHeaders) Object.assign(reqHeaders, extraHeaders);
713
+
714
+ const httpMethod = (method || "POST").toLowerCase();
715
+ const result = await axios({
716
+ method: httpMethod as any,
717
+ url: apiUrl,
718
+ data: ["get", "delete"].includes(httpMethod) ? undefined : (payload || {}),
719
+ headers: reqHeaders,
720
+ validateStatus: () => true, // Don't throw on non-2xx — return the function's response as-is
721
+ });
722
+
723
+ return {
724
+ content: [{
725
+ type: "text",
726
+ text: JSON.stringify({
727
+ status: result.status,
728
+ headers: {
729
+ "content-type": result.headers["content-type"],
730
+ "x-execution-id": result.headers["x-execution-id"],
731
+ },
732
+ body: result.data,
733
+ }, null, 2),
734
+ }],
735
+ };
736
+ } catch (error: unknown) {
737
+ return {
738
+ content: [{ type: "text", text: formatError(error, `invoking endpoint '${path}'`) }],
739
+ isError: true,
740
+ };
741
+ }
742
+ }
743
+ );
744
+
690
745
  // ── Allowed Domains tools ──────────────────────────────────────────
691
746
 
692
747
  server.tool(
@@ -85,6 +85,7 @@ export function registerDescribeTools(server: McpServer) {
85
85
  "get_function_run",
86
86
  "list_function_runs",
87
87
  "get_compute_job_status",
88
+ "invoke_endpoint",
88
89
  ],
89
90
  },
90
91
  smart_queries: {
@@ -227,6 +228,16 @@ export function registerDescribeTools(server: McpServer) {
227
228
  ],
228
229
  },
229
230
  },
231
+ naming_guide: {
232
+ description: "Different tools use different parameter names for the same concept (collection identifier). This is a historical naming drift — all of these refer to the same thing.",
233
+ aliases: {
234
+ recordSlug: "Used by record tools (query_records, create_record, etc.). This is the collection's URL-safe slug, e.g., 'orders'.",
235
+ structureSlug: "Used by validation and insights tools. Same value as recordSlug.",
236
+ collections: "Used by search_records. Same value as recordSlug. Accepts a string or array of strings.",
237
+ structureIds: "Used by generate_starter_pages. This is the collection UUID (not the slug). Get it from list_collections → id field.",
238
+ },
239
+ rule: "When a tool asks for recordSlug, structureSlug, or collections — use the collection's slug (e.g., 'orders'). When a tool asks for structureIds — use the collection's UUID.",
240
+ },
230
241
  workflow:
231
242
  "Typical workflow: 1) Define collections → 2) Create records → 3) Write compute functions → 4) Wire orchestrations → 5) Build pages to surface data → 6) Publish pages for end users. When building an app, also: 7) Create a service account → 8) Grant least-privilege permissions via remediation → 9) Create publishable keys for the frontend.",
232
243
  app_credential_setup: {
@@ -594,13 +605,30 @@ export function registerDescribeTools(server: McpServer) {
594
605
  "Triggered by an external HTTP POST to a generated webhook URL.",
595
606
  config: "Each http-trigger gets a unique URL to share with external services",
596
607
  },
608
+ "endpoint": {
609
+ description:
610
+ "Turns a compute function into a custom API endpoint. Unlike all other trigger types (which are async and return a job ID you must poll), endpoint triggers WAIT for the function to complete and return its output directly in the HTTP response. Max execution time: 30 seconds.",
611
+ when_to_use: "Use endpoint triggers when the caller needs the function's output immediately (REST APIs, form handlers, webhook responders, data calculations). Use on-demand/invoke_trigger for long-running work (>30s) or fire-and-forget background jobs.",
612
+ config: {
613
+ path: "string — URL-safe path (e.g., 'create-order', 'webhook/shipments'). Must be unique per workspace.",
614
+ allowedMethods: "string[] — HTTP methods to accept (default: ['POST']). Options: GET, POST, PUT, DELETE, PATCH.",
615
+ timeoutMs: "number — execution timeout 1000-30000ms (default: 30000). Function MUST complete within this window or the request returns 504 Gateway Timeout.",
616
+ auth: "{ mode: 'bearer'|'public'|'apiKey'|'hmac' } — authentication mode. bearer=IAM token, public=no auth, apiKey=X-API-Key header (auto-generated), hmac=X-Signature header (auto-generated signing secret).",
617
+ },
618
+ invocation: "Call invoke_endpoint with the path. Response comes back inline — no polling needed.",
619
+ example_use_cases: [
620
+ "Build a REST API endpoint backed by a compute function",
621
+ "Create a webhook receiver that processes and responds synchronously",
622
+ "Expose a function as an HTTP service for external integrations",
623
+ ],
624
+ },
597
625
  },
598
626
  trigger_shape: {
599
627
  id: "UUID",
600
628
  name: "string",
601
629
  description: "string | null",
602
630
  functionId: "UUID — the compute function to execute",
603
- executionType: "'on-demand' | 'event-driven' | 'scheduled' | 'http-trigger'",
631
+ executionType: "'on-demand' | 'event-driven' | 'scheduled' | 'http-trigger' | 'endpoint'",
604
632
  triggerMetadata: "object — type-specific configuration (event, cron, params, etc.)",
605
633
  enabled: "boolean — whether the trigger is active (default: true)",
606
634
  workspaceSlug: "string",
@@ -647,6 +675,7 @@ export function registerDescribeTools(server: McpServer) {
647
675
  triggerMetadata_examples: {
648
676
  "event-driven": { eventType: "record_created", recordSlug: "orders" },
649
677
  scheduled: { scheduleType: "cron", cronExpression: "0 9 * * *", timezone: "America/New_York" },
678
+ endpoint: { path: "create-order", allowedMethods: ["POST"], timeoutMs: 10000, auth: { mode: "bearer" } },
650
679
  },
651
680
  },
652
681
  update_trigger: {
@@ -1074,7 +1103,7 @@ export function registerDescribeTools(server: McpServer) {
1074
1103
  id: "UUID",
1075
1104
  structureSlug: "string — the collection where the anomaly was detected",
1076
1105
  type: "string — anomaly type (e.g., 'spike', 'drop', 'outlier', 'pattern_break')",
1077
- severity: "'critical' | 'high' | 'medium' | 'low'",
1106
+ severity: "'info' | 'warning' | 'critical'",
1078
1107
  status: "'active' | 'acknowledged' | 'dismissed'",
1079
1108
  title: "string — human-readable summary",
1080
1109
  description: "string — detailed explanation of the anomaly",
@@ -1093,12 +1122,12 @@ export function registerDescribeTools(server: McpServer) {
1093
1122
  totalActive: "number",
1094
1123
  totalAcknowledged: "number",
1095
1124
  totalDismissed: "number",
1096
- bySeverity: "{ critical: n, high: n, medium: n, low: n }",
1125
+ bySeverity: "{ info: n, warning: n, critical: n }",
1097
1126
  },
1098
1127
  tips: [
1099
1128
  "Use trigger_anomaly_analysis to scan a specific collection on-demand",
1100
1129
  "Use get_insights_summary for a quick overview before diving into individual insights",
1101
- "Filter by severity='critical' to focus on the most important issues first",
1130
+ "Filter by severity='critical' to focus on the most important issues first. Severity levels: info (noteworthy), warning (needs attention), critical (requires action).",
1102
1131
  "Acknowledged insights are still visible — use dismiss for false positives",
1103
1132
  ],
1104
1133
  },
@@ -1255,6 +1284,12 @@ export function registerDescribeTools(server: McpServer) {
1255
1284
  "Repeat steps 2-4 to update. Each publish creates a new version.",
1256
1285
  unpublish:
1257
1286
  "unpublish_page — removes the page from its runtime URL (keeps the definition)",
1287
+ important_behavior: {
1288
+ description: "Publishing does NOT consume or delete the draft. After publishing, the page has BOTH an activePublication (the live version) and a currentDraft (a new working draft for future edits). This is intentional — it allows iterating on changes without affecting the live page.",
1289
+ example: "After publishing version 1, get_page shows: activePublication.versionNumber=1 (live) and currentDraft.versionNumber=2 (editable). The draft is not stale — it's the starting point for the next publish cycle.",
1290
+ agent_tip: "Do not treat the draft as an error or leftover. The normal state of a published page is: one active publication + one working draft. Edit the draft, validate, publish again to create version 3, etc.",
1291
+ },
1292
+ eventual_consistency_note: "After save_page_draft or set_navigation, there may be a brief delay before the data is visible via get_page_draft or get_navigation. If you get a 'not found' immediately after writing, wait 1-2 seconds and retry.",
1258
1293
  },
1259
1294
  access_modes: {
1260
1295
  public: "Anyone can view — no authentication required",
@@ -41,7 +41,7 @@ export function registerInsightTools(server: McpServer, sdk: CentraliSDK) {
41
41
  .optional()
42
42
  .describe("Filter by insight status"),
43
43
  severity: z
44
- .enum(["critical", "high", "medium", "low"])
44
+ .enum(["info", "warning", "critical"])
45
45
  .optional()
46
46
  .describe("Filter by severity level"),
47
47
  },
@@ -22,10 +22,10 @@ async function ensureToken(sdk: CentraliSDK): Promise<string | null> {
22
22
  */
23
23
  function createIamClient(sdk: CentraliSDK, centraliUrl: string, workspaceId: string, baseSuffix: string): AxiosInstance {
24
24
  const url = new URL(centraliUrl);
25
- const hostname = url.hostname.startsWith("api.")
25
+ const hostname = url.hostname.startsWith("auth.")
26
26
  ? url.hostname
27
- : `api.${url.hostname}`;
28
- const baseURL = `${url.protocol}//${hostname}/iam/workspace/${workspaceId}/api/v1/${baseSuffix}`;
27
+ : `auth.${url.hostname.replace(/^api\./, '')}`;
28
+ const baseURL = `${url.protocol}//${hostname}/workspace/${workspaceId}/api/v1/${baseSuffix}`;
29
29
 
30
30
  const client = axios.create({ baseURL });
31
31
 
@@ -1,10 +1,28 @@
1
1
  import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
2
2
  import { CentraliSDK } from "@centrali-io/centrali-sdk";
3
+ import axios, { AxiosInstance } from "axios";
3
4
  import { z } from "zod";
4
5
 
5
6
  function formatError(error: unknown, context: string): string {
6
7
  if (error && typeof error === 'object') {
7
8
  const e = error as Record<string, any>;
9
+ if (e.response?.data) {
10
+ const d = e.response.data;
11
+ const code = d.code ?? d.errorCode ?? d.error?.code ?? e.response.status ?? "ERROR";
12
+ const message = d.message ?? d.error?.message ?? JSON.stringify(d);
13
+ let msg = `Error ${context}: [${code}] ${message}`;
14
+ // Include extra context from error response (e.g. classification details)
15
+ if (d.classification) {
16
+ msg += `\nClassification: ${JSON.stringify(d.classification)}`;
17
+ }
18
+ if (d.criticalChanges) {
19
+ msg += `\nCritical changes: ${JSON.stringify(d.criticalChanges)}`;
20
+ }
21
+ if (d.suggestion) {
22
+ msg += `\nSuggestion: ${d.suggestion}`;
23
+ }
24
+ return msg;
25
+ }
8
26
  if ('message' in e) {
9
27
  let msg = `Error ${context}`;
10
28
  if ('code' in e || 'status' in e) {
@@ -23,6 +41,63 @@ function formatError(error: unknown, context: string): string {
23
41
  return `Error ${context}: ${error instanceof Error ? error.message : String(error)}`;
24
42
  }
25
43
 
44
+ /**
45
+ * Ensures the SDK has a valid token.
46
+ */
47
+ async function ensureToken(sdk: CentraliSDK): Promise<string | null> {
48
+ let token = sdk.getToken();
49
+ if (token) return token;
50
+ try {
51
+ await sdk.functions.list({ limit: 1 });
52
+ } catch { /* token refresh side effect */ }
53
+ return sdk.getToken();
54
+ }
55
+
56
+ /**
57
+ * Creates an axios instance pointing at the data service collections API.
58
+ */
59
+ function createDataClient(sdk: CentraliSDK, centraliUrl: string, workspaceId: string, pathSuffix: string): AxiosInstance {
60
+ const url = new URL(centraliUrl);
61
+ const hostname = url.hostname.startsWith("api.")
62
+ ? url.hostname
63
+ : `api.${url.hostname}`;
64
+ const baseURL = `${url.protocol}//${hostname}/data/workspace/${workspaceId}/api/v1/${pathSuffix}`;
65
+
66
+ const client = axios.create({ baseURL });
67
+
68
+ client.interceptors.request.use(async (config) => {
69
+ const token = await ensureToken(sdk);
70
+ if (token) {
71
+ config.headers.Authorization = `Bearer ${token}`;
72
+ }
73
+ return config;
74
+ });
75
+
76
+ client.interceptors.response.use(
77
+ (response) => response,
78
+ async (error) => {
79
+ const originalRequest = error.config;
80
+ const isAuthError = error.response?.status === 401 || error.response?.status === 403;
81
+
82
+ if (isAuthError && !originalRequest._hasRetried) {
83
+ originalRequest._hasRetried = true;
84
+ try {
85
+ await sdk.functions.list({ limit: 1 });
86
+ } catch { /* token refresh side effect */ }
87
+
88
+ const token = sdk.getToken();
89
+ if (token) {
90
+ originalRequest.headers.Authorization = `Bearer ${token}`;
91
+ return client.request(originalRequest);
92
+ }
93
+ }
94
+ return Promise.reject(error);
95
+ }
96
+ );
97
+
98
+ return client;
99
+ }
100
+
26
101
  export function registerStructureTools(server: McpServer, sdk: CentraliSDK) {
27
102
  server.tool(
28
103
  "list_structures",
@@ -84,7 +159,9 @@ export function registerStructureTools(server: McpServer, sdk: CentraliSDK) {
84
159
  );
85
160
  }
86
161
 
87
- export function registerCollectionTools(server: McpServer, sdk: CentraliSDK) {
162
+ export function registerCollectionTools(server: McpServer, sdk: CentraliSDK, centraliUrl: string, workspaceId: string) {
163
+ const getCollectionsClient = () => createDataClient(sdk, centraliUrl, workspaceId, "collections");
164
+
88
165
  server.tool(
89
166
  "list_collections",
90
167
  "List all data collections (schemas) in the Centrali workspace. Returns name, slug, description, and property definitions for each collection.",
@@ -192,9 +269,141 @@ export function registerCollectionTools(server: McpServer, sdk: CentraliSDK) {
192
269
  }
193
270
  );
194
271
 
272
+ // ── Schema Update Workflow ─────────────────────────────────────────
273
+ //
274
+ // Updating a collection schema is a multi-step process because changes
275
+ // may require migrating existing records. The workflow is:
276
+ //
277
+ // 1. analyze_collection_update — classify changes, get suggested fixes
278
+ // 2. preview_collection_migration — (optional) test fixes on sample records
279
+ // 3. update_collection — apply the update, with migration plan if needed
280
+ // 4. get_collection_upgrade_progress — poll async migration status
281
+ //
282
+ // For simple changes (adding optional fields, renaming, metadata-only),
283
+ // update_collection handles everything automatically. For breaking or
284
+ // critical changes, analyze first to understand what's needed.
285
+
286
+ server.tool(
287
+ "analyze_collection_update",
288
+ `Analyze proposed schema changes BEFORE applying them. Returns whether migration is needed, what changes are breaking/critical, suggested fixes, and estimated duration.
289
+
290
+ WHEN TO USE: Call this before update_collection when you are changing properties (adding required fields, changing types, renaming fields, deleting fields, or toggling isSecret). This tells you exactly what will happen and what migration plan is needed.
291
+
292
+ CHANGE CATEGORIES:
293
+ - Non-breaking (no migration): adding optional fields, metadata changes → update_collection handles directly
294
+ - Breaking (migration, skippable on non-strict): field renames, deletions, type changes, constraint changes → can skip on schemaless/auto-evolving collections
295
+ - Critical (migration, NEVER skippable): adding required fields without defaults, toggling isSecret → must provide a migration plan with fixes
296
+
297
+ RESPONSE FIELDS:
298
+ - needsMigration: whether existing records need to be updated
299
+ - canSkip: whether migration can be skipped (only false for critical changes or strict schemas with breaking changes)
300
+ - recordCount: number of existing records affected
301
+ - classification: detailed breakdown of all changes by category
302
+ - suggestedFixes: auto-generated fixes you can use in update_collection's migrationPlan
303
+ - estimatedDuration: 'instant' | 'seconds' | 'minutes' | 'long'`,
304
+ {
305
+ collectionId: z.string().describe("The collection ID (UUID) to analyze"),
306
+ properties: z
307
+ .array(z.record(z.string(), z.any()))
308
+ .describe("The COMPLETE new properties array (all fields, not just changed ones). Must include existing property IDs for fields being kept/modified."),
309
+ },
310
+ async ({ collectionId, properties }) => {
311
+ try {
312
+ const client = getCollectionsClient();
313
+ const result = await client.post(`/${collectionId}/analyze-update`, { properties });
314
+ const data = result.data;
315
+
316
+ // Build a human-readable summary for the LLM
317
+ const lines: string[] = [];
318
+ lines.push(`## Analysis for collection ${collectionId}`);
319
+ lines.push(`- Records affected: ${data.recordCount}`);
320
+ lines.push(`- Needs migration: ${data.needsMigration}`);
321
+ lines.push(`- Can skip migration: ${data.canSkip}`);
322
+ lines.push(`- Estimated duration: ${data.estimatedDuration}`);
323
+
324
+ if (!data.needsMigration) {
325
+ lines.push(`\n✅ No migration needed. You can call update_collection directly.`);
326
+ } else if (data.canSkip) {
327
+ lines.push(`\n⚠️ Migration can be skipped. Call update_collection without a migrationPlan to skip, or provide one for a clean migration.`);
328
+ } else {
329
+ lines.push(`\n🔴 Migration REQUIRED and cannot be skipped. You must call update_collection with a migrationPlan.`);
330
+ lines.push(`Use mode 'auto' to apply suggested fixes, or mode 'manual' with custom fixes.`);
331
+ }
332
+
333
+ if (data.suggestedFixes && data.suggestedFixes.length > 0) {
334
+ lines.push(`\n### Suggested fixes (use these in migrationPlan.fixes or mode 'auto'):`);
335
+ }
336
+
337
+ lines.push(`\n### Full analysis:`);
338
+ lines.push(JSON.stringify(data, null, 2));
339
+
340
+ return {
341
+ content: [{ type: "text", text: lines.join("\n") }],
342
+ };
343
+ } catch (error: unknown) {
344
+ return {
345
+ content: [{ type: "text", text: formatError(error, `analyzing update for collection '${collectionId}'`) }],
346
+ isError: true,
347
+ };
348
+ }
349
+ }
350
+ );
351
+
352
+ server.tool(
353
+ "preview_collection_migration",
354
+ `Preview the effect of migration fixes on sample records BEFORE applying them. Shows before/after snapshots so you can verify fixes are correct.
355
+
356
+ WHEN TO USE: After analyze_collection_update returns suggestedFixes, call this to see how those fixes would transform actual records. Useful for validating type conversions, default values, and expressions before committing.`,
357
+ {
358
+ collectionId: z.string().describe("The collection ID (UUID)"),
359
+ fixes: z
360
+ .array(z.record(z.string(), z.any()))
361
+ .describe("Array of migration fixes to preview (from analyze_collection_update suggestedFixes or custom fixes)"),
362
+ properties: z
363
+ .array(z.record(z.string(), z.any()))
364
+ .describe("The COMPLETE new properties array (same as passed to analyze_collection_update)"),
365
+ },
366
+ async ({ collectionId, fixes, properties }) => {
367
+ try {
368
+ const client = getCollectionsClient();
369
+ const result = await client.post(`/${collectionId}/preview-fixes`, { fixes, properties });
370
+ return {
371
+ content: [{ type: "text", text: JSON.stringify(result.data, null, 2) }],
372
+ };
373
+ } catch (error: unknown) {
374
+ return {
375
+ content: [{ type: "text", text: formatError(error, `previewing fixes for collection '${collectionId}'`) }],
376
+ isError: true,
377
+ };
378
+ }
379
+ }
380
+ );
381
+
195
382
  server.tool(
196
383
  "update_collection",
197
- "Update an existing collection by ID. Only include the fields you want to change.",
384
+ `Update a collection's schema using the safe upgrade endpoint. This handles both simple updates and complex migrations.
385
+
386
+ HOW IT WORKS:
387
+ 1. If your changes don't require migration (adding optional fields, metadata changes) → completes immediately
388
+ 2. If migration is required but you don't provide a migrationPlan → returns an error telling you what's needed
389
+ 3. If migration is required and you provide migrationPlan → starts the migration (may be async for large collections)
390
+
391
+ RECOMMENDED WORKFLOW:
392
+ 1. Call analyze_collection_update first to understand what changes require
393
+ 2. If needsMigration=false → call this tool without migrationPlan
394
+ 3. If needsMigration=true and canSkip=true → call this tool without migrationPlan to skip migration
395
+ 4. If needsMigration=true and canSkip=false → call this tool WITH migrationPlan
396
+ 5. If status='in_progress' → poll with get_collection_upgrade_progress
397
+
398
+ MIGRATION PLAN:
399
+ - mode 'auto': Uses system-generated fixes from analyze_collection_update (handles renames, type conversions, deletions automatically)
400
+ - mode 'manual': Provide your own fixes array for full control over how records are transformed
401
+
402
+ FIX FORMAT (for manual mode):
403
+ Each fix is an object with: { fieldName, fixType, value?, expression?, applyToAll?, filterConditions? }
404
+ - fixType: 'static_value' (set a fixed value), 'expression' (JS expression with 'record' variable), 'auto' (system handles it)
405
+ - For new required fields: provide a default value or expression
406
+ - For type changes: provide a conversion expression`,
198
407
  {
199
408
  collectionId: z.string().describe("The collection ID (UUID) to update"),
200
409
  name: z.string().optional().describe("Updated display name"),
@@ -202,7 +411,7 @@ export function registerCollectionTools(server: McpServer, sdk: CentraliSDK) {
202
411
  properties: z
203
412
  .array(z.record(z.string(), z.any()))
204
413
  .optional()
205
- .describe("Updated array of property definitions (replaces existing properties)"),
414
+ .describe("The COMPLETE new properties array. Must include ALL fields (existing + new). Include property 'id' for existing fields being kept/modified."),
206
415
  enableVersioning: z.boolean().optional().describe("Enable or disable record versioning"),
207
416
  tags: z.array(z.string()).optional().describe("Updated tags"),
208
417
  defaultTtlSeconds: z
@@ -210,30 +419,85 @@ export function registerCollectionTools(server: McpServer, sdk: CentraliSDK) {
210
419
  .nullable()
211
420
  .optional()
212
421
  .describe("Default TTL in seconds for new records. Set to null to clear."),
422
+ migrationPlan: z
423
+ .object({
424
+ mode: z.enum(["auto", "manual"]).describe("'auto' uses suggested fixes, 'manual' uses your custom fixes array"),
425
+ fixes: z
426
+ .array(z.record(z.string(), z.any()))
427
+ .optional()
428
+ .describe("Required for 'manual' mode. Array of fix objects specifying how to transform records."),
429
+ })
430
+ .optional()
431
+ .describe("Migration plan for breaking/critical changes. Omit to skip migration (only works if canSkip=true from analyze). Required when canSkip=false."),
213
432
  },
214
- async ({ collectionId, name, description, properties, enableVersioning, tags, defaultTtlSeconds }) => {
433
+ async ({ collectionId, name, description, properties, enableVersioning, tags, defaultTtlSeconds, migrationPlan }) => {
215
434
  try {
216
- const input: Record<string, any> = {};
217
- if (name !== undefined) input.name = name;
218
- if (description !== undefined) input.description = description;
219
- if (properties !== undefined) input.properties = properties;
220
- if (enableVersioning !== undefined) input.enableVersioning = enableVersioning;
221
- if (tags !== undefined) input.tags = tags;
222
- if (defaultTtlSeconds !== undefined) input.defaultTtlSeconds = defaultTtlSeconds;
223
- const result = await sdk.collections.update(collectionId, input as any);
435
+ const client = getCollectionsClient();
436
+ const body: Record<string, any> = {};
437
+ if (name !== undefined) body.name = name;
438
+ if (description !== undefined) body.description = description;
439
+ if (properties !== undefined) body.properties = properties;
440
+ if (enableVersioning !== undefined) body.enableVersioning = enableVersioning;
441
+ if (tags !== undefined) body.tags = tags;
442
+ if (defaultTtlSeconds !== undefined) body.defaultTtlSeconds = defaultTtlSeconds;
443
+ if (migrationPlan !== undefined) body.migrationPlan = migrationPlan;
444
+
445
+ const result = await client.post(`/${collectionId}/upgrade`, body);
446
+ const data = result.data;
447
+
448
+ if (data.status === 'completed') {
449
+ return {
450
+ content: [{
451
+ type: "text",
452
+ text: `Collection updated successfully.\n\n${JSON.stringify(data.structure, null, 2)}`,
453
+ }],
454
+ };
455
+ }
456
+
457
+ if (data.status === 'in_progress') {
458
+ return {
459
+ content: [{
460
+ type: "text",
461
+ text: [
462
+ `Migration started (async). Records are being updated in the background.`,
463
+ `- Job ID: ${data.jobId}`,
464
+ `- Progress URL: ${data.progressUrl}`,
465
+ ``,
466
+ `Call get_collection_upgrade_progress with collectionId='${collectionId}' and jobId='${data.jobId}' to check status.`,
467
+ ].join("\n"),
468
+ }],
469
+ };
470
+ }
471
+
224
472
  return {
225
- content: [
226
- { type: "text", text: JSON.stringify(result.data, null, 2) },
227
- ],
473
+ content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
228
474
  };
229
475
  } catch (error: unknown) {
230
476
  return {
231
- content: [
232
- {
233
- type: "text",
234
- text: formatError(error, `updating collection '${collectionId}'`),
235
- },
236
- ],
477
+ content: [{ type: "text", text: formatError(error, `updating collection '${collectionId}'`) }],
478
+ isError: true,
479
+ };
480
+ }
481
+ }
482
+ );
483
+
484
+ server.tool(
485
+ "get_collection_upgrade_progress",
486
+ "Check the progress of an async collection migration/upgrade job. Call this after update_collection returns status='in_progress'.",
487
+ {
488
+ collectionId: z.string().describe("The collection ID (UUID)"),
489
+ jobId: z.string().describe("The job ID returned by update_collection"),
490
+ },
491
+ async ({ collectionId, jobId }) => {
492
+ try {
493
+ const client = getCollectionsClient();
494
+ const result = await client.get(`/${collectionId}/upgrade/${jobId}/progress`);
495
+ return {
496
+ content: [{ type: "text", text: JSON.stringify(result.data, null, 2) }],
497
+ };
498
+ } catch (error: unknown) {
499
+ return {
500
+ content: [{ type: "text", text: formatError(error, `getting upgrade progress for '${collectionId}'`) }],
237
501
  isError: true,
238
502
  };
239
503
  }