@commandable/mcp 0.10.0 → 0.12.0

@@ -4431,7 +4431,7 @@ function _expandFromEnv(value) {
  const _inlineRuntimeConfig = {
  "app": {
  "baseURL": "/",
- "buildId": "0857a55b-f766-4fe6-86be-9bd9d857861a",
+ "buildId": "886deef4-f3b5-464c-b4e2-11735eb5272e",
  "buildAssetsDir": "/_nuxt/",
  "cdnURL": ""
  },
@@ -5034,7 +5034,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": null,
+ "usageGuide": null,
  "variants": {
  "variants": {
  "personal_access_token": {
@@ -5480,7 +5480,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": '# Confluence usage guide\n\n## Recommended workflow\n\n1. Use `list_spaces` (or `search_pages`) to discover where content lives.\n2. Use `search_pages` with CQL to find the right page ID(s).\n3. Use `read_page` to get the page content as Confluence storage format (XHTML).\n4. For edits, use `update_page` with storage XHTML in `bodyStorage` (it automatically handles version increments).\n\n## Content format\n\nAll content is exchanged in **Confluence storage format (XHTML)**. This applies to both reads (`contentStorage` field in `read_page`) and writes (`bodyStorage` field in `create_page`, `update_page`, `add_comment`).\n\nCommon markup:\n\n- Headings: `<h1>Title</h1>`, `<h2>Section</h2>`\n- Paragraphs: `<p>Text</p>`\n- Lists: `<ul><li>Item</li></ul>`, `<ol><li>Item</li></ol>`\n- Inline code: `<code>const x = 1</code>`\n- Code blocks: `<pre><code>...</code></pre>`\n- Links: `<a href="https://example.com">Example</a>`\n- Tables: `<table><tr><th>A</th></tr><tr><td>1</td></tr></table>`\n- Macros: `<ac:structured-macro ac:name="info"><ac:rich-text-body><p>Note</p></ac:rich-text-body></ac:structured-macro>`\n\nUsing XHTML natively means round-tripping pages preserves all formatting, macros, and Confluence-specific markup.\n\n`read_page` accepts `outputMarkdown: true` to return content as Markdown instead of XHTML. \n\n## CQL (Confluence Query Language) quick reference\n\nCommon patterns for `search_pages.cql`:\n\n- Restrict to a space:\n - `space = "ENG" AND type = page`\n- Title match:\n - `title ~ "runbook" AND type = page`\n- Full-text match:\n - `text ~ "oncall" AND type = page`\n- Label match:\n - `label = "runbook" AND type = page`\n- Combine filters:\n - `space = "ENG" AND type = page AND (title ~ "onboarding" OR text ~ "onboarding")`\n- Sort:\n - `... ORDER BY lastmodified DESC`\n\nTips:\n- Prefer small `limit` (e.g. 10) and paginate with `start`.\n- Use labels as a stable way to group pages for later discovery.\n\n## Page hierarchy\n\n- Spaces contain pages.\n- Pages can be nested under a parent page (`parentId`).\n- Use `get_page_children` to traverse a documentation tree (e.g. a handbook or runbook index).\n\n',
+ "usageGuide": '# Confluence usage guide\n\n## Recommended workflow\n\n1. Use `list_spaces` (or `search_pages`) to discover where content lives.\n2. Use `search_pages` with CQL to find the right page ID(s).\n3. Use `read_page` to get the page content as Confluence storage format (XHTML).\n4. For edits, use `update_page` with storage XHTML in `bodyStorage` (it automatically handles version increments).\n\n## Content format\n\nAll content is exchanged in **Confluence storage format (XHTML)**. This applies to both reads (`contentStorage` field in `read_page`) and writes (`bodyStorage` field in `create_page`, `update_page`, `add_comment`).\n\nCommon markup:\n\n- Headings: `<h1>Title</h1>`, `<h2>Section</h2>`\n- Paragraphs: `<p>Text</p>`\n- Lists: `<ul><li>Item</li></ul>`, `<ol><li>Item</li></ol>`\n- Inline code: `<code>const x = 1</code>`\n- Code blocks: `<pre><code>...</code></pre>`\n- Links: `<a href="https://example.com">Example</a>`\n- Tables: `<table><tr><th>A</th></tr><tr><td>1</td></tr></table>`\n- Macros: `<ac:structured-macro ac:name="info"><ac:rich-text-body><p>Note</p></ac:rich-text-body></ac:structured-macro>`\n\nUsing XHTML natively means round-tripping pages preserves all formatting, macros, and Confluence-specific markup.\n\n`read_page` accepts `outputMarkdown: true` to return content as Markdown instead of XHTML. \n\n## CQL (Confluence Query Language) quick reference\n\nCommon patterns for `search_pages.cql`:\n\n- Restrict to a space:\n - `space = "ENG" AND type = page`\n- Title match:\n - `title ~ "runbook" AND type = page`\n- Full-text match:\n - `text ~ "oncall" AND type = page`\n- Label match:\n - `label = "runbook" AND type = page`\n- Combine filters:\n - `space = "ENG" AND type = page AND (title ~ "onboarding" OR text ~ "onboarding")`\n- Sort:\n - `... ORDER BY lastmodified DESC`\n\nTips:\n- Prefer small `limit` (e.g. 10) and paginate with `start`.\n- Use labels as a stable way to group pages for later discovery.\n\n## Page hierarchy\n\n- Spaces contain pages.\n- Pages can be nested under a parent page (`parentId`).\n- Use `get_page_children` to traverse a documentation tree (e.g. a handbook or runbook index).\n\n',
  "variants": {
  "variants": {
  "api_token": {
@@ -6381,7 +6381,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": "# GitHub coding workflow\n\n## Branch-based workflow\n\nAlways work on a feature branch, never commit directly to main:\n\n1. `create_branch` from the default branch\n2. Make changes with `edit_file`, `edit_files`, `create_file`, or `delete_file` -- each call auto-commits to the branch\n3. `create_pull_request` when done\n4. `merge_pull_request` with `merge_method: \"squash\"` to collapse all commits into one clean commit on main\n\nMultiple small commits on a feature branch are fine -- they get squash-merged into a single commit.\n\n## Choosing the right write tool\n\n- **`edit_file`** -- Surgical edits to a single existing file. Use for most code changes. Each call is a commit.\n- **`edit_files`** -- Atomic multi-file changes (create + edit + delete in one commit). Use when files must change together to stay consistent (e.g. renaming across files, adding a module + updating imports).\n- **`create_file`** -- Create a new file or completely replace an existing file's content. Use for new files or full rewrites.\n- **`delete_file`** -- Remove a file.\n\n## Search/replace rules for edit_file and edit_files\n\nThe `old_text` field must be an **exact match** of the text currently in the file:\n\n- Whitespace matters: spaces, tabs, and indentation must match exactly\n- Line breaks matter: include the exact newline characters\n- Include enough surrounding context to uniquely identify the location\n- Each edit replaces the **first occurrence** only. To replace multiple occurrences, use separate edits.\n\n**Before editing**, call `get_file_contents` to see the file's current content. This avoids failed edits from stale or incorrect assumptions about file content.\n\n## Reading before writing\n\n- Use `get_repo_tree` to discover the project structure and file paths\n- Use `get_file_contents` to read a file before editing it\n- Use `search_code` to find where something is defined or used across the repo\n",
+ "usageGuide": "# GitHub coding workflow\n\n## Branch-based workflow\n\nAlways work on a feature branch, never commit directly to main:\n\n1. `create_branch` from the default branch\n2. Make changes with `edit_file`, `edit_files`, `create_file`, or `delete_file` -- each call auto-commits to the branch\n3. `create_pull_request` when done\n4. `merge_pull_request` with `merge_method: \"squash\"` to collapse all commits into one clean commit on main\n\nMultiple small commits on a feature branch are fine -- they get squash-merged into a single commit.\n\n## Choosing the right write tool\n\n- **`edit_file`** -- Surgical edits to a single existing file. Use for most code changes. Each call is a commit.\n- **`edit_files`** -- Atomic multi-file changes (create + edit + delete in one commit). Use when files must change together to stay consistent (e.g. renaming across files, adding a module + updating imports).\n- **`create_file`** -- Create a new file or completely replace an existing file's content. Use for new files or full rewrites.\n- **`delete_file`** -- Remove a file.\n\n## Search/replace rules for edit_file and edit_files\n\nThe `old_text` field must be an **exact match** of the text currently in the file:\n\n- Whitespace matters: spaces, tabs, and indentation must match exactly\n- Line breaks matter: include the exact newline characters\n- Include enough surrounding context to uniquely identify the location\n- Each edit replaces the **first occurrence** only. To replace multiple occurrences, use separate edits.\n\n**Before editing**, call `get_file_contents` to see the file's current content. This avoids failed edits from stale or incorrect assumptions about file content.\n\n## Reading before writing\n\n- Use `get_repo_tree` to discover the project structure and file paths\n- Use `get_file_contents` to read a file before editing it\n- Use `search_code` to find where something is defined or used across the repo\n",
  "variants": {
  "variants": {
  "classic_pat": {
@@ -8436,7 +8436,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": '## Calendar IDs\n\n- Use `calendarId=\'primary\'` for the authenticated user\'s main calendar\n- Use `list_calendars` to discover other calendar IDs (work, shared, subscribed calendars)\n- Calendar IDs typically look like email addresses (e.g. `user@example.com`) or opaque strings for subscribed calendars\n\n## Date and time format\n\nAll times must be in RFC3339 format:\n- Timed events: `\'2024-01-15T10:00:00-05:00\'` (with timezone offset) or `\'2024-01-15T15:00:00Z\'` (UTC)\n- All-day events use date-only format: `\'2024-01-15\'`\n\n## Creating events\n\nFor `create_event`, required fields are `calendarId`, `summary`, `start`, and `end`:\n\n**Timed event:**\n```json\n{\n "calendarId": "primary",\n "summary": "Team Meeting",\n "start": { "dateTime": "2024-01-15T10:00:00", "timeZone": "America/New_York" },\n "end": { "dateTime": "2024-01-15T11:00:00", "timeZone": "America/New_York" }\n}\n```\n\n**All-day event:**\n```json\n{\n "calendarId": "primary",\n "summary": "Company Holiday",\n "start": { "date": "2024-01-15" },\n "end": { "date": "2024-01-16" }\n}\n```\n\nNote: For all-day events, `end.date` should be the day *after* the last day (exclusive end).\n\n## Listing events in chronological order\n\nTo list upcoming events in start-time order (e.g. "what\'s on my calendar this week"):\n- Set `singleEvents=true` to expand recurring events into individual instances\n- Set `orderBy=\'startTime\'` (requires `singleEvents=true`)\n- Set `timeMin` to now (current ISO timestamp) and `timeMax` to the end of the desired range\n\n## Quick add\n\n`quick_add` parses natural language:\n- `"Meeting with Bob tomorrow at 3pm for 1 hour"`\n- `"Dentist appointment on Friday at 2pm"`\n- `"Weekly standup every Monday at 9am"`\n\n## Free/busy queries\n\nUse `freebusy_query` to check availability before scheduling:\n```json\n{\n "timeMin": "2024-01-15T00:00:00Z",\n "timeMax": "2024-01-15T23:59:59Z",\n "items": [{ "id": "primary" }, { "id": "colleague@example.com" }]\n}\n```\n\n## Updating events\n\n- Use `update_event` for a full replacement (all fields must be provided)\n- Use `patch_event` for partial updates (only provide the fields you want to change in `body`)\n- `patch_event` is preferred when modifying one or two fields to avoid accidentally clearing others\n',
+ "usageGuide": '## Calendar IDs\n\n- Use `calendarId=\'primary\'` for the authenticated user\'s main calendar\n- Use `list_calendars` to discover other calendar IDs (work, shared, subscribed calendars)\n- Calendar IDs typically look like email addresses (e.g. `user@example.com`) or opaque strings for subscribed calendars\n\n## Date and time format\n\nAll times must be in RFC3339 format:\n- Timed events: `\'2024-01-15T10:00:00-05:00\'` (with timezone offset) or `\'2024-01-15T15:00:00Z\'` (UTC)\n- All-day events use date-only format: `\'2024-01-15\'`\n\n## Creating events\n\nFor `create_event`, required fields are `calendarId`, `summary`, `start`, and `end`:\n\n**Timed event:**\n```json\n{\n "calendarId": "primary",\n "summary": "Team Meeting",\n "start": { "dateTime": "2024-01-15T10:00:00", "timeZone": "America/New_York" },\n "end": { "dateTime": "2024-01-15T11:00:00", "timeZone": "America/New_York" }\n}\n```\n\n**All-day event:**\n```json\n{\n "calendarId": "primary",\n "summary": "Company Holiday",\n "start": { "date": "2024-01-15" },\n "end": { "date": "2024-01-16" }\n}\n```\n\nNote: For all-day events, `end.date` should be the day *after* the last day (exclusive end).\n\n## Listing events in chronological order\n\nTo list upcoming events in start-time order (e.g. "what\'s on my calendar this week"):\n- Set `singleEvents=true` to expand recurring events into individual instances\n- Set `orderBy=\'startTime\'` (requires `singleEvents=true`)\n- Set `timeMin` to now (current ISO timestamp) and `timeMax` to the end of the desired range\n\n## Quick add\n\n`quick_add` parses natural language:\n- `"Meeting with Bob tomorrow at 3pm for 1 hour"`\n- `"Dentist appointment on Friday at 2pm"`\n- `"Weekly standup every Monday at 9am"`\n\n## Free/busy queries\n\nUse `freebusy_query` to check availability before scheduling:\n```json\n{\n "timeMin": "2024-01-15T00:00:00Z",\n "timeMax": "2024-01-15T23:59:59Z",\n "items": [{ "id": "primary" }, { "id": "colleague@example.com" }]\n}\n```\n\n## Updating events\n\n- Use `update_event` for a full replacement (all fields must be provided)\n- Use `patch_event` for partial updates (only provide the fields you want to change in `body`)\n- `patch_event` is preferred when modifying one or two fields to avoid accidentally clearing others\n',
  "variants": {
  "variants": {
  "service_account": {
@@ -9242,7 +9242,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": "## Gmail search query syntax\n\nGmail's `q` parameter supports a powerful search language. Key operators:\n\n- `is:unread` / `is:read` \u2014 filter by read status\n- `is:starred`, `is:important` \u2014 filter by markers\n- `from:user@example.com` \u2014 sender filter\n- `to:user@example.com`, `cc:user@example.com` \u2014 recipient filters\n- `subject:keyword` \u2014 subject line search\n- `has:attachment` \u2014 messages with attachments\n- `filename:report.pdf` \u2014 specific attachment filename\n- `label:INBOX` \u2014 filter by label (use label name or ID)\n- `after:2024/01/01`, `before:2024/12/31` \u2014 date range (YYYY/MM/DD)\n- `newer_than:7d`, `older_than:1y` \u2014 relative time (d=days, m=months, y=years)\n- `in:sent`, `in:drafts`, `in:trash`, `in:spam` \u2014 folder filters\n- `larger:5M`, `smaller:1M` \u2014 size filters\n\nCombine operators with spaces (implicit AND): `from:alice is:unread has:attachment`\n\n## Recommended workflows\n\n**Reading emails:**\n1. Use `list_messages` with a `q` query to find relevant message IDs\n2. Use `read_email` on each ID to get decoded subject, from, to, date, and body text\n3. For raw access or advanced format options, use `get_message` with `format='full'`\n\n**Searching for threads:**\n1. Use `list_threads` with `q` to find conversation threads\n2. Use `get_thread` to retrieve all messages in a conversation at once\n\n**Sending email:**\n- Use `send_email` for the vast majority of cases -- it accepts plain `to`, `subject`, `body` fields\n- Use `create_draft_email` + `send_draft` when you want to create a draft for review before sending\n\n**Replying to an email:**\n1. Get the original message with `read_email` to obtain its `threadId` and `id`\n2. Call `send_email` with `replyToMessageId` = original message `id` and `threadId` = original `threadId`\n3. The reply will appear in the same conversation thread\n\n## Label IDs\n\nSystem label IDs (always uppercase): `INBOX`, `UNREAD`, `STARRED`, `IMPORTANT`, `SENT`, `DRAFT`, `SPAM`, `TRASH`, `CATEGORY_PERSONAL`, `CATEGORY_SOCIAL`, `CATEGORY_PROMOTIONS`, `CATEGORY_UPDATES`, `CATEGORY_FORUMS`\n\nUser-created labels have auto-generated IDs. Use `list_labels` to discover them.\n\n## Archiving and organizing\n\n- Archive a message: `modify_message` with `removeLabelIds=['INBOX']`\n- Mark as read: `modify_message` with `removeLabelIds=['UNREAD']`\n- Star a message: `modify_message` with `addLabelIds=['STARRED']`\n- Apply a label: `modify_message` with `addLabelIds=['<labelId>']`\n- Use `modify_thread` to apply the same operation to all messages in a thread at once\n",
+ "usageGuide": "## Gmail search query syntax\n\nGmail's `q` parameter supports a powerful search language. Key operators:\n\n- `is:unread` / `is:read` \u2014 filter by read status\n- `is:starred`, `is:important` \u2014 filter by markers\n- `from:user@example.com` \u2014 sender filter\n- `to:user@example.com`, `cc:user@example.com` \u2014 recipient filters\n- `subject:keyword` \u2014 subject line search\n- `has:attachment` \u2014 messages with attachments\n- `filename:report.pdf` \u2014 specific attachment filename\n- `label:INBOX` \u2014 filter by label (use label name or ID)\n- `after:2024/01/01`, `before:2024/12/31` \u2014 date range (YYYY/MM/DD)\n- `newer_than:7d`, `older_than:1y` \u2014 relative time (d=days, m=months, y=years)\n- `in:sent`, `in:drafts`, `in:trash`, `in:spam` \u2014 folder filters\n- `larger:5M`, `smaller:1M` \u2014 size filters\n\nCombine operators with spaces (implicit AND): `from:alice is:unread has:attachment`\n\n## Recommended workflows\n\n**Reading emails:**\n1. Use `list_messages` with a `q` query to find relevant message IDs\n2. Use `read_email` on each ID to get decoded subject, from, to, date, and body text\n3. For raw access or advanced format options, use `get_message` with `format='full'`\n\n**Searching for threads:**\n1. Use `list_threads` with `q` to find conversation threads\n2. Use `get_thread` to retrieve all messages in a conversation at once\n\n**Sending email:**\n- Use `send_email` for the vast majority of cases -- it accepts plain `to`, `subject`, `body` fields\n- Use `create_draft_email` + `send_draft` when you want to create a draft for review before sending\n\n**Replying to an email:**\n1. Get the original message with `read_email` to obtain its `threadId` and `id`\n2. Call `send_email` with `replyToMessageId` = original message `id` and `threadId` = original `threadId`\n3. The reply will appear in the same conversation thread\n\n## Label IDs\n\nSystem label IDs (always uppercase): `INBOX`, `UNREAD`, `STARRED`, `IMPORTANT`, `SENT`, `DRAFT`, `SPAM`, `TRASH`, `CATEGORY_PERSONAL`, `CATEGORY_SOCIAL`, `CATEGORY_PROMOTIONS`, `CATEGORY_UPDATES`, `CATEGORY_FORUMS`\n\nUser-created labels have auto-generated IDs. Use `list_labels` to discover them.\n\n## Archiving and organizing\n\n- Archive a message: `modify_message` with `removeLabelIds=['INBOX']`\n- Mark as read: `modify_message` with `removeLabelIds=['UNREAD']`\n- Star a message: `modify_message` with `addLabelIds=['STARRED']`\n- Apply a label: `modify_message` with `addLabelIds=['<labelId>']`\n- Use `modify_thread` to apply the same operation to all messages in a thread at once\n",
  "variants": {
  "variants": {
  "service_account": {
@@ -10627,7 +10627,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": null,
+ "usageGuide": null,
  "variants": {
  "variants": {
  "service_account": {
@@ -10827,11 +10827,17 @@ const GENERATED_INTEGRATIONS = {
  "mimeType": {
  "type": "string",
  "description": "Optional Drive MIME type from get_file_meta or search_files."
+ },
+ "previewPages": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 10,
+ "description": "Number of pages to render as images and return alongside the text (PDF only). Omit or set to 0 to skip. Useful for visually checking signatures, logos, or layout."
  }
  },
  "additionalProperties": false
  },
- "handlerCode": "async (input) => {\n const googleNativeExports = {\n 'application/vnd.google-apps.document': 'text/markdown',\n 'application/vnd.google-apps.spreadsheet': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'application/vnd.google-apps.presentation': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',\n 'application/vnd.google-apps.drawing': 'image/svg+xml',\n 'application/vnd.google-apps.script': 'application/vnd.google-apps.script+json',\n }\n const isTextLikeMimeType = (value) => {\n const mimeType = String(value || '').split(';', 1)[0].trim().toLowerCase()\n return mimeType.startsWith('text/')\n || mimeType.includes('json')\n || mimeType.includes('csv')\n || mimeType === 'application/xml'\n || mimeType === 'text/xml'\n || mimeType.endsWith('+xml')\n || mimeType.includes('javascript')\n || mimeType.includes('svg')\n }\n const resolveMimeType = async () => {\n if (typeof input.mimeType === 'string' && input.mimeType.trim())\n return input.mimeType.trim()\n\n const metaRes = await integration.fetch(`/files/${fileId}?fields=id,name,mimeType`)\n const meta = await metaRes.json()\n return meta?.mimeType || ''\n }\n const readTextContent = async (source) => {\n const res = await integration.fetch(source)\n const contentMimeType = res.headers?.get?.('content-type') || ''\n const content = await res.text()\n return { contentMimeType, content }\n }\n\n const fileId = encodeURIComponent(input.fileId)\n const mimeType = await resolveMimeType()\n\n if (!mimeType) {\n return {\n fileId: input.fileId,\n mimeType: null,\n content: null,\n message: 'Could not determine the Drive file MIME type.',\n }\n }\n\n if (mimeType === 'application/vnd.google-apps.folder') {\n return {\n fileId: input.fileId,\n mimeType,\n content: null,\n message: 'Folders do not have readable file content.',\n }\n }\n\n const isGoogleNative = mimeType.startsWith('application/vnd.google-apps.')\n const exportMimeType = isGoogleNative\n ? (typeof input.exportMimeType === 'string' && input.exportMimeType.trim())\n ? input.exportMimeType.trim()\n : googleNativeExports[mimeType] || null\n : null\n\n if (isGoogleNative && !exportMimeType) {\n return {\n fileId: input.fileId,\n mimeType,\n content: null,\n message: 'This Google-native file type does not have a configured export path for read_file_content.',\n }\n }\n\n const source = isGoogleNative\n ? `/files/${fileId}/export?mimeType=${encodeURIComponent(exportMimeType)}`\n : `/files/${fileId}?alt=media`\n\n if (isTextLikeMimeType(exportMimeType || mimeType)) {\n const textResult = await readTextContent(source)\n return {\n fileId: input.fileId,\n mimeType,\n contentMimeType: textResult.contentMimeType || exportMimeType || mimeType,\n content: textResult.content,\n }\n }\n\n const extracted = await utils.extractFileContent({\n auth: true,\n source,\n })\n\n return {\n fileId: input.fileId,\n mimeType,\n contentMimeType: exportMimeType || mimeType,\n ...extracted,\n }\n}",
+ "handlerCode": "async (input) => {\n const googleNativeExports = {\n 'application/vnd.google-apps.document': 'text/markdown',\n 'application/vnd.google-apps.spreadsheet': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'application/vnd.google-apps.presentation': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',\n 'application/vnd.google-apps.drawing': 'image/svg+xml',\n 'application/vnd.google-apps.script': 'application/vnd.google-apps.script+json',\n }\n const isTextLikeMimeType = (value) => {\n const mimeType = String(value || '').split(';', 1)[0].trim().toLowerCase()\n return mimeType.startsWith('text/')\n || mimeType.includes('json')\n || mimeType.includes('csv')\n || mimeType === 'application/xml'\n || mimeType === 'text/xml'\n || mimeType.endsWith('+xml')\n || mimeType.includes('javascript')\n || mimeType.includes('svg')\n }\n const resolveMimeType = async () => {\n if (typeof input.mimeType === 'string' && input.mimeType.trim())\n return input.mimeType.trim()\n\n const metaRes = await integration.fetch(`/files/${fileId}?fields=id,name,mimeType`)\n const meta = await metaRes.json()\n return meta?.mimeType || ''\n }\n const readTextContent = async (source) => {\n const res = await integration.fetch(source)\n const contentMimeType = res.headers?.get?.('content-type') || ''\n const content = await res.text()\n return { contentMimeType, content }\n }\n\n const fileId = encodeURIComponent(input.fileId)\n const mimeType = await resolveMimeType()\n\n if (!mimeType) {\n return {\n fileId: input.fileId,\n mimeType: null,\n content: null,\n message: 'Could not determine the Drive file MIME type.',\n }\n }\n\n if (mimeType === 'application/vnd.google-apps.folder') {\n return {\n fileId: input.fileId,\n mimeType,\n content: null,\n message: 'Folders do not have readable file content.',\n }\n }\n\n const isGoogleNative = mimeType.startsWith('application/vnd.google-apps.')\n const exportMimeType = isGoogleNative\n ? (typeof input.exportMimeType === 'string' && input.exportMimeType.trim())\n ? input.exportMimeType.trim()\n : googleNativeExports[mimeType] || null\n : null\n\n if (isGoogleNative && !exportMimeType) {\n return {\n fileId: input.fileId,\n mimeType,\n content: null,\n message: 'This Google-native file type does not have a configured export path for read_file_content.',\n }\n }\n\n const source = isGoogleNative\n ? `/files/${fileId}/export?mimeType=${encodeURIComponent(exportMimeType)}`\n : `/files/${fileId}?alt=media`\n\n if (isTextLikeMimeType(exportMimeType || mimeType)) {\n const textResult = await readTextContent(source)\n return {\n fileId: input.fileId,\n mimeType,\n contentMimeType: textResult.contentMimeType || exportMimeType || mimeType,\n content: textResult.content,\n }\n }\n\n const extracted = await utils.extractFileContent({\n auth: true,\n source,\n previewPages: input.previewPages || 0,\n })\n\n return {\n fileId: input.fileId,\n mimeType,\n contentMimeType: exportMimeType || mimeType,\n ...extracted,\n }\n}",
  "scope": "read",
  "toolset": "drive"
  },
@@ -12523,7 +12529,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": '## HubSpot guidance\n\nThis integration uses HubSpot CRM v3 object endpoints and CRM v4 association endpoints.\n\n- Prefer `search_*` tools for discovery (they support free-text `query` and structured `filters`).\n- Use `get_*` tools when you already have an object ID and want full details / associations.\n\n### Search filters\n\nAll `search_*` tools accept:\n\n- `query`: free-text search (optional)\n- `filters`: property-based filtering. Each filter is `{ propertyName, operator, value? }`.\n\nCommon operators:\n\n- `EQ`, `NEQ`\n- `LT`, `LTE`, `GT`, `GTE` (numbers or millisecond timestamps)\n- `CONTAINS_TOKEN` (tokenized contains)\n- `HAS_PROPERTY`, `NOT_HAS_PROPERTY` (value ignored)\n- `BETWEEN` (pass `value` as a string `"low,high"`; timestamps in ms recommended)\n\n### Common property names (quick reference)\n\nContacts:\n- `firstname`, `lastname`, `email`\n\nCompanies:\n- `name`, `domain`\n\nDeals:\n- `dealname`, `amount`, `pipeline`, `dealstage`, `closedate`\n\nTickets:\n- `subject`, `content`, `hs_pipeline`, `hs_pipeline_stage`\n\n### Pipelines and stages (deals/tickets)\n\nPipelines and stages are stored as IDs (not human-friendly names). Recommended workflow:\n\n1. Call `list_pipelines` with `objectType: "deals"` or `objectType: "tickets"`.\n2. Pick a pipeline ID and stage ID from the response.\n3. Use those IDs when calling `create_deal` / `update_deal` (via `pipeline` / `dealstage`) or `create_ticket` / `update_ticket` (via `hs_pipeline` / `hs_pipeline_stage`).\n\n### Associations\n\n- Use `get_associations` to list linked records (returns associated IDs).\n- Use `create_association` to link two records (default/unlabeled association).\n- Use `remove_association` to unlink records.\n\n### Engagement objects\n\nNotes:\n- Content: `hs_note_body`\n- Timestamp: `hs_timestamp` (milliseconds)\n\nTasks:\n- Subject: `hs_task_subject`\n- Body: `hs_task_body`\n- Status: `hs_task_status` (`NOT_STARTED` or `COMPLETED`)\n- Priority: `hs_task_priority` (`LOW`, `MEDIUM`, `HIGH`)\n- Due timestamp: `hs_timestamp` (milliseconds)\n\nThe `create_note` and `create_task` tools can also associate the engagement to CRM records in the same call.\n\n### Pagination\n\nHubSpot uses cursor-based pagination. When a response includes `paging.next.after`, pass that value back as `after` in your next call.\n\n',
+ "usageGuide": '## HubSpot guidance\n\nThis integration uses HubSpot CRM v3 object endpoints and CRM v4 association endpoints.\n\n- Prefer `search_*` tools for discovery (they support free-text `query` and structured `filters`).\n- Use `get_*` tools when you already have an object ID and want full details / associations.\n\n### Search filters\n\nAll `search_*` tools accept:\n\n- `query`: free-text search (optional)\n- `filters`: property-based filtering. Each filter is `{ propertyName, operator, value? }`.\n\nCommon operators:\n\n- `EQ`, `NEQ`\n- `LT`, `LTE`, `GT`, `GTE` (numbers or millisecond timestamps)\n- `CONTAINS_TOKEN` (tokenized contains)\n- `HAS_PROPERTY`, `NOT_HAS_PROPERTY` (value ignored)\n- `BETWEEN` (pass `value` as a string `"low,high"`; timestamps in ms recommended)\n\n### Common property names (quick reference)\n\nContacts:\n- `firstname`, `lastname`, `email`\n\nCompanies:\n- `name`, `domain`\n\nDeals:\n- `dealname`, `amount`, `pipeline`, `dealstage`, `closedate`\n\nTickets:\n- `subject`, `content`, `hs_pipeline`, `hs_pipeline_stage`\n\n### Pipelines and stages (deals/tickets)\n\nPipelines and stages are stored as IDs (not human-friendly names). Recommended workflow:\n\n1. Call `list_pipelines` with `objectType: "deals"` or `objectType: "tickets"`.\n2. Pick a pipeline ID and stage ID from the response.\n3. Use those IDs when calling `create_deal` / `update_deal` (via `pipeline` / `dealstage`) or `create_ticket` / `update_ticket` (via `hs_pipeline` / `hs_pipeline_stage`).\n\n### Associations\n\n- Use `get_associations` to list linked records (returns associated IDs).\n- Use `create_association` to link two records (default/unlabeled association).\n- Use `remove_association` to unlink records.\n\n### Engagement objects\n\nNotes:\n- Content: `hs_note_body`\n- Timestamp: `hs_timestamp` (milliseconds)\n\nTasks:\n- Subject: `hs_task_subject`\n- Body: `hs_task_body`\n- Status: `hs_task_status` (`NOT_STARTED` or `COMPLETED`)\n- Priority: `hs_task_priority` (`LOW`, `MEDIUM`, `HIGH`)\n- Due timestamp: `hs_timestamp` (milliseconds)\n\nThe `create_note` and `create_task` tools can also associate the engagement to CRM records in the same call.\n\n### Pagination\n\nHubSpot uses cursor-based pagination. When a response includes `paging.next.after`, pass that value back as `after` in your next call.\n\n',
  "variants": {
  "variants": {
  "private_app_token": {
@@ -14100,7 +14106,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": "# Jira usage guide\n\n## Core workflow patterns\n\n### Discover projects and issue types (before creating issues)\n\n1. Call `list_projects` to find the project key (e.g. `PROJ`).\n2. Call `get_project` with `projectIdOrKey=PROJ` to see available `issueTypes`.\n3. Use the returned issue type name with `create_issue.issueTypeName`.\n\n### Search issues (JQL)\n\nUse `search_issues` with JQL. Common examples:\n\n- My open issues:\n - `assignee = currentUser() AND statusCategory != Done ORDER BY updated DESC`\n- Recently updated issues in a project:\n - `project = PROJ ORDER BY updated DESC`\n- Unassigned bugs:\n - `project = PROJ AND issuetype = Bug AND assignee is EMPTY ORDER BY created DESC`\n- Blocked issues (label-based):\n - `project = PROJ AND labels = blocked ORDER BY priority DESC, updated DESC`\n\nPagination:\n- `search_issues` uses `nextPageToken`. If `nextPageToken` is returned and `isLast=false`, pass it back to get the next page.\n\n### Read issue content\n\n- Use `get_issue` to read a compact issue summary.\n- `get_issue` converts Jira's ADF description into `descriptionMarkdown` when possible (fallback: `descriptionText`).\n- Use `get_issue_comments` to read the comment thread (comment bodies are converted to Markdown).\n\n### Transition an issue (change status)\n\nJira workflows are project-specific, so you must discover valid transitions:\n\n1. Call `get_transitions` to see available transition names/IDs for the issue.\n2. Call `transition_issue` using either `transitionId` (preferred) or `transitionName`.\n\n### Assigning issues\n\n1. Call `search_users` to find the user's `accountId`.\n2. Assign:\n - `assign_issue { issueIdOrKey, accountId }`\n3. Unassign:\n - `assign_issue { issueIdOrKey, accountId: null }`\n\n## Notes on Jira rich text (ADF)\n\nJira Cloud REST API v3 uses **Atlassian Document Format (ADF)** for fields like `description` and comment bodies.\n\n- Read tools convert ADF to Markdown so you can read it directly.\n- Write tools accept **Markdown** in their `*Text` fields and convert it to ADF (`descriptionText`, `bodyText`, `commentText`).\n\n### Round-trip Markdown workflow (recommended)\n\nYou can safely do a Markdown round-trip:\n\n1. `get_issue` -> edit `descriptionMarkdown`\n2. `update_issue { descriptionText: <your edited Markdown> }`\n\nSupported Markdown features when writing:\n\n- Headings (`#`, `##`, ...)\n- Bold/italic/strikethrough (`**bold**`, `*italic*`, `~~strike~~`)\n- Inline code and fenced code blocks (```), including optional language fences\n- Lists (ordered/unordered), nested lists\n- Blockquotes (`>`)\n- Horizontal rules (`---`)\n- Tables\n- Links (`[text](url)`)\n\n## Boards & sprints\n\nIf you\u2019re using Jira Software:\n\n1. Call `list_boards` (optionally filter by `projectKeyOrId`).\n2. Call `list_sprints` for a board to find active/future sprints.\n3. Use `move_issues_to_sprint` to pull work into a sprint (sprint planning).\n\n",
+ "usageGuide": "# Jira usage guide\n\n## Core workflow patterns\n\n### Discover projects and issue types (before creating issues)\n\n1. Call `list_projects` to find the project key (e.g. `PROJ`).\n2. Call `get_project` with `projectIdOrKey=PROJ` to see available `issueTypes`.\n3. Use the returned issue type name with `create_issue.issueTypeName`.\n\n### Search issues (JQL)\n\nUse `search_issues` with JQL. Common examples:\n\n- My open issues:\n - `assignee = currentUser() AND statusCategory != Done ORDER BY updated DESC`\n- Recently updated issues in a project:\n - `project = PROJ ORDER BY updated DESC`\n- Unassigned bugs:\n - `project = PROJ AND issuetype = Bug AND assignee is EMPTY ORDER BY created DESC`\n- Blocked issues (label-based):\n - `project = PROJ AND labels = blocked ORDER BY priority DESC, updated DESC`\n\nPagination:\n- `search_issues` uses `nextPageToken`. If `nextPageToken` is returned and `isLast=false`, pass it back to get the next page.\n\n### Read issue content\n\n- Use `get_issue` to read a compact issue summary.\n- `get_issue` converts Jira's ADF description into `descriptionMarkdown` when possible (fallback: `descriptionText`).\n- Use `get_issue_comments` to read the comment thread (comment bodies are converted to Markdown).\n\n### Transition an issue (change status)\n\nJira workflows are project-specific, so you must discover valid transitions:\n\n1. Call `get_transitions` to see available transition names/IDs for the issue.\n2. Call `transition_issue` using either `transitionId` (preferred) or `transitionName`.\n\n### Assigning issues\n\n1. Call `search_users` to find the user's `accountId`.\n2. Assign:\n - `assign_issue { issueIdOrKey, accountId }`\n3. Unassign:\n - `assign_issue { issueIdOrKey, accountId: null }`\n\n## Notes on Jira rich text (ADF)\n\nJira Cloud REST API v3 uses **Atlassian Document Format (ADF)** for fields like `description` and comment bodies.\n\n- Read tools convert ADF to Markdown so you can read it directly.\n- Write tools accept **Markdown** in their `*Text` fields and convert it to ADF (`descriptionText`, `bodyText`, `commentText`).\n\n### Round-trip Markdown workflow (recommended)\n\nYou can safely do a Markdown round-trip:\n\n1. `get_issue` -> edit `descriptionMarkdown`\n2. `update_issue { descriptionText: <your edited Markdown> }`\n\nSupported Markdown features when writing:\n\n- Headings (`#`, `##`, ...)\n- Bold/italic/strikethrough (`**bold**`, `*italic*`, `~~strike~~`)\n- Inline code and fenced code blocks (```), including optional language fences\n- Lists (ordered/unordered), nested lists\n- Blockquotes (`>`)\n- Horizontal rules (`---`)\n- Tables\n- Links (`[text](url)`)\n\n## Boards & sprints\n\nIf you\u2019re using Jira Software:\n\n1. Call `list_boards` (optionally filter by `projectKeyOrId`).\n2. Call `list_sprints` for a board to find active/future sprints.\n3. Use `move_issues_to_sprint` to pull work into a sprint (sprint planning).\n\n",
  "variants": {
  "variants": {
  "api_token": {
@@ -15188,7 +15194,7 @@ const GENERATED_INTEGRATIONS = {
  }
  ]
  },
- "prompt": '## Appending paragraph blocks\n\nWhen appending a paragraph block to a Notion page, ensure the `rich_text` field is correctly defined within the `paragraph` type. Example format:\n\n```json\n{\n "block_id": "<page_id>",\n "children": [\n {\n "object": "block",\n "type": "paragraph",\n "paragraph": {\n "rich_text": [\n {\n "type": "text",\n "text": {\n "content": "Your text here"\n }\n }\n ]\n }\n }\n ]\n}\n```\n\n',
+ "usageGuide": '## Appending paragraph blocks\n\nWhen appending a paragraph block to a Notion page, ensure the `rich_text` field is correctly defined within the `paragraph` type. Example format:\n\n```json\n{\n "block_id": "<page_id>",\n "children": [\n {\n "object": "block",\n "type": "paragraph",\n "paragraph": {\n "rich_text": [\n {\n "type": "text",\n "text": {\n "content": "Your text here"\n }\n }\n ]\n }\n }\n ]\n}\n```\n\n',
  "variants": {
  "variants": {
  "internal_integration": {
@@ -15829,20 +15835,947 @@ const GENERATED_INTEGRATIONS = {
  "null"
  ]
  },
- "archived": {
- "type": [
- "boolean",
- "null"
- ]
+ "archived": {
+ "type": [
+ "boolean",
+ "null"
+ ]
+ }
+ }
+ },
+ "handlerCode": "async (input) => {\n const body = {\n title: input.title || undefined,\n description: input.description || undefined,\n properties: input.properties || undefined,\n archived: input.archived === undefined ? undefined : input.archived,\n }\n const res = await integration.fetch(`/databases/${encodeURIComponent(input.database_id)}`, { method: 'PATCH', body })\n return await res.json()\n}",
+ "scope": "write",
+ "toolset": "databases"
+ }
+ ],
+ "variantOwnerType": null
+ },
+ "sharepoint": {
+ "manifest": {
+ "name": "sharepoint",
+ "version": "0.1.0",
+ "baseUrl": "https://graph.microsoft.com/v1.0",
+ "tools": [
+ {
+ "name": "search_sites",
+ "description": "Search SharePoint sites by keyword across the tenant. Returns compact site summaries with IDs and web URLs. Use this when you know the site name or topic but not the site ID. If you already know the hostname and path, use get_site_by_path instead.",
+ "inputSchema": "schemas/search_sites.json",
+ "handler": "handlers/search_sites.js",
+ "scope": "read"
+ },
+ {
+ "name": "get_site_by_path",
+ "description": "Resolve a SharePoint site from its hostname and server-relative path, such as hostname='contoso.sharepoint.com' and relativePath='/sites/Marketing'. Use this when you know the SharePoint URL structure and need the stable site ID for later calls.",
+ "inputSchema": "schemas/get_site_by_path.json",
+ "handler": "handlers/get_site_by_path.js",
+ "scope": "read"
+ },
+ {
+ "name": "get_site",
+ "description": "Get metadata for a SharePoint site by site ID. Returns the site name, description, web URL, and timestamps. Use search_sites or get_site_by_path first if you do not already know the site ID.",
+ "inputSchema": "schemas/get_site.json",
+ "handler": "handlers/get_site.js",
+ "scope": "read"
+ },
+ {
+ "name": "list_site_drives",
+ "description": "List document libraries (drives) for a SharePoint site. Returns compact drive summaries including IDs, names, web URLs, and drive type. Use this after resolving a site to discover the right document library before browsing folders or reading files.",
+ "inputSchema": "schemas/list_site_drives.json",
+ "handler": "handlers/list_site_drives.js",
+ "scope": "read"
+ },
+ {
+ "name": "list_drive_children",
+ "description": "List the files and folders inside a SharePoint document library folder. By default this lists the root of the drive. Provide itemId to browse a specific folder. Returns compact entries with file-or-folder flags, MIME type when available, and parent references. Use get_drive_item_meta when you need one specific item.",
+ "inputSchema": "schemas/list_drive_children.json",
+ "handler": "handlers/list_drive_children.js",
+ "scope": "read"
+ },
+ {
+ "name": "get_drive_item_meta",
+ "description": "Get metadata for a single SharePoint file or folder by drive ID and item ID. Returns a compact item summary including IDs, name, type flags, web URL, size, timestamps, and parent reference. Use read_file_content to read the actual file contents.",
+ "inputSchema": "schemas/get_drive_item.json",
+ "handler": "handlers/get_drive_item.js",
+ "scope": "read"
+ },
+ {
+ "name": "search_files",
+ "description": "Search SharePoint and OneDrive content through Microsoft Graph search and return flattened file hits. Provide a query string; Graph KQL syntax is supported. Optional siteId and driveId filters narrow the flattened results after search. Use this for broad file discovery when folder-by-folder browsing is too narrow.",
+ "inputSchema": "schemas/search_files.json",
+ "handler": "handlers/search_files.js",
+ "scope": "read"
+ },
+ {
+ "name": "read_file_content",
+ "description": "Read a SharePoint file into agent-friendly text using the shared file extraction pipeline. This is the standard way to consume document contents such as PDF, DOCX, XLSX, PPTX, and text-like files stored in SharePoint document libraries. Provide driveId and itemId. Folders are rejected; use list_drive_children to browse them.",
+ "inputSchema": "schemas/read_file_content.json",
+ "handler": "handlers/read_file_content.js",
+ "scope": "read"
+ },
+ {
+ "name": "create_folder",
+ "description": "Create a new folder in a SharePoint document library. By default the folder is created in the drive root. Provide parentItemId to create it inside an existing folder. Returns the created folder metadata including its item ID for later browsing or moves.",
+ "inputSchema": "schemas/create_folder.json",
+ "handler": "handlers/create_folder.js",
+ "scope": "write"
+ },
+ {
+ "name": "move_drive_item",
+ "description": "Move a SharePoint file or folder to a different parent folder in the same drive. Provide destinationParentId and optionally a newName to rename during the move. Use get_drive_item_meta or list_drive_children first to discover the current item and destination IDs.",
+ "inputSchema": "schemas/move_drive_item.json",
+ "handler": "handlers/move_drive_item.js",
+ "scope": "write"
+ },
+ {
+ "name": "delete_drive_item",
+ "description": "Delete a SharePoint file or folder by drive ID and item ID. This is a destructive operation. Use get_drive_item_meta or list_drive_children first to confirm you have the correct item before deleting it.",
+ "inputSchema": "schemas/delete_drive_item.json",
+ "handler": "handlers/delete_drive_item.js",
+ "scope": "write"
+ }
+ ]
+ },
+ "usageGuide": "Use this integration for SharePoint document libraries and files.\n\nRecommended workflow:\n\n1. If you know the SharePoint hostname and path, start with `get_site_by_path`.\n2. Otherwise use `search_sites` to discover the correct site.\n3. Use `list_site_drives` to find the relevant document library for that site.\n4. Use `list_drive_children` for deterministic folder browsing or `search_files` for broader file discovery.\n5. Use `get_drive_item_meta` when you need compact metadata for a specific file or folder.\n6. Use `read_file_content` to consume the actual contents of a file in agent-friendly text.\n\nNotes:\n\n- `search_files` uses Microsoft Graph search. The `query` field accepts normal keywords and Graph KQL syntax.\n- `siteId` and `driveId` filters on `search_files` are applied to the flattened search results after Graph returns them.\n- `read_file_content` is for files only. Folders do not have readable file content.\n- This v1 integration is intentionally focused on SharePoint sites, document libraries, folders, and files. It does not include classic SharePoint list/list-item tools or file upload.\n",
+ "variants": {
+ "variants": {
+ "app_credentials": {
+ "label": "Microsoft Graph App Credentials",
+ "schema": {
+ "type": "object",
+ "properties": {
+ "tenantId": {
+ "type": "string",
+ "title": "Tenant ID",
+ "description": "Microsoft Entra tenant ID (GUID) that owns the SharePoint tenant."
+ },
+ "clientId": {
+ "type": "string",
+ "title": "Client ID",
+ "description": "Application (client) ID of the Microsoft Entra app registration."
+ },
+ "clientSecret": {
+ "type": "string",
+ "title": "Client Secret",
+ "description": "Client secret value for the Microsoft Entra app registration.",
+ "format": "password"
+ }
+ },
+ "required": [
+ "tenantId",
+ "clientId",
+ "clientSecret"
+ ],
+ "additionalProperties": false
+ },
+ "preprocess": {
+ "type": "handler",
+ "handlerCode": "async (creds, utils) => {\n const tenantId = String(creds?.tenantId || '').trim()\n const clientId = String(creds?.clientId || '').trim()\n const clientSecret = String(creds?.clientSecret || '').trim()\n\n if (!tenantId)\n throw new Error('Missing tenantId')\n if (!clientId)\n throw new Error('Missing clientId')\n if (!clientSecret)\n throw new Error('Missing clientSecret')\n\n const response = await utils.tokenFetch(\n `https://login.microsoftonline.com/${encodeURIComponent(tenantId)}/oauth2/v2.0/token`,\n {\n method: 'POST',\n body: new URLSearchParams({\n grant_type: 'client_credentials',\n client_id: clientId,\n client_secret: clientSecret,\n scope: 'https://graph.microsoft.com/.default',\n }),\n },\n )\n\n const data = await response.json()\n if (!response.ok) {\n const message = typeof data?.error_description === 'string'\n ? data.error_description\n : (typeof data?.error === 'string' ? data.error : `Token request failed with status ${response.status}`)\n throw new Error(message)\n }\n\n const token = typeof data?.access_token === 'string' ? data.access_token : ''\n if (!token)\n throw new Error('Microsoft token response did not include access_token')\n\n return {\n token,\n expiresIn: data?.expires_in,\n }\n}",
+ "allowedOrigins": [
+ "https://login.microsoftonline.com"
+ ]
+ },
+ "injection": {
+ "headers": {
+ "Authorization": "Bearer {{token}}"
+ }
+ },
+ "healthCheck": {
+ "notViable": true
+ }
+ }
+ },
+ "default": "app_credentials"
+ },
+ "hint": "1. Create or use a Microsoft Entra app registration for Microsoft Graph at https://entra.microsoft.com/#view/Microsoft_AAD_RegisteredApps/ApplicationsListBlade\n2. Create a client secret for that app and copy the **tenant ID**, **client ID**, and **client secret value**.\n3. In Microsoft Graph **Application permissions**, grant at least `Sites.Read.All` and `Files.Read.All`.\n4. If you intend to use write actions such as folder creation, moves, and deletes, also grant `Sites.ReadWrite.All` and `Files.ReadWrite.All`.\n5. Grant admin consent for those application permissions in the tenant.\n6. Paste the tenant ID, client ID, and client secret into this integration. The integration exchanges them for short-lived Microsoft Graph access tokens automatically.",
+ "hintsByVariant": {},
+ "tools": [
+ {
+ "name": "search_sites",
+ "description": "Search SharePoint sites by keyword across the tenant. Returns compact site summaries with IDs and web URLs. Use this when you know the site name or topic but not the site ID. If you already know the hostname and path, use get_site_by_path instead.",
+ "inputSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "required": [
+ "query"
+ ],
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "Keyword search for SharePoint sites, such as a team name or department."
+ }
+ },
+ "additionalProperties": false
+ },
+ "handlerCode": "async (input) => {\n const params = new URLSearchParams()\n params.set('search', input.query)\n const res = await integration.fetch(`/sites?${params.toString()}`)\n const data = await res.json()\n const sites = Array.isArray(data?.value)\n ? data.value.map(site => ({\n id: site.id,\n name: site.displayName || site.name || null,\n displayName: site.displayName || site.name || null,\n description: site.description || '',\n webUrl: site.webUrl || null,\n createdDateTime: site.createdDateTime || null,\n lastModifiedDateTime: site.lastModifiedDateTime || null,\n }))\n : []\n return { query: input.query, sites }\n}",
+ "scope": "read"
+ },
+ {
+ "name": "get_site_by_path",
+ "description": "Resolve a SharePoint site from its hostname and server-relative path, such as hostname='contoso.sharepoint.com' and relativePath='/sites/Marketing'. Use this when you know the SharePoint URL structure and need the stable site ID for later calls.",
+ "inputSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "required": [
+ "hostname",
+ "relativePath"
+ ],
+ "properties": {
+ "hostname": {
+ "type": "string",
+ "description": "SharePoint hostname, such as contoso.sharepoint.com."
+ },
+ "relativePath": {
+ "type": "string",
+ "description": "Server-relative site path, such as /sites/Marketing."
+ }
+ },
+ "additionalProperties": false
+ },
+ "handlerCode": "async (input) => {\n const hostname = String(input.hostname || '').trim()\n const rawPath = String(input.relativePath || '').trim()\n const normalizedPath = `/${rawPath.replace(/^\\/+/, '')}`\n const encodedPath = normalizedPath\n .split('/')\n .map((segment, index) => index === 0 ? '' : encodeURIComponent(segment))\n .join('/')\n const res = await integration.fetch(`/sites/${hostname}:${encodedPath}`)\n const site = await res.json()\n return {\n id: site.id,\n name: site.displayName || site.name || null,\n displayName: site.displayName || site.name || null,\n description: site.description || '',\n webUrl: site.webUrl || null,\n createdDateTime: site.createdDateTime || null,\n lastModifiedDateTime: site.lastModifiedDateTime || null,\n }\n}",
+ "scope": "read"
+ },
+ {
+ "name": "get_site",
+ "description": "Get metadata for a SharePoint site by site ID. Returns the site name, description, web URL, and timestamps. Use search_sites or get_site_by_path first if you do not already know the site ID.",
+ "inputSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "required": [
+ "siteId"
+ ],
+ "properties": {
+ "siteId": {
+ "type": "string",
+ "description": "Microsoft Graph SharePoint site ID."
+ }
+ },
+ "additionalProperties": false
+ },
+ "handlerCode": "async (input) => {\n const res = await integration.fetch(`/sites/${encodeURIComponent(input.siteId)}`)\n const site = await res.json()\n return {\n id: site.id,\n name: site.displayName || site.name || null,\n displayName: site.displayName || site.name || null,\n description: site.description || '',\n webUrl: site.webUrl || null,\n createdDateTime: site.createdDateTime || null,\n lastModifiedDateTime: site.lastModifiedDateTime || null,\n }\n}",
+ "scope": "read"
+ },
+ {
+ "name": "list_site_drives",
+ "description": "List document libraries (drives) for a SharePoint site. Returns compact drive summaries including IDs, names, web URLs, and drive type. Use this after resolving a site to discover the right document library before browsing folders or reading files.",
+ "inputSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "required": [
+ "siteId"
+ ],
+ "properties": {
+ "siteId": {
+ "type": "string",
+ "description": "Microsoft Graph SharePoint site ID."
+ },
+ "top": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 200,
+ "description": "Maximum number of drives to return."
+ },
+ "includeSystem": {
+ "type": "boolean",
+ "description": "Set true to include hidden/system drives."
+ }
+ },
+ "additionalProperties": false
+ },
+ "handlerCode": "async (input) => {\n const params = new URLSearchParams()\n params.set('$select', input.includeSystem\n ? 'id,name,webUrl,driveType,createdDateTime,lastModifiedDateTime,system'\n : 'id,name,webUrl,driveType,createdDateTime,lastModifiedDateTime')\n if (input.top)\n params.set('$top', String(input.top))\n\n const res = await integration.fetch(`/sites/${encodeURIComponent(input.siteId)}/drives?${params.toString()}`)\n const data = await res.json()\n const drives = Array.isArray(data?.value)\n ? data.value.map(drive => ({\n id: drive.id,\n name: drive.name || null,\n webUrl: drive.webUrl || null,\n driveType: drive.driveType || null,\n createdDateTime: drive.createdDateTime || null,\n lastModifiedDateTime: drive.lastModifiedDateTime || null,\n isSystem: Boolean(drive.system),\n }))\n : []\n\n return {\n siteId: input.siteId,\n drives,\n nextLink: data?.['@odata.nextLink'] || null,\n }\n}",
+ "scope": "read"
+ },
+ {
+ "name": "list_drive_children",
+ "description": "List the files and folders inside a SharePoint document library folder. By default this lists the root of the drive. Provide itemId to browse a specific folder. Returns compact entries with file-or-folder flags, MIME type when available, and parent references. Use get_drive_item_meta when you need one specific item.",
+ "inputSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "required": [
+ "driveId"
+ ],
+ "properties": {
+ "driveId": {
+ "type": "string",
+ "description": "Document library drive ID."
+ },
+ "itemId": {
+ "type": "string",
+ "description": "Optional folder item ID. Omit to list the drive root."
+ },
+ "top": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 200,
+ "description": "Maximum number of children to return."
+ },
+ "orderBy": {
+ "type": "string",
+ "description": "Optional Microsoft Graph orderBy expression, such as name or lastModifiedDateTime desc."
+ }
+ },
+ "additionalProperties": false
+ },
16118
+ "handlerCode": "async (input) => {\n const flattenItem = item => ({\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n eTag: item.eTag || null,\n cTag: item.cTag || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n })\n\n const params = new URLSearchParams()\n params.set(\n '$select',\n 'id,name,webUrl,size,createdDateTime,lastModifiedDateTime,eTag,cTag,parentReference,file,folder,package',\n )\n if (input.top)\n params.set('$top', String(input.top))\n if (input.orderBy)\n params.set('$orderby', input.orderBy)\n\n const basePath = input.itemId\n ? `/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}/children`\n : `/drives/${encodeURIComponent(input.driveId)}/root/children`\n\n const res = await integration.fetch(`${basePath}?${params.toString()}`)\n const data = await res.json()\n\n return {\n driveId: input.driveId,\n itemId: input.itemId || null,\n children: Array.isArray(data?.value) ? data.value.map(flattenItem) : [],\n nextLink: data?.['@odata.nextLink'] || null,\n }\n}",
16119
+ "scope": "read"
16120
+ },
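Because the flattened children carry `isFile`/`isFolder` flags, a depth-limited walk over a document library falls out naturally. A sketch, again assuming a hypothetical `callTool` dispatcher:

```js
// Sketch: depth-limited traversal using list_drive_children.
// Omitting itemId lists the drive root, per the schema above.
async function walkFolder(callTool, driveId, itemId = undefined, depth = 2) {
  const { children } = await callTool('list_drive_children', { driveId, itemId, top: 200 })
  const files = children.filter(c => c.isFile)
  if (depth > 0) {
    for (const folder of children.filter(c => c.isFolder)) {
      files.push(...await walkFolder(callTool, driveId, folder.id, depth - 1))
    }
  }
  return files
}
```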
16121
+ {
16122
+ "name": "get_drive_item_meta",
16123
+ "description": "Get metadata for a single SharePoint file or folder by drive ID and item ID. Returns a compact item summary including IDs, name, type flags, web URL, size, timestamps, and parent reference. Use read_file_content to read the actual file contents.",
16124
+ "inputSchema": {
16125
+ "$schema": "http://json-schema.org/draft-07/schema#",
16126
+ "type": "object",
16127
+ "required": [
16128
+ "driveId",
16129
+ "itemId"
16130
+ ],
16131
+ "properties": {
16132
+ "driveId": {
16133
+ "type": "string",
16134
+ "description": "Document library drive ID."
16135
+ },
16136
+ "itemId": {
16137
+ "type": "string",
16138
+ "description": "Drive item ID for the file or folder."
16139
+ }
16140
+ },
16141
+ "additionalProperties": false
16142
+ },
16143
+ "handlerCode": "async (input) => {\n const flattenItem = item => ({\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n eTag: item.eTag || null,\n cTag: item.cTag || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n })\n\n const params = new URLSearchParams()\n params.set(\n '$select',\n 'id,name,webUrl,size,createdDateTime,lastModifiedDateTime,eTag,cTag,parentReference,file,folder,package',\n )\n const res = await integration.fetch(`/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}?${params.toString()}`)\n return flattenItem(await res.json())\n}",
16144
+ "scope": "read"
16145
+ },
16146
+ {
16147
+ "name": "search_files",
16148
+ "description": "Search SharePoint and OneDrive content through Microsoft Graph search and return flattened file hits. Provide a query string; Graph KQL syntax is supported. Optional siteId and driveId filters narrow the flattened results after search. Use this for broad file discovery when folder-by-folder browsing is too narrow.",
16149
+ "inputSchema": {
16150
+ "$schema": "http://json-schema.org/draft-07/schema#",
16151
+ "type": "object",
16152
+ "required": [
16153
+ "query"
16154
+ ],
16155
+ "properties": {
16156
+ "query": {
16157
+ "type": "string",
16158
+ "description": "Search query string. Microsoft Graph KQL syntax is supported."
16159
+ },
16160
+ "siteId": {
16161
+ "type": "string",
16162
+ "description": "Optional site ID to keep only hits from a specific SharePoint site."
16163
+ },
16164
+ "driveId": {
16165
+ "type": "string",
16166
+ "description": "Optional drive ID to keep only hits from a specific document library."
16167
+ },
16168
+ "from": {
16169
+ "type": "integer",
16170
+ "minimum": 0,
16171
+ "description": "Offset into the Graph search results."
16172
+ },
16173
+ "size": {
16174
+ "type": "integer",
16175
+ "minimum": 1,
16176
+ "maximum": 50,
16177
+ "description": "Maximum number of hits to request from Graph."
16178
+ }
16179
+ },
16180
+ "additionalProperties": false
16181
+ },
16182
+ "handlerCode": "async (input) => {\n const extractFallbackRegion = (error) => {\n const texts = [error?.data?.body, error?.message].filter(s => typeof s === 'string')\n for (const text of texts) {\n const match = text.match(/Only valid regions are ([A-Z,\\s]+)/i)\n const region = match?.[1]?.split(',').map(r => r.trim().toUpperCase()).filter(Boolean)[0]\n if (region)\n return region\n }\n return null\n }\n\n const runSearch = async (region) => {\n const res = await integration.fetch('/search/query', {\n method: 'POST',\n body: {\n requests: [{\n entityTypes: ['driveItem'],\n query: { queryString: input.query },\n from: typeof input.from === 'number' ? input.from : 0,\n size: typeof input.size === 'number' ? input.size : 25,\n region,\n }],\n },\n })\n return res.json()\n }\n\n const flattenHit = (hit) => {\n const resource = hit?.resource || {}\n const parentReference = resource.parentReference || {}\n return {\n id: resource.id || hit?.hitId || null,\n name: resource.name || null,\n webUrl: resource.webUrl || null,\n summary: hit?.summary || '',\n rank: hit?.rank ?? null,\n createdDateTime: resource.createdDateTime || null,\n lastModifiedDateTime: resource.lastModifiedDateTime || null,\n mimeType: resource.file?.mimeType || null,\n size: resource.size ?? null,\n isFolder: Boolean(resource.folder || resource.package),\n isFile: Boolean(resource.file),\n driveId: parentReference.driveId || null,\n siteId: parentReference.siteId || null,\n parentReference,\n }\n }\n\n let data\n try {\n data = await runSearch('NAM')\n }\n catch (error) {\n const fallback = extractFallbackRegion(error)\n if (!fallback)\n throw error\n data = await runSearch(fallback)\n }\n\n const container = data?.value?.[0]?.hitsContainers?.[0]\n const allHits = Array.isArray(container?.hits) ? container.hits.map(flattenHit) : []\n const hits = allHits.filter((hit) => {\n if (input.siteId && hit.siteId !== input.siteId)\n return false\n if (input.driveId && hit.driveId !== input.driveId)\n return false\n return true\n })\n\n return {\n query: input.query,\n hits,\n total: container?.total ?? hits.length,\n moreResultsAvailable: Boolean(container?.moreResultsAvailable),\n }\n}",
16183
+ "scope": "read"
16184
+ },
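The `search_files` handler hard-codes a first attempt with region `NAM` and, if Graph rejects it, parses the "Only valid regions are …" error text and retries once with the tenant's region — app-only calls to `/search/query` must name a region, so callers never pass one themselves. A usage sketch (same hypothetical `callTool`; the drive ID is a placeholder):

```js
// Sketch: a KQL query through search_files.
async function findContracts(callTool) {
  const { hits, moreResultsAvailable } = await callTool('search_files', {
    query: 'filetype:pdf filename:contract', // Graph KQL, per the schema
    driveId: '<driveId>',
    size: 25,
  })
  // driveId/siteId are applied to the flattened hits *after* Graph returns
  // the page, so a page can hold fewer than `size` hits even while
  // moreResultsAvailable is true.
  return { hits, moreResultsAvailable }
}
```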
16185
+ {
16186
+ "name": "read_file_content",
16187
+ "description": "Read a SharePoint file into agent-friendly text using the shared file extraction pipeline. This is the standard way to consume document contents such as PDF, DOCX, XLSX, PPTX, and text-like files stored in SharePoint document libraries. Provide driveId and itemId. Folders are rejected; use list_drive_children to browse them.",
16188
+ "inputSchema": {
16189
+ "$schema": "http://json-schema.org/draft-07/schema#",
16190
+ "type": "object",
16191
+ "required": [
16192
+ "driveId",
16193
+ "itemId"
16194
+ ],
16195
+ "properties": {
16196
+ "driveId": {
16197
+ "type": "string",
16198
+ "description": "Document library drive ID."
16199
+ },
16200
+ "itemId": {
16201
+ "type": "string",
16202
+ "description": "Drive item ID for the file."
16203
+ },
16204
+ "mimeType": {
16205
+ "type": "string",
16206
+ "description": "Optional MIME type from get_drive_item_meta or search_files."
16207
+ },
16208
+ "previewPages": {
16209
+ "type": "integer",
16210
+ "minimum": 1,
16211
+ "maximum": 10,
16212
+ "description": "Number of pages to render as images and return alongside the text (PDF only). Omit or set to 0 to skip. Useful for visually checking signatures, logos, or layout."
16213
+ }
16214
+ },
16215
+ "additionalProperties": false
16216
+ },
16217
+ "handlerCode": "async (input) => {\n const params = new URLSearchParams()\n params.set(\n '$select',\n 'id,name,webUrl,size,createdDateTime,lastModifiedDateTime,parentReference,file,folder,package',\n )\n const res = await integration.fetch(`/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}?${params.toString()}`)\n const item = await res.json()\n const mimeType = input.mimeType || item?.file?.mimeType || null\n\n if (item?.folder || item?.package) {\n return {\n driveId: input.driveId,\n itemId: input.itemId,\n name: item?.name || null,\n mimeType,\n content: null,\n message: 'Folders do not have readable file content.',\n }\n }\n\n const extracted = await utils.extractFileContent({\n auth: true,\n source: `/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}/content`,\n previewPages: input.previewPages || 0,\n })\n\n return {\n driveId: input.driveId,\n itemId: input.itemId,\n name: item?.name || null,\n webUrl: item?.webUrl || null,\n mimeType,\n ...extracted,\n }\n}",
16218
+ "scope": "read"
16219
+ },
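A common pairing is metadata first, contents second: `get_drive_item_meta` tells you whether the item is a file and what MIME type it has, which `read_file_content` then accepts as a hint. A sketch under the same `callTool` assumption:

```js
// Sketch: confirm an item is a file before reading it; request a one-page
// visual preview only for PDFs, per the previewPages schema above.
async function readIfFile(callTool, driveId, itemId) {
  const meta = await callTool('get_drive_item_meta', { driveId, itemId })
  if (!meta.isFile) return null
  return callTool('read_file_content', {
    driveId,
    itemId,
    mimeType: meta.mimeType ?? undefined,
    ...(meta.mimeType === 'application/pdf' ? { previewPages: 1 } : {}),
  })
}
```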
16220
+ {
16221
+ "name": "create_folder",
16222
+ "description": "Create a new folder in a SharePoint document library. By default the folder is created in the drive root. Provide parentItemId to create it inside an existing folder. Returns the created folder metadata including its item ID for later browsing or moves.",
16223
+ "inputSchema": {
16224
+ "$schema": "http://json-schema.org/draft-07/schema#",
16225
+ "type": "object",
16226
+ "required": [
16227
+ "driveId",
16228
+ "name"
16229
+ ],
16230
+ "properties": {
16231
+ "driveId": {
16232
+ "type": "string",
16233
+ "description": "Document library drive ID."
16234
+ },
16235
+ "name": {
16236
+ "type": "string",
16237
+ "description": "Folder name to create."
16238
+ },
16239
+ "parentItemId": {
16240
+ "type": "string",
16241
+ "description": "Optional parent folder item ID. Omit to create in the drive root."
16242
+ },
16243
+ "conflictBehavior": {
16244
+ "type": "string",
16245
+ "enum": [
16246
+ "rename",
16247
+ "replace",
16248
+ "fail"
16249
+ ],
16250
+ "description": "How Graph should handle name conflicts. Defaults to rename."
16251
+ }
16252
+ },
16253
+ "additionalProperties": false
16254
+ },
16255
+ "handlerCode": "async (input) => {\n const path = input.parentItemId\n ? `/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.parentItemId)}/children`\n : `/drives/${encodeURIComponent(input.driveId)}/root/children`\n\n const res = await integration.fetch(path, {\n method: 'POST',\n body: {\n name: input.name,\n folder: {},\n '@microsoft.graph.conflictBehavior': input.conflictBehavior || 'rename',\n },\n })\n const item = await res.json()\n return {\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n }\n}",
16256
+ "scope": "write"
16257
+ },
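`conflictBehavior` changes the character of `create_folder`: the default `rename` never collides (Graph appends a numeric suffix), while `fail` doubles as an existence check. A sketch of an ensure-folder pattern — how the runtime surfaces Graph's 409 `nameAlreadyExists` response as a thrown error is an assumption here:

```js
// Sketch: create-or-find a folder using conflictBehavior 'fail'.
async function ensureFolder(callTool, driveId, name, parentItemId) {
  try {
    return await callTool('create_folder', {
      driveId, name, parentItemId, conflictBehavior: 'fail',
    })
  } catch (err) {
    // On a name conflict, fall back to locating the existing folder.
    const { children } = await callTool('list_drive_children', {
      driveId, itemId: parentItemId,
    })
    const existing = children.find(c => c.isFolder && c.name === name)
    if (existing) return existing
    throw err
  }
}
```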
16258
+ {
16259
+ "name": "move_drive_item",
16260
+ "description": "Move a SharePoint file or folder to a different parent folder in the same drive. Provide destinationParentId and optionally a newName to rename during the move. Use get_drive_item_meta or list_drive_children first to discover the current item and destination IDs.",
16261
+ "inputSchema": {
16262
+ "$schema": "http://json-schema.org/draft-07/schema#",
16263
+ "type": "object",
16264
+ "required": [
16265
+ "driveId",
16266
+ "itemId",
16267
+ "destinationParentId"
16268
+ ],
16269
+ "properties": {
16270
+ "driveId": {
16271
+ "type": "string",
16272
+ "description": "Document library drive ID."
16273
+ },
16274
+ "itemId": {
16275
+ "type": "string",
16276
+ "description": "Drive item ID for the file or folder to move."
16277
+ },
16278
+ "destinationParentId": {
16279
+ "type": "string",
16280
+ "description": "Destination folder item ID."
16281
+ },
16282
+ "newName": {
16283
+ "type": "string",
16284
+ "description": "Optional new item name to apply during the move."
16285
+ }
16286
+ },
16287
+ "additionalProperties": false
16288
+ },
16289
+ "handlerCode": "async (input) => {\n const body = {\n parentReference: {\n id: input.destinationParentId,\n },\n }\n if (input.newName)\n body.name = input.newName\n\n const res = await integration.fetch(`/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}`, {\n method: 'PATCH',\n body,\n })\n const item = await res.json()\n return {\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n }\n}",
16290
+ "scope": "write"
16291
+ },
16292
+ {
16293
+ "name": "delete_drive_item",
16294
+ "description": "Delete a SharePoint file or folder by drive ID and item ID. This is a destructive operation. Use get_drive_item_meta or list_drive_children first to confirm you have the correct item before deleting it.",
16295
+ "inputSchema": {
16296
+ "$schema": "http://json-schema.org/draft-07/schema#",
16297
+ "type": "object",
16298
+ "required": [
16299
+ "driveId",
16300
+ "itemId"
16301
+ ],
16302
+ "properties": {
16303
+ "driveId": {
16304
+ "type": "string",
16305
+ "description": "Document library drive ID."
16306
+ },
16307
+ "itemId": {
16308
+ "type": "string",
16309
+ "description": "Drive item ID for the file or folder to delete."
16310
+ }
16311
+ },
16312
+ "additionalProperties": false
16313
+ },
16314
+ "handlerCode": "async (input) => {\n const res = await integration.fetch(`/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}`, {\n method: 'DELETE',\n })\n if (res.status === 204)\n return { success: true, status: 204 }\n try {\n return await res.json()\n }\n catch {\n return { success: res.ok, status: res.status }\n }\n}",
16315
+ "scope": "write"
16316
+ }
16317
+ ],
16318
+ "variantOwnerType": null
16319
+ },
16320
+ "sharepoint-folder": {
16321
+ "manifest": {
16322
+ "name": "sharepoint",
16323
+ "version": "0.1.0",
16324
+ "baseUrl": "https://graph.microsoft.com/v1.0",
16325
+ "variantLabel": "Single folder",
16326
+ "variantConfig": [
16327
+ {
16328
+ "key": "site",
16329
+ "label": "Site",
16330
+ "selectionMode": "single",
16331
+ "listHandler": "async (config) => {\n const res = await integration.fetch('/sites?search=*&$select=id,displayName,name&$top=50')\n const data = await res.json()\n if (!Array.isArray(data?.value)) return []\n return data.value\n .map(site => ({ id: site.id, name: site.displayName || site.name || site.id }))\n .filter(s => s.id && s.name)\n}"
16332
+ },
16333
+ {
16334
+ "key": "drive",
16335
+ "label": "Document library",
16336
+ "selectionMode": "single",
16337
+ "listHandler": "async (config) => {\n if (!config.siteId) return []\n const res = await integration.fetch(`/sites/${encodeURIComponent(config.siteId)}/drives?$select=id,name,driveType,system`)\n const data = await res.json()\n if (!Array.isArray(data?.value)) return []\n return data.value\n .filter(d => !d.system)\n .map(d => ({ id: d.id, name: d.name || d.id }))\n .filter(d => d.id && d.name)\n}"
16338
+ },
16339
+ {
16340
+ "key": "folder",
16341
+ "label": "Folder",
16342
+ "selectionMode": "single",
16343
+ "listHandler": "async (config) => {\n if (!config.driveId) return []\n const res = await integration.fetch(`/drives/${encodeURIComponent(config.driveId)}/root/children?$select=id,name,folder&$top=100`)\n const data = await res.json()\n if (!Array.isArray(data?.value)) return []\n return data.value\n .filter(item => item.folder)\n .map(item => ({ id: item.id, name: item.name || item.id }))\n .filter(item => item.id && item.name)\n}"
16344
+ }
16345
+ ],
16346
+ "tools": [
16347
+ {
16348
+ "name": "list_folder",
16349
+ "description": "List files and folders inside the connected SharePoint folder.",
16350
+ "inputSchema": "../../schemas/list_drive_children.json",
16351
+ "handler": "../../handlers/list_drive_children.js",
16352
+ "scope": "read",
16353
+ "injectFromConfig": {
16354
+ "driveId": "driveId",
16355
+ "itemId": "folderId"
16356
+ }
16357
+ },
16358
+ {
16359
+ "name": "browse_folder",
16360
+ "description": "List files and folders inside a sub-folder by item ID. Use list_folder first to discover sub-folder IDs.",
16361
+ "inputSchema": "../../schemas/list_drive_children.json",
16362
+ "handler": "../../handlers/list_drive_children.js",
16363
+ "scope": "read",
16364
+ "injectFromConfig": {
16365
+ "driveId": "driveId"
16366
+ }
16367
+ },
16368
+ {
16369
+ "name": "get_drive_item_meta",
16370
+ "description": "Get metadata for a file or folder by item ID.",
16371
+ "inputSchema": "../../schemas/get_drive_item.json",
16372
+ "handler": "../../handlers/get_drive_item.js",
16373
+ "scope": "read",
16374
+ "injectFromConfig": {
16375
+ "driveId": "driveId"
16376
+ }
16377
+ },
16378
+ {
16379
+ "name": "search_files",
16380
+ "description": "Search files within the connected document library. KQL syntax is supported.",
16381
+ "inputSchema": "../../schemas/search_files.json",
16382
+ "handler": "../../handlers/search_files.js",
16383
+ "scope": "read",
16384
+ "injectFromConfig": {
16385
+ "siteId": "siteId",
16386
+ "driveId": "driveId"
16387
+ }
16388
+ },
16389
+ {
16390
+ "name": "read_file_content",
16391
+ "description": "Read a file's contents. Provide the file item ID. Use list_folder or search_files to discover file IDs.",
16392
+ "inputSchema": "../../schemas/read_file_content.json",
16393
+ "handler": "../../handlers/read_file_content.js",
16394
+ "scope": "read",
16395
+ "injectFromConfig": {
16396
+ "driveId": "driveId"
16397
+ }
16398
+ },
16399
+ {
16400
+ "name": "create_folder",
16401
+ "description": "Create a new sub-folder inside the connected folder. Omit parentItemId to create directly inside the connected folder.",
16402
+ "inputSchema": "../../schemas/create_folder.json",
16403
+ "handler": "../../handlers/create_folder.js",
16404
+ "scope": "write",
16405
+ "injectFromConfig": {
16406
+ "driveId": "driveId",
16407
+ "parentItemId": "folderId"
16408
+ }
16409
+ },
16410
+ {
16411
+ "name": "move_drive_item",
16412
+ "description": "Move a file or folder to a different parent folder within the document library.",
16413
+ "inputSchema": "../../schemas/move_drive_item.json",
16414
+ "handler": "../../handlers/move_drive_item.js",
16415
+ "scope": "write",
16416
+ "injectFromConfig": {
16417
+ "driveId": "driveId"
16418
+ }
16419
+ },
16420
+ {
16421
+ "name": "delete_drive_item",
16422
+ "description": "Delete a file or folder by item ID.",
16423
+ "inputSchema": "../../schemas/delete_drive_item.json",
16424
+ "handler": "../../handlers/delete_drive_item.js",
16425
+ "scope": "write",
16426
+ "injectFromConfig": {
16427
+ "driveId": "driveId"
16428
+ }
16429
+ }
16430
+ ]
16431
+ },
16432
+ "usageGuide": "Use this integration for SharePoint document libraries and files.\n\nRecommended workflow:\n\n1. If you know the SharePoint hostname and path, start with `get_site_by_path`.\n2. Otherwise use `search_sites` to discover the correct site.\n3. Use `list_site_drives` to find the relevant document library for that site.\n4. Use `list_drive_children` for deterministic folder browsing or `search_files` for broader file discovery.\n5. Use `get_drive_item_meta` when you need compact metadata for a specific file or folder.\n6. Use `read_file_content` to consume the actual contents of a file in agent-friendly text.\n\nNotes:\n\n- `search_files` uses Microsoft Graph search. The `query` field accepts normal keywords and Graph KQL syntax.\n- `siteId` and `driveId` filters on `search_files` are applied to the flattened search results after Graph returns them.\n- `read_file_content` is for files only. Folders do not have readable file content.\n- This v1 integration is intentionally focused on SharePoint sites, document libraries, folders, and files. It does not include classic SharePoint list/list-item tools or file upload.\n",
16433
+ "variants": {
16434
+ "variants": {
16435
+ "app_credentials": {
16436
+ "label": "Microsoft Graph App Credentials",
16437
+ "schema": {
16438
+ "type": "object",
16439
+ "properties": {
16440
+ "tenantId": {
16441
+ "type": "string",
16442
+ "title": "Tenant ID",
16443
+ "description": "Microsoft Entra tenant ID (GUID) that owns the SharePoint tenant."
16444
+ },
16445
+ "clientId": {
16446
+ "type": "string",
16447
+ "title": "Client ID",
16448
+ "description": "Application (client) ID of the Microsoft Entra app registration."
16449
+ },
16450
+ "clientSecret": {
16451
+ "type": "string",
16452
+ "title": "Client Secret",
16453
+ "description": "Client secret value for the Microsoft Entra app registration.",
16454
+ "format": "password"
16455
+ }
16456
+ },
16457
+ "required": [
16458
+ "tenantId",
16459
+ "clientId",
16460
+ "clientSecret"
16461
+ ],
16462
+ "additionalProperties": false
16463
+ },
16464
+ "preprocess": {
16465
+ "type": "handler",
16466
+ "handlerCode": "async (creds, utils) => {\n const tenantId = String(creds?.tenantId || '').trim()\n const clientId = String(creds?.clientId || '').trim()\n const clientSecret = String(creds?.clientSecret || '').trim()\n\n if (!tenantId)\n throw new Error('Missing tenantId')\n if (!clientId)\n throw new Error('Missing clientId')\n if (!clientSecret)\n throw new Error('Missing clientSecret')\n\n const response = await utils.tokenFetch(\n `https://login.microsoftonline.com/${encodeURIComponent(tenantId)}/oauth2/v2.0/token`,\n {\n method: 'POST',\n body: new URLSearchParams({\n grant_type: 'client_credentials',\n client_id: clientId,\n client_secret: clientSecret,\n scope: 'https://graph.microsoft.com/.default',\n }),\n },\n )\n\n const data = await response.json()\n if (!response.ok) {\n const message = typeof data?.error_description === 'string'\n ? data.error_description\n : (typeof data?.error === 'string' ? data.error : `Token request failed with status ${response.status}`)\n throw new Error(message)\n }\n\n const token = typeof data?.access_token === 'string' ? data.access_token : ''\n if (!token)\n throw new Error('Microsoft token response did not include access_token')\n\n return {\n token,\n expiresIn: data?.expires_in,\n }\n}",
16467
+ "allowedOrigins": [
16468
+ "https://login.microsoftonline.com"
16469
+ ]
16470
+ },
16471
+ "injection": {
16472
+ "headers": {
16473
+ "Authorization": "Bearer {{token}}"
16474
+ }
16475
+ },
16476
+ "healthCheck": {
16477
+ "notViable": true
16478
+ }
16479
+ }
16480
+ },
16481
+ "default": "app_credentials"
16482
+ },
16483
+ "hint": "1. Create or use a Microsoft Entra app registration for Microsoft Graph at https://entra.microsoft.com/#view/Microsoft_AAD_RegisteredApps/ApplicationsListBlade\n2. Create a client secret for that app and copy the **tenant ID**, **client ID**, and **client secret value**.\n3. In Microsoft Graph **Application permissions**, grant at least `Sites.Read.All` and `Files.Read.All`.\n4. If you intend to use write actions such as folder creation, moves, and deletes, also grant `Sites.ReadWrite.All` and `Files.ReadWrite.All`.\n5. Grant admin consent for those application permissions in the tenant.\n6. Paste the tenant ID, client ID, and client secret into this integration. The integration exchanges them for short-lived Microsoft Graph access tokens automatically.",
16484
+ "hintsByVariant": {},
16485
+ "tools": [
16486
+ {
16487
+ "name": "list_folder",
16488
+ "description": "List files and folders inside the connected SharePoint folder.",
16489
+ "inputSchema": {
16490
+ "$schema": "http://json-schema.org/draft-07/schema#",
16491
+ "type": "object",
16492
+ "required": [
16493
+ "driveId"
16494
+ ],
16495
+ "properties": {
16496
+ "driveId": {
16497
+ "type": "string",
16498
+ "description": "Document library drive ID."
16499
+ },
16500
+ "itemId": {
16501
+ "type": "string",
16502
+ "description": "Optional folder item ID. Omit to list the drive root."
16503
+ },
16504
+ "top": {
16505
+ "type": "integer",
16506
+ "minimum": 1,
16507
+ "maximum": 200,
16508
+ "description": "Maximum number of children to return."
16509
+ },
16510
+ "orderBy": {
16511
+ "type": "string",
16512
+ "description": "Optional Microsoft Graph orderBy expression, such as name or lastModifiedDateTime desc."
16513
+ }
16514
+ },
16515
+ "additionalProperties": false
16516
+ },
16517
+ "handlerCode": "async (input) => {\n const flattenItem = item => ({\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n eTag: item.eTag || null,\n cTag: item.cTag || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n })\n\n const params = new URLSearchParams()\n params.set(\n '$select',\n 'id,name,webUrl,size,createdDateTime,lastModifiedDateTime,eTag,cTag,parentReference,file,folder,package',\n )\n if (input.top)\n params.set('$top', String(input.top))\n if (input.orderBy)\n params.set('$orderby', input.orderBy)\n\n const basePath = input.itemId\n ? `/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}/children`\n : `/drives/${encodeURIComponent(input.driveId)}/root/children`\n\n const res = await integration.fetch(`${basePath}?${params.toString()}`)\n const data = await res.json()\n\n return {\n driveId: input.driveId,\n itemId: input.itemId || null,\n children: Array.isArray(data?.value) ? data.value.map(flattenItem) : [],\n nextLink: data?.['@odata.nextLink'] || null,\n }\n}",
16518
+ "scope": "read",
16519
+ "injectFromConfig": {
16520
+ "driveId": "driveId",
16521
+ "itemId": "folderId"
16522
+ }
16523
+ },
16524
+ {
16525
+ "name": "browse_folder",
16526
+ "description": "List files and folders inside a sub-folder by item ID. Use list_folder first to discover sub-folder IDs.",
16527
+ "inputSchema": {
16528
+ "$schema": "http://json-schema.org/draft-07/schema#",
16529
+ "type": "object",
16530
+ "required": [
16531
+ "driveId"
16532
+ ],
16533
+ "properties": {
16534
+ "driveId": {
16535
+ "type": "string",
16536
+ "description": "Document library drive ID."
16537
+ },
16538
+ "itemId": {
16539
+ "type": "string",
16540
+ "description": "Optional folder item ID. Omit to list the drive root."
16541
+ },
16542
+ "top": {
16543
+ "type": "integer",
16544
+ "minimum": 1,
16545
+ "maximum": 200,
16546
+ "description": "Maximum number of children to return."
16547
+ },
16548
+ "orderBy": {
16549
+ "type": "string",
16550
+ "description": "Optional Microsoft Graph orderBy expression, such as name or lastModifiedDateTime desc."
16551
+ }
16552
+ },
16553
+ "additionalProperties": false
16554
+ },
16555
+ "handlerCode": "async (input) => {\n const flattenItem = item => ({\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n eTag: item.eTag || null,\n cTag: item.cTag || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n })\n\n const params = new URLSearchParams()\n params.set(\n '$select',\n 'id,name,webUrl,size,createdDateTime,lastModifiedDateTime,eTag,cTag,parentReference,file,folder,package',\n )\n if (input.top)\n params.set('$top', String(input.top))\n if (input.orderBy)\n params.set('$orderby', input.orderBy)\n\n const basePath = input.itemId\n ? `/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}/children`\n : `/drives/${encodeURIComponent(input.driveId)}/root/children`\n\n const res = await integration.fetch(`${basePath}?${params.toString()}`)\n const data = await res.json()\n\n return {\n driveId: input.driveId,\n itemId: input.itemId || null,\n children: Array.isArray(data?.value) ? data.value.map(flattenItem) : [],\n nextLink: data?.['@odata.nextLink'] || null,\n }\n}",
16556
+ "scope": "read",
16557
+ "injectFromConfig": {
16558
+ "driveId": "driveId"
16559
+ }
16560
+ },
16561
+ {
16562
+ "name": "get_drive_item_meta",
16563
+ "description": "Get metadata for a file or folder by item ID.",
16564
+ "inputSchema": {
16565
+ "$schema": "http://json-schema.org/draft-07/schema#",
16566
+ "type": "object",
16567
+ "required": [
16568
+ "driveId",
16569
+ "itemId"
16570
+ ],
16571
+ "properties": {
16572
+ "driveId": {
16573
+ "type": "string",
16574
+ "description": "Document library drive ID."
16575
+ },
16576
+ "itemId": {
16577
+ "type": "string",
16578
+ "description": "Drive item ID for the file or folder."
16579
+ }
16580
+ },
16581
+ "additionalProperties": false
16582
+ },
16583
+ "handlerCode": "async (input) => {\n const flattenItem = item => ({\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n eTag: item.eTag || null,\n cTag: item.cTag || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n })\n\n const params = new URLSearchParams()\n params.set(\n '$select',\n 'id,name,webUrl,size,createdDateTime,lastModifiedDateTime,eTag,cTag,parentReference,file,folder,package',\n )\n const res = await integration.fetch(`/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}?${params.toString()}`)\n return flattenItem(await res.json())\n}",
16584
+ "scope": "read",
16585
+ "injectFromConfig": {
16586
+ "driveId": "driveId"
16587
+ }
16588
+ },
16589
+ {
16590
+ "name": "search_files",
16591
+ "description": "Search files within the connected document library. KQL syntax is supported.",
16592
+ "inputSchema": {
16593
+ "$schema": "http://json-schema.org/draft-07/schema#",
16594
+ "type": "object",
16595
+ "required": [
16596
+ "query"
16597
+ ],
16598
+ "properties": {
16599
+ "query": {
16600
+ "type": "string",
16601
+ "description": "Search query string. Microsoft Graph KQL syntax is supported."
16602
+ },
16603
+ "siteId": {
16604
+ "type": "string",
16605
+ "description": "Optional site ID to keep only hits from a specific SharePoint site."
16606
+ },
16607
+ "driveId": {
16608
+ "type": "string",
16609
+ "description": "Optional drive ID to keep only hits from a specific document library."
16610
+ },
16611
+ "from": {
16612
+ "type": "integer",
16613
+ "minimum": 0,
16614
+ "description": "Offset into the Graph search results."
16615
+ },
16616
+ "size": {
16617
+ "type": "integer",
16618
+ "minimum": 1,
16619
+ "maximum": 50,
16620
+ "description": "Maximum number of hits to request from Graph."
16621
+ }
16622
+ },
16623
+ "additionalProperties": false
16624
+ },
16625
+ "handlerCode": "async (input) => {\n const extractFallbackRegion = (error) => {\n const texts = [error?.data?.body, error?.message].filter(s => typeof s === 'string')\n for (const text of texts) {\n const match = text.match(/Only valid regions are ([A-Z,\\s]+)/i)\n const region = match?.[1]?.split(',').map(r => r.trim().toUpperCase()).filter(Boolean)[0]\n if (region)\n return region\n }\n return null\n }\n\n const runSearch = async (region) => {\n const res = await integration.fetch('/search/query', {\n method: 'POST',\n body: {\n requests: [{\n entityTypes: ['driveItem'],\n query: { queryString: input.query },\n from: typeof input.from === 'number' ? input.from : 0,\n size: typeof input.size === 'number' ? input.size : 25,\n region,\n }],\n },\n })\n return res.json()\n }\n\n const flattenHit = (hit) => {\n const resource = hit?.resource || {}\n const parentReference = resource.parentReference || {}\n return {\n id: resource.id || hit?.hitId || null,\n name: resource.name || null,\n webUrl: resource.webUrl || null,\n summary: hit?.summary || '',\n rank: hit?.rank ?? null,\n createdDateTime: resource.createdDateTime || null,\n lastModifiedDateTime: resource.lastModifiedDateTime || null,\n mimeType: resource.file?.mimeType || null,\n size: resource.size ?? null,\n isFolder: Boolean(resource.folder || resource.package),\n isFile: Boolean(resource.file),\n driveId: parentReference.driveId || null,\n siteId: parentReference.siteId || null,\n parentReference,\n }\n }\n\n let data\n try {\n data = await runSearch('NAM')\n }\n catch (error) {\n const fallback = extractFallbackRegion(error)\n if (!fallback)\n throw error\n data = await runSearch(fallback)\n }\n\n const container = data?.value?.[0]?.hitsContainers?.[0]\n const allHits = Array.isArray(container?.hits) ? container.hits.map(flattenHit) : []\n const hits = allHits.filter((hit) => {\n if (input.siteId && hit.siteId !== input.siteId)\n return false\n if (input.driveId && hit.driveId !== input.driveId)\n return false\n return true\n })\n\n return {\n query: input.query,\n hits,\n total: container?.total ?? hits.length,\n moreResultsAvailable: Boolean(container?.moreResultsAvailable),\n }\n}",
16626
+ "scope": "read",
16627
+ "injectFromConfig": {
16628
+ "siteId": "siteId",
16629
+ "driveId": "driveId"
16630
+ }
16631
+ },
16632
+ {
16633
+ "name": "read_file_content",
16634
+ "description": "Read a file's contents. Provide the file item ID. Use list_folder or search_files to discover file IDs.",
16635
+ "inputSchema": {
16636
+ "$schema": "http://json-schema.org/draft-07/schema#",
16637
+ "type": "object",
16638
+ "required": [
16639
+ "driveId",
16640
+ "itemId"
16641
+ ],
16642
+ "properties": {
16643
+ "driveId": {
16644
+ "type": "string",
16645
+ "description": "Document library drive ID."
16646
+ },
16647
+ "itemId": {
16648
+ "type": "string",
16649
+ "description": "Drive item ID for the file."
16650
+ },
16651
+ "mimeType": {
16652
+ "type": "string",
16653
+ "description": "Optional MIME type from get_drive_item_meta or search_files."
16654
+ },
16655
+ "previewPages": {
16656
+ "type": "integer",
16657
+ "minimum": 1,
16658
+ "maximum": 10,
16659
+ "description": "Number of pages to render as images and return alongside the text (PDF only). Omit or set to 0 to skip. Useful for visually checking signatures, logos, or layout."
16660
+ }
16661
+ },
16662
+ "additionalProperties": false
16663
+ },
16664
+ "handlerCode": "async (input) => {\n const params = new URLSearchParams()\n params.set(\n '$select',\n 'id,name,webUrl,size,createdDateTime,lastModifiedDateTime,parentReference,file,folder,package',\n )\n const res = await integration.fetch(`/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}?${params.toString()}`)\n const item = await res.json()\n const mimeType = input.mimeType || item?.file?.mimeType || null\n\n if (item?.folder || item?.package) {\n return {\n driveId: input.driveId,\n itemId: input.itemId,\n name: item?.name || null,\n mimeType,\n content: null,\n message: 'Folders do not have readable file content.',\n }\n }\n\n const extracted = await utils.extractFileContent({\n auth: true,\n source: `/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}/content`,\n previewPages: input.previewPages || 0,\n })\n\n return {\n driveId: input.driveId,\n itemId: input.itemId,\n name: item?.name || null,\n webUrl: item?.webUrl || null,\n mimeType,\n ...extracted,\n }\n}",
16665
+ "scope": "read",
16666
+ "injectFromConfig": {
16667
+ "driveId": "driveId"
16668
+ }
16669
+ },
16670
+ {
16671
+ "name": "create_folder",
16672
+ "description": "Create a new sub-folder inside the connected folder. Omit parentItemId to create directly inside the connected folder.",
16673
+ "inputSchema": {
16674
+ "$schema": "http://json-schema.org/draft-07/schema#",
16675
+ "type": "object",
16676
+ "required": [
16677
+ "driveId",
16678
+ "name"
16679
+ ],
16680
+ "properties": {
16681
+ "driveId": {
16682
+ "type": "string",
16683
+ "description": "Document library drive ID."
16684
+ },
16685
+ "name": {
16686
+ "type": "string",
16687
+ "description": "Folder name to create."
16688
+ },
16689
+ "parentItemId": {
16690
+ "type": "string",
16691
+ "description": "Optional parent folder item ID. Omit to create in the drive root."
16692
+ },
16693
+ "conflictBehavior": {
16694
+ "type": "string",
16695
+ "enum": [
16696
+ "rename",
16697
+ "replace",
16698
+ "fail"
16699
+ ],
16700
+ "description": "How Graph should handle name conflicts. Defaults to rename."
16701
+ }
16702
+ },
16703
+ "additionalProperties": false
16704
+ },
16705
+ "handlerCode": "async (input) => {\n const path = input.parentItemId\n ? `/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.parentItemId)}/children`\n : `/drives/${encodeURIComponent(input.driveId)}/root/children`\n\n const res = await integration.fetch(path, {\n method: 'POST',\n body: {\n name: input.name,\n folder: {},\n '@microsoft.graph.conflictBehavior': input.conflictBehavior || 'rename',\n },\n })\n const item = await res.json()\n return {\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n }\n}",
16706
+ "scope": "write",
16707
+ "injectFromConfig": {
16708
+ "driveId": "driveId",
16709
+ "parentItemId": "folderId"
16710
+ }
16711
+ },
16712
+ {
16713
+ "name": "move_drive_item",
16714
+ "description": "Move a file or folder to a different parent folder within the document library.",
16715
+ "inputSchema": {
16716
+ "$schema": "http://json-schema.org/draft-07/schema#",
16717
+ "type": "object",
16718
+ "required": [
16719
+ "driveId",
16720
+ "itemId",
16721
+ "destinationParentId"
16722
+ ],
16723
+ "properties": {
16724
+ "driveId": {
16725
+ "type": "string",
16726
+ "description": "Document library drive ID."
16727
+ },
16728
+ "itemId": {
16729
+ "type": "string",
16730
+ "description": "Drive item ID for the file or folder to move."
16731
+ },
16732
+ "destinationParentId": {
16733
+ "type": "string",
16734
+ "description": "Destination folder item ID."
16735
+ },
16736
+ "newName": {
16737
+ "type": "string",
16738
+ "description": "Optional new item name to apply during the move."
16739
+ }
16740
+ },
16741
+ "additionalProperties": false
16742
+ },
16743
+ "handlerCode": "async (input) => {\n const body = {\n parentReference: {\n id: input.destinationParentId,\n },\n }\n if (input.newName)\n body.name = input.newName\n\n const res = await integration.fetch(`/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}`, {\n method: 'PATCH',\n body,\n })\n const item = await res.json()\n return {\n id: item.id,\n name: item.name || null,\n webUrl: item.webUrl || null,\n size: item.size ?? null,\n createdDateTime: item.createdDateTime || null,\n lastModifiedDateTime: item.lastModifiedDateTime || null,\n mimeType: item.file?.mimeType || null,\n isFolder: Boolean(item.folder || item.package),\n isFile: Boolean(item.file),\n childCount: item.folder?.childCount ?? null,\n parentReference: item.parentReference || null,\n }\n}",
16744
+ "scope": "write",
16745
+ "injectFromConfig": {
16746
+ "driveId": "driveId"
16747
+ }
16748
+ },
16749
+ {
16750
+ "name": "delete_drive_item",
16751
+ "description": "Delete a file or folder by item ID.",
16752
+ "inputSchema": {
16753
+ "$schema": "http://json-schema.org/draft-07/schema#",
16754
+ "type": "object",
16755
+ "required": [
16756
+ "driveId",
16757
+ "itemId"
16758
+ ],
16759
+ "properties": {
16760
+ "driveId": {
16761
+ "type": "string",
16762
+ "description": "Document library drive ID."
16763
+ },
16764
+ "itemId": {
16765
+ "type": "string",
16766
+ "description": "Drive item ID for the file or folder to delete."
15837
16767
  }
15838
- }
16768
+ },
16769
+ "additionalProperties": false
15839
16770
  },
15840
- "handlerCode": "async (input) => {\n const body = {\n title: input.title || undefined,\n description: input.description || undefined,\n properties: input.properties || undefined,\n archived: input.archived === undefined ? undefined : input.archived,\n }\n const res = await integration.fetch(`/databases/${encodeURIComponent(input.database_id)}`, { method: 'PATCH', body })\n return await res.json()\n}",
16771
+ "handlerCode": "async (input) => {\n const res = await integration.fetch(`/drives/${encodeURIComponent(input.driveId)}/items/${encodeURIComponent(input.itemId)}`, {\n method: 'DELETE',\n })\n if (res.status === 204)\n return { success: true, status: 204 }\n try {\n return await res.json()\n }\n catch {\n return { success: res.ok, status: res.status }\n }\n}",
15841
16772
  "scope": "write",
15842
- "toolset": "databases"
16773
+ "injectFromConfig": {
16774
+ "driveId": "driveId"
16775
+ }
15843
16776
  }
15844
16777
  ],
15845
- "variantOwnerType": null
16778
+ "variantOwnerType": "sharepoint"
15846
16779
  },
15847
16780
  "trello": {
15848
16781
  "manifest": {
@@ -16090,7 +17023,7 @@ const GENERATED_INTEGRATIONS = {
16090
17023
  }
16091
17024
  ]
16092
17025
  },
16093
- "prompt": null,
17026
+ "usageGuide": null,
16094
17027
  "variants": {
16095
17028
  "variants": {
16096
17029
  "api_key_token": {
@@ -17071,7 +18004,7 @@ const GENERATED_INTEGRATIONS = {
17071
18004
  }
17072
18005
  ]
17073
18006
  },
17074
- "prompt": null,
18007
+ "usageGuide": null,
17075
18008
  "variants": {
17076
18009
  "variants": {
17077
18010
  "api_key_token": {
@@ -17737,6 +18670,10 @@ function cloneCredentialVariant(variant) {
17737
18670
  headers: ((_a = variant.injection) == null ? void 0 : _a.headers) ? { ...variant.injection.headers } : void 0,
17738
18671
  query: ((_b = variant.injection) == null ? void 0 : _b.query) ? { ...variant.injection.query } : void 0
17739
18672
  },
18673
+ preprocess: typeof variant.preprocess === "object" && variant.preprocess !== null ? {
18674
+ ...variant.preprocess,
18675
+ allowedOrigins: Array.isArray(variant.preprocess.allowedOrigins) ? [...variant.preprocess.allowedOrigins] : void 0
18676
+ } : variant.preprocess,
17740
18677
  healthCheck: "path" in variant.healthCheck ? { ...variant.healthCheck } : { notViable: true }
17741
18678
  };
17742
18679
  }
@@ -17785,9 +18722,9 @@ function loadIntegrationManifest(type) {
17785
18722
  const entry = getIntegration(type);
17786
18723
  return entry ? cloneManifest(entry.manifest) : null;
17787
18724
  }
17788
- function loadIntegrationPrompt(type) {
18725
+ function loadIntegrationUsageGuide(type) {
17789
18726
  var _a, _b;
17790
- return (_b = (_a = getIntegration(type)) == null ? void 0 : _a.prompt) != null ? _b : null;
18727
+ return (_b = (_a = getIntegration(type)) == null ? void 0 : _a.usageGuide) != null ? _b : null;
17791
18728
  }
17792
18729
  function loadIntegrationToolsets(type) {
17793
18730
  var _a;
@@ -17992,6 +18929,35 @@ function makeIntegrationToolName(type, name, nodeId) {
17992
18929
  return `${base}${suffix}`;
17993
18930
  }
17994
18931
 
18932
+ const hoistedArtifactsByResult = /* @__PURE__ */ new WeakMap();
18933
+ function attachHoistedArtifacts(result, artifacts) {
18934
+ if (artifacts.length)
18935
+ hoistedArtifactsByResult.set(result, artifacts);
18936
+ return result;
18937
+ }
18938
+ function getHoistedArtifacts(result) {
18939
+ var _a;
18940
+ return (_a = hoistedArtifactsByResult.get(result)) != null ? _a : [];
18941
+ }
18942
+ function hoistExtractFileContentArtifacts(result) {
18943
+ if (!result || typeof result !== "object" || Array.isArray(result))
18944
+ return { cleanedResult: result, artifacts: [] };
18945
+ const images = Array.isArray(result.pageImages) ? result.pageImages.filter((value) => typeof value === "string") : [];
18946
+ if (!images.length)
18947
+ return { cleanedResult: result, artifacts: [] };
18948
+ const { pageImages: _dropped, ...rest } = result;
18949
+ return {
18950
+ cleanedResult: rest,
18951
+ artifacts: images.map((data) => ({
18952
+ type: "image",
18953
+ mimeType: "image/jpeg",
18954
+ data
18955
+ }))
18956
+ };
18957
+ }
18958
+
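The WeakMap plumbing above strips `pageImages` out of `extractFileContent` results and re-attaches them to the tool-run result as image artifacts. A trace of that data flow, assuming the functions above are in scope (the base64 payload is a placeholder):

```js
// Sketch: what hoistExtractFileContentArtifacts does to a handler result.
const extracted = {
  content: '...markdown...',
  metadata: { filename: 'contract.pdf' },
  pageImages: ['<base64 jpeg>'],
}
const { cleanedResult, artifacts } = hoistExtractFileContentArtifacts(extracted)
// cleanedResult -> { content, metadata }  (pageImages dropped)
// artifacts     -> [{ type: 'image', mimeType: 'image/jpeg', data: '<base64 jpeg>' }]
// withLogging (below) later calls attachHoistedArtifacts(toolRunResult, artifacts),
// and callers recover them via getHoistedArtifacts(toolRunResult).
```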
18959
+ const EXECUTION_RESULT = /* @__PURE__ */ Symbol("executionResult");
18960
+ const EXECUTION_HOISTED_ARTIFACTS = /* @__PURE__ */ Symbol("executionHoistedArtifacts");
17995
18961
  function createSafeHandlerFromString(handlerString, getIntegration, utils) {
17996
18962
  const realConsole = console;
17997
18963
  const isolatedConsole = {
@@ -18061,7 +19027,15 @@ function createSafeHandlerFromString(handlerString, getIntegration, utils) {
18061
19027
  const code = `module.exports = async function(input) { return (${handlerString})(input) }`;
18062
19028
  const script = new vm.Script(code);
18063
19029
  script.runInContext(context);
18064
- return withLogging(context.module.exports, isolatedConsole);
19030
+ return withLogging(async (args) => {
19031
+ const hoistedArtifacts = [];
19032
+ context.utils = createRuntimeUtils(utils, hoistedArtifacts);
19033
+ const result = await context.module.exports(args);
19034
+ return {
19035
+ [EXECUTION_RESULT]: result,
19036
+ [EXECUTION_HOISTED_ARTIFACTS]: hoistedArtifacts
19037
+ };
19038
+ }, isolatedConsole);
18065
19039
  }
18066
19040
  function withLogging(handler, vmConsole) {
18067
19041
  function safeSerializeForLog(value) {
@@ -18099,8 +19073,14 @@ function withLogging(handler, vmConsole) {
18099
19073
  logs.push(line);
18100
19074
  originalLog.apply(vmConsole, args2);
18101
19075
  };
18102
- const result = await handler(args);
18103
- return { success: true, result, logs };
19076
+ const execution = await handler(args);
19077
+ const { result, hoistedArtifacts } = unwrapExecutionEnvelope(execution);
19078
+ const toolRunResult = {
19079
+ success: true,
19080
+ result,
19081
+ logs
19082
+ };
19083
+ return hoistedArtifacts.length ? attachHoistedArtifacts(toolRunResult, hoistedArtifacts) : toolRunResult;
18104
19084
  } catch (err) {
18105
19085
  logs.push((err == null ? void 0 : err.stack) || String(err));
18106
19086
  const result = err && typeof err === "object" ? {
@@ -18115,6 +19095,32 @@ function withLogging(handler, vmConsole) {
18115
19095
  }
18116
19096
  };
18117
19097
  }
19098
+ function createRuntimeUtils(baseUtils, hoistedArtifacts) {
19099
+ const runtimeUtils = { ...baseUtils || {} };
19100
+ const extractFileContent = runtimeUtils.extractFileContent;
19101
+ if (typeof extractFileContent === "function") {
19102
+ runtimeUtils.extractFileContent = async (...args) => {
19103
+ const extracted = await extractFileContent(...args);
19104
+ const { cleanedResult, artifacts } = hoistExtractFileContentArtifacts(extracted);
19105
+ hoistedArtifacts.push(...artifacts);
19106
+ return cleanedResult;
19107
+ };
19108
+ }
19109
+ return runtimeUtils;
19110
+ }
19111
+ function unwrapExecutionEnvelope(value) {
19112
+ if (!value || typeof value !== "object" || !(EXECUTION_RESULT in value)) {
19113
+ return {
19114
+ result: value,
19115
+ hoistedArtifacts: []
19116
+ };
19117
+ }
19118
+ const envelope = value;
19119
+ return {
19120
+ result: envelope[EXECUTION_RESULT],
19121
+ hoistedArtifacts: Array.isArray(envelope[EXECUTION_HOISTED_ARTIFACTS]) ? envelope[EXECUTION_HOISTED_ARTIFACTS] : []
19122
+ };
19123
+ }
18118
19124
 
18119
19125
  const turndown = new TurndownService({
18120
19126
  headingStyle: "atx",
@@ -18609,7 +19615,10 @@ function createGetIntegration(integrations, proxy) {
18609
19615
 
18610
19616
  const EXTRACT_FILE_PY = `#!/usr/bin/env python3
18611
19617
  import argparse
19618
+ import base64
18612
19619
  import csv
19620
+ import email
19621
+ import email.policy
18613
19622
  import html
18614
19623
  import json
18615
19624
  import mimetypes
@@ -18619,6 +19628,18 @@ from pathlib import Path
18619
19628
 
18620
19629
  from markitdown import MarkItDown
18621
19630
 
19631
+ try:
19632
+ import fitz # PyMuPDF
19633
+ _FITZ_AVAILABLE = True
19634
+ except ImportError:
19635
+ _FITZ_AVAILABLE = False
19636
+
19637
+ try:
19638
+ import extract_msg as _extract_msg
19639
+ _EXTRACT_MSG_AVAILABLE = True
19640
+ except ImportError:
19641
+ _EXTRACT_MSG_AVAILABLE = False
19642
+
18622
19643
 
18623
19644
  def collapse_whitespace(value: str) -> str:
18624
19645
  return re.sub(r"\\s+", " ", value or "").strip()
@@ -18663,6 +19684,12 @@ def sniff_kind(path: Path) -> str:
18663
19684
  suffix = path.suffix.lower()
18664
19685
  if suffix in DIRECT_TEXT_EXTENSIONS | MARKITDOWN_EXTENSIONS:
18665
19686
  return suffix.lstrip(".")
19687
+ if suffix == ".msg":
19688
+ return "msg"
19689
+ if suffix == ".eml":
19690
+ return "eml"
19691
+ if suffix == ".zip":
19692
+ return "zip"
18666
19693
 
18667
19694
  mime_guess, _ = mimetypes.guess_type(path.name)
18668
19695
  if mime_guess == "application/pdf":
@@ -18682,10 +19709,78 @@ def sniff_kind(path: Path) -> str:
18682
19709
  return "xlsx"
18683
19710
  if "ppt/presentation.xml" in names:
18684
19711
  return "pptx"
19712
+ return "zip"
18685
19713
 
18686
19714
  return "unknown"
18687
19715
 
18688
19716
 
19717
+ def parse_pdf_date(date_str: str):
19718
+ """Parse a PDF date string (D:YYYYMMDDHHmmSS...) to ISO-8601."""
19719
+ if not date_str or not isinstance(date_str, str) or not date_str.startswith("D:"):
19720
+ return None
19721
+ try:
19722
+ raw = date_str[2:]
19723
+ m = re.match(r"(\\d{4})(\\d{2})(\\d{2})(\\d{2})(\\d{2})(\\d{2})", raw)
19724
+ if not m:
19725
+ return None
19726
+ y, mo, d, h, mi, s = m.groups()
19727
+ return f"{y}-{mo}-{d}T{h}:{mi}:{s}Z"
19728
+ except Exception:
19729
+ return None
19730
+
19731
+
19732
+ def get_xlsx_sheet_names(path: Path) -> list:
19733
+ try:
19734
+ with zipfile.ZipFile(path, "r") as archive:
19735
+ if "xl/workbook.xml" not in archive.namelist():
19736
+ return []
19737
+ content = archive.read("xl/workbook.xml").decode("utf-8", errors="replace")
19738
+ return re.findall(r'<sheet\\b[^>]+\\bname="([^"]+)"', content)
19739
+ except Exception:
19740
+ return []
19741
+
19742
+
19743
+ def get_pptx_slide_count(path: Path) -> int:
19744
+ try:
19745
+ with zipfile.ZipFile(path, "r") as archive:
19746
+ return sum(
19747
+ 1 for name in archive.namelist()
19748
+ if re.match(r"ppt/slides/slide\\d+\\.xml$", name)
19749
+ )
19750
+ except Exception:
19751
+ return 0
19752
+
19753
+
19754
+ def get_office_core_props(path: Path) -> dict:
19755
+ """Read author and lastModifiedBy from docProps/core.xml (all Office formats)."""
19756
+ try:
19757
+ with zipfile.ZipFile(path, "r") as archive:
19758
+ if "docProps/core.xml" not in archive.namelist():
19759
+ return {}
19760
+ content = archive.read("docProps/core.xml").decode("utf-8", errors="replace")
19761
+ props = {}
19762
+ m = re.search(r"<dc:creator[^>]*>([^<]+)</dc:creator>", content)
19763
+ if m:
19764
+ props["author"] = m.group(1).strip()
19765
+ m = re.search(r"<cp:lastModifiedBy[^>]*>([^<]+)</cp:lastModifiedBy>", content)
19766
+ if m:
19767
+ props["lastModifiedBy"] = m.group(1).strip()
19768
+ return props
19769
+ except Exception:
19770
+ return {}
19771
+
19772
+
19773
+ def has_tracked_changes_docx(path: Path) -> bool:
19774
+ try:
19775
+ with zipfile.ZipFile(path, "r") as archive:
19776
+ if "word/document.xml" not in archive.namelist():
19777
+ return False
19778
+ content = archive.read("word/document.xml").decode("utf-8", errors="replace")
19779
+ return bool(re.search(r"<w:(del|ins)\\s", content))
19780
+ except Exception:
19781
+ return False
19782
+
19783
+
18689
19784
  def read_text(path: Path) -> dict:
18690
19785
  content = path.read_text(encoding="utf-8", errors="replace").strip()
18691
19786
  return {"kind": "text", "content": content, "metadata": {"filename": path.name}}
@@ -18749,6 +19844,278 @@ def read_csv_file(path: Path) -> dict:
18749
19844
  }
18750
19845
 
18751
19846
 
19847
+ def _strip_html_to_text(raw_html: str) -> str:
19848
+ """Strip HTML to plain text, removing style/script blocks first."""
19849
+ text = re.sub(r"<style[^>]*>[\\s\\S]*?</style>", " ", raw_html, flags=re.IGNORECASE)
19850
+ text = re.sub(r"<script[^>]*>[\\s\\S]*?<\/script>", " ", text, flags=re.IGNORECASE)
19851
+ text = re.sub(r"<br\\s*/?>", "\\n", text, flags=re.IGNORECASE)
19852
+ text = re.sub(r"</(p|div|li|tr|h[1-6])>", "\\n", text, flags=re.IGNORECASE)
19853
+ text = re.sub(r"<[^>]+>", " ", text)
19854
+ text = html.unescape(text)
19855
+ return re.sub(r"\\s+", " ", text).strip()
19856
+
19857
+
19858
+ def read_msg(path: Path) -> dict:
19859
+ import tempfile as _tempfile
19860
+
19861
+ if not _EXTRACT_MSG_AVAILABLE:
19862
+ raise RuntimeError(
19863
+ f"extract-msg is not installed; cannot read {path.name}. "
19864
+ "Install with: pip install extract-msg"
19865
+ )
19866
+
19867
+ try:
19868
+ msg = _extract_msg.openMsg(str(path))
19869
+ except Exception as exc:
19870
+ raise RuntimeError(f"MSG extraction failed for {path.name}: {exc}") from exc
19871
+
19872
+ try:
19873
+ subject = (msg.subject or "").strip() or None
19874
+ sender = (msg.sender or "").strip() or None
19875
+ to = (msg.to or "").strip() or None
19876
+ cc = (msg.cc or "").strip() or None
19877
+ date = str(msg.date).strip() if msg.date else None
19878
+ body = (msg.body or "").strip()
19879
+
19880
+ # Fall back to HTML body stripped to plain text when no plain-text body.
19881
+ if not body:
19882
+ raw_html = getattr(msg, "htmlBody", None) or b""
19883
+ if isinstance(raw_html, bytes):
19884
+ raw_html = raw_html.decode("utf-8", errors="replace")
19885
+ if raw_html:
19886
+ body = _strip_html_to_text(raw_html)
19887
+
19888
+ blocks = []
19889
+ if subject:
19890
+ blocks.append(f"# {subject}")
19891
+ header_lines = []
19892
+ if sender:
19893
+ header_lines.append(f"From: {sender}")
19894
+ if to:
19895
+ header_lines.append(f"To: {to}")
19896
+ if cc:
19897
+ header_lines.append(f"Cc: {cc}")
19898
+ if date:
19899
+ header_lines.append(f"Date: {date}")
19900
+ if header_lines:
19901
+ blocks.append("\\n".join(header_lines))
19902
+ if body:
19903
+ blocks.append(body)
19904
+
19905
+ attachments = list(msg.attachments or [])
19906
+ att_names = []
19907
+ warnings = []
19908
+
19909
+ with _tempfile.TemporaryDirectory() as tmp:
19910
+ tmp_path = Path(tmp)
19911
+ for i, att in enumerate(attachments):
19912
+ att_name = (
19913
+ getattr(att, "longFilename", None)
19914
+ or getattr(att, "shortFilename", None)
19915
+ or ""
19916
+ ).strip() or "attachment"
19917
+ att_names.append(att_name)
19918
+ try:
19919
+ att_path = None
19920
+
19921
+ # Primary: use save() \u2014 handles all attachment types including
19922
+ # OLE embedded objects where .data returns None.
19923
+ att_dir = tmp_path / f"att_{i}"
19924
+ att_dir.mkdir()
19925
+ try:
19926
+ att.save(customPath=str(att_dir))
19927
+ saved = list(att_dir.iterdir())
19928
+ if saved:
19929
+ att_path = saved[0]
19930
+ except Exception:
19931
+ pass
19932
+
19933
+ # Fallback: direct .data bytes.
19934
+ if att_path is None:
19935
+ data = att.data
19936
+ if data is None:
19937
+ warnings.append(f"Attachment '{att_name}' has no data.")
19938
+ continue
19939
+ att_path = att_dir / att_name
19940
+ att_path.write_bytes(data)
19941
+
19942
+ inner = extract(att_path)
19943
+ inner_content = (inner.get("content") or "").strip()
19944
+ if inner_content:
19945
+ blocks.append(f"## Attachment: {att_name}\\n\\n{inner_content}")
19946
+ except Exception as exc:
19947
+ warnings.append(f"Could not extract attachment '{att_name}': {exc}")
19948
+
19949
+ content = join_blocks(blocks)
19950
+ metadata: dict = {"filename": path.name, "attachmentCount": len(attachments)}
19951
+ if subject:
19952
+ metadata["subject"] = subject
19953
+ if sender:
19954
+ metadata["sender"] = sender
19955
+ if to:
19956
+ metadata["to"] = to
19957
+ if cc:
19958
+ metadata["cc"] = cc
19959
+ if date:
19960
+ metadata["date"] = date
19961
+ if att_names:
19962
+ metadata["attachmentNames"] = att_names
19963
+
19964
+ result: dict = {"kind": "msg", "content": content, "metadata": metadata}
19965
+ if warnings:
19966
+ result["warnings"] = warnings
19967
+ return result
19968
+
19969
+ finally:
19970
+ msg.close()
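All of the readers in this script assemble the same result dictionary; a hedged TypeScript view of that shape, inferred from the code above and from the host-side JSON parsing further down:

```ts
// Inferred shape, not an exported type. Optional fields appear only when populated.
interface ExtractResult {
  kind: string;                        // e.g. "msg", "eml", "zip", "pdf"
  content: string;                     // body plus "## Attachment: ..." sections
  metadata?: Record<string, unknown>;  // filename, subject, sender, to, cc, date, ...
  warnings?: string[];                 // per-attachment failures, non-fatal
  pageImages?: string[];               // base64 JPEGs (PDF only, opt-in)
}
```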
19971
+
19972
+
19973
+ def read_eml(path: Path) -> dict:
19974
+ import tempfile as _tempfile
19975
+
19976
+ raw = path.read_bytes()
19977
+ msg = email.message_from_bytes(raw, policy=email.policy.compat32)
19978
+
19979
+ subject = collapse_whitespace(msg.get("Subject", "") or "")
19980
+ sender = collapse_whitespace(msg.get("From", "") or "")
19981
+ to = collapse_whitespace(msg.get("To", "") or "")
19982
+ cc = collapse_whitespace(msg.get("Cc", "") or "")
19983
+ date = collapse_whitespace(msg.get("Date", "") or "")
19984
+
19985
+ # Walk parts: collect plain-text body and attachments.
19986
+ body_parts = []
19987
+ attachments = [] # list of (filename, bytes)
19988
+
19989
+ for part in msg.walk():
19990
+ content_type = part.get_content_type()
19991
+ disposition = (part.get("Content-Disposition") or "").lower()
19992
+ filename = part.get_filename()
19993
+
19994
+ if filename:
19995
+ try:
19996
+ data = part.get_payload(decode=True)
19997
+ if data:
19998
+ attachments.append((filename, data))
19999
+ except Exception:
20000
+ pass
20001
+ elif content_type == "text/plain" and "attachment" not in disposition:
20002
+ try:
20003
+ charset = part.get_content_charset() or "utf-8"
20004
+ payload = part.get_payload(decode=True)
20005
+ if payload:
20006
+ body_parts.append(payload.decode(charset, errors="replace"))
20007
+ except Exception:
20008
+ pass
20009
+
20010
+ body = "\\n\\n".join(p.strip() for p in body_parts if p.strip())
20011
+
20012
+ blocks = []
20013
+ if subject:
20014
+ blocks.append(f"# {subject}")
20015
+ header_lines = []
20016
+ if sender:
20017
+ header_lines.append(f"From: {sender}")
20018
+ if to:
20019
+ header_lines.append(f"To: {to}")
20020
+ if cc:
20021
+ header_lines.append(f"Cc: {cc}")
20022
+ if date:
20023
+ header_lines.append(f"Date: {date}")
20024
+ if header_lines:
20025
+ blocks.append("\\n".join(header_lines))
20026
+ if body:
20027
+ blocks.append(body)
20028
+
20029
+ att_names = []
20030
+ warnings = []
20031
+
20032
+ with _tempfile.TemporaryDirectory() as tmp:
20033
+ tmp_path = Path(tmp)
20034
+ for att_name, data in attachments:
20035
+ att_names.append(att_name)
20036
+ try:
20037
+ att_path = tmp_path / Path(att_name).name
20038
+ att_path.write_bytes(data)
20039
+ inner = extract(att_path)
20040
+ inner_content = (inner.get("content") or "").strip()
20041
+ if inner_content:
20042
+ blocks.append(f"## Attachment: {att_name}\\n\\n{inner_content}")
20043
+ except Exception as exc:
20044
+ warnings.append(f"Could not extract attachment '{att_name}': {exc}")
20045
+
20046
+ content = join_blocks(blocks)
20047
+ metadata: dict = {"filename": path.name, "attachmentCount": len(attachments)}
20048
+ if subject:
20049
+ metadata["subject"] = subject
20050
+ if sender:
20051
+ metadata["sender"] = sender
20052
+ if to:
20053
+ metadata["to"] = to
20054
+ if cc:
20055
+ metadata["cc"] = cc
20056
+ if date:
20057
+ metadata["date"] = date
20058
+ if att_names:
20059
+ metadata["attachmentNames"] = att_names
20060
+
20061
+ result: dict = {"kind": "eml", "content": content, "metadata": metadata}
20062
+ if warnings:
20063
+ result["warnings"] = warnings
20064
+ return result
20065
+
20066
+
20067
+ def read_zip(path: Path) -> dict:
20068
+ import tempfile as _tempfile
20069
+
20070
+ try:
20071
+ with zipfile.ZipFile(path, "r") as archive:
20072
+ all_names = archive.namelist()
20073
+ except Exception as exc:
20074
+ raise RuntimeError(f"ZIP extraction failed for {path.name}: {exc}") from exc
20075
+
20076
+ # Skip directories and hidden/system files.
20077
+ file_entries = [
20078
+ n for n in all_names
20079
+ if not n.endswith("/") and not Path(n).name.startswith(".")
20080
+ ]
20081
+
20082
+ blocks = []
20083
+ warnings = []
20084
+
20085
+ try:
20086
+ with zipfile.ZipFile(path, "r") as archive:
20087
+ with _tempfile.TemporaryDirectory() as tmp:
20088
+ tmp_path = Path(tmp)
20089
+ for entry in file_entries:
20090
+ entry_name = Path(entry).name
20091
+ if not entry_name:
20092
+ continue
20093
+ try:
20094
+ data = archive.read(entry)
20095
+ entry_path = tmp_path / entry_name
20096
+ entry_path.write_bytes(data)
20097
+ inner = extract(entry_path)
20098
+ inner_content = (inner.get("content") or "").strip()
20099
+ if inner_content:
20100
+ blocks.append(f"## {entry}\\n\\n{inner_content}")
20101
+ except Exception as exc:
20102
+ warnings.append(f"Could not extract '{entry}': {exc}")
20103
+ except Exception as exc:
20104
+ raise RuntimeError(f"ZIP extraction failed for {path.name}: {exc}") from exc
20105
+
20106
+ content = join_blocks(blocks)
20107
+ metadata: dict = {
20108
+ "filename": path.name,
20109
+ "fileCount": len(file_entries),
20110
+ "fileNames": file_entries,
20111
+ }
20112
+
20113
+ result: dict = {"kind": "zip", "content": content, "metadata": metadata}
20114
+ if warnings:
20115
+ result["warnings"] = warnings
20116
+ return result
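The three container readers (.msg, .eml, .zip) share one trick: each attachment or archive entry is written to a temp file and routed back through extract(), so nested containers flatten into a single document. A hypothetical illustration of the resulting object for a small archive:

```ts
// Invented example: extracting "invoices.zip" containing a DOCX and a PDF.
// Each extractable entry becomes its own "## <entry>" section.
const zipResult = {
  kind: "zip",
  content: "## invoices/jan.docx\n\n...docx text...\n\n## invoices/feb.pdf\n\n...pdf text...",
  metadata: {
    filename: "invoices.zip",
    fileCount: 2,
    fileNames: ["invoices/jan.docx", "invoices/feb.pdf"],
  },
};
```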
20117
+
20118
+
18752
20119
  def read_with_markitdown(path: Path, kind: str) -> dict:
18753
20120
  try:
18754
20121
  result = MarkItDown().convert(str(path))
@@ -18756,14 +20123,132 @@ def read_with_markitdown(path: Path, kind: str) -> dict:
18756
20123
  raise RuntimeError(f"MarkItDown extraction failed for {path.name}: {exc}") from exc
18757
20124
 
18758
20125
  content = (getattr(result, "text_content", "") or "").strip()
20126
+ metadata: dict = {"filename": path.name, "parser": "markitdown"}
20127
+
20128
+ # XLSX: surface sheet names so the agent knows the workbook structure upfront.
20129
+ if kind == "xlsx":
20130
+ sheet_names = get_xlsx_sheet_names(path)
20131
+ if sheet_names:
20132
+ metadata["sheetNames"] = sheet_names
20133
+
20134
+ # DOCX: tracked changes flag + author metadata from core properties.
20135
+ if kind == "docx":
20136
+ metadata["hasTrackedChanges"] = has_tracked_changes_docx(path)
20137
+ core = get_office_core_props(path)
20138
+ if core.get("author"):
20139
+ metadata["author"] = core["author"]
20140
+ if core.get("lastModifiedBy"):
20141
+ metadata["lastModifiedBy"] = core["lastModifiedBy"]
20142
+
20143
+ # PPTX: slide count.
20144
+ if kind == "pptx":
20145
+ count = get_pptx_slide_count(path)
20146
+ if count:
20147
+ metadata["slideCount"] = count
20148
+
18759
20149
  return {
18760
20150
  "kind": kind,
18761
20151
  "content": content,
18762
- "metadata": {"filename": path.name, "parser": "markitdown"},
20152
+ "metadata": metadata,
18763
20153
  }
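The enrichment above means Office files carry structural hints without a second tool call; hypothetical metadata objects (values invented, shapes per the branches above) for each kind:

```ts
const xlsxMeta = { filename: "q3.xlsx", parser: "markitdown", sheetNames: ["Summary", "Raw"] };
const docxMeta = {
  filename: "contract.docx",
  parser: "markitdown",
  hasTrackedChanges: true,
  author: "A. Author",          // from core properties, when present
  lastModifiedBy: "B. Reviewer",
};
const pptxMeta = { filename: "deck.pptx", parser: "markitdown", slideCount: 24 };
```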
18764
20154
 
18765
20155
 
18766
- def extract(path: Path) -> dict:
20156
+ def read_pdf(path: Path, preview_pages: int = 0) -> dict:
20157
+ # Text extraction via MarkItDown (higher quality than pymupdf for most PDFs).
20158
+ try:
20159
+ md_result = MarkItDown().convert(str(path))
20160
+ content = (getattr(md_result, "text_content", "") or "").strip()
20161
+ except Exception as exc:
20162
+ raise RuntimeError(f"MarkItDown extraction failed for {path.name}: {exc}") from exc
20163
+
20164
+ metadata: dict = {"filename": path.name, "parser": "markitdown"}
20165
+
20166
+ if not _FITZ_AVAILABLE:
20167
+ return {"kind": "pdf", "content": content, "metadata": metadata}
20168
+
20169
+ page_images = []
20170
+
20171
+ try:
20172
+ doc = fitz.open(str(path))
20173
+ page_count = doc.page_count
20174
+ metadata["pageCount"] = page_count
20175
+
20176
+ # Document info dictionary (title, author, etc.)
20177
+ info = doc.metadata or {}
20178
+ for src_key, out_key in [
20179
+ ("title", "title"),
20180
+ ("author", "author"),
20181
+ ("subject", "subject"),
20182
+ ("keywords", "keywords"),
20183
+ ("creator", "creator"),
20184
+ ]:
20185
+ val = (info.get(src_key) or "").strip()
20186
+ if val:
20187
+ metadata[out_key] = val
20188
+
20189
+ created = parse_pdf_date(info.get("creationDate", ""))
20190
+ if created:
20191
+ metadata["createdAt"] = created
20192
+ modified = parse_pdf_date(info.get("modDate", ""))
20193
+ if modified:
20194
+ metadata["modifiedAt"] = modified
20195
+
20196
+ metadata["isEncrypted"] = bool(doc.is_encrypted)
20197
+
20198
+ # Single pass over all pages: signatures, annotations, form fields.
20199
+ sig_widget_type = getattr(fitz, "PDF_WIDGET_TYPE_SIGNATURE", 7)
20200
+ non_content_annot_types = {"Widget", "Link"}
20201
+ has_annotations = False
20202
+ has_form_fields = False
20203
+ signatures = []
20204
+
20205
+ for i in range(page_count):
20206
+ page = doc.load_page(i)
20207
+
20208
+ # Widgets: split into signatures and fillable form fields.
20209
+ for widget in (page.widgets() or []):
20210
+ if widget.field_type == sig_widget_type:
20211
+ signer = (widget.field_value or "").strip() or None
20212
+ signatures.append({
20213
+ "fieldName": (widget.field_name or "").strip() or None,
20214
+ "signer": signer,
20215
+ "isSigned": bool(signer),
20216
+ })
20217
+ else:
20218
+ has_form_fields = True
20219
+
20220
+ # Annotations: exclude widget and link annotations.
20221
+ if not has_annotations:
20222
+ for annot in page.annots():
20223
+ if annot.type[1] not in non_content_annot_types:
20224
+ has_annotations = True
20225
+ break
20226
+
20227
+ metadata["hasAnnotations"] = has_annotations
20228
+ metadata["hasFormFields"] = has_form_fields
20229
+ if signatures:
20230
+ metadata["signatures"] = signatures
20231
+
20232
+ # Page preview images (opt-in via previewPages > 0).
20233
+ if preview_pages > 0:
20234
+ n = min(preview_pages, page_count)
20235
+ for i in range(n):
20236
+ pixmap = doc.load_page(i).get_pixmap(dpi=96)
20237
+ jpeg_bytes = pixmap.tobytes("jpeg")
20238
+ page_images.append(base64.b64encode(jpeg_bytes).decode("ascii"))
20239
+
20240
+ doc.close()
20241
+
20242
+ except Exception:
20243
+ pass # Metadata and image enhancement are best-effort; don't fail the extraction.
20244
+
20245
+ result: dict = {"kind": "pdf", "content": content, "metadata": metadata}
20246
+ if page_images:
20247
+ result["pageImages"] = page_images
20248
+ return result
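On the consumer side, each pageImages entry is a base64-encoded 96-dpi JPEG of one page; a minimal sketch of decoding them (the file naming is arbitrary):

```ts
import { writeFileSync } from "node:fs";

// Sketch: persist the opt-in preview images returned by read_pdf.
function savePreviews(result: { pageImages?: string[] }): void {
  (result.pageImages ?? []).forEach((b64, i) => {
    writeFileSync(`page-${i + 1}.jpg`, Buffer.from(b64, "base64"));
  });
}
```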
20249
+
20250
+
20251
+ def extract(path: Path, preview_pages: int = 0) -> dict:
18767
20252
  kind = sniff_kind(path)
18768
20253
  if kind == "txt":
18769
20254
  return read_text(path)
@@ -18775,7 +20260,15 @@ def extract(path: Path) -> dict:
18775
20260
  return read_html(path)
18776
20261
  if kind == "csv":
18777
20262
  return read_csv_file(path)
18778
- if path.suffix.lower() in MARKITDOWN_EXTENSIONS or kind in {"pdf", "doc", "docx", "xls", "xlsx", "ppt", "pptx"}:
20263
+ if kind == "pdf":
20264
+ return read_pdf(path, preview_pages)
20265
+ if kind == "msg":
20266
+ return read_msg(path)
20267
+ if kind == "eml":
20268
+ return read_eml(path)
20269
+ if kind == "zip":
20270
+ return read_zip(path)
20271
+ if path.suffix.lower() in MARKITDOWN_EXTENSIONS or kind in {"doc", "docx", "xls", "xlsx", "ppt", "pptx"}:
18779
20272
  return read_with_markitdown(path, kind)
18780
20273
 
18781
20274
  raw = path.read_text(encoding="utf-8", errors="replace")
@@ -18791,12 +20284,13 @@ def main() -> int:
18791
20284
  parser = argparse.ArgumentParser()
18792
20285
  parser.add_argument("--input", required=True)
18793
20286
  parser.add_argument("--output", required=True)
20287
+ parser.add_argument("--preview-pages", type=int, default=0)
18794
20288
  args = parser.parse_args()
18795
20289
 
18796
20290
  input_path = Path(args.input)
18797
20291
  output_path = Path(args.output)
18798
20292
 
18799
- result = extract(input_path)
20293
+ result = extract(input_path, preview_pages=args.preview_pages)
18800
20294
  output_path.write_text(json.dumps(result, ensure_ascii=False), encoding="utf-8")
18801
20295
  return 0
18802
20296
 
@@ -18804,10 +20298,10 @@ def main() -> int:
18804
20298
  if __name__ == "__main__":
18805
20299
  raise SystemExit(main())
18806
20300
  `;
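The script is driven entirely through the CLI flags registered in main(); a minimal sketch of invoking it out of band (paths and file names are hypothetical; the package itself calls it via execFile below):

```ts
import { execFile } from "node:child_process";
import { promisify } from "node:util";
import { readFile } from "node:fs/promises";

const run = promisify(execFile);

// Sketch: extract a PDF with two preview pages and read the JSON result.
await run("python3", [
  "extract_file.py",
  "--input", "report.pdf",
  "--output", "result.json",
  "--preview-pages", "2", // opt-in; omit for text-and-metadata only
]);
const result = JSON.parse(await readFile("result.json", "utf8"));
```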
18807
- const EXTRACT_FILE_PY_HASH = "632e2322c14941f8c30b5f60b4c5bb1d773e0d2953fde39a7a16eaf1dbfc21c2";
20301
+ const EXTRACT_FILE_PY_HASH = "bc00c9206b20fc39f1ab718271dd49ae335f95c0b1683bf393d0b3f1376d8f33";
18808
20302
 
18809
20303
  const execFile$1 = promisify(execFile$2);
18810
- const INSTALL_COMMAND = "pip3 install markitdown[all]";
20304
+ const INSTALL_COMMAND = "pip3 install markitdown[all] pymupdf";
18811
20305
  const DOCKER_HINT = "Run Commandable with Docker to use the preinstalled extraction runtime.";
18812
20306
  let cachedScriptPath = null;
18813
20307
  function ensureExtractorScript() {
@@ -18820,7 +20314,8 @@ function ensureExtractorScript() {
18820
20314
  return path;
18821
20315
  }
18822
20316
  const FILE_PROCESSING_DISABLED_TOOLS = {
18823
- "google-workspace": ["read_file_content"]
20317
+ "google-workspace": ["read_file_content"],
20318
+ sharepoint: ["read_file_content"]
18824
20319
  };
18825
20320
  let capabilityPromise = null;
18826
20321
  function pythonExecutable() {
@@ -19025,14 +20520,19 @@ function createExtractFileContent(getIntegration, defaultIntegrationId) {
19025
20520
  const outputPath = join(tempDir, "result.json");
19026
20521
  const bytes = Buffer.from(await response.arrayBuffer());
19027
20522
  await writeFile$1(filePath, bytes);
19028
- await execFile(pythonExecutable(), [extractorScriptPath(), "--input", filePath, "--output", outputPath], { cwd: tempDir, maxBuffer: 10 * 1024 * 1024 });
20523
+ const pythonArgs = [extractorScriptPath(), "--input", filePath, "--output", outputPath];
20524
+ const previewPages = typeof resolvedArgs.previewPages === "number" && resolvedArgs.previewPages > 0 ? Math.floor(resolvedArgs.previewPages) : 0;
20525
+ if (previewPages > 0)
20526
+ pythonArgs.push("--preview-pages", String(previewPages));
20527
+ await execFile(pythonExecutable(), pythonArgs, { cwd: tempDir, maxBuffer: 50 * 1024 * 1024 });
19029
20528
  const raw = await readFile$1(outputPath, "utf8");
19030
20529
  const parsed = JSON.parse(raw);
19031
20530
  return {
19032
20531
  kind: typeof (parsed == null ? void 0 : parsed.kind) === "string" ? parsed.kind : "unknown",
19033
20532
  content: typeof (parsed == null ? void 0 : parsed.content) === "string" ? parsed.content : "",
19034
20533
  warnings: Array.isArray(parsed == null ? void 0 : parsed.warnings) ? parsed.warnings.map((item) => String(item)) : void 0,
19035
- metadata: (parsed == null ? void 0 : parsed.metadata) && typeof parsed.metadata === "object" ? parsed.metadata : void 0
20534
+ metadata: (parsed == null ? void 0 : parsed.metadata) && typeof parsed.metadata === "object" ? parsed.metadata : void 0,
20535
+ pageImages: Array.isArray(parsed == null ? void 0 : parsed.pageImages) ? parsed.pageImages.filter((v) => typeof v === "string") : void 0
19036
20536
  };
19037
20537
  } catch (error) {
19038
20538
  const stderr = typeof (error == null ? void 0 : error.stderr) === "string" ? error.stderr.trim() : "";
@@ -19157,6 +20657,9 @@ function buildToolsByIntegration(spaceId, integrations, proxy, opts = {}) {
19157
20657
  return toolsByIntegration;
19158
20658
  }
19159
20659
 
20660
+ function isHandlerPreprocess(preprocess) {
20661
+ return typeof preprocess === "object" && preprocess !== null && preprocess.type === "handler" && typeof preprocess.handlerCode === "string";
20662
+ }
19160
20663
  function getBuiltInIntegrationTypeConfig(typeSlug) {
19161
20664
  var _a, _b, _c, _d;
19162
20665
  const variantsFile = loadIntegrationVariants(typeSlug);
@@ -19168,8 +20671,9 @@ function getBuiltInIntegrationTypeConfig(typeSlug) {
19168
20671
  const variants = {};
19169
20672
  for (const [key, variant] of Object.entries(variantsFile.variants)) {
19170
20673
  const preprocess = (_c = variant.preprocess) != null ? _c : null;
19171
- if (preprocess !== null && preprocess !== "google_service_account") {
19172
- throw new Error(`Unsupported preprocess '${preprocess}' for built-in integration '${typeSlug}/${key}'. Only 'google_service_account' is allowed.`);
20674
+ const isSupportedHandler = isHandlerPreprocess(preprocess);
20675
+ if (preprocess !== null && preprocess !== "google_service_account" && !isSupportedHandler) {
20676
+ throw new Error(`Unsupported preprocess for built-in integration '${typeSlug}/${key}'.`);
19173
20677
  }
19174
20678
  variants[key] = {
19175
20679
  label: variant.label,
@@ -19180,7 +20684,11 @@ function getBuiltInIntegrationTypeConfig(typeSlug) {
19180
20684
  allowedOrigins: manifestAllowedOrigins,
19181
20685
  healthCheck: (_d = variant.healthCheck) != null ? _d : null,
19182
20686
  hintMarkdown: loadIntegrationHint(typeSlug, key),
19183
- preprocess
20687
+ preprocess: isSupportedHandler ? {
20688
+ type: "handler",
20689
+ handlerCode: preprocess.handlerCode,
20690
+ allowedOrigins: Array.isArray(preprocess.allowedOrigins) ? [...preprocess.allowedOrigins] : null
20691
+ } : preprocess
19184
20692
  };
19185
20693
  }
19186
20694
  return {
@@ -19283,6 +20791,28 @@ function buildCredentialUrl(integrationId) {
19283
20791
  const port = portRaw && /^\d+$/.test(portRaw) ? Number(portRaw) : 23432;
19284
20792
  return `http://127.0.0.1:${port}/integrations/${encodeURIComponent(integrationId)}`;
19285
20793
  }
20794
+ function decodeJwtPayloadForDebug(token) {
20795
+ try {
20796
+ const parts = token.split(".");
20797
+ if (parts.length < 2 || !parts[1])
20798
+ return null;
20799
+ let b64 = parts[1].replace(/-/g, "+").replace(/_/g, "/");
20800
+ const pad = b64.length % 4 === 0 ? "" : "=".repeat(4 - b64.length % 4);
20801
+ b64 = b64 + pad;
20802
+ const json = Buffer$1.from(b64, "base64").toString("utf8");
20803
+ const payload = JSON.parse(json);
20804
+ return {
20805
+ aud: payload.aud,
20806
+ tid: payload.tid,
20807
+ appid: payload.appid,
20808
+ roles: payload.roles,
20809
+ scp: payload.scp,
20810
+ idtyp: payload.idtyp
20811
+ };
20812
+ } catch {
20813
+ return null;
20814
+ }
20815
+ }
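The only non-obvious step above is the base64url-to-base64 conversion; isolated as a sketch:

```ts
// Restore "+" and "/" and pad to a multiple of four before handing the
// segment to the base64 decoder (JWT segments are base64url, unpadded).
function base64UrlToUtf8(segment: string): string {
  let b64 = segment.replace(/-/g, "+").replace(/_/g, "/");
  if (b64.length % 4 !== 0) b64 += "=".repeat(4 - (b64.length % 4));
  return Buffer.from(b64, "base64").toString("utf8");
}
// e.g. JSON.parse(base64UrlToUtf8(jwt.split(".")[1])) yields the claims object.
```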
19286
20816
  function isAbsoluteHttpUrl(value) {
19287
20817
  try {
19288
20818
  const url = new URL(value);
@@ -19343,71 +20873,163 @@ function resolveRelativeBaseUrl(provider, baseUrl, rawPath) {
19343
20873
  return "https://slides.googleapis.com/v1";
19344
20874
  return baseUrl;
19345
20875
  }
20876
+ function joinWithoutDuplicateSegments(baseUrl, rawPath) {
20877
+ let pathOnly = rawPath || "";
20878
+ let queryPart = "";
20879
+ const qIndex = pathOnly.indexOf("?");
20880
+ if (qIndex >= 0) {
20881
+ queryPart = pathOnly.slice(qIndex + 1);
20882
+ pathOnly = pathOnly.slice(0, qIndex);
20883
+ }
20884
+ try {
20885
+ const base = new URL(baseUrl);
20886
+ const baseSegs = base.pathname.split("/").filter(Boolean);
20887
+ const pathSegs = (pathOnly || "/").split("/").filter(Boolean);
20888
+ let overlap = 0;
20889
+ const maxK = Math.min(baseSegs.length, pathSegs.length);
20890
+ for (let k = maxK; k >= 1; k--) {
20891
+ let ok = true;
20892
+ for (let i = 0; i < k; i++) {
20893
+ if (baseSegs[baseSegs.length - k + i] !== pathSegs[i]) {
20894
+ ok = false;
20895
+ break;
20896
+ }
20897
+ }
20898
+ if (ok) {
20899
+ overlap = k;
20900
+ break;
20901
+ }
20902
+ }
20903
+ const normalizedPath = `/${[...baseSegs, ...pathSegs.slice(overlap)].join("/")}`;
20904
+ const baseOrigin = base.origin;
20905
+ const urlNoQuery = `${baseOrigin}${normalizedPath}`;
20906
+ return queryPart ? `${urlNoQuery}?${queryPart}` : urlNoQuery;
20907
+ } catch {
20908
+ const cleanedBase = baseUrl.replace(/\/+$/, "");
20909
+ const cleanedPath = `/${(pathOnly || "").replace(/^\/+/, "")}`;
20910
+ const baseParts = cleanedBase.split("/").filter(Boolean);
20911
+ const pathParts = cleanedPath.split("/").filter(Boolean);
20912
+ let overlap = 0;
20913
+ const maxK = Math.min(baseParts.length, pathParts.length);
20914
+ for (let k = maxK; k >= 1; k--) {
20915
+ let ok = true;
20916
+ for (let i = 0; i < k; i++) {
20917
+ if (baseParts[baseParts.length - k + i] !== pathParts[i]) {
20918
+ ok = false;
20919
+ break;
20920
+ }
20921
+ }
20922
+ if (ok) {
20923
+ overlap = k;
20924
+ break;
20925
+ }
20926
+ }
20927
+ const joined = `/${[...baseParts, ...pathParts.slice(overlap)].join("/")}`;
20928
+ return queryPart ? `${joined}?${queryPart}` : joined;
20929
+ }
20930
+ }
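The effect of the overlap detection, with illustrative hosts:

```ts
// Illustrative inputs/outputs (not calls made by the package):
// joinWithoutDuplicateSegments("https://api.example.com/v1", "/v1/items?limit=5")
//   => "https://api.example.com/v1/items?limit=5"  // shared "v1" kept once
// joinWithoutDuplicateSegments("https://api.example.com/v1", "items")
//   => "https://api.example.com/v1/items"          // no overlap: plain join
```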
20931
+ function stableStringify(value) {
20932
+ if (value === null || typeof value !== "object")
20933
+ return JSON.stringify(value);
20934
+ if (Array.isArray(value))
20935
+ return `[${value.map(stableStringify).join(",")}]`;
20936
+ const entries = Object.entries(value).sort(([a], [b]) => a.localeCompare(b)).map(([key, innerValue]) => `${JSON.stringify(key)}:${stableStringify(innerValue)}`);
20937
+ return `{${entries.join(",")}}`;
20938
+ }
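stableStringify exists so that the preprocess cache key is insensitive to property order; for example:

```ts
// Both spellings produce the same canonical string, hence the same key:
// stableStringify({ b: 1, a: { d: 2, c: 3 } })
//   === stableStringify({ a: { c: 3, d: 2 }, b: 1 })
//   === '{"a":{"c":3,"d":2},"b":1}'
```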
20939
+ const preprocessResultCache = /* @__PURE__ */ new Map();
20940
+ function isHandlerCredentialPreprocess(preprocess) {
20941
+ return typeof preprocess === "object" && preprocess !== null && preprocess.type === "handler" && typeof preprocess.handlerCode === "string";
20942
+ }
20943
+ function getPreprocessCacheKey(provider, variantKey, creds) {
20944
+ return createHash("sha256").update(`${provider}:${variantKey}:${stableStringify(creds)}`).digest("hex");
20945
+ }
20946
+ function getExpiresAtMs(result, now) {
20947
+ var _a;
20948
+ const rawExpiresIn = (_a = result.expiresIn) != null ? _a : result.expires_in;
20949
+ const expiresIn = typeof rawExpiresIn === "number" ? rawExpiresIn : typeof rawExpiresIn === "string" && rawExpiresIn.trim() ? Number(rawExpiresIn) : NaN;
20950
+ if (Number.isFinite(expiresIn) && expiresIn > 0)
20951
+ return now + expiresIn * 1e3;
20952
+ return now + 55 * 6e4;
20953
+ }
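Worked examples of the expiry math (expiresIn and expires_in are treated as seconds, with a 55-minute fallback):

```ts
// getExpiresAtMs({ expiresIn: 3600 }, 0)    => 3_600_000  (1 h)
// getExpiresAtMs({ expires_in: "3599" }, 0) => 3_599_000  (string form accepted)
// getExpiresAtMs({}, 0)                     => 3_300_000  (55 * 6e4 default)
```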
20954
+ function normalizeRequestInit(init = {}) {
20955
+ const preparedInit = { ...init };
20956
+ if (preparedInit.body !== void 0 && typeof preparedInit.body !== "string" && !(preparedInit.body instanceof URLSearchParams) && !(preparedInit.body instanceof FormData) && !(preparedInit.body instanceof Blob) && !(preparedInit.body instanceof ArrayBuffer)) {
20957
+ preparedInit.body = JSON.stringify(preparedInit.body);
20958
+ preparedInit.headers = {
20959
+ "Content-Type": "application/json",
20960
+ ...preparedInit.headers
20961
+ };
20962
+ } else if (preparedInit.body instanceof URLSearchParams) {
20963
+ preparedInit.body = preparedInit.body.toString();
20964
+ preparedInit.headers = {
20965
+ "Content-Type": "application/x-www-form-urlencoded",
20966
+ ...preparedInit.headers
20967
+ };
20968
+ }
20969
+ return preparedInit;
20970
+ }
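What the normalization does to common body types, at a glance:

```ts
// Plain objects are JSON-encoded; URLSearchParams becomes a form body;
// strings, FormData, Blob and ArrayBuffer pass through unchanged.
// normalizeRequestInit({ body: { a: 1 } })
//   => { body: '{"a":1}', headers: { "Content-Type": "application/json" } }
// normalizeRequestInit({ body: new URLSearchParams({ a: "1" }) })
//   => { body: "a=1", headers: { "Content-Type": "application/x-www-form-urlencoded" } }
```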
20971
+ async function runSandboxCredentialPreprocess(params) {
20972
+ var _a, _b;
20973
+ const { provider, variantKey, preprocess, creds, baseUrl, allowedOrigins } = params;
20974
+ const cacheKey = getPreprocessCacheKey(provider, variantKey, creds);
20975
+ const existing = preprocessResultCache.get(cacheKey);
20976
+ const now = Date.now();
20977
+ if (existing && existing.expiresAtMs - now > 6e4) {
20978
+ Object.assign(creds, existing.data);
20979
+ return;
20980
+ }
20981
+ const tokenFetch = async (path, init = {}) => {
20982
+ let finalUrl;
20983
+ if (isAbsoluteHttpUrl(path)) {
20984
+ assertAbsoluteUrlIsAllowed(path, baseUrl, allowedOrigins);
20985
+ finalUrl = path;
20986
+ } else {
20987
+ finalUrl = joinWithoutDuplicateSegments(baseUrl, path);
20988
+ }
20989
+ const preparedInit = normalizeRequestInit(init);
20990
+ return await fetch(finalUrl, {
20991
+ ...preparedInit,
20992
+ method: preparedInit.method || "GET"
20993
+ });
20994
+ };
20995
+ const wrapper = `async (input) => {
20996
+ const __inner = ${preprocess.handlerCode};
20997
+ return await __inner(input, utils)
20998
+ }`;
20999
+ const safeHandler = createSafeHandlerFromString(wrapper, () => ({}), { tokenFetch });
21000
+ const res = await safeHandler(creds);
21001
+ if (!res.success)
21002
+ throw new HttpError(400, `Credential preprocess failed for '${provider}': ${String(((_a = res.result) == null ? void 0 : _a.message) || res.result || "Unknown error")}`);
21003
+ const result = res.result;
21004
+ if (!result || typeof result !== "object" || Array.isArray(result))
21005
+ throw new HttpError(400, `Credential preprocess for '${provider}' must return an object.`);
21006
+ if (typeof result.token !== "string" || !result.token.trim())
21007
+ throw new HttpError(400, `Credential preprocess for '${provider}' must return a non-empty 'token' string.`);
21008
+ Object.assign(creds, result);
21009
+ preprocessResultCache.set(cacheKey, {
21010
+ data: { ...result },
21011
+ expiresAtMs: getExpiresAtMs(result, now)
21012
+ });
21013
+ if (provider === "sharepoint") {
21014
+ const t = result == null ? void 0 : result.token;
21015
+ const tokenStr = typeof t === "string" ? t : "";
21016
+ const claims = tokenStr ? decodeJwtPayloadForDebug(tokenStr) : null;
21017
+ const roles = claims == null ? void 0 : claims.roles;
21018
+ const rolesList = Array.isArray(roles) ? roles.map((r) => String(r).slice(0, 80)) : [];
21019
+ fetch("http://127.0.0.1:7886/ingest/d4127044-8bb5-4b15-95f1-be96d51d67ea", { method: "POST", headers: { "Content-Type": "application/json", "X-Debug-Session-Id": "797117" }, body: JSON.stringify({ sessionId: "797117", location: "proxy.ts:runSandboxCredentialPreprocess", message: "sharepoint preprocess ok", data: { variantKey, tokenLen: typeof t === "string" ? t.length : 0, expiresIn: (_b = result == null ? void 0 : result.expiresIn) != null ? _b : result == null ? void 0 : result.expires_in, tokenAud: claims == null ? void 0 : claims.aud, tokenTid: claims == null ? void 0 : claims.tid, tokenAppId: claims == null ? void 0 : claims.appid, tokenIdtyp: claims == null ? void 0 : claims.idtyp, rolesCount: rolesList.length, rolesSample: rolesList.slice(0, 12), hasScp: typeof (claims == null ? void 0 : claims.scp) === "string" && String(claims.scp).length > 0, jwtDecodeOk: !!claims }, timestamp: Date.now(), hypothesisId: "H6" }) }).catch(() => {
21020
+ });
21021
+ }
21022
+ }
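For context, a hypothetical handlerCode string compatible with the wrapper above: it receives the stored credentials and the sandbox utils, performs a token exchange, and must return an object with a non-empty token. Everything here, including the endpoint, field names, and the availability of URLSearchParams inside the sandbox, is invented for illustration.

```ts
const exampleHandlerCode = `async (creds, utils) => {
  // utils.tokenFetch resolves relative paths against the variant's base URL
  // and enforces the allowed-origin checks shown above.
  const res = await utils.tokenFetch("https://login.example.com/oauth2/token", {
    method: "POST",
    body: new URLSearchParams({
      grant_type: "client_credentials",
      client_id: creds.clientId,
      client_secret: creds.clientSecret,
    }),
  });
  const json = await res.json();
  return { token: json.access_token, expiresIn: json.expires_in };
}`;
```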
19346
21023
  class IntegrationProxy {
19347
21024
  constructor(opts = {}) {
19348
21025
  __publicField$4(this, "opts");
19349
21026
  this.opts = opts;
19350
21027
  }
19351
21028
  async call(integration, path, init = {}) {
19352
- var _a, _b, _c, _d, _e, _f, _g;
21029
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
19353
21030
  const { type: provider } = integration;
19354
21031
  if (!provider || !path)
19355
21032
  throw new HttpError(400, "provider and path are required.");
19356
- const joinWithoutDuplicateSegments = (baseUrl, rawPath) => {
19357
- let pathOnly = rawPath || "";
19358
- let queryPart = "";
19359
- const qIndex = pathOnly.indexOf("?");
19360
- if (qIndex >= 0) {
19361
- queryPart = pathOnly.slice(qIndex + 1);
19362
- pathOnly = pathOnly.slice(0, qIndex);
19363
- }
19364
- try {
19365
- const base = new URL(baseUrl);
19366
- const baseSegs = base.pathname.split("/").filter(Boolean);
19367
- const pathSegs = (pathOnly || "/").split("/").filter(Boolean);
19368
- let overlap = 0;
19369
- const maxK = Math.min(baseSegs.length, pathSegs.length);
19370
- for (let k = maxK; k >= 1; k--) {
19371
- let ok = true;
19372
- for (let i = 0; i < k; i++) {
19373
- if (baseSegs[baseSegs.length - k + i] !== pathSegs[i]) {
19374
- ok = false;
19375
- break;
19376
- }
19377
- }
19378
- if (ok) {
19379
- overlap = k;
19380
- break;
19381
- }
19382
- }
19383
- const normalizedPath = `/${[...baseSegs, ...pathSegs.slice(overlap)].join("/")}`;
19384
- const baseOrigin = base.origin;
19385
- const urlNoQuery = `${baseOrigin}${normalizedPath}`;
19386
- return queryPart ? `${urlNoQuery}?${queryPart}` : urlNoQuery;
19387
- } catch {
19388
- const cleanedBase = baseUrl.replace(/\/+$/, "");
19389
- const cleanedPath = `/${(pathOnly || "").replace(/^\/+/, "")}`;
19390
- const baseParts = cleanedBase.split("/").filter(Boolean);
19391
- const pathParts = cleanedPath.split("/").filter(Boolean);
19392
- let overlap = 0;
19393
- const maxK = Math.min(baseParts.length, pathParts.length);
19394
- for (let k = maxK; k >= 1; k--) {
19395
- let ok = true;
19396
- for (let i = 0; i < k; i++) {
19397
- if (baseParts[baseParts.length - k + i] !== pathParts[i]) {
19398
- ok = false;
19399
- break;
19400
- }
19401
- }
19402
- if (ok) {
19403
- overlap = k;
19404
- break;
19405
- }
19406
- }
19407
- const joined = `/${[...baseParts, ...pathParts.slice(overlap)].join("/")}`;
19408
- return queryPart ? `${joined}?${queryPart}` : joined;
19409
- }
19410
- };
19411
21033
  const usesCredentials = integration.connectionMethod === "credentials";
19412
21034
  if (usesCredentials) {
19413
21035
  if (!this.opts.credentialStore)
@@ -19498,6 +21120,18 @@ class IntegrationProxy {
19498
21120
  throw new HttpError(400, `Missing OAuth scopes for Google integration '${provider}'.`);
19499
21121
  const token = await getGoogleAccessToken({ serviceAccountJson, scopes, subject });
19500
21122
  creds.token = token;
21123
+ } else if (isHandlerCredentialPreprocess(typeConfig.preprocess)) {
21124
+ await runSandboxCredentialPreprocess({
21125
+ provider,
21126
+ variantKey,
21127
+ preprocess: typeConfig.preprocess,
21128
+ creds,
21129
+ baseUrl,
21130
+ allowedOrigins: [
21131
+ ...typeConfig.allowedOrigins,
21132
+ ...(_f = typeConfig.preprocess.allowedOrigins) != null ? _f : []
21133
+ ]
21134
+ });
19501
21135
  }
19502
21136
  const resolvedHeaders = {};
19503
21137
  const resolvedQuery = new URLSearchParams();
@@ -19510,9 +21144,9 @@ class IntegrationProxy {
19510
21144
  const token = Buffer$1.from(`${username}:${password}`).toString("base64");
19511
21145
  resolvedHeaders.Authorization = `Basic ${token}`;
19512
21146
  } else {
19513
- for (const [k, v] of Object.entries(((_f = typeConfig.auth.injection) == null ? void 0 : _f.headers) || {}))
21147
+ for (const [k, v] of Object.entries(((_g = typeConfig.auth.injection) == null ? void 0 : _g.headers) || {}))
19514
21148
  resolvedHeaders[k] = resolveTemplate(v);
19515
- for (const [k, v] of Object.entries(((_g = typeConfig.auth.injection) == null ? void 0 : _g.query) || {}))
21149
+ for (const [k, v] of Object.entries(((_h = typeConfig.auth.injection) == null ? void 0 : _h.query) || {}))
19516
21150
  resolvedQuery.set(k, resolveTemplate(v));
19517
21151
  }
19518
21152
  let finalUrl;
@@ -19525,14 +21159,7 @@ class IntegrationProxy {
19525
21159
  const queryString = resolvedQuery.toString();
19526
21160
  if (queryString)
19527
21161
  finalUrl = finalUrl + (finalUrl.includes("?") ? "&" : "?") + queryString;
19528
- const preparedInit = { ...init };
19529
- if (preparedInit.body !== void 0 && typeof preparedInit.body !== "string") {
19530
- preparedInit.body = JSON.stringify(preparedInit.body);
19531
- preparedInit.headers = {
19532
- "Content-Type": "application/json",
19533
- ...preparedInit.headers
19534
- };
19535
- }
21162
+ const preparedInit = normalizeRequestInit(init);
19536
21163
  const redact = (s) => {
19537
21164
  let out = s;
19538
21165
  for (const val of Object.values(creds)) {
@@ -19541,6 +21168,18 @@ class IntegrationProxy {
19541
21168
  }
19542
21169
  return out;
19543
21170
  };
21171
+ if (provider === "sharepoint") {
21172
+ const auth = resolvedHeaders.Authorization;
21173
+ const bodyString = typeof preparedInit.body === "string" ? preparedInit.body : "";
21174
+ fetch("http://127.0.0.1:7886/ingest/d4127044-8bb5-4b15-95f1-be96d51d67ea", { method: "POST", headers: { "Content-Type": "application/json", "X-Debug-Session-Id": "797117" }, body: JSON.stringify({ sessionId: "797117", location: "proxy.ts:before-fetch", message: "sharepoint outgoing", data: { method: preparedInit.method || "GET", pathPreview: String(path).slice(0, 200), finalUrlHost: (() => {
21175
+ try {
21176
+ return new URL(finalUrl).host;
21177
+ } catch {
21178
+ return "invalid-url";
21179
+ }
21180
+ })(), hasAuthHeader: !!auth, authPrefix: auth ? String(auth).slice(0, 8) : "", tokenFieldLen: typeof creds.token === "string" ? creds.token.length : 0, bodyPreview: bodyString.slice(0, 300), bodyHasRegion: bodyString.includes('"region"'), isSearchQuery: String(path).includes("/search/query"), isRegionLookup: String(path).includes("siteCollection/root ne null") }, timestamp: Date.now(), hypothesisId: String(path).includes("/search/query") || String(path).includes("siteCollection/root ne null") ? "H9" : "H1" }) }).catch(() => {
21181
+ });
21182
+ }
19544
21183
  const response = await fetch(finalUrl, {
19545
21184
  ...preparedInit,
19546
21185
  method: preparedInit.method || "GET",
@@ -19556,6 +21195,23 @@ class IntegrationProxy {
19556
21195
  bodyText = contentType.includes("json") ? JSON.stringify(await response.json()) : await response.text();
19557
21196
  } catch {
19558
21197
  }
21198
+ if (provider === "sharepoint") {
21199
+ const auth = resolvedHeaders.Authorization;
21200
+ let graphErrorCode = "";
21201
+ try {
21202
+ const parsed = JSON.parse(bodyText);
21203
+ graphErrorCode = String(((_i = parsed == null ? void 0 : parsed.error) == null ? void 0 : _i.code) || (parsed == null ? void 0 : parsed.error) || "");
21204
+ } catch {
21205
+ }
21206
+ fetch("http://127.0.0.1:7886/ingest/d4127044-8bb5-4b15-95f1-be96d51d67ea", { method: "POST", headers: { "Content-Type": "application/json", "X-Debug-Session-Id": "797117" }, body: JSON.stringify({ sessionId: "797117", location: "proxy.ts:graph-error", message: "sharepoint graph non-ok", data: { status: response.status, pathPreview: String(path).slice(0, 160), finalUrlHost: (() => {
21207
+ try {
21208
+ return new URL(finalUrl).host;
21209
+ } catch {
21210
+ return "invalid-url";
21211
+ }
21212
+ })(), hasAuthHeader: !!auth, authPrefix: auth ? String(auth).slice(0, 8) : "", graphErrorCode: graphErrorCode.slice(0, 80), bodyPreview: bodyText.slice(0, 220) }, timestamp: Date.now(), hypothesisId: "H2" }) }).catch(() => {
21213
+ });
21214
+ }
19559
21215
  const hint = getErrorHint(response.status, provider, bodyText);
19560
21216
  const hintSuffix = hint ? ` ${hint}` : "";
19561
21217
  const credentialUrl = buildCredentialUrl(integration.id);
@@ -20835,7 +22491,7 @@ ${lines ? `${lines}
20835
22491
  ` : "No integrations configured yet.\n"}`;
20836
22492
  }
20837
22493
  try {
20838
- return loadIntegrationPrompt(ability.integrationtype);
22494
+ return loadIntegrationUsageGuide(ability.integrationtype);
20839
22495
  } catch {
20840
22496
  return null;
20841
22497
  }
@@ -21435,6 +23091,32 @@ function formatAsText(value) {
21435
23091
  return String(value);
21436
23092
  }
21437
23093
  }
23094
+ function renderArtifact(artifact) {
23095
+ if (artifact.type === "image" && artifact.data) {
23096
+ return [{
23097
+ type: "image",
23098
+ data: artifact.data,
23099
+ mimeType: artifact.mimeType
23100
+ }];
23101
+ }
23102
+ if (artifact.type === "image" && artifact.url) {
23103
+ return [{
23104
+ type: "text",
23105
+ text: `Image artifact URL (${artifact.mimeType}): ${artifact.url}`
23106
+ }];
23107
+ }
23108
+ return [];
23109
+ }
23110
+ function buildToolSuccessContent(res) {
23111
+ var _a;
23112
+ const artifacts = getHoistedArtifacts(res);
23113
+ return [
23114
+ { type: "text", text: formatAsText(res.result) },
23115
+ ...artifacts.flatMap(renderArtifact),
23116
+ ...((_a = res.logs) == null ? void 0 : _a.length) ? [{ type: "text", text: `Logs:
23117
+ ${res.logs.join("\n")}` }] : []
23118
+ ];
23119
+ }
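A hypothetical content array produced for a successful call that yielded one inline image artifact plus logs:

```ts
// Invented payload; shapes follow renderArtifact/buildToolSuccessContent.
const exampleContent = [
  { type: "text", text: '{"ok":true}' },
  { type: "image", data: "<base64 jpeg...>", mimeType: "image/jpeg" },
  { type: "text", text: "Logs:\nrendered 1 artifact" },
];
```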
21438
23120
  function registerToolHandlers(server, tools, options = { mode: "static" }) {
21439
23121
  const metaToolDefs = getMetaToolDefinitions();
21440
23122
  const { mode, dynamicMode } = options;
@@ -21452,7 +23134,7 @@ function registerToolHandlers(server, tools, options = { mode: "static" }) {
21452
23134
  return { tools: [...metaToolDefs, ...toolDefs] };
21453
23135
  });
21454
23136
  server.setRequestHandler(CallToolRequestSchema, async (req, extra) => {
21455
- var _a, _b, _c, _d, _e;
23137
+ var _a, _b, _c;
21456
23138
  const name = req.params.name;
21457
23139
  const args = (_a = req.params.arguments) != null ? _a : {};
21458
23140
  const sessionId = extra == null ? void 0 : extra.sessionId;
@@ -21490,11 +23172,7 @@ ${res2.logs.join("\n")}` }] : []
21490
23172
  };
21491
23173
  }
21492
23174
  return {
21493
- content: [
21494
- { type: "text", text: formatAsText(res2.result) },
21495
- ...((_c = res2.logs) == null ? void 0 : _c.length) ? [{ type: "text", text: `Logs:
21496
- ${res2.logs.join("\n")}` }] : []
21497
- ]
23175
+ content: buildToolSuccessContent(res2)
21498
23176
  };
21499
23177
  }
21500
23178
  const staticMeta = handleStaticReadmeCall(name);
@@ -21511,17 +23189,13 @@ ${res2.logs.join("\n")}` }] : []
21511
23189
  return {
21512
23190
  content: [
21513
23191
  { type: "text", text: `Tool error: ${formatAsText(res.result)}` },
21514
- ...((_d = res.logs) == null ? void 0 : _d.length) ? [{ type: "text", text: `Logs:
23192
+ ...((_c = res.logs) == null ? void 0 : _c.length) ? [{ type: "text", text: `Logs:
21515
23193
  ${res.logs.join("\n")}` }] : []
21516
23194
  ]
21517
23195
  };
21518
23196
  }
21519
23197
  return {
21520
- content: [
21521
- { type: "text", text: formatAsText(res.result) },
21522
- ...((_e = res.logs) == null ? void 0 : _e.length) ? [{ type: "text", text: `Logs:
21523
- ${res.logs.join("\n")}` }] : []
21524
- ]
23198
+ content: buildToolSuccessContent(res)
21525
23199
  };
21526
23200
  });
21527
23201
  }
@@ -21941,140 +23615,140 @@ const assets = {
21941
23615
  "/favicon.ico": {
21942
23616
  "type": "image/vnd.microsoft.icon",
21943
23617
  "etag": "\"10be-n8egyE9tcb7sKGr/pYCaQ4uWqxI\"",
21944
- "mtime": "2026-04-05T18:35:56.190Z",
23618
+ "mtime": "2026-04-12T18:58:38.513Z",
21945
23619
  "size": 4286,
21946
23620
  "path": "../public/favicon.ico"
21947
23621
  },
21948
23622
  "/_fonts/57NSSoFy1VLVs2gqly8Ls9awBnZMFyXGrefpmqvdqmc-zJfbBtpgM4cDmcXBsqZNW79_kFnlpPd62b48glgdydA.woff2": {
21949
23623
  "type": "font/woff2",
21950
23624
  "etag": "\"4b5c-TAo9mx7r3xQs52+HbHcHJ52z8Qo\"",
21951
- "mtime": "2026-04-05T18:35:56.183Z",
23625
+ "mtime": "2026-04-12T18:58:38.507Z",
21952
23626
  "size": 19292,
21953
23627
  "path": "../public/_fonts/57NSSoFy1VLVs2gqly8Ls9awBnZMFyXGrefpmqvdqmc-zJfbBtpgM4cDmcXBsqZNW79_kFnlpPd62b48glgdydA.woff2"
21954
23628
  },
21955
23629
  "/_fonts/8VR2wSMN-3U4NbWAVYXlkRV6hA0jFBXP-0RtL3X7fko-x2gYI4qfmkRdxyQQUPaBZdZdgl1TeVrquF_TxHeM4lM.woff2": {
21956
23630
  "type": "font/woff2",
21957
23631
  "etag": "\"212c-FshXJibFzNhd2HEIMP8C3JR5PYg\"",
21958
- "mtime": "2026-04-05T18:35:56.183Z",
23632
+ "mtime": "2026-04-12T18:58:38.507Z",
21959
23633
  "size": 8492,
21960
23634
  "path": "../public/_fonts/8VR2wSMN-3U4NbWAVYXlkRV6hA0jFBXP-0RtL3X7fko-x2gYI4qfmkRdxyQQUPaBZdZdgl1TeVrquF_TxHeM4lM.woff2"
21961
23635
  },
21962
23636
  "/_fonts/GsKUclqeNLJ96g5AU593ug6yanivOiwjW_7zESNPChw-jHA4tBeM1bjF7LATGUpfBuSTyomIFrWBTzjF7txVYfg.woff2": {
21963
23637
  "type": "font/woff2",
21964
23638
  "etag": "\"680c-mJtsV33lkTAKSmfq5k3lKHSllcU\"",
21965
- "mtime": "2026-04-05T18:35:56.183Z",
23639
+ "mtime": "2026-04-12T18:58:38.507Z",
21966
23640
  "size": 26636,
21967
23641
  "path": "../public/_fonts/GsKUclqeNLJ96g5AU593ug6yanivOiwjW_7zESNPChw-jHA4tBeM1bjF7LATGUpfBuSTyomIFrWBTzjF7txVYfg.woff2"
21968
23642
  },
21969
23643
  "/_fonts/Ld1FnTo3yTIwDyGfTQ5-Fws9AWsCbKfMvgxduXr7JcY-W25bL8NF1fjpLRSOgJb7RoZPHqGQNwMTM7S9tHVoxx8.woff2": {
21970
23644
  "type": "font/woff2",
21971
23645
  "etag": "\"6ec4-8OoFFPZKF1grqmfGVjh5JDE6DOU\"",
21972
- "mtime": "2026-04-05T18:35:56.184Z",
23646
+ "mtime": "2026-04-12T18:58:38.507Z",
21973
23647
  "size": 28356,
21974
23648
  "path": "../public/_fonts/Ld1FnTo3yTIwDyGfTQ5-Fws9AWsCbKfMvgxduXr7JcY-W25bL8NF1fjpLRSOgJb7RoZPHqGQNwMTM7S9tHVoxx8.woff2"
21975
23649
  },
21976
23650
  "/_fonts/NdzqRASp2bovDUhQT1IRE_EMqKJ2KYQdTCfFcBvL8yw-KhwZiS86o3fErOe5GGMExHUemmI_dBfaEFxjISZrBd0.woff2": {
21977
23651
  "type": "font/woff2",
21978
23652
  "etag": "\"1d98-cDZfMibtk4T04FTTAmlfhWDpkN0\"",
21979
- "mtime": "2026-04-05T18:35:56.183Z",
23653
+ "mtime": "2026-04-12T18:58:38.507Z",
21980
23654
  "size": 7576,
21981
23655
  "path": "../public/_fonts/NdzqRASp2bovDUhQT1IRE_EMqKJ2KYQdTCfFcBvL8yw-KhwZiS86o3fErOe5GGMExHUemmI_dBfaEFxjISZrBd0.woff2"
21982
23656
  },
21983
23657
  "/_fonts/iTkrULNFJJkTvihIg1Vqi5IODRH_9btXCioVF5l98I8-AndUyau2HR2felA_ra8V2mutQgschhasE5FD1dXGJX8.woff2": {
21984
23658
  "type": "font/woff2",
21985
23659
  "etag": "\"47c4-5xyngHnzzhetUee74tMx9OTgqNQ\"",
21986
- "mtime": "2026-04-05T18:35:56.184Z",
23660
+ "mtime": "2026-04-12T18:58:38.507Z",
21987
23661
  "size": 18372,
21988
23662
  "path": "../public/_fonts/iTkrULNFJJkTvihIg1Vqi5IODRH_9btXCioVF5l98I8-AndUyau2HR2felA_ra8V2mutQgschhasE5FD1dXGJX8.woff2"
21989
23663
  },
21990
23664
  "/_nuxt/BD6mASiY.js": {
21991
23665
  "type": "text/javascript; charset=utf-8",
21992
23666
  "etag": "\"ab-ScyLcA/4r5aOxEv1YY+kqXazCHI\"",
21993
- "mtime": "2026-04-05T18:35:56.186Z",
23667
+ "mtime": "2026-04-12T18:58:38.509Z",
21994
23668
  "size": 171,
21995
23669
  "path": "../public/_nuxt/BD6mASiY.js"
21996
23670
  },
21997
- "/_nuxt/CjAs3eBq.js": {
21998
- "type": "text/javascript; charset=utf-8",
21999
- "etag": "\"1df7-cTFKdH9K34T9NixeUm/CLQ8lWUc\"",
22000
- "mtime": "2026-04-05T18:35:56.186Z",
22001
- "size": 7671,
22002
- "path": "../public/_nuxt/CjAs3eBq.js"
22003
- },
22004
23671
  "/_nuxt/D9wFDhac.js": {
22005
23672
  "type": "text/javascript; charset=utf-8",
22006
23673
  "etag": "\"e99-sUFV1wmMOK2XGfzDXJyP2NA8TG4\"",
22007
- "mtime": "2026-04-05T18:35:56.186Z",
23674
+ "mtime": "2026-04-12T18:58:38.510Z",
22008
23675
  "size": 3737,
22009
23676
  "path": "../public/_nuxt/D9wFDhac.js"
22010
23677
  },
23678
+ "/_nuxt/CjAs3eBq.js": {
23679
+ "type": "text/javascript; charset=utf-8",
23680
+ "etag": "\"1df7-cTFKdH9K34T9NixeUm/CLQ8lWUc\"",
23681
+ "mtime": "2026-04-12T18:58:38.510Z",
23682
+ "size": 7671,
23683
+ "path": "../public/_nuxt/CjAs3eBq.js"
23684
+ },
22011
23685
  "/_nuxt/DSWYWRXT.js": {
22012
23686
  "type": "text/javascript; charset=utf-8",
22013
23687
  "etag": "\"10875-8b+YwIvP6QkcBFnHXqxd+WeZ05o\"",
22014
- "mtime": "2026-04-05T18:35:56.186Z",
23688
+ "mtime": "2026-04-12T18:58:38.510Z",
22015
23689
  "size": 67701,
22016
23690
  "path": "../public/_nuxt/DSWYWRXT.js"
22017
23691
  },
22018
23692
  "/_nuxt/DRfk9W3W.js": {
22019
23693
  "type": "text/javascript; charset=utf-8",
22020
23694
  "etag": "\"194dc-Oj5Ixz12+pq4yqDtF/N+YAPzoWw\"",
22021
- "mtime": "2026-04-05T18:35:56.186Z",
23695
+ "mtime": "2026-04-12T18:58:38.510Z",
22022
23696
  "size": 103644,
22023
23697
  "path": "../public/_nuxt/DRfk9W3W.js"
22024
23698
  },
22025
23699
  "/_nuxt/VvnbcAzZ.js": {
22026
23700
  "type": "text/javascript; charset=utf-8",
22027
23701
  "etag": "\"d7b-hU4O5jppM7Ou3kZAYy3iYXlgoa8\"",
22028
- "mtime": "2026-04-05T18:35:56.186Z",
23702
+ "mtime": "2026-04-12T18:58:38.510Z",
22029
23703
  "size": 3451,
22030
23704
  "path": "../public/_nuxt/VvnbcAzZ.js"
22031
23705
  },
22032
23706
  "/_nuxt/_id_.DhlLK-mY.css": {
22033
23707
  "type": "text/css; charset=utf-8",
22034
23708
  "etag": "\"2f4-xtV37kE566jU74wpZnFHA29RoAY\"",
22035
- "mtime": "2026-04-05T18:35:56.186Z",
23709
+ "mtime": "2026-04-12T18:58:38.510Z",
22036
23710
  "size": 756,
22037
23711
  "path": "../public/_nuxt/_id_.DhlLK-mY.css"
22038
23712
  },
22039
23713
  "/_nuxt/error-404.C7fg894-.css": {
22040
23714
  "type": "text/css; charset=utf-8",
22041
23715
  "etag": "\"97e-fiQ3o7A11L9BuXRBr0GJldkx0AU\"",
22042
- "mtime": "2026-04-05T18:35:56.187Z",
23716
+ "mtime": "2026-04-12T18:58:38.510Z",
22043
23717
  "size": 2430,
22044
23718
  "path": "../public/_nuxt/error-404.C7fg894-.css"
22045
23719
  },
22046
23720
  "/_nuxt/error-500.DjUK_N2Y.css": {
22047
23721
  "type": "text/css; charset=utf-8",
22048
23722
  "etag": "\"773-Qf61bSDos4KtmZDaA06FmZyUYNo\"",
22049
- "mtime": "2026-04-05T18:35:56.187Z",
23723
+ "mtime": "2026-04-12T18:58:38.510Z",
22050
23724
  "size": 1907,
22051
23725
  "path": "../public/_nuxt/error-500.DjUK_N2Y.css"
22052
23726
  },
22053
23727
  "/_nuxt/builds/latest.json": {
22054
23728
  "type": "application/json",
22055
- "etag": "\"47-GZLcjWZaQqnb3MWtaAhgJPSJq0w\"",
22056
- "mtime": "2026-04-05T18:35:56.182Z",
23729
+ "etag": "\"47-e/86GxbuxL65vE0JCcC0T3jGAp4\"",
23730
+ "mtime": "2026-04-12T18:58:38.505Z",
22057
23731
  "size": 71,
22058
23732
  "path": "../public/_nuxt/builds/latest.json"
22059
23733
  },
22060
- "/_nuxt/builds/meta/0857a55b-f766-4fe6-86be-9bd9d857861a.json": {
23734
+ "/_nuxt/builds/meta/886deef4-f3b5-464c-b4e2-11735eb5272e.json": {
22061
23735
  "type": "application/json",
22062
- "etag": "\"58-/dM+mpaUFnmvLLmrgwHgZBuTtQg\"",
22063
- "mtime": "2026-04-05T18:35:56.180Z",
23736
+ "etag": "\"58-BEsLMe80cnxppLc/ZshfoAR3oqs\"",
23737
+ "mtime": "2026-04-12T18:58:38.503Z",
22064
23738
  "size": 88,
22065
- "path": "../public/_nuxt/builds/meta/0857a55b-f766-4fe6-86be-9bd9d857861a.json"
23739
+ "path": "../public/_nuxt/builds/meta/886deef4-f3b5-464c-b4e2-11735eb5272e.json"
22066
23740
  },
22067
23741
  "/_nuxt/BUmYUDQu.js": {
22068
23742
  "type": "text/javascript; charset=utf-8",
22069
23743
  "etag": "\"66cba-d/pdEXVc78H3VlgFN3kVzKpvD1Q\"",
22070
- "mtime": "2026-04-05T18:35:56.186Z",
23744
+ "mtime": "2026-04-12T18:58:38.510Z",
22071
23745
  "size": 421050,
22072
23746
  "path": "../public/_nuxt/BUmYUDQu.js"
22073
23747
  },
22074
23748
  "/_nuxt/entry.Y3mA4bzA.css": {
22075
23749
  "type": "text/css; charset=utf-8",
22076
23750
  "etag": "\"2d46b-zfrD3Ny9WW6qm4fCXAfX5eIAxPA\"",
22077
- "mtime": "2026-04-05T18:35:56.187Z",
23751
+ "mtime": "2026-04-12T18:58:38.510Z",
22078
23752
  "size": 185451,
22079
23753
  "path": "../public/_nuxt/entry.Y3mA4bzA.css"
22080
23754
  }