@bubblelab/bubble-core 0.1.65 → 0.1.67

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/dist/bubble-bundle.d.ts +104 -104
  2. package/dist/bubbles/service-bubble/agi-inc.d.ts +88 -88
  3. package/dist/bubbles/service-bubble/ai-agent.d.ts +117 -96
  4. package/dist/bubbles/service-bubble/ai-agent.d.ts.map +1 -1
  5. package/dist/bubbles/service-bubble/ai-agent.js +131 -0
  6. package/dist/bubbles/service-bubble/ai-agent.js.map +1 -1
  7. package/dist/bubbles/service-bubble/airtable.d.ts +118 -118
  8. package/dist/bubbles/service-bubble/apify/apify.d.ts +26 -26
  9. package/dist/bubbles/service-bubble/ashby/ashby.d.ts +89 -68
  10. package/dist/bubbles/service-bubble/ashby/ashby.d.ts.map +1 -1
  11. package/dist/bubbles/service-bubble/ashby/ashby.js +96 -0
  12. package/dist/bubbles/service-bubble/ashby/ashby.js.map +1 -1
  13. package/dist/bubbles/service-bubble/ashby/ashby.schema.d.ts +108 -102
  14. package/dist/bubbles/service-bubble/ashby/ashby.schema.d.ts.map +1 -1
  15. package/dist/bubbles/service-bubble/ashby/ashby.schema.js +10 -1
  16. package/dist/bubbles/service-bubble/ashby/ashby.schema.js.map +1 -1
  17. package/dist/bubbles/service-bubble/browserbase/browserbase.d.ts +26 -26
  18. package/dist/bubbles/service-bubble/browserbase/browserbase.schema.d.ts +2 -2
  19. package/dist/bubbles/service-bubble/crustdata/crustdata.d.ts +130 -130
  20. package/dist/bubbles/service-bubble/crustdata/crustdata.schema.d.ts +16 -16
  21. package/dist/bubbles/service-bubble/eleven-labs.d.ts +28 -28
  22. package/dist/bubbles/service-bubble/firecrawl.d.ts +1001 -1001
  23. package/dist/bubbles/service-bubble/followupboss.d.ts +144 -144
  24. package/dist/bubbles/service-bubble/fullenrich/fullenrich.d.ts +56 -56
  25. package/dist/bubbles/service-bubble/github.d.ts +176 -176
  26. package/dist/bubbles/service-bubble/gmail.d.ts +240 -240
  27. package/dist/bubbles/service-bubble/google-calendar.d.ts +138 -138
  28. package/dist/bubbles/service-bubble/google-drive.d.ts +76 -76
  29. package/dist/bubbles/service-bubble/google-sheets/google-sheets.d.ts +32 -32
  30. package/dist/bubbles/service-bubble/hello-world.d.ts +8 -8
  31. package/dist/bubbles/service-bubble/http.d.ts +12 -12
  32. package/dist/bubbles/service-bubble/insforge-db.d.ts +16 -16
  33. package/dist/bubbles/service-bubble/jira/jira.d.ts +54 -54
  34. package/dist/bubbles/service-bubble/notion/notion.d.ts +2089 -2089
  35. package/dist/bubbles/service-bubble/postgresql.d.ts +16 -16
  36. package/dist/bubbles/service-bubble/resend.d.ts +28 -28
  37. package/dist/bubbles/service-bubble/slack/slack.d.ts +420 -420
  38. package/dist/bubbles/service-bubble/storage.d.ts +20 -20
  39. package/dist/bubbles/service-bubble/stripe/stripe.d.ts +107 -107
  40. package/dist/bubbles/service-bubble/stripe/stripe.schema.d.ts +4 -4
  41. package/dist/bubbles/service-bubble/telegram.d.ts +1562 -1562
  42. package/dist/bubbles/tool-bubble/amazon-shopping-tool/amazon-shopping-tool.d.ts +15 -15
  43. package/dist/bubbles/tool-bubble/amazon-shopping-tool/amazon-shopping-tool.schema.d.ts +2 -2
  44. package/dist/bubbles/tool-bubble/bubbleflow-validation-tool.d.ts +20 -20
  45. package/dist/bubbles/tool-bubble/chart-js-tool.d.ts +18 -18
  46. package/dist/bubbles/tool-bubble/code-edit-tool.d.ts +8 -8
  47. package/dist/bubbles/tool-bubble/company-enrichment-tool.d.ts +70 -70
  48. package/dist/bubbles/tool-bubble/get-bubble-details-tool.d.ts +4 -4
  49. package/dist/bubbles/tool-bubble/get-trigger-detail-tool.d.ts +4 -4
  50. package/dist/bubbles/tool-bubble/google-maps-tool.d.ts +4 -4
  51. package/dist/bubbles/tool-bubble/instagram-tool.d.ts +14 -14
  52. package/dist/bubbles/tool-bubble/linkedin-connection-tool/linkedin-connection-tool.d.ts +2 -2
  53. package/dist/bubbles/tool-bubble/linkedin-tool.d.ts +326 -326
  54. package/dist/bubbles/tool-bubble/list-bubbles-tool.d.ts +4 -4
  55. package/dist/bubbles/tool-bubble/people-search-tool.d.ts +112 -112
  56. package/dist/bubbles/tool-bubble/reddit-scrape-tool.d.ts +24 -24
  57. package/dist/bubbles/tool-bubble/research-agent-tool.d.ts +10 -10
  58. package/dist/bubbles/tool-bubble/sql-query-tool.d.ts +8 -8
  59. package/dist/bubbles/tool-bubble/tiktok-tool.d.ts +76 -76
  60. package/dist/bubbles/tool-bubble/tool-template.d.ts +4 -4
  61. package/dist/bubbles/tool-bubble/twitter-tool.d.ts +160 -160
  62. package/dist/bubbles/tool-bubble/web-crawl-tool.d.ts +22 -22
  63. package/dist/bubbles/tool-bubble/web-extract-tool.d.ts +8 -8
  64. package/dist/bubbles/tool-bubble/web-scrape-tool.d.ts +8 -8
  65. package/dist/bubbles/tool-bubble/web-search-tool.d.ts +8 -8
  66. package/dist/bubbles/tool-bubble/youtube-tool.d.ts +28 -28
  67. package/dist/bubbles/workflow-bubble/database-analyzer.workflow.d.ts +4 -4
  68. package/dist/bubbles/workflow-bubble/generate-document.workflow.d.ts +50 -50
  69. package/dist/bubbles/workflow-bubble/parse-document.workflow.d.ts +18 -18
  70. package/dist/bubbles/workflow-bubble/pdf-form-operations.workflow.d.ts +72 -72
  71. package/dist/bubbles/workflow-bubble/pdf-ocr.workflow.d.ts +36 -36
  72. package/dist/bubbles/workflow-bubble/slack-data-assistant.workflow.d.ts +36 -36
  73. package/dist/bubbles/workflow-bubble/slack-formatter-agent.d.ts +98 -98
  74. package/dist/bubbles/workflow-bubble/slack-notifier.workflow.d.ts +18 -18
  75. package/dist/bubbles.json +59 -22
  76. package/package.json +2 -2
package/dist/bubbles.json CHANGED
@@ -1,6 +1,6 @@
  {
  "version": "2.0.0",
- "generatedAt": "2026-01-30T08:29:08.214Z",
+ "generatedAt": "2026-01-31T21:56:39.458Z",
  "totalCount": 58,
  "bubbles": [
  {
@@ -68,7 +68,7 @@
  "type": "service",
  "shortDescription": "AI agent with LangGraph for tool-enabled conversations, multimodal support, and JSON mode",
  "useCase": "- Add tools to enhance the AI agent's capabilities (web-search-tool, web-scrape-tool)",
- "outputSchema": "{\n response: string // The AI agents final response to the user message. For text responses, returns plain text. If JSON mode is enabled, returns a JSON string. For image generation models (like gemini-2.5-flash-image-preview), returns base64-encoded image data with data URI format (data:image/png;base64,...),\n toolCalls: { tool: string // Name of the tool that was called, input: unknown // Input parameters passed to the tool, output: unknown // Output returned by the tool }[] // Array of tool calls made during the conversation,\n iterations: number // Number of back-and-forth iterations in the agent workflow,\n error: string // Error message of the run, undefined if successful,\n success: boolean // Whether the agent execution completed successfully\n}",
+ "outputSchema": "{\n response: string // The AI agents final response to the user message. For text responses, returns plain text. If JSON mode is enabled, returns a JSON string. For image generation models (like gemini-2.5-flash-image-preview), returns base64-encoded image data with data URI format (data:image/png;base64,...),\n reasoning: string | undefined // The reasoning/thinking tokens from the model (if available). Present for deep research models and reasoning models.,\n toolCalls: { tool: string // Name of the tool that was called, input: unknown // Input parameters passed to the tool, output: unknown // Output returned by the tool }[] // Array of tool calls made during the conversation,\n iterations: number // Number of back-and-forth iterations in the agent workflow,\n totalCost: number | undefined // Total cost in USD for this request (includes tokens + web search for deep research models),\n error: string // Error message of the run, undefined if successful,\n success: boolean // Whether the agent execution completed successfully\n}",
  "inputJsonSchema": {
  "type": "object",
  "properties": {
@@ -210,7 +210,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "description": "AI model to use (format: provider/model-name)."
  },
@@ -281,7 +283,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "description": "Backup AI model to use if the primary model fails (format: provider/model-name)."
  },
@@ -467,6 +471,10 @@
  "type": "string",
  "description": "The AI agents final response to the user message. For text responses, returns plain text. If JSON mode is enabled, returns a JSON string. For image generation models (like gemini-2.5-flash-image-preview), returns base64-encoded image data with data URI format (data:image/png;base64,...)"
  },
+ "reasoning": {
+ "type": "string",
+ "description": "The reasoning/thinking tokens from the model (if available). Present for deep research models and reasoning models."
+ },
  "toolCalls": {
  "type": "array",
  "items": {
@@ -494,6 +502,10 @@
  "type": "number",
  "description": "Number of back-and-forth iterations in the agent workflow"
  },
+ "totalCost": {
+ "type": "number",
+ "description": "Total cost in USD for this request (includes tokens + web search for deep research models)"
+ },
  "error": {
  "type": "string",
  "description": "Error message of the run, undefined if successful"
@@ -512,7 +524,7 @@
  ],
  "additionalProperties": false
  },
- "usageExample": "// Example usage of ai-agent bubble\nconst aiAgent = new AIAgentBubble({\n message: \"example string\", // The message or question to send to the AI agent,\n images: [{ type: \"base64\" // default, data: \"example string\", mimeType: \"image/png\" // default, description: \"example string\" }] // example for array, // Array of base64 encoded images to include with the message (for multimodal AI models). Example: [{type: \"base64\", data: \"base64...\", mimeType: \"image/png\", description: \"A beautiful image of a cat\"}] or [{type: \"url\", url: \"https://example.com/image.png\", description: \"A beautiful image of a cat\"}],\n conversationHistory: [{ role: \"user\" // options: \"user\", \"assistant\", \"tool\" // The role of the message sender, content: \"example string\" // The message content, toolCallId: \"example string\" // Tool call ID for tool messages, name: \"example string\" // Tool name for tool messages }], // Previous conversation messages for multi-turn conversations. When provided, messages are sent as separate turns to enable KV cache optimization. Format: [{role: \"user\", content: \"...\"}, {role: \"assistant\", content: \"...\"}, ...],\n systemPrompt: \"You are a helpful AI assistant\" // default, // System prompt that defines the AI agents behavior and personality,\n name: \"AI Agent\" // default, // A friendly name for the AI agent,\n model: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\" // AI model to use (format: provider/model-name)., temperature: 1 // default // Temperature for response randomness (0 = deterministic, 2 = very random), maxTokens: 64000 // default // Maximum number of tokens to generate in response, keep at default of 40000 unless the response is expected to be certain length, reasoningEffort: \"low\" // options: \"low\", \"medium\", \"high\" // Reasoning effort for model. If not specified, uses primary model reasoningEffort., maxRetries: 3 // default // Maximum number of retries for API calls (default: 3). 
Useful for handling transient errors like 503 Service Unavailable., provider: [\"example string\"] // Providers for ai agent (open router only)., jsonMode: false // default // When true, returns clean JSON response, you must provide the exact JSON schema in the system prompt, backupModel: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\" // Backup AI model to use if the primary model fails (format: provider/model-name)., temperature: 42 // Temperature for backup model. If not specified, uses primary model temperature., maxTokens: 42 // Max tokens for backup model. If not specified, uses primary model maxTokens., reasoningEffort: \"low\" // options: \"low\", \"medium\", \"high\" // Reasoning effort for backup model. If not specified, uses primary model reasoningEffort., maxRetries: 42 // Max retries for backup model. If not specified, uses primary model maxRetries. } // structure // Backup model configuration to use if the primary model fails. } // structure, // AI model configuration including provider, temperature, and tokens, retries, and json mode. Always include this.,\n tools: [{ name: \"web-search-tool\" // options: \"web-search-tool\", \"web-scrape-tool\", \"web-crawl-tool\", \"web-extract-tool\", \"research-agent-tool\", \"reddit-scrape-tool\", \"instagram-tool\", \"list-bubbles-tool\", \"get-bubble-details-tool\", \"get-trigger-detail-tool\", \"bubbleflow-validation-tool\", \"code-edit-tool\", \"chart-js-tool\", \"amazon-shopping-tool\", \"linkedin-tool\", \"tiktok-tool\", \"twitter-tool\", \"google-maps-tool\", \"youtube-tool\", \"people-search-tool\", \"sql-query-tool\" // Name of the tool type or tool bubble to enable for the AI agent, config: {} // Configuration for the tool or tool bubble }] // example for array, // Array of pre-registered tools the AI agent can use. Can be tool types (web-search-tool, web-scrape-tool, web-crawl-tool, web-extract-tool, instagram-tool). If using image models, set the tools to [],\n customTools: [{ name: \"example string\" // Unique name for your custom tool (e.g., \"calculate-tax\"), description: \"example string\" // Description of what the tool does - helps the AI know when to use it, schema: {} // Zod schema object defining the tool parameters. Can be either a plain object (e.g., { amount: z.number() }) or a Zod object directly (e.g., z.object({ amount: z.number() })). }] // example for array, // Array of custom runtime-defined tools with their own schemas and functions. Use this to add domain-specific tools without pre-registration. 
Example: [{ name: \"calculate-tax\", description: \"Calculates sales tax\", schema: { amount: z.number() }, func: async (input) => {...} }],\n maxIterations: 40 // default, // Maximum number of iterations for the agent workflow, 5 iterations per turn of conversation,\n streaming: false // default, // Enable real-time streaming of tokens, tool calls, and iteration progress,\n expectedOutputSchema: \"example string\", // Zod schema or JSON schema string that defines the expected structure of the AI response. When provided, automatically enables JSON mode and instructs the AI to output in the exact format. Example: z.object({ summary: z.string(), items: z.array(z.object({ name: z.string(), score: z.number() })) }),\n});\n\nconst result = await aiAgent.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// response: string // The AI agents final response to the user message. For text responses, returns plain text. If JSON mode is enabled, returns a JSON string. For image generation models (like gemini-2.5-flash-image-preview), returns base64-encoded image data with data URI format (data:image/png;base64,...),\n// toolCalls: { tool: string // Name of the tool that was called, input: unknown // Input parameters passed to the tool, output: unknown // Output returned by the tool }[] // Array of tool calls made during the conversation,\n// iterations: number // Number of back-and-forth iterations in the agent workflow,\n// error: string // Error message of the run, undefined if successful,\n// success: boolean // Whether the agent execution completed successfully\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// Example usage of ai-agent bubble\nconst aiAgent = new AIAgentBubble({\n message: \"example string\", // The message or question to send to the AI agent,\n images: [{ type: \"base64\" // default, data: \"example string\", mimeType: \"image/png\" // default, description: \"example string\" }] // example for array, // Array of base64 encoded images to include with the message (for multimodal AI models). Example: [{type: \"base64\", data: \"base64...\", mimeType: \"image/png\", description: \"A beautiful image of a cat\"}] or [{type: \"url\", url: \"https://example.com/image.png\", description: \"A beautiful image of a cat\"}],\n conversationHistory: [{ role: \"user\" // options: \"user\", \"assistant\", \"tool\" // The role of the message sender, content: \"example string\" // The message content, toolCallId: \"example string\" // Tool call ID for tool messages, name: \"example string\" // Tool name for tool messages }], // Previous conversation messages for multi-turn conversations. When provided, messages are sent as separate turns to enable KV cache optimization. Format: [{role: \"user\", content: \"...\"}, {role: \"assistant\", content: \"...\"}, ...],\n systemPrompt: \"You are a helpful AI assistant\" // default, // System prompt that defines the AI agents behavior and personality,\n name: \"AI Agent\" // default, // A friendly name for the AI agent,\n model: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\" // AI model to use (format: provider/model-name)., temperature: 1 // default // Temperature for response randomness (0 = deterministic, 2 = very random), maxTokens: 64000 // default // Maximum number of tokens to generate in response, keep at default of 40000 unless the response is expected to be certain length, reasoningEffort: \"low\" // options: \"low\", \"medium\", \"high\" // Reasoning effort for model. If not specified, uses primary model reasoningEffort., maxRetries: 3 // default // Maximum number of retries for API calls (default: 3). 
Useful for handling transient errors like 503 Service Unavailable., provider: [\"example string\"] // Providers for ai agent (open router only)., jsonMode: false // default // When true, returns clean JSON response, you must provide the exact JSON schema in the system prompt, backupModel: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\" // Backup AI model to use if the primary model fails (format: provider/model-name)., temperature: 42 // Temperature for backup model. If not specified, uses primary model temperature., maxTokens: 42 // Max tokens for backup model. If not specified, uses primary model maxTokens., reasoningEffort: \"low\" // options: \"low\", \"medium\", \"high\" // Reasoning effort for backup model. If not specified, uses primary model reasoningEffort., maxRetries: 42 // Max retries for backup model. If not specified, uses primary model maxRetries. } // structure // Backup model configuration to use if the primary model fails. } // structure, // AI model configuration including provider, temperature, and tokens, retries, and json mode. Always include this.,\n tools: [{ name: \"web-search-tool\" // options: \"web-search-tool\", \"web-scrape-tool\", \"web-crawl-tool\", \"web-extract-tool\", \"research-agent-tool\", \"reddit-scrape-tool\", \"instagram-tool\", \"list-bubbles-tool\", \"get-bubble-details-tool\", \"get-trigger-detail-tool\", \"bubbleflow-validation-tool\", \"code-edit-tool\", \"chart-js-tool\", \"amazon-shopping-tool\", \"linkedin-tool\", \"tiktok-tool\", \"twitter-tool\", \"google-maps-tool\", \"youtube-tool\", \"people-search-tool\", \"sql-query-tool\" // Name of the tool type or tool bubble to enable for the AI agent, config: {} // Configuration for the tool or tool bubble }] // example for array, // Array of pre-registered tools the AI agent can use. Can be tool types (web-search-tool, web-scrape-tool, web-crawl-tool, web-extract-tool, instagram-tool). If using image models, set the tools to [],\n customTools: [{ name: \"example string\" // Unique name for your custom tool (e.g., \"calculate-tax\"), description: \"example string\" // Description of what the tool does - helps the AI know when to use it, schema: {} // Zod schema object defining the tool parameters. Can be either a plain object (e.g., { amount: z.number() }) or a Zod object directly (e.g., z.object({ amount: z.number() })). }] // example for array, // Array of custom runtime-defined tools with their own schemas and functions. Use this to add domain-specific tools without pre-registration. 
Example: [{ name: \"calculate-tax\", description: \"Calculates sales tax\", schema: { amount: z.number() }, func: async (input) => {...} }],\n maxIterations: 40 // default, // Maximum number of iterations for the agent workflow, 5 iterations per turn of conversation,\n streaming: false // default, // Enable real-time streaming of tokens, tool calls, and iteration progress,\n expectedOutputSchema: \"example string\", // Zod schema or JSON schema string that defines the expected structure of the AI response. When provided, automatically enables JSON mode and instructs the AI to output in the exact format. Example: z.object({ summary: z.string(), items: z.array(z.object({ name: z.string(), score: z.number() })) }),\n});\n\nconst result = await aiAgent.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// response: string // The AI agents final response to the user message. For text responses, returns plain text. If JSON mode is enabled, returns a JSON string. For image generation models (like gemini-2.5-flash-image-preview), returns base64-encoded image data with data URI format (data:image/png;base64,...),\n// reasoning: string | undefined // The reasoning/thinking tokens from the model (if available). Present for deep research models and reasoning models.,\n// toolCalls: { tool: string // Name of the tool that was called, input: unknown // Input parameters passed to the tool, output: unknown // Output returned by the tool }[] // Array of tool calls made during the conversation,\n// iterations: number // Number of back-and-forth iterations in the agent workflow,\n// totalCost: number | undefined // Total cost in USD for this request (includes tokens + web search for deep research models),\n// error: string // Error message of the run, undefined if successful,\n// success: boolean // Whether the agent execution completed successfully\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "OPENAI_CRED",
  "GOOGLE_GEMINI_CRED",
@@ -30806,7 +30818,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "default": "google/gemini-2.5-flash"
  },
@@ -30891,7 +30905,7 @@
  ],
  "additionalProperties": false
  },
- "usageExample": "// Example usage of slack-notifier bubble\nconst slackNotifier = new SlackNotifierWorkflowBubble({\n contentToFormat: \"example string\", // Raw content or data to format for Slack,\n originalUserQuery: \"example string\", // Original user question or context,\n targetChannel: \"example string\", // Slack channel name (without #) or channel ID,\n messageTitle: \"example string\", // Custom title for the Slack message,\n messageStyle: \"professional\" // options: \"professional\", \"casual\", \"technical\", \"concise\", \"detailed\", // Style and tone for message formatting,\n includeFormatting: true // default, // Include emojis and rich Slack formatting,\n maxMessageLength: 3000 // default, // Maximum message length for Slack,\n aiModel: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", temperature: 0.3 // default, maxTokens: 50000 // default }, // AI model settings for content formatting,\n});\n\nconst result = await slackNotifier.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// success: boolean,\n// error: string,\n// messageInfo: { messageTimestamp: string | undefined, channelId: string | undefined, channelName: string | undefined, formattedMessage: string | undefined, messageLength: number | undefined } | undefined,\n// formattingInfo: { modelUsed: string | undefined, wasTruncated: boolean, originalLength: number | undefined } | undefined\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// Example usage of slack-notifier bubble\nconst slackNotifier = new SlackNotifierWorkflowBubble({\n contentToFormat: \"example string\", // Raw content or data to format for Slack,\n originalUserQuery: \"example string\", // Original user question or context,\n targetChannel: \"example string\", // Slack channel name (without #) or channel ID,\n messageTitle: \"example string\", // Custom title for the Slack message,\n messageStyle: \"professional\" // options: \"professional\", \"casual\", \"technical\", \"concise\", \"detailed\", // Style and tone for message formatting,\n includeFormatting: true // default, // Include emojis and rich Slack formatting,\n maxMessageLength: 3000 // default, // Maximum message length for Slack,\n aiModel: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\", temperature: 0.3 // default, maxTokens: 50000 // default }, // AI model settings for content formatting,\n});\n\nconst result = await slackNotifier.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// success: boolean,\n// error: string,\n// messageInfo: { messageTimestamp: string | undefined, channelId: string | undefined, channelName: string | undefined, formattedMessage: string | undefined, messageLength: number | undefined } | undefined,\n// formattingInfo: { modelUsed: string | undefined, wasTruncated: boolean, originalLength: number | undefined } | undefined\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "SLACK_CRED",
  "OPENAI_CRED",
@@ -30975,7 +30989,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "default": "google/gemini-2.5-flash",
  "description": "AI model to use for query generation"
@@ -31144,7 +31160,7 @@
  ],
  "additionalProperties": false
  },
- "usageExample": "// Example usage of slack-data-assistant bubble\nconst slackDataAssistant = new SlackDataAssistantWorkflow({\n slackChannel: \"example string\", // Slack channel ID where the bot will respond,\n slackThreadTs: \"example string\", // Thread timestamp if replying to a thread,\n userQuestion: \"example string\", // The user question from Slack,\n userName: \"example string\", // Name of the user asking the question,\n name: \"Data Assistant\" // default, // Name of the AI assistant (e.g., \"DataBot\", \"Analytics Assistant\"),\n dataSourceType: \"postgresql\" // options: \"postgresql\", \"mysql\", \"sqlite\", \"mariadb\", \"mssql\", // Type of database to analyze,\n databaseUrl: \"example string\", // Database connection URL (if not using credentials),\n ignoreSSLErrors: false // default, // Ignore SSL certificate errors for database connection,\n aiModel: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", // AI model to use for query generation,\n temperature: 0.3 // default, // Temperature for AI responses (lower = more focused),\n verbosity: \"1\" // options: \"1\", \"2\", \"3\", \"4\", \"5\", // Response verbosity level (1=concise, 5=comprehensive),\n technicality: \"1\" // options: \"1\", \"2\", \"3\", \"4\", \"5\", // Technical complexity level (1=plain English, 5=expert),\n includeQuery: true // default, // Include the SQL query in the response,\n includeExplanation: true // default, // Include query explanation in the response,\n injectedMetadata: { tables: { \"example_key\": { \"example_key\": \"example string\" } }, tableNotes: { \"example_key\": \"example string\" }, rules: [\"example string\"] }, // Additional database context injected from user credentials metadata,\n additionalContext: \"example string\", // Additional context about how to answer the question,\n maxQueries: 20 // default, // Maximum number of queries to run,\n});\n\nconst result = await slackDataAssistant.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed,\n// query: string | undefined // Generated SQL query,\n// queryExplanation: string | undefined // Explanation of the query,\n// queryResults: Record<string, unknown>[] | undefined // Results from the database query,\n// formattedResponse: string | undefined // Formatted response for Slack,\n// slackBlocks: unknown[] | undefined // Slack block kit formatted message,\n// slackMessageTs: string | undefined // Timestamp of sent Slack message,\n// isDataQuestion: boolean | undefined // Whether the question was data-related,\n// metadata: { executionTime: number // Total execution time in milliseconds, rowCount: number | undefined // Number of rows returned, wordCount: number 
| undefined // Word count of response } | undefined\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// Example usage of slack-data-assistant bubble\nconst slackDataAssistant = new SlackDataAssistantWorkflow({\n slackChannel: \"example string\", // Slack channel ID where the bot will respond,\n slackThreadTs: \"example string\", // Thread timestamp if replying to a thread,\n userQuestion: \"example string\", // The user question from Slack,\n userName: \"example string\", // Name of the user asking the question,\n name: \"Data Assistant\" // default, // Name of the AI assistant (e.g., \"DataBot\", \"Analytics Assistant\"),\n dataSourceType: \"postgresql\" // options: \"postgresql\", \"mysql\", \"sqlite\", \"mariadb\", \"mssql\", // Type of database to analyze,\n databaseUrl: \"example string\", // Database connection URL (if not using credentials),\n ignoreSSLErrors: false // default, // Ignore SSL certificate errors for database connection,\n aiModel: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\", // AI model to use for query generation,\n temperature: 0.3 // default, // Temperature for AI responses (lower = more focused),\n verbosity: \"1\" // options: \"1\", \"2\", \"3\", \"4\", \"5\", // Response verbosity level (1=concise, 5=comprehensive),\n technicality: \"1\" // options: \"1\", \"2\", \"3\", \"4\", \"5\", // Technical complexity level (1=plain English, 5=expert),\n includeQuery: true // default, // Include the SQL query in the response,\n includeExplanation: true // default, // Include query explanation in the response,\n injectedMetadata: { tables: { \"example_key\": { \"example_key\": \"example string\" } }, tableNotes: { \"example_key\": \"example string\" }, rules: [\"example string\"] }, // Additional database context injected from user credentials metadata,\n additionalContext: \"example string\", // Additional context about how to answer the question,\n maxQueries: 20 // default, // Maximum number of queries to run,\n});\n\nconst result = await slackDataAssistant.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed,\n// query: string | undefined // Generated SQL query,\n// queryExplanation: string | undefined // Explanation of the query,\n// queryResults: Record<string, unknown>[] | undefined // Results from the database query,\n// formattedResponse: string | undefined // Formatted response for Slack,\n// slackBlocks: unknown[] | undefined // Slack block kit formatted message,\n// slackMessageTs: string | undefined // Timestamp of sent Slack message,\n// isDataQuestion: boolean | undefined // Whether the question was data-related,\n// metadata: { executionTime: number // Total execution time in 
milliseconds, rowCount: number | undefined // Number of rows returned, wordCount: number | undefined // Word count of response } | undefined\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "DATABASE_CRED",
  "SLACK_CRED",
@@ -31232,7 +31248,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "default": "google/gemini-3-flash-preview",
  "description": "AI model to use (format: provider/model-name)"
@@ -31543,7 +31561,7 @@
  ],
  "additionalProperties": false
  },
- "usageExample": "// Example usage of slack-formatter-agent bubble\nconst slackFormatterAgent = new SlackFormatterAgentBubble({\n message: \"example string\", // The message or question to send to the AI agent,\n verbosity: \"1\" // options: \"1\", \"2\", \"3\", \"4\", \"5\", // Response verbosity level (1-5): 1=concise bullet points, 5=comprehensive explanations,\n technicality: \"1\" // options: \"1\", \"2\", \"3\", \"4\", \"5\", // Technical complexity level (1-5): 1=plain English, 5=expert terminology,\n includeBlockKit: true // default, // Include Slack Block Kit JSON for rich formatting,\n includeQuery: false // default, // Include the query that was executed in the response,\n includeExplanation: false // default, // Include explanation of what the query does and why it was chosen,\n model: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\" // AI model to use (format: provider/model-name), temperature: 0.7 // default // Temperature for response randomness (0 = deterministic, 2 = very random), maxTokens: 10000 // default // Maximum number of tokens to generate in response } // structure, // AI model configuration including provider, temperature, and tokens,\n tools: [{ name: \"example string\" // Name of the tool bubble to enable for the AI agent, config: {} // Configuration for the tool bubble }] // example for array, // Array of tool bubbles the AI agent can use,\n maxIterations: 10 // default, // Maximum number of iterations for the agent workflow,\n additionalContext: \"example string\", // Additional context about how to answer the question,\n});\n\nconst result = await slackFormatterAgent.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// response: string // The AI agents formatted response in Slack markdown,\n// blocks: { type: \"section\" | \"header\" | \"divider\" | \"context\" | \"actions\" | \"input\" | \"file\" | \"image\", text: { type: \"plain_text\" | \"mrkdwn\", text: string, emoji: boolean | undefined, verbatim: boolean | undefined } | undefined, block_id: string | undefined, accessory: unknown | undefined, fields: { type: \"plain_text\" | \"mrkdwn\", text: string, emoji: boolean | undefined, verbatim: boolean | undefined }[] | undefined, element: unknown | undefined, label: unknown | undefined, hint: unknown | undefined, optional: boolean | undefined, alt_text: string | undefined, image_url: string | undefined, title: { type: \"plain_text\", text: string, emoji: boolean | undefined } | undefined, elements: { type: \"plain_text\" | \"mrkdwn\", text: string, emoji: boolean | undefined, verbatim: boolean | undefined }[] | undefined }[] | undefined // Slack Block Kit formatted blocks for rich message display,\n// metadata: { verbosityLevel: string // Applied verbosity level, technicalityLevel: string // 
Applied technicality level, wordCount: number // Total word count of response, blockCount: number | undefined // Number of Slack blocks generated } // Metadata about the formatting,\n// toolCalls: { tool: string // Name of the tool that was called, input: unknown // Input parameters passed to the tool, output: unknown // Output returned by the tool }[] | undefined // Array of tool calls made during the conversation,\n// iterations: number // Number of back-and-forth iterations in the agent workflow,\n// error: string // Error message of the run, undefined if successful,\n// success: boolean // Whether the agent execution completed successfully\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// Example usage of slack-formatter-agent bubble\nconst slackFormatterAgent = new SlackFormatterAgentBubble({\n message: \"example string\", // The message or question to send to the AI agent,\n verbosity: \"1\" // options: \"1\", \"2\", \"3\", \"4\", \"5\", // Response verbosity level (1-5): 1=concise bullet points, 5=comprehensive explanations,\n technicality: \"1\" // options: \"1\", \"2\", \"3\", \"4\", \"5\", // Technical complexity level (1-5): 1=plain English, 5=expert terminology,\n includeBlockKit: true // default, // Include Slack Block Kit JSON for rich formatting,\n includeQuery: false // default, // Include the query that was executed in the response,\n includeExplanation: false // default, // Include explanation of what the query does and why it was chosen,\n model: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\" // AI model to use (format: provider/model-name), temperature: 0.7 // default // Temperature for response randomness (0 = deterministic, 2 = very random), maxTokens: 10000 // default // Maximum number of tokens to generate in response } // structure, // AI model configuration including provider, temperature, and tokens,\n tools: [{ name: \"example string\" // Name of the tool bubble to enable for the AI agent, config: {} // Configuration for the tool bubble }] // example for array, // Array of tool bubbles the AI agent can use,\n maxIterations: 10 // default, // Maximum number of iterations for the agent workflow,\n additionalContext: \"example string\", // Additional context about how to answer the question,\n});\n\nconst result = await slackFormatterAgent.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// response: string // The AI agents formatted response in Slack markdown,\n// blocks: { type: \"section\" | \"header\" | \"divider\" | \"context\" | \"actions\" | \"input\" | \"file\" | \"image\", text: { type: \"plain_text\" | \"mrkdwn\", text: string, emoji: boolean | undefined, verbatim: boolean | undefined } | undefined, block_id: string | undefined, accessory: unknown | undefined, fields: { type: \"plain_text\" | \"mrkdwn\", text: string, emoji: boolean | undefined, verbatim: boolean | undefined }[] | undefined, element: unknown | undefined, label: unknown | undefined, hint: unknown | undefined, optional: boolean | undefined, alt_text: string | undefined, image_url: string | undefined, title: { type: \"plain_text\", text: string, emoji: boolean | undefined } | undefined, elements: { type: \"plain_text\" | \"mrkdwn\", text: string, emoji: boolean | undefined, verbatim: boolean | undefined }[] | undefined }[] | undefined // Slack Block Kit formatted blocks for rich message display,\n// metadata: 
{ verbosityLevel: string // Applied verbosity level, technicalityLevel: string // Applied technicality level, wordCount: number // Total word count of response, blockCount: number | undefined // Number of Slack blocks generated } // Metadata about the formatting,\n// toolCalls: { tool: string // Name of the tool that was called, input: unknown // Input parameters passed to the tool, output: unknown // Output returned by the tool }[] | undefined // Array of tool calls made during the conversation,\n// iterations: number // Number of back-and-forth iterations in the agent workflow,\n// error: string // Error message of the run, undefined if successful,\n// success: boolean // Whether the agent execution completed successfully\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "OPENAI_CRED",
  "GOOGLE_GEMINI_CRED",
@@ -32340,7 +32358,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "default": "google/gemini-2.5-flash",
  "description": "AI model to use for field identification"
@@ -32491,7 +32511,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "default": "google/gemini-2.5-flash",
  "description": "AI model to use for field identification and autofill"
@@ -32834,7 +32856,7 @@
  }
  ]
  },
- "usageExample": "// Identify example\nconst pdfOcrWorkflow_identify = new PDFOcrWorkflow({\n mode: \"identify\", // Identify form fields and generate descriptive names\n pdfData: \"example string\", // Base64 encoded PDF data\n discoveryOptions: { targetPage: 42 // Extract fields from specific page only (default: all pages) } // structure, // Options for PDF field discovery\n imageOptions: { format: \"png\" // options: \"png\", \"jpeg\" // Output image format, quality: 0.8 // default // JPEG quality (0.1-1.0, only for JPEG format), dpi: 150 // default // Output DPI (dots per inch), pages: [42] // Specific page numbers to convert (1-indexed). If not provided, converts all pages } // structure, // Options for PDF to images conversion\n aiOptions: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\" // AI model to use for field identification, temperature: 0.3 // default // Temperature for AI responses (lower = more consistent), maxTokens: 50000 // default // Maximum tokens for AI response, jsonMode: true // default // Enable JSON mode to ensure clean JSON output } // structure, // AI agent configuration options\n});\n\nconst result = await pdfOcrWorkflow_identify.action();\n// outputSchema for result.data when operation === 'identify':\n// {\n// mode: \"identify\" // Result from identify mode,\n// extractedFields: { id: number // Field ID from discovery or auto-generated, fieldName: string // Descriptive name generated based on PDF content, confidence: number // AI confidence in the field identification (0.0-1.0) }[] // Array of identified fields with descriptive names,\n// discoveryData: { totalFields: number, fieldsWithCoordinates: number, pages: number[] } // Summary of field discovery results,\n// imageData: { totalPages: number, convertedPages: number, format: string, dpi: number } // Summary of image conversion results,\n// aiAnalysis: { model: string, iterations: number, processingTime: number | undefined } // AI analysis metadata,\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed\n// }\n\n\n// Autofill example\nconst pdfOcrWorkflow_autofill = new PDFOcrWorkflow({\n mode: \"autofill\", // Identify form fields and autofill with client information\n pdfData: \"example string\", // Base64 encoded PDF data\n clientInformation: \"example string\", // Free text containing client information to use for autofilling form fields\n discoveryOptions: { targetPage: 42 // Extract fields from specific page only (default: all pages) } // structure, // Options for PDF field discovery\n imageOptions: { format: \"png\" // options: \"png\", \"jpeg\" // Output image format, quality: 0.8 // default // JPEG quality (0.1-1.0, only for JPEG format), dpi: 150 // default // Output DPI (dots per inch), pages: [42] // Specific page numbers to convert (1-indexed). 
If not provided, converts all pages } // structure, // Options for PDF to images conversion\n aiOptions: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\" // AI model to use for field identification and autofill, temperature: 0.3 // default // Temperature for AI responses (lower = more consistent), maxTokens: 50000 // default // Maximum tokens for AI response, jsonMode: true // default // Enable JSON mode to ensure clean JSON output } // structure, // AI agent configuration options\n});\n\nconst result = await pdfOcrWorkflow_autofill.action();\n// outputSchema for result.data when operation === 'autofill':\n// {\n// mode: \"autofill\" // Result from autofill mode,\n// extractedFields: { id: number // Field ID from discovery or auto-generated, originalFieldName: string | undefined // Original field name from discovery for precise matching, fieldName: string // Descriptive name generated based on PDF content, value: string // Value to fill in the field based on client information, confidence: number // AI confidence in the field identification and value assignment (0.0-1.0) }[] // Array of identified fields with values for autofill,\n// filledPdfData: string // Base64 encoded filled PDF data,\n// discoveryData: { totalFields: number, fieldsWithCoordinates: number, pages: number[] } // Summary of field discovery results,\n// imageData: { totalPages: number, convertedPages: number, format: string, dpi: number } // Summary of image conversion results,\n// aiAnalysis: { model: string, iterations: number, processingTime: number | undefined } // AI analysis metadata,\n// fillResults: { filledFields: number, successfullyFilled: number } // Summary of PDF filling results,\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed\n// }\n\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`pdf-ocr-workflow failed: ${result.error}`);\n}\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// Identify example\nconst pdfOcrWorkflow_identify = new PDFOcrWorkflow({\n mode: \"identify\", // Identify form fields and generate descriptive names\n pdfData: \"example string\", // Base64 encoded PDF data\n discoveryOptions: { targetPage: 42 // Extract fields from specific page only (default: all pages) } // structure, // Options for PDF field discovery\n imageOptions: { format: \"png\" // options: \"png\", \"jpeg\" // Output image format, quality: 0.8 // default // JPEG quality (0.1-1.0, only for JPEG format), dpi: 150 // default // Output DPI (dots per inch), pages: [42] // Specific page numbers to convert (1-indexed). If not provided, converts all pages } // structure, // Options for PDF to images conversion\n aiOptions: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\" // AI model to use for field identification, temperature: 0.3 // default // Temperature for AI responses (lower = more consistent), maxTokens: 50000 // default // Maximum tokens for AI response, jsonMode: true // default // Enable JSON mode to ensure clean JSON output } // structure, // AI agent configuration options\n});\n\nconst result = await pdfOcrWorkflow_identify.action();\n// outputSchema for result.data when operation === 'identify':\n// {\n// mode: \"identify\" // Result from identify mode,\n// extractedFields: { id: number // Field ID from discovery or auto-generated, fieldName: string // Descriptive name generated based on PDF content, confidence: number // AI confidence in the field identification (0.0-1.0) }[] // Array of identified fields with descriptive names,\n// discoveryData: { totalFields: number, fieldsWithCoordinates: number, pages: number[] } // Summary of field discovery results,\n// imageData: { totalPages: number, convertedPages: number, format: string, dpi: number } // Summary of image conversion results,\n// aiAnalysis: { model: string, iterations: number, processingTime: number | undefined } // AI analysis metadata,\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed\n// }\n\n\n// Autofill example\nconst pdfOcrWorkflow_autofill = new PDFOcrWorkflow({\n mode: \"autofill\", // Identify form fields and autofill with client information\n pdfData: \"example string\", // Base64 encoded PDF data\n clientInformation: \"example string\", // Free text containing client information to use for autofilling form fields\n discoveryOptions: { targetPage: 42 // Extract fields from specific page only (default: all pages) } // structure, // Options for PDF field discovery\n imageOptions: { format: \"png\" // options: \"png\", \"jpeg\" // Output image format, quality: 0.8 // default // JPEG quality (0.1-1.0, only for JPEG format), dpi: 150 // default // Output DPI (dots per inch), pages: [42] // Specific page numbers 
to convert (1-indexed). If not provided, converts all pages } // structure, // Options for PDF to images conversion\n aiOptions: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\" // AI model to use for field identification and autofill, temperature: 0.3 // default // Temperature for AI responses (lower = more consistent), maxTokens: 50000 // default // Maximum tokens for AI response, jsonMode: true // default // Enable JSON mode to ensure clean JSON output } // structure, // AI agent configuration options\n});\n\nconst result = await pdfOcrWorkflow_autofill.action();\n// outputSchema for result.data when operation === 'autofill':\n// {\n// mode: \"autofill\" // Result from autofill mode,\n// extractedFields: { id: number // Field ID from discovery or auto-generated, originalFieldName: string | undefined // Original field name from discovery for precise matching, fieldName: string // Descriptive name generated based on PDF content, value: string // Value to fill in the field based on client information, confidence: number // AI confidence in the field identification and value assignment (0.0-1.0) }[] // Array of identified fields with values for autofill,\n// filledPdfData: string // Base64 encoded filled PDF data,\n// discoveryData: { totalFields: number, fieldsWithCoordinates: number, pages: number[] } // Summary of field discovery results,\n// imageData: { totalPages: number, convertedPages: number, format: string, dpi: number } // Summary of image conversion results,\n// aiAnalysis: { model: string, iterations: number, processingTime: number | undefined } // AI analysis metadata,\n// fillResults: { filledFields: number, successfullyFilled: number } // Summary of PDF filling results,\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed\n// }\n\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`pdf-ocr-workflow failed: ${result.error}`);\n}\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "GOOGLE_GEMINI_CRED",
  "OPENAI_CRED",
@@ -32949,7 +32971,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "default": "google/gemini-2.5-flash",
  "description": "AI model to use for document analysis"
@@ -33160,7 +33184,7 @@
  ],
  "additionalProperties": false
  },
- "usageExample": "// Example usage of generate-document-workflow bubble\nconst generateDocumentWorkflow = new GenerateDocumentWorkflow({\n documents: [{ content: \"example string\", index: 42, metadata: { originalFilename: \"example string\", pageCount: 42, uploadedImages: [{ pageNumber: 42, fileName: \"example string\", fileUrl: \"example string\" }] } }], // Array of document objects with content, index, and metadata,\n outputDescription: \"example string\", // Description of what the user wants to extract (e.g., \"expense tracking with vendor, amount, date, category\"),\n outputFormat: \"html\" // options: \"html\", \"csv\", \"json\", // Output format for the structured data,\n aiOptions: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\" // AI model to use for document analysis, temperature: 0.1 // default // Temperature for AI responses (lower = more consistent), maxTokens: 50000 // default // Maximum tokens for AI response, jsonMode: true // default // Enable JSON mode to ensure clean JSON output } // structure, // AI agent configuration options,\n});\n\nconst result = await generateDocumentWorkflow.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// columns: { name: string // Column name, type: \"string\" | \"number\" | \"integer\" | \"float\" | \"date\" | \"boolean\" // Data type of the column, description: string // Description of what this column contains }[] // Column definitions for the structured data,\n// rows: Record<string, unknown>[] // Array of data rows extracted from documents,\n// metadata: { totalDocuments: number // Number of documents processed, totalRows: number // Number of data rows extracted, totalColumns: number // Number of columns in the result, processingTime: number // Processing time in milliseconds, extractedFrom: string[] // Summary of document sources } // Metadata about the extraction process,\n// generatedFiles: { html: string | undefined // Generated HTML table, csv: string | undefined // Generated CSV data, json: string | undefined // Generated JSON data } // Generated files in requested formats,\n// aiAnalysis: { model: string // AI model used, iterations: number // Number of AI iterations, processingTime: number | undefined // AI processing time } // AI analysis metadata,\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// Example usage of generate-document-workflow bubble\nconst generateDocumentWorkflow = new GenerateDocumentWorkflow({\n documents: [{ content: \"example string\", index: 42, metadata: { originalFilename: \"example string\", pageCount: 42, uploadedImages: [{ pageNumber: 42, fileName: \"example string\", fileUrl: \"example string\" }] } }], // Array of document objects with content, index, and metadata,\n outputDescription: \"example string\", // Description of what the user wants to extract (e.g., \"expense tracking with vendor, amount, date, category\"),\n outputFormat: \"html\" // options: \"html\", \"csv\", \"json\", // Output format for the structured data,\n aiOptions: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\" // AI model to use for document analysis, temperature: 0.1 // default // Temperature for AI responses (lower = more consistent), maxTokens: 50000 // default // Maximum tokens for AI response, jsonMode: true // default // Enable JSON mode to ensure clean JSON output } // structure, // AI agent configuration options,\n});\n\nconst result = await generateDocumentWorkflow.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// columns: { name: string // Column name, type: \"string\" | \"number\" | \"integer\" | \"float\" | \"date\" | \"boolean\" // Data type of the column, description: string // Description of what this column contains }[] // Column definitions for the structured data,\n// rows: Record<string, unknown>[] // Array of data rows extracted from documents,\n// metadata: { totalDocuments: number // Number of documents processed, totalRows: number // Number of data rows extracted, totalColumns: number // Number of columns in the result, processingTime: number // Processing time in milliseconds, extractedFrom: string[] // Summary of document sources } // Metadata about the extraction process,\n// generatedFiles: { html: string | undefined // Generated HTML table, csv: string | undefined // Generated CSV data, json: string | undefined // Generated JSON data } // Generated files in requested formats,\n// aiAnalysis: { model: string // AI model used, iterations: number // Number of AI iterations, processingTime: number | undefined // AI processing time } // AI analysis metadata,\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "GOOGLE_GEMINI_CRED",
  "OPENAI_CRED",
@@ -33299,7 +33323,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "default": "google/gemini-2.5-flash",
  "description": "AI model to use for document analysis and conversion"
@@ -33573,7 +33599,7 @@
  ],
  "additionalProperties": false
  },
- "usageExample": "// Example usage of parse-document-workflow bubble\nconst parseDocumentWorkflow = new ParseDocumentWorkflow({\n documentData: \"example string\", // Base64 encoded document data (PDF or image) OR R2 file URL starting with https://,\n documentType: \"pdf\" // options: \"pdf\", \"image\", // Type of document being processed,\n isFileUrl: false // default, // Set to true if documentData is an R2 file URL instead of base64,\n conversionOptions: { preserveStructure: true // default // Maintain original document structure and hierarchy, includeVisualDescriptions: true // default // Include detailed descriptions of charts, images, and diagrams, extractNumericalData: true // default // Extract specific numerical values from charts and tables, combinePages: false // default // Deprecated: Pages are always kept separate with clear headers } // structure, // Options for document conversion and parsing,\n imageOptions: { format: \"png\" // options: \"png\", \"jpeg\" // Output image format for PDF conversion, quality: 0.9 // default // Image quality (0.1-1.0, higher = better quality), dpi: 200 // default // Output DPI for PDF conversion (higher = better quality), pages: [42] // Specific page numbers to process (1-indexed) } // structure, // Options for PDF to images conversion,\n aiOptions: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\" // AI model to use for document analysis and conversion, temperature: 0.4 // default // Temperature for AI responses (balanced for accuracy vs recitation), maxTokens: 2000 // default // Maximum tokens for AI response, jsonMode: false // default // Use JSON mode for structured output } // structure, // AI agent configuration options,\n storageOptions: { uploadImages: false // default // Whether to upload converted page images to S3, bucketName: \"example string\" // S3 bucket name for image uploads, pageImageUrls: [{ pageNumber: 42, uploadUrl: \"example string\", fileName: \"example string\" }] // Pre-generated upload URLs for page images, userId: \"example string\" // User ID for secure file isolation }, // Storage options for uploading page images,\n});\n\nconst result = await parseDocumentWorkflow.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// markdown: string // Generated markdown content from the document,\n// pages: { pageNumber: number // Page number (1-indexed), markdown: string // Markdown content for this page, hasCharts: boolean // Whether this page contains charts or graphs, hasTables: boolean // Whether this page contains tables, hasImages: boolean // Whether this page contains images }[] // Per-page analysis results,\n// metadata: { totalPages: number // Total number of pages processed, processedPages: number // Number of pages successfully processed, 
hasVisualElements: boolean // Whether document contains charts, tables, or images, processingTime: number // Total processing time in milliseconds, imageFormat: string // Image format used for conversion, imageDpi: number // DPI used for image conversion } // Metadata about the parsing process,\n// conversionSummary: { totalCharacters: number // Total characters in generated markdown, tablesExtracted: number // Number of tables converted to markdown, chartsDescribed: number // Number of charts and graphs described, imagesDescribed: number // Number of images described } // Summary of conversion results,\n// aiAnalysis: { model: string // AI model used for analysis, iterations: number // Number of AI iterations, processingTime: number // AI processing time in milliseconds } // AI analysis metadata,\n// uploadedImages: { pageNumber: number, fileName: string, fileUrl: string | undefined, uploaded: boolean }[] | undefined // Information about uploaded page images,\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// Example usage of parse-document-workflow bubble\nconst parseDocumentWorkflow = new ParseDocumentWorkflow({\n documentData: \"example string\", // Base64 encoded document data (PDF or image) OR R2 file URL starting with https://,\n documentType: \"pdf\" // options: \"pdf\", \"image\", // Type of document being processed,\n isFileUrl: false // default, // Set to true if documentData is an R2 file URL instead of base64,\n conversionOptions: { preserveStructure: true // default // Maintain original document structure and hierarchy, includeVisualDescriptions: true // default // Include detailed descriptions of charts, images, and diagrams, extractNumericalData: true // default // Extract specific numerical values from charts and tables, combinePages: false // default // Deprecated: Pages are always kept separate with clear headers } // structure, // Options for document conversion and parsing,\n imageOptions: { format: \"png\" // options: \"png\", \"jpeg\" // Output image format for PDF conversion, quality: 0.9 // default // Image quality (0.1-1.0, higher = better quality), dpi: 200 // default // Output DPI for PDF conversion (higher = better quality), pages: [42] // Specific page numbers to process (1-indexed) } // structure, // Options for PDF to images conversion,\n aiOptions: { model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\" // AI model to use for document analysis and conversion, temperature: 0.4 // default // Temperature for AI responses (balanced for accuracy vs recitation), maxTokens: 2000 // default // Maximum tokens for AI response, jsonMode: false // default // Use JSON mode for structured output } // structure, // AI agent configuration options,\n storageOptions: { uploadImages: false // default // Whether to upload converted page images to S3, bucketName: \"example string\" // S3 bucket name for image uploads, pageImageUrls: [{ pageNumber: 42, uploadUrl: \"example string\", fileName: \"example string\" }] // Pre-generated upload URLs for page images, userId: \"example string\" // User ID for secure file isolation }, // Storage options for uploading page images,\n});\n\nconst result = await parseDocumentWorkflow.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// markdown: string // Generated markdown content from the document,\n// pages: { pageNumber: number // Page number (1-indexed), markdown: string // Markdown content for this page, hasCharts: boolean // Whether this page contains charts or graphs, hasTables: boolean // Whether this page contains tables, hasImages: boolean // Whether this page contains images }[] // Per-page analysis results,\n// metadata: { totalPages: number // Total number of pages 
processed, processedPages: number // Number of pages successfully processed, hasVisualElements: boolean // Whether document contains charts, tables, or images, processingTime: number // Total processing time in milliseconds, imageFormat: string // Image format used for conversion, imageDpi: number // DPI used for image conversion } // Metadata about the parsing process,\n// conversionSummary: { totalCharacters: number // Total characters in generated markdown, tablesExtracted: number // Number of tables converted to markdown, chartsDescribed: number // Number of charts and graphs described, imagesDescribed: number // Number of images described } // Summary of conversion results,\n// aiAnalysis: { model: string // AI model used for analysis, iterations: number // Number of AI iterations, processingTime: number // AI processing time in milliseconds } // AI analysis metadata,\n// uploadedImages: { pageNumber: number, fileName: string, fileUrl: string | undefined, uploaded: boolean }[] | undefined // Information about uploaded page images,\n// success: boolean // Whether the workflow completed successfully,\n// error: string // Error message if workflow failed\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "GOOGLE_GEMINI_CRED",
  "OPENAI_CRED",
@@ -34858,7 +34884,9 @@
  "openrouter/anthropic/claude-sonnet-4.5",
  "openrouter/google/gemini-3-pro-preview",
  "openrouter/morph/morph-v3-large",
- "openrouter/openai/gpt-oss-120b"
+ "openrouter/openai/gpt-oss-120b",
+ "openrouter/openai/o3-deep-research",
+ "openrouter/openai/o4-mini-deep-research"
  ],
  "description": "Model to use for the research agent (default: google/gemini-3-pro-preview)",
  "default": "google/gemini-3-pro-preview"
@@ -34929,7 +34957,7 @@
  ],
  "additionalProperties": false
  },
- "usageExample": "// Example usage of research-agent-tool bubble\nconst researchAgentTool = new ResearchAgentTool({\n task: \"example string\", // The research task that requires searching the internet and gathering information,\n expectedResultSchema: \"example string\", // Zod schema or JSON schema string that defines the expected structure of the research result. Example: z.object({ trends: z.array(z.string()).describe(\"An array of trends\"), summary: z.string().describe(\"A summary of the trends\") }) or JSON.stringify({ type: \"object\", properties: { trends: { type: \"array\", items: { type: \"string\" } }, summary: { type: \"string\" } } }),\n model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", // Model to use for the research agent (default: google/gemini-3-pro-preview),\n maxTokens: 40000 // default, // Maximum number of tokens for the research agent (default: 40000),\n maxIterations: 400 // default, // Maximum number of iterations for the research agent (default: 100),\n});\n\nconst result = await researchAgentTool.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// result: unknown // The research result matching the expected JSON schema structure, parsed to object,\n// summary: string // 1-2 sentence summary of what research was conducted and completed,\n// sourcesUsed: string[] // Array of URLs and sources that were searched and scraped during research,\n// iterationsUsed: number // Number of AI agent iterations used to complete the research,\n// success: boolean // Whether the research task was completed successfully,\n// error: string // Error message if research failed\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// Example usage of research-agent-tool bubble\nconst researchAgentTool = new ResearchAgentTool({\n task: \"example string\", // The research task that requires searching the internet and gathering information,\n expectedResultSchema: \"example string\", // Zod schema or JSON schema string that defines the expected structure of the research result. Example: z.object({ trends: z.array(z.string()).describe(\"An array of trends\"), summary: z.string().describe(\"A summary of the trends\") }) or JSON.stringify({ type: \"object\", properties: { trends: { type: \"array\", items: { type: \"string\" } }, summary: { type: \"string\" } } }),\n model: \"openai/gpt-5\" // options: \"openai/gpt-5\", \"openai/gpt-5-mini\", \"openai/gpt-5.1\", \"openai/gpt-5.2\", \"google/gemini-2.5-pro\", \"google/gemini-2.5-flash\", \"google/gemini-2.5-flash-lite\", \"google/gemini-2.5-flash-image-preview\", \"google/gemini-3-pro-preview\", \"google/gemini-3-pro-image-preview\", \"google/gemini-3-flash-preview\", \"anthropic/claude-sonnet-4-5\", \"anthropic/claude-opus-4-5\", \"anthropic/claude-haiku-4-5\", \"openrouter/x-ai/grok-code-fast-1\", \"openrouter/z-ai/glm-4.6\", \"openrouter/anthropic/claude-sonnet-4.5\", \"openrouter/google/gemini-3-pro-preview\", \"openrouter/morph/morph-v3-large\", \"openrouter/openai/gpt-oss-120b\", \"openrouter/openai/o3-deep-research\", \"openrouter/openai/o4-mini-deep-research\", // Model to use for the research agent (default: google/gemini-3-pro-preview),\n maxTokens: 40000 // default, // Maximum number of tokens for the research agent (default: 40000),\n maxIterations: 400 // default, // Maximum number of iterations for the research agent (default: 100),\n});\n\nconst result = await researchAgentTool.action();\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`${metadata.name} failed: ${result.error}`);\n}\n\n// outputSchema for result.data:\n// {\n// result: unknown // The research result matching the expected JSON schema structure, parsed to object,\n// summary: string // 1-2 sentence summary of what research was conducted and completed,\n// sourcesUsed: string[] // Array of URLs and sources that were searched and scraped during research,\n// iterationsUsed: number // Number of AI agent iterations used to complete the research,\n// success: boolean // Whether the research task was completed successfully,\n// error: string // Error message if research failed\n// }\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "FIRECRAWL_API_KEY",
  "GOOGLE_GEMINI_CRED",
@@ -55411,6 +55439,11 @@
  "type": "string",
  "description": "Optional tag to add to the candidate. Can be a tag ID (UUID) or tag name. If a name is provided, the tag will be created first."
  },
+ "allow_duplicate_linkedin": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to allow creating a candidate with a LinkedIn URL that already exists. When false (default), the existing candidate is returned instead of creating a duplicate."
+ },
  "credentials": {
  "type": "object",
  "additionalProperties": {
@@ -56212,7 +56245,11 @@
  "name"
  ],
  "additionalProperties": false,
- "description": "Created candidate details"
+ "description": "Created candidate details (or existing candidate if duplicate was found)"
+ },
+ "duplicate": {
+ "type": "boolean",
+ "description": "True if a candidate with the same LinkedIn profile already existed and was returned instead of creating a new one"
  },
  "error": {
  "type": "string",
@@ -56729,7 +56766,7 @@
  }
  ]
  },
- "usageExample": "// List Candidates example\nconst ashby_list_candidates = new AshbyBubble({\n operation: \"list_candidates\", // List all candidates with optional filtering\n limit: 100 // default, // Maximum number of candidates to return (1-100)\n cursor: \"example string\", // Pagination cursor for fetching subsequent pages\n status: \"Hired\" // options: \"Hired\", \"Archived\", \"Active\", \"Lead\", // Filter candidates by application status\n job_id: \"example string\", // Filter candidates by specific job ID\n created_after: 42, // Unix timestamp in milliseconds to filter candidates created after this time\n});\n\nconst result = await ashby_list_candidates.action();\n// outputSchema for result.data when operation === 'list_candidates':\n// {\n// operation: \"list_candidates\" // List candidates operation,\n// success: boolean // Whether the operation was successful,\n// candidates: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, emailAddresses: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email }[] | undefined // All email addresses, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, phoneNumbers: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone }[] | undefined // All phone numbers, socialLinks: { url: string // Social link URL, type: string // Type of social link (e.g., LinkedIn, GitHub) }[] | undefined // Social media links, tags: { id: string // Tag ID, title: string // Tag title, isArchived: boolean | undefined // Whether the tag is archived }[] | undefined // Tags assigned to candidate, position: string | undefined | null // Current position, company: string | undefined | null // Current company, school: string | undefined | null // School, applicationIds: string[] | undefined // IDs of applications for this candidate, resumeFileHandle: { id: string // File ID, name: string // File name, handle: string // File handle for download } | undefined | null // Resume file handle, fileHandles: { id: string // File ID, name: string // File name, handle: string // File handle for download }[] | undefined // All file handles, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values }[] | undefined // List of candidates,\n// next_cursor: string | undefined // Cursor for fetching the next page of results,\n// more_data_available: boolean | undefined // Whether more data is available,\n// sync_token: string | undefined // Token for incremental sync,\n// error: string // Error message if operation failed\n// }\n\n\n// Get Candidate example\nconst ashby_get_candidate = new AshbyBubble({\n operation: \"get_candidate\", // Get detailed information about a specific 
candidate\n candidate_id: \"example string\", // UUID of the candidate to retrieve\n});\n\nconst result = await ashby_get_candidate.action();\n// outputSchema for result.data when operation === 'get_candidate':\n// {\n// operation: \"get_candidate\" // Get candidate operation,\n// success: boolean // Whether the operation was successful,\n// candidate: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values } | undefined // Candidate details,\n// error: string // Error message if operation failed\n// }\n\n\n// Create Candidate example\nconst ashby_create_candidate = new AshbyBubble({\n operation: \"create_candidate\", // Create a new candidate\n name: \"example string\", // Candidate's full name (first and last name)\n emails: [{ email: \"example string\" // Email address, type: \"Personal\" // options: \"Personal\", \"Work\", \"Other\" // Type of email (Personal, Work, or Other) }], // Candidate's email addresses with type. The Personal email becomes the primary email, others become alternates.\n phone_number: \"example string\", // Candidate's primary phone number\n linkedin_url: \"example string\", // URL to the candidate's LinkedIn profile\n github_url: \"example string\", // URL to the candidate's GitHub profile\n website: \"example string\", // URL of the candidate's website\n source_id: \"example string\", // The source ID to set on the candidate\n credited_to_user_id: \"example string\", // The ID of the user the candidate will be credited to\n tag: \"example string\", // Optional tag to add to the candidate. Can be a tag ID (UUID) or tag name. 
If a name is provided, the tag will be created first.\n});\n\nconst result = await ashby_create_candidate.action();\n// outputSchema for result.data when operation === 'create_candidate':\n// {\n// operation: \"create_candidate\" // Create candidate operation,\n// success: boolean // Whether the operation was successful,\n// candidate: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values } | undefined // Created candidate details,\n// error: string // Error message if operation failed\n// }\n\n\n// Search Candidates example\nconst ashby_search_candidates = new AshbyBubble({\n operation: \"search_candidates\", // Search for candidates by email or name\n email: \"example string\", // Search by candidate email address\n name: \"example string\", // Search by candidate name\n});\n\nconst result = await ashby_search_candidates.action();\n// outputSchema for result.data when operation === 'search_candidates':\n// {\n// operation: \"search_candidates\" // Search candidates operation,\n// success: boolean // Whether the operation was successful,\n// candidates: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values }[] | undefined // List of matching candidates,\n// error: string // Error message if operation failed\n// }\n\n\n// Add Tag example\nconst ashby_add_tag = new AshbyBubble({\n operation: \"add_tag\", // Add a tag to a candidate\n candidate_id: \"example string\", // UUID of the candidate\n tag_id: \"example string\", // UUID of the tag to add\n});\n\nconst result = await ashby_add_tag.action();\n// outputSchema for result.data when operation === 'add_tag':\n// {\n// operation: \"add_tag\" // Add tag operation,\n// success: boolean // Whether the operation was successful,\n// candidate: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update 
timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values } | undefined // Updated candidate details,\n// error: string // Error message if operation failed\n// }\n\n\n// List Tags example\nconst ashby_list_tags = new AshbyBubble({\n operation: \"list_tags\", // List all candidate tags\n include_archived: false // default, // Whether to include archived tags\n});\n\nconst result = await ashby_list_tags.action();\n// outputSchema for result.data when operation === 'list_tags':\n// {\n// operation: \"list_tags\" // List tags operation,\n// success: boolean // Whether the operation was successful,\n// tags: { id: string // Tag ID, title: string // Tag title, isArchived: boolean | undefined // Whether the tag is archived }[] | undefined // List of candidate tags,\n// error: string // Error message if operation failed\n// }\n\n\n// Create Tag example\nconst ashby_create_tag = new AshbyBubble({\n operation: \"create_tag\", // Create a new candidate tag\n title: \"example string\", // Title of the tag to create\n});\n\nconst result = await ashby_create_tag.action();\n// outputSchema for result.data when operation === 'create_tag':\n// {\n// operation: \"create_tag\" // Create tag operation,\n// success: boolean // Whether the operation was successful,\n// tag: { id: string // Tag ID, title: string // Tag title, isArchived: boolean | undefined // Whether the tag is archived } | undefined // Created tag details,\n// error: string // Error message if operation failed\n// }\n\n\n// List Custom Fields example\nconst ashby_list_custom_fields = new AshbyBubble({\n operation: \"list_custom_fields\", // List all custom field definitions\n limit: 100 // default, // Maximum number of custom fields to return (1-100)\n cursor: \"example string\", // Pagination cursor for fetching subsequent pages\n sync_token: \"example string\", // Token for incremental synchronization\n});\n\nconst result = await ashby_list_custom_fields.action();\n// outputSchema for result.data when operation === 'list_custom_fields':\n// {\n// operation: \"list_custom_fields\" // List custom fields operation,\n// success: boolean // Whether the operation was successful,\n// custom_fields: { id: string // Custom field ID (UUID), isPrivate: boolean // Whether the field is private, title: string // Custom field title, objectType: string // Object type this field applies to (e.g., Application, Candidate), isArchived: boolean // Whether the field is archived, fieldType: string // Type of field (e.g., MultiValueSelect, String, Number), selectableValues: { label: string // Display label for the value, value: string // Value identifier, isArchived: boolean // Whether the value is archived }[] | undefined // Available values for select-type fields }[] | undefined // List of custom field definitions,\n// next_cursor: string | undefined // Cursor for fetching the next page of results,\n// 
more_data_available: boolean | undefined // Whether more data is available,\n// sync_token: string | undefined // Token for incremental sync,\n// error: string // Error message if operation failed\n// }\n\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`ashby failed: ${result.error}`);\n}\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
+ "usageExample": "// List Candidates example\nconst ashby_list_candidates = new AshbyBubble({\n operation: \"list_candidates\", // List all candidates with optional filtering\n limit: 100 // default, // Maximum number of candidates to return (1-100)\n cursor: \"example string\", // Pagination cursor for fetching subsequent pages\n status: \"Hired\" // options: \"Hired\", \"Archived\", \"Active\", \"Lead\", // Filter candidates by application status\n job_id: \"example string\", // Filter candidates by specific job ID\n created_after: 42, // Unix timestamp in milliseconds to filter candidates created after this time\n});\n\nconst result = await ashby_list_candidates.action();\n// outputSchema for result.data when operation === 'list_candidates':\n// {\n// operation: \"list_candidates\" // List candidates operation,\n// success: boolean // Whether the operation was successful,\n// candidates: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, emailAddresses: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email }[] | undefined // All email addresses, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, phoneNumbers: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone }[] | undefined // All phone numbers, socialLinks: { url: string // Social link URL, type: string // Type of social link (e.g., LinkedIn, GitHub) }[] | undefined // Social media links, tags: { id: string // Tag ID, title: string // Tag title, isArchived: boolean | undefined // Whether the tag is archived }[] | undefined // Tags assigned to candidate, position: string | undefined | null // Current position, company: string | undefined | null // Current company, school: string | undefined | null // School, applicationIds: string[] | undefined // IDs of applications for this candidate, resumeFileHandle: { id: string // File ID, name: string // File name, handle: string // File handle for download } | undefined | null // Resume file handle, fileHandles: { id: string // File ID, name: string // File name, handle: string // File handle for download }[] | undefined // All file handles, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values }[] | undefined // List of candidates,\n// next_cursor: string | undefined // Cursor for fetching the next page of results,\n// more_data_available: boolean | undefined // Whether more data is available,\n// sync_token: string | undefined // Token for incremental sync,\n// error: string // Error message if operation failed\n// }\n\n\n// Get Candidate example\nconst ashby_get_candidate = new AshbyBubble({\n operation: \"get_candidate\", // Get detailed information about a specific 
candidate\n candidate_id: \"example string\", // UUID of the candidate to retrieve\n});\n\nconst result = await ashby_get_candidate.action();\n// outputSchema for result.data when operation === 'get_candidate':\n// {\n// operation: \"get_candidate\" // Get candidate operation,\n// success: boolean // Whether the operation was successful,\n// candidate: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values } | undefined // Candidate details,\n// error: string // Error message if operation failed\n// }\n\n\n// Create Candidate example\nconst ashby_create_candidate = new AshbyBubble({\n operation: \"create_candidate\", // Create a new candidate\n name: \"example string\", // Candidate's full name (first and last name)\n emails: [{ email: \"example string\" // Email address, type: \"Personal\" // options: \"Personal\", \"Work\", \"Other\" // Type of email (Personal, Work, or Other) }], // Candidate's email addresses with type. The Personal email becomes the primary email, others become alternates.\n phone_number: \"example string\", // Candidate's primary phone number\n linkedin_url: \"example string\", // URL to the candidate's LinkedIn profile\n github_url: \"example string\", // URL to the candidate's GitHub profile\n website: \"example string\", // URL of the candidate's website\n source_id: \"example string\", // The source ID to set on the candidate\n credited_to_user_id: \"example string\", // The ID of the user the candidate will be credited to\n tag: \"example string\", // Optional tag to add to the candidate. Can be a tag ID (UUID) or tag name. If a name is provided, the tag will be created first.\n allow_duplicate_linkedin: false // default, // Whether to allow creating a candidate with a LinkedIn URL that already exists. 
When false (default), the existing candidate is returned instead of creating a duplicate.\n});\n\nconst result = await ashby_create_candidate.action();\n// outputSchema for result.data when operation === 'create_candidate':\n// {\n// operation: \"create_candidate\" // Create candidate operation,\n// success: boolean // Whether the operation was successful,\n// candidate: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values } | undefined // Created candidate details (or existing candidate if duplicate was found),\n// duplicate: boolean | undefined // True if a candidate with the same LinkedIn profile already existed and was returned instead of creating a new one,\n// error: string // Error message if operation failed\n// }\n\n\n// Search Candidates example\nconst ashby_search_candidates = new AshbyBubble({\n operation: \"search_candidates\", // Search for candidates by email or name\n email: \"example string\", // Search by candidate email address\n name: \"example string\", // Search by candidate name\n});\n\nconst result = await ashby_search_candidates.action();\n// outputSchema for result.data when operation === 'search_candidates':\n// {\n// operation: \"search_candidates\" // Search candidates operation,\n// success: boolean // Whether the operation was successful,\n// candidates: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values }[] | undefined // List of matching candidates,\n// error: string // Error message if operation failed\n// }\n\n\n// Add Tag example\nconst ashby_add_tag = new AshbyBubble({\n operation: \"add_tag\", // Add a tag to a candidate\n candidate_id: \"example string\", // UUID of the candidate\n tag_id: \"example string\", // UUID of the tag to add\n});\n\nconst result = await ashby_add_tag.action();\n// outputSchema for result.data when operation === 'add_tag':\n// {\n// operation: \"add_tag\" // Add tag operation,\n// 
success: boolean // Whether the operation was successful,\n// candidate: { id: string // Unique candidate identifier (UUID), createdAt: string | undefined // ISO 8601 creation timestamp, updatedAt: string | undefined // ISO 8601 update timestamp, name: string // Full name of the candidate, primaryEmailAddress: { value: string // Email address value, type: \"Personal\" | \"Work\" | \"Other\" // Type of email, isPrimary: boolean // Whether this is the primary email } | undefined | null // Primary email address, primaryPhoneNumber: { value: string // Phone number value, type: \"Personal\" | \"Work\" | \"Other\" // Type of phone, isPrimary: boolean // Whether this is the primary phone } | undefined | null // Primary phone number, customFields: { id: string // Custom field ID, title: string // Custom field title, value: unknown // Custom field value, isPrivate: boolean | undefined // Whether the field is private }[] | undefined // Custom field values } | undefined // Updated candidate details,\n// error: string // Error message if operation failed\n// }\n\n\n// List Tags example\nconst ashby_list_tags = new AshbyBubble({\n operation: \"list_tags\", // List all candidate tags\n include_archived: false // default, // Whether to include archived tags\n});\n\nconst result = await ashby_list_tags.action();\n// outputSchema for result.data when operation === 'list_tags':\n// {\n// operation: \"list_tags\" // List tags operation,\n// success: boolean // Whether the operation was successful,\n// tags: { id: string // Tag ID, title: string // Tag title, isArchived: boolean | undefined // Whether the tag is archived }[] | undefined // List of candidate tags,\n// error: string // Error message if operation failed\n// }\n\n\n// Create Tag example\nconst ashby_create_tag = new AshbyBubble({\n operation: \"create_tag\", // Create a new candidate tag\n title: \"example string\", // Title of the tag to create\n});\n\nconst result = await ashby_create_tag.action();\n// outputSchema for result.data when operation === 'create_tag':\n// {\n// operation: \"create_tag\" // Create tag operation,\n// success: boolean // Whether the operation was successful,\n// tag: { id: string // Tag ID, title: string // Tag title, isArchived: boolean | undefined // Whether the tag is archived } | undefined // Created tag details,\n// error: string // Error message if operation failed\n// }\n\n\n// List Custom Fields example\nconst ashby_list_custom_fields = new AshbyBubble({\n operation: \"list_custom_fields\", // List all custom field definitions\n limit: 100 // default, // Maximum number of custom fields to return (1-100)\n cursor: \"example string\", // Pagination cursor for fetching subsequent pages\n sync_token: \"example string\", // Token for incremental synchronization\n});\n\nconst result = await ashby_list_custom_fields.action();\n// outputSchema for result.data when operation === 'list_custom_fields':\n// {\n// operation: \"list_custom_fields\" // List custom fields operation,\n// success: boolean // Whether the operation was successful,\n// custom_fields: { id: string // Custom field ID (UUID), isPrivate: boolean // Whether the field is private, title: string // Custom field title, objectType: string // Object type this field applies to (e.g., Application, Candidate), isArchived: boolean // Whether the field is archived, fieldType: string // Type of field (e.g., MultiValueSelect, String, Number), selectableValues: { label: string // Display label for the value, value: string // Value identifier, isArchived: boolean // 
Whether the value is archived }[] | undefined // Available values for select-type fields }[] | undefined // List of custom field definitions,\n// next_cursor: string | undefined // Cursor for fetching the next page of results,\n// more_data_available: boolean | undefined // Whether more data is available,\n// sync_token: string | undefined // Token for incremental sync,\n// error: string // Error message if operation failed\n// }\n\n\n// Always check success status before using data\nif (!result.success) {\n throw new Error(`ashby failed: ${result.error}`);\n}\n\n// Access the actual data\nconst actualData = result.data;\nconsole.log(actualData);",
  "requiredCredentials": [
  "ASHBY_CRED"
  ]
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@bubblelab/bubble-core",
- "version": "0.1.65",
+ "version": "0.1.67",
  "type": "module",
  "license": "Apache-2.0",
  "main": "./dist/index.js",
@@ -40,7 +40,7 @@
  "puppeteer-core": "^24.10.0",
  "resend": "^4.8.0",
  "zod": "^3.24.1",
- "@bubblelab/shared-schemas": "0.1.66"
+ "@bubblelab/shared-schemas": "0.1.68"
  },
  "devDependencies": {
  "zod-to-json-schema": "^3.24.6",