@bubblelab/bubble-core 0.1.9 → 0.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bubble-bundle.d.ts +633 -1658
- package/dist/bubble-factory.d.ts.map +1 -1
- package/dist/bubble-factory.js +165 -43
- package/dist/bubble-factory.js.map +1 -1
- package/dist/bubble-flow/bubble-flow-class.d.ts +22 -1
- package/dist/bubble-flow/bubble-flow-class.d.ts.map +1 -1
- package/dist/bubble-flow/bubble-flow-class.js +36 -0
- package/dist/bubble-flow/bubble-flow-class.js.map +1 -1
- package/dist/bubble-flow/sample/data-analyst-flow.d.ts +1 -1
- package/dist/bubble-flow/sample/data-analyst-flow.d.ts.map +1 -1
- package/dist/bubble-flow/sample/error-ts.d.ts +1 -1
- package/dist/bubble-flow/sample/error-ts.d.ts.map +1 -1
- package/dist/bubble-flow/sample/sanitytest.d.ts +1 -1
- package/dist/bubble-flow/sample/sanitytest.d.ts.map +1 -1
- package/dist/bubble-flow/sample/simple-webhook-2.d.ts +1 -1
- package/dist/bubble-flow/sample/simple-webhook-2.d.ts.map +1 -1
- package/dist/bubble-flow/sample/simple-webhook.d.ts +1 -1
- package/dist/bubble-flow/sample/simple-webhook.d.ts.map +1 -1
- package/dist/bubble-flow/sample/simplified-data-analysis.flow.d.ts +1 -1
- package/dist/bubble-flow/sample/simplified-data-analysis.flow.d.ts.map +1 -1
- package/dist/bubble-flow/sample/simplified-data-analysis.flow.js +6 -3
- package/dist/bubble-flow/sample/simplified-data-analysis.flow.js.map +1 -1
- package/dist/bubble-flow/sample/slack-v0.1.d.ts +1 -1
- package/dist/bubble-flow/sample/slack-v0.1.d.ts.map +1 -1
- package/dist/bubble-flow/sample/slackagenttest.d.ts +1 -1
- package/dist/bubble-flow/sample/slackagenttest.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/agi-inc.d.ts +1121 -0
- package/dist/bubbles/service-bubble/agi-inc.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/agi-inc.js +730 -0
- package/dist/bubbles/service-bubble/agi-inc.js.map +1 -0
- package/dist/bubbles/service-bubble/ai-agent.d.ts +297 -85
- package/dist/bubbles/service-bubble/ai-agent.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/ai-agent.js +535 -399
- package/dist/bubbles/service-bubble/ai-agent.js.map +1 -1
- package/dist/bubbles/service-bubble/airtable.d.ts +1753 -0
- package/dist/bubbles/service-bubble/airtable.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/airtable.js +1173 -0
- package/dist/bubbles/service-bubble/airtable.js.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/google-maps-scraper.d.ts +240 -0
- package/dist/bubbles/service-bubble/apify/actors/google-maps-scraper.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/google-maps-scraper.js +119 -0
- package/dist/bubbles/service-bubble/apify/actors/google-maps-scraper.js.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/instagram-hashtag-scraper.d.ts +38 -38
- package/dist/bubbles/service-bubble/apify/actors/instagram-scraper.d.ts +56 -56
- package/dist/bubbles/service-bubble/apify/actors/linkedin-jobs-scraper.d.ts +137 -0
- package/dist/bubbles/service-bubble/apify/actors/linkedin-jobs-scraper.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/linkedin-jobs-scraper.js +81 -0
- package/dist/bubbles/service-bubble/apify/actors/linkedin-jobs-scraper.js.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/linkedin-posts-search.d.ts +27 -27
- package/dist/bubbles/service-bubble/apify/actors/linkedin-profile-posts.d.ts +38 -38
- package/dist/bubbles/service-bubble/apify/actors/tiktok-scraper.d.ts +488 -0
- package/dist/bubbles/service-bubble/apify/actors/tiktok-scraper.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/tiktok-scraper.js +463 -0
- package/dist/bubbles/service-bubble/apify/actors/tiktok-scraper.js.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/twitter-scraper.d.ts +262 -0
- package/dist/bubbles/service-bubble/apify/actors/twitter-scraper.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/twitter-scraper.js +291 -0
- package/dist/bubbles/service-bubble/apify/actors/twitter-scraper.js.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/youtube-scraper.d.ts +184 -0
- package/dist/bubbles/service-bubble/apify/actors/youtube-scraper.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/youtube-scraper.js +145 -0
- package/dist/bubbles/service-bubble/apify/actors/youtube-scraper.js.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/youtube-transcript-scraper.d.ts +52 -0
- package/dist/bubbles/service-bubble/apify/actors/youtube-transcript-scraper.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/apify/actors/youtube-transcript-scraper.js +29 -0
- package/dist/bubbles/service-bubble/apify/actors/youtube-transcript-scraper.js.map +1 -0
- package/dist/bubbles/service-bubble/apify/apify-scraper.schema.d.ts +1483 -123
- package/dist/bubbles/service-bubble/apify/apify-scraper.schema.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/apify/apify-scraper.schema.js +48 -0
- package/dist/bubbles/service-bubble/apify/apify-scraper.schema.js.map +1 -1
- package/dist/bubbles/service-bubble/apify/apify.d.ts +156 -11
- package/dist/bubbles/service-bubble/apify/apify.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/apify/apify.js +205 -32
- package/dist/bubbles/service-bubble/apify/apify.js.map +1 -1
- package/dist/bubbles/service-bubble/apify/index.d.ts +2 -0
- package/dist/bubbles/service-bubble/apify/index.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/apify/index.js +3 -0
- package/dist/bubbles/service-bubble/apify/index.js.map +1 -1
- package/dist/bubbles/service-bubble/apify/types.js +1 -2
- package/dist/bubbles/service-bubble/apify/types.js.map +1 -1
- package/dist/bubbles/service-bubble/eleven-labs.d.ts +421 -0
- package/dist/bubbles/service-bubble/eleven-labs.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/eleven-labs.js +479 -0
- package/dist/bubbles/service-bubble/eleven-labs.js.map +1 -0
- package/dist/bubbles/service-bubble/firecrawl.d.ts +37748 -0
- package/dist/bubbles/service-bubble/firecrawl.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/firecrawl.js +1489 -0
- package/dist/bubbles/service-bubble/firecrawl.js.map +1 -0
- package/dist/bubbles/service-bubble/followupboss.d.ts +6822 -0
- package/dist/bubbles/service-bubble/followupboss.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/followupboss.js +1394 -0
- package/dist/bubbles/service-bubble/followupboss.js.map +1 -0
- package/dist/bubbles/service-bubble/github.d.ts +2399 -0
- package/dist/bubbles/service-bubble/github.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/github.js +1052 -0
- package/dist/bubbles/service-bubble/github.js.map +1 -0
- package/dist/bubbles/service-bubble/gmail.d.ts +726 -232
- package/dist/bubbles/service-bubble/gmail.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/gmail.js +435 -7
- package/dist/bubbles/service-bubble/gmail.js.map +1 -1
- package/dist/bubbles/service-bubble/google-calendar.d.ts +8 -8
- package/dist/bubbles/service-bubble/google-drive.d.ts +270 -40
- package/dist/bubbles/service-bubble/google-drive.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/google-drive.js +100 -78
- package/dist/bubbles/service-bubble/google-drive.js.map +1 -1
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.d.ts +943 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.integration.flow.d.ts +31 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.integration.flow.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.integration.flow.js +184 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.integration.flow.js.map +1 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.js +401 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.js.map +1 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.schema.d.ts +1024 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.schema.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/{google-sheets.js → google-sheets/google-sheets.schema.js} +45 -409
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.schema.js.map +1 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.utils.d.ts +38 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.utils.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.utils.js +183 -0
- package/dist/bubbles/service-bubble/google-sheets/google-sheets.utils.js.map +1 -0
- package/dist/bubbles/service-bubble/google-sheets/index.d.ts +4 -0
- package/dist/bubbles/service-bubble/google-sheets/index.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/google-sheets/index.js +4 -0
- package/dist/bubbles/service-bubble/google-sheets/index.js.map +1 -0
- package/dist/bubbles/service-bubble/hello-world.js +2 -2
- package/dist/bubbles/service-bubble/hello-world.js.map +1 -1
- package/dist/bubbles/service-bubble/http.d.ts +6 -6
- package/dist/bubbles/service-bubble/http.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/http.js +7 -1
- package/dist/bubbles/service-bubble/http.js.map +1 -1
- package/dist/bubbles/service-bubble/insforge-db.d.ts +140 -0
- package/dist/bubbles/service-bubble/insforge-db.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/insforge-db.js +260 -0
- package/dist/bubbles/service-bubble/insforge-db.js.map +1 -0
- package/dist/bubbles/service-bubble/notion/index.d.ts +3 -0
- package/dist/bubbles/service-bubble/notion/index.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/notion/index.js +3 -0
- package/dist/bubbles/service-bubble/notion/index.js.map +1 -0
- package/dist/bubbles/service-bubble/notion/notion.d.ts +35405 -0
- package/dist/bubbles/service-bubble/notion/notion.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/notion/notion.js +1492 -0
- package/dist/bubbles/service-bubble/notion/notion.js.map +1 -0
- package/dist/bubbles/service-bubble/notion/property-schemas.d.ts +1148 -0
- package/dist/bubbles/service-bubble/notion/property-schemas.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/notion/property-schemas.js +341 -0
- package/dist/bubbles/service-bubble/notion/property-schemas.js.map +1 -0
- package/dist/bubbles/service-bubble/postgresql.d.ts +8 -8
- package/dist/bubbles/service-bubble/resend.d.ts +30 -9
- package/dist/bubbles/service-bubble/resend.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/resend.js +133 -2
- package/dist/bubbles/service-bubble/resend.js.map +1 -1
- package/dist/bubbles/service-bubble/slack.d.ts +525 -525
- package/dist/bubbles/service-bubble/slack.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/slack.js +2 -2
- package/dist/bubbles/service-bubble/slack.js.map +1 -1
- package/dist/bubbles/service-bubble/storage.d.ts +21 -17
- package/dist/bubbles/service-bubble/storage.d.ts.map +1 -1
- package/dist/bubbles/service-bubble/storage.js +59 -7
- package/dist/bubbles/service-bubble/storage.js.map +1 -1
- package/dist/bubbles/service-bubble/telegram.d.ts +7742 -0
- package/dist/bubbles/service-bubble/telegram.d.ts.map +1 -0
- package/dist/bubbles/service-bubble/telegram.js +1132 -0
- package/dist/bubbles/service-bubble/telegram.js.map +1 -0
- package/dist/bubbles/tool-bubble/bubbleflow-validation-tool.d.ts +60 -4
- package/dist/bubbles/tool-bubble/bubbleflow-validation-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/bubbleflow-validation-tool.js +12 -0
- package/dist/bubbles/tool-bubble/bubbleflow-validation-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/chart-js-tool.d.ts +4 -4
- package/dist/bubbles/tool-bubble/code-edit-tool.d.ts +188 -0
- package/dist/bubbles/tool-bubble/code-edit-tool.d.ts.map +1 -0
- package/dist/bubbles/tool-bubble/code-edit-tool.js +321 -0
- package/dist/bubbles/tool-bubble/code-edit-tool.js.map +1 -0
- package/dist/bubbles/tool-bubble/get-bubble-details-tool.d.ts +8 -4
- package/dist/bubbles/tool-bubble/get-bubble-details-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/get-bubble-details-tool.js +132 -19
- package/dist/bubbles/tool-bubble/get-bubble-details-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/google-maps-tool.d.ts +455 -0
- package/dist/bubbles/tool-bubble/google-maps-tool.d.ts.map +1 -0
- package/dist/bubbles/tool-bubble/google-maps-tool.js +205 -0
- package/dist/bubbles/tool-bubble/google-maps-tool.js.map +1 -0
- package/dist/bubbles/tool-bubble/instagram-tool.d.ts +90 -90
- package/dist/bubbles/tool-bubble/instagram-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/instagram-tool.js +2 -2
- package/dist/bubbles/tool-bubble/instagram-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/linkedin-tool.d.ts +808 -431
- package/dist/bubbles/tool-bubble/linkedin-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/linkedin-tool.js +232 -12
- package/dist/bubbles/tool-bubble/linkedin-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/list-bubbles-tool.d.ts +4 -4
- package/dist/bubbles/tool-bubble/reddit-scrape-tool.d.ts +21 -16
- package/dist/bubbles/tool-bubble/reddit-scrape-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/reddit-scrape-tool.js +97 -22
- package/dist/bubbles/tool-bubble/reddit-scrape-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/research-agent-tool.d.ts +17 -16
- package/dist/bubbles/tool-bubble/research-agent-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/research-agent-tool.js +27 -17
- package/dist/bubbles/tool-bubble/research-agent-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/sql-query-tool.d.ts +4 -4
- package/dist/bubbles/tool-bubble/tiktok-tool.d.ts +485 -0
- package/dist/bubbles/tool-bubble/tiktok-tool.d.ts.map +1 -0
- package/dist/bubbles/tool-bubble/tiktok-tool.js +226 -0
- package/dist/bubbles/tool-bubble/tiktok-tool.js.map +1 -0
- package/dist/bubbles/tool-bubble/twitter-tool.d.ts +947 -0
- package/dist/bubbles/tool-bubble/twitter-tool.d.ts.map +1 -0
- package/dist/bubbles/tool-bubble/twitter-tool.js +494 -0
- package/dist/bubbles/tool-bubble/twitter-tool.js.map +1 -0
- package/dist/bubbles/tool-bubble/web-crawl-tool.d.ts +32 -26
- package/dist/bubbles/tool-bubble/web-crawl-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/web-crawl-tool.js +58 -59
- package/dist/bubbles/tool-bubble/web-crawl-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/web-extract-tool.d.ts +4 -4
- package/dist/bubbles/tool-bubble/web-extract-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/web-extract-tool.js +17 -17
- package/dist/bubbles/tool-bubble/web-extract-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/web-scrape-tool.d.ts +23 -115
- package/dist/bubbles/tool-bubble/web-scrape-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/web-scrape-tool.js +51 -72
- package/dist/bubbles/tool-bubble/web-scrape-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/web-search-tool.d.ts +18 -6
- package/dist/bubbles/tool-bubble/web-search-tool.d.ts.map +1 -1
- package/dist/bubbles/tool-bubble/web-search-tool.js +46 -32
- package/dist/bubbles/tool-bubble/web-search-tool.js.map +1 -1
- package/dist/bubbles/tool-bubble/youtube-tool.d.ts +394 -0
- package/dist/bubbles/tool-bubble/youtube-tool.d.ts.map +1 -0
- package/dist/bubbles/tool-bubble/youtube-tool.js +352 -0
- package/dist/bubbles/tool-bubble/youtube-tool.js.map +1 -0
- package/dist/bubbles/workflow-bubble/generate-document.workflow.d.ts +78 -78
- package/dist/bubbles/workflow-bubble/generate-document.workflow.js +1 -1
- package/dist/bubbles/workflow-bubble/parse-document.workflow.d.ts +62 -62
- package/dist/bubbles/workflow-bubble/parse-document.workflow.js +1 -1
- package/dist/bubbles/workflow-bubble/pdf-form-operations.workflow.d.ts +80 -80
- package/dist/bubbles/workflow-bubble/pdf-form-operations.workflow.d.ts.map +1 -1
- package/dist/bubbles/workflow-bubble/pdf-form-operations.workflow.js +4 -4
- package/dist/bubbles/workflow-bubble/pdf-form-operations.workflow.js.map +1 -1
- package/dist/bubbles/workflow-bubble/pdf-ocr.workflow.d.ts +52 -52
- package/dist/bubbles/workflow-bubble/pdf-ocr.workflow.d.ts.map +1 -1
- package/dist/bubbles/workflow-bubble/pdf-ocr.workflow.js +2 -2
- package/dist/bubbles/workflow-bubble/pdf-ocr.workflow.js.map +1 -1
- package/dist/bubbles/workflow-bubble/slack-data-assistant.workflow.d.ts +32 -32
- package/dist/bubbles/workflow-bubble/slack-data-assistant.workflow.d.ts.map +1 -1
- package/dist/bubbles/workflow-bubble/slack-data-assistant.workflow.js +6 -6
- package/dist/bubbles/workflow-bubble/slack-data-assistant.workflow.js.map +1 -1
- package/dist/bubbles/workflow-bubble/slack-formatter-agent.d.ts +46 -46
- package/dist/bubbles/workflow-bubble/slack-formatter-agent.d.ts.map +1 -1
- package/dist/bubbles/workflow-bubble/slack-formatter-agent.js +24 -4
- package/dist/bubbles/workflow-bubble/slack-formatter-agent.js.map +1 -1
- package/dist/bubbles/workflow-bubble/slack-notifier.workflow.d.ts +27 -27
- package/dist/bubbles/workflow-bubble/slack-notifier.workflow.d.ts.map +1 -1
- package/dist/bubbles/workflow-bubble/slack-notifier.workflow.js +8 -8
- package/dist/bubbles/workflow-bubble/slack-notifier.workflow.js.map +1 -1
- package/dist/bubbles.json +251 -88
- package/dist/index.d.ts +27 -6
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +18 -4
- package/dist/index.js.map +1 -1
- package/dist/logging/BubbleLogger.d.ts +45 -16
- package/dist/logging/BubbleLogger.d.ts.map +1 -1
- package/dist/logging/BubbleLogger.js +197 -89
- package/dist/logging/BubbleLogger.js.map +1 -1
- package/dist/logging/StreamingBubbleLogger.d.ts +13 -1
- package/dist/logging/StreamingBubbleLogger.d.ts.map +1 -1
- package/dist/logging/StreamingBubbleLogger.js +56 -13
- package/dist/logging/StreamingBubbleLogger.js.map +1 -1
- package/dist/logging/WebhookStreamLogger.d.ts +66 -0
- package/dist/logging/WebhookStreamLogger.d.ts.map +1 -0
- package/dist/logging/WebhookStreamLogger.js +291 -0
- package/dist/logging/WebhookStreamLogger.js.map +1 -0
- package/dist/types/available-tools.d.ts +1 -1
- package/dist/types/available-tools.d.ts.map +1 -1
- package/dist/types/available-tools.js +1 -0
- package/dist/types/available-tools.js.map +1 -1
- package/dist/types/base-bubble-class.d.ts +7 -5
- package/dist/types/base-bubble-class.d.ts.map +1 -1
- package/dist/types/base-bubble-class.js +63 -40
- package/dist/types/base-bubble-class.js.map +1 -1
- package/dist/types/bubble.d.ts +3 -10
- package/dist/types/bubble.d.ts.map +1 -1
- package/dist/types/bubble.js +1 -1
- package/dist/types/bubble.js.map +1 -1
- package/dist/types/service-bubble-class.d.ts +2 -2
- package/dist/types/service-bubble-class.d.ts.map +1 -1
- package/dist/types/service-bubble-class.js +2 -2
- package/dist/types/service-bubble-class.js.map +1 -1
- package/dist/types/tool-bubble-class.d.ts +2 -2
- package/dist/types/tool-bubble-class.d.ts.map +1 -1
- package/dist/types/tool-bubble-class.js +2 -4
- package/dist/types/tool-bubble-class.js.map +1 -1
- package/dist/types/workflow-bubble-class.d.ts +2 -2
- package/dist/types/workflow-bubble-class.d.ts.map +1 -1
- package/dist/types/workflow-bubble-class.js +2 -2
- package/dist/types/workflow-bubble-class.js.map +1 -1
- package/dist/utils/agent-formatter.d.ts +14 -2
- package/dist/utils/agent-formatter.d.ts.map +1 -1
- package/dist/utils/agent-formatter.js +176 -28
- package/dist/utils/agent-formatter.js.map +1 -1
- package/dist/utils/bubbleflow-validation.d.ts +7 -0
- package/dist/utils/bubbleflow-validation.d.ts.map +1 -1
- package/dist/utils/bubbleflow-validation.js +257 -35
- package/dist/utils/bubbleflow-validation.js.map +1 -1
- package/dist/utils/error-sanitizer.d.ts +12 -0
- package/dist/utils/error-sanitizer.d.ts.map +1 -0
- package/dist/utils/error-sanitizer.js +77 -0
- package/dist/utils/error-sanitizer.js.map +1 -0
- package/dist/utils/json-parsing.d.ts.map +1 -1
- package/dist/utils/json-parsing.js +146 -0
- package/dist/utils/json-parsing.js.map +1 -1
- package/dist/utils/safe-gemini-chat.d.ts +31 -0
- package/dist/utils/safe-gemini-chat.d.ts.map +1 -0
- package/dist/utils/safe-gemini-chat.js +93 -0
- package/dist/utils/safe-gemini-chat.js.map +1 -0
- package/dist/utils/schema-comparison.d.ts +92 -0
- package/dist/utils/schema-comparison.d.ts.map +1 -0
- package/dist/utils/schema-comparison.js +716 -0
- package/dist/utils/schema-comparison.js.map +1 -0
- package/dist/utils/zod-schema.d.ts +24 -0
- package/dist/utils/zod-schema.d.ts.map +1 -0
- package/dist/utils/zod-schema.js +56 -0
- package/dist/utils/zod-schema.js.map +1 -0
- package/package.json +7 -6
- package/dist/bubble-trigger/index.d.ts +0 -2
- package/dist/bubble-trigger/index.d.ts.map +0 -1
- package/dist/bubble-trigger/index.js +0 -2
- package/dist/bubble-trigger/index.js.map +0 -1
- package/dist/bubble-trigger/types.d.ts +0 -87
- package/dist/bubble-trigger/types.d.ts.map +0 -1
- package/dist/bubble-trigger/types.js +0 -14
- package/dist/bubble-trigger/types.js.map +0 -1
- package/dist/bubbles/service-bubble/apify/api-scraper.schema.d.ts +0 -370
- package/dist/bubbles/service-bubble/apify/api-scraper.schema.d.ts.map +0 -1
- package/dist/bubbles/service-bubble/apify/api-scraper.schema.js +0 -14
- package/dist/bubbles/service-bubble/apify/api-scraper.schema.js.map +0 -1
- package/dist/bubbles/service-bubble/apify.d.ts +0 -136
- package/dist/bubbles/service-bubble/apify.d.ts.map +0 -1
- package/dist/bubbles/service-bubble/apify.js +0 -282
- package/dist/bubbles/service-bubble/apify.js.map +0 -1
- package/dist/bubbles/service-bubble/google-sheets.d.ts +0 -1811
- package/dist/bubbles/service-bubble/google-sheets.d.ts.map +0 -1
- package/dist/bubbles/service-bubble/google-sheets.js.map +0 -1
- package/dist/bubbles/workflow-bubble/bubbleflow-generator.workflow.d.ts +0 -114
- package/dist/bubbles/workflow-bubble/bubbleflow-generator.workflow.d.ts.map +0 -1
- package/dist/bubbles/workflow-bubble/bubbleflow-generator.workflow.js +0 -782
- package/dist/bubbles/workflow-bubble/bubbleflow-generator.workflow.js.map +0 -1
- package/dist/types/ai-models.d.ts +0 -4
- package/dist/types/ai-models.d.ts.map +0 -1
- package/dist/types/ai-models.js +0 -20
- package/dist/types/ai-models.js.map +0 -1
- package/dist/types/api-scraper.schema.d.ts +0 -453
- package/dist/types/api-scraper.schema.d.ts.map +0 -1
- package/dist/types/api-scraper.schema.js +0 -160
- package/dist/types/api-scraper.schema.js.map +0 -1
- package/dist/utils/param-helper.d.ts +0 -2
- package/dist/utils/param-helper.d.ts.map +0 -1
- package/dist/utils/param-helper.js +0 -5
- package/dist/utils/param-helper.js.map +0 -1
|
@@ -1,20 +1,49 @@
|
|
|
1
1
|
import { z } from 'zod';
|
|
2
2
|
import { ServiceBubble } from '../../types/service-bubble-class.js';
|
|
3
|
-
import { CredentialType, BUBBLE_CREDENTIAL_OPTIONS, } from '@bubblelab/shared-schemas';
|
|
3
|
+
import { CredentialType, BUBBLE_CREDENTIAL_OPTIONS, RECOMMENDED_MODELS, } from '@bubblelab/shared-schemas';
|
|
4
4
|
import { StateGraph, MessagesAnnotation } from '@langchain/langgraph';
|
|
5
5
|
import { ChatOpenAI } from '@langchain/openai';
|
|
6
6
|
import { ChatAnthropic } from '@langchain/anthropic';
|
|
7
|
-
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
|
|
8
7
|
import { HumanMessage, AIMessage, ToolMessage, AIMessageChunk, } from '@langchain/core/messages';
|
|
9
8
|
import { DynamicStructuredTool } from '@langchain/core/tools';
|
|
10
9
|
import { AvailableModels } from '@bubblelab/shared-schemas';
|
|
11
10
|
import { AvailableTools, } from '../../types/available-tools.js';
|
|
12
11
|
import { BubbleFactory } from '../../bubble-factory.js';
|
|
13
|
-
import {
|
|
12
|
+
import { ConversationMessageSchema } from '@bubblelab/shared-schemas';
|
|
13
|
+
import { extractAndStreamThinkingTokens, formatFinalResponse, generationsToMessageContent, } from '../../utils/agent-formatter.js';
|
|
14
14
|
import { isAIMessage, isAIMessageChunk } from '@langchain/core/messages';
|
|
15
|
+
import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai';
|
|
16
|
+
import { SafeGeminiChat } from '../../utils/safe-gemini-chat.js';
|
|
17
|
+
import { zodSchemaToJsonString, buildJsonSchemaInstruction, } from '../../utils/zod-schema.js';
|
|
18
|
+
// Define backup model configuration schema
|
|
19
|
+
const BackupModelConfigSchema = z.object({
|
|
20
|
+
model: AvailableModels.describe('Backup AI model to use if the primary model fails (format: provider/model-name).'),
|
|
21
|
+
temperature: z
|
|
22
|
+
.number()
|
|
23
|
+
.min(0)
|
|
24
|
+
.max(2)
|
|
25
|
+
.optional()
|
|
26
|
+
.describe('Temperature for backup model. If not specified, uses primary model temperature.'),
|
|
27
|
+
maxTokens: z
|
|
28
|
+
.number()
|
|
29
|
+
.positive()
|
|
30
|
+
.optional()
|
|
31
|
+
.describe('Max tokens for backup model. If not specified, uses primary model maxTokens.'),
|
|
32
|
+
reasoningEffort: z
|
|
33
|
+
.enum(['low', 'medium', 'high'])
|
|
34
|
+
.optional()
|
|
35
|
+
.describe('Reasoning effort for backup model. If not specified, uses primary model reasoningEffort.'),
|
|
36
|
+
maxRetries: z
|
|
37
|
+
.number()
|
|
38
|
+
.int()
|
|
39
|
+
.min(0)
|
|
40
|
+
.max(10)
|
|
41
|
+
.optional()
|
|
42
|
+
.describe('Max retries for backup model. If not specified, uses primary model maxRetries.'),
|
|
43
|
+
});
|
|
15
44
|
// Define model configuration
|
|
16
45
|
const ModelConfigSchema = z.object({
|
|
17
|
-
model: AvailableModels.
|
|
46
|
+
model: AvailableModels.describe('AI model to use (format: provider/model-name).'),
|
|
18
47
|
temperature: z
|
|
19
48
|
.number()
|
|
20
49
|
.min(0)
|
|
@@ -25,12 +54,32 @@ const ModelConfigSchema = z.object({
|
|
|
25
54
|
.number()
|
|
26
55
|
.positive()
|
|
27
56
|
.optional()
|
|
28
|
-
.default(
|
|
57
|
+
.default(12800)
|
|
29
58
|
.describe('Maximum number of tokens to generate in response, keep at default of 40000 unless the response is expected to be certain length'),
|
|
59
|
+
reasoningEffort: z
|
|
60
|
+
.enum(['low', 'medium', 'high'])
|
|
61
|
+
.optional()
|
|
62
|
+
.describe('Reasoning effort for model. If not specified, uses primary model reasoningEffort.'),
|
|
63
|
+
maxRetries: z
|
|
64
|
+
.number()
|
|
65
|
+
.int()
|
|
66
|
+
.min(0)
|
|
67
|
+
.max(10)
|
|
68
|
+
.default(3)
|
|
69
|
+
.describe('Maximum number of retries for API calls (default: 3). Useful for handling transient errors like 503 Service Unavailable.'),
|
|
70
|
+
provider: z
|
|
71
|
+
.array(z.string())
|
|
72
|
+
.optional()
|
|
73
|
+
.describe('Providers for ai agent (open router only).'),
|
|
30
74
|
jsonMode: z
|
|
31
75
|
.boolean()
|
|
32
76
|
.default(false)
|
|
33
|
-
.describe('When true,
|
|
77
|
+
.describe('When true, returns clean JSON response, you must provide the exact JSON schema in the system prompt'),
|
|
78
|
+
backupModel: BackupModelConfigSchema.default({
|
|
79
|
+
model: RECOMMENDED_MODELS.FAST,
|
|
80
|
+
})
|
|
81
|
+
.optional()
|
|
82
|
+
.describe('Backup model configuration to use if the primary model fails.'),
|
|
34
83
|
});
|
|
35
84
|
// Define tool configuration for pre-registered tools
|
|
36
85
|
const ToolConfigSchema = z.object({
|
|
@@ -56,8 +105,11 @@ const CustomToolSchema = z.object({
|
|
|
56
105
|
.min(1)
|
|
57
106
|
.describe('Description of what the tool does - helps the AI know when to use it'),
|
|
58
107
|
schema: z
|
|
59
|
-
.
|
|
60
|
-
.
|
|
108
|
+
.union([
|
|
109
|
+
z.record(z.string(), z.unknown()),
|
|
110
|
+
z.custom((val) => val && typeof val === 'object' && '_def' in val),
|
|
111
|
+
])
|
|
112
|
+
.describe('Zod schema object defining the tool parameters. Can be either a plain object (e.g., { amount: z.number() }) or a Zod object directly (e.g., z.object({ amount: z.number() })).'),
|
|
61
113
|
func: z
|
|
62
114
|
.function()
|
|
63
115
|
.args(z.record(z.string(), z.unknown()))
|
|
@@ -91,6 +143,11 @@ const ImageInputSchema = z.discriminatedUnion('type', [
|
|
|
91
143
|
Base64ImageSchema,
|
|
92
144
|
UrlImageSchema,
|
|
93
145
|
]);
|
|
146
|
+
// Schema for the expected JSON output structure - accepts either a Zod schema or a JSON schema string
|
|
147
|
+
const ExpectedOutputSchema = z.union([
|
|
148
|
+
z.custom((val) => val?._def !== undefined),
|
|
149
|
+
z.string(),
|
|
150
|
+
]);
|
|
94
151
|
// Define the parameters schema for the AI Agent bubble
|
|
95
152
|
const AIAgentParamsSchema = z.object({
|
|
96
153
|
message: z
|
|
@@ -101,6 +158,10 @@ const AIAgentParamsSchema = z.object({
|
|
|
101
158
|
.array(ImageInputSchema)
|
|
102
159
|
.default([])
|
|
103
160
|
.describe('Array of base64 encoded images to include with the message (for multimodal AI models). Example: [{type: "base64", data: "base64...", mimeType: "image/png", description: "A beautiful image of a cat"}] or [{type: "url", url: "https://example.com/image.png", description: "A beautiful image of a cat"}]'),
|
|
161
|
+
conversationHistory: z
|
|
162
|
+
.array(ConversationMessageSchema)
|
|
163
|
+
.optional()
|
|
164
|
+
.describe('Previous conversation messages for multi-turn conversations. When provided, messages are sent as separate turns to enable KV cache optimization. Format: [{role: "user", content: "..."}, {role: "assistant", content: "..."}, ...]'),
|
|
104
165
|
systemPrompt: z
|
|
105
166
|
.string()
|
|
106
167
|
.default('You are a helpful AI assistant')
|
|
@@ -111,21 +172,15 @@ const AIAgentParamsSchema = z.object({
|
|
|
111
172
|
.optional()
|
|
112
173
|
.describe('A friendly name for the AI agent'),
|
|
113
174
|
model: ModelConfigSchema.default({
|
|
114
|
-
model:
|
|
115
|
-
temperature:
|
|
175
|
+
model: RECOMMENDED_MODELS.FAST,
|
|
176
|
+
temperature: 1,
|
|
116
177
|
maxTokens: 50000,
|
|
178
|
+
maxRetries: 3,
|
|
117
179
|
jsonMode: false,
|
|
118
|
-
}).describe('AI model configuration including provider, temperature, and tokens
|
|
180
|
+
}).describe('AI model configuration including provider, temperature, and tokens, retries, and json mode. Always include this.'),
|
|
119
181
|
tools: z
|
|
120
182
|
.array(ToolConfigSchema)
|
|
121
|
-
.default([
|
|
122
|
-
{
|
|
123
|
-
name: 'web-search-tool',
|
|
124
|
-
config: {
|
|
125
|
-
maxResults: 5,
|
|
126
|
-
},
|
|
127
|
-
},
|
|
128
|
-
])
|
|
183
|
+
.default([])
|
|
129
184
|
.describe('Array of pre-registered tools the AI agent can use. Can be tool types (web-search-tool, web-scrape-tool, web-crawl-tool, web-extract-tool, instagram-tool). If using image models, set the tools to []'),
|
|
130
185
|
customTools: z
|
|
131
186
|
.array(CustomToolSchema)
|
|
@@ -135,9 +190,9 @@ const AIAgentParamsSchema = z.object({
|
|
|
135
190
|
maxIterations: z
|
|
136
191
|
.number()
|
|
137
192
|
.positive()
|
|
138
|
-
.min(
|
|
139
|
-
.default(
|
|
140
|
-
.describe('Maximum number of iterations for the agent workflow,
|
|
193
|
+
.min(4)
|
|
194
|
+
.default(40)
|
|
195
|
+
.describe('Maximum number of iterations for the agent workflow, 5 iterations per turn of conversation'),
|
|
141
196
|
credentials: z
|
|
142
197
|
.record(z.nativeEnum(CredentialType), z.string())
|
|
143
198
|
.optional()
|
|
@@ -146,13 +201,14 @@ const AIAgentParamsSchema = z.object({
|
|
|
146
201
|
.boolean()
|
|
147
202
|
.default(false)
|
|
148
203
|
.describe('Enable real-time streaming of tokens, tool calls, and iteration progress'),
|
|
204
|
+
expectedOutputSchema: ExpectedOutputSchema.optional().describe('Zod schema or JSON schema string that defines the expected structure of the AI response. When provided, automatically enables JSON mode and instructs the AI to output in the exact format. Example: z.object({ summary: z.string(), items: z.array(z.object({ name: z.string(), score: z.number() })) })'),
|
|
149
205
|
// Note: beforeToolCall and afterToolCall are function hooks added via TypeScript interface
|
|
150
206
|
// They cannot be part of the Zod schema but are available in the params
|
|
151
207
|
});
|
|
152
208
|
const AIAgentResultSchema = z.object({
|
|
153
209
|
response: z
|
|
154
210
|
.string()
|
|
155
|
-
.describe('The AI agents final response to the user message. For text responses, returns plain text
|
|
211
|
+
.describe('The AI agents final response to the user message. For text responses, returns plain text. If JSON mode is enabled, returns a JSON string. For image generation models (like gemini-2.5-flash-image-preview), returns base64-encoded image data with data URI format (data:image/png;base64,...)'),
|
|
156
212
|
toolCalls: z
|
|
157
213
|
.array(z.object({
|
|
158
214
|
tool: z.string().describe('Name of the tool that was called'),
|
|
@@ -190,15 +246,19 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
190
246
|
factory;
|
|
191
247
|
beforeToolCallHook;
|
|
192
248
|
afterToolCallHook;
|
|
249
|
+
afterLLMCallHook;
|
|
193
250
|
streamingCallback;
|
|
194
251
|
shouldStopAfterTools = false;
|
|
252
|
+
shouldContinueToAgent = false;
|
|
195
253
|
constructor(params = {
|
|
196
254
|
message: 'Hello, how are you?',
|
|
197
255
|
systemPrompt: 'You are a helpful AI assistant',
|
|
198
|
-
|
|
199
|
-
|
|
256
|
+
model: { model: RECOMMENDED_MODELS.FAST },
|
|
257
|
+
}, context, instanceId) {
|
|
258
|
+
super(params, context, instanceId);
|
|
200
259
|
this.beforeToolCallHook = params.beforeToolCall;
|
|
201
260
|
this.afterToolCallHook = params.afterToolCall;
|
|
261
|
+
this.afterLLMCallHook = params.afterLLMCall;
|
|
202
262
|
this.streamingCallback = params.streamingCallback;
|
|
203
263
|
this.factory = new BubbleFactory();
|
|
204
264
|
}
|
|
@@ -211,97 +271,89 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
211
271
|
}
|
|
212
272
|
return false;
|
|
213
273
|
}
|
|
274
|
+
/**
|
|
275
|
+
* Build effective model config from primary and optional backup settings
|
|
276
|
+
*/
|
|
277
|
+
buildModelConfig(primaryConfig, backupConfig) {
|
|
278
|
+
if (!backupConfig) {
|
|
279
|
+
return primaryConfig;
|
|
280
|
+
}
|
|
281
|
+
return {
|
|
282
|
+
model: backupConfig.model,
|
|
283
|
+
temperature: backupConfig.temperature ?? primaryConfig.temperature,
|
|
284
|
+
maxTokens: backupConfig.maxTokens ?? primaryConfig.maxTokens,
|
|
285
|
+
maxRetries: backupConfig.maxRetries ?? primaryConfig.maxRetries,
|
|
286
|
+
provider: primaryConfig.provider,
|
|
287
|
+
jsonMode: primaryConfig.jsonMode,
|
|
288
|
+
backupModel: undefined, // Don't chain backup models
|
|
289
|
+
};
|
|
290
|
+
}
|
|
291
|
+
/**
|
|
292
|
+
* Core execution logic for running the agent with a given model config
|
|
293
|
+
*/
|
|
294
|
+
async executeWithModel(modelConfig) {
|
|
295
|
+
const { message, images, systemPrompt, tools, customTools, maxIterations, conversationHistory, } = this.params;
|
|
296
|
+
// Initialize the language model
|
|
297
|
+
const llm = this.initializeModel(modelConfig);
|
|
298
|
+
// Initialize tools (both pre-registered and custom)
|
|
299
|
+
const agentTools = await this.initializeTools(tools, customTools);
|
|
300
|
+
// Create the agent graph
|
|
301
|
+
const graph = await this.createAgentGraph(llm, agentTools, systemPrompt);
|
|
302
|
+
// Execute the agent
|
|
303
|
+
return this.executeAgent(graph, message, images, maxIterations, modelConfig, conversationHistory);
|
|
304
|
+
}
|
|
305
|
+
/**
|
|
306
|
+
* Modify params before execution - centralizes all param transformations
|
|
307
|
+
*/
|
|
308
|
+
beforeAction() {
|
|
309
|
+
// Auto-enable JSON mode when expectedOutputSchema is provided
|
|
310
|
+
if (this.params.expectedOutputSchema) {
|
|
311
|
+
this.params.model.jsonMode = true;
|
|
312
|
+
// Enhance system prompt with JSON schema instructions
|
|
313
|
+
const schemaString = zodSchemaToJsonString(this.params.expectedOutputSchema);
|
|
314
|
+
this.params.systemPrompt = `${this.params.systemPrompt}\n\n${buildJsonSchemaInstruction(schemaString)}`;
|
|
315
|
+
}
|
|
316
|
+
}
|
|
214
317
|
async performAction(context) {
|
|
215
318
|
// Context is available but not currently used in this implementation
|
|
216
319
|
void context;
|
|
217
|
-
|
|
320
|
+
// Apply param transformations before execution
|
|
321
|
+
this.beforeAction();
|
|
218
322
|
try {
|
|
219
|
-
|
|
220
|
-
const llm = this.initializeModel(model);
|
|
221
|
-
// Initialize tools (both pre-registered and custom)
|
|
222
|
-
const agentTools = await this.initializeTools(tools, customTools);
|
|
223
|
-
// Create the agent graph
|
|
224
|
-
const graph = await this.createAgentGraph(llm, agentTools, systemPrompt);
|
|
225
|
-
// Execute the agent
|
|
226
|
-
const result = await this.executeAgent(graph, message, images, maxIterations, model.jsonMode);
|
|
227
|
-
return result;
|
|
323
|
+
return await this.executeWithModel(this.params.model);
|
|
228
324
|
}
|
|
229
325
|
catch (error) {
|
|
230
|
-
// Return error information but mark as recoverable
|
|
231
326
|
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
|
|
232
|
-
console.warn('[AIAgent] Execution error
|
|
327
|
+
console.warn('[AIAgent] Execution error:', errorMessage);
|
|
328
|
+
// Return error information but mark as recoverable
|
|
233
329
|
return {
|
|
234
330
|
response: `Error: ${errorMessage}`,
|
|
235
|
-
success: false,
|
|
331
|
+
success: false,
|
|
236
332
|
toolCalls: [],
|
|
237
333
|
error: errorMessage,
|
|
238
334
|
iterations: 0,
|
|
239
335
|
};
|
|
240
336
|
}
|
|
241
337
|
}
|
|
338
|
+
getCredentialType() {
|
|
339
|
+
return this.getCredentialTypeForModel(this.params.model.model);
|
|
340
|
+
}
|
|
242
341
|
/**
|
|
243
|
-
*
|
|
342
|
+
* Get credential type for a specific model string
|
|
244
343
|
*/
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
});
|
|
259
|
-
try {
|
|
260
|
-
// Send LLM start event
|
|
261
|
-
await streamingCallback({
|
|
262
|
-
type: 'llm_start',
|
|
263
|
-
data: {
|
|
264
|
-
model: model.model,
|
|
265
|
-
temperature: model.temperature,
|
|
266
|
-
},
|
|
267
|
-
});
|
|
268
|
-
// Initialize the language model
|
|
269
|
-
const llm = this.initializeModel(model);
|
|
270
|
-
// Initialize tools (both pre-registered and custom)
|
|
271
|
-
const agentTools = await this.initializeTools(tools, customTools);
|
|
272
|
-
// Create the agent graph
|
|
273
|
-
const graph = await this.createAgentGraph(llm, agentTools, systemPrompt);
|
|
274
|
-
// Execute the agent with streaming
|
|
275
|
-
const result = await this.executeAgentWithStreaming(graph, message, images, maxIterations, model.jsonMode, streamingCallback);
|
|
276
|
-
const totalDuration = Date.now() - startTime;
|
|
277
|
-
// Send completion event
|
|
278
|
-
await streamingCallback({
|
|
279
|
-
type: 'complete',
|
|
280
|
-
data: {
|
|
281
|
-
result,
|
|
282
|
-
totalDuration,
|
|
283
|
-
},
|
|
284
|
-
});
|
|
285
|
-
return result;
|
|
286
|
-
}
|
|
287
|
-
catch (error) {
|
|
288
|
-
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
|
|
289
|
-
// Send error event as recoverable
|
|
290
|
-
await streamingCallback({
|
|
291
|
-
type: 'error',
|
|
292
|
-
data: {
|
|
293
|
-
error: errorMessage,
|
|
294
|
-
recoverable: true, // Mark as recoverable to continue execution
|
|
295
|
-
},
|
|
296
|
-
});
|
|
297
|
-
console.warn('[AIAgent] Streaming execution error (continuing):', errorMessage);
|
|
298
|
-
return {
|
|
299
|
-
response: `Error: ${errorMessage}`,
|
|
300
|
-
success: false, // Still false but execution can continue
|
|
301
|
-
toolCalls: [],
|
|
302
|
-
error: errorMessage,
|
|
303
|
-
iterations: 0,
|
|
304
|
-
};
|
|
344
|
+
getCredentialTypeForModel(model) {
|
|
345
|
+
const [provider] = model.split('/');
|
|
346
|
+
switch (provider) {
|
|
347
|
+
case 'openai':
|
|
348
|
+
return CredentialType.OPENAI_CRED;
|
|
349
|
+
case 'google':
|
|
350
|
+
return CredentialType.GOOGLE_GEMINI_CRED;
|
|
351
|
+
case 'anthropic':
|
|
352
|
+
return CredentialType.ANTHROPIC_CRED;
|
|
353
|
+
case 'openrouter':
|
|
354
|
+
return CredentialType.OPENROUTER_CRED;
|
|
355
|
+
default:
|
|
356
|
+
throw new Error(`Unsupported model provider: ${provider}`);
|
|
305
357
|
}
|
|
306
358
|
}
|
|
307
359
|
chooseCredential() {
|
|
@@ -327,15 +379,40 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
327
379
|
}
|
|
328
380
|
}
|
|
329
381
|
initializeModel(modelConfig) {
|
|
330
|
-
const { model, temperature, maxTokens } = modelConfig;
|
|
382
|
+
const { model, temperature, maxTokens, maxRetries } = modelConfig;
|
|
331
383
|
const slashIndex = model.indexOf('/');
|
|
332
384
|
const provider = model.substring(0, slashIndex);
|
|
333
385
|
const modelName = model.substring(slashIndex + 1);
|
|
334
|
-
|
|
335
|
-
//
|
|
336
|
-
const
|
|
386
|
+
const reasoningEffort = modelConfig.reasoningEffort;
|
|
387
|
+
// Get credential based on the modelConfig's provider (not this.params.model)
|
|
388
|
+
const credentials = this.params.credentials;
|
|
389
|
+
if (!credentials || typeof credentials !== 'object') {
|
|
390
|
+
throw new Error(`No ${provider.toUpperCase()} credentials provided`);
|
|
391
|
+
}
|
|
392
|
+
let apiKey;
|
|
393
|
+
switch (provider) {
|
|
394
|
+
case 'openai':
|
|
395
|
+
apiKey = credentials[CredentialType.OPENAI_CRED];
|
|
396
|
+
break;
|
|
397
|
+
case 'google':
|
|
398
|
+
apiKey = credentials[CredentialType.GOOGLE_GEMINI_CRED];
|
|
399
|
+
break;
|
|
400
|
+
case 'anthropic':
|
|
401
|
+
apiKey = credentials[CredentialType.ANTHROPIC_CRED];
|
|
402
|
+
break;
|
|
403
|
+
case 'openrouter':
|
|
404
|
+
apiKey = credentials[CredentialType.OPENROUTER_CRED];
|
|
405
|
+
break;
|
|
406
|
+
default:
|
|
407
|
+
throw new Error(`Unsupported model provider: ${provider}`);
|
|
408
|
+
}
|
|
409
|
+
if (!apiKey) {
|
|
410
|
+
throw new Error(`No credential found for provider: ${provider}`);
|
|
411
|
+
}
|
|
337
412
|
// Enable streaming if streamingCallback is provided
|
|
338
413
|
const enableStreaming = !!this.streamingCallback;
|
|
414
|
+
// Default to 3 retries if not specified
|
|
415
|
+
const retries = maxRetries ?? 3;
|
|
339
416
|
switch (provider) {
|
|
340
417
|
case 'openai':
|
|
341
418
|
return new ChatOpenAI({
|
|
@@ -343,17 +420,70 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
343
420
|
temperature,
|
|
344
421
|
maxTokens,
|
|
345
422
|
apiKey,
|
|
423
|
+
...(reasoningEffort && {
|
|
424
|
+
reasoning: {
|
|
425
|
+
effort: reasoningEffort,
|
|
426
|
+
summary: 'auto',
|
|
427
|
+
},
|
|
428
|
+
}),
|
|
346
429
|
streaming: enableStreaming,
|
|
430
|
+
maxRetries: retries,
|
|
347
431
|
});
|
|
348
|
-
case 'google':
|
|
349
|
-
|
|
432
|
+
case 'google': {
|
|
433
|
+
const thinkingConfig = reasoningEffort
|
|
434
|
+
? {
|
|
435
|
+
includeThoughts: reasoningEffort ? true : false,
|
|
436
|
+
thinkingBudget: reasoningEffort === 'low'
|
|
437
|
+
? 1025
|
|
438
|
+
: reasoningEffort === 'medium'
|
|
439
|
+
? 5000
|
|
440
|
+
: 10000,
|
|
441
|
+
}
|
|
442
|
+
: undefined;
|
|
443
|
+
return new SafeGeminiChat({
|
|
350
444
|
model: modelName,
|
|
351
445
|
temperature,
|
|
352
446
|
maxOutputTokens: maxTokens,
|
|
447
|
+
...(thinkingConfig && { thinkingConfig }),
|
|
353
448
|
apiKey,
|
|
354
|
-
streaming
|
|
449
|
+
// 3.0 pro preview does breaks with streaming, disabled temporarily until fixed
|
|
450
|
+
streaming: false,
|
|
451
|
+
maxRetries: retries,
|
|
452
|
+
// Disable all safety filters to prevent candidateContent.parts.reduce errors
|
|
453
|
+
// when Gemini blocks content and returns candidates without content field
|
|
454
|
+
safetySettings: [
|
|
455
|
+
{
|
|
456
|
+
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
|
|
457
|
+
threshold: HarmBlockThreshold.BLOCK_NONE,
|
|
458
|
+
},
|
|
459
|
+
{
|
|
460
|
+
category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
|
|
461
|
+
threshold: HarmBlockThreshold.BLOCK_NONE,
|
|
462
|
+
},
|
|
463
|
+
{
|
|
464
|
+
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
|
|
465
|
+
threshold: HarmBlockThreshold.BLOCK_NONE,
|
|
466
|
+
},
|
|
467
|
+
{
|
|
468
|
+
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
|
|
469
|
+
threshold: HarmBlockThreshold.BLOCK_NONE,
|
|
470
|
+
},
|
|
471
|
+
],
|
|
355
472
|
});
|
|
356
|
-
|
|
473
|
+
}
|
|
474
|
+
case 'anthropic': {
|
|
475
|
+
// Configure Anthropic "thinking" only when reasoning is enabled.
|
|
476
|
+
// Anthropic's API does not allow `budget_tokens` when thinking is disabled.
|
|
477
|
+
const thinkingConfig = reasoningEffort != null
|
|
478
|
+
? {
|
|
479
|
+
type: 'enabled',
|
|
480
|
+
budget_tokens: reasoningEffort === 'low'
|
|
481
|
+
? 1025
|
|
482
|
+
: reasoningEffort === 'medium'
|
|
483
|
+
? 5000
|
|
484
|
+
: 10000,
|
|
485
|
+
}
|
|
486
|
+
: undefined;
|
|
357
487
|
return new ChatAnthropic({
|
|
358
488
|
model: modelName,
|
|
359
489
|
temperature,
|
|
@@ -361,7 +491,10 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
361
491
|
maxTokens,
|
|
362
492
|
streaming: enableStreaming,
|
|
363
493
|
apiKey,
|
|
494
|
+
...(thinkingConfig && { thinking: thinkingConfig }),
|
|
495
|
+
maxRetries: retries,
|
|
364
496
|
});
|
|
497
|
+
}
|
|
365
498
|
case 'openrouter':
|
|
366
499
|
console.log('openrouter', modelName);
|
|
367
500
|
return new ChatOpenAI({
|
|
@@ -371,12 +504,16 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
371
504
|
maxTokens,
|
|
372
505
|
apiKey,
|
|
373
506
|
streaming: enableStreaming,
|
|
507
|
+
maxRetries: retries,
|
|
374
508
|
configuration: {
|
|
375
509
|
baseURL: 'https://openrouter.ai/api/v1',
|
|
376
510
|
},
|
|
377
511
|
modelKwargs: {
|
|
512
|
+
provider: {
|
|
513
|
+
order: this.params.model.provider,
|
|
514
|
+
},
|
|
378
515
|
reasoning: {
|
|
379
|
-
effort: 'medium',
|
|
516
|
+
effort: reasoningEffort ?? 'medium',
|
|
380
517
|
exclude: false,
|
|
381
518
|
},
|
|
382
519
|
},
|
|
@@ -392,10 +529,22 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
392
529
|
for (const customTool of customToolConfigs) {
|
|
393
530
|
try {
|
|
394
531
|
console.log(`🛠️ [AIAgent] Initializing custom tool: ${customTool.name}`);
|
|
532
|
+
// Handle both plain object and Zod object schemas
|
|
533
|
+
let schema;
|
|
534
|
+
if (customTool.schema &&
|
|
535
|
+
typeof customTool.schema === 'object' &&
|
|
536
|
+
'_def' in customTool.schema) {
|
|
537
|
+
// Already a Zod schema object, use it directly
|
|
538
|
+
schema = customTool.schema;
|
|
539
|
+
}
|
|
540
|
+
else {
|
|
541
|
+
// Plain object, convert to Zod object
|
|
542
|
+
schema = z.object(customTool.schema);
|
|
543
|
+
}
|
|
395
544
|
const dynamicTool = new DynamicStructuredTool({
|
|
396
545
|
name: customTool.name,
|
|
397
546
|
description: customTool.description,
|
|
398
|
-
schema:
|
|
547
|
+
schema: schema,
|
|
399
548
|
func: customTool.func,
|
|
400
549
|
});
|
|
401
550
|
tools.push(dynamicTool);
|
|
@@ -475,12 +624,35 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
475
624
|
const tool = tools.find((t) => t.name === toolCall.name);
|
|
476
625
|
if (!tool) {
|
|
477
626
|
console.warn(`Tool ${toolCall.name} not found`);
|
|
627
|
+
const errorContent = `Error: Tool ${toolCall.name} not found`;
|
|
628
|
+
const startTime = Date.now();
|
|
629
|
+
// Send tool_start event
|
|
630
|
+
this.streamingCallback?.({
|
|
631
|
+
type: 'tool_start',
|
|
632
|
+
data: {
|
|
633
|
+
tool: toolCall.name,
|
|
634
|
+
input: toolCall.args,
|
|
635
|
+
callId: toolCall.id,
|
|
636
|
+
},
|
|
637
|
+
});
|
|
638
|
+
// Send tool_complete event with error
|
|
639
|
+
this.streamingCallback?.({
|
|
640
|
+
type: 'tool_complete',
|
|
641
|
+
data: {
|
|
642
|
+
callId: toolCall.id,
|
|
643
|
+
input: toolCall.args,
|
|
644
|
+
tool: toolCall.name,
|
|
645
|
+
output: { error: errorContent },
|
|
646
|
+
duration: Date.now() - startTime,
|
|
647
|
+
},
|
|
648
|
+
});
|
|
478
649
|
toolMessages.push(new ToolMessage({
|
|
479
|
-
content:
|
|
650
|
+
content: errorContent,
|
|
480
651
|
tool_call_id: toolCall.id,
|
|
481
652
|
}));
|
|
482
653
|
continue;
|
|
483
654
|
}
|
|
655
|
+
const startTime = Date.now();
|
|
484
656
|
try {
|
|
485
657
|
// Call beforeToolCall hook if provided
|
|
486
658
|
const hookResult_before = await this.beforeToolCallHook?.({
|
|
@@ -488,7 +660,6 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
488
660
|
toolInput: toolCall.args,
|
|
489
661
|
messages: currentMessages,
|
|
490
662
|
});
|
|
491
|
-
const startTime = Date.now();
|
|
492
663
|
this.streamingCallback?.({
|
|
493
664
|
type: 'tool_start',
|
|
494
665
|
data: {
|
|
@@ -536,6 +707,7 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
536
707
|
type: 'tool_complete',
|
|
537
708
|
data: {
|
|
538
709
|
callId: toolCall.id,
|
|
710
|
+
input: toolCall.args,
|
|
539
711
|
tool: toolCall.name,
|
|
540
712
|
output: toolOutput,
|
|
541
713
|
duration: Date.now() - startTime,
|
|
@@ -544,17 +716,30 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
544
716
|
}
|
|
545
717
|
catch (error) {
|
|
546
718
|
console.error(`Error executing tool ${toolCall.name}:`, error);
|
|
719
|
+
const errorContent = `Error: ${error instanceof Error ? error.message : 'Unknown error'}`;
|
|
547
720
|
const errorMessage = new ToolMessage({
|
|
548
|
-
content:
|
|
721
|
+
content: errorContent,
|
|
549
722
|
tool_call_id: toolCall.id,
|
|
550
723
|
});
|
|
551
724
|
toolMessages.push(errorMessage);
|
|
552
725
|
currentMessages = [...currentMessages, errorMessage];
|
|
726
|
+
// Send tool_complete event even on failure so frontend can track it properly
|
|
727
|
+
this.streamingCallback?.({
|
|
728
|
+
type: 'tool_complete',
|
|
729
|
+
data: {
|
|
730
|
+
callId: toolCall.id,
|
|
731
|
+
input: toolCall.args,
|
|
732
|
+
tool: toolCall.name,
|
|
733
|
+
output: { error: errorContent },
|
|
734
|
+
duration: Date.now() - startTime,
|
|
735
|
+
},
|
|
736
|
+
});
|
|
553
737
|
}
|
|
554
738
|
}
|
|
555
739
|
// Return the updated messages
|
|
556
740
|
// If hooks modified messages, use those; otherwise use the original messages + tool messages
|
|
557
741
|
if (currentMessages.length !== messages.length + toolMessages.length) {
|
|
742
|
+
console.error('[AIAgent] Current messages length does not match expected length', currentMessages.length, messages.length, toolMessages.length);
|
|
558
743
|
return { messages: currentMessages };
|
|
559
744
|
}
|
|
560
745
|
return { messages: toolMessages };
|
|
@@ -562,63 +747,180 @@ export class AIAgentBubble extends ServiceBubble {
|
|
|
562
747
|
async createAgentGraph(llm, tools, systemPrompt) {
|
|
563
748
|
// Define the agent node
|
|
564
749
|
const agentNode = async ({ messages }) => {
|
|
565
|
-
//
|
|
750
|
+
// systemPrompt is already enhanced by beforeAction() if expectedOutputSchema was provided
|
|
566
751
|
const systemMessage = new HumanMessage(systemPrompt);
|
|
567
752
|
const allMessages = [systemMessage, ...messages];
|
|
568
|
-
//
|
|
569
|
-
const
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
const
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
753
|
+
// Helper function for exponential backoff with jitter
|
|
754
|
+
const exponentialBackoff = (attemptNumber) => {
|
|
755
|
+
// Base delay: 1 second, exponentially increases (1s, 2s, 4s, 8s, ...)
|
|
756
|
+
const baseDelay = 1000;
|
|
757
|
+
const maxDelay = 32000; // Cap at 32 seconds
|
|
758
|
+
const delay = Math.min(baseDelay * Math.pow(2, attemptNumber - 1), maxDelay);
|
|
759
|
+
// Add jitter (random ±25% variation) to prevent thundering herd
|
|
760
|
+
const jitter = delay * 0.25 * (Math.random() - 0.5);
|
|
761
|
+
const finalDelay = delay + jitter;
|
|
762
|
+
return new Promise((resolve) => setTimeout(resolve, finalDelay));
|
|
763
|
+
};
|
|
764
|
+
// Shared onFailedAttempt callback to avoid duplication
|
|
765
|
+
const onFailedAttempt = async (error) => {
|
|
766
|
+
const attemptNumber = error.attemptNumber;
|
|
767
|
+
const retriesLeft = error.retriesLeft;
|
|
768
|
+
// Check if this is a candidateContent error
|
|
769
|
+
const errorMessage = error.message || String(error);
|
|
770
|
+
if (errorMessage.includes('candidateContent') ||
|
|
771
|
+
errorMessage.includes('parts.reduce') ||
|
|
772
|
+
errorMessage.includes('undefined is not an object')) {
|
|
773
|
+
this.context?.logger?.error(`[AIAgent] Gemini candidateContent error detected (attempt ${attemptNumber}). This indicates blocked/empty content from Gemini API.`);
|
|
774
|
+
}
|
|
775
|
+
this.context?.logger?.warn(`[AIAgent] LLM call failed (attempt ${attemptNumber}/${this.params.model.maxRetries}). Retries left: ${retriesLeft}. Error: ${error.message}`);
|
|
776
|
+
// Optionally emit streaming event for retry
|
|
777
|
+
if (this.streamingCallback) {
|
|
778
|
+
await this.streamingCallback({
|
|
779
|
+
type: 'error',
|
|
780
|
+
data: {
|
|
781
|
+
error: `Retry attempt ${attemptNumber}/${this.params.model.maxRetries}: ${error.message}`,
|
|
782
|
+
recoverable: retriesLeft > 0,
|
|
783
|
+
},
|
|
784
|
+
});
|
|
785
|
+
}
|
|
786
|
+
+                // Wait with exponential backoff before retrying
+                if (retriesLeft > 0) {
+                    await exponentialBackoff(attemptNumber);
+                }
+            };
+            // If we have tools, bind them to the LLM, then add retry logic
+            // IMPORTANT: Must bind tools FIRST, then add retry - not the other way around
+            const modelWithTools = tools.length > 0
+                ? llm.bindTools(tools).withRetry({
+                    stopAfterAttempt: this.params.model.maxRetries,
+                    onFailedAttempt,
+                })
+                : llm.withRetry({
+                    stopAfterAttempt: this.params.model.maxRetries,
+                    onFailedAttempt,
+                });
+            try {
+                // Use streaming if streamingCallback is provided
+                if (this.streamingCallback) {
+                    const messageId = `msg-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
+                    // Use invoke with callbacks for streaming
+                    const response = await modelWithTools.invoke(allMessages, {
+                        callbacks: [
+                            {
+                                handleLLMStart: async () => {
+                                    await this.streamingCallback?.({
+                                        type: 'llm_start',
+                                        data: {
+                                            model: this.params.model.model,
+                                            temperature: this.params.model.temperature,
+                                        },
+                                    });
+                                },
+                                handleLLMEnd: async (output) => {
+                                    // Extract thinking tokens from different model providers
+                                    const thinking = extractAndStreamThinkingTokens(output);
+                                    if (thinking) {
+                                        await this.streamingCallback?.({
+                                            type: 'think',
+                                            data: {
+                                                content: thinking,
+                                                messageId,
+                                            },
+                                        });
+                                    }
+                                    const content = formatFinalResponse(generationsToMessageContent(output.generations.flat()), this.params.model.model).response;
                                     await this.streamingCallback?.({
-                                        type: '
+                                        type: 'llm_complete',
                                         data: {
-                                            content: thinking,
                                             messageId,
+                                            content: content,
+                                            totalTokens: output.llmOutput?.usage_metadata?.total_tokens,
                                         },
                                     });
-                                    }
-                                    await this.streamingCallback?.({
-                                        type: 'llm_complete',
-                                        data: {
-                                            messageId,
-                                            totalTokens: output.llmOutput?.usage_metadata?.total_tokens,
-                                        },
-                                    });
+                                },
                             },
-
-
-
-
+                        ],
+                    });
+                    return { messages: [response] };
+                }
+                else {
+                    // Non-streaming fallback
+                    const response = await modelWithTools.invoke(allMessages);
+                    return { messages: [response] };
+                }
            }
-
-        //
-        const
-
+            catch (error) {
+                // Catch candidateContent errors that slip through SafeGeminiChat
+                const errorMessage = error instanceof Error ? error.message : String(error);
+                if (errorMessage.includes('candidateContent') ||
+                    errorMessage.includes('parts.reduce') ||
+                    errorMessage.includes('undefined is not an object')) {
+                    console.error('[AIAgent] Caught candidateContent error in agentNode:', errorMessage);
+                    // Return error as AIMessage instead of crashing
+                    return {
+                        messages: [
+                            new AIMessage({
+                                content: `[Gemini Error] Unable to generate response due to content filtering. Error: ${errorMessage}`,
+                                additional_kwargs: {
+                                    finishReason: 'ERROR',
+                                    error: errorMessage,
+                                },
+                            }),
+                        ],
+                    };
+                }
+                // Rethrow other errors
+                throw error;
            }
        };
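The added block above binds the tools to the LLM first and only then wraps the result with retry logic, so every retry re-invokes the tool-bound model rather than the bare one. A minimal sketch of that ordering, assuming a LangChain-style chat model exposing `bindTools()`/`withRetry()`; the `exponentialBackoff` and `withToolsAndRetry` helpers are hypothetical, not part of the package:

```ts
// Illustrative sketch only: shows the "bind tools first, then add retry" ordering.
// `llm` is assumed to follow LangChain's Runnable/ChatModel interface.
import { setTimeout as sleep } from 'node:timers/promises';

// Hypothetical backoff helper: 1s, 2s, 4s, ... capped at 30s between attempts.
async function exponentialBackoff(attemptNumber: number): Promise<void> {
  const delayMs = Math.min(1000 * 2 ** Math.max(attemptNumber - 1, 0), 30_000);
  await sleep(delayMs);
}

type RetryInfo = { attemptNumber: number; retriesLeft: number };

function withToolsAndRetry(llm: any, tools: unknown[], maxRetries: number) {
  const onFailedAttempt = async ({ attemptNumber, retriesLeft }: RetryInfo) => {
    // Wait before retrying, but not after the final attempt.
    if (retriesLeft > 0) {
      await exponentialBackoff(attemptNumber);
    }
  };
  // Binding after withRetry() would retry the un-bound model, so bind first.
  return tools.length > 0
    ? llm.bindTools(tools).withRetry({ stopAfterAttempt: maxRetries, onFailedAttempt })
    : llm.withRetry({ stopAfterAttempt: maxRetries, onFailedAttempt });
}
```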
-        //
-        const
+        // Node that runs after agent to check afterLLMCall hook before routing
+        const afterLLMCheckNode = async ({ messages, }) => {
+            // Reset the flag at the start
+            this.shouldContinueToAgent = false;
+            // Get the last AI message
             const lastMessage = messages[messages.length - 1];
-
-            if
+            const hasToolCalls = !!(lastMessage.tool_calls && lastMessage.tool_calls.length > 0);
+            // Only call hook if we're about to end (no tool calls) and hook is provided
+            if (!hasToolCalls && this.afterLLMCallHook) {
+                console.log('[AIAgent] No tool calls detected, calling afterLLMCall hook');
+                const hookResult = await this.afterLLMCallHook({
+                    messages,
+                    lastAIMessage: lastMessage,
+                    hasToolCalls,
+                });
+                // If hook wants to continue to agent, set flag and return modified messages
+                if (hookResult.continueToAgent) {
+                    console.log('[AIAgent] afterLLMCall hook requested retry to agent');
+                    this.shouldContinueToAgent = true;
+                    // Return the modified messages from the hook
+                    // We need to return only the new messages to append
+                    const newMessages = hookResult.messages.slice(messages.length);
+                    return { messages: newMessages };
+                }
+            }
+            // No modifications needed
+            return { messages: [] };
+        };
+        // Define conditional edge function after LLM check
+        const shouldContinueAfterLLMCheck = ({ messages, }) => {
+            // First check if afterLLMCall hook requested continuing to agent
+            if (this.shouldContinueToAgent) {
+                return 'agent';
+            }
+            // Find the last AI message (could be followed by human messages from hook)
+            const aiMessages = [];
+            for (const msg of messages) {
+                if (isAIMessage(msg)) {
+                    aiMessages.push(msg);
+                }
+                else if ('tool_calls' in msg &&
+                    msg.constructor?.name === 'AIMessageChunk') {
+                    aiMessages.push(msg);
+                }
+            }
+            const lastAIMessage = aiMessages[aiMessages.length - 1];
+            // Check if the last AI message has tool calls
+            if (lastAIMessage?.tool_calls && lastAIMessage.tool_calls.length > 0) {
                 return 'tools';
             }
             return '__end__';
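The new `afterLLMCheck` routing gives an optional `afterLLMCall` hook a chance to inspect the final AI message and, if it returns `continueToAgent`, push the graph back to the agent node. A sketch of one possible hook, following the call shape visible above (`{ messages, lastAIMessage, hasToolCalls }` in, `{ continueToAgent, messages }` out); the empty-reply check itself is purely illustrative:

```ts
// Illustrative sketch only: a hook that loops back to the agent when the model
// returns an empty reply. Only the input/output shape mirrors the call site above.
import { BaseMessage, HumanMessage } from '@langchain/core/messages';

interface AfterLLMCallInput {
  messages: BaseMessage[];
  lastAIMessage: BaseMessage;
  hasToolCalls: boolean;
}

interface AfterLLMCallResult {
  continueToAgent: boolean;
  messages: BaseMessage[];
}

const afterLLMCall = async ({ messages, lastAIMessage }: AfterLLMCallInput): Promise<AfterLLMCallResult> => {
  const text = typeof lastAIMessage.content === 'string' ? lastAIMessage.content : '';
  if (text.trim().length === 0) {
    // Append a follow-up turn; the node above slices off the original messages
    // and feeds only the newly appended ones back into the graph.
    return {
      continueToAgent: true,
      messages: [...messages, new HumanMessage('Your previous reply was empty. Please answer the question.')],
    };
  }
  return { continueToAgent: false, messages };
};
```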
@@ -641,23 +943,57 @@ export class AIAgentBubble extends ServiceBubble {
        };
            graph
                .addNode('tools', toolNode)
+                .addNode('afterLLMCheck', afterLLMCheckNode)
                .addEdge('__start__', 'agent')
-                .
+                .addEdge('agent', 'afterLLMCheck')
+                .addConditionalEdges('afterLLMCheck', shouldContinueAfterLLMCheck)
                .addConditionalEdges('tools', shouldContinueAfterTools);
        }
        else {
-
+            // Even without tools, add the afterLLMCheck node for hook support
+            graph
+                .addNode('afterLLMCheck', afterLLMCheckNode)
+                .addEdge('__start__', 'agent')
+                .addEdge('agent', 'afterLLMCheck')
+                .addConditionalEdges('afterLLMCheck', shouldContinueAfterLLMCheck);
        }
        return graph.compile();
    }
-    async executeAgent(graph, message, images, maxIterations,
+    async executeAgent(graph, message, images, maxIterations, modelConfig, conversationHistory) {
+        const jsonMode = modelConfig.jsonMode;
        const toolCalls = [];
        let iterations = 0;
        console.log('[AIAgent] Starting execution with message:', message.substring(0, 100) + '...');
        console.log('[AIAgent] Max iterations:', maxIterations);
        try {
            console.log('[AIAgent] Invoking graph...');
-            //
+            // Build messages array starting with conversation history (for KV cache optimization)
+            const initialMessages = [];
+            // Convert conversation history to LangChain messages if provided
+            // This enables KV cache optimization by keeping previous turns as separate messages
+            if (conversationHistory && conversationHistory.length > 0) {
+                for (const historyMsg of conversationHistory) {
+                    switch (historyMsg.role) {
+                        case 'user':
+                            initialMessages.push(new HumanMessage(historyMsg.content));
+                            break;
+                        case 'assistant':
+                            initialMessages.push(new AIMessage(historyMsg.content));
+                            break;
+                        case 'tool':
+                            // Tool messages require a tool_call_id
+                            if (historyMsg.toolCallId) {
+                                initialMessages.push(new ToolMessage({
+                                    content: historyMsg.content,
+                                    tool_call_id: historyMsg.toolCallId,
+                                    name: historyMsg.name,
+                                }));
+                            }
+                            break;
+                    }
+                }
+            }
+            // Create the current human message with text and optional images
            let humanMessage;
            if (images && images.length > 0) {
                console.log('[AIAgent] Creating multimodal message with', images.length, 'images');
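`executeAgent` now takes a `conversationHistory` argument and replays it as separate LangChain messages ahead of the new turn. A sketch of the history shape that conversion loop expects, based only on the fields it reads (`role`, `content`, `toolCallId`, `name`); the sample values are invented:

```ts
// Illustrative sketch only: field names follow the conversion loop above,
// the concrete messages are made up.
type HistoryMessage =
  | { role: 'user' | 'assistant'; content: string }
  | { role: 'tool'; content: string; toolCallId: string; name?: string };

const conversationHistory: HistoryMessage[] = [
  { role: 'user', content: 'What is the weather in Paris?' },
  { role: 'assistant', content: 'Let me check with the weather tool.' },
  { role: 'tool', content: '{"tempC": 21}', toolCallId: 'call_123', name: 'get-weather' },
];
// Keeping earlier turns as distinct messages (instead of flattening them into one
// prompt string) is what lets provider-side KV caching reuse the prior tokens.
```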
@@ -708,7 +1044,9 @@ export class AIAgentBubble extends ServiceBubble {
                // Text-only message
                humanMessage = new HumanMessage(message);
            }
-
+            // Add the current message to the conversation
+            initialMessages.push(humanMessage);
+            const result = await graph.invoke({ messages: initialMessages }, { recursionLimit: maxIterations });
            console.log('[AIAgent] Graph execution completed');
            console.log('[AIAgent] Total messages:', result.messages.length);
            iterations = result.messages.length;
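The current turn is appended to `initialMessages` before the graph is invoked; when images are supplied, the earlier branch builds a multimodal `HumanMessage` instead of a plain text one. A hedged sketch of that content layout using LangChain's `image_url` format; the helper name and data-URL handling here are illustrative, not the package's own code:

```ts
// Illustrative sketch only: builds a text + image_url content array of the kind
// the image branch produces before it is pushed onto initialMessages.
import { HumanMessage } from '@langchain/core/messages';

function toMultimodalMessage(
  text: string,
  images: Array<{ mimeType: string; data: string }>,
): HumanMessage {
  const content: Array<
    { type: 'text'; text: string } | { type: 'image_url'; image_url: { url: string } }
  > = [{ type: 'text', text }];
  for (const image of images) {
    // Base64 payloads are wrapped in a data URL so Gemini-style providers accept them.
    content.push({
      type: 'image_url',
      image_url: { url: `data:${image.mimeType};base64,${image.data}` },
    });
  }
  return new HumanMessage({ content });
}
```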
@@ -759,6 +1097,9 @@ export class AIAgentBubble extends ServiceBubble {
            const aiMessages = result.messages.filter((msg) => isAIMessage(msg) || isAIMessageChunk(msg));
            console.log('[AIAgent] Found', aiMessages.length, 'AI messages');
            const finalMessage = aiMessages[aiMessages.length - 1];
+            if (finalMessage?.additional_kwargs?.finishReason === 'SAFETY_BLOCKED') {
+                throw new Error(`[Gemini Error] Unable to generate a response. Please increase maxTokens in model configuration or try again with a different model.`);
+            }
            // Check for MAX_TOKENS finish reason
            if (finalMessage?.additional_kwargs?.finishReason === 'MAX_TOKENS') {
                throw new Error('Response was truncated due to max tokens limit. Please increase maxTokens in model configuration.');
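The new SAFETY_BLOCKED check sits next to the existing MAX_TOKENS guard; both read `additional_kwargs.finishReason` off the final AI message. The same two checks, pulled into a standalone helper for clarity (the helper itself is not part of the package):

```ts
// Illustrative sketch only: equivalent guard over additional_kwargs.finishReason.
import type { BaseMessage } from '@langchain/core/messages';

function assertUsableFinishReason(finalMessage: BaseMessage | undefined): void {
  const reason = finalMessage?.additional_kwargs?.finishReason;
  if (reason === 'SAFETY_BLOCKED') {
    throw new Error('[Gemini Error] Unable to generate a response. Please increase maxTokens in model configuration or try again with a different model.');
  }
  if (reason === 'MAX_TOKENS') {
    throw new Error('Response was truncated due to max tokens limit. Please increase maxTokens in model configuration.');
  }
}
```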
@@ -784,19 +1125,29 @@ export class AIAgentBubble extends ServiceBubble {
            }
            if (totalTokensSum > 0 && this.context && this.context.logger) {
                this.context.logger.logTokenUsage({
-
-
-
-
-                }, `LLM completion: ${totalInputTokens} input
+                    usage: totalInputTokens,
+                    service: this.getCredentialTypeForModel(modelConfig.model),
+                    unit: 'input_tokens',
+                    subService: modelConfig.model,
+                }, `LLM completion: ${totalInputTokens} input`, {
+                    bubbleName: 'ai-agent',
+                    variableId: this.context?.variableId,
+                    operationType: 'bubble_execution',
+                });
+                this.context.logger.logTokenUsage({
+                    usage: totalOutputTokens,
+                    service: this.getCredentialTypeForModel(modelConfig.model),
+                    unit: 'output_tokens',
+                    subService: modelConfig.model,
+                }, `LLM completion: ${totalOutputTokens} output`, {
                    bubbleName: 'ai-agent',
                    variableId: this.context?.variableId,
                    operationType: 'bubble_execution',
                });
            }
-            const response = finalMessage?.content || '
+            const response = finalMessage?.content || '';
            // Use shared formatting method
-            const formattedResult =
+            const formattedResult = formatFinalResponse(response, modelConfig.model, jsonMode);
            // If there's an error from formatting (e.g., invalid JSON), return early
            if (formattedResult.error) {
                return {
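Token usage is now logged as two entries, one for input tokens and one for output tokens, each tagged with the credential service and the concrete model as a sub-service. A hedged sketch of how the `totalInputTokens` / `totalOutputTokens` figures used above can be accumulated from LangChain `usage_metadata`; the accumulation itself is an assumption about the surrounding, unchanged code:

```ts
// Illustrative sketch only: sums usage_metadata across the returned AI messages.
import type { AIMessage } from '@langchain/core/messages';

function sumTokenUsage(aiMessages: AIMessage[]) {
  let totalInputTokens = 0;
  let totalOutputTokens = 0;
  for (const msg of aiMessages) {
    totalInputTokens += msg.usage_metadata?.input_tokens ?? 0;
    totalOutputTokens += msg.usage_metadata?.output_tokens ?? 0;
  }
  return {
    totalInputTokens,
    totalOutputTokens,
    totalTokensSum: totalInputTokens + totalOutputTokens,
  };
}
```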
@@ -827,6 +1178,21 @@ export class AIAgentBubble extends ServiceBubble {
            console.warn('[AIAgent] Execution error (continuing):', error);
            console.log('[AIAgent] Tool calls before error:', toolCalls.length);
            console.log('[AIAgent] Iterations before error:', iterations);
+            // Model fallback logic - only retry if this config has a backup model
+            if (modelConfig.backupModel) {
+                console.log(`[AIAgent] Retrying with backup model: ${modelConfig.backupModel.model}`);
+                this.context?.logger?.warn(`Primary model ${modelConfig.model} failed: ${error instanceof Error ? error.message : 'Unknown error'}. Retrying with backup model... ${modelConfig.backupModel.model}`);
+                this.streamingCallback?.({
+                    type: 'error',
+                    data: {
+                        error: `Primary model ${modelConfig.model} failed: ${error instanceof Error ? error.message : 'Unknown error'}. Retrying with backup model... ${modelConfig.backupModel.model}`,
+                        recoverable: true,
+                    },
+                });
+                const backupModelConfig = this.buildModelConfig(modelConfig, modelConfig.backupModel);
+                const backupResult = await this.executeWithModel(backupModelConfig);
+                return backupResult;
+            }
            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
            // Return partial results to allow execution to continue
            // Include any tool calls that were completed before the error
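On failure, the catch block now falls back to `modelConfig.backupModel` when one is configured: it logs a warning, emits a recoverable streaming `error` event, and re-runs the agent through `executeWithModel` with a config built from the backup. A sketch of what such a config might look like from the caller's side; the model identifiers are placeholders and only the fields visible in the diff (`model`, `temperature`, `maxRetries`, `jsonMode`, `backupModel`) are taken from the package:

```ts
// Illustrative sketch only: model identifiers are placeholders.
const modelConfig = {
  model: 'google/gemini-2.5-flash',
  temperature: 0.2,
  maxRetries: 3,
  jsonMode: false,
  backupModel: {
    model: 'openai/gpt-4o-mini',
  },
};
// If the primary model throws, the agent retries once with the backup config;
// without a backupModel the error is surfaced as a partial result instead.
```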
@@ -839,235 +1205,5 @@ export class AIAgentBubble extends ServiceBubble {
            };
        }
    }
-    /**
-     * Execute agent with streaming support using LangGraph streamEvents
-     */
-    async executeAgentWithStreaming(graph, message, images, maxIterations, jsonMode, streamingCallback) {
-        const toolCalls = [];
-        let iterations = 0;
-        let currentMessageId = '';
-        console.log('[AIAgent] Starting streaming execution with message:', message.substring(0, 100) + '...');
-        try {
-            // Create human message with text and optional images
-            let humanMessage;
-            if (images && images.length > 0) {
-                console.log('[AIAgent] Creating multimodal message with', images.length, 'images');
-                // Create multimodal content array
-                const content = [{ type: 'text', text: message }];
-                // Add images to content
-                for (const image of images) {
-                    let imageUrl;
-                    if (image.type === 'base64') {
-                        // Base64 encoded image
-                        imageUrl = `data:${image.mimeType};base64,${image.data}`;
-                    }
-                    else {
-                        // URL image - fetch and convert to base64 for Google Gemini compatibility
-                        try {
-                            console.log('[AIAgent] Fetching image from URL:', image.url);
-                            const response = await fetch(image.url);
-                            if (!response.ok) {
-                                throw new Error(`Failed to fetch image: ${response.status} ${response.statusText}`);
-                            }
-                            const arrayBuffer = await response.arrayBuffer();
-                            const base64Data = Buffer.from(arrayBuffer).toString('base64');
-                            // Detect MIME type from response or default to PNG
-                            const contentType = response.headers.get('content-type') || 'image/png';
-                            imageUrl = `data:${contentType};base64,${base64Data}`;
-                            console.log('[AIAgent] Successfully converted URL image to base64');
-                        }
-                        catch (error) {
-                            console.error('[AIAgent] Error fetching image from URL:', error);
-                            throw new Error(`Failed to load image from URL ${image.url}: ${error instanceof Error ? error.message : 'Unknown error'}`);
-                        }
-                    }
-                    content.push({
-                        type: 'image_url',
-                        image_url: { url: imageUrl },
-                    });
-                    // Add image description if provided
-                    if (image.description) {
-                        content.push({
-                            type: 'text',
-                            text: `Image description: ${image.description}`,
-                        });
-                    }
-                }
-                humanMessage = new HumanMessage({ content });
-            }
-            else {
-                // Text-only message
-                humanMessage = new HumanMessage(message);
-            }
-            // Stream events from the graph
-            const eventStream = graph.streamEvents({ messages: [humanMessage] }, {
-                version: 'v2',
-                recursionLimit: maxIterations,
-            });
-            let currentIteration = 0;
-            const toolCallMap = new Map();
-            let accumulatedContent = '';
-            // Track processed events to prevent duplicates
-            const processedIterationEvents = new Set();
-            for await (const event of eventStream) {
-                if (!event || typeof event !== 'object')
-                    continue;
-                // Handle different types of streaming events
-                switch (event.event) {
-                    case 'on_chat_model_start':
-                        currentIteration++;
-                        currentMessageId = `msg-${Date.now()}-${currentIteration}`;
-                        if (streamingCallback) {
-                            await streamingCallback({
-                                type: 'iteration_start',
-                                data: { iteration: currentIteration },
-                            });
-                        }
-                        break;
-                    case 'on_chat_model_stream':
-                        // Stream individual tokens
-                        if (event.data?.chunk?.content && streamingCallback) {
-                            const content = event.data.chunk.content;
-                            accumulatedContent += content;
-                            await streamingCallback({
-                                type: 'token',
-                                data: {
-                                    content,
-                                    messageId: currentMessageId,
-                                },
-                            });
-                        }
-                        break;
-                    case 'on_chat_model_end':
-                        if (streamingCallback) {
-                            const usageMetadata = event.data?.output?.usage_metadata;
-                            const totalTokens = usageMetadata?.total_tokens;
-                            // Track token usage if available
-                            if (usageMetadata &&
-                                this.context != null &&
-                                this.context.logger != null) {
-                                const tokenUsage = {
-                                    inputTokens: usageMetadata.input_tokens || 0,
-                                    outputTokens: usageMetadata.output_tokens || 0,
-                                    totalTokens: totalTokens || 0,
-                                    modelName: this.params.model.model,
-                                };
-                                this.context.logger.logTokenUsage(tokenUsage, `LLM completion: ${tokenUsage.inputTokens} input + ${tokenUsage.outputTokens} output = ${tokenUsage.totalTokens} total tokens`, {
-                                    bubbleName: 'ai-agent',
-                                    variableId: this.context?.variableId,
-                                    operationType: 'bubble_execution',
-                                });
-                            }
-                            await streamingCallback({
-                                type: 'llm_complete',
-                                data: {
-                                    messageId: currentMessageId,
-                                    totalTokens,
-                                },
-                            });
-                        }
-                        break;
-                    case 'on_tool_start':
-                        if (event.name && event.data?.input && streamingCallback) {
-                            const callId = `tool-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
-                            toolCallMap.set(callId, {
-                                name: event.name,
-                                args: event.data.input,
-                                startTime: Date.now(),
-                            });
-                            await streamingCallback({
-                                type: 'tool_start',
-                                data: {
-                                    tool: event.name,
-                                    input: event.data.input,
-                                    callId,
-                                },
-                            });
-                        }
-                        break;
-                    case 'on_tool_end':
-                        if (event.name && event.data?.output && streamingCallback) {
-                            // Find matching tool call
-                            const matchingCall = Array.from(toolCallMap.entries()).find(([, callData]) => callData.name === event.name);
-                            if (matchingCall) {
-                                const [callId, callData] = matchingCall;
-                                const duration = Date.now() - callData.startTime;
-                                toolCalls.push({
-                                    tool: callData.name,
-                                    input: callData.args,
-                                    output: event.data.output,
-                                });
-                                await streamingCallback({
-                                    type: 'tool_complete',
-                                    data: {
-                                        callId,
-                                        tool: callData.name,
-                                        output: event.data.output,
-                                        duration,
-                                    },
-                                });
-                                toolCallMap.delete(callId);
-                            }
-                        }
-                        break;
-                    case 'on_chain_end':
-                        // This indicates the completion of the entire graph
-                        if (event.data?.output) {
-                            iterations = currentIteration;
-                            // Prevent duplicate iteration_complete events
-                            const iterationKey = `iteration_${currentIteration}`;
-                            if (streamingCallback &&
-                                !processedIterationEvents.has(iterationKey)) {
-                                processedIterationEvents.add(iterationKey);
-                                await streamingCallback({
-                                    type: 'iteration_complete',
-                                    data: {
-                                        iteration: currentIteration,
-                                        hasToolCalls: toolCalls.length > 0,
-                                    },
-                                });
-                            }
-                        }
-                        break;
-                }
-            }
-            // Process final result
-            const accumulatedResponse = accumulatedContent || 'No response generated';
-            // Use shared formatting method
-            const formattedResult = await formatFinalResponse(accumulatedResponse, this.params.model.model, jsonMode);
-            // If there's an error from formatting (e.g., invalid JSON), return early with consistent behavior
-            if (formattedResult.error) {
-                return {
-                    response: formattedResult.response,
-                    toolCalls: toolCalls.length > 0 ? toolCalls : [],
-                    iterations,
-                    error: formattedResult.error,
-                    success: false,
-                };
-            }
-            const finalResponse = formattedResult.response;
-            console.log('[AIAgent] Streaming execution completed with', iterations, 'iterations and', toolCalls.length, 'tool calls');
-            return {
-                response: typeof finalResponse === 'string'
-                    ? finalResponse
-                    : JSON.stringify(finalResponse),
-                toolCalls: toolCalls.length > 0 ? toolCalls : [],
-                iterations,
-                error: '',
-                success: true,
-            };
-        }
-        catch (error) {
-            console.warn('[AIAgent] Streaming execution error (continuing):', error);
-            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
-            return {
-                response: `Execution error: ${errorMessage}`,
-                success: false, // Still false but don't completely halt execution
-                iterations,
-                toolCalls: toolCalls.length > 0 ? toolCalls : [], // Preserve completed tool calls
-                error: errorMessage,
-            };
-        }
-    }
 }
 //# sourceMappingURL=ai-agent.js.map