@mastra/core 1.0.0-beta.11 → 1.0.0-beta.13
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- package/CHANGELOG.md +173 -0
- package/dist/_types/@internal_ai-sdk-v4/dist/index.d.ts +7549 -0
- package/dist/_types/@internal_ai-sdk-v4/dist/test.d.ts +65 -0
- package/dist/_types/@internal_ai-sdk-v5/dist/index.d.ts +8396 -0
- package/dist/_types/@internal_ai-sdk-v5/dist/test.d.ts +1708 -0
- package/dist/_types/@internal_external-types/dist/index.d.ts +858 -0
- package/dist/agent/agent-legacy.d.ts +1 -1
- package/dist/agent/agent.d.ts +1 -1
- package/dist/agent/agent.d.ts.map +1 -1
- package/dist/agent/agent.types.d.ts +5 -1
- package/dist/agent/agent.types.d.ts.map +1 -1
- package/dist/agent/index.cjs +9 -9
- package/dist/agent/index.js +2 -2
- package/dist/agent/message-list/index.cjs +3 -3
- package/dist/agent/message-list/index.d.ts +4 -3
- package/dist/agent/message-list/index.d.ts.map +1 -1
- package/dist/agent/message-list/index.js +1 -1
- package/dist/agent/message-list/prompt/attachments-to-parts.d.ts +1 -1
- package/dist/agent/message-list/prompt/invalid-content-error.d.ts +1 -1
- package/dist/agent/message-list/types.d.ts +3 -3
- package/dist/agent/message-list/types.d.ts.map +1 -1
- package/dist/agent/message-list/utils/ai-v4-v5/core-model-message.d.ts +1 -1
- package/dist/agent/message-list/utils/ai-v4-v5/ui-message.d.ts +1 -1
- package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts +2 -2
- package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts.map +1 -1
- package/dist/agent/message-list/utils/convert-messages.d.ts +2 -2
- package/dist/agent/message-list/utils/convert-messages.d.ts.map +1 -1
- package/dist/agent/types.d.ts +3 -3
- package/dist/agent/utils.d.ts +3 -3
- package/dist/agent/utils.d.ts.map +1 -1
- package/dist/agent/workflows/prepare-stream/index.d.ts +2 -1
- package/dist/agent/workflows/prepare-stream/index.d.ts.map +1 -1
- package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts.map +1 -1
- package/dist/agent/workflows/prepare-stream/stream-step.d.ts +3 -1
- package/dist/agent/workflows/prepare-stream/stream-step.d.ts.map +1 -1
- package/dist/bundler/types.d.ts +14 -1
- package/dist/bundler/types.d.ts.map +1 -1
- package/dist/{chunk-U3XOLEPX.js → chunk-2IU4RGU5.js} +6 -32
- package/dist/chunk-2IU4RGU5.js.map +1 -0
- package/dist/chunk-2SQB3WBT.js +4574 -0
- package/dist/chunk-2SQB3WBT.js.map +1 -0
- package/dist/{chunk-THZTRBFS.js → chunk-373OC54J.js} +8 -8
- package/dist/chunk-373OC54J.js.map +1 -0
- package/dist/{chunk-F2GAJSBI.js → chunk-4BC5FUAO.js} +8 -6
- package/dist/{chunk-F2GAJSBI.js.map → chunk-4BC5FUAO.js.map} +1 -1
- package/dist/chunk-55VPMN3N.js +250 -0
- package/dist/chunk-55VPMN3N.js.map +1 -0
- package/dist/{chunk-QM5SRDJX.js → chunk-5PTZG26U.js} +66 -84
- package/dist/chunk-5PTZG26U.js.map +1 -0
- package/dist/chunk-5VZGJTPR.js +4837 -0
- package/dist/chunk-5VZGJTPR.js.map +1 -0
- package/dist/{chunk-C36YRTZ6.js → chunk-62Q7K656.js} +6 -7
- package/dist/chunk-62Q7K656.js.map +1 -0
- package/dist/chunk-6PMMP3FR.js +7 -0
- package/dist/chunk-6PMMP3FR.js.map +1 -0
- package/dist/{chunk-DZUJEN5N.cjs → chunk-6SZKM6EC.cjs} +10 -3
- package/dist/{chunk-DZUJEN5N.cjs.map → chunk-6SZKM6EC.cjs.map} +1 -1
- package/dist/{chunk-5WRI5ZAA.js → chunk-7D4SUZUM.js} +10 -4
- package/dist/{chunk-5WRI5ZAA.js.map → chunk-7D4SUZUM.js.map} +1 -1
- package/dist/{chunk-YWMMBIOM.cjs → chunk-7HEAVZRS.cjs} +15 -15
- package/dist/{chunk-YWMMBIOM.cjs.map → chunk-7HEAVZRS.cjs.map} +1 -1
- package/dist/{chunk-BUKY6CTR.cjs → chunk-AGHLXC4I.cjs} +106 -36
- package/dist/chunk-AGHLXC4I.cjs.map +1 -0
- package/dist/{chunk-PK2A5WBG.js → chunk-ARAQIW6E.js} +222 -604
- package/dist/chunk-ARAQIW6E.js.map +1 -0
- package/dist/{chunk-US2U7ECW.js → chunk-BQDZIQ3G.js} +156 -90
- package/dist/chunk-BQDZIQ3G.js.map +1 -0
- package/dist/chunk-D22XABFZ.js +79 -0
- package/dist/chunk-D22XABFZ.js.map +1 -0
- package/dist/{chunk-2ULLRN4Y.js → chunk-E5BQRAJK.js} +943 -626
- package/dist/chunk-E5BQRAJK.js.map +1 -0
- package/dist/chunk-FST2G2FQ.cjs +84 -0
- package/dist/chunk-FST2G2FQ.cjs.map +1 -0
- package/dist/chunk-FVQTJUBD.cjs +2120 -0
- package/dist/chunk-FVQTJUBD.cjs.map +1 -0
- package/dist/chunk-G6E6V2Z4.js +2070 -0
- package/dist/chunk-G6E6V2Z4.js.map +1 -0
- package/dist/{chunk-7P6BNIJH.js → chunk-GIWC35YQ.js} +105 -35
- package/dist/chunk-GIWC35YQ.js.map +1 -0
- package/dist/{chunk-4JKEUSCC.cjs → chunk-H4VUIOWU.cjs} +22 -20
- package/dist/chunk-H4VUIOWU.cjs.map +1 -0
- package/dist/{chunk-TWH4PTDG.cjs → chunk-HWMMIRIF.cjs} +32 -27
- package/dist/chunk-HWMMIRIF.cjs.map +1 -0
- package/dist/chunk-IXZ2T2QX.cjs +448 -0
- package/dist/chunk-IXZ2T2QX.cjs.map +1 -0
- package/dist/chunk-L3NKIMF5.cjs +10 -0
- package/dist/chunk-L3NKIMF5.cjs.map +1 -0
- package/dist/chunk-L4JCRWDY.cjs +252 -0
- package/dist/chunk-L4JCRWDY.cjs.map +1 -0
- package/dist/{chunk-BJXKH4LG.cjs → chunk-LGB4VNZI.cjs} +43 -78
- package/dist/chunk-LGB4VNZI.cjs.map +1 -0
- package/dist/{chunk-PG5H6QIO.cjs → chunk-MLKE7HRS.cjs} +41 -21
- package/dist/chunk-MLKE7HRS.cjs.map +1 -0
- package/dist/{chunk-OEIVMCWX.js → chunk-MRRFTNF4.js} +2537 -84
- package/dist/chunk-MRRFTNF4.js.map +1 -0
- package/dist/chunk-MXBVP7HX.cjs +4842 -0
- package/dist/chunk-MXBVP7HX.cjs.map +1 -0
- package/dist/chunk-NESKUIRE.cjs +4586 -0
- package/dist/chunk-NESKUIRE.cjs.map +1 -0
- package/dist/{chunk-SVLMF4UZ.cjs → chunk-NIOEY3N3.cjs} +66 -85
- package/dist/chunk-NIOEY3N3.cjs.map +1 -0
- package/dist/{chunk-CZEJQSWB.cjs → chunk-OWIEOL55.cjs} +295 -677
- package/dist/chunk-OWIEOL55.cjs.map +1 -0
- package/dist/{chunk-WTSZBHIZ.cjs → chunk-PJAK4U6R.cjs} +24 -24
- package/dist/{chunk-WTSZBHIZ.cjs.map → chunk-PJAK4U6R.cjs.map} +1 -1
- package/dist/{chunk-52RSUALV.cjs → chunk-R5AJGM55.cjs} +1314 -995
- package/dist/chunk-R5AJGM55.cjs.map +1 -0
- package/dist/{chunk-IVV5TOMD.js → chunk-RCJLMMTO.js} +32 -12
- package/dist/chunk-RCJLMMTO.js.map +1 -0
- package/dist/{chunk-S73Z3PBJ.cjs → chunk-SZYSDJTN.cjs} +27 -28
- package/dist/chunk-SZYSDJTN.cjs.map +1 -0
- package/dist/{chunk-YC6PJEPH.cjs → chunk-U4CSOY6T.cjs} +188 -122
- package/dist/chunk-U4CSOY6T.cjs.map +1 -0
- package/dist/chunk-UBSPZTQX.js +434 -0
- package/dist/chunk-UBSPZTQX.js.map +1 -0
- package/dist/{chunk-SCUWP4II.cjs → chunk-VEPP75C4.cjs} +47 -74
- package/dist/chunk-VEPP75C4.cjs.map +1 -0
- package/dist/{chunk-JIGDJK2O.js → chunk-VETAQUW3.js} +4 -39
- package/dist/chunk-VETAQUW3.js.map +1 -0
- package/dist/{chunk-Z57R5WS4.js → chunk-WPTTKULS.js} +4 -4
- package/dist/{chunk-Z57R5WS4.js.map → chunk-WPTTKULS.js.map} +1 -1
- package/dist/{chunk-O2BJW7YA.js → chunk-WYGUWVTF.js} +5 -5
- package/dist/{chunk-O2BJW7YA.js.map → chunk-WYGUWVTF.js.map} +1 -1
- package/dist/{chunk-SXNQRJQD.js → chunk-WYWRMIQC.js} +127 -22
- package/dist/chunk-WYWRMIQC.js.map +1 -0
- package/dist/{chunk-5Q6WAYEY.cjs → chunk-X6IBA7FP.cjs} +137 -50
- package/dist/chunk-X6IBA7FP.cjs.map +1 -0
- package/dist/{chunk-MRFUISXC.cjs → chunk-Y7MZ5LJT.cjs} +2632 -179
- package/dist/chunk-Y7MZ5LJT.cjs.map +1 -0
- package/dist/{chunk-JJ5O45LH.js → chunk-YPLZDWG7.js} +32 -27
- package/dist/chunk-YPLZDWG7.js.map +1 -0
- package/dist/{chunk-MGCGWPQJ.cjs → chunk-Z55SJVEC.cjs} +8 -8
- package/dist/chunk-Z55SJVEC.cjs.map +1 -0
- package/dist/error/index.cjs +6 -6
- package/dist/error/index.d.ts +26 -20
- package/dist/error/index.d.ts.map +1 -1
- package/dist/error/index.js +1 -1
- package/dist/error/utils.d.ts +19 -5
- package/dist/error/utils.d.ts.map +1 -1
- package/dist/evals/index.cjs +4 -4
- package/dist/evals/index.js +1 -1
- package/dist/evals/run/index.d.ts +1 -1
- package/dist/evals/scoreTraces/index.cjs +8 -8
- package/dist/evals/scoreTraces/index.js +2 -2
- package/dist/evals/types.d.ts +1 -1
- package/dist/events/event-emitter.d.ts +6 -1
- package/dist/events/event-emitter.d.ts.map +1 -1
- package/dist/index.cjs +2 -2
- package/dist/index.js +1 -1
- package/dist/integration/index.cjs +2 -2
- package/dist/integration/index.js +1 -1
- package/dist/llm/index.cjs +15 -15
- package/dist/llm/index.d.ts +2 -2
- package/dist/llm/index.d.ts.map +1 -1
- package/dist/llm/index.js +5 -5
- package/dist/llm/model/aisdk/v5/model.d.ts +1 -1
- package/dist/llm/model/base.types.d.ts +2 -2
- package/dist/llm/model/model.d.ts +1 -1
- package/dist/llm/model/model.d.ts.map +1 -1
- package/dist/llm/model/model.loop.d.ts +2 -2
- package/dist/llm/model/model.loop.d.ts.map +1 -1
- package/dist/llm/model/model.loop.types.d.ts +1 -1
- package/dist/llm/model/model.loop.types.d.ts.map +1 -1
- package/dist/llm/model/provider-types.generated.d.ts +51 -11
- package/dist/llm/model/shared.types.d.ts +1 -1
- package/dist/loop/index.cjs +2 -2
- package/dist/loop/index.js +1 -1
- package/dist/loop/loop.d.ts +2 -2
- package/dist/loop/loop.d.ts.map +1 -1
- package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts +1 -1
- package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts.map +1 -1
- package/dist/loop/test-utils/options.d.ts.map +1 -1
- package/dist/loop/test-utils/streamObject.d.ts +1 -1
- package/dist/loop/test-utils/streamObject.d.ts.map +1 -1
- package/dist/loop/test-utils/tools.d.ts.map +1 -1
- package/dist/loop/types.d.ts +3 -1
- package/dist/loop/types.d.ts.map +1 -1
- package/dist/loop/workflows/agentic-execution/index.d.ts +13 -13
- package/dist/loop/workflows/agentic-execution/index.d.ts.map +1 -1
- package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts +10 -10
- package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
- package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts +5 -5
- package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts.map +1 -1
- package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts +22 -21
- package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts.map +1 -1
- package/dist/loop/workflows/agentic-loop/index.d.ts +13 -13
- package/dist/loop/workflows/agentic-loop/index.d.ts.map +1 -1
- package/dist/loop/workflows/schema.d.ts +6 -6
- package/dist/loop/workflows/schema.d.ts.map +1 -1
- package/dist/loop/workflows/stream.d.ts +2 -2
- package/dist/loop/workflows/stream.d.ts.map +1 -1
- package/dist/mastra/index.cjs +2 -2
- package/dist/mastra/index.js +1 -1
- package/dist/mcp/index.cjs +4 -4
- package/dist/mcp/index.js +1 -1
- package/dist/memory/index.cjs +6 -6
- package/dist/memory/index.js +1 -1
- package/dist/memory/memory.d.ts +1 -1
- package/dist/memory/types.d.ts +3 -3
- package/dist/memory/types.d.ts.map +1 -1
- package/dist/models-dev-D3EKFGAO.cjs +12 -0
- package/dist/{models-dev-EO3SUIY2.cjs.map → models-dev-D3EKFGAO.cjs.map} +1 -1
- package/dist/models-dev-EO22XOXQ.js +3 -0
- package/dist/{models-dev-23RN2WHG.js.map → models-dev-EO22XOXQ.js.map} +1 -1
- package/dist/netlify-AE4LNCAI.js +3 -0
- package/dist/{netlify-GXJ5D5DD.js.map → netlify-AE4LNCAI.js.map} +1 -1
- package/dist/netlify-WE42TZIT.cjs +12 -0
- package/dist/{netlify-KJLY3GFS.cjs.map → netlify-WE42TZIT.cjs.map} +1 -1
- package/dist/processors/index.cjs +37 -37
- package/dist/processors/index.d.ts +2 -2
- package/dist/processors/index.d.ts.map +1 -1
- package/dist/processors/index.js +1 -1
- package/dist/processors/step-schema.d.ts +1267 -1267
- package/dist/processors/step-schema.d.ts.map +1 -1
- package/dist/provider-registry-6LF3NGC5.js +3 -0
- package/dist/{provider-registry-F67Y6OF2.js.map → provider-registry-6LF3NGC5.js.map} +1 -1
- package/dist/provider-registry-73FKMXJV.cjs +40 -0
- package/dist/{provider-registry-3TG2KUD2.cjs.map → provider-registry-73FKMXJV.cjs.map} +1 -1
- package/dist/provider-registry.json +100 -30
- package/dist/{registry-generator-UMTNPBJX.js → registry-generator-AVQXI3GX.js} +2 -2
- package/dist/{registry-generator-UMTNPBJX.js.map → registry-generator-AVQXI3GX.js.map} +1 -1
- package/dist/{registry-generator-34SC4TAU.cjs → registry-generator-KOFNIIWJ.cjs} +2 -2
- package/dist/{registry-generator-34SC4TAU.cjs.map → registry-generator-KOFNIIWJ.cjs.map} +1 -1
- package/dist/relevance/index.cjs +2 -2
- package/dist/relevance/index.js +1 -1
- package/dist/server/index.cjs +5 -5
- package/dist/server/index.js +1 -1
- package/dist/storage/base.d.ts +2 -10
- package/dist/storage/base.d.ts.map +1 -1
- package/dist/storage/domains/workflows/base.d.ts +2 -8
- package/dist/storage/domains/workflows/base.d.ts.map +1 -1
- package/dist/storage/domains/workflows/inmemory.d.ts +2 -8
- package/dist/storage/domains/workflows/inmemory.d.ts.map +1 -1
- package/dist/storage/index.cjs +38 -38
- package/dist/storage/index.js +1 -1
- package/dist/storage/mock.d.ts +2 -8
- package/dist/storage/mock.d.ts.map +1 -1
- package/dist/storage/types.d.ts +9 -1
- package/dist/storage/types.d.ts.map +1 -1
- package/dist/stream/RunOutput.d.ts +1 -1
- package/dist/stream/aisdk/v4/input.d.ts +1 -1
- package/dist/stream/aisdk/v5/compat/content.d.ts +1 -1
- package/dist/stream/aisdk/v5/compat/content.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts +1 -1
- package/dist/stream/aisdk/v5/compat/prepare-tools.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/compat/ui-message.d.ts +1 -1
- package/dist/stream/aisdk/v5/compat/ui-message.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/compat/validation.d.ts +1 -1
- package/dist/stream/aisdk/v5/compat/validation.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/execute.d.ts +2 -2
- package/dist/stream/aisdk/v5/execute.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/input.d.ts +1 -1
- package/dist/stream/aisdk/v5/input.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/output-helpers.d.ts +12 -27
- package/dist/stream/aisdk/v5/output-helpers.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/output.d.ts +41 -91
- package/dist/stream/aisdk/v5/output.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/transform.d.ts +1 -1
- package/dist/stream/aisdk/v5/transform.d.ts.map +1 -1
- package/dist/stream/base/input.d.ts +1 -1
- package/dist/stream/base/output.d.ts +9 -31
- package/dist/stream/base/output.d.ts.map +1 -1
- package/dist/stream/base/schema.d.ts +2 -2
- package/dist/stream/base/schema.d.ts.map +1 -1
- package/dist/stream/index.cjs +12 -12
- package/dist/stream/index.js +2 -2
- package/dist/stream/types.d.ts +3 -2
- package/dist/stream/types.d.ts.map +1 -1
- package/dist/test-utils/llm-mock.cjs +14587 -14
- package/dist/test-utils/llm-mock.cjs.map +1 -1
- package/dist/test-utils/llm-mock.d.ts +3 -3
- package/dist/test-utils/llm-mock.d.ts.map +1 -1
- package/dist/test-utils/llm-mock.js +14577 -4
- package/dist/test-utils/llm-mock.js.map +1 -1
- package/dist/token-6GSAFR2W-SGVIXFCP.cjs +63 -0
- package/dist/token-6GSAFR2W-SGVIXFCP.cjs.map +1 -0
- package/dist/token-6GSAFR2W-SPYPLMBM.js +61 -0
- package/dist/token-6GSAFR2W-SPYPLMBM.js.map +1 -0
- package/dist/token-util-NEHG7TUY-7GMW5FXI.cjs +10 -0
- package/dist/token-util-NEHG7TUY-7GMW5FXI.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-JRJTGTAB.js +8 -0
- package/dist/token-util-NEHG7TUY-JRJTGTAB.js.map +1 -0
- package/dist/tools/index.cjs +4 -4
- package/dist/tools/index.js +1 -1
- package/dist/tools/is-vercel-tool.cjs +2 -2
- package/dist/tools/is-vercel-tool.js +1 -1
- package/dist/tools/tool-builder/builder.d.ts +2 -1
- package/dist/tools/tool-builder/builder.d.ts.map +1 -1
- package/dist/tools/tool.d.ts.map +1 -1
- package/dist/tools/types.d.ts +5 -5
- package/dist/tools/types.d.ts.map +1 -1
- package/dist/utils.cjs +22 -22
- package/dist/utils.d.ts +3 -3
- package/dist/utils.d.ts.map +1 -1
- package/dist/utils.js +1 -1
- package/dist/vector/embed.d.ts +2 -2
- package/dist/vector/embed.d.ts.map +1 -1
- package/dist/vector/index.cjs +11 -11
- package/dist/vector/index.js +3 -3
- package/dist/vector/vector.d.ts +1 -1
- package/dist/voice/aisdk/speech.d.ts +1 -1
- package/dist/voice/aisdk/speech.d.ts.map +1 -1
- package/dist/voice/aisdk/transcription.d.ts +1 -1
- package/dist/voice/aisdk/transcription.d.ts.map +1 -1
- package/dist/voice/composite-voice.d.ts +1 -1
- package/dist/voice/composite-voice.d.ts.map +1 -1
- package/dist/voice/index.cjs +6 -6
- package/dist/voice/index.js +1 -1
- package/dist/workflows/constants.cjs +4 -4
- package/dist/workflows/constants.d.ts +1 -1
- package/dist/workflows/constants.d.ts.map +1 -1
- package/dist/workflows/constants.js +1 -1
- package/dist/workflows/default.d.ts +9 -16
- package/dist/workflows/default.d.ts.map +1 -1
- package/dist/workflows/evented/execution-engine.d.ts +3 -2
- package/dist/workflows/evented/execution-engine.d.ts.map +1 -1
- package/dist/workflows/evented/index.cjs +10 -10
- package/dist/workflows/evented/index.js +1 -1
- package/dist/workflows/evented/step-executor.d.ts +1 -1
- package/dist/workflows/evented/step-executor.d.ts.map +1 -1
- package/dist/workflows/evented/workflow-event-processor/index.d.ts +1 -1
- package/dist/workflows/evented/workflow-event-processor/index.d.ts.map +1 -1
- package/dist/workflows/evented/workflow.d.ts +15 -0
- package/dist/workflows/evented/workflow.d.ts.map +1 -1
- package/dist/workflows/execution-engine.d.ts +25 -2
- package/dist/workflows/execution-engine.d.ts.map +1 -1
- package/dist/workflows/handlers/control-flow.d.ts +6 -5
- package/dist/workflows/handlers/control-flow.d.ts.map +1 -1
- package/dist/workflows/handlers/entry.d.ts +5 -3
- package/dist/workflows/handlers/entry.d.ts.map +1 -1
- package/dist/workflows/handlers/sleep.d.ts +4 -3
- package/dist/workflows/handlers/sleep.d.ts.map +1 -1
- package/dist/workflows/handlers/step.d.ts +5 -3
- package/dist/workflows/handlers/step.d.ts.map +1 -1
- package/dist/workflows/index.cjs +26 -22
- package/dist/workflows/index.js +1 -1
- package/dist/workflows/step.d.ts +5 -4
- package/dist/workflows/step.d.ts.map +1 -1
- package/dist/workflows/types.d.ts +66 -14
- package/dist/workflows/types.d.ts.map +1 -1
- package/dist/workflows/utils.d.ts +11 -0
- package/dist/workflows/utils.d.ts.map +1 -1
- package/dist/workflows/workflow.d.ts +26 -8
- package/dist/workflows/workflow.d.ts.map +1 -1
- package/package.json +12 -11
- package/src/llm/model/provider-types.generated.d.ts +51 -11
- package/dist/agent/__tests__/mock-model.d.ts +0 -8
- package/dist/agent/__tests__/mock-model.d.ts.map +0 -1
- package/dist/agent/agent-types.test-d.d.ts +0 -2
- package/dist/agent/agent-types.test-d.d.ts.map +0 -1
- package/dist/ai-sdk.types.d.ts +0 -4705
- package/dist/chunk-2ULLRN4Y.js.map +0 -1
- package/dist/chunk-3E3ILV6T.cjs +0 -518
- package/dist/chunk-3E3ILV6T.cjs.map +0 -1
- package/dist/chunk-4JKEUSCC.cjs.map +0 -1
- package/dist/chunk-52RSUALV.cjs.map +0 -1
- package/dist/chunk-5PAEYE3Q.js +0 -513
- package/dist/chunk-5PAEYE3Q.js.map +0 -1
- package/dist/chunk-5Q6WAYEY.cjs.map +0 -1
- package/dist/chunk-7P6BNIJH.js.map +0 -1
- package/dist/chunk-ABJOUEVA.cjs +0 -10
- package/dist/chunk-ABJOUEVA.cjs.map +0 -1
- package/dist/chunk-BJXKH4LG.cjs.map +0 -1
- package/dist/chunk-BUKY6CTR.cjs.map +0 -1
- package/dist/chunk-C36YRTZ6.js.map +0 -1
- package/dist/chunk-CZEJQSWB.cjs.map +0 -1
- package/dist/chunk-IVV5TOMD.js.map +0 -1
- package/dist/chunk-JIGDJK2O.js.map +0 -1
- package/dist/chunk-JJ5O45LH.js.map +0 -1
- package/dist/chunk-MGCGWPQJ.cjs.map +0 -1
- package/dist/chunk-MRFUISXC.cjs.map +0 -1
- package/dist/chunk-NLNKQD2T.js +0 -7
- package/dist/chunk-NLNKQD2T.js.map +0 -1
- package/dist/chunk-OEIVMCWX.js.map +0 -1
- package/dist/chunk-PG5H6QIO.cjs.map +0 -1
- package/dist/chunk-PK2A5WBG.js.map +0 -1
- package/dist/chunk-QM5SRDJX.js.map +0 -1
- package/dist/chunk-S73Z3PBJ.cjs.map +0 -1
- package/dist/chunk-SCUWP4II.cjs.map +0 -1
- package/dist/chunk-SVLMF4UZ.cjs.map +0 -1
- package/dist/chunk-SXNQRJQD.js.map +0 -1
- package/dist/chunk-THZTRBFS.js.map +0 -1
- package/dist/chunk-TWH4PTDG.cjs.map +0 -1
- package/dist/chunk-U3XOLEPX.js.map +0 -1
- package/dist/chunk-US2U7ECW.js.map +0 -1
- package/dist/chunk-YC6PJEPH.cjs.map +0 -1
- package/dist/models-dev-23RN2WHG.js +0 -3
- package/dist/models-dev-EO3SUIY2.cjs +0 -12
- package/dist/netlify-GXJ5D5DD.js +0 -3
- package/dist/netlify-KJLY3GFS.cjs +0 -12
- package/dist/provider-registry-3TG2KUD2.cjs +0 -40
- package/dist/provider-registry-F67Y6OF2.js +0 -3
- package/dist/tools/tool-stream-types.test-d.d.ts +0 -2
- package/dist/tools/tool-stream-types.test-d.d.ts.map +0 -1
package/dist/_types/@internal_ai-sdk-v5/dist/test.d.ts
@@ -0,0 +1,1708 @@
|
|
|
1
|
+
export declare function convertArrayToAsyncIterable<T>(values: T[]): AsyncIterable<T>;
|
|
2
|
+
|
|
3
|
+
export declare function convertArrayToReadableStream<T>(values: T[]): ReadableStream<T>;
|
|
4
|
+
|
|
5
|
+
export declare function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promise<T[]>;
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
Specification for an embedding model that implements the embedding model
|
|
9
|
+
interface version 1.
|
|
10
|
+
|
|
11
|
+
VALUE is the type of the values that the model can embed.
|
|
12
|
+
This will allow us to go beyond text embeddings in the future,
|
|
13
|
+
e.g. to support image embeddings
|
|
14
|
+
*/
|
|
15
|
+
declare type EmbeddingModelV2<VALUE> = {
|
|
16
|
+
/**
|
|
17
|
+
The embedding model must specify which embedding model interface
|
|
18
|
+
version it implements. This will allow us to evolve the embedding
|
|
19
|
+
model interface and retain backwards compatibility. The different
|
|
20
|
+
implementation versions can be handled as a discriminated union
|
|
21
|
+
on our side.
|
|
22
|
+
*/
|
|
23
|
+
readonly specificationVersion: 'v2';
|
|
24
|
+
/**
|
|
25
|
+
Name of the provider for logging purposes.
|
|
26
|
+
*/
|
|
27
|
+
readonly provider: string;
|
|
28
|
+
/**
|
|
29
|
+
Provider-specific model ID for logging purposes.
|
|
30
|
+
*/
|
|
31
|
+
readonly modelId: string;
|
|
32
|
+
/**
|
|
33
|
+
Limit of how many embeddings can be generated in a single API call.
|
|
34
|
+
|
|
35
|
+
Use Infinity for models that do not have a limit.
|
|
36
|
+
*/
|
|
37
|
+
readonly maxEmbeddingsPerCall: PromiseLike<number | undefined> | number | undefined;
|
|
38
|
+
/**
|
|
39
|
+
True if the model can handle multiple embedding calls in parallel.
|
|
40
|
+
*/
|
|
41
|
+
readonly supportsParallelCalls: PromiseLike<boolean> | boolean;
|
|
42
|
+
/**
|
|
43
|
+
Generates a list of embeddings for the given input text.
|
|
44
|
+
|
|
45
|
+
Naming: "do" prefix to prevent accidental direct usage of the method
|
|
46
|
+
by the user.
|
|
47
|
+
*/
|
|
48
|
+
doEmbed(options: {
|
|
49
|
+
/**
|
|
50
|
+
List of values to embed.
|
|
51
|
+
*/
|
|
52
|
+
values: Array<VALUE>;
|
|
53
|
+
/**
|
|
54
|
+
Abort signal for cancelling the operation.
|
|
55
|
+
*/
|
|
56
|
+
abortSignal?: AbortSignal;
|
|
57
|
+
/**
|
|
58
|
+
Additional provider-specific options. They are passed through
|
|
59
|
+
to the provider from the AI SDK and enable provider-specific
|
|
60
|
+
functionality that can be fully encapsulated in the provider.
|
|
61
|
+
*/
|
|
62
|
+
providerOptions?: SharedV2ProviderOptions;
|
|
63
|
+
/**
|
|
64
|
+
Additional HTTP headers to be sent with the request.
|
|
65
|
+
Only applicable for HTTP-based providers.
|
|
66
|
+
*/
|
|
67
|
+
headers?: Record<string, string | undefined>;
|
|
68
|
+
}): PromiseLike<{
|
|
69
|
+
/**
|
|
70
|
+
Generated embeddings. They are in the same order as the input values.
|
|
71
|
+
*/
|
|
72
|
+
embeddings: Array<EmbeddingModelV2Embedding>;
|
|
73
|
+
/**
|
|
74
|
+
Token usage. We only have input tokens for embeddings.
|
|
75
|
+
*/
|
|
76
|
+
usage?: {
|
|
77
|
+
tokens: number;
|
|
78
|
+
};
|
|
79
|
+
/**
|
|
80
|
+
Additional provider-specific metadata. They are passed through
|
|
81
|
+
from the provider to the AI SDK and enable provider-specific
|
|
82
|
+
results that can be fully encapsulated in the provider.
|
|
83
|
+
*/
|
|
84
|
+
providerMetadata?: SharedV2ProviderMetadata;
|
|
85
|
+
/**
|
|
86
|
+
Optional response information for debugging purposes.
|
|
87
|
+
*/
|
|
88
|
+
response?: {
|
|
89
|
+
/**
|
|
90
|
+
Response headers.
|
|
91
|
+
*/
|
|
92
|
+
headers?: SharedV2Headers;
|
|
93
|
+
/**
|
|
94
|
+
The response body.
|
|
95
|
+
*/
|
|
96
|
+
body?: unknown;
|
|
97
|
+
};
|
|
98
|
+
}>;
|
|
99
|
+
};
|
|
100
|
+
|
|
101
|
+
/**
|
|
102
|
+
An embedding is a vector, i.e. an array of numbers.
|
|
103
|
+
It is e.g. used to represent a text as a vector of word embeddings.
|
|
104
|
+
*/
|
|
105
|
+
declare type EmbeddingModelV2Embedding = Array<number>;
|
|
106
|
+
|
|
107
|
+
declare type GetMaxImagesPerCallFunction = (options: {
|
|
108
|
+
modelId: string;
|
|
109
|
+
}) => PromiseLike<number | undefined> | number | undefined;
|
|
110
|
+
|
|
111
|
+
/**
|
|
112
|
+
Image generation model specification version 2.
|
|
113
|
+
*/
|
|
114
|
+
declare type ImageModelV2 = {
|
|
115
|
+
/**
|
|
116
|
+
The image model must specify which image model interface
|
|
117
|
+
version it implements. This will allow us to evolve the image
|
|
118
|
+
model interface and retain backwards compatibility. The different
|
|
119
|
+
implementation versions can be handled as a discriminated union
|
|
120
|
+
on our side.
|
|
121
|
+
*/
|
|
122
|
+
readonly specificationVersion: 'v2';
|
|
123
|
+
/**
|
|
124
|
+
Name of the provider for logging purposes.
|
|
125
|
+
*/
|
|
126
|
+
readonly provider: string;
|
|
127
|
+
/**
|
|
128
|
+
Provider-specific model ID for logging purposes.
|
|
129
|
+
*/
|
|
130
|
+
readonly modelId: string;
|
|
131
|
+
/**
|
|
132
|
+
Limit of how many images can be generated in a single API call.
|
|
133
|
+
Can be set to a number for a fixed limit, to undefined to use
|
|
134
|
+
the global limit, or a function that returns a number or undefined,
|
|
135
|
+
optionally as a promise.
|
|
136
|
+
*/
|
|
137
|
+
readonly maxImagesPerCall: number | undefined | GetMaxImagesPerCallFunction;
|
|
138
|
+
/**
|
|
139
|
+
Generates an array of images.
|
|
140
|
+
*/
|
|
141
|
+
doGenerate(options: ImageModelV2CallOptions): PromiseLike<{
|
|
142
|
+
/**
|
|
143
|
+
Generated images as base64 encoded strings or binary data.
|
|
144
|
+
The images should be returned without any unnecessary conversion.
|
|
145
|
+
If the API returns base64 encoded strings, the images should be returned
|
|
146
|
+
as base64 encoded strings. If the API returns binary data, the images should
|
|
147
|
+
be returned as binary data.
|
|
148
|
+
*/
|
|
149
|
+
images: Array<string> | Array<Uint8Array>;
|
|
150
|
+
/**
|
|
151
|
+
Warnings for the call, e.g. unsupported settings.
|
|
152
|
+
*/
|
|
153
|
+
warnings: Array<ImageModelV2CallWarning>;
|
|
154
|
+
/**
|
|
155
|
+
Additional provider-specific metadata. They are passed through
|
|
156
|
+
from the provider to the AI SDK and enable provider-specific
|
|
157
|
+
results that can be fully encapsulated in the provider.
|
|
158
|
+
|
|
159
|
+
The outer record is keyed by the provider name, and the inner
|
|
160
|
+
record is provider-specific metadata. It always includes an
|
|
161
|
+
`images` key with image-specific metadata
|
|
162
|
+
|
|
163
|
+
```ts
|
|
164
|
+
{
|
|
165
|
+
"openai": {
|
|
166
|
+
"images": ["revisedPrompt": "Revised prompt here."]
|
|
167
|
+
}
|
|
168
|
+
}
|
|
169
|
+
```
|
|
170
|
+
*/
|
|
171
|
+
providerMetadata?: ImageModelV2ProviderMetadata;
|
|
172
|
+
/**
|
|
173
|
+
Response information for telemetry and debugging purposes.
|
|
174
|
+
*/
|
|
175
|
+
response: {
|
|
176
|
+
/**
|
|
177
|
+
Timestamp for the start of the generated response.
|
|
178
|
+
*/
|
|
179
|
+
timestamp: Date;
|
|
180
|
+
/**
|
|
181
|
+
The ID of the response model that was used to generate the response.
|
|
182
|
+
*/
|
|
183
|
+
modelId: string;
|
|
184
|
+
/**
|
|
185
|
+
Response headers.
|
|
186
|
+
*/
|
|
187
|
+
headers: Record<string, string> | undefined;
|
|
188
|
+
};
|
|
189
|
+
}>;
|
|
190
|
+
};
|
|
191
|
+
|
|
192
|
+
declare type ImageModelV2CallOptions = {
|
|
193
|
+
/**
|
|
194
|
+
Prompt for the image generation.
|
|
195
|
+
*/
|
|
196
|
+
prompt: string;
|
|
197
|
+
/**
|
|
198
|
+
Number of images to generate.
|
|
199
|
+
*/
|
|
200
|
+
n: number;
|
|
201
|
+
/**
|
|
202
|
+
Size of the images to generate.
|
|
203
|
+
Must have the format `{width}x{height}`.
|
|
204
|
+
`undefined` will use the provider's default size.
|
|
205
|
+
*/
|
|
206
|
+
size: `${number}x${number}` | undefined;
|
|
207
|
+
/**
|
|
208
|
+
Aspect ratio of the images to generate.
|
|
209
|
+
Must have the format `{width}:{height}`.
|
|
210
|
+
`undefined` will use the provider's default aspect ratio.
|
|
211
|
+
*/
|
|
212
|
+
aspectRatio: `${number}:${number}` | undefined;
|
|
213
|
+
/**
|
|
214
|
+
Seed for the image generation.
|
|
215
|
+
`undefined` will use the provider's default seed.
|
|
216
|
+
*/
|
|
217
|
+
seed: number | undefined;
|
|
218
|
+
/**
|
|
219
|
+
Additional provider-specific options that are passed through to the provider
|
|
220
|
+
as body parameters.
|
|
221
|
+
|
|
222
|
+
The outer record is keyed by the provider name, and the inner
|
|
223
|
+
record is keyed by the provider-specific metadata key.
|
|
224
|
+
```ts
|
|
225
|
+
{
|
|
226
|
+
"openai": {
|
|
227
|
+
"style": "vivid"
|
|
228
|
+
}
|
|
229
|
+
}
|
|
230
|
+
```
|
|
231
|
+
*/
|
|
232
|
+
providerOptions: SharedV2ProviderOptions;
|
|
233
|
+
/**
|
|
234
|
+
Abort signal for cancelling the operation.
|
|
235
|
+
*/
|
|
236
|
+
abortSignal?: AbortSignal;
|
|
237
|
+
/**
|
|
238
|
+
Additional HTTP headers to be sent with the request.
|
|
239
|
+
Only applicable for HTTP-based providers.
|
|
240
|
+
*/
|
|
241
|
+
headers?: Record<string, string | undefined>;
|
|
242
|
+
};
|
|
243
|
+
|
|
244
|
+
/**
|
|
245
|
+
Warning from the model provider for this call. The call will proceed, but e.g.
|
|
246
|
+
some settings might not be supported, which can lead to suboptimal results.
|
|
247
|
+
*/
|
|
248
|
+
declare type ImageModelV2CallWarning = {
|
|
249
|
+
type: 'unsupported-setting';
|
|
250
|
+
setting: keyof ImageModelV2CallOptions;
|
|
251
|
+
details?: string;
|
|
252
|
+
} | {
|
|
253
|
+
type: 'other';
|
|
254
|
+
message: string;
|
|
255
|
+
};
|
|
256
|
+
|
|
257
|
+
declare type ImageModelV2ProviderMetadata = Record<string, {
|
|
258
|
+
images: JSONArray;
|
|
259
|
+
} & JSONValue>;
|
|
260
|
+
|
|
261
|
+
declare type JSONArray = JSONValue[];
|
|
262
|
+
|
|
263
|
+
declare type JSONObject = {
|
|
264
|
+
[key: string]: JSONValue;
|
|
265
|
+
};
|
|
266
|
+
|
|
267
|
+
declare interface JSONSchema7 {
|
|
268
|
+
$id?: string | undefined;
|
|
269
|
+
$ref?: string | undefined;
|
|
270
|
+
$schema?: JSONSchema7Version | undefined;
|
|
271
|
+
$comment?: string | undefined;
|
|
272
|
+
|
|
273
|
+
/**
|
|
274
|
+
* @see https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-00#section-8.2.4
|
|
275
|
+
* @see https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#appendix-A
|
|
276
|
+
*/
|
|
277
|
+
$defs?: {
|
|
278
|
+
[key: string]: JSONSchema7Definition;
|
|
279
|
+
} | undefined;
|
|
280
|
+
|
|
281
|
+
/**
|
|
282
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1
|
|
283
|
+
*/
|
|
284
|
+
type?: JSONSchema7TypeName | JSONSchema7TypeName[] | undefined;
|
|
285
|
+
enum?: JSONSchema7Type[] | undefined;
|
|
286
|
+
const?: JSONSchema7Type | undefined;
|
|
287
|
+
|
|
288
|
+
/**
|
|
289
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.2
|
|
290
|
+
*/
|
|
291
|
+
multipleOf?: number | undefined;
|
|
292
|
+
maximum?: number | undefined;
|
|
293
|
+
exclusiveMaximum?: number | undefined;
|
|
294
|
+
minimum?: number | undefined;
|
|
295
|
+
exclusiveMinimum?: number | undefined;
|
|
296
|
+
|
|
297
|
+
/**
|
|
298
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.3
|
|
299
|
+
*/
|
|
300
|
+
maxLength?: number | undefined;
|
|
301
|
+
minLength?: number | undefined;
|
|
302
|
+
pattern?: string | undefined;
|
|
303
|
+
|
|
304
|
+
/**
|
|
305
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.4
|
|
306
|
+
*/
|
|
307
|
+
items?: JSONSchema7Definition | JSONSchema7Definition[] | undefined;
|
|
308
|
+
additionalItems?: JSONSchema7Definition | undefined;
|
|
309
|
+
maxItems?: number | undefined;
|
|
310
|
+
minItems?: number | undefined;
|
|
311
|
+
uniqueItems?: boolean | undefined;
|
|
312
|
+
contains?: JSONSchema7Definition | undefined;
|
|
313
|
+
|
|
314
|
+
/**
|
|
315
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.5
|
|
316
|
+
*/
|
|
317
|
+
maxProperties?: number | undefined;
|
|
318
|
+
minProperties?: number | undefined;
|
|
319
|
+
required?: string[] | undefined;
|
|
320
|
+
properties?: {
|
|
321
|
+
[key: string]: JSONSchema7Definition;
|
|
322
|
+
} | undefined;
|
|
323
|
+
patternProperties?: {
|
|
324
|
+
[key: string]: JSONSchema7Definition;
|
|
325
|
+
} | undefined;
|
|
326
|
+
additionalProperties?: JSONSchema7Definition | undefined;
|
|
327
|
+
dependencies?: {
|
|
328
|
+
[key: string]: JSONSchema7Definition | string[];
|
|
329
|
+
} | undefined;
|
|
330
|
+
propertyNames?: JSONSchema7Definition | undefined;
|
|
331
|
+
|
|
332
|
+
/**
|
|
333
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.6
|
|
334
|
+
*/
|
|
335
|
+
if?: JSONSchema7Definition | undefined;
|
|
336
|
+
then?: JSONSchema7Definition | undefined;
|
|
337
|
+
else?: JSONSchema7Definition | undefined;
|
|
338
|
+
|
|
339
|
+
/**
|
|
340
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.7
|
|
341
|
+
*/
|
|
342
|
+
allOf?: JSONSchema7Definition[] | undefined;
|
|
343
|
+
anyOf?: JSONSchema7Definition[] | undefined;
|
|
344
|
+
oneOf?: JSONSchema7Definition[] | undefined;
|
|
345
|
+
not?: JSONSchema7Definition | undefined;
|
|
346
|
+
|
|
347
|
+
/**
|
|
348
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-7
|
|
349
|
+
*/
|
|
350
|
+
format?: string | undefined;
|
|
351
|
+
|
|
352
|
+
/**
|
|
353
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-8
|
|
354
|
+
*/
|
|
355
|
+
contentMediaType?: string | undefined;
|
|
356
|
+
contentEncoding?: string | undefined;
|
|
357
|
+
|
|
358
|
+
/**
|
|
359
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-9
|
|
360
|
+
*/
|
|
361
|
+
definitions?: {
|
|
362
|
+
[key: string]: JSONSchema7Definition;
|
|
363
|
+
} | undefined;
|
|
364
|
+
|
|
365
|
+
/**
|
|
366
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-10
|
|
367
|
+
*/
|
|
368
|
+
title?: string | undefined;
|
|
369
|
+
description?: string | undefined;
|
|
370
|
+
default?: JSONSchema7Type | undefined;
|
|
371
|
+
readOnly?: boolean | undefined;
|
|
372
|
+
writeOnly?: boolean | undefined;
|
|
373
|
+
examples?: JSONSchema7Type | undefined;
|
|
374
|
+
}
|
|
375
|
+
|
|
376
|
+
declare interface JSONSchema7Array extends Array<JSONSchema7Type> {}
|
|
377
|
+
|
|
378
|
+
/**
|
|
379
|
+
* JSON Schema v7
|
|
380
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
|
|
381
|
+
*/
|
|
382
|
+
declare type JSONSchema7Definition = JSONSchema7 | boolean;
|
|
383
|
+
|
|
384
|
+
declare interface JSONSchema7Object {
|
|
385
|
+
[key: string]: JSONSchema7Type;
|
|
386
|
+
}
|
|
387
|
+
|
|
388
|
+
/**
|
|
389
|
+
* Primitive type
|
|
390
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
|
|
391
|
+
*/
|
|
392
|
+
declare type JSONSchema7Type =
|
|
393
|
+
| string //
|
|
394
|
+
| number
|
|
395
|
+
| boolean
|
|
396
|
+
| JSONSchema7Object
|
|
397
|
+
| JSONSchema7Array
|
|
398
|
+
| null;
|
|
399
|
+
|
|
400
|
+
/**
|
|
401
|
+
* Primitive type
|
|
402
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
|
|
403
|
+
*/
|
|
404
|
+
declare type JSONSchema7TypeName =
|
|
405
|
+
| "string" //
|
|
406
|
+
| "number"
|
|
407
|
+
| "integer"
|
|
408
|
+
| "boolean"
|
|
409
|
+
| "object"
|
|
410
|
+
| "array"
|
|
411
|
+
| "null";
|
|
412
|
+
|
|
413
|
+
/**
|
|
414
|
+
* Meta schema
|
|
415
|
+
*
|
|
416
|
+
* Recommended values:
|
|
417
|
+
* - 'http://json-schema.org/schema#'
|
|
418
|
+
* - 'http://json-schema.org/hyper-schema#'
|
|
419
|
+
* - 'http://json-schema.org/draft-07/schema#'
|
|
420
|
+
* - 'http://json-schema.org/draft-07/hyper-schema#'
|
|
421
|
+
*
|
|
422
|
+
* @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-5
|
|
423
|
+
*/
|
|
424
|
+
declare type JSONSchema7Version = string;
|
|
425
|
+
|
|
426
|
+
/**
|
|
427
|
+
A JSON value can be a string, number, boolean, object, array, or null.
|
|
428
|
+
JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
|
|
429
|
+
*/
|
|
430
|
+
declare type JSONValue = null | string | number | boolean | JSONObject | JSONArray;
|
|
431
|
+
|
|
432
|
+
/**
|
|
433
|
+
Specification for a language model that implements the language model interface version 2.
|
|
434
|
+
*/
|
|
435
|
+
declare type LanguageModelV2 = {
|
|
436
|
+
/**
|
|
437
|
+
The language model must specify which language model interface version it implements.
|
|
438
|
+
*/
|
|
439
|
+
readonly specificationVersion: 'v2';
|
|
440
|
+
/**
|
|
441
|
+
Name of the provider for logging purposes.
|
|
442
|
+
*/
|
|
443
|
+
readonly provider: string;
|
|
444
|
+
/**
|
|
445
|
+
Provider-specific model ID for logging purposes.
|
|
446
|
+
*/
|
|
447
|
+
readonly modelId: string;
|
|
448
|
+
/**
|
|
449
|
+
Supported URL patterns by media type for the provider.
|
|
450
|
+
|
|
451
|
+
The keys are media type patterns or full media types (e.g. `*\/*` for everything, `audio/*`, `video/*`, or `application/pdf`).
|
|
452
|
+
and the values are arrays of regular expressions that match the URL paths.
|
|
453
|
+
|
|
454
|
+
The matching should be against lower-case URLs.
|
|
455
|
+
|
|
456
|
+
Matched URLs are supported natively by the model and are not downloaded.
|
|
457
|
+
|
|
458
|
+
@returns A map of supported URL patterns by media type (as a promise or a plain object).
|
|
459
|
+
*/
|
|
460
|
+
supportedUrls: PromiseLike<Record<string, RegExp[]>> | Record<string, RegExp[]>;
|
|
461
|
+
/**
|
|
462
|
+
Generates a language model output (non-streaming).
|
|
463
|
+
|
|
464
|
+
Naming: "do" prefix to prevent accidental direct usage of the method
|
|
465
|
+
by the user.
|
|
466
|
+
*/
|
|
467
|
+
doGenerate(options: LanguageModelV2CallOptions): PromiseLike<{
|
|
468
|
+
/**
|
|
469
|
+
Ordered content that the model has generated.
|
|
470
|
+
*/
|
|
471
|
+
content: Array<LanguageModelV2Content>;
|
|
472
|
+
/**
|
|
473
|
+
Finish reason.
|
|
474
|
+
*/
|
|
475
|
+
finishReason: LanguageModelV2FinishReason;
|
|
476
|
+
/**
|
|
477
|
+
Usage information.
|
|
478
|
+
*/
|
|
479
|
+
usage: LanguageModelV2Usage;
|
|
480
|
+
/**
|
|
481
|
+
Additional provider-specific metadata. They are passed through
|
|
482
|
+
from the provider to the AI SDK and enable provider-specific
|
|
483
|
+
results that can be fully encapsulated in the provider.
|
|
484
|
+
*/
|
|
485
|
+
providerMetadata?: SharedV2ProviderMetadata;
|
|
486
|
+
/**
|
|
487
|
+
Optional request information for telemetry and debugging purposes.
|
|
488
|
+
*/
|
|
489
|
+
request?: {
|
|
490
|
+
/**
|
|
491
|
+
Request HTTP body that was sent to the provider API.
|
|
492
|
+
*/
|
|
493
|
+
body?: unknown;
|
|
494
|
+
};
|
|
495
|
+
/**
|
|
496
|
+
Optional response information for telemetry and debugging purposes.
|
|
497
|
+
*/
|
|
498
|
+
response?: LanguageModelV2ResponseMetadata & {
|
|
499
|
+
/**
|
|
500
|
+
Response headers.
|
|
501
|
+
*/
|
|
502
|
+
headers?: SharedV2Headers;
|
|
503
|
+
/**
|
|
504
|
+
Response HTTP body.
|
|
505
|
+
*/
|
|
506
|
+
body?: unknown;
|
|
507
|
+
};
|
|
508
|
+
/**
|
|
509
|
+
Warnings for the call, e.g. unsupported settings.
|
|
510
|
+
*/
|
|
511
|
+
warnings: Array<LanguageModelV2CallWarning>;
|
|
512
|
+
}>;
|
|
513
|
+
/**
|
|
514
|
+
Generates a language model output (streaming).
|
|
515
|
+
|
|
516
|
+
Naming: "do" prefix to prevent accidental direct usage of the method
|
|
517
|
+
by the user.
|
|
518
|
+
*
|
|
519
|
+
@return A stream of higher-level language model output parts.
|
|
520
|
+
*/
|
|
521
|
+
doStream(options: LanguageModelV2CallOptions): PromiseLike<{
|
|
522
|
+
stream: ReadableStream<LanguageModelV2StreamPart>;
|
|
523
|
+
/**
|
|
524
|
+
Optional request information for telemetry and debugging purposes.
|
|
525
|
+
*/
|
|
526
|
+
request?: {
|
|
527
|
+
/**
|
|
528
|
+
Request HTTP body that was sent to the provider API.
|
|
529
|
+
*/
|
|
530
|
+
body?: unknown;
|
|
531
|
+
};
|
|
532
|
+
/**
|
|
533
|
+
Optional response data.
|
|
534
|
+
*/
|
|
535
|
+
response?: {
|
|
536
|
+
/**
|
|
537
|
+
Response headers.
|
|
538
|
+
*/
|
|
539
|
+
headers?: SharedV2Headers;
|
|
540
|
+
};
|
|
541
|
+
}>;
|
|
542
|
+
};
|
|
543
|
+
|
|
544
|
+
declare type LanguageModelV2CallOptions = {
|
|
545
|
+
/**
|
|
546
|
+
A language mode prompt is a standardized prompt type.
|
|
547
|
+
|
|
548
|
+
Note: This is **not** the user-facing prompt. The AI SDK methods will map the
|
|
549
|
+
user-facing prompt types such as chat or instruction prompts to this format.
|
|
550
|
+
That approach allows us to evolve the user facing prompts without breaking
|
|
551
|
+
the language model interface.
|
|
552
|
+
*/
|
|
553
|
+
prompt: LanguageModelV2Prompt;
|
|
554
|
+
/**
|
|
555
|
+
Maximum number of tokens to generate.
|
|
556
|
+
*/
|
|
557
|
+
maxOutputTokens?: number;
|
|
558
|
+
/**
|
|
559
|
+
Temperature setting. The range depends on the provider and model.
|
|
560
|
+
*/
|
|
561
|
+
temperature?: number;
|
|
562
|
+
/**
|
|
563
|
+
Stop sequences.
|
|
564
|
+
If set, the model will stop generating text when one of the stop sequences is generated.
|
|
565
|
+
Providers may have limits on the number of stop sequences.
|
|
566
|
+
*/
|
|
567
|
+
stopSequences?: string[];
|
|
568
|
+
/**
|
|
569
|
+
Nucleus sampling.
|
|
570
|
+
*/
|
|
571
|
+
topP?: number;
|
|
572
|
+
/**
|
|
573
|
+
Only sample from the top K options for each subsequent token.
|
|
574
|
+
|
|
575
|
+
Used to remove "long tail" low probability responses.
|
|
576
|
+
Recommended for advanced use cases only. You usually only need to use temperature.
|
|
577
|
+
*/
|
|
578
|
+
topK?: number;
|
|
579
|
+
/**
|
|
580
|
+
Presence penalty setting. It affects the likelihood of the model to
|
|
581
|
+
repeat information that is already in the prompt.
|
|
582
|
+
*/
|
|
583
|
+
presencePenalty?: number;
|
|
584
|
+
/**
|
|
585
|
+
Frequency penalty setting. It affects the likelihood of the model
|
|
586
|
+
to repeatedly use the same words or phrases.
|
|
587
|
+
*/
|
|
588
|
+
frequencyPenalty?: number;
|
|
589
|
+
/**
|
|
590
|
+
Response format. The output can either be text or JSON. Default is text.
|
|
591
|
+
|
|
592
|
+
If JSON is selected, a schema can optionally be provided to guide the LLM.
|
|
593
|
+
*/
|
|
594
|
+
responseFormat?: {
|
|
595
|
+
type: 'text';
|
|
596
|
+
} | {
|
|
597
|
+
type: 'json';
|
|
598
|
+
/**
|
|
599
|
+
* JSON schema that the generated output should conform to.
|
|
600
|
+
*/
|
|
601
|
+
schema?: JSONSchema7;
|
|
602
|
+
/**
|
|
603
|
+
* Name of output that should be generated. Used by some providers for additional LLM guidance.
|
|
604
|
+
*/
|
|
605
|
+
name?: string;
|
|
606
|
+
/**
|
|
607
|
+
* Description of the output that should be generated. Used by some providers for additional LLM guidance.
|
|
608
|
+
*/
|
|
609
|
+
description?: string;
|
|
610
|
+
};
|
|
611
|
+
/**
|
|
612
|
+
The seed (integer) to use for random sampling. If set and supported
|
|
613
|
+
by the model, calls will generate deterministic results.
|
|
614
|
+
*/
|
|
615
|
+
seed?: number;
|
|
616
|
+
/**
|
|
617
|
+
The tools that are available for the model.
|
|
618
|
+
*/
|
|
619
|
+
tools?: Array<LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool>;
|
|
620
|
+
/**
|
|
621
|
+
Specifies how the tool should be selected. Defaults to 'auto'.
|
|
622
|
+
*/
|
|
623
|
+
toolChoice?: LanguageModelV2ToolChoice;
|
|
624
|
+
/**
|
|
625
|
+
Include raw chunks in the stream. Only applicable for streaming calls.
|
|
626
|
+
*/
|
|
627
|
+
includeRawChunks?: boolean;
|
|
628
|
+
/**
|
|
629
|
+
Abort signal for cancelling the operation.
|
|
630
|
+
*/
|
|
631
|
+
abortSignal?: AbortSignal;
|
|
632
|
+
/**
|
|
633
|
+
Additional HTTP headers to be sent with the request.
|
|
634
|
+
Only applicable for HTTP-based providers.
|
|
635
|
+
*/
|
|
636
|
+
headers?: Record<string, string | undefined>;
|
|
637
|
+
/**
|
|
638
|
+
* Additional provider-specific options. They are passed through
|
|
639
|
+
* to the provider from the AI SDK and enable provider-specific
|
|
640
|
+
* functionality that can be fully encapsulated in the provider.
|
|
641
|
+
*/
|
|
642
|
+
providerOptions?: SharedV2ProviderOptions;
|
|
643
|
+
};
|
|
644
|
+
|
|
645
|
+
/**
|
|
646
|
+
Warning from the model provider for this call. The call will proceed, but e.g.
|
|
647
|
+
some settings might not be supported, which can lead to suboptimal results.
|
|
648
|
+
*/
|
|
649
|
+
declare type LanguageModelV2CallWarning = {
|
|
650
|
+
type: 'unsupported-setting';
|
|
651
|
+
setting: Omit<keyof LanguageModelV2CallOptions, 'prompt'>;
|
|
652
|
+
details?: string;
|
|
653
|
+
} | {
|
|
654
|
+
type: 'unsupported-tool';
|
|
655
|
+
tool: LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool;
|
|
656
|
+
details?: string;
|
|
657
|
+
} | {
|
|
658
|
+
type: 'other';
|
|
659
|
+
message: string;
|
|
660
|
+
};
|
|
661
|
+
|
|
662
|
+
declare type LanguageModelV2Content = LanguageModelV2Text | LanguageModelV2Reasoning | LanguageModelV2File | LanguageModelV2Source | LanguageModelV2ToolCall | LanguageModelV2ToolResult;
|
|
663
|
+
|
|
664
|
+
/**
|
|
665
|
+
Data content. Can be a Uint8Array, base64 encoded data as a string or a URL.
|
|
666
|
+
*/
|
|
667
|
+
declare type LanguageModelV2DataContent = Uint8Array | string | URL;
|
|
668
|
+
|
|
669
|
+
/**
|
|
670
|
+
A file that has been generated by the model.
|
|
671
|
+
Generated files as base64 encoded strings or binary data.
|
|
672
|
+
The files should be returned without any unnecessary conversion.
|
|
673
|
+
*/
|
|
674
|
+
declare type LanguageModelV2File = {
|
|
675
|
+
type: 'file';
|
|
676
|
+
/**
|
|
677
|
+
The IANA media type of the file, e.g. `image/png` or `audio/mp3`.
|
|
678
|
+
|
|
679
|
+
@see https://www.iana.org/assignments/media-types/media-types.xhtml
|
|
680
|
+
*/
|
|
681
|
+
mediaType: string;
|
|
682
|
+
/**
|
|
683
|
+
Generated file data as base64 encoded strings or binary data.
|
|
684
|
+
|
|
685
|
+
The file data should be returned without any unnecessary conversion.
|
|
686
|
+
If the API returns base64 encoded strings, the file data should be returned
|
|
687
|
+
as base64 encoded strings. If the API returns binary data, the file data should
|
|
688
|
+
be returned as binary data.
|
|
689
|
+
*/
|
|
690
|
+
data: string | Uint8Array;
|
|
691
|
+
};
|
|
692
|
+
|
|
693
|
+
/**
|
|
694
|
+
File content part of a prompt. It contains a file.
|
|
695
|
+
*/
|
|
696
|
+
declare interface LanguageModelV2FilePart {
|
|
697
|
+
type: 'file';
|
|
698
|
+
/**
|
|
699
|
+
* Optional filename of the file.
|
|
700
|
+
*/
|
|
701
|
+
filename?: string;
|
|
702
|
+
/**
|
|
703
|
+
File data. Can be a Uint8Array, base64 encoded data as a string or a URL.
|
|
704
|
+
*/
|
|
705
|
+
data: LanguageModelV2DataContent;
|
|
706
|
+
/**
|
|
707
|
+
IANA media type of the file.
|
|
708
|
+
|
|
709
|
+
Can support wildcards, e.g. `image/*` (in which case the provider needs to take appropriate action).
|
|
710
|
+
|
|
711
|
+
@see https://www.iana.org/assignments/media-types/media-types.xhtml
|
|
712
|
+
*/
|
|
713
|
+
mediaType: string;
|
|
714
|
+
/**
|
|
715
|
+
* Additional provider-specific options. They are passed through
|
|
716
|
+
* to the provider from the AI SDK and enable provider-specific
|
|
717
|
+
* functionality that can be fully encapsulated in the provider.
|
|
718
|
+
*/
|
|
719
|
+
providerOptions?: SharedV2ProviderOptions;
|
|
720
|
+
}
|
|
721
|
+
|
|
722
|
+
/**
|
|
723
|
+
Reason why a language model finished generating a response.
|
|
724
|
+
|
|
725
|
+
Can be one of the following:
|
|
726
|
+
- `stop`: model generated stop sequence
|
|
727
|
+
- `length`: model generated maximum number of tokens
|
|
728
|
+
- `content-filter`: content filter violation stopped the model
|
|
729
|
+
- `tool-calls`: model triggered tool calls
|
|
730
|
+
- `error`: model stopped because of an error
|
|
731
|
+
- `other`: model stopped for other reasons
|
|
732
|
+
- `unknown`: the model has not transmitted a finish reason
|
|
733
|
+
*/
|
|
734
|
+
declare type LanguageModelV2FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other' | 'unknown';
|
|
735
|
+
|
|
736
|
+
/**
|
|
737
|
+
A tool has a name, a description, and a set of parameters.
|
|
738
|
+
|
|
739
|
+
Note: this is **not** the user-facing tool definition. The AI SDK methods will
|
|
740
|
+
map the user-facing tool definitions to this format.
|
|
741
|
+
*/
|
|
742
|
+
declare type LanguageModelV2FunctionTool = {
|
|
743
|
+
/**
|
|
744
|
+
The type of the tool (always 'function').
|
|
745
|
+
*/
|
|
746
|
+
type: 'function';
|
|
747
|
+
/**
|
|
748
|
+
The name of the tool. Unique within this model call.
|
|
749
|
+
*/
|
|
750
|
+
name: string;
|
|
751
|
+
/**
|
|
752
|
+
A description of the tool. The language model uses this to understand the
|
|
753
|
+
tool's purpose and to provide better completion suggestions.
|
|
754
|
+
*/
|
|
755
|
+
description?: string;
|
|
756
|
+
/**
|
|
757
|
+
The parameters that the tool expects. The language model uses this to
|
|
758
|
+
understand the tool's input requirements and to provide matching suggestions.
|
|
759
|
+
*/
|
|
760
|
+
inputSchema: JSONSchema7;
|
|
761
|
+
/**
|
|
762
|
+
The provider-specific options for the tool.
|
|
763
|
+
*/
|
|
764
|
+
providerOptions?: SharedV2ProviderOptions;
|
|
765
|
+
};
|
|
766
|
+
|
|
767
|
+
declare type LanguageModelV2Message = ({
|
|
768
|
+
role: 'system';
|
|
769
|
+
content: string;
|
|
770
|
+
} | {
|
|
771
|
+
role: 'user';
|
|
772
|
+
content: Array<LanguageModelV2TextPart | LanguageModelV2FilePart>;
|
|
773
|
+
} | {
|
|
774
|
+
role: 'assistant';
|
|
775
|
+
content: Array<LanguageModelV2TextPart | LanguageModelV2FilePart | LanguageModelV2ReasoningPart | LanguageModelV2ToolCallPart | LanguageModelV2ToolResultPart>;
|
|
776
|
+
} | {
|
|
777
|
+
role: 'tool';
|
|
778
|
+
content: Array<LanguageModelV2ToolResultPart>;
|
|
779
|
+
}) & {
|
|
780
|
+
/**
|
|
781
|
+
* Additional provider-specific options. They are passed through
|
|
782
|
+
* to the provider from the AI SDK and enable provider-specific
|
|
783
|
+
* functionality that can be fully encapsulated in the provider.
|
|
784
|
+
*/
|
|
785
|
+
providerOptions?: SharedV2ProviderOptions;
|
|
786
|
+
};
|
|
787
|
+
|
|
788
|
+
/**
|
|
789
|
+
A prompt is a list of messages.
|
|
790
|
+
|
|
791
|
+
Note: Not all models and prompt formats support multi-modal inputs and
|
|
792
|
+
tool calls. The validation happens at runtime.
|
|
793
|
+
|
|
794
|
+
Note: This is not a user-facing prompt. The AI SDK methods will map the
|
|
795
|
+
user-facing prompt types such as chat or instruction prompts to this format.
|
|
796
|
+
*/
|
|
797
|
+
declare type LanguageModelV2Prompt = Array<LanguageModelV2Message>;
|
|
798
|
+
|
|
799
|
+
/**
|
|
800
|
+
The configuration of a tool that is defined by the provider.
|
|
801
|
+
*/
|
|
802
|
+
declare type LanguageModelV2ProviderDefinedTool = {
|
|
803
|
+
/**
|
|
804
|
+
The type of the tool (always 'provider-defined').
|
|
805
|
+
*/
|
|
806
|
+
type: 'provider-defined';
|
|
807
|
+
/**
|
|
808
|
+
The ID of the tool. Should follow the format `<provider-name>.<unique-tool-name>`.
|
|
809
|
+
*/
|
|
810
|
+
id: `${string}.${string}`;
|
|
811
|
+
/**
|
|
812
|
+
The name of the tool that the user must use in the tool set.
|
|
813
|
+
*/
|
|
814
|
+
name: string;
|
|
815
|
+
/**
|
|
816
|
+
The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
|
|
817
|
+
*/
|
|
818
|
+
args: Record<string, unknown>;
|
|
819
|
+
};
|
|
820
|
+
|
|
821
|
+
/**
|
|
822
|
+
Reasoning that the model has generated.
|
|
823
|
+
*/
|
|
824
|
+
declare type LanguageModelV2Reasoning = {
|
|
825
|
+
type: 'reasoning';
|
|
826
|
+
text: string;
|
|
827
|
+
/**
|
|
828
|
+
* Optional provider-specific metadata for the reasoning part.
|
|
829
|
+
*/
|
|
830
|
+
providerMetadata?: SharedV2ProviderMetadata;
|
|
831
|
+
};
|
|
832
|
+
|
|
833
|
+
/**
|
|
834
|
+
Reasoning content part of a prompt. It contains a string of reasoning text.
|
|
835
|
+
*/
|
|
836
|
+
declare interface LanguageModelV2ReasoningPart {
|
|
837
|
+
type: 'reasoning';
|
|
838
|
+
/**
|
|
839
|
+
The reasoning text.
|
|
840
|
+
*/
|
|
841
|
+
text: string;
|
|
842
|
+
/**
|
|
843
|
+
* Additional provider-specific options. They are passed through
|
|
844
|
+
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerOptions?: SharedV2ProviderOptions;
}

declare interface LanguageModelV2ResponseMetadata {
    /**
    ID for the generated response, if the provider sends one.
     */
    id?: string;
    /**
    Timestamp for the start of the generated response, if the provider sends one.
     */
    timestamp?: Date;
    /**
    The ID of the response model that was used to generate the response, if the provider sends one.
     */
    modelId?: string;
}

/**
A source that has been used as input to generate the response.
 */
declare type LanguageModelV2Source = {
    type: 'source';
    /**
     * The type of source - URL sources reference web content.
     */
    sourceType: 'url';
    /**
     * The ID of the source.
     */
    id: string;
    /**
     * The URL of the source.
     */
    url: string;
    /**
     * The title of the source.
     */
    title?: string;
    /**
     * Additional provider metadata for the source.
     */
    providerMetadata?: SharedV2ProviderMetadata;
} | {
    type: 'source';
    /**
     * The type of source - document sources reference files/documents.
     */
    sourceType: 'document';
    /**
     * The ID of the source.
     */
    id: string;
    /**
     * IANA media type of the document (e.g., 'application/pdf').
     */
    mediaType: string;
    /**
     * The title of the document.
     */
    title: string;
    /**
     * Optional filename of the document.
     */
    filename?: string;
    /**
     * Additional provider metadata for the source.
     */
    providerMetadata?: SharedV2ProviderMetadata;
};

declare type LanguageModelV2StreamPart = {
    type: 'text-start';
    providerMetadata?: SharedV2ProviderMetadata;
    id: string;
} | {
    type: 'text-delta';
    id: string;
    providerMetadata?: SharedV2ProviderMetadata;
    delta: string;
} | {
    type: 'text-end';
    providerMetadata?: SharedV2ProviderMetadata;
    id: string;
} | {
    type: 'reasoning-start';
    providerMetadata?: SharedV2ProviderMetadata;
    id: string;
} | {
    type: 'reasoning-delta';
    id: string;
    providerMetadata?: SharedV2ProviderMetadata;
    delta: string;
} | {
    type: 'reasoning-end';
    id: string;
    providerMetadata?: SharedV2ProviderMetadata;
} | {
    type: 'tool-input-start';
    id: string;
    toolName: string;
    providerMetadata?: SharedV2ProviderMetadata;
    providerExecuted?: boolean;
} | {
    type: 'tool-input-delta';
    id: string;
    delta: string;
    providerMetadata?: SharedV2ProviderMetadata;
} | {
    type: 'tool-input-end';
    id: string;
    providerMetadata?: SharedV2ProviderMetadata;
} | LanguageModelV2ToolCall | LanguageModelV2ToolResult | LanguageModelV2File | LanguageModelV2Source | {
    type: 'stream-start';
    warnings: Array<LanguageModelV2CallWarning>;
} | ({
    type: 'response-metadata';
} & LanguageModelV2ResponseMetadata) | {
    type: 'finish';
    usage: LanguageModelV2Usage;
    finishReason: LanguageModelV2FinishReason;
    providerMetadata?: SharedV2ProviderMetadata;
} | {
    type: 'raw';
    rawValue: unknown;
} | {
    type: 'error';
    error: unknown;
};
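
For orientation, the sketch below shows the kind of part sequence a streaming provider emits under this union: a `stream-start`, optional `response-metadata`, one text block delimited by `text-start`/`text-delta`/`text-end`, and a closing `finish` with usage. The ids, ordering, and the `'stop'` finish reason are illustrative assumptions; the type itself only constrains the shape of each individual part.

```ts
// Illustrative stream for the reply "Hello!" — ids and ordering are assumptions.
const parts: LanguageModelV2StreamPart[] = [
  { type: 'stream-start', warnings: [] },
  { type: 'response-metadata', id: 'resp_1', modelId: 'example-model', timestamp: new Date() },
  { type: 'text-start', id: 'txt_1' },
  { type: 'text-delta', id: 'txt_1', delta: 'Hel' },
  { type: 'text-delta', id: 'txt_1', delta: 'lo!' },
  { type: 'text-end', id: 'txt_1' },
  { type: 'finish', finishReason: 'stop', usage: { inputTokens: 4, outputTokens: 2, totalTokens: 6 } },
];
```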

/**
Text that the model has generated.
 */
declare type LanguageModelV2Text = {
    type: 'text';
    /**
    The text content.
     */
    text: string;
    providerMetadata?: SharedV2ProviderMetadata;
};

/**
Text content part of a prompt. It contains a string of text.
 */
declare interface LanguageModelV2TextPart {
    type: 'text';
    /**
    The text content.
     */
    text: string;
    /**
     * Additional provider-specific options. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerOptions?: SharedV2ProviderOptions;
}

/**
Tool calls that the model has generated.
 */
declare type LanguageModelV2ToolCall = {
    type: 'tool-call';
    toolCallId: string;
    toolName: string;
    /**
    Stringified JSON object with the tool call arguments. Must match the
    parameters schema of the tool.
     */
    input: string;
    /**
     * Whether the tool call will be executed by the provider.
     * If this flag is not set or is false, the tool call will be executed by the client.
     */
    providerExecuted?: boolean;
    /**
     * Additional provider-specific metadata for the tool call.
     */
    providerMetadata?: SharedV2ProviderMetadata;
};
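
Note that `input` here is the *stringified* JSON arguments, unlike `LanguageModelV2ToolCallPart.input` below, which is the parsed value. A minimal sketch with an invented tool name:

```ts
// Hypothetical `getWeather` tool call; tool name, id, and arguments are invented.
const toolCall: LanguageModelV2ToolCall = {
  type: 'tool-call',
  toolCallId: 'call_001',
  toolName: 'getWeather',
  input: JSON.stringify({ city: 'Berlin', unit: 'celsius' }), // stringified JSON, not an object
};
```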

/**
Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
 */
declare interface LanguageModelV2ToolCallPart {
    type: 'tool-call';
    /**
    ID of the tool call. This ID is used to match the tool call with the tool result.
     */
    toolCallId: string;
    /**
    Name of the tool that is being called.
     */
    toolName: string;
    /**
    Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
     */
    input: unknown;
    /**
     * Whether the tool call will be executed by the provider.
     * If this flag is not set or is false, the tool call will be executed by the client.
     */
    providerExecuted?: boolean;
    /**
     * Additional provider-specific options. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerOptions?: SharedV2ProviderOptions;
}

declare type LanguageModelV2ToolChoice = {
    type: 'auto';
} | {
    type: 'none';
} | {
    type: 'required';
} | {
    type: 'tool';
    toolName: string;
};

/**
Result of a tool call that has been executed by the provider.
 */
declare type LanguageModelV2ToolResult = {
    type: 'tool-result';
    /**
     * The ID of the tool call that this result is associated with.
     */
    toolCallId: string;
    /**
     * Name of the tool that generated this result.
     */
    toolName: string;
    /**
     * Result of the tool call. This is a JSON-serializable object.
     */
    result: unknown;
    /**
     * Optional flag if the result is an error or an error message.
     */
    isError?: boolean;
    /**
     * Whether the tool result was generated by the provider.
     * If this flag is set to true, the tool result was generated by the provider.
     * If this flag is not set or is false, the tool result was generated by the client.
     */
    providerExecuted?: boolean;
    /**
     * Additional provider-specific metadata for the tool result.
     */
    providerMetadata?: SharedV2ProviderMetadata;
};

declare type LanguageModelV2ToolResultOutput = {
    type: 'text';
    value: string;
} | {
    type: 'json';
    value: JSONValue;
} | {
    type: 'error-text';
    value: string;
} | {
    type: 'error-json';
    value: JSONValue;
} | {
    type: 'content';
    value: Array<{
        type: 'text';
        /**
        Text content.
         */
        text: string;
    } | {
        type: 'media';
        /**
        Base-64 encoded media data.
         */
        data: string;
        /**
        IANA media type.
        @see https://www.iana.org/assignments/media-types/media-types.xhtml
         */
        mediaType: string;
    }>;
};
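
The `'content'` variant lets a tool result carry a mix of text and media parts. A short sketch; the text and the base64 payload are placeholders:

```ts
const output: LanguageModelV2ToolResultOutput = {
  type: 'content',
  value: [
    { type: 'text', text: 'Rendered the requested chart.' },
    { type: 'media', data: 'iVBORw0KGgoAAAANSUhEUg...', mediaType: 'image/png' }, // truncated placeholder
  ],
};
```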

/**
Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
 */
declare interface LanguageModelV2ToolResultPart {
    type: 'tool-result';
    /**
    ID of the tool call that this result is associated with.
     */
    toolCallId: string;
    /**
    Name of the tool that generated this result.
     */
    toolName: string;
    /**
    Result of the tool call.
     */
    output: LanguageModelV2ToolResultOutput;
    /**
     * Additional provider-specific options. They are passed through
     * to the provider from the AI SDK and enable provider-specific
     * functionality that can be fully encapsulated in the provider.
     */
    providerOptions?: SharedV2ProviderOptions;
}

/**
Usage information for a language model call.

If your API return additional usage information, you can add it to the
provider metadata under your provider's key.
 */
declare type LanguageModelV2Usage = {
    /**
    The number of input (prompt) tokens used.
     */
    inputTokens: number | undefined;
    /**
    The number of output (completion) tokens used.
     */
    outputTokens: number | undefined;
    /**
    The total number of tokens as reported by the provider.
    This number might be different from the sum of `inputTokens` and `outputTokens`
    and e.g. include reasoning tokens or other overhead.
     */
    totalTokens: number | undefined;
    /**
    The number of reasoning tokens used.
     */
    reasoningTokens?: number | undefined;
    /**
    The number of cached input tokens.
     */
    cachedInputTokens?: number | undefined;
};
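
As the comment on `totalTokens` notes, the provider-reported total can exceed `inputTokens + outputTokens`. A worked example with invented numbers, where the extra 40 tokens are reasoning overhead:

```ts
// Invented numbers: 120 input + 80 output + 40 reasoning = 240 total reported by the provider.
const usage: LanguageModelV2Usage = {
  inputTokens: 120,
  outputTokens: 80,
  reasoningTokens: 40,
  totalTokens: 240,
  cachedInputTokens: 100, // 100 of the 120 input tokens came from the prompt cache
};
```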

export declare class MockEmbeddingModelV2<VALUE> implements EmbeddingModelV2<VALUE> {
    readonly specificationVersion = "v2";
    readonly provider: EmbeddingModelV2<VALUE>['provider'];
    readonly modelId: EmbeddingModelV2<VALUE>['modelId'];
    readonly maxEmbeddingsPerCall: EmbeddingModelV2<VALUE>['maxEmbeddingsPerCall'];
    readonly supportsParallelCalls: EmbeddingModelV2<VALUE>['supportsParallelCalls'];
    doEmbed: EmbeddingModelV2<VALUE>['doEmbed'];
    constructor({ provider, modelId, maxEmbeddingsPerCall, supportsParallelCalls, doEmbed, }?: {
        provider?: EmbeddingModelV2<VALUE>['provider'];
        modelId?: EmbeddingModelV2<VALUE>['modelId'];
        maxEmbeddingsPerCall?: EmbeddingModelV2<VALUE>['maxEmbeddingsPerCall'] | null;
        supportsParallelCalls?: EmbeddingModelV2<VALUE>['supportsParallelCalls'];
        doEmbed?: EmbeddingModelV2<VALUE>['doEmbed'];
    });
}

export declare function mockId({ prefix, }?: {
    prefix?: string;
}): () => string;

export declare class MockImageModelV2 implements ImageModelV2 {
    readonly specificationVersion = "v2";
    readonly provider: ImageModelV2['provider'];
    readonly modelId: ImageModelV2['modelId'];
    readonly maxImagesPerCall: ImageModelV2['maxImagesPerCall'];
    doGenerate: ImageModelV2['doGenerate'];
    constructor({ provider, modelId, maxImagesPerCall, doGenerate, }?: {
        provider?: ImageModelV2['provider'];
        modelId?: ImageModelV2['modelId'];
        maxImagesPerCall?: ImageModelV2['maxImagesPerCall'];
        doGenerate?: ImageModelV2['doGenerate'];
    });
}

export declare class MockLanguageModelV2 implements LanguageModelV2 {
    readonly specificationVersion = "v2";
    private _supportedUrls;
    readonly provider: LanguageModelV2['provider'];
    readonly modelId: LanguageModelV2['modelId'];
    doGenerate: LanguageModelV2['doGenerate'];
    doStream: LanguageModelV2['doStream'];
    doGenerateCalls: Parameters<LanguageModelV2['doGenerate']>[0][];
    doStreamCalls: Parameters<LanguageModelV2['doStream']>[0][];
    constructor({ provider, modelId, supportedUrls, doGenerate, doStream, }?: {
        provider?: LanguageModelV2['provider'];
        modelId?: LanguageModelV2['modelId'];
        supportedUrls?: LanguageModelV2['supportedUrls'] | (() => LanguageModelV2['supportedUrls']);
        doGenerate?: LanguageModelV2['doGenerate'] | Awaited<ReturnType<LanguageModelV2['doGenerate']>> | Awaited<ReturnType<LanguageModelV2['doGenerate']>>[];
        doStream?: LanguageModelV2['doStream'] | Awaited<ReturnType<LanguageModelV2['doStream']>> | Awaited<ReturnType<LanguageModelV2['doStream']>>[];
    });
    get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
}
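
A minimal usage sketch: a mock model whose `doGenerate` always resolves to the same text. The result shape (`content`, `finishReason`, `usage`, `warnings`) follows the `LanguageModelV2.doGenerate` return type from the V2 spec, which is not reproduced in this excerpt, so treat the exact fields as an assumption; provider and model ids are invented.

```ts
const model = new MockLanguageModelV2({
  provider: 'mock-provider',
  modelId: 'mock-model-1',
  doGenerate: async () => ({
    content: [{ type: 'text', text: 'Hello, world!' }],
    finishReason: 'stop',
    usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 },
    warnings: [],
  }),
});

// Calls are recorded for assertions, e.g. model.doGenerateCalls[0] holds the
// options object passed to the first doGenerate invocation.
```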

export declare class MockProviderV2 implements ProviderV2 {
    languageModel: ProviderV2['languageModel'];
    textEmbeddingModel: ProviderV2['textEmbeddingModel'];
    imageModel: ProviderV2['imageModel'];
    transcriptionModel: ProviderV2['transcriptionModel'];
    speechModel: ProviderV2['speechModel'];
    constructor({ languageModels, embeddingModels, imageModels, transcriptionModels, speechModels, }?: {
        languageModels?: Record<string, LanguageModelV2>;
        embeddingModels?: Record<string, EmbeddingModelV2<string>>;
        imageModels?: Record<string, ImageModelV2>;
        transcriptionModels?: Record<string, TranscriptionModelV2>;
        speechModels?: Record<string, SpeechModelV2>;
    });
}
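
Presumably the mock provider resolves model ids against the records passed to its constructor; that lookup behavior is not visible in this declaration, so the sketch below is an assumption.

```ts
// Sketch: expose the mock model from above under the id 'chat-model'.
const provider = new MockProviderV2({
  languageModels: { 'chat-model': model },
});
const chatModel = provider.languageModel('chat-model');
```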

export declare class MockSpeechModelV2 implements SpeechModelV2 {
    readonly specificationVersion = "v2";
    readonly provider: SpeechModelV2['provider'];
    readonly modelId: SpeechModelV2['modelId'];
    doGenerate: SpeechModelV2['doGenerate'];
    constructor({ provider, modelId, doGenerate, }?: {
        provider?: SpeechModelV2['provider'];
        modelId?: SpeechModelV2['modelId'];
        doGenerate?: SpeechModelV2['doGenerate'];
    });
}

export declare class MockTranscriptionModelV2 implements TranscriptionModelV2 {
    readonly specificationVersion = "v2";
    readonly provider: TranscriptionModelV2['provider'];
    readonly modelId: TranscriptionModelV2['modelId'];
    doGenerate: TranscriptionModelV2['doGenerate'];
    constructor({ provider, modelId, doGenerate, }?: {
        provider?: TranscriptionModelV2['provider'];
        modelId?: TranscriptionModelV2['modelId'];
        doGenerate?: TranscriptionModelV2['doGenerate'];
    });
}

export declare function mockValues<T>(...values: T[]): () => T;
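
`mockId` and `mockValues` are small helpers for deterministic tests. The exact id format and what happens once the provided values are exhausted are not specified by these signatures, so the comments below are assumptions:

```ts
const nextId = mockId({ prefix: 'msg' });
nextId(); // presumably 'msg-0'
nextId(); // presumably 'msg-1'

const nextUsage = mockValues(10, 20, 30);
nextUsage(); // 10
nextUsage(); // 20
```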

/**
 * Provider for language, text embedding, and image generation models.
 */
declare interface ProviderV2 {
    /**
    Returns the language model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {LanguageModel} The language model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    languageModel(modelId: string): LanguageModelV2;
    /**
    Returns the text embedding model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {LanguageModel} The language model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    textEmbeddingModel(modelId: string): EmbeddingModelV2<string>;
    /**
    Returns the image model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {ImageModel} The image model associated with the id
     */
    imageModel(modelId: string): ImageModelV2;
    /**
    Returns the transcription model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {TranscriptionModel} The transcription model associated with the id
     */
    transcriptionModel?(modelId: string): TranscriptionModelV2;
    /**
    Returns the speech model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} modelId - The id of the model to return.

    @returns {SpeechModel} The speech model associated with the id
     */
    speechModel?(modelId: string): SpeechModelV2;
}

declare type SharedV2Headers = Record<string, string>;

/**
 * Additional provider-specific metadata.
 * Metadata are additional outputs from the provider.
 * They are passed through to the provider from the AI SDK
 * and enable provider-specific functionality
 * that can be fully encapsulated in the provider.
 *
 * This enables us to quickly ship provider-specific functionality
 * without affecting the core AI SDK.
 *
 * The outer record is keyed by the provider name, and the inner
 * record is keyed by the provider-specific metadata key.
 *
 * ```ts
 * {
 *   "anthropic": {
 *     "cacheControl": { "type": "ephemeral" }
 *   }
 * }
 * ```
 */
declare type SharedV2ProviderMetadata = Record<string, Record<string, JSONValue>>;

/**
 * Additional provider-specific options.
 * Options are additional input to the provider.
 * They are passed through to the provider from the AI SDK
 * and enable provider-specific functionality
 * that can be fully encapsulated in the provider.
 *
 * This enables us to quickly ship provider-specific functionality
 * without affecting the core AI SDK.
 *
 * The outer record is keyed by the provider name, and the inner
 * record is keyed by the provider-specific metadata key.
 *
 * ```ts
 * {
 *   "anthropic": {
 *     "cacheControl": { "type": "ephemeral" }
 *   }
 * }
 * ```
 */
declare type SharedV2ProviderOptions = Record<string, Record<string, JSONValue>>;

/**
 * Creates a ReadableStream that emits the provided values with an optional delay between each value.
 *
 * @param options - The configuration options
 * @param options.chunks - Array of values to be emitted by the stream
 * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
 * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
 * @returns A ReadableStream that emits the provided values
 */
declare function simulateReadableStream$1<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
    chunks: T[];
    initialDelayInMs?: number | null;
    chunkDelayInMs?: number | null;
    _internal?: {
        delay?: (ms: number | null) => Promise<void>;
    };
}): ReadableStream<T>;

/**
 * @deprecated Use `simulateReadableStream` from `ai` instead.
 */
export declare const simulateReadableStream: typeof simulateReadableStream$1;
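
A common testing pattern is to combine `MockLanguageModelV2.doStream` with `simulateReadableStream` to replay a fixed sequence of stream parts. The `{ stream }` result shape follows the `LanguageModelV2.doStream` return type from the V2 spec (not shown in this excerpt), so treat it as an assumption; per the deprecation note above, application code would import `simulateReadableStream` from `ai` rather than relying on this re-export.

```ts
// Sketch: a mock model that streams "Hello" and then finishes.
const streamingModel = new MockLanguageModelV2({
  doStream: async () => ({
    stream: simulateReadableStream({
      chunkDelayInMs: 10, // 10 ms between parts
      chunks: [
        { type: 'text-start', id: 'txt_1' },
        { type: 'text-delta', id: 'txt_1', delta: 'Hello' },
        { type: 'text-end', id: 'txt_1' },
        { type: 'finish', finishReason: 'stop', usage: { inputTokens: 3, outputTokens: 5, totalTokens: 8 } },
      ] satisfies LanguageModelV2StreamPart[],
    }),
  }),
});
```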

/**
 * Speech model specification version 2.
 */
declare type SpeechModelV2 = {
    /**
     * The speech model must specify which speech model interface
     * version it implements. This will allow us to evolve the speech
     * model interface and retain backwards compatibility. The different
     * implementation versions can be handled as a discriminated union
     * on our side.
     */
    readonly specificationVersion: 'v2';
    /**
     * Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
     * Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
     * Generates speech audio from text.
     */
    doGenerate(options: SpeechModelV2CallOptions): PromiseLike<{
        /**
         * Generated audio as an ArrayBuffer.
         * The audio should be returned without any unnecessary conversion.
         * If the API returns base64 encoded strings, the audio should be returned
         * as base64 encoded strings. If the API returns binary data, the audio
         * should be returned as binary data.
         */
        audio: string | Uint8Array;
        /**
         * Warnings for the call, e.g. unsupported settings.
         */
        warnings: Array<SpeechModelV2CallWarning>;
        /**
         * Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
             * Response body (available only for providers that use HTTP requests).
             */
            body?: unknown;
        };
        /**
         * Response information for telemetry and debugging purposes.
         */
        response: {
            /**
             * Timestamp for the start of the generated response.
             */
            timestamp: Date;
            /**
             * The ID of the response model that was used to generate the response.
             */
            modelId: string;
            /**
             * Response headers.
             */
            headers?: SharedV2Headers;
            /**
             * Response body.
             */
            body?: unknown;
        };
        /**
         * Additional provider-specific metadata. They are passed through
         * from the provider to the AI SDK and enable provider-specific
         * results that can be fully encapsulated in the provider.
         */
        providerMetadata?: Record<string, Record<string, JSONValue>>;
    }>;
};

declare type SpeechModelV2CallOptions = {
    /**
     * Text to convert to speech.
     */
    text: string;
    /**
     * The voice to use for speech synthesis.
     * This is provider-specific and may be a voice ID, name, or other identifier.
     */
    voice?: string;
    /**
     * The desired output format for the audio e.g. "mp3", "wav", etc.
     */
    outputFormat?: string;
    /**
     * Instructions for the speech generation e.g. "Speak in a slow and steady tone".
     */
    instructions?: string;
    /**
     * The speed of the speech generation.
     */
    speed?: number;
    /**
     * The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
     * or "auto" for automatic language detection. Provider support varies.
     */
    language?: string;
    /**
     * Additional provider-specific options that are passed through to the provider
     * as body parameters.
     *
     * The outer record is keyed by the provider name, and the inner
     * record is keyed by the provider-specific metadata key.
     * ```ts
     * {
     *   "openai": {}
     * }
     * ```
     */
    providerOptions?: SpeechModelV2ProviderOptions;
    /**
     * Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
    /**
     * Additional HTTP headers to be sent with the request.
     * Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string | undefined>;
};

/**
 * Warning from the model provider for this call. The call will proceed, but e.g.
 * some settings might not be supported, which can lead to suboptimal results.
 */
declare type SpeechModelV2CallWarning = {
    type: 'unsupported-setting';
    setting: keyof SpeechModelV2CallOptions;
    details?: string;
} | {
    type: 'other';
    message: string;
};

declare type SpeechModelV2ProviderOptions = Record<string, Record<string, JSONValue>>;

/**
Transcription model specification version 2.
 */
declare type TranscriptionModelV2 = {
    /**
    The transcription model must specify which transcription model interface
    version it implements. This will allow us to evolve the transcription
    model interface and retain backwards compatibility. The different
    implementation versions can be handled as a discriminated union
    on our side.
     */
    readonly specificationVersion: 'v2';
    /**
    Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
    Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
    Generates a transcript.
     */
    doGenerate(options: TranscriptionModelV2CallOptions): PromiseLike<{
        /**
         * The complete transcribed text from the audio.
         */
        text: string;
        /**
         * Array of transcript segments with timing information.
         * Each segment represents a portion of the transcribed text with start and end times.
         */
        segments: Array<{
            /**
             * The text content of this segment.
             */
            text: string;
            /**
             * The start time of this segment in seconds.
             */
            startSecond: number;
            /**
             * The end time of this segment in seconds.
             */
            endSecond: number;
        }>;
        /**
         * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
         * May be undefined if the language couldn't be detected.
         */
        language: string | undefined;
        /**
         * The total duration of the audio file in seconds.
         * May be undefined if the duration couldn't be determined.
         */
        durationInSeconds: number | undefined;
        /**
        Warnings for the call, e.g. unsupported settings.
         */
        warnings: Array<TranscriptionModelV2CallWarning>;
        /**
        Optional request information for telemetry and debugging purposes.
         */
        request?: {
            /**
            Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
            Non-HTTP(s) providers should not set this.
             */
            body?: string;
        };
        /**
        Response information for telemetry and debugging purposes.
         */
        response: {
            /**
            Timestamp for the start of the generated response.
             */
            timestamp: Date;
            /**
            The ID of the response model that was used to generate the response.
             */
            modelId: string;
            /**
            Response headers.
             */
            headers?: SharedV2Headers;
            /**
            Response body.
             */
            body?: unknown;
        };
        /**
        Additional provider-specific metadata. They are passed through
        from the provider to the AI SDK and enable provider-specific
        results that can be fully encapsulated in the provider.
         */
        providerMetadata?: Record<string, Record<string, JSONValue>>;
    }>;
};

declare type TranscriptionModelV2CallOptions = {
    /**
    Audio data to transcribe.
    Accepts a `Uint8Array` or `string`, where `string` is a base64 encoded audio file.
     */
    audio: Uint8Array | string;
    /**
    The IANA media type of the audio data.

    @see https://www.iana.org/assignments/media-types/media-types.xhtml
     */
    mediaType: string;
    /**
    Additional provider-specific options that are passed through to the provider
    as body parameters.

    The outer record is keyed by the provider name, and the inner
    record is keyed by the provider-specific metadata key.
    ```ts
    {
      "openai": {
        "timestampGranularities": ["word"]
      }
    }
    ```
     */
    providerOptions?: TranscriptionModelV2ProviderOptions;
    /**
    Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
    /**
    Additional HTTP headers to be sent with the request.
    Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string | undefined>;
};

/**
Warning from the model provider for this call. The call will proceed, but e.g.
some settings might not be supported, which can lead to suboptimal results.
 */
declare type TranscriptionModelV2CallWarning = {
    type: 'unsupported-setting';
    setting: keyof TranscriptionModelV2CallOptions;
    details?: string;
} | {
    type: 'other';
    message: string;
};

declare type TranscriptionModelV2ProviderOptions = Record<string, Record<string, JSONValue>>;

export { }
export { EmbeddingModelV2 as EmbeddingModelV2, ImageModelV2 as ImageModelV2, LanguageModelV2 as LanguageModelV2, ProviderV2 as ProviderV2, TranscriptionModelV2 as TranscriptionModelV2, SpeechModelV2 as SpeechModelV2, simulateReadableStream$1 as simulateReadableStream$1 };