@mariozechner/pi-ai 0.70.5 → 0.71.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -31
- package/dist/env-api-keys.d.ts.map +1 -1
- package/dist/env-api-keys.js +4 -0
- package/dist/env-api-keys.js.map +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js.map +1 -1
- package/dist/models.d.ts +1 -1
- package/dist/models.d.ts.map +1 -1
- package/dist/models.generated.d.ts +1653 -596
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +1337 -422
- package/dist/models.generated.js.map +1 -1
- package/dist/models.js +3 -2
- package/dist/models.js.map +1 -1
- package/dist/providers/amazon-bedrock.d.ts.map +1 -1
- package/dist/providers/amazon-bedrock.js +14 -15
- package/dist/providers/amazon-bedrock.js.map +1 -1
- package/dist/providers/anthropic.d.ts.map +1 -1
- package/dist/providers/anthropic.js +38 -15
- package/dist/providers/anthropic.js.map +1 -1
- package/dist/providers/cloudflare.d.ts +13 -0
- package/dist/providers/cloudflare.d.ts.map +1 -0
- package/dist/providers/cloudflare.js +26 -0
- package/dist/providers/cloudflare.js.map +1 -0
- package/dist/providers/google-shared.d.ts +7 -2
- package/dist/providers/google-shared.d.ts.map +1 -1
- package/dist/providers/google-shared.js +4 -13
- package/dist/providers/google-shared.js.map +1 -1
- package/dist/providers/google-vertex.d.ts +1 -1
- package/dist/providers/google-vertex.d.ts.map +1 -1
- package/dist/providers/google-vertex.js.map +1 -1
- package/dist/providers/google.d.ts +1 -1
- package/dist/providers/google.d.ts.map +1 -1
- package/dist/providers/google.js.map +1 -1
- package/dist/providers/mistral.d.ts.map +1 -1
- package/dist/providers/mistral.js +1 -1
- package/dist/providers/mistral.js.map +1 -1
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +25 -8
- package/dist/providers/openai-completions.js.map +1 -1
- package/dist/providers/openai-responses.d.ts.map +1 -1
- package/dist/providers/openai-responses.js +10 -2
- package/dist/providers/openai-responses.js.map +1 -1
- package/dist/providers/register-builtins.d.ts +0 -3
- package/dist/providers/register-builtins.d.ts.map +1 -1
- package/dist/providers/register-builtins.js +0 -18
- package/dist/providers/register-builtins.js.map +1 -1
- package/dist/types.d.ts +3 -2
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/oauth/index.d.ts +0 -4
- package/dist/utils/oauth/index.d.ts.map +1 -1
- package/dist/utils/oauth/index.js +0 -10
- package/dist/utils/oauth/index.js.map +1 -1
- package/package.json +2 -6
- package/dist/providers/google-gemini-cli.d.ts +0 -74
- package/dist/providers/google-gemini-cli.d.ts.map +0 -1
- package/dist/providers/google-gemini-cli.js +0 -779
- package/dist/providers/google-gemini-cli.js.map +0 -1
- package/dist/utils/oauth/google-antigravity.d.ts +0 -26
- package/dist/utils/oauth/google-antigravity.d.ts.map +0 -1
- package/dist/utils/oauth/google-antigravity.js +0 -377
- package/dist/utils/oauth/google-antigravity.js.map +0 -1
- package/dist/utils/oauth/google-gemini-cli.d.ts +0 -26
- package/dist/utils/oauth/google-gemini-cli.d.ts.map +0 -1
- package/dist/utils/oauth/google-gemini-cli.js +0 -482
- package/dist/utils/oauth/google-gemini-cli.js.map +0 -1
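
The headline change in this release, visible in the file list above, is the removal of the built-in google-gemini-cli provider and the google-antigravity/google-gemini-cli OAuth helpers, alongside a new cloudflare provider. The register-builtins hunks below all touch one mechanism: each provider module is loaded on first use through a memoized dynamic import(). A minimal sketch of that pattern, with simplified types and names (the real code returns an AssistantMessageEventStream synchronously and turns import failures into an "error" event rather than a rejection):

```ts
// Minimal sketch of the lazy provider-loading pattern in
// register-builtins.ts. Types are simplified placeholders; the real
// module repeats this once per provider API and registers the wrappers.
type Stream = (model: string, context: unknown, options?: unknown) => AsyncIterable<unknown>;

let anthropicModulePromise: Promise<{ stream: Stream }> | undefined;

function loadAnthropic(): Promise<{ stream: Stream }> {
	// `||=` memoizes the import: the provider's code is fetched at most
	// once, and every later call reuses the same promise.
	anthropicModulePromise ||= import("./anthropic.js").then((m) => ({
		stream: (m as { streamAnthropic: Stream }).streamAnthropic,
	}));
	return anthropicModulePromise;
}

// Callers get a stable function whose backing module loads on demand.
export async function* streamAnthropic(model: string, context: unknown, options?: unknown) {
	const { stream } = await loadAnthropic();
	yield* stream(model, context, options);
}
```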
package/dist/providers/register-builtins.d.ts.map
@@ -1 +1 @@
-[1-line minified source map for register-builtins.d.ts; its embedded sourcesContent still imports and re-exports the google-gemini-cli provider]
+[1-line minified source map for register-builtins.d.ts; all google-gemini-cli references are gone from the embedded sourcesContent]

package/dist/providers/register-builtins.js
@@ -4,7 +4,6 @@ const importNodeOnlyProvider = (specifier) => import(specifier);
 let anthropicProviderModulePromise;
 let azureOpenAIResponsesProviderModulePromise;
 let googleProviderModulePromise;
-let googleGeminiCliProviderModulePromise;
 let googleVertexProviderModulePromise;
 let mistralProviderModulePromise;
 let openAICodexResponsesProviderModulePromise;
@@ -108,16 +107,6 @@ function loadGoogleProviderModule() {
 	});
 	return googleProviderModulePromise;
 }
-function loadGoogleGeminiCliProviderModule() {
-	googleGeminiCliProviderModulePromise ||= import("./google-gemini-cli.js").then((module) => {
-		const provider = module;
-		return {
-			stream: provider.streamGoogleGeminiCli,
-			streamSimple: provider.streamSimpleGoogleGeminiCli,
-		};
-	});
-	return googleGeminiCliProviderModulePromise;
-}
 function loadGoogleVertexProviderModule() {
 	googleVertexProviderModulePromise ||= import("./google-vertex.js").then((module) => {
 		const provider = module;
@@ -187,8 +176,6 @@ export const streamAzureOpenAIResponses = createLazyStream(loadAzureOpenAIRespon
 export const streamSimpleAzureOpenAIResponses = createLazySimpleStream(loadAzureOpenAIResponsesProviderModule);
 export const streamGoogle = createLazyStream(loadGoogleProviderModule);
 export const streamSimpleGoogle = createLazySimpleStream(loadGoogleProviderModule);
-export const streamGoogleGeminiCli = createLazyStream(loadGoogleGeminiCliProviderModule);
-export const streamSimpleGoogleGeminiCli = createLazySimpleStream(loadGoogleGeminiCliProviderModule);
 export const streamGoogleVertex = createLazyStream(loadGoogleVertexProviderModule);
 export const streamSimpleGoogleVertex = createLazySimpleStream(loadGoogleVertexProviderModule);
 export const streamMistral = createLazyStream(loadMistralProviderModule);
@@ -237,11 +224,6 @@ export function registerBuiltInApiProviders() {
 		stream: streamGoogle,
 		streamSimple: streamSimpleGoogle,
 	});
-	registerApiProvider({
-		api: "google-gemini-cli",
-		stream: streamGoogleGeminiCli,
-		streamSimple: streamSimpleGoogleGeminiCli,
-	});
 	registerApiProvider({
 		api: "google-vertex",
 		stream: streamGoogleVertex,
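
Net effect of the three hunks above: as of 0.71.0 the built-in registration pass (which runs at module load) no longer installs a handler for the "google-gemini-cli" API. As a purely hypothetical migration sketch (this diff does not show whether registerApiProvider is re-exported from the package root, and the "google-gemini-cli" literal may also have been dropped from the Api union in types.d.ts), an application that still needs that API could register its own handler:

```ts
// Hypothetical sketch, not from the package docs. Assumes
// registerApiProvider is importable by consumers and that
// "google-gemini-cli" is still an acceptable Api value; the two
// stream functions are the application's own implementations.
import { registerApiProvider } from "@mariozechner/pi-ai";
import { myGeminiCliStream, myGeminiCliStreamSimple } from "./my-gemini-cli.js";

registerApiProvider({
	api: "google-gemini-cli",
	stream: myGeminiCliStream,
	streamSimple: myGeminiCliStreamSimple,
});
```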

package/dist/providers/register-builtins.js.map
@@ -1 +1 @@
-[1-line minified source map for register-builtins.js; its embedded sourcesContent still contains the google-gemini-cli loader, stream exports, and registration]
+
{"version":3,"file":"register-builtins.js","sourceRoot":"","sources":["../../src/providers/register-builtins.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,iBAAiB,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAW5E,OAAO,EAAE,2BAA2B,EAAE,MAAM,0BAA0B,CAAC;AA6EvE,MAAM,sBAAsB,GAAG,CAAC,SAAiB,EAAoB,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;AAE1F,IAAI,8BAEQ,CAAC;AACb,IAAI,yCAEQ,CAAC;AACb,IAAI,2BAEQ,CAAC;AACb,IAAI,iCAEQ,CAAC;AACb,IAAI,4BAEQ,CAAC;AACb,IAAI,yCAEQ,CAAC;AACb,IAAI,sCAEQ,CAAC;AACb,IAAI,oCAEQ,CAAC;AACb,IAAI,6BAEQ,CAAC;AACb,IAAI,4BAEQ,CAAC;AAEb,MAAM,UAAU,wBAAwB,CAAC,MAA6B,EAAQ;IAC7E,6BAA6B,GAAG;QAC/B,MAAM,EAAE,MAAM,CAAC,aAAa;QAC5B,YAAY,EAAE,MAAM,CAAC,mBAAmB;KACxC,CAAC;AAAA,CACF;AAED,SAAS,aAAa,CAAC,MAAmC,EAAE,MAA4C,EAAQ;IAC/G,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,MAAM,EAAE,CAAC;YAClC,MAAM,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;QACpB,CAAC;QACD,MAAM,CAAC,GAAG,EAAE,CAAC;IAAA,CACb,CAAC,EAAE,CAAC;AAAA,CACL;AAED,SAAS,0BAA0B,CAAmB,KAAkB,EAAE,KAAc,EAAoB;IAC3G,OAAO;QACN,IAAI,EAAE,WAAW;QACjB,OAAO,EAAE,EAAE;QACX,GAAG,EAAE,KAAK,CAAC,GAAG;QACd,QAAQ,EAAE,KAAK,CAAC,QAAQ;QACxB,KAAK,EAAE,KAAK,CAAC,EAAE;QACf,KAAK,EAAE;YACN,KAAK,EAAE,CAAC;YACR,MAAM,EAAE,CAAC;YACT,SAAS,EAAE,CAAC;YACZ,UAAU,EAAE,CAAC;YACb,WAAW,EAAE,CAAC;YACd,IAAI,EAAE,EAAE,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE;SACpE;QACD,UAAU,EAAE,OAAO;QACnB,YAAY,EAAE,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC;QACpE,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;KACrB,CAAC;AAAA,CACF;AAED,SAAS,gBAAgB,CACxB,UAA6E,EAC5C;IACjC,OAAO,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,EAAE,EAAE,CAAC;QACnC,MAAM,KAAK,GAAG,IAAI,2BAA2B,EAAE,CAAC;QAEhD,UAAU,EAAE;aACV,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;YACjB,MAAM,KAAK,GAAG,MAAM,CAAC,MAAM,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;YACrD,aAAa,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;QAAA,CAC5B,CAAC;aACD,KAAK,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC;YACjB,MAAM,OAAO,GAAG,0BAA0B,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;YACzD,KAAK,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC,CAAC;YAC/D,KAAK,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC;QAAA,CACnB,CAAC,CAAC;QAEJ,OAAO,KAAK,CAAC;IAAA,CACb,CAAC;AAAA,CACF;AAED,SAAS,sBAAsB,CAI7B,UAA6E,EAAwC;IACtH,OAAO,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,EAAE,EAAE,CAAC;QACnC,MAAM,KAAK,GAAG,IAAI,2BAA2B,EAAE,CAAC;QAEhD,UAAU,EAAE;aACV,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;YACjB,MAAM,KAAK,GAAG,MAAM,CAAC,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;YAC3D,aAAa,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;QAAA,CAC5B,CAAC;aACD,KAAK,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC;YACjB,MAAM,OAAO,GAAG,0BAA0B,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;YACzD,KAAK,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,MAAM,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC,CAAC;YAC/D,KAAK,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC;QAAA,CACnB,CAAC,CAAC;QAEJ,OAAO,KAAK,CAAC;IAAA,CACb,CAAC;AAAA,CACF;AAED,SAAS,2BAA2B,GAElC;IACD,8BAA8B,KAAK,MAAM,CAAC,gBAAgB,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QAC5E,MAAM,QAAQ,GAAG,MAAiC,CAAC;QACnD,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,eAAe;YAChC,YAAY,EAAE,QAAQ,CAAC,qBAAqB;SAC5C,CAAC;IAAA,CACF,CAAC,CAAC;IACH,OAAO,8BAA8B,CAAC;AAAA,CACtC;AAED,SAAS,sCAAsC,GAE7C;IACD,yCAAyC,KAAK,MAAM,CAAC,6BAA6B,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QACpG,MAAM,QAAQ,GAAG,MAA4C,CAAC;QAC9D,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,0BAA0B;YAC3C,YAAY,EAAE,QAAQ,CAAC,gCAAgC;SACvD,CAAC;IAAA,CACF,CAAC,CAAC;IACH,OAAO,yCAAyC,CAAC;AAAA,CACjD;AAED,SAAS,wBAAwB,GAE/B;IACD,2BAA2B,KAAK,MAAM,CAAC,aAAa,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QACtE,MAAM,QAAQ,GAAG,MAA8B,CAAC;QAChD,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,YAAY;YAC7B,YAAY,EAAE,QAAQ,CAAC,kBAAkB;SACzC,CAAC;IAAA,CACF,CAAC,CAAC;IACH,O
AAO,2BAA2B,CAAC;AAAA,CACnC;AAED,SAAS,8BAA8B,GAErC;IACD,iCAAiC,KAAK,MAAM,CAAC,oBAAoB,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QACnF,MAAM,QAAQ,GAAG,MAAoC,CAAC;QACtD,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,kBAAkB;YACnC,YAAY,EAAE,QAAQ,CAAC,wBAAwB;SAC/C,CAAC;IAAA,CACF,CAAC,CAAC;IACH,OAAO,iCAAiC,CAAC;AAAA,CACzC;AAED,SAAS,yBAAyB,GAEhC;IACD,4BAA4B,KAAK,MAAM,CAAC,cAAc,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QACxE,MAAM,QAAQ,GAAG,MAA+B,CAAC;QACjD,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,aAAa;YAC9B,YAAY,EAAE,QAAQ,CAAC,mBAAmB;SAC1C,CAAC;IAAA,CACF,CAAC,CAAC;IACH,OAAO,4BAA4B,CAAC;AAAA,CACpC;AAED,SAAS,sCAAsC,GAE7C;IACD,yCAAyC,KAAK,MAAM,CAAC,6BAA6B,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QACpG,MAAM,QAAQ,GAAG,MAA4C,CAAC;QAC9D,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,0BAA0B;YAC3C,YAAY,EAAE,QAAQ,CAAC,gCAAgC;SACvD,CAAC;IAAA,CACF,CAAC,CAAC;IACH,OAAO,yCAAyC,CAAC;AAAA,CACjD;AAED,SAAS,mCAAmC,GAE1C;IACD,sCAAsC,KAAK,MAAM,CAAC,yBAAyB,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QAC7F,MAAM,QAAQ,GAAG,MAAyC,CAAC;QAC3D,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,uBAAuB;YACxC,YAAY,EAAE,QAAQ,CAAC,6BAA6B;SACpD,CAAC;IAAA,CACF,CAAC,CAAC;IACH,OAAO,sCAAsC,CAAC;AAAA,CAC9C;AAED,SAAS,iCAAiC,GAExC;IACD,oCAAoC,KAAK,MAAM,CAAC,uBAAuB,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QACzF,MAAM,QAAQ,GAAG,MAAuC,CAAC;QACzD,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,qBAAqB;YACtC,YAAY,EAAE,QAAQ,CAAC,2BAA2B;SAClD,CAAC;IAAA,CACF,CAAC,CAAC;IACH,OAAO,oCAAoC,CAAC;AAAA,CAC5C;AAED,SAAS,yBAAyB,GAEhC;IACD,IAAI,6BAA6B,EAAE,CAAC;QACnC,OAAO,OAAO,CAAC,OAAO,CAAC,6BAA6B,CAAC,CAAC;IACvD,CAAC;IACD,4BAA4B,KAAK,sBAAsB,CAAC,qBAAqB,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;QAC/F,MAAM,QAAQ,GAAG,MAA+B,CAAC;QACjD,OAAO;YACN,MAAM,EAAE,QAAQ,CAAC,aAAa;YAC9B,YAAY,EAAE,QAAQ,CAAC,mBAAmB;SAC1C,CAAC;IAAA,CACF,CAAC,CAAC;IACH,OAAO,4BAA4B,CAAC;AAAA,CACpC;AAED,MAAM,CAAC,MAAM,eAAe,GAAG,gBAAgB,CAAC,2BAA2B,CAAC,CAAC;AAC7E,MAAM,CAAC,MAAM,qBAAqB,GAAG,sBAAsB,CAAC,2BAA2B,CAAC,CAAC;AACzF,MAAM,CAAC,MAAM,0BAA0B,GAAG,gBAAgB,CAAC,sCAAsC,CAAC,CAAC;AACnG,MAAM,CAAC,MAAM,gCAAgC,GAAG,sBAAsB,CAAC,sCAAsC,CAAC,CAAC;AAC/G,MAAM,CAAC,MAAM,YAAY,GAAG,gBAAgB,CAAC,wBAAwB,CAAC,CAAC;AACvE,MAAM,CAAC,MAAM,kBAAkB,GAAG,sBAAsB,CAAC,wBAAwB,CAAC,CAAC;AACnF,MAAM,CAAC,MAAM,kBAAkB,GAAG,gBAAgB,CAAC,8BAA8B,CAAC,CAAC;AACnF,MAAM,CAAC,MAAM,wBAAwB,GAAG,sBAAsB,CAAC,8BAA8B,CAAC,CAAC;AAC/F,MAAM,CAAC,MAAM,aAAa,GAAG,gBAAgB,CAAC,yBAAyB,CAAC,CAAC;AACzE,MAAM,CAAC,MAAM,mBAAmB,GAAG,sBAAsB,CAAC,yBAAyB,CAAC,CAAC;AACrF,MAAM,CAAC,MAAM,0BAA0B,GAAG,gBAAgB,CAAC,sCAAsC,CAAC,CAAC;AACnG,MAAM,CAAC,MAAM,gCAAgC,GAAG,sBAAsB,CAAC,sCAAsC,CAAC,CAAC;AAC/G,MAAM,CAAC,MAAM,uBAAuB,GAAG,gBAAgB,CAAC,mCAAmC,CAAC,CAAC;AAC7F,MAAM,CAAC,MAAM,6BAA6B,GAAG,sBAAsB,CAAC,mCAAmC,CAAC,CAAC;AACzG,MAAM,CAAC,MAAM,qBAAqB,GAAG,gBAAgB,CAAC,iCAAiC,CAAC,CAAC;AACzF,MAAM,CAAC,MAAM,2BAA2B,GAAG,sBAAsB,CAAC,iCAAiC,CAAC,CAAC;AACrG,MAAM,iBAAiB,GAAG,gBAAgB,CAAC,yBAAyB,CAAC,CAAC;AACtE,MAAM,uBAAuB,GAAG,sBAAsB,CAAC,yBAAyB,CAAC,CAAC;AAElF,MAAM,UAAU,2BAA2B,GAAS;IACnD,mBAAmB,CAAC;QACnB,GAAG,EAAE,oBAAoB;QACzB,MAAM,EAAE,eAAe;QACvB,YAAY,EAAE,qBAAqB;KACnC,CAAC,CAAC;IAEH,mBAAmB,CAAC;QACnB,GAAG,EAAE,oBAAoB;QACzB,MAAM,EAAE,uBAAuB;QAC/B,YAAY,EAAE,6BAA6B;KAC3C,CAAC,CAAC;IAEH,mBAAmB,CAAC;QACnB,GAAG,EAAE,uBAAuB;QAC5B,MAAM,EAAE,aAAa;QACrB,YAAY,EAAE,mBAAmB;KACjC,CAAC,CAAC;IAEH,mBAAmB,CAAC;QACnB,GAAG,EAAE,kBAAkB;QACvB,MAAM,EAAE,qBAAqB;QAC7B,YAAY,EAAE,2BAA2B;KACzC,CAAC,CAAC;IAEH,mBAAmB,CAAC;QACnB,GAAG,EAAE,wBAAwB;QAC7B,MAAM,EAAE,0BAA0B;QAClC,YAAY,EAAE,gCAAgC;KAC9C,CAAC,CAAC;IAEH,mBAAmB,CAAC;QACnB,GAAG,EAAE,wBAAwB;QAC7B,MAAM,EAAE,0BAA0B;QAClC,YAAY,EAAE,gCAAgC;KAC9C,CAAC,CAAC;IAEH,mBAAmB,CAAC;QACnB,GAAG,EAAE,sBAAsB;QAC3B,MAAM,EAAE,YAAY;QACpB,YAAY,EAAE
,kBAAkB;KAChC,CAAC,CAAC;IAEH,mBAAmB,CAAC;QACnB,GAAG,EAAE,eAAe;QACpB,MAAM,EAAE,kBAAkB;QAC1B,YAAY,EAAE,wBAAwB;KACtC,CAAC,CAAC;IAEH,mBAAmB,CAAC;QACnB,GAAG,EAAE,yBAAyB;QAC9B,MAAM,EAAE,iBAAiB;QACzB,YAAY,EAAE,uBAAuB;KACrC,CAAC,CAAC;AAAA,CACH;AAED,MAAM,UAAU,iBAAiB,GAAS;IACzC,iBAAiB,EAAE,CAAC;IACpB,2BAA2B,EAAE,CAAC;AAAA,CAC9B;AAED,2BAA2B,EAAE,CAAC","sourcesContent":["import { clearApiProviders, registerApiProvider } from \"../api-registry.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tAssistantMessageEvent,\n\tContext,\n\tModel,\n\tSimpleStreamOptions,\n\tStreamFunction,\n\tStreamOptions,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport type { BedrockOptions } from \"./amazon-bedrock.js\";\nimport type { AnthropicOptions } from \"./anthropic.js\";\nimport type { AzureOpenAIResponsesOptions } from \"./azure-openai-responses.js\";\nimport type { GoogleOptions } from \"./google.js\";\nimport type { GoogleVertexOptions } from \"./google-vertex.js\";\nimport type { MistralOptions } from \"./mistral.js\";\nimport type { OpenAICodexResponsesOptions } from \"./openai-codex-responses.js\";\nimport type { OpenAICompletionsOptions } from \"./openai-completions.js\";\nimport type { OpenAIResponsesOptions } from \"./openai-responses.js\";\n\ninterface LazyProviderModule<\n\tTApi extends Api,\n\tTOptions extends StreamOptions,\n\tTSimpleOptions extends SimpleStreamOptions,\n> {\n\tstream: (model: Model<TApi>, context: Context, options?: TOptions) => AsyncIterable<AssistantMessageEvent>;\n\tstreamSimple: (\n\t\tmodel: Model<TApi>,\n\t\tcontext: Context,\n\t\toptions?: TSimpleOptions,\n\t) => AsyncIterable<AssistantMessageEvent>;\n}\n\ninterface AnthropicProviderModule {\n\tstreamAnthropic: StreamFunction<\"anthropic-messages\", AnthropicOptions>;\n\tstreamSimpleAnthropic: StreamFunction<\"anthropic-messages\", SimpleStreamOptions>;\n}\n\ninterface AzureOpenAIResponsesProviderModule {\n\tstreamAzureOpenAIResponses: StreamFunction<\"azure-openai-responses\", AzureOpenAIResponsesOptions>;\n\tstreamSimpleAzureOpenAIResponses: StreamFunction<\"azure-openai-responses\", SimpleStreamOptions>;\n}\n\ninterface GoogleProviderModule {\n\tstreamGoogle: StreamFunction<\"google-generative-ai\", GoogleOptions>;\n\tstreamSimpleGoogle: StreamFunction<\"google-generative-ai\", SimpleStreamOptions>;\n}\n\ninterface GoogleVertexProviderModule {\n\tstreamGoogleVertex: StreamFunction<\"google-vertex\", GoogleVertexOptions>;\n\tstreamSimpleGoogleVertex: StreamFunction<\"google-vertex\", SimpleStreamOptions>;\n}\n\ninterface MistralProviderModule {\n\tstreamMistral: StreamFunction<\"mistral-conversations\", MistralOptions>;\n\tstreamSimpleMistral: StreamFunction<\"mistral-conversations\", SimpleStreamOptions>;\n}\n\ninterface OpenAICodexResponsesProviderModule {\n\tstreamOpenAICodexResponses: StreamFunction<\"openai-codex-responses\", OpenAICodexResponsesOptions>;\n\tstreamSimpleOpenAICodexResponses: StreamFunction<\"openai-codex-responses\", SimpleStreamOptions>;\n}\n\ninterface OpenAICompletionsProviderModule {\n\tstreamOpenAICompletions: StreamFunction<\"openai-completions\", OpenAICompletionsOptions>;\n\tstreamSimpleOpenAICompletions: StreamFunction<\"openai-completions\", SimpleStreamOptions>;\n}\n\ninterface OpenAIResponsesProviderModule {\n\tstreamOpenAIResponses: StreamFunction<\"openai-responses\", OpenAIResponsesOptions>;\n\tstreamSimpleOpenAIResponses: StreamFunction<\"openai-responses\", SimpleStreamOptions>;\n}\n\ninterface BedrockProviderModule 
{\n\tstreamBedrock: (\n\t\tmodel: Model<\"bedrock-converse-stream\">,\n\t\tcontext: Context,\n\t\toptions?: BedrockOptions,\n\t) => AsyncIterable<AssistantMessageEvent>;\n\tstreamSimpleBedrock: (\n\t\tmodel: Model<\"bedrock-converse-stream\">,\n\t\tcontext: Context,\n\t\toptions?: SimpleStreamOptions,\n\t) => AsyncIterable<AssistantMessageEvent>;\n}\n\nconst importNodeOnlyProvider = (specifier: string): Promise<unknown> => import(specifier);\n\nlet anthropicProviderModulePromise:\n\t| Promise<LazyProviderModule<\"anthropic-messages\", AnthropicOptions, SimpleStreamOptions>>\n\t| undefined;\nlet azureOpenAIResponsesProviderModulePromise:\n\t| Promise<LazyProviderModule<\"azure-openai-responses\", AzureOpenAIResponsesOptions, SimpleStreamOptions>>\n\t| undefined;\nlet googleProviderModulePromise:\n\t| Promise<LazyProviderModule<\"google-generative-ai\", GoogleOptions, SimpleStreamOptions>>\n\t| undefined;\nlet googleVertexProviderModulePromise:\n\t| Promise<LazyProviderModule<\"google-vertex\", GoogleVertexOptions, SimpleStreamOptions>>\n\t| undefined;\nlet mistralProviderModulePromise:\n\t| Promise<LazyProviderModule<\"mistral-conversations\", MistralOptions, SimpleStreamOptions>>\n\t| undefined;\nlet openAICodexResponsesProviderModulePromise:\n\t| Promise<LazyProviderModule<\"openai-codex-responses\", OpenAICodexResponsesOptions, SimpleStreamOptions>>\n\t| undefined;\nlet openAICompletionsProviderModulePromise:\n\t| Promise<LazyProviderModule<\"openai-completions\", OpenAICompletionsOptions, SimpleStreamOptions>>\n\t| undefined;\nlet openAIResponsesProviderModulePromise:\n\t| Promise<LazyProviderModule<\"openai-responses\", OpenAIResponsesOptions, SimpleStreamOptions>>\n\t| undefined;\nlet bedrockProviderModuleOverride:\n\t| LazyProviderModule<\"bedrock-converse-stream\", BedrockOptions, SimpleStreamOptions>\n\t| undefined;\nlet bedrockProviderModulePromise:\n\t| Promise<LazyProviderModule<\"bedrock-converse-stream\", BedrockOptions, SimpleStreamOptions>>\n\t| undefined;\n\nexport function setBedrockProviderModule(module: BedrockProviderModule): void {\n\tbedrockProviderModuleOverride = {\n\t\tstream: module.streamBedrock,\n\t\tstreamSimple: module.streamSimpleBedrock,\n\t};\n}\n\nfunction forwardStream(target: AssistantMessageEventStream, source: AsyncIterable<AssistantMessageEvent>): void {\n\t(async () => {\n\t\tfor await (const event of source) {\n\t\t\ttarget.push(event);\n\t\t}\n\t\ttarget.end();\n\t})();\n}\n\nfunction createLazyLoadErrorMessage<TApi extends Api>(model: Model<TApi>, error: unknown): AssistantMessage {\n\treturn {\n\t\trole: \"assistant\",\n\t\tcontent: [],\n\t\tapi: model.api,\n\t\tprovider: model.provider,\n\t\tmodel: model.id,\n\t\tusage: {\n\t\t\tinput: 0,\n\t\t\toutput: 0,\n\t\t\tcacheRead: 0,\n\t\t\tcacheWrite: 0,\n\t\t\ttotalTokens: 0,\n\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t},\n\t\tstopReason: \"error\",\n\t\terrorMessage: error instanceof Error ? 
error.message : String(error),\n\t\ttimestamp: Date.now(),\n\t};\n}\n\nfunction createLazyStream<TApi extends Api, TOptions extends StreamOptions, TSimpleOptions extends SimpleStreamOptions>(\n\tloadModule: () => Promise<LazyProviderModule<TApi, TOptions, TSimpleOptions>>,\n): StreamFunction<TApi, TOptions> {\n\treturn (model, context, options) => {\n\t\tconst outer = new AssistantMessageEventStream();\n\n\t\tloadModule()\n\t\t\t.then((module) => {\n\t\t\t\tconst inner = module.stream(model, context, options);\n\t\t\t\tforwardStream(outer, inner);\n\t\t\t})\n\t\t\t.catch((error) => {\n\t\t\t\tconst message = createLazyLoadErrorMessage(model, error);\n\t\t\t\touter.push({ type: \"error\", reason: \"error\", error: message });\n\t\t\t\touter.end(message);\n\t\t\t});\n\n\t\treturn outer;\n\t};\n}\n\nfunction createLazySimpleStream<\n\tTApi extends Api,\n\tTOptions extends StreamOptions,\n\tTSimpleOptions extends SimpleStreamOptions,\n>(loadModule: () => Promise<LazyProviderModule<TApi, TOptions, TSimpleOptions>>): StreamFunction<TApi, TSimpleOptions> {\n\treturn (model, context, options) => {\n\t\tconst outer = new AssistantMessageEventStream();\n\n\t\tloadModule()\n\t\t\t.then((module) => {\n\t\t\t\tconst inner = module.streamSimple(model, context, options);\n\t\t\t\tforwardStream(outer, inner);\n\t\t\t})\n\t\t\t.catch((error) => {\n\t\t\t\tconst message = createLazyLoadErrorMessage(model, error);\n\t\t\t\touter.push({ type: \"error\", reason: \"error\", error: message });\n\t\t\t\touter.end(message);\n\t\t\t});\n\n\t\treturn outer;\n\t};\n}\n\nfunction loadAnthropicProviderModule(): Promise<\n\tLazyProviderModule<\"anthropic-messages\", AnthropicOptions, SimpleStreamOptions>\n> {\n\tanthropicProviderModulePromise ||= import(\"./anthropic.js\").then((module) => {\n\t\tconst provider = module as AnthropicProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamAnthropic,\n\t\t\tstreamSimple: provider.streamSimpleAnthropic,\n\t\t};\n\t});\n\treturn anthropicProviderModulePromise;\n}\n\nfunction loadAzureOpenAIResponsesProviderModule(): Promise<\n\tLazyProviderModule<\"azure-openai-responses\", AzureOpenAIResponsesOptions, SimpleStreamOptions>\n> {\n\tazureOpenAIResponsesProviderModulePromise ||= import(\"./azure-openai-responses.js\").then((module) => {\n\t\tconst provider = module as AzureOpenAIResponsesProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamAzureOpenAIResponses,\n\t\t\tstreamSimple: provider.streamSimpleAzureOpenAIResponses,\n\t\t};\n\t});\n\treturn azureOpenAIResponsesProviderModulePromise;\n}\n\nfunction loadGoogleProviderModule(): Promise<\n\tLazyProviderModule<\"google-generative-ai\", GoogleOptions, SimpleStreamOptions>\n> {\n\tgoogleProviderModulePromise ||= import(\"./google.js\").then((module) => {\n\t\tconst provider = module as GoogleProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamGoogle,\n\t\t\tstreamSimple: provider.streamSimpleGoogle,\n\t\t};\n\t});\n\treturn googleProviderModulePromise;\n}\n\nfunction loadGoogleVertexProviderModule(): Promise<\n\tLazyProviderModule<\"google-vertex\", GoogleVertexOptions, SimpleStreamOptions>\n> {\n\tgoogleVertexProviderModulePromise ||= import(\"./google-vertex.js\").then((module) => {\n\t\tconst provider = module as GoogleVertexProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamGoogleVertex,\n\t\t\tstreamSimple: provider.streamSimpleGoogleVertex,\n\t\t};\n\t});\n\treturn googleVertexProviderModulePromise;\n}\n\nfunction loadMistralProviderModule(): 
Promise<\n\tLazyProviderModule<\"mistral-conversations\", MistralOptions, SimpleStreamOptions>\n> {\n\tmistralProviderModulePromise ||= import(\"./mistral.js\").then((module) => {\n\t\tconst provider = module as MistralProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamMistral,\n\t\t\tstreamSimple: provider.streamSimpleMistral,\n\t\t};\n\t});\n\treturn mistralProviderModulePromise;\n}\n\nfunction loadOpenAICodexResponsesProviderModule(): Promise<\n\tLazyProviderModule<\"openai-codex-responses\", OpenAICodexResponsesOptions, SimpleStreamOptions>\n> {\n\topenAICodexResponsesProviderModulePromise ||= import(\"./openai-codex-responses.js\").then((module) => {\n\t\tconst provider = module as OpenAICodexResponsesProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamOpenAICodexResponses,\n\t\t\tstreamSimple: provider.streamSimpleOpenAICodexResponses,\n\t\t};\n\t});\n\treturn openAICodexResponsesProviderModulePromise;\n}\n\nfunction loadOpenAICompletionsProviderModule(): Promise<\n\tLazyProviderModule<\"openai-completions\", OpenAICompletionsOptions, SimpleStreamOptions>\n> {\n\topenAICompletionsProviderModulePromise ||= import(\"./openai-completions.js\").then((module) => {\n\t\tconst provider = module as OpenAICompletionsProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamOpenAICompletions,\n\t\t\tstreamSimple: provider.streamSimpleOpenAICompletions,\n\t\t};\n\t});\n\treturn openAICompletionsProviderModulePromise;\n}\n\nfunction loadOpenAIResponsesProviderModule(): Promise<\n\tLazyProviderModule<\"openai-responses\", OpenAIResponsesOptions, SimpleStreamOptions>\n> {\n\topenAIResponsesProviderModulePromise ||= import(\"./openai-responses.js\").then((module) => {\n\t\tconst provider = module as OpenAIResponsesProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamOpenAIResponses,\n\t\t\tstreamSimple: provider.streamSimpleOpenAIResponses,\n\t\t};\n\t});\n\treturn openAIResponsesProviderModulePromise;\n}\n\nfunction loadBedrockProviderModule(): Promise<\n\tLazyProviderModule<\"bedrock-converse-stream\", BedrockOptions, SimpleStreamOptions>\n> {\n\tif (bedrockProviderModuleOverride) {\n\t\treturn Promise.resolve(bedrockProviderModuleOverride);\n\t}\n\tbedrockProviderModulePromise ||= importNodeOnlyProvider(\"./amazon-bedrock.js\").then((module) => {\n\t\tconst provider = module as BedrockProviderModule;\n\t\treturn {\n\t\t\tstream: provider.streamBedrock,\n\t\t\tstreamSimple: provider.streamSimpleBedrock,\n\t\t};\n\t});\n\treturn bedrockProviderModulePromise;\n}\n\nexport const streamAnthropic = createLazyStream(loadAnthropicProviderModule);\nexport const streamSimpleAnthropic = createLazySimpleStream(loadAnthropicProviderModule);\nexport const streamAzureOpenAIResponses = createLazyStream(loadAzureOpenAIResponsesProviderModule);\nexport const streamSimpleAzureOpenAIResponses = createLazySimpleStream(loadAzureOpenAIResponsesProviderModule);\nexport const streamGoogle = createLazyStream(loadGoogleProviderModule);\nexport const streamSimpleGoogle = createLazySimpleStream(loadGoogleProviderModule);\nexport const streamGoogleVertex = createLazyStream(loadGoogleVertexProviderModule);\nexport const streamSimpleGoogleVertex = createLazySimpleStream(loadGoogleVertexProviderModule);\nexport const streamMistral = createLazyStream(loadMistralProviderModule);\nexport const streamSimpleMistral = createLazySimpleStream(loadMistralProviderModule);\nexport const streamOpenAICodexResponses = createLazyStream(loadOpenAICodexResponsesProviderModule);\nexport const 
streamSimpleOpenAICodexResponses = createLazySimpleStream(loadOpenAICodexResponsesProviderModule);\nexport const streamOpenAICompletions = createLazyStream(loadOpenAICompletionsProviderModule);\nexport const streamSimpleOpenAICompletions = createLazySimpleStream(loadOpenAICompletionsProviderModule);\nexport const streamOpenAIResponses = createLazyStream(loadOpenAIResponsesProviderModule);\nexport const streamSimpleOpenAIResponses = createLazySimpleStream(loadOpenAIResponsesProviderModule);\nconst streamBedrockLazy = createLazyStream(loadBedrockProviderModule);\nconst streamSimpleBedrockLazy = createLazySimpleStream(loadBedrockProviderModule);\n\nexport function registerBuiltInApiProviders(): void {\n\tregisterApiProvider({\n\t\tapi: \"anthropic-messages\",\n\t\tstream: streamAnthropic,\n\t\tstreamSimple: streamSimpleAnthropic,\n\t});\n\n\tregisterApiProvider({\n\t\tapi: \"openai-completions\",\n\t\tstream: streamOpenAICompletions,\n\t\tstreamSimple: streamSimpleOpenAICompletions,\n\t});\n\n\tregisterApiProvider({\n\t\tapi: \"mistral-conversations\",\n\t\tstream: streamMistral,\n\t\tstreamSimple: streamSimpleMistral,\n\t});\n\n\tregisterApiProvider({\n\t\tapi: \"openai-responses\",\n\t\tstream: streamOpenAIResponses,\n\t\tstreamSimple: streamSimpleOpenAIResponses,\n\t});\n\n\tregisterApiProvider({\n\t\tapi: \"azure-openai-responses\",\n\t\tstream: streamAzureOpenAIResponses,\n\t\tstreamSimple: streamSimpleAzureOpenAIResponses,\n\t});\n\n\tregisterApiProvider({\n\t\tapi: \"openai-codex-responses\",\n\t\tstream: streamOpenAICodexResponses,\n\t\tstreamSimple: streamSimpleOpenAICodexResponses,\n\t});\n\n\tregisterApiProvider({\n\t\tapi: \"google-generative-ai\",\n\t\tstream: streamGoogle,\n\t\tstreamSimple: streamSimpleGoogle,\n\t});\n\n\tregisterApiProvider({\n\t\tapi: \"google-vertex\",\n\t\tstream: streamGoogleVertex,\n\t\tstreamSimple: streamSimpleGoogleVertex,\n\t});\n\n\tregisterApiProvider({\n\t\tapi: \"bedrock-converse-stream\",\n\t\tstream: streamBedrockLazy,\n\t\tstreamSimple: streamSimpleBedrockLazy,\n\t});\n}\n\nexport function resetApiProviders(): void {\n\tclearApiProviders();\n\tregisterBuiltInApiProviders();\n}\n\nregisterBuiltInApiProviders();\n"]}
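The `sourcesContent` embedded in the replaced and added sourcemaps above makes the design of `register-builtins.ts` readable: each built-in provider is loaded with a dynamic `import()` the first time it is used, the in-flight import is cached in a module-level promise via `||=` so concurrent calls share one load, and a failed import is reported through the returned stream as a terminal `error` event (an `AssistantMessage` with `stopReason: "error"`) rather than a thrown exception. A minimal, self-contained sketch of that shape, using simplified stand-in event and stream types rather than the package's real `AssistantMessageEvent` and `AssistantMessageEventStream`:

```ts
// Simplified stand-ins for the package's event and stream types.
type Event =
	| { type: "text_delta"; delta: string }
	| { type: "error"; message: string };

class EventStream implements AsyncIterable<Event> {
	private queue: Event[] = [];
	private waiters: Array<(r: IteratorResult<Event>) => void> = [];
	private closed = false;

	push(event: Event): void {
		const waiter = this.waiters.shift();
		if (waiter) waiter({ value: event, done: false });
		else this.queue.push(event);
	}

	end(): void {
		this.closed = true;
		for (const waiter of this.waiters.splice(0)) waiter({ value: undefined, done: true });
	}

	[Symbol.asyncIterator](): AsyncIterator<Event> {
		return {
			next: (): Promise<IteratorResult<Event>> => {
				const queued = this.queue.shift();
				if (queued) return Promise.resolve({ value: queued, done: false });
				if (this.closed) return Promise.resolve({ value: undefined, done: true });
				return new Promise((resolve) => this.waiters.push(resolve));
			},
		};
	}
}

// Returns a stream immediately; the provider module loads in the background.
// An import failure becomes a terminal "error" event instead of a rejection
// the caller would have to catch separately.
function createLazyStream(
	loadModule: () => Promise<{ stream: () => AsyncIterable<Event> }>,
): () => EventStream {
	return () => {
		const outer = new EventStream();
		loadModule()
			.then(async (module) => {
				for await (const event of module.stream()) outer.push(event);
				outer.end();
			})
			.catch((error) => {
				outer.push({ type: "error", message: error instanceof Error ? error.message : String(error) });
				outer.end();
			});
		return outer;
	};
}
```

The real module applies this per provider as a `stream`/`streamSimple` pair, which is why deleting `google-gemini-cli.js` in this release only required removing its loader, its two lazy exports, and its `registerApiProvider` entry.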
package/dist/types.d.ts
CHANGED
@@ -1,8 +1,8 @@
import type { AssistantMessageEventStream } from "./utils/event-stream.js";
export type { AssistantMessageEventStream } from "./utils/event-stream.js";
-
export type KnownApi = "openai-completions" | "mistral-conversations" | "openai-responses" | "azure-openai-responses" | "openai-codex-responses" | "anthropic-messages" | "bedrock-converse-stream" | "google-generative-ai" | "google-
+
export type KnownApi = "openai-completions" | "mistral-conversations" | "openai-responses" | "azure-openai-responses" | "openai-codex-responses" | "anthropic-messages" | "bedrock-converse-stream" | "google-generative-ai" | "google-vertex";
export type Api = KnownApi | (string & {});
-
export type KnownProvider = "amazon-bedrock" | "anthropic" | "google" | "google-
+
export type KnownProvider = "amazon-bedrock" | "anthropic" | "google" | "google-vertex" | "openai" | "azure-openai-responses" | "openai-codex" | "deepseek" | "github-copilot" | "xai" | "groq" | "cerebras" | "openrouter" | "vercel-ai-gateway" | "zai" | "mistral" | "minimax" | "minimax-cn" | "moonshotai" | "moonshotai-cn" | "huggingface" | "fireworks" | "opencode" | "opencode-go" | "kimi-coding" | "cloudflare-workers-ai" | "cloudflare-ai-gateway";
export type Provider = KnownProvider | string;
export type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh";
/** Token budgets for each thinking level (token-based providers only) */
@@ -144,6 +144,7 @@ export interface AssistantMessage {
api: Api;
provider: Provider;
model: string;
+
responseModel?: string;
responseId?: string;
usage: Usage;
stopReason: StopReason;
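Two changes are visible in this hunk pair: the `google-gemini-cli` entries drop out of the `KnownApi` and `KnownProvider` unions (the old `KnownProvider` union in the sourcemap below still lists `"google-gemini-cli"` and `"google-antigravity"`, while the new line adds `"cloudflare-workers-ai"` and `"cloudflare-ai-gateway"`), and `AssistantMessage` gains an optional `responseModel` field alongside `responseId`. The diff carries no doc comment for the new field, so the reading sketched below — the model the provider reports having served, with a fallback to the requested id — is an assumption:

```ts
// Trimmed stand-in for the real AssistantMessage declaration; only the
// two fields used below are included.
interface AssistantMessageLike {
	model: string; // the model id the request was made with
	responseModel?: string; // new in 0.71.0; undocumented in this diff (assumed meaning)
}

// Hypothetical helper: prefer the model the response itself reports,
// falling back to the requested id when the provider does not expose one.
function servedModel(message: AssistantMessageLike): string {
	return message.responseModel ?? message.model;
}
```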
package/dist/types.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,YAAY,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,MAAM,MAAM,QAAQ,GACjB,oBAAoB,GACpB,uBAAuB,GACvB,kBAAkB,GAClB,wBAAwB,GACxB,wBAAwB,GACxB,oBAAoB,GACpB,yBAAyB,GACzB,sBAAsB,GACtB,mBAAmB,GACnB,eAAe,CAAC;AAEnB,MAAM,MAAM,GAAG,GAAG,QAAQ,GAAG,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC;AAE3C,MAAM,MAAM,aAAa,GACtB,gBAAgB,GAChB,WAAW,GACX,QAAQ,GACR,mBAAmB,GACnB,oBAAoB,GACpB,eAAe,GACf,QAAQ,GACR,wBAAwB,GACxB,cAAc,GACd,UAAU,GACV,gBAAgB,GAChB,KAAK,GACL,MAAM,GACN,UAAU,GACV,YAAY,GACZ,mBAAmB,GACnB,KAAK,GACL,SAAS,GACT,SAAS,GACT,YAAY,GACZ,aAAa,GACb,WAAW,GACX,UAAU,GACV,aAAa,GACb,aAAa,CAAC;AACjB,MAAM,MAAM,QAAQ,GAAG,aAAa,GAAG,MAAM,CAAC;AAE9C,MAAM,MAAM,aAAa,GAAG,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;AAE5E,yEAAyE;AACzE,MAAM,WAAW,eAAe;IAC/B,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,IAAI,CAAC,EAAE,MAAM,CAAC;CACd;AAGD,MAAM,MAAM,cAAc,GAAG,MAAM,GAAG,OAAO,GAAG,MAAM,CAAC;AAEvD,MAAM,MAAM,SAAS,GAAG,KAAK,GAAG,WAAW,GAAG,MAAM,CAAC;AAErD,MAAM,WAAW,gBAAgB;IAChC,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAChC;AAED,MAAM,WAAW,aAAa;IAC7B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,WAAW,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB;;;OAGG;IACH,SAAS,CAAC,EAAE,SAAS,CAAC;IACtB;;;OAGG;IACH,cAAc,CAAC,EAAE,cAAc,CAAC;IAChC;;;;OAIG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,SAAS,CAAC,EAAE,CAAC,OAAO,EAAE,OAAO,EAAE,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,KAAK,OAAO,GAAG,SAAS,GAAG,OAAO,CAAC,OAAO,GAAG,SAAS,CAAC,CAAC;IACxG;;;OAGG;IACH,UAAU,CAAC,EAAE,CAAC,QAAQ,EAAE,gBAAgB,EAAE,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,KAAK,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IACrF;;;;OAIG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC;;;OAGG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;;;;;OAMG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACnC;AAED,MAAM,MAAM,qBAAqB,GAAG,aAAa,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAG5E,MAAM,WAAW,mBAAoB,SAAQ,aAAa;IACzD,SAAS,CAAC,EAAE,aAAa,CAAC;IAC1B,4EAA4E;IAC5E,eAAe,CAAC,EAAE,eAAe,CAAC;CAClC;AAUD,MAAM,MAAM,cAAc,CAAC,IAAI,SAAS,GAAG,GAAG,GAAG,EAAE,QAAQ,SAAS,aAAa,GAAG,aAAa,IAAI,CACpG,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,QAAQ,KACd,2BAA2B,CAAC;AAEjC,MAAM,WAAW,eAAe;IAC/B,CAAC,EAAE,CAAC,CAAC;IACL,EAAE,EAAE,MAAM,CAAC;IACX,KAAK,CAAC,EAAE,YAAY,GAAG,cAAc,CAAC;CACtC;AAED,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,aAAa,CAAC,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,WAAW,eAAe;IAC/B,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B;;gDAE4C;IAC5C,QAAQ,CAAC,EAAE,OAAO,CAAC;CACnB;AAED,MAAM,WAAW,YAAY;IAC5B,IAAI,EAAE,OAAO,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,QAAQ;IACxB,IAAI,EAAE,UAAU,CAAC;IACjB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IAC/B,gBAAgB,CAAC,EAAE,MAAM,CAAC;CAC1B;AAED,MAAM,WAAW,KAAK;IACrB,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,EAAE,MAAM,CAAC;IACnB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;QACnB,KAAK,EAAE,MAAM,CAAC;KACd,CAAC;CACF;AAED,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,QAAQ,GAAG,SAAS,GAAG,OAAO,GAAG,SAAS,CAAC;AAE7E,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,GAAG,CAAC,WAAW,GAAG,YAAY,CAA
C,EAAE,CAAC;IACjD,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,gBAAgB;IAChC,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,EAAE,CAAC,WAAW,GAAG,eAAe,GAAG,QAAQ,CAAC,EAAE,CAAC;IACtD,GAAG,EAAE,GAAG,CAAC;IACT,QAAQ,EAAE,QAAQ,CAAC;IACnB,KAAK,EAAE,MAAM,CAAC;IACd,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,KAAK,EAAE,KAAK,CAAC;IACb,UAAU,EAAE,UAAU,CAAC;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,iBAAiB,CAAC,QAAQ,GAAG,GAAG;IAChD,IAAI,EAAE,YAAY,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,QAAQ,EAAE,MAAM,CAAC;IACjB,OAAO,EAAE,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IACxC,OAAO,CAAC,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,MAAM,OAAO,GAAG,WAAW,GAAG,gBAAgB,GAAG,iBAAiB,CAAC;AAEzE,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,SAAS,CAAC;AAEvC,MAAM,WAAW,IAAI,CAAC,WAAW,SAAS,OAAO,GAAG,OAAO;IAC1D,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,WAAW,CAAC;CACxB;AAED,MAAM,WAAW,OAAO;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,KAAK,CAAC,EAAE,IAAI,EAAE,CAAC;CACf;AAED;;;;;;;GAOG;AACH,MAAM,MAAM,qBAAqB,GAC9B;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC5C;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvE;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,UAAU,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,QAAQ,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC7F;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,MAAM,GAAG,QAAQ,GAAG,SAAS,CAAC,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvG;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,SAAS,GAAG,OAAO,CAAC,CAAC;IAAC,KAAK,EAAE,gBAAgB,CAAA;CAAE,CAAC;AAEhG;;;GAGG;AACH,MAAM,WAAW,uBAAuB;IACvC,wFAAwF;IACxF,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,yGAAyG;IACzG,qBAAqB,CAAC,EAAE,OAAO,CAAC;IAChC,yFAAyF;IACzF,uBAAuB,CAAC,EAAE,OAAO,CAAC;IAClC,yGAAyG;IACzG,kBAAkB,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC,CAAC;IAC5D,qIAAqI;IACrI,wBAAwB,CAAC,EAAE,OAAO,CAAC;IACnC,0EAA0E;IAC1E,cAAc,CAAC,EAAE,uBAAuB,GAAG,YAAY,CAAC;IACxD,sFAAsF;IACtF,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC,2HAA2H;IAC3H,gCAAgC,CAAC,EAAE,OAAO,CAAC;IAC3C,4HAA4H;IAC5H,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC,wJAAwJ;IACxJ,2CAA2C,CAAC,EAAE,OAAO,CAAC;IACtD,wWAAwW;IACxW,cAAc,CAAC,EAAE,QAAQ,GAAG,YAAY,GAAG,UAAU,GAAG,KAAK,GAAG,MAAM,GAAG,oBAAoB,CAAC;IAC9F,4FAA4F;IAC5F,iBAAiB,CAAC,EAAE,iBAAiB,CAAC;IACtC,iGAAiG;IACjG,oBAAoB,CAAC,EAAE,oBAAoB,CAAC;IAC5C,0GAA0G;IAC1G,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,2FAA2F;IAC3F,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,iMAAiM;IACjM,kBAAkB,CAAC,EAAE,WAAW,CAAC;IACjC,mLAAmL;IACnL,0BAA0B,CAAC,EAAE,OAAO,CAAC;IACrC,oLAAoL;IACpL,0BAA0B,CAAC,EAAE,OAAO,CAAC;CACrC;AAED,wDAAwD;AACxD,MAAM,WAAW,qBAAqB;IACrC,qIAAqI;IACrI,mBAAmB,CAAC,EAAE,OAAO,CAAC;IAC9B,oFAAoF;IACpF,0BAA0B,CAAC,EAAE,OAAO,CAAC;CACrC;AAED,qEAAqE;AACrE,MAAM,WAAW,uBAAuB;IACvC;;;;;;OAMG;IACH,+BAA+B,CAAC,EAAE,OAAO,CAAC;IAC1C,+GAA+G;IAC/G,0BAA0B,CAAC,EAAE,OAAO,CAAC;CACrC;AAED;;;;;G
AKG;AACH,MAAM,WAAW,iBAAiB;IACjC,0EAA0E;IAC1E,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B,4GAA4G;IAC5G,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,yJAAyJ;IACzJ,eAAe,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;IACnC,+EAA+E;IAC/E,GAAG,CAAC,EAAE,OAAO,CAAC;IACd,+EAA+E;IAC/E,wBAAwB,CAAC,EAAE,OAAO,CAAC;IACnC,2GAA2G;IAC3G,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB,0EAA0E;IAC1E,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;IAChB,6DAA6D;IAC7D,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAClB,kIAAkI;IAClI,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;IACzB,yHAAyH;IACzH,IAAI,CAAC,EACF,MAAM,GACN;QACA,4DAA4D;QAC5D,EAAE,CAAC,EAAE,MAAM,CAAC;QACZ,0DAA0D;QAC1D,SAAS,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;KACzB,CAAC;IACL,8CAA8C;IAC9C,SAAS,CAAC,EAAE;QACX,uCAAuC;QACvC,MAAM,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;QACzB,2CAA2C;QAC3C,UAAU,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;QAC7B,uBAAuB;QACvB,KAAK,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;QACxB,4BAA4B;QAC5B,KAAK,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;QACxB,yBAAyB;QACzB,OAAO,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;KAC1B,CAAC;IACF,oIAAoI;IACpI,wBAAwB,CAAC,EACtB,MAAM,GACN;QACA,oDAAoD;QACpD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,oDAAoD;QACpD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,oDAAoD;QACpD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,oDAAoD;QACpD,GAAG,CAAC,EAAE,MAAM,CAAC;KACZ,CAAC;IACL,2HAA2H;IAC3H,qBAAqB,CAAC,EACnB,MAAM,GACN;QACA,yDAAyD;QACzD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,yDAAyD;QACzD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,yDAAyD;QACzD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,yDAAyD;QACzD,GAAG,CAAC,EAAE,MAAM,CAAC;KACZ,CAAC;CACL;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC,mGAAmG;IACnG,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;IAChB,8EAA8E;IAC9E,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;CACjB;AAGD,MAAM,WAAW,KAAK,CAAC,IAAI,SAAS,GAAG;IACtC,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,GAAG,EAAE,IAAI,CAAC;IACV,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,SAAS,EAAE,OAAO,CAAC;IACnB,KAAK,EAAE,CAAC,MAAM,GAAG,OAAO,CAAC,EAAE,CAAC;IAC5B,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;KACnB,CAAC;IACF,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,kGAAkG;IAClG,MAAM,CAAC,EAAE,IAAI,SAAS,oBAAoB,GACvC,uBAAuB,GACvB,IAAI,SAAS,kBAAkB,GAC9B,qBAAqB,GACrB,IAAI,SAAS,oBAAoB,GAChC,uBAAuB,GACvB,KAAK,CAAC;CACX","sourcesContent":["import type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type KnownApi =\n\t| \"openai-completions\"\n\t| \"mistral-conversations\"\n\t| \"openai-responses\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex-responses\"\n\t| \"anthropic-messages\"\n\t| \"bedrock-converse-stream\"\n\t| \"google-generative-ai\"\n\t| \"google-gemini-cli\"\n\t| \"google-vertex\";\n\nexport type Api = KnownApi | (string & {});\n\nexport type KnownProvider =\n\t| \"amazon-bedrock\"\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"google-gemini-cli\"\n\t| \"google-antigravity\"\n\t| \"google-vertex\"\n\t| \"openai\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex\"\n\t| \"deepseek\"\n\t| \"github-copilot\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"vercel-ai-gateway\"\n\t| \"zai\"\n\t| \"mistral\"\n\t| \"minimax\"\n\t| \"minimax-cn\"\n\t| \"huggingface\"\n\t| \"fireworks\"\n\t| \"opencode\"\n\t| \"opencode-go\"\n\t| \"kimi-coding\";\nexport type Provider = KnownProvider | string;\n\nexport type ThinkingLevel = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/** Token budgets for each thinking level (token-based providers only) */\nexport interface ThinkingBudgets {\n\tminimal?: number;\n\tlow?: number;\n\tmedium?: 
number;\n\thigh?: number;\n}\n\n// Base options all providers share\nexport type CacheRetention = \"none\" | \"short\" | \"long\";\n\nexport type Transport = \"sse\" | \"websocket\" | \"auto\";\n\nexport interface ProviderResponse {\n\tstatus: number;\n\theaders: Record<string, string>;\n}\n\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n\t/**\n\t * Preferred transport for providers that support multiple transports.\n\t * Providers that do not support this option ignore it.\n\t */\n\ttransport?: Transport;\n\t/**\n\t * Prompt cache retention preference. Providers map this to their supported values.\n\t * Default: \"short\".\n\t */\n\tcacheRetention?: CacheRetention;\n\t/**\n\t * Optional session identifier for providers that support session-based caching.\n\t * Providers can use this to enable prompt caching, request routing, or other\n\t * session-aware features. Ignored by providers that don't support it.\n\t */\n\tsessionId?: string;\n\t/**\n\t * Optional callback for inspecting or replacing provider payloads before sending.\n\t * Return undefined to keep the payload unchanged.\n\t */\n\tonPayload?: (payload: unknown, model: Model<Api>) => unknown | undefined | Promise<unknown | undefined>;\n\t/**\n\t * Optional callback invoked after an HTTP response is received and before\n\t * its body stream is consumed.\n\t */\n\tonResponse?: (response: ProviderResponse, model: Model<Api>) => void | Promise<void>;\n\t/**\n\t * Optional custom HTTP headers to include in API requests.\n\t * Merged with provider defaults; can override default headers.\n\t * Not supported by all providers (e.g., AWS Bedrock uses SDK auth).\n\t */\n\theaders?: Record<string, string>;\n\t/**\n\t * HTTP request timeout in milliseconds for providers/SDKs that support it.\n\t * For example, OpenAI and Anthropic SDK clients default to 10 minutes.\n\t */\n\ttimeoutMs?: number;\n\t/**\n\t * Maximum retry attempts for providers/SDKs that support client-side retries.\n\t * For example, OpenAI and Anthropic SDK clients default to 2.\n\t */\n\tmaxRetries?: number;\n\t/**\n\t * Maximum delay in milliseconds to wait for a retry when the server requests a long wait.\n\t * If the server's requested delay exceeds this value, the request fails immediately\n\t * with an error containing the requested delay, allowing higher-level retry logic\n\t * to handle it with user visibility.\n\t * Default: 60000 (60 seconds). 
Set to 0 to disable the cap.\n\t */\n\tmaxRetryDelayMs?: number;\n\t/**\n\t * Optional metadata to include in API requests.\n\t * Providers extract the fields they understand and ignore the rest.\n\t * For example, Anthropic uses `user_id` for abuse tracking and rate limiting.\n\t */\n\tmetadata?: Record<string, unknown>;\n}\n\nexport type ProviderStreamOptions = StreamOptions & Record<string, unknown>;\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ThinkingLevel;\n\t/** Custom token budgets for thinking levels (token-based providers only) */\n\tthinkingBudgets?: ThinkingBudgets;\n}\n\n// Generic StreamFunction with typed options.\n//\n// Contract:\n// - Must return an AssistantMessageEventStream.\n// - Once invoked, request/model/runtime failures should be encoded in the\n// returned stream, not thrown.\n// - Error termination must produce an AssistantMessage with stopReason\n// \"error\" or \"aborted\" and errorMessage, emitted via the stream protocol.\nexport type StreamFunction<TApi extends Api = Api, TOptions extends StreamOptions = StreamOptions> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: TOptions,\n) => AssistantMessageEventStream;\n\nexport interface TextSignatureV1 {\n\tv: 1;\n\tid: string;\n\tphase?: \"commentary\" | \"final_answer\";\n}\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, message metadata (legacy id string or TextSignatureV1 JSON)\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n\t/** When true, the thinking content was redacted by safety filters. The opaque\n\t * encrypted payload is stored in `thinkingSignature` so it can be passed back\n\t * to the API for multi-turn continuity. 
*/\n\tredacted?: boolean;\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tresponseId?: string; // Provider-specific response/message identifier when the upstream API exposes one\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\n/**\n * Event protocol for AssistantMessageEventStream.\n *\n * Streams should emit `start` before partial updates, then terminate with either:\n * - `done` carrying the final successful AssistantMessage, or\n * - `error` carrying the final AssistantMessage with stopReason \"error\" or \"aborted\"\n * and errorMessage.\n */\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for OpenAI-compatible completions APIs.\n * Use 
this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompletionsCompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Optional mapping from pi-ai reasoning levels to provider/model-specific `reasoning_effort` values. */\n\treasoningEffortMap?: Partial<Record<ThinkingLevel, string>>;\n\t/** Whether the provider supports `stream_options: { include_usage: true }` for token usage in streaming responses. Default: true. */\n\tsupportsUsageInStreaming?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether all replayed assistant messages must include an empty reasoning_content field when reasoning is enabled. Default: auto-detected from URL. */\n\trequiresReasoningContentOnAssistantMessages?: boolean;\n\t/** Format for reasoning/thinking parameter. \"openai\" uses reasoning_effort, \"openrouter\" uses reasoning: { effort }, \"deepseek\" uses thinking: { type } plus reasoning_effort, \"zai\" uses top-level enable_thinking: boolean, \"qwen\" uses top-level enable_thinking: boolean, and \"qwen-chat-template\" uses chat_template_kwargs.enable_thinking. Default: \"openai\". */\n\tthinkingFormat?: \"openai\" | \"openrouter\" | \"deepseek\" | \"zai\" | \"qwen\" | \"qwen-chat-template\";\n\t/** OpenRouter-specific routing preferences. Only used when baseUrl points to OpenRouter. */\n\topenRouterRouting?: OpenRouterRouting;\n\t/** Vercel AI Gateway routing preferences. Only used when baseUrl points to Vercel AI Gateway. */\n\tvercelGatewayRouting?: VercelGatewayRouting;\n\t/** Whether z.ai supports top-level `tool_stream: true` for streaming tool call deltas. Default: false. */\n\tzaiToolStream?: boolean;\n\t/** Whether the provider supports the `strict` field in tool definitions. Default: true. */\n\tsupportsStrictMode?: boolean;\n\t/** Cache control convention for prompt caching. \"anthropic\" applies Anthropic-style `cache_control` markers to the system prompt, last tool definition, and last user/assistant text content. */\n\tcacheControlFormat?: \"anthropic\";\n\t/** Whether to send known session-affinity headers (`session_id`, `x-client-request-id`, `x-session-affinity`) from `options.sessionId` when caching is enabled. Default: false. */\n\tsendSessionAffinityHeaders?: boolean;\n\t/** Whether the provider supports long prompt cache retention (`prompt_cache_retention: \"24h\"` or Anthropic-style `cache_control.ttl: \"1h\"`, depending on format). Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/** Compatibility settings for OpenAI Responses APIs. 
*/\nexport interface OpenAIResponsesCompat {\n\t/** Whether to send the OpenAI `session_id` cache-affinity header from `options.sessionId` when caching is enabled. Default: true. */\n\tsendSessionIdHeader?: boolean;\n\t/** Whether the provider supports `prompt_cache_retention: \"24h\"`. Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/** Compatibility settings for Anthropic Messages-compatible APIs. */\nexport interface AnthropicMessagesCompat {\n\t/**\n\t * Whether the provider accepts per-tool `eager_input_streaming`.\n\t * When false, the Anthropic provider omits `tools[].eager_input_streaming`\n\t * and sends the legacy `fine-grained-tool-streaming-2025-05-14` beta header\n\t * for tool-enabled requests.\n\t * Default: true.\n\t */\n\tsupportsEagerToolInputStreaming?: boolean;\n\t/** Whether the provider supports Anthropic long cache retention (`cache_control.ttl: \"1h\"`). Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/**\n * OpenRouter provider routing preferences.\n * Controls which upstream providers OpenRouter routes requests to.\n * Sent as the `provider` field in the OpenRouter API request body.\n * @see https://openrouter.ai/docs/guides/routing/provider-selection\n */\nexport interface OpenRouterRouting {\n\t/** Whether to allow backup providers to serve requests. Default: true. */\n\tallow_fallbacks?: boolean;\n\t/** Whether to filter providers to only those that support all parameters in the request. Default: false. */\n\trequire_parameters?: boolean;\n\t/** Data collection setting. \"allow\" (default): allow providers that may store/train on data. \"deny\": only use providers that don't collect user data. */\n\tdata_collection?: \"deny\" | \"allow\";\n\t/** Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. */\n\tzdr?: boolean;\n\t/** Whether to restrict routing to only models that allow text distillation. */\n\tenforce_distillable_text?: boolean;\n\t/** An ordered list of provider names/slugs to try in sequence, falling back to the next if unavailable. */\n\torder?: string[];\n\t/** List of provider names/slugs to exclusively allow for this request. */\n\tonly?: string[];\n\t/** List of provider names/slugs to skip for this request. */\n\tignore?: string[];\n\t/** A list of quantization levels to filter providers by (e.g., [\"fp16\", \"bf16\", \"fp8\", \"fp6\", \"int8\", \"int4\", \"fp4\", \"fp32\"]). */\n\tquantizations?: string[];\n\t/** Sorting strategy. Can be a string (e.g., \"price\", \"throughput\", \"latency\") or an object with `by` and `partition`. */\n\tsort?:\n\t\t| string\n\t\t| {\n\t\t\t\t/** The sorting metric: \"price\", \"throughput\", \"latency\". */\n\t\t\t\tby?: string;\n\t\t\t\t/** Partitioning strategy: \"model\" (default) or \"none\". */\n\t\t\t\tpartition?: string | null;\n\t\t };\n\t/** Maximum price per million tokens (USD). */\n\tmax_price?: {\n\t\t/** Price per million prompt tokens. */\n\t\tprompt?: number | string;\n\t\t/** Price per million completion tokens. */\n\t\tcompletion?: number | string;\n\t\t/** Price per image. */\n\t\timage?: number | string;\n\t\t/** Price per audio unit. */\n\t\taudio?: number | string;\n\t\t/** Price per request. */\n\t\trequest?: number | string;\n\t};\n\t/** Preferred minimum throughput (tokens/second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. */\n\tpreferred_min_throughput?:\n\t\t| number\n\t\t| {\n\t\t\t\t/** Minimum tokens/second at the 50th percentile. 
*/\n\t\t\t\tp50?: number;\n\t\t\t\t/** Minimum tokens/second at the 75th percentile. */\n\t\t\t\tp75?: number;\n\t\t\t\t/** Minimum tokens/second at the 90th percentile. */\n\t\t\t\tp90?: number;\n\t\t\t\t/** Minimum tokens/second at the 99th percentile. */\n\t\t\t\tp99?: number;\n\t\t };\n\t/** Preferred maximum latency (seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. */\n\tpreferred_max_latency?:\n\t\t| number\n\t\t| {\n\t\t\t\t/** Maximum latency in seconds at the 50th percentile. */\n\t\t\t\tp50?: number;\n\t\t\t\t/** Maximum latency in seconds at the 75th percentile. */\n\t\t\t\tp75?: number;\n\t\t\t\t/** Maximum latency in seconds at the 90th percentile. */\n\t\t\t\tp90?: number;\n\t\t\t\t/** Maximum latency in seconds at the 99th percentile. */\n\t\t\t\tp99?: number;\n\t\t };\n}\n\n/**\n * Vercel AI Gateway routing preferences.\n * Controls which upstream providers the gateway routes requests to.\n * @see https://vercel.com/docs/ai-gateway/models-and-providers/provider-options\n */\nexport interface VercelGatewayRouting {\n\t/** List of provider slugs to exclusively use for this request (e.g., [\"bedrock\", \"anthropic\"]). */\n\tonly?: string[];\n\t/** List of provider slugs to try in order (e.g., [\"anthropic\", \"openai\"]). */\n\torder?: string[];\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for OpenAI-compatible APIs. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\"\n\t\t? OpenAICompletionsCompat\n\t\t: TApi extends \"openai-responses\"\n\t\t\t? OpenAIResponsesCompat\n\t\t\t: TApi extends \"anthropic-messages\"\n\t\t\t\t? AnthropicMessagesCompat\n\t\t\t\t: never;\n}\n"]}
+
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,YAAY,EAAE,2BAA2B,EAAE,MAAM,yBAAyB,CAAC;AAE3E,MAAM,MAAM,QAAQ,GACjB,oBAAoB,GACpB,uBAAuB,GACvB,kBAAkB,GAClB,wBAAwB,GACxB,wBAAwB,GACxB,oBAAoB,GACpB,yBAAyB,GACzB,sBAAsB,GACtB,eAAe,CAAC;AAEnB,MAAM,MAAM,GAAG,GAAG,QAAQ,GAAG,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC;AAE3C,MAAM,MAAM,aAAa,GACtB,gBAAgB,GAChB,WAAW,GACX,QAAQ,GACR,eAAe,GACf,QAAQ,GACR,wBAAwB,GACxB,cAAc,GACd,UAAU,GACV,gBAAgB,GAChB,KAAK,GACL,MAAM,GACN,UAAU,GACV,YAAY,GACZ,mBAAmB,GACnB,KAAK,GACL,SAAS,GACT,SAAS,GACT,YAAY,GACZ,YAAY,GACZ,eAAe,GACf,aAAa,GACb,WAAW,GACX,UAAU,GACV,aAAa,GACb,aAAa,GACb,uBAAuB,GACvB,uBAAuB,CAAC;AAC3B,MAAM,MAAM,QAAQ,GAAG,aAAa,GAAG,MAAM,CAAC;AAE9C,MAAM,MAAM,aAAa,GAAG,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;AAE5E,yEAAyE;AACzE,MAAM,WAAW,eAAe;IAC/B,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,IAAI,CAAC,EAAE,MAAM,CAAC;CACd;AAGD,MAAM,MAAM,cAAc,GAAG,MAAM,GAAG,OAAO,GAAG,MAAM,CAAC;AAEvD,MAAM,MAAM,SAAS,GAAG,KAAK,GAAG,WAAW,GAAG,MAAM,CAAC;AAErD,MAAM,WAAW,gBAAgB;IAChC,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAChC;AAED,MAAM,WAAW,aAAa;IAC7B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,WAAW,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB;;;OAGG;IACH,SAAS,CAAC,EAAE,SAAS,CAAC;IACtB;;;OAGG;IACH,cAAc,CAAC,EAAE,cAAc,CAAC;IAChC;;;;OAIG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,SAAS,CAAC,EAAE,CAAC,OAAO,EAAE,OAAO,EAAE,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,KAAK,OAAO,GAAG,SAAS,GAAG,OAAO,CAAC,OAAO,GAAG,SAAS,CAAC,CAAC;IACxG;;;OAGG;IACH,UAAU,CAAC,EAAE,CAAC,QAAQ,EAAE,gBAAgB,EAAE,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,KAAK,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IACrF;;;;OAIG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC;;;OAGG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;;;;;OAMG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACnC;AAED,MAAM,MAAM,qBAAqB,GAAG,aAAa,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAG5E,MAAM,WAAW,mBAAoB,SAAQ,aAAa;IACzD,SAAS,CAAC,EAAE,aAAa,CAAC;IAC1B,4EAA4E;IAC5E,eAAe,CAAC,EAAE,eAAe,CAAC;CAClC;AAUD,MAAM,MAAM,cAAc,CAAC,IAAI,SAAS,GAAG,GAAG,GAAG,EAAE,QAAQ,SAAS,aAAa,GAAG,aAAa,IAAI,CACpG,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAClB,OAAO,EAAE,OAAO,EAChB,OAAO,CAAC,EAAE,QAAQ,KACd,2BAA2B,CAAC;AAEjC,MAAM,WAAW,eAAe;IAC/B,CAAC,EAAE,CAAC,CAAC;IACL,EAAE,EAAE,MAAM,CAAC;IACX,KAAK,CAAC,EAAE,YAAY,GAAG,cAAc,CAAC;CACtC;AAED,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,aAAa,CAAC,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,WAAW,eAAe;IAC/B,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B;;gDAE4C;IAC5C,QAAQ,CAAC,EAAE,OAAO,CAAC;CACnB;AAED,MAAM,WAAW,YAAY;IAC5B,IAAI,EAAE,OAAO,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,QAAQ;IACxB,IAAI,EAAE,UAAU,CAAC;IACjB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IAC/B,gBAAgB,CAAC,EAAE,MAAM,CAAC;CAC1B;AAED,MAAM,WAAW,KAAK;IACrB,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,EAAE,MAAM,CAAC;IACnB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;QACnB,KAAK,EAAE,MAAM,CAAC;KACd,CAAC;CACF;AAED,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,QAAQ,GAAG,SAAS,GAAG,OAAO,GAAG,SAAS,CAAC;AAE7E,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,GAAG,CAAC,WAAW,GAAG,YA
AY,CAAC,EAAE,CAAC;IACjD,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,gBAAgB;IAChC,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,EAAE,CAAC,WAAW,GAAG,eAAe,GAAG,QAAQ,CAAC,EAAE,CAAC;IACtD,GAAG,EAAE,GAAG,CAAC;IACT,QAAQ,EAAE,QAAQ,CAAC;IACnB,KAAK,EAAE,MAAM,CAAC;IACd,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,KAAK,EAAE,KAAK,CAAC;IACb,UAAU,EAAE,UAAU,CAAC;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,iBAAiB,CAAC,QAAQ,GAAG,GAAG;IAChD,IAAI,EAAE,YAAY,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,QAAQ,EAAE,MAAM,CAAC;IACjB,OAAO,EAAE,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IACxC,OAAO,CAAC,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,MAAM,OAAO,GAAG,WAAW,GAAG,gBAAgB,GAAG,iBAAiB,CAAC;AAEzE,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,SAAS,CAAC;AAEvC,MAAM,WAAW,IAAI,CAAC,WAAW,SAAS,OAAO,GAAG,OAAO;IAC1D,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,WAAW,CAAC;CACxB;AAED,MAAM,WAAW,OAAO;IACvB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,KAAK,CAAC,EAAE,IAAI,EAAE,CAAC;CACf;AAED;;;;;;;GAOG;AACH,MAAM,MAAM,qBAAqB,GAC9B;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC5C;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvE;IAAE,IAAI,EAAE,YAAY,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,UAAU,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACtF;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC3E;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC1F;IAAE,IAAI,EAAE,cAAc,CAAC;IAAC,YAAY,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,QAAQ,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GAC7F;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,MAAM,GAAG,QAAQ,GAAG,SAAS,CAAC,CAAC;IAAC,OAAO,EAAE,gBAAgB,CAAA;CAAE,GACvG;IAAE,IAAI,EAAE,OAAO,CAAC;IAAC,MAAM,EAAE,OAAO,CAAC,UAAU,EAAE,SAAS,GAAG,OAAO,CAAC,CAAC;IAAC,KAAK,EAAE,gBAAgB,CAAA;CAAE,CAAC;AAEhG;;;GAGG;AACH,MAAM,WAAW,uBAAuB;IACvC,wFAAwF;IACxF,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,yGAAyG;IACzG,qBAAqB,CAAC,EAAE,OAAO,CAAC;IAChC,yFAAyF;IACzF,uBAAuB,CAAC,EAAE,OAAO,CAAC;IAClC,yGAAyG;IACzG,kBAAkB,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC,CAAC;IAC5D,qIAAqI;IACrI,wBAAwB,CAAC,EAAE,OAAO,CAAC;IACnC,0EAA0E;IAC1E,cAAc,CAAC,EAAE,uBAAuB,GAAG,YAAY,CAAC;IACxD,sFAAsF;IACtF,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC,2HAA2H;IAC3H,gCAAgC,CAAC,EAAE,OAAO,CAAC;IAC3C,4HAA4H;IAC5H,sBAAsB,CAAC,EAAE,OAAO,CAAC;IACjC,wJAAwJ;IACxJ,2CAA2C,CAAC,EAAE,OAAO,CAAC;IACtD,wWAAwW;IACxW,cAAc,CAAC,EAAE,QAAQ,GAAG,YAAY,GAAG,UAAU,GAAG,KAAK,GAAG,MAAM,GAAG,oBAAoB,CAAC;IAC9F,4FAA4F;IAC5F,iBAAiB,CAAC,EAAE,iBAAiB,CAAC;IACtC,iGAAiG;IACjG,oBAAoB,CAAC,EAAE,oBAAoB,CAAC;IAC5C,0GAA0G;IAC1G,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,2FAA2F;IAC3F,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,iMAAiM;IACjM,kBAAkB,CAAC,EAAE,WAAW,CAAC;IACjC,mLAAmL;IACnL,0BAA0B,CAAC,EAAE,OAAO,CAAC;IACrC,oLAAoL;IACpL,0BAA0B,CAAC,EAAE,OAAO,CAAC;CACrC;AAED,wDAAwD;AACxD,MAAM,WAAW,qBAAqB;IACrC,qIAAqI;IACrI,mBAAmB,CAAC,EAAE,OAAO,CAAC;IAC9B,oFAAoF;IACpF,0BAA0B,CAAC,EAAE,OAAO,CAAC;CACrC;AAED,qEAAqE;AACrE,MAAM,WAAW,uBAAuB;IACvC;;;;;;OAMG;IACH,+BAA+B,CAAC,EAAE,OAAO,CAAC;IAC1C,+GAA+G;IAC/G,0BAA0B
,CAAC,EAAE,OAAO,CAAC;CACrC;AAED;;;;;GAKG;AACH,MAAM,WAAW,iBAAiB;IACjC,0EAA0E;IAC1E,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B,4GAA4G;IAC5G,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,yJAAyJ;IACzJ,eAAe,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;IACnC,+EAA+E;IAC/E,GAAG,CAAC,EAAE,OAAO,CAAC;IACd,+EAA+E;IAC/E,wBAAwB,CAAC,EAAE,OAAO,CAAC;IACnC,2GAA2G;IAC3G,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB,0EAA0E;IAC1E,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;IAChB,6DAA6D;IAC7D,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAClB,kIAAkI;IAClI,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;IACzB,yHAAyH;IACzH,IAAI,CAAC,EACF,MAAM,GACN;QACA,4DAA4D;QAC5D,EAAE,CAAC,EAAE,MAAM,CAAC;QACZ,0DAA0D;QAC1D,SAAS,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;KACzB,CAAC;IACL,8CAA8C;IAC9C,SAAS,CAAC,EAAE;QACX,uCAAuC;QACvC,MAAM,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;QACzB,2CAA2C;QAC3C,UAAU,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;QAC7B,uBAAuB;QACvB,KAAK,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;QACxB,4BAA4B;QAC5B,KAAK,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;QACxB,yBAAyB;QACzB,OAAO,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;KAC1B,CAAC;IACF,oIAAoI;IACpI,wBAAwB,CAAC,EACtB,MAAM,GACN;QACA,oDAAoD;QACpD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,oDAAoD;QACpD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,oDAAoD;QACpD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,oDAAoD;QACpD,GAAG,CAAC,EAAE,MAAM,CAAC;KACZ,CAAC;IACL,2HAA2H;IAC3H,qBAAqB,CAAC,EACnB,MAAM,GACN;QACA,yDAAyD;QACzD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,yDAAyD;QACzD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,yDAAyD;QACzD,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,yDAAyD;QACzD,GAAG,CAAC,EAAE,MAAM,CAAC;KACZ,CAAC;CACL;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC,mGAAmG;IACnG,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;IAChB,8EAA8E;IAC9E,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;CACjB;AAGD,MAAM,WAAW,KAAK,CAAC,IAAI,SAAS,GAAG;IACtC,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,GAAG,EAAE,IAAI,CAAC;IACV,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,SAAS,EAAE,OAAO,CAAC;IACnB,KAAK,EAAE,CAAC,MAAM,GAAG,OAAO,CAAC,EAAE,CAAC;IAC5B,IAAI,EAAE;QACL,KAAK,EAAE,MAAM,CAAC;QACd,MAAM,EAAE,MAAM,CAAC;QACf,SAAS,EAAE,MAAM,CAAC;QAClB,UAAU,EAAE,MAAM,CAAC;KACnB,CAAC;IACF,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,kGAAkG;IAClG,MAAM,CAAC,EAAE,IAAI,SAAS,oBAAoB,GACvC,uBAAuB,GACvB,IAAI,SAAS,kBAAkB,GAC9B,qBAAqB,GACrB,IAAI,SAAS,oBAAoB,GAChC,uBAAuB,GACvB,KAAK,CAAC;CACX","sourcesContent":["import type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type KnownApi =\n\t| \"openai-completions\"\n\t| \"mistral-conversations\"\n\t| \"openai-responses\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex-responses\"\n\t| \"anthropic-messages\"\n\t| \"bedrock-converse-stream\"\n\t| \"google-generative-ai\"\n\t| \"google-vertex\";\n\nexport type Api = KnownApi | (string & {});\n\nexport type KnownProvider =\n\t| \"amazon-bedrock\"\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"google-vertex\"\n\t| \"openai\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex\"\n\t| \"deepseek\"\n\t| \"github-copilot\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"vercel-ai-gateway\"\n\t| \"zai\"\n\t| \"mistral\"\n\t| \"minimax\"\n\t| \"minimax-cn\"\n\t| \"moonshotai\"\n\t| \"moonshotai-cn\"\n\t| \"huggingface\"\n\t| \"fireworks\"\n\t| \"opencode\"\n\t| \"opencode-go\"\n\t| \"kimi-coding\"\n\t| \"cloudflare-workers-ai\"\n\t| \"cloudflare-ai-gateway\";\nexport type Provider = KnownProvider | string;\n\nexport type ThinkingLevel = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/** Token budgets for each thinking level (token-based providers only) */\nexport interface ThinkingBudgets 
{\n\tminimal?: number;\n\tlow?: number;\n\tmedium?: number;\n\thigh?: number;\n}\n\n// Base options all providers share\nexport type CacheRetention = \"none\" | \"short\" | \"long\";\n\nexport type Transport = \"sse\" | \"websocket\" | \"auto\";\n\nexport interface ProviderResponse {\n\tstatus: number;\n\theaders: Record<string, string>;\n}\n\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n\t/**\n\t * Preferred transport for providers that support multiple transports.\n\t * Providers that do not support this option ignore it.\n\t */\n\ttransport?: Transport;\n\t/**\n\t * Prompt cache retention preference. Providers map this to their supported values.\n\t * Default: \"short\".\n\t */\n\tcacheRetention?: CacheRetention;\n\t/**\n\t * Optional session identifier for providers that support session-based caching.\n\t * Providers can use this to enable prompt caching, request routing, or other\n\t * session-aware features. Ignored by providers that don't support it.\n\t */\n\tsessionId?: string;\n\t/**\n\t * Optional callback for inspecting or replacing provider payloads before sending.\n\t * Return undefined to keep the payload unchanged.\n\t */\n\tonPayload?: (payload: unknown, model: Model<Api>) => unknown | undefined | Promise<unknown | undefined>;\n\t/**\n\t * Optional callback invoked after an HTTP response is received and before\n\t * its body stream is consumed.\n\t */\n\tonResponse?: (response: ProviderResponse, model: Model<Api>) => void | Promise<void>;\n\t/**\n\t * Optional custom HTTP headers to include in API requests.\n\t * Merged with provider defaults; can override default headers.\n\t * Not supported by all providers (e.g., AWS Bedrock uses SDK auth).\n\t */\n\theaders?: Record<string, string>;\n\t/**\n\t * HTTP request timeout in milliseconds for providers/SDKs that support it.\n\t * For example, OpenAI and Anthropic SDK clients default to 10 minutes.\n\t */\n\ttimeoutMs?: number;\n\t/**\n\t * Maximum retry attempts for providers/SDKs that support client-side retries.\n\t * For example, OpenAI and Anthropic SDK clients default to 2.\n\t */\n\tmaxRetries?: number;\n\t/**\n\t * Maximum delay in milliseconds to wait for a retry when the server requests a long wait.\n\t * If the server's requested delay exceeds this value, the request fails immediately\n\t * with an error containing the requested delay, allowing higher-level retry logic\n\t * to handle it with user visibility.\n\t * Default: 60000 (60 seconds). 
Set to 0 to disable the cap.\n\t */\n\tmaxRetryDelayMs?: number;\n\t/**\n\t * Optional metadata to include in API requests.\n\t * Providers extract the fields they understand and ignore the rest.\n\t * For example, Anthropic uses `user_id` for abuse tracking and rate limiting.\n\t */\n\tmetadata?: Record<string, unknown>;\n}\n\nexport type ProviderStreamOptions = StreamOptions & Record<string, unknown>;\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ThinkingLevel;\n\t/** Custom token budgets for thinking levels (token-based providers only) */\n\tthinkingBudgets?: ThinkingBudgets;\n}\n\n// Generic StreamFunction with typed options.\n//\n// Contract:\n// - Must return an AssistantMessageEventStream.\n// - Once invoked, request/model/runtime failures should be encoded in the\n// returned stream, not thrown.\n// - Error termination must produce an AssistantMessage with stopReason\n// \"error\" or \"aborted\" and errorMessage, emitted via the stream protocol.\nexport type StreamFunction<TApi extends Api = Api, TOptions extends StreamOptions = StreamOptions> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: TOptions,\n) => AssistantMessageEventStream;\n\nexport interface TextSignatureV1 {\n\tv: 1;\n\tid: string;\n\tphase?: \"commentary\" | \"final_answer\";\n}\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, message metadata (legacy id string or TextSignatureV1 JSON)\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n\t/** When true, the thinking content was redacted by safety filters. The opaque\n\t * encrypted payload is stored in `thinkingSignature` so it can be passed back\n\t * to the API for multi-turn continuity. */\n\tredacted?: boolean;\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tresponseModel?: string; // Concrete `chunk.model` when different from the requested `model` (e.g. 
OpenRouter `auto` -> `anthropic/...`)\n\tresponseId?: string; // Provider-specific response/message identifier when the upstream API exposes one\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\n/**\n * Event protocol for AssistantMessageEventStream.\n *\n * Streams should emit `start` before partial updates, then terminate with either:\n * - `done` carrying the final successful AssistantMessage, or\n * - `error` carrying the final AssistantMessage with stopReason \"error\" or \"aborted\"\n * and errorMessage.\n */\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for OpenAI-compatible completions APIs.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompletionsCompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Optional mapping from pi-ai reasoning levels to provider/model-specific `reasoning_effort` values. */\n\treasoningEffortMap?: Partial<Record<ThinkingLevel, string>>;\n\t/** Whether the provider supports `stream_options: { include_usage: true }` for token usage in streaming responses. Default: true. */\n\tsupportsUsageInStreaming?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. 
Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether all replayed assistant messages must include an empty reasoning_content field when reasoning is enabled. Default: auto-detected from URL. */\n\trequiresReasoningContentOnAssistantMessages?: boolean;\n\t/** Format for reasoning/thinking parameter. \"openai\" uses reasoning_effort, \"openrouter\" uses reasoning: { effort }, \"deepseek\" uses thinking: { type } plus reasoning_effort, \"zai\" uses top-level enable_thinking: boolean, \"qwen\" uses top-level enable_thinking: boolean, and \"qwen-chat-template\" uses chat_template_kwargs.enable_thinking. Default: \"openai\". */\n\tthinkingFormat?: \"openai\" | \"openrouter\" | \"deepseek\" | \"zai\" | \"qwen\" | \"qwen-chat-template\";\n\t/** OpenRouter-specific routing preferences. Only used when baseUrl points to OpenRouter. */\n\topenRouterRouting?: OpenRouterRouting;\n\t/** Vercel AI Gateway routing preferences. Only used when baseUrl points to Vercel AI Gateway. */\n\tvercelGatewayRouting?: VercelGatewayRouting;\n\t/** Whether z.ai supports top-level `tool_stream: true` for streaming tool call deltas. Default: false. */\n\tzaiToolStream?: boolean;\n\t/** Whether the provider supports the `strict` field in tool definitions. Default: true. */\n\tsupportsStrictMode?: boolean;\n\t/** Cache control convention for prompt caching. \"anthropic\" applies Anthropic-style `cache_control` markers to the system prompt, last tool definition, and last user/assistant text content. */\n\tcacheControlFormat?: \"anthropic\";\n\t/** Whether to send known session-affinity headers (`session_id`, `x-client-request-id`, `x-session-affinity`) from `options.sessionId` when caching is enabled. Default: false. */\n\tsendSessionAffinityHeaders?: boolean;\n\t/** Whether the provider supports long prompt cache retention (`prompt_cache_retention: \"24h\"` or Anthropic-style `cache_control.ttl: \"1h\"`, depending on format). Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/** Compatibility settings for OpenAI Responses APIs. */\nexport interface OpenAIResponsesCompat {\n\t/** Whether to send the OpenAI `session_id` cache-affinity header from `options.sessionId` when caching is enabled. Default: true. */\n\tsendSessionIdHeader?: boolean;\n\t/** Whether the provider supports `prompt_cache_retention: \"24h\"`. Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/** Compatibility settings for Anthropic Messages-compatible APIs. */\nexport interface AnthropicMessagesCompat {\n\t/**\n\t * Whether the provider accepts per-tool `eager_input_streaming`.\n\t * When false, the Anthropic provider omits `tools[].eager_input_streaming`\n\t * and sends the legacy `fine-grained-tool-streaming-2025-05-14` beta header\n\t * for tool-enabled requests.\n\t * Default: true.\n\t */\n\tsupportsEagerToolInputStreaming?: boolean;\n\t/** Whether the provider supports Anthropic long cache retention (`cache_control.ttl: \"1h\"`). Default: true. 
*/\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/**\n * OpenRouter provider routing preferences.\n * Controls which upstream providers OpenRouter routes requests to.\n * Sent as the `provider` field in the OpenRouter API request body.\n * @see https://openrouter.ai/docs/guides/routing/provider-selection\n */\nexport interface OpenRouterRouting {\n\t/** Whether to allow backup providers to serve requests. Default: true. */\n\tallow_fallbacks?: boolean;\n\t/** Whether to filter providers to only those that support all parameters in the request. Default: false. */\n\trequire_parameters?: boolean;\n\t/** Data collection setting. \"allow\" (default): allow providers that may store/train on data. \"deny\": only use providers that don't collect user data. */\n\tdata_collection?: \"deny\" | \"allow\";\n\t/** Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. */\n\tzdr?: boolean;\n\t/** Whether to restrict routing to only models that allow text distillation. */\n\tenforce_distillable_text?: boolean;\n\t/** An ordered list of provider names/slugs to try in sequence, falling back to the next if unavailable. */\n\torder?: string[];\n\t/** List of provider names/slugs to exclusively allow for this request. */\n\tonly?: string[];\n\t/** List of provider names/slugs to skip for this request. */\n\tignore?: string[];\n\t/** A list of quantization levels to filter providers by (e.g., [\"fp16\", \"bf16\", \"fp8\", \"fp6\", \"int8\", \"int4\", \"fp4\", \"fp32\"]). */\n\tquantizations?: string[];\n\t/** Sorting strategy. Can be a string (e.g., \"price\", \"throughput\", \"latency\") or an object with `by` and `partition`. */\n\tsort?:\n\t\t| string\n\t\t| {\n\t\t\t\t/** The sorting metric: \"price\", \"throughput\", \"latency\". */\n\t\t\t\tby?: string;\n\t\t\t\t/** Partitioning strategy: \"model\" (default) or \"none\". */\n\t\t\t\tpartition?: string | null;\n\t\t };\n\t/** Maximum price per million tokens (USD). */\n\tmax_price?: {\n\t\t/** Price per million prompt tokens. */\n\t\tprompt?: number | string;\n\t\t/** Price per million completion tokens. */\n\t\tcompletion?: number | string;\n\t\t/** Price per image. */\n\t\timage?: number | string;\n\t\t/** Price per audio unit. */\n\t\taudio?: number | string;\n\t\t/** Price per request. */\n\t\trequest?: number | string;\n\t};\n\t/** Preferred minimum throughput (tokens/second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. */\n\tpreferred_min_throughput?:\n\t\t| number\n\t\t| {\n\t\t\t\t/** Minimum tokens/second at the 50th percentile. */\n\t\t\t\tp50?: number;\n\t\t\t\t/** Minimum tokens/second at the 75th percentile. */\n\t\t\t\tp75?: number;\n\t\t\t\t/** Minimum tokens/second at the 90th percentile. */\n\t\t\t\tp90?: number;\n\t\t\t\t/** Minimum tokens/second at the 99th percentile. */\n\t\t\t\tp99?: number;\n\t\t };\n\t/** Preferred maximum latency (seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. */\n\tpreferred_max_latency?:\n\t\t| number\n\t\t| {\n\t\t\t\t/** Maximum latency in seconds at the 50th percentile. */\n\t\t\t\tp50?: number;\n\t\t\t\t/** Maximum latency in seconds at the 75th percentile. */\n\t\t\t\tp75?: number;\n\t\t\t\t/** Maximum latency in seconds at the 90th percentile. */\n\t\t\t\tp90?: number;\n\t\t\t\t/** Maximum latency in seconds at the 99th percentile. 
*/\n\t\t\t\tp99?: number;\n\t\t };\n}\n\n/**\n * Vercel AI Gateway routing preferences.\n * Controls which upstream providers the gateway routes requests to.\n * @see https://vercel.com/docs/ai-gateway/models-and-providers/provider-options\n */\nexport interface VercelGatewayRouting {\n\t/** List of provider slugs to exclusively use for this request (e.g., [\"bedrock\", \"anthropic\"]). */\n\tonly?: string[];\n\t/** List of provider slugs to try in order (e.g., [\"anthropic\", \"openai\"]). */\n\torder?: string[];\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for OpenAI-compatible APIs. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\"\n\t\t? OpenAICompletionsCompat\n\t\t: TApi extends \"openai-responses\"\n\t\t\t? OpenAIResponsesCompat\n\t\t\t: TApi extends \"anthropic-messages\"\n\t\t\t\t? AnthropicMessagesCompat\n\t\t\t\t: never;\n}\n"]}
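The `Model` and `OpenAICompletionsCompat` types embedded above are the public surface for registering custom OpenAI-compatible endpoints. A minimal sketch of how a caller might use them follows; the root import path, endpoint URL, and concrete field values are illustrative assumptions, not taken from this diff:

	// Hypothetical custom entry for a self-hosted OpenAI-compatible endpoint,
	// using the Model / OpenAICompletionsCompat types shown in the source map above.
	import type { Model } from "@mariozechner/pi-ai"; // assumed root export

	const customModel: Model<"openai-completions"> = {
		id: "local-qwen",
		name: "Local Qwen (illustrative)",
		api: "openai-completions",
		provider: "my-gateway", // Provider = KnownProvider | string, so custom slugs are valid
		baseUrl: "https://llm.example.internal/v1",
		reasoning: true,
		input: ["text"],
		cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, // $/million tokens
		contextWindow: 131072,
		maxTokens: 8192,
		compat: {
			// Override URL-based auto-detection, since this baseUrl is unknown to the library:
			maxTokensField: "max_tokens",
			thinkingFormat: "qwen-chat-template", // sends chat_template_kwargs.enable_thinking
			supportsUsageInStreaming: false,
		},
	};

Note that `compat` is a conditional type keyed on `TApi`, so the compiler only admits `OpenAICompletionsCompat` fields when `api` is "openai-completions"; mismatched compat overrides fail at compile time rather than at request time.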
package/dist/types.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"","sourcesContent":["import type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type KnownApi =\n\t| \"openai-completions\"\n\t| \"mistral-conversations\"\n\t| \"openai-responses\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex-responses\"\n\t| \"anthropic-messages\"\n\t| \"bedrock-converse-stream\"\n\t| \"google-generative-ai\"\n\t| \"google-gemini-cli\"\n\t| \"google-vertex\";\n\nexport type Api = KnownApi | (string & {});\n\nexport type KnownProvider =\n\t| \"amazon-bedrock\"\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"google-gemini-cli\"\n\t| \"google-antigravity\"\n\t| \"google-vertex\"\n\t| \"openai\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex\"\n\t| \"deepseek\"\n\t| \"github-copilot\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"vercel-ai-gateway\"\n\t| \"zai\"\n\t| \"mistral\"\n\t| \"minimax\"\n\t| \"minimax-cn\"\n\t| \"huggingface\"\n\t| \"fireworks\"\n\t| \"opencode\"\n\t| \"opencode-go\"\n\t| \"kimi-coding\";\nexport type Provider = KnownProvider | string;\n\nexport type ThinkingLevel = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/** Token budgets for each thinking level (token-based providers only) */\nexport interface ThinkingBudgets {\n\tminimal?: number;\n\tlow?: number;\n\tmedium?: number;\n\thigh?: number;\n}\n\n// Base options all providers share\nexport type CacheRetention = \"none\" | \"short\" | \"long\";\n\nexport type Transport = \"sse\" | \"websocket\" | \"auto\";\n\nexport interface ProviderResponse {\n\tstatus: number;\n\theaders: Record<string, string>;\n}\n\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n\t/**\n\t * Preferred transport for providers that support multiple transports.\n\t * Providers that do not support this option ignore it.\n\t */\n\ttransport?: Transport;\n\t/**\n\t * Prompt cache retention preference. Providers map this to their supported values.\n\t * Default: \"short\".\n\t */\n\tcacheRetention?: CacheRetention;\n\t/**\n\t * Optional session identifier for providers that support session-based caching.\n\t * Providers can use this to enable prompt caching, request routing, or other\n\t * session-aware features. 
Ignored by providers that don't support it.\n\t */\n\tsessionId?: string;\n\t/**\n\t * Optional callback for inspecting or replacing provider payloads before sending.\n\t * Return undefined to keep the payload unchanged.\n\t */\n\tonPayload?: (payload: unknown, model: Model<Api>) => unknown | undefined | Promise<unknown | undefined>;\n\t/**\n\t * Optional callback invoked after an HTTP response is received and before\n\t * its body stream is consumed.\n\t */\n\tonResponse?: (response: ProviderResponse, model: Model<Api>) => void | Promise<void>;\n\t/**\n\t * Optional custom HTTP headers to include in API requests.\n\t * Merged with provider defaults; can override default headers.\n\t * Not supported by all providers (e.g., AWS Bedrock uses SDK auth).\n\t */\n\theaders?: Record<string, string>;\n\t/**\n\t * HTTP request timeout in milliseconds for providers/SDKs that support it.\n\t * For example, OpenAI and Anthropic SDK clients default to 10 minutes.\n\t */\n\ttimeoutMs?: number;\n\t/**\n\t * Maximum retry attempts for providers/SDKs that support client-side retries.\n\t * For example, OpenAI and Anthropic SDK clients default to 2.\n\t */\n\tmaxRetries?: number;\n\t/**\n\t * Maximum delay in milliseconds to wait for a retry when the server requests a long wait.\n\t * If the server's requested delay exceeds this value, the request fails immediately\n\t * with an error containing the requested delay, allowing higher-level retry logic\n\t * to handle it with user visibility.\n\t * Default: 60000 (60 seconds). Set to 0 to disable the cap.\n\t */\n\tmaxRetryDelayMs?: number;\n\t/**\n\t * Optional metadata to include in API requests.\n\t * Providers extract the fields they understand and ignore the rest.\n\t * For example, Anthropic uses `user_id` for abuse tracking and rate limiting.\n\t */\n\tmetadata?: Record<string, unknown>;\n}\n\nexport type ProviderStreamOptions = StreamOptions & Record<string, unknown>;\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ThinkingLevel;\n\t/** Custom token budgets for thinking levels (token-based providers only) */\n\tthinkingBudgets?: ThinkingBudgets;\n}\n\n// Generic StreamFunction with typed options.\n//\n// Contract:\n// - Must return an AssistantMessageEventStream.\n// - Once invoked, request/model/runtime failures should be encoded in the\n// returned stream, not thrown.\n// - Error termination must produce an AssistantMessage with stopReason\n// \"error\" or \"aborted\" and errorMessage, emitted via the stream protocol.\nexport type StreamFunction<TApi extends Api = Api, TOptions extends StreamOptions = StreamOptions> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: TOptions,\n) => AssistantMessageEventStream;\n\nexport interface TextSignatureV1 {\n\tv: 1;\n\tid: string;\n\tphase?: \"commentary\" | \"final_answer\";\n}\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, message metadata (legacy id string or TextSignatureV1 JSON)\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n\t/** When true, the thinking content was redacted by safety filters. The opaque\n\t * encrypted payload is stored in `thinkingSignature` so it can be passed back\n\t * to the API for multi-turn continuity. 
*/\n\tredacted?: boolean;\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tresponseId?: string; // Provider-specific response/message identifier when the upstream API exposes one\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\n/**\n * Event protocol for AssistantMessageEventStream.\n *\n * Streams should emit `start` before partial updates, then terminate with either:\n * - `done` carrying the final successful AssistantMessage, or\n * - `error` carrying the final AssistantMessage with stopReason \"error\" or \"aborted\"\n * and errorMessage.\n */\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for OpenAI-compatible completions APIs.\n * Use 
this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompletionsCompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Optional mapping from pi-ai reasoning levels to provider/model-specific `reasoning_effort` values. */\n\treasoningEffortMap?: Partial<Record<ThinkingLevel, string>>;\n\t/** Whether the provider supports `stream_options: { include_usage: true }` for token usage in streaming responses. Default: true. */\n\tsupportsUsageInStreaming?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether all replayed assistant messages must include an empty reasoning_content field when reasoning is enabled. Default: auto-detected from URL. */\n\trequiresReasoningContentOnAssistantMessages?: boolean;\n\t/** Format for reasoning/thinking parameter. \"openai\" uses reasoning_effort, \"openrouter\" uses reasoning: { effort }, \"deepseek\" uses thinking: { type } plus reasoning_effort, \"zai\" uses top-level enable_thinking: boolean, \"qwen\" uses top-level enable_thinking: boolean, and \"qwen-chat-template\" uses chat_template_kwargs.enable_thinking. Default: \"openai\". */\n\tthinkingFormat?: \"openai\" | \"openrouter\" | \"deepseek\" | \"zai\" | \"qwen\" | \"qwen-chat-template\";\n\t/** OpenRouter-specific routing preferences. Only used when baseUrl points to OpenRouter. */\n\topenRouterRouting?: OpenRouterRouting;\n\t/** Vercel AI Gateway routing preferences. Only used when baseUrl points to Vercel AI Gateway. */\n\tvercelGatewayRouting?: VercelGatewayRouting;\n\t/** Whether z.ai supports top-level `tool_stream: true` for streaming tool call deltas. Default: false. */\n\tzaiToolStream?: boolean;\n\t/** Whether the provider supports the `strict` field in tool definitions. Default: true. */\n\tsupportsStrictMode?: boolean;\n\t/** Cache control convention for prompt caching. \"anthropic\" applies Anthropic-style `cache_control` markers to the system prompt, last tool definition, and last user/assistant text content. */\n\tcacheControlFormat?: \"anthropic\";\n\t/** Whether to send known session-affinity headers (`session_id`, `x-client-request-id`, `x-session-affinity`) from `options.sessionId` when caching is enabled. Default: false. */\n\tsendSessionAffinityHeaders?: boolean;\n\t/** Whether the provider supports long prompt cache retention (`prompt_cache_retention: \"24h\"` or Anthropic-style `cache_control.ttl: \"1h\"`, depending on format). Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/** Compatibility settings for OpenAI Responses APIs. 
*/\nexport interface OpenAIResponsesCompat {\n\t/** Whether to send the OpenAI `session_id` cache-affinity header from `options.sessionId` when caching is enabled. Default: true. */\n\tsendSessionIdHeader?: boolean;\n\t/** Whether the provider supports `prompt_cache_retention: \"24h\"`. Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/** Compatibility settings for Anthropic Messages-compatible APIs. */\nexport interface AnthropicMessagesCompat {\n\t/**\n\t * Whether the provider accepts per-tool `eager_input_streaming`.\n\t * When false, the Anthropic provider omits `tools[].eager_input_streaming`\n\t * and sends the legacy `fine-grained-tool-streaming-2025-05-14` beta header\n\t * for tool-enabled requests.\n\t * Default: true.\n\t */\n\tsupportsEagerToolInputStreaming?: boolean;\n\t/** Whether the provider supports Anthropic long cache retention (`cache_control.ttl: \"1h\"`). Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/**\n * OpenRouter provider routing preferences.\n * Controls which upstream providers OpenRouter routes requests to.\n * Sent as the `provider` field in the OpenRouter API request body.\n * @see https://openrouter.ai/docs/guides/routing/provider-selection\n */\nexport interface OpenRouterRouting {\n\t/** Whether to allow backup providers to serve requests. Default: true. */\n\tallow_fallbacks?: boolean;\n\t/** Whether to filter providers to only those that support all parameters in the request. Default: false. */\n\trequire_parameters?: boolean;\n\t/** Data collection setting. \"allow\" (default): allow providers that may store/train on data. \"deny\": only use providers that don't collect user data. */\n\tdata_collection?: \"deny\" | \"allow\";\n\t/** Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. */\n\tzdr?: boolean;\n\t/** Whether to restrict routing to only models that allow text distillation. */\n\tenforce_distillable_text?: boolean;\n\t/** An ordered list of provider names/slugs to try in sequence, falling back to the next if unavailable. */\n\torder?: string[];\n\t/** List of provider names/slugs to exclusively allow for this request. */\n\tonly?: string[];\n\t/** List of provider names/slugs to skip for this request. */\n\tignore?: string[];\n\t/** A list of quantization levels to filter providers by (e.g., [\"fp16\", \"bf16\", \"fp8\", \"fp6\", \"int8\", \"int4\", \"fp4\", \"fp32\"]). */\n\tquantizations?: string[];\n\t/** Sorting strategy. Can be a string (e.g., \"price\", \"throughput\", \"latency\") or an object with `by` and `partition`. */\n\tsort?:\n\t\t| string\n\t\t| {\n\t\t\t\t/** The sorting metric: \"price\", \"throughput\", \"latency\". */\n\t\t\t\tby?: string;\n\t\t\t\t/** Partitioning strategy: \"model\" (default) or \"none\". */\n\t\t\t\tpartition?: string | null;\n\t\t };\n\t/** Maximum price per million tokens (USD). */\n\tmax_price?: {\n\t\t/** Price per million prompt tokens. */\n\t\tprompt?: number | string;\n\t\t/** Price per million completion tokens. */\n\t\tcompletion?: number | string;\n\t\t/** Price per image. */\n\t\timage?: number | string;\n\t\t/** Price per audio unit. */\n\t\taudio?: number | string;\n\t\t/** Price per request. */\n\t\trequest?: number | string;\n\t};\n\t/** Preferred minimum throughput (tokens/second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. */\n\tpreferred_min_throughput?:\n\t\t| number\n\t\t| {\n\t\t\t\t/** Minimum tokens/second at the 50th percentile. 
*/\n\t\t\t\tp50?: number;\n\t\t\t\t/** Minimum tokens/second at the 75th percentile. */\n\t\t\t\tp75?: number;\n\t\t\t\t/** Minimum tokens/second at the 90th percentile. */\n\t\t\t\tp90?: number;\n\t\t\t\t/** Minimum tokens/second at the 99th percentile. */\n\t\t\t\tp99?: number;\n\t\t };\n\t/** Preferred maximum latency (seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. */\n\tpreferred_max_latency?:\n\t\t| number\n\t\t| {\n\t\t\t\t/** Maximum latency in seconds at the 50th percentile. */\n\t\t\t\tp50?: number;\n\t\t\t\t/** Maximum latency in seconds at the 75th percentile. */\n\t\t\t\tp75?: number;\n\t\t\t\t/** Maximum latency in seconds at the 90th percentile. */\n\t\t\t\tp90?: number;\n\t\t\t\t/** Maximum latency in seconds at the 99th percentile. */\n\t\t\t\tp99?: number;\n\t\t };\n}\n\n/**\n * Vercel AI Gateway routing preferences.\n * Controls which upstream providers the gateway routes requests to.\n * @see https://vercel.com/docs/ai-gateway/models-and-providers/provider-options\n */\nexport interface VercelGatewayRouting {\n\t/** List of provider slugs to exclusively use for this request (e.g., [\"bedrock\", \"anthropic\"]). */\n\tonly?: string[];\n\t/** List of provider slugs to try in order (e.g., [\"anthropic\", \"openai\"]). */\n\torder?: string[];\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for OpenAI-compatible APIs. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\"\n\t\t? OpenAICompletionsCompat\n\t\t: TApi extends \"openai-responses\"\n\t\t\t? OpenAIResponsesCompat\n\t\t\t: TApi extends \"anthropic-messages\"\n\t\t\t\t? AnthropicMessagesCompat\n\t\t\t\t: never;\n}\n"]}
+
{"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"","sourcesContent":["import type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type KnownApi =\n\t| \"openai-completions\"\n\t| \"mistral-conversations\"\n\t| \"openai-responses\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex-responses\"\n\t| \"anthropic-messages\"\n\t| \"bedrock-converse-stream\"\n\t| \"google-generative-ai\"\n\t| \"google-vertex\";\n\nexport type Api = KnownApi | (string & {});\n\nexport type KnownProvider =\n\t| \"amazon-bedrock\"\n\t| \"anthropic\"\n\t| \"google\"\n\t| \"google-vertex\"\n\t| \"openai\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex\"\n\t| \"deepseek\"\n\t| \"github-copilot\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"vercel-ai-gateway\"\n\t| \"zai\"\n\t| \"mistral\"\n\t| \"minimax\"\n\t| \"minimax-cn\"\n\t| \"moonshotai\"\n\t| \"moonshotai-cn\"\n\t| \"huggingface\"\n\t| \"fireworks\"\n\t| \"opencode\"\n\t| \"opencode-go\"\n\t| \"kimi-coding\"\n\t| \"cloudflare-workers-ai\"\n\t| \"cloudflare-ai-gateway\";\nexport type Provider = KnownProvider | string;\n\nexport type ThinkingLevel = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/** Token budgets for each thinking level (token-based providers only) */\nexport interface ThinkingBudgets {\n\tminimal?: number;\n\tlow?: number;\n\tmedium?: number;\n\thigh?: number;\n}\n\n// Base options all providers share\nexport type CacheRetention = \"none\" | \"short\" | \"long\";\n\nexport type Transport = \"sse\" | \"websocket\" | \"auto\";\n\nexport interface ProviderResponse {\n\tstatus: number;\n\theaders: Record<string, string>;\n}\n\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n\t/**\n\t * Preferred transport for providers that support multiple transports.\n\t * Providers that do not support this option ignore it.\n\t */\n\ttransport?: Transport;\n\t/**\n\t * Prompt cache retention preference. Providers map this to their supported values.\n\t * Default: \"short\".\n\t */\n\tcacheRetention?: CacheRetention;\n\t/**\n\t * Optional session identifier for providers that support session-based caching.\n\t * Providers can use this to enable prompt caching, request routing, or other\n\t * session-aware features. 
Ignored by providers that don't support it.\n\t */\n\tsessionId?: string;\n\t/**\n\t * Optional callback for inspecting or replacing provider payloads before sending.\n\t * Return undefined to keep the payload unchanged.\n\t */\n\tonPayload?: (payload: unknown, model: Model<Api>) => unknown | undefined | Promise<unknown | undefined>;\n\t/**\n\t * Optional callback invoked after an HTTP response is received and before\n\t * its body stream is consumed.\n\t */\n\tonResponse?: (response: ProviderResponse, model: Model<Api>) => void | Promise<void>;\n\t/**\n\t * Optional custom HTTP headers to include in API requests.\n\t * Merged with provider defaults; can override default headers.\n\t * Not supported by all providers (e.g., AWS Bedrock uses SDK auth).\n\t */\n\theaders?: Record<string, string>;\n\t/**\n\t * HTTP request timeout in milliseconds for providers/SDKs that support it.\n\t * For example, OpenAI and Anthropic SDK clients default to 10 minutes.\n\t */\n\ttimeoutMs?: number;\n\t/**\n\t * Maximum retry attempts for providers/SDKs that support client-side retries.\n\t * For example, OpenAI and Anthropic SDK clients default to 2.\n\t */\n\tmaxRetries?: number;\n\t/**\n\t * Maximum delay in milliseconds to wait for a retry when the server requests a long wait.\n\t * If the server's requested delay exceeds this value, the request fails immediately\n\t * with an error containing the requested delay, allowing higher-level retry logic\n\t * to handle it with user visibility.\n\t * Default: 60000 (60 seconds). Set to 0 to disable the cap.\n\t */\n\tmaxRetryDelayMs?: number;\n\t/**\n\t * Optional metadata to include in API requests.\n\t * Providers extract the fields they understand and ignore the rest.\n\t * For example, Anthropic uses `user_id` for abuse tracking and rate limiting.\n\t */\n\tmetadata?: Record<string, unknown>;\n}\n\nexport type ProviderStreamOptions = StreamOptions & Record<string, unknown>;\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ThinkingLevel;\n\t/** Custom token budgets for thinking levels (token-based providers only) */\n\tthinkingBudgets?: ThinkingBudgets;\n}\n\n// Generic StreamFunction with typed options.\n//\n// Contract:\n// - Must return an AssistantMessageEventStream.\n// - Once invoked, request/model/runtime failures should be encoded in the\n// returned stream, not thrown.\n// - Error termination must produce an AssistantMessage with stopReason\n// \"error\" or \"aborted\" and errorMessage, emitted via the stream protocol.\nexport type StreamFunction<TApi extends Api = Api, TOptions extends StreamOptions = StreamOptions> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: TOptions,\n) => AssistantMessageEventStream;\n\nexport interface TextSignatureV1 {\n\tv: 1;\n\tid: string;\n\tphase?: \"commentary\" | \"final_answer\";\n}\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, message metadata (legacy id string or TextSignatureV1 JSON)\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n\t/** When true, the thinking content was redacted by safety filters. The opaque\n\t * encrypted payload is stored in `thinkingSignature` so it can be passed back\n\t * to the API for multi-turn continuity. 
*/\n\tredacted?: boolean;\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tresponseModel?: string; // Concrete `chunk.model` when different from the requested `model` (e.g. OpenRouter `auto` -> `anthropic/...`)\n\tresponseId?: string; // Provider-specific response/message identifier when the upstream API exposes one\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\n/**\n * Event protocol for AssistantMessageEventStream.\n *\n * Streams should emit `start` before partial updates, then terminate with either:\n * - `done` carrying the final successful AssistantMessage, or\n * - `error` carrying the final AssistantMessage with stopReason \"error\" or \"aborted\"\n * and errorMessage.\n */\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: 
Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for OpenAI-compatible completions APIs.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompletionsCompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Optional mapping from pi-ai reasoning levels to provider/model-specific `reasoning_effort` values. */\n\treasoningEffortMap?: Partial<Record<ThinkingLevel, string>>;\n\t/** Whether the provider supports `stream_options: { include_usage: true }` for token usage in streaming responses. Default: true. */\n\tsupportsUsageInStreaming?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Whether all replayed assistant messages must include an empty reasoning_content field when reasoning is enabled. Default: auto-detected from URL. */\n\trequiresReasoningContentOnAssistantMessages?: boolean;\n\t/** Format for reasoning/thinking parameter. \"openai\" uses reasoning_effort, \"openrouter\" uses reasoning: { effort }, \"deepseek\" uses thinking: { type } plus reasoning_effort, \"zai\" uses top-level enable_thinking: boolean, \"qwen\" uses top-level enable_thinking: boolean, and \"qwen-chat-template\" uses chat_template_kwargs.enable_thinking. Default: \"openai\". */\n\tthinkingFormat?: \"openai\" | \"openrouter\" | \"deepseek\" | \"zai\" | \"qwen\" | \"qwen-chat-template\";\n\t/** OpenRouter-specific routing preferences. Only used when baseUrl points to OpenRouter. */\n\topenRouterRouting?: OpenRouterRouting;\n\t/** Vercel AI Gateway routing preferences. Only used when baseUrl points to Vercel AI Gateway. */\n\tvercelGatewayRouting?: VercelGatewayRouting;\n\t/** Whether z.ai supports top-level `tool_stream: true` for streaming tool call deltas. Default: false. */\n\tzaiToolStream?: boolean;\n\t/** Whether the provider supports the `strict` field in tool definitions. Default: true. */\n\tsupportsStrictMode?: boolean;\n\t/** Cache control convention for prompt caching. \"anthropic\" applies Anthropic-style `cache_control` markers to the system prompt, last tool definition, and last user/assistant text content. */\n\tcacheControlFormat?: \"anthropic\";\n\t/** Whether to send known session-affinity headers (`session_id`, `x-client-request-id`, `x-session-affinity`) from `options.sessionId` when caching is enabled. Default: false. */\n\tsendSessionAffinityHeaders?: boolean;\n\t/** Whether the provider supports long prompt cache retention (`prompt_cache_retention: \"24h\"` or Anthropic-style `cache_control.ttl: \"1h\"`, depending on format). Default: true. 
*/\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/** Compatibility settings for OpenAI Responses APIs. */\nexport interface OpenAIResponsesCompat {\n\t/** Whether to send the OpenAI `session_id` cache-affinity header from `options.sessionId` when caching is enabled. Default: true. */\n\tsendSessionIdHeader?: boolean;\n\t/** Whether the provider supports `prompt_cache_retention: \"24h\"`. Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/** Compatibility settings for Anthropic Messages-compatible APIs. */\nexport interface AnthropicMessagesCompat {\n\t/**\n\t * Whether the provider accepts per-tool `eager_input_streaming`.\n\t * When false, the Anthropic provider omits `tools[].eager_input_streaming`\n\t * and sends the legacy `fine-grained-tool-streaming-2025-05-14` beta header\n\t * for tool-enabled requests.\n\t * Default: true.\n\t */\n\tsupportsEagerToolInputStreaming?: boolean;\n\t/** Whether the provider supports Anthropic long cache retention (`cache_control.ttl: \"1h\"`). Default: true. */\n\tsupportsLongCacheRetention?: boolean;\n}\n\n/**\n * OpenRouter provider routing preferences.\n * Controls which upstream providers OpenRouter routes requests to.\n * Sent as the `provider` field in the OpenRouter API request body.\n * @see https://openrouter.ai/docs/guides/routing/provider-selection\n */\nexport interface OpenRouterRouting {\n\t/** Whether to allow backup providers to serve requests. Default: true. */\n\tallow_fallbacks?: boolean;\n\t/** Whether to filter providers to only those that support all parameters in the request. Default: false. */\n\trequire_parameters?: boolean;\n\t/** Data collection setting. \"allow\" (default): allow providers that may store/train on data. \"deny\": only use providers that don't collect user data. */\n\tdata_collection?: \"deny\" | \"allow\";\n\t/** Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. */\n\tzdr?: boolean;\n\t/** Whether to restrict routing to only models that allow text distillation. */\n\tenforce_distillable_text?: boolean;\n\t/** An ordered list of provider names/slugs to try in sequence, falling back to the next if unavailable. */\n\torder?: string[];\n\t/** List of provider names/slugs to exclusively allow for this request. */\n\tonly?: string[];\n\t/** List of provider names/slugs to skip for this request. */\n\tignore?: string[];\n\t/** A list of quantization levels to filter providers by (e.g., [\"fp16\", \"bf16\", \"fp8\", \"fp6\", \"int8\", \"int4\", \"fp4\", \"fp32\"]). */\n\tquantizations?: string[];\n\t/** Sorting strategy. Can be a string (e.g., \"price\", \"throughput\", \"latency\") or an object with `by` and `partition`. */\n\tsort?:\n\t\t| string\n\t\t| {\n\t\t\t\t/** The sorting metric: \"price\", \"throughput\", \"latency\". */\n\t\t\t\tby?: string;\n\t\t\t\t/** Partitioning strategy: \"model\" (default) or \"none\". */\n\t\t\t\tpartition?: string | null;\n\t\t };\n\t/** Maximum price per million tokens (USD). */\n\tmax_price?: {\n\t\t/** Price per million prompt tokens. */\n\t\tprompt?: number | string;\n\t\t/** Price per million completion tokens. */\n\t\tcompletion?: number | string;\n\t\t/** Price per image. */\n\t\timage?: number | string;\n\t\t/** Price per audio unit. */\n\t\taudio?: number | string;\n\t\t/** Price per request. */\n\t\trequest?: number | string;\n\t};\n\t/** Preferred minimum throughput (tokens/second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. 
*/\n\tpreferred_min_throughput?:\n\t\t| number\n\t\t| {\n\t\t\t\t/** Minimum tokens/second at the 50th percentile. */\n\t\t\t\tp50?: number;\n\t\t\t\t/** Minimum tokens/second at the 75th percentile. */\n\t\t\t\tp75?: number;\n\t\t\t\t/** Minimum tokens/second at the 90th percentile. */\n\t\t\t\tp90?: number;\n\t\t\t\t/** Minimum tokens/second at the 99th percentile. */\n\t\t\t\tp99?: number;\n\t\t };\n\t/** Preferred maximum latency (seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. */\n\tpreferred_max_latency?:\n\t\t| number\n\t\t| {\n\t\t\t\t/** Maximum latency in seconds at the 50th percentile. */\n\t\t\t\tp50?: number;\n\t\t\t\t/** Maximum latency in seconds at the 75th percentile. */\n\t\t\t\tp75?: number;\n\t\t\t\t/** Maximum latency in seconds at the 90th percentile. */\n\t\t\t\tp90?: number;\n\t\t\t\t/** Maximum latency in seconds at the 99th percentile. */\n\t\t\t\tp99?: number;\n\t\t };\n}\n\n/**\n * Vercel AI Gateway routing preferences.\n * Controls which upstream providers the gateway routes requests to.\n * @see https://vercel.com/docs/ai-gateway/models-and-providers/provider-options\n */\nexport interface VercelGatewayRouting {\n\t/** List of provider slugs to exclusively use for this request (e.g., [\"bedrock\", \"anthropic\"]). */\n\tonly?: string[];\n\t/** List of provider slugs to try in order (e.g., [\"anthropic\", \"openai\"]). */\n\torder?: string[];\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for OpenAI-compatible APIs. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\"\n\t\t? OpenAICompletionsCompat\n\t\t: TApi extends \"openai-responses\"\n\t\t\t? OpenAIResponsesCompat\n\t\t\t: TApi extends \"anthropic-messages\"\n\t\t\t\t? AnthropicMessagesCompat\n\t\t\t\t: never;\n}\n"]}
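The source map above embeds the package's `types.ts` verbatim, which documents the streaming contract: a `StreamFunction` returns an `AssistantMessageEventStream` that must terminate with a `done` or an `error` event rather than throwing. The following consumer is a minimal sketch only; the root import path, the assumption that the event stream is async-iterable, and the `collectText` helper itself are not confirmed by this diff.

// Sketch only. Assumptions: these types are re-exported from the package
// root, and AssistantMessageEventStream is async-iterable.
import type { AssistantMessage, Context, Model, SimpleStreamOptions, StreamFunction } from "@mariozechner/pi-ai";

async function collectText(
	stream: StreamFunction<"openai-completions">,
	model: Model<"openai-completions">,
	context: Context,
): Promise<AssistantMessage> {
	const options: SimpleStreamOptions = {
		timeoutMs: 120_000, // HTTP timeout for SDK-backed providers
		maxRetries: 2, // client-side retries where the SDK supports them
		maxRetryDelayMs: 60_000, // fail fast when the server requests a longer wait
		headers: { "x-example": "hypothetical" }, // merged with provider defaults
		onPayload: (payload) => payload, // inspect; returning the payload (or undefined) leaves it unchanged
	};
	for await (const event of stream(model, context, options)) {
		if (event.type === "text_delta") process.stdout.write(event.delta);
		if (event.type === "done") return event.message; // reason: "stop" | "length" | "toolUse"
		if (event.type === "error") return event.error; // stopReason "error" or "aborted"
	}
	throw new Error("stream ended without a terminal event"); // unreachable per the contract
}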
@@ -5,13 +5,9 @@
  * for OAuth-based providers:
  * - Anthropic (Claude Pro/Max)
  * - GitHub Copilot
- * - Google Cloud Code Assist (Gemini CLI)
- * - Antigravity (Gemini 3, Claude, GPT-OSS via Google Cloud)
  */
 export { anthropicOAuthProvider, loginAnthropic, refreshAnthropicToken } from "./anthropic.js";
 export { getGitHubCopilotBaseUrl, githubCopilotOAuthProvider, loginGitHubCopilot, normalizeDomain, refreshGitHubCopilotToken, } from "./github-copilot.js";
-export { antigravityOAuthProvider, loginAntigravity, refreshAntigravityToken } from "./google-antigravity.js";
-export { geminiCliOAuthProvider, loginGeminiCli, refreshGoogleCloudToken } from "./google-gemini-cli.js";
 export { loginOpenAICodex, openaiCodexOAuthProvider, refreshOpenAICodexToken } from "./openai-codex.js";
 export * from "./types.js";
 import type { OAuthCredentials, OAuthProviderId, OAuthProviderInfo, OAuthProviderInterface } from "./types.js";
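The hunk above removes the Google Cloud Code Assist (Gemini CLI) and Antigravity OAuth exports from the barrel file, leaving Anthropic, GitHub Copilot, and OpenAI Codex built in. The registry functions visible in the source map below (`registerOAuthProvider`, `getOAuthProvider`) still let a consumer plug a removed provider back in. A hedged sketch follows; the import path, the provider id, and every member of the stub beyond what the registry code demonstrably uses are assumptions.

// Hypothetical custom provider standing in for the removed Gemini CLI one.
// Only the registerOAuthProvider/getOAuthProvider signatures are confirmed
// by the source map below; everything else here is assumed.
import { getOAuthProvider, registerOAuthProvider } from "@mariozechner/pi-ai";
import type { OAuthCredentials, OAuthProviderId, OAuthProviderInterface } from "@mariozechner/pi-ai";

const customGeminiCli = {
	id: "google-gemini-cli", // id value is an assumption
	name: "Google Cloud Code Assist (custom)",
	refreshToken: async (creds: OAuthCredentials) => creds, // wire a real token refresh here
	getApiKey: (creds: OAuthCredentials) => String((creds as any).access ?? ""), // field name assumed
} as unknown as OAuthProviderInterface; // the real interface may require more members

registerOAuthProvider(customGeminiCli);
// cast: the OAuthProviderId union may no longer include this value
console.log(getOAuthProvider("google-gemini-cli" as OAuthProviderId)?.name);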
@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/utils/oauth/index.ts"],"names":[],"mappings":"AAAA
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/utils/oauth/index.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAGH,OAAO,EAAE,sBAAsB,EAAE,cAAc,EAAE,qBAAqB,EAAE,MAAM,gBAAgB,CAAC;AAE/F,OAAO,EACN,uBAAuB,EACvB,0BAA0B,EAC1B,kBAAkB,EAClB,eAAe,EACf,yBAAyB,GACzB,MAAM,qBAAqB,CAAC;AAE7B,OAAO,EAAE,gBAAgB,EAAE,wBAAwB,EAAE,uBAAuB,EAAE,MAAM,mBAAmB,CAAC;AAExG,cAAc,YAAY,CAAC;AAS3B,OAAO,KAAK,EAAE,gBAAgB,EAAE,eAAe,EAAE,iBAAiB,EAAE,sBAAsB,EAAE,MAAM,YAAY,CAAC;AAY/G;;GAEG;AACH,wBAAgB,gBAAgB,CAAC,EAAE,EAAE,eAAe,GAAG,sBAAsB,GAAG,SAAS,CAExF;AAED;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,QAAQ,EAAE,sBAAsB,GAAG,IAAI,CAE5E;AAED;;;;;GAKG;AACH,wBAAgB,uBAAuB,CAAC,EAAE,EAAE,MAAM,GAAG,IAAI,CAOxD;AAED;;GAEG;AACH,wBAAgB,mBAAmB,IAAI,IAAI,CAK1C;AAED;;GAEG;AACH,wBAAgB,iBAAiB,IAAI,sBAAsB,EAAE,CAE5D;AAED;;GAEG;AACH,wBAAgB,wBAAwB,IAAI,iBAAiB,EAAE,CAM9D;AAMD;;;GAGG;AACH,wBAAsB,iBAAiB,CACtC,UAAU,EAAE,eAAe,EAC3B,WAAW,EAAE,gBAAgB,GAC3B,OAAO,CAAC,gBAAgB,CAAC,CAM3B;AAED;;;;;;GAMG;AACH,wBAAsB,cAAc,CACnC,UAAU,EAAE,eAAe,EAC3B,WAAW,EAAE,MAAM,CAAC,MAAM,EAAE,gBAAgB,CAAC,GAC3C,OAAO,CAAC;IAAE,cAAc,EAAE,gBAAgB,CAAC;IAAC,MAAM,EAAE,MAAM,CAAA;CAAE,GAAG,IAAI,CAAC,CAsBtE","sourcesContent":["/**\n * OAuth credential management for AI providers.\n *\n * This module handles login, token refresh, and credential storage\n * for OAuth-based providers:\n * - Anthropic (Claude Pro/Max)\n * - GitHub Copilot\n */\n\n// Anthropic\nexport { anthropicOAuthProvider, loginAnthropic, refreshAnthropicToken } from \"./anthropic.js\";\n// GitHub Copilot\nexport {\n\tgetGitHubCopilotBaseUrl,\n\tgithubCopilotOAuthProvider,\n\tloginGitHubCopilot,\n\tnormalizeDomain,\n\trefreshGitHubCopilotToken,\n} from \"./github-copilot.js\";\n// OpenAI Codex (ChatGPT OAuth)\nexport { loginOpenAICodex, openaiCodexOAuthProvider, refreshOpenAICodexToken } from \"./openai-codex.js\";\n\nexport * from \"./types.js\";\n\n// ============================================================================\n// Provider Registry\n// ============================================================================\n\nimport { anthropicOAuthProvider } from \"./anthropic.js\";\nimport { githubCopilotOAuthProvider } from \"./github-copilot.js\";\nimport { openaiCodexOAuthProvider } from \"./openai-codex.js\";\nimport type { OAuthCredentials, OAuthProviderId, OAuthProviderInfo, OAuthProviderInterface } from \"./types.js\";\n\nconst BUILT_IN_OAUTH_PROVIDERS: OAuthProviderInterface[] = [\n\tanthropicOAuthProvider,\n\tgithubCopilotOAuthProvider,\n\topenaiCodexOAuthProvider,\n];\n\nconst oauthProviderRegistry = new Map<string, OAuthProviderInterface>(\n\tBUILT_IN_OAUTH_PROVIDERS.map((provider) => [provider.id, provider]),\n);\n\n/**\n * Get an OAuth provider by ID\n */\nexport function getOAuthProvider(id: OAuthProviderId): OAuthProviderInterface | undefined {\n\treturn oauthProviderRegistry.get(id);\n}\n\n/**\n * Register a custom OAuth provider\n */\nexport function registerOAuthProvider(provider: OAuthProviderInterface): void {\n\toauthProviderRegistry.set(provider.id, provider);\n}\n\n/**\n * Unregister an OAuth provider.\n *\n * If the provider is built-in, restores the built-in implementation.\n * Custom providers are removed completely.\n */\nexport function unregisterOAuthProvider(id: string): void {\n\tconst builtInProvider = BUILT_IN_OAUTH_PROVIDERS.find((provider) => provider.id === id);\n\tif (builtInProvider) {\n\t\toauthProviderRegistry.set(id, builtInProvider);\n\t\treturn;\n\t}\n\toauthProviderRegistry.delete(id);\n}\n\n/**\n * Reset OAuth providers to built-ins.\n */\nexport function resetOAuthProviders(): void {\n\toauthProviderRegistry.clear();\n\tfor (const provider of BUILT_IN_OAUTH_PROVIDERS) {\n\t\toauthProviderRegistry.set(provider.id, provider);\n\t}\n}\n\n/**\n * Get all registered OAuth providers\n */\nexport function getOAuthProviders(): OAuthProviderInterface[] {\n\treturn Array.from(oauthProviderRegistry.values());\n}\n\n/**\n * @deprecated Use getOAuthProviders() which returns OAuthProviderInterface[]\n */\nexport function getOAuthProviderInfoList(): OAuthProviderInfo[] {\n\treturn getOAuthProviders().map((p) => ({\n\t\tid: p.id,\n\t\tname: p.name,\n\t\tavailable: true,\n\t}));\n}\n\n// ============================================================================\n// High-level API (uses provider registry)\n// ============================================================================\n\n/**\n * Refresh token for any OAuth provider.\n * @deprecated Use getOAuthProvider(id).refreshToken() instead\n */\nexport async function refreshOAuthToken(\n\tproviderId: OAuthProviderId,\n\tcredentials: OAuthCredentials,\n): Promise<OAuthCredentials> {\n\tconst provider = getOAuthProvider(providerId);\n\tif (!provider) {\n\t\tthrow new Error(`Unknown OAuth provider: ${providerId}`);\n\t}\n\treturn provider.refreshToken(credentials);\n}\n\n/**\n * Get API key for a provider from OAuth credentials.\n * Automatically refreshes expired tokens.\n *\n * @returns API key string and updated credentials, or null if no credentials\n * @throws Error if refresh fails\n */\nexport async function getOAuthApiKey(\n\tproviderId: OAuthProviderId,\n\tcredentials: Record<string, OAuthCredentials>,\n): Promise<{ newCredentials: OAuthCredentials; apiKey: string } | null> {\n\tconst provider = getOAuthProvider(providerId);\n\tif (!provider) {\n\t\tthrow new Error(`Unknown OAuth provider: ${providerId}`);\n\t}\n\n\tlet creds = credentials[providerId];\n\tif (!creds) {\n\t\treturn null;\n\t}\n\n\t// Refresh if expired\n\tif (Date.now() >= creds.expires) {\n\t\ttry {\n\t\t\tcreds = await provider.refreshToken(creds);\n\t\t} catch (_error) {\n\t\t\tthrow new Error(`Failed to refresh OAuth token for ${providerId}`);\n\t\t}\n\t}\n\n\tconst apiKey = provider.getApiKey(creds);\n\treturn { newCredentials: creds, apiKey };\n}\n"]}
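The replacement map carries the trimmed `index.ts` source verbatim, including the high-level `getOAuthApiKey` flow: look up stored credentials by provider id, refresh them once `Date.now()` passes `expires`, then derive the key via the provider's `getApiKey`. A usage sketch follows, assuming the helper is re-exported from the package root and that "anthropic" is the built-in Anthropic provider id; `resolveAnthropicKey` is a hypothetical wrapper.

// Sketch of the credential flow implemented above. The import path and the
// "anthropic" provider id are assumptions; the helper's signature is taken
// from the source map.
import { getOAuthApiKey } from "@mariozechner/pi-ai";
import type { OAuthCredentials } from "@mariozechner/pi-ai";

async function resolveAnthropicKey(
	store: Record<string, OAuthCredentials>, // normally loaded from credential storage
): Promise<string | undefined> {
	const result = await getOAuthApiKey("anthropic", store);
	if (!result) return undefined; // no stored credentials for this provider
	store["anthropic"] = result.newCredentials; // persist in case the token was refreshed
	return result.apiKey;
}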