lsd-pi 1.1.9 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/resources/extensions/slash-commands/context.js +15 -8
- package/dist/resources/extensions/slash-commands/index.js +2 -0
- package/dist/resources/extensions/slash-commands/init.js +47 -0
- package/dist/resources/extensions/slash-commands/plan.js +241 -54
- package/dist/resources/extensions/slash-commands/tools.js +47 -21
- package/dist/resources/extensions/subagent/index.js +5 -10
- package/dist/startup-model-validation.d.ts +1 -1
- package/package.json +1 -1
- package/packages/pi-agent-core/dist/types.d.ts +2 -1
- package/packages/pi-agent-core/dist/types.d.ts.map +1 -1
- package/packages/pi-agent-core/dist/types.js.map +1 -1
- package/packages/pi-agent-core/src/types.ts +2 -1
- package/packages/pi-ai/dist/providers/amazon-bedrock.js +10 -3
- package/packages/pi-ai/dist/providers/amazon-bedrock.js.map +1 -1
- package/packages/pi-ai/dist/providers/anthropic-shared.d.ts +1 -1
- package/packages/pi-ai/dist/providers/anthropic-shared.d.ts.map +1 -1
- package/packages/pi-ai/dist/providers/anthropic-shared.js +4 -1
- package/packages/pi-ai/dist/providers/anthropic-shared.js.map +1 -1
- package/packages/pi-ai/dist/providers/anthropic.d.ts +2 -2
- package/packages/pi-ai/dist/providers/anthropic.d.ts.map +1 -1
- package/packages/pi-ai/dist/providers/anthropic.js +2 -2
- package/packages/pi-ai/dist/providers/anthropic.js.map +1 -1
- package/packages/pi-ai/dist/providers/azure-openai-responses.d.ts.map +1 -1
- package/packages/pi-ai/dist/providers/azure-openai-responses.js +2 -1
- package/packages/pi-ai/dist/providers/azure-openai-responses.js.map +1 -1
- package/packages/pi-ai/dist/providers/google-gemini-cli.js +2 -0
- package/packages/pi-ai/dist/providers/google-gemini-cli.js.map +1 -1
- package/packages/pi-ai/dist/providers/google-vertex.js +2 -0
- package/packages/pi-ai/dist/providers/google-vertex.js.map +1 -1
- package/packages/pi-ai/dist/providers/google.js +2 -0
- package/packages/pi-ai/dist/providers/google.js.map +1 -1
- package/packages/pi-ai/dist/providers/openai-codex-responses.d.ts.map +1 -1
- package/packages/pi-ai/dist/providers/openai-codex-responses.js +2 -1
- package/packages/pi-ai/dist/providers/openai-codex-responses.js.map +1 -1
- package/packages/pi-ai/dist/providers/openai-completions.d.ts.map +1 -1
- package/packages/pi-ai/dist/providers/openai-completions.js +2 -1
- package/packages/pi-ai/dist/providers/openai-completions.js.map +1 -1
- package/packages/pi-ai/dist/providers/openai-responses.d.ts.map +1 -1
- package/packages/pi-ai/dist/providers/openai-responses.js +2 -1
- package/packages/pi-ai/dist/providers/openai-responses.js.map +1 -1
- package/packages/pi-ai/dist/providers/simple-options.d.ts +1 -1
- package/packages/pi-ai/dist/providers/simple-options.d.ts.map +1 -1
- package/packages/pi-ai/dist/providers/simple-options.js +6 -2
- package/packages/pi-ai/dist/providers/simple-options.js.map +1 -1
- package/packages/pi-ai/dist/types.d.ts +1 -1
- package/packages/pi-ai/dist/types.d.ts.map +1 -1
- package/packages/pi-ai/dist/types.js.map +1 -1
- package/packages/pi-ai/src/providers/amazon-bedrock.ts +11 -4
- package/packages/pi-ai/src/providers/anthropic-shared.ts +5 -2
- package/packages/pi-ai/src/providers/anthropic.ts +2 -2
- package/packages/pi-ai/src/providers/azure-openai-responses.ts +2 -1
- package/packages/pi-ai/src/providers/google-gemini-cli.ts +3 -1
- package/packages/pi-ai/src/providers/google-vertex.ts +3 -1
- package/packages/pi-ai/src/providers/google.ts +3 -1
- package/packages/pi-ai/src/providers/openai-codex-responses.ts +2 -1
- package/packages/pi-ai/src/providers/openai-completions.ts +2 -1
- package/packages/pi-ai/src/providers/openai-responses.ts +2 -1
- package/packages/pi-ai/src/providers/simple-options.ts +5 -3
- package/packages/pi-ai/src/types.ts +1 -1
- package/packages/pi-coding-agent/dist/cli/args.js +1 -1
- package/packages/pi-coding-agent/dist/cli/args.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/agent-session.d.ts +10 -0
- package/packages/pi-coding-agent/dist/core/agent-session.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/core/agent-session.js +57 -20
- package/packages/pi-coding-agent/dist/core/agent-session.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/classifier-service.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/core/classifier-service.js +34 -61
- package/packages/pi-coding-agent/dist/core/classifier-service.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/compaction/utils.d.ts +1 -1
- package/packages/pi-coding-agent/dist/core/compaction/utils.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/core/compaction/utils.js +3 -1
- package/packages/pi-coding-agent/dist/core/compaction/utils.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/resource-loader-lsd-md.test.js +59 -7
- package/packages/pi-coding-agent/dist/core/resource-loader-lsd-md.test.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/resource-loader.js +4 -4
- package/packages/pi-coding-agent/dist/core/resource-loader.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/sdk.d.ts +1 -1
- package/packages/pi-coding-agent/dist/core/sdk.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/core/sdk.js +19 -20
- package/packages/pi-coding-agent/dist/core/sdk.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/sdk.test.js +80 -0
- package/packages/pi-coding-agent/dist/core/sdk.test.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/settings-manager.d.ts +15 -5
- package/packages/pi-coding-agent/dist/core/settings-manager.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/core/settings-manager.js +31 -5
- package/packages/pi-coding-agent/dist/core/settings-manager.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/system-prompt.d.ts +6 -1
- package/packages/pi-coding-agent/dist/core/system-prompt.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/core/system-prompt.js +28 -68
- package/packages/pi-coding-agent/dist/core/system-prompt.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/tools/bash-interceptor.js +1 -1
- package/packages/pi-coding-agent/dist/core/tools/bash-interceptor.js.map +1 -1
- package/packages/pi-coding-agent/dist/core/tools/bash-interceptor.test.js +5 -0
- package/packages/pi-coding-agent/dist/core/tools/bash-interceptor.test.js.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/bash-execution.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/bash-execution.js +28 -0
- package/packages/pi-coding-agent/dist/modes/interactive/components/bash-execution.js.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/settings-selector.d.ts +8 -0
- package/packages/pi-coding-agent/dist/modes/interactive/components/settings-selector.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/settings-selector.js +44 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/settings-selector.js.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/thinking-selector.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/thinking-selector.js +1 -0
- package/packages/pi-coding-agent/dist/modes/interactive/components/thinking-selector.js.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/tool-execution.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/components/tool-execution.js +36 -0
- package/packages/pi-coding-agent/dist/modes/interactive/components/tool-execution.js.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/interactive-mode.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/interactive-mode.js +41 -5
- package/packages/pi-coding-agent/dist/modes/interactive/interactive-mode.js.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/theme/theme.d.ts +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/theme/theme.d.ts.map +1 -1
- package/packages/pi-coding-agent/dist/modes/interactive/theme/theme.js +2 -0
- package/packages/pi-coding-agent/dist/modes/interactive/theme/theme.js.map +1 -1
- package/packages/pi-coding-agent/package.json +1 -1
- package/packages/pi-coding-agent/src/cli/args.ts +1 -1
- package/packages/pi-coding-agent/src/core/agent-session.ts +62 -19
- package/packages/pi-coding-agent/src/core/classifier-service.ts +35 -63
- package/packages/pi-coding-agent/src/core/compaction/utils.ts +3 -1
- package/packages/pi-coding-agent/src/core/resource-loader-lsd-md.test.ts +67 -7
- package/packages/pi-coding-agent/src/core/resource-loader.ts +4 -4
- package/packages/pi-coding-agent/src/core/sdk.test.ts +100 -0
- package/packages/pi-coding-agent/src/core/sdk.ts +24 -21
- package/packages/pi-coding-agent/src/core/settings-manager.ts +42 -8
- package/packages/pi-coding-agent/src/core/system-prompt.ts +39 -82
- package/packages/pi-coding-agent/src/core/tools/bash-interceptor.test.ts +6 -0
- package/packages/pi-coding-agent/src/core/tools/bash-interceptor.ts +1 -1
- package/packages/pi-coding-agent/src/modes/interactive/components/bash-execution.ts +26 -0
- package/packages/pi-coding-agent/src/modes/interactive/components/settings-selector.ts +53 -1
- package/packages/pi-coding-agent/src/modes/interactive/components/thinking-selector.ts +1 -0
- package/packages/pi-coding-agent/src/modes/interactive/components/tool-execution.ts +41 -0
- package/packages/pi-coding-agent/src/modes/interactive/interactive-mode.ts +50 -7
- package/packages/pi-coding-agent/src/modes/interactive/theme/theme.ts +3 -1
- package/pkg/dist/modes/interactive/theme/theme.d.ts +1 -1
- package/pkg/dist/modes/interactive/theme/theme.d.ts.map +1 -1
- package/pkg/dist/modes/interactive/theme/theme.js +2 -0
- package/pkg/dist/modes/interactive/theme/theme.js.map +1 -1
- package/pkg/package.json +1 -1
- package/src/resources/extensions/slash-commands/context.ts +15 -8
- package/src/resources/extensions/slash-commands/index.ts +2 -0
- package/src/resources/extensions/slash-commands/init.ts +55 -0
- package/src/resources/extensions/slash-commands/plan.ts +277 -55
- package/src/resources/extensions/slash-commands/tools.ts +47 -21
- package/src/resources/extensions/subagent/index.ts +5 -10

package/packages/pi-ai/dist/providers/openai-responses.js
@@ -71,7 +71,8 @@ export const streamSimpleOpenAIResponses = (model, context, options) => {
         throw new Error(`No API key for provider: ${model.provider}`);
     }
     const base = buildBaseOptions(model, options, apiKey);
-    const reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);
+    const rawReasoning = options?.reasoning === "adaptive" ? "high" : options?.reasoning;
+    const reasoningEffort = supportsXhigh(model) ? rawReasoning : clampReasoning(rawReasoning);
     return streamOpenAIResponses(model, context, {
         ...base,
         reasoningEffort,

package/packages/pi-ai/dist/providers/openai-responses.js.map
@@ -1 +1 @@
[regenerated sourcemap: mappings and embedded sourcesContent updated to match the openai-responses.js change above]
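
Net effect of the change above: a reasoning level of "adaptive" is mapped to "high" before the request options are built, and clampReasoning strips it for models without xhigh support. A minimal illustrative sketch of that selection logic (pickReasoningEffort is a made-up name, not an export of this package):

type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";

// Mirrors the new streamSimpleOpenAIResponses logic shown in the hunk above (sketch only).
function pickReasoningEffort(
    reasoning: ThinkingLevel | undefined,
    modelSupportsXhigh: boolean,
): Exclude<ThinkingLevel, "adaptive"> | undefined {
    const raw = reasoning === "adaptive" ? "high" : reasoning;
    // clampReasoning collapses "xhigh" to "high" for models that cannot use it.
    const clamp = (e: typeof raw) => (e === "xhigh" ? "high" : e);
    return modelSupportsXhigh ? raw : clamp(raw);
}

pickReasoningEffort("adaptive", true);  // "high"
pickReasoningEffort("xhigh", false);    // "high"
pickReasoningEffort("low", false);      // "low"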

package/packages/pi-ai/dist/providers/simple-options.d.ts
@@ -1,6 +1,6 @@
 import type { Api, Model, SimpleStreamOptions, StreamOptions, ThinkingBudgets, ThinkingLevel } from "../types.js";
 export declare function buildBaseOptions(model: Model<Api>, options?: SimpleStreamOptions, apiKey?: string): StreamOptions;
-export declare function clampReasoning(effort: ThinkingLevel | undefined): Exclude<ThinkingLevel, "xhigh"> | undefined;
+export declare function clampReasoning(effort: ThinkingLevel | undefined): Exclude<ThinkingLevel, "xhigh" | "adaptive"> | undefined;
 export declare function adjustMaxTokensForThinking(baseMaxTokens: number, modelMaxTokens: number, reasoningLevel: ThinkingLevel, customBudgets?: ThinkingBudgets): {
     maxTokens: number;
     thinkingBudget: number;

package/packages/pi-ai/dist/providers/simple-options.d.ts.map
@@ -1 +1 @@
[regenerated declaration map: mappings updated for the new clampReasoning return type]

package/packages/pi-ai/dist/providers/simple-options.js
@@ -13,7 +13,11 @@ export function buildBaseOptions(model, options, apiKey) {
     };
 }
 export function clampReasoning(effort) {
-    [previous one-line clampReasoning body; not captured in this extract]
+    if (effort === "xhigh")
+        return "high";
+    if (effort === "adaptive")
+        return "high";
+    return effort;
 }
 export function adjustMaxTokensForThinking(baseMaxTokens, modelMaxTokens, reasoningLevel, customBudgets) {
     const defaultBudgets = {

@@ -24,7 +28,7 @@ export function adjustMaxTokensForThinking(baseMaxTokens, modelMaxTokens, reason
     };
     const budgets = { ...defaultBudgets, ...customBudgets };
     const minOutputTokens = 1024;
-    const level = clampReasoning(reasoningLevel);
+    const level = clampReasoning(reasoningLevel) ?? "high";
     let thinkingBudget = budgets[level];
     const maxTokens = Math.min(baseMaxTokens + thinkingBudget, modelMaxTokens);
     if (maxTokens <= thinkingBudget) {

package/packages/pi-ai/dist/providers/simple-options.js.map
@@ -1 +1 @@
[regenerated sourcemap: mappings and embedded sourcesContent updated for the new clampReasoning implementation]
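
Because the clamped level now falls back to "high", both "adaptive" and "xhigh" end up using the high thinking budget when max tokens are sized. A small sketch of that lookup, using the default budgets from simple-options.ts (minimal 1024, low 2048, medium 8192, high 16384); thinkingBudgetFor is an illustrative helper, not part of the package:

type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";
type BudgetLevel = Exclude<ThinkingLevel, "xhigh" | "adaptive">;

// Default per-level budgets used by adjustMaxTokensForThinking.
const defaultBudgets: Record<BudgetLevel, number> = {
    minimal: 1024,
    low: 2048,
    medium: 8192,
    high: 16384,
};

// Clamp, then fall back to "high", matching clampReasoning(reasoningLevel) ?? "high".
function thinkingBudgetFor(level?: ThinkingLevel): number {
    const clamped = level === "xhigh" || level === "adaptive" ? "high" : level;
    return defaultBudgets[clamped ?? "high"];
}

thinkingBudgetFor("adaptive"); // 16384
thinkingBudgetFor("medium");   // 8192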

package/packages/pi-ai/dist/types.d.ts
@@ -4,7 +4,7 @@ export type KnownApi = "openai-completions" | "mistral-conversations" | "openai-
 export type Api = KnownApi | (string & {});
 export type KnownProvider = "amazon-bedrock" | "anthropic" | "anthropic-vertex" | "google" | "google-gemini-cli" | "google-antigravity" | "google-vertex" | "openai" | "azure-openai-responses" | "openai-codex" | "github-copilot" | "xai" | "groq" | "cerebras" | "openrouter" | "vercel-ai-gateway" | "zai" | "mistral" | "minimax" | "minimax-cn" | "huggingface" | "opencode" | "opencode-go" | "kimi-coding" | "alibaba-coding-plan" | "ollama-cloud";
 export type Provider = KnownProvider | string;
-export type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh";
+export type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";
 /** Token budgets for each thinking level (token-based providers only) */
 export interface ThinkingBudgets {
     minimal?: number;

package/packages/pi-ai/dist/types.d.ts.map
@@ -1 +1 @@
[regenerated declaration map: mappings updated for the widened ThinkingLevel union]
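
With "adaptive" added to ThinkingLevel, it can be passed wherever a reasoning level is accepted, and each provider adapter decides how to realize it (the openai-responses hunk above maps it to "high"). A hypothetical call-site sketch with a local stand-in for SimpleStreamOptions:

type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";

// Local stand-in for SimpleStreamOptions; only the fields used here.
interface ReasoningOptions {
    reasoning?: ThinkingLevel;
    maxTokens?: number;
}

const options: ReasoningOptions = {
    reasoning: "adaptive", // newly valid as of 1.2.0
    maxTokens: 8192,
};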

package/packages/pi-ai/dist/types.js.map
@@ -1 +1 @@
[regenerated sourcemap: embedded sourcesContent updated with the new "adaptive" ThinkingLevel; the diff extract cuts off partway through this entry]
*/\n\tsupportsServiceTier?: boolean;\n\t/**\n\t * Approximate characters per token for this model.\n\t * Used as a fallback when an accurate tokenizer is unavailable.\n\t * If omitted, the provider-level default is used.\n\t */\n\tcharsPerToken?: number;\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for OpenAI-compatible APIs. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\"\n\t\t? OpenAICompletionsCompat\n\t\t: TApi extends \"openai-responses\"\n\t\t\t? OpenAIResponsesCompat\n\t\t\t: never;\n\t/**\n\t * Provider-agnostic capability declarations for this model.\n\t * Read these fields instead of pattern-matching on model IDs or provider names.\n\t */\n\tcapabilities?: ModelCapabilities;\n}\n"]}
|
|
1
|
+
{"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"","sourcesContent":["import type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type { AssistantMessageEventStream } from \"./utils/event-stream.js\";\n\nexport type KnownApi =\n\t| \"openai-completions\"\n\t| \"mistral-conversations\"\n\t| \"openai-responses\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex-responses\"\n\t| \"anthropic-messages\"\n\t| \"anthropic-vertex\"\n\t| \"bedrock-converse-stream\"\n\t| \"google-generative-ai\"\n\t| \"google-gemini-cli\"\n\t| \"google-vertex\";\n\nexport type Api = KnownApi | (string & {});\n\nexport type KnownProvider =\n\t| \"amazon-bedrock\"\n\t| \"anthropic\"\n\t| \"anthropic-vertex\"\n\t| \"google\"\n\t| \"google-gemini-cli\"\n\t| \"google-antigravity\"\n\t| \"google-vertex\"\n\t| \"openai\"\n\t| \"azure-openai-responses\"\n\t| \"openai-codex\"\n\t| \"github-copilot\"\n\t| \"xai\"\n\t| \"groq\"\n\t| \"cerebras\"\n\t| \"openrouter\"\n\t| \"vercel-ai-gateway\"\n\t| \"zai\"\n\t| \"mistral\"\n\t| \"minimax\"\n\t| \"minimax-cn\"\n\t| \"huggingface\"\n\t| \"opencode\"\n\t| \"opencode-go\"\n\t| \"kimi-coding\"\n\t| \"alibaba-coding-plan\"\n\t| \"ollama-cloud\";\nexport type Provider = KnownProvider | string;\n\nexport type ThinkingLevel = \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\" | \"adaptive\";\n\n/** Token budgets for each thinking level (token-based providers only) */\nexport interface ThinkingBudgets {\n\tminimal?: number;\n\tlow?: number;\n\tmedium?: number;\n\thigh?: number;\n}\n\n// Base options all providers share\nexport type CacheRetention = \"none\" | \"short\" | \"long\";\n\nexport type Transport = \"sse\" | \"websocket\" | \"auto\";\n\nexport interface StreamOptions {\n\ttemperature?: number;\n\tmaxTokens?: number;\n\tsignal?: AbortSignal;\n\tapiKey?: string;\n\t/**\n\t * Preferred transport for providers that support multiple transports.\n\t * Providers that do not support this option ignore it.\n\t */\n\ttransport?: Transport;\n\t/**\n\t * Prompt cache retention preference. Providers map this to their supported values.\n\t * Default: \"short\".\n\t */\n\tcacheRetention?: CacheRetention;\n\t/**\n\t * Optional session identifier for providers that support session-based caching.\n\t * Providers can use this to enable prompt caching, request routing, or other\n\t * session-aware features. Ignored by providers that don't support it.\n\t */\n\tsessionId?: string;\n\t/**\n\t * Optional callback for inspecting or replacing provider payloads before sending.\n\t * Return undefined to keep the payload unchanged.\n\t */\n\tonPayload?: (payload: unknown, model: Model<Api>) => unknown | undefined | Promise<unknown | undefined>;\n\t/**\n\t * Optional custom HTTP headers to include in API requests.\n\t * Merged with provider defaults; can override default headers.\n\t * Not supported by all providers (e.g., AWS Bedrock uses SDK auth).\n\t */\n\theaders?: Record<string, string>;\n\t/**\n\t * Maximum delay in milliseconds to wait for a retry when the server requests a long wait.\n\t * If the server's requested delay exceeds this value, the request fails immediately\n\t * with an error containing the requested delay, allowing higher-level retry logic\n\t * to handle it with user visibility.\n\t * Default: 60000 (60 seconds). 
Set to 0 to disable the cap.\n\t */\n\tmaxRetryDelayMs?: number;\n\t/**\n\t * Optional metadata to include in API requests.\n\t * Providers extract the fields they understand and ignore the rest.\n\t * For example, Anthropic uses `user_id` for abuse tracking and rate limiting.\n\t */\n\tmetadata?: Record<string, unknown>;\n}\n\nexport type ProviderStreamOptions = StreamOptions & Record<string, unknown>;\n\n// Unified options with reasoning passed to streamSimple() and completeSimple()\nexport interface SimpleStreamOptions extends StreamOptions {\n\treasoning?: ThinkingLevel;\n\t/** Custom token budgets for thinking levels (token-based providers only) */\n\tthinkingBudgets?: ThinkingBudgets;\n}\n\n// Generic StreamFunction with typed options\nexport type StreamFunction<TApi extends Api = Api, TOptions extends StreamOptions = StreamOptions> = (\n\tmodel: Model<TApi>,\n\tcontext: Context,\n\toptions?: TOptions,\n) => AssistantMessageEventStream;\n\nexport interface TextSignatureV1 {\n\tv: 1;\n\tid: string;\n\tphase?: \"commentary\" | \"final_answer\";\n}\n\nexport interface TextContent {\n\ttype: \"text\";\n\ttext: string;\n\ttextSignature?: string; // e.g., for OpenAI responses, message metadata (legacy id string or TextSignatureV1 JSON)\n}\n\nexport interface ThinkingContent {\n\ttype: \"thinking\";\n\tthinking: string;\n\tthinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID\n\t/** When true, the thinking content was redacted by safety filters. The opaque\n\t * encrypted payload is stored in `thinkingSignature` so it can be passed back\n\t * to the API for multi-turn continuity. */\n\tredacted?: boolean;\n}\n\nexport interface ImageContent {\n\ttype: \"image\";\n\tdata: string; // base64 encoded image data\n\tmimeType: string; // e.g., \"image/jpeg\", \"image/png\"\n}\n\nexport interface ToolCall {\n\ttype: \"toolCall\";\n\tid: string;\n\tname: string;\n\targuments: Record<string, any>;\n\tthoughtSignature?: string; // Google-specific: opaque signature for reusing thought context\n}\n\n/** Server-side tool use (e.g., Anthropic native web search). Executed by the API, not the client. */\nexport interface ServerToolUseContent {\n\ttype: \"serverToolUse\";\n\tid: string;\n\tname: string; // e.g., \"web_search\"\n\tinput: unknown;\n}\n\n/** Result of a server-side tool execution, paired with a ServerToolUseContent by toolUseId. */\nexport interface WebSearchResultContent {\n\ttype: \"webSearchResult\";\n\ttoolUseId: string;\n\t/** Search results or error from the server. Opaque — stored for API replay. */\n\tcontent: unknown;\n}\n\nexport interface Usage {\n\tinput: number;\n\toutput: number;\n\tcacheRead: number;\n\tcacheWrite: number;\n\ttotalTokens: number;\n\tcost: {\n\t\tinput: number;\n\t\toutput: number;\n\t\tcacheRead: number;\n\t\tcacheWrite: number;\n\t\ttotal: number;\n\t};\n}\n\nexport type StopReason = \"stop\" | \"length\" | \"toolUse\" | \"pauseTurn\" | \"error\" | \"aborted\";\n\nexport interface UserMessage {\n\trole: \"user\";\n\tcontent: string | (TextContent | ImageContent)[];\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface AssistantMessage {\n\trole: \"assistant\";\n\tcontent: (TextContent | ThinkingContent | ToolCall | ServerToolUseContent | WebSearchResultContent)[];\n\tapi: Api;\n\tprovider: Provider;\n\tmodel: string;\n\tusage: Usage;\n\tstopReason: StopReason;\n\terrorMessage?: string;\n\t/** Server-requested retry delay in milliseconds (from Retry-After or rate limit headers). 
*/\n\tretryAfterMs?: number;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport interface ToolResultMessage<TDetails = any> {\n\trole: \"toolResult\";\n\ttoolCallId: string;\n\ttoolName: string;\n\tcontent: (TextContent | ImageContent)[]; // Supports text and images\n\tdetails?: TDetails;\n\tisError: boolean;\n\ttimestamp: number; // Unix timestamp in milliseconds\n}\n\nexport type Message = UserMessage | AssistantMessage | ToolResultMessage;\n\nimport type { TSchema } from \"@sinclair/typebox\";\n\nexport interface Tool<TParameters extends TSchema = TSchema> {\n\tname: string;\n\tdescription: string;\n\tparameters: TParameters;\n}\n\nexport interface Context {\n\tsystemPrompt?: string;\n\tmessages: Message[];\n\ttools?: Tool[];\n}\n\nexport type AssistantMessageEvent =\n\t| { type: \"start\"; partial: AssistantMessage }\n\t| { type: \"text_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"text_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"text_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"thinking_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"thinking_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"thinking_end\"; contentIndex: number; content: string; partial: AssistantMessage }\n\t| { type: \"toolcall_start\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"toolcall_delta\"; contentIndex: number; delta: string; partial: AssistantMessage }\n\t| { type: \"toolcall_end\"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage; malformedArguments?: boolean }\n\t| { type: \"server_tool_use\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"web_search_result\"; contentIndex: number; partial: AssistantMessage }\n\t| { type: \"done\"; reason: Extract<StopReason, \"stop\" | \"length\" | \"toolUse\" | \"pauseTurn\">; message: AssistantMessage }\n\t| { type: \"error\"; reason: Extract<StopReason, \"aborted\" | \"error\">; error: AssistantMessage };\n\n/**\n * Compatibility settings for OpenAI-compatible completions APIs.\n * Use this to override URL-based auto-detection for custom providers.\n */\nexport interface OpenAICompletionsCompat {\n\t/** Whether the provider supports the `store` field. Default: auto-detected from URL. */\n\tsupportsStore?: boolean;\n\t/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */\n\tsupportsDeveloperRole?: boolean;\n\t/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */\n\tsupportsReasoningEffort?: boolean;\n\t/** Optional mapping from pi-ai reasoning levels to provider/model-specific `reasoning_effort` values. */\n\treasoningEffortMap?: Partial<Record<ThinkingLevel, string>>;\n\t/** Whether the provider supports `stream_options: { include_usage: true }` for token usage in streaming responses. Default: true. */\n\tsupportsUsageInStreaming?: boolean;\n\t/** Which field to use for max tokens. Default: auto-detected from URL. */\n\tmaxTokensField?: \"max_completion_tokens\" | \"max_tokens\";\n\t/** Whether tool results require the `name` field. Default: auto-detected from URL. */\n\trequiresToolResultName?: boolean;\n\t/** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. 
*/\n\trequiresAssistantAfterToolResult?: boolean;\n\t/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */\n\trequiresThinkingAsText?: boolean;\n\t/** Format for reasoning/thinking parameter. \"openai\" uses reasoning_effort, \"zai\" uses thinking: { type: \"enabled\" }, \"qwen\" uses enable_thinking: boolean. Default: \"openai\". */\n\tthinkingFormat?: \"openai\" | \"zai\" | \"qwen\";\n\t/** OpenRouter-specific routing preferences. Only used when baseUrl points to OpenRouter. */\n\topenRouterRouting?: OpenRouterRouting;\n\t/** Vercel AI Gateway routing preferences. Only used when baseUrl points to Vercel AI Gateway. */\n\tvercelGatewayRouting?: VercelGatewayRouting;\n\t/** Whether the provider supports the `strict` field in tool definitions. Default: true. */\n\tsupportsStrictMode?: boolean;\n}\n\n/** Compatibility settings for OpenAI Responses APIs. */\nexport interface OpenAIResponsesCompat {\n\t// Reserved for future use\n}\n\n/**\n * OpenRouter provider routing preferences.\n * Controls which upstream providers OpenRouter routes requests to.\n * @see https://openrouter.ai/docs/provider-routing\n */\nexport interface OpenRouterRouting {\n\t/** List of provider slugs to exclusively use for this request (e.g., [\"amazon-bedrock\", \"anthropic\"]). */\n\tonly?: string[];\n\t/** List of provider slugs to try in order (e.g., [\"anthropic\", \"openai\"]). */\n\torder?: string[];\n}\n\n/**\n * Vercel AI Gateway routing preferences.\n * Controls which upstream providers the gateway routes requests to.\n * @see https://vercel.com/docs/ai-gateway/models-and-providers/provider-options\n */\nexport interface VercelGatewayRouting {\n\t/** List of provider slugs to exclusively use for this request (e.g., [\"bedrock\", \"anthropic\"]). */\n\tonly?: string[];\n\t/** List of provider slugs to try in order (e.g., [\"anthropic\", \"openai\"]). */\n\torder?: string[];\n}\n\n/**\n * Provider-agnostic capability declarations for a model.\n *\n * These fields allow models to self-declare supported features so that call\n * sites can read from metadata rather than pattern-matching on model IDs or\n * provider names. Add fields here as new cross-provider capabilities emerge.\n */\nexport interface ModelCapabilities {\n\t/** Whether the model supports xhigh thinking level. */\n\tsupportsXhigh?: boolean;\n\t/**\n\t * Whether tool call IDs must be included and normalised in tool results for\n\t * this model. Relevant for models deployed cross-provider (e.g. Claude or\n\t * GPT variants via Google APIs) where the host API imposes stricter ID rules.\n\t */\n\trequiresToolCallId?: boolean;\n\t/** Whether OpenAI-style service tiers (priority/flex) apply to this model. 
*/\n\tsupportsServiceTier?: boolean;\n\t/**\n\t * Approximate characters per token for this model.\n\t * Used as a fallback when an accurate tokenizer is unavailable.\n\t * If omitted, the provider-level default is used.\n\t */\n\tcharsPerToken?: number;\n}\n\n// Model interface for the unified model system\nexport interface Model<TApi extends Api> {\n\tid: string;\n\tname: string;\n\tapi: TApi;\n\tprovider: Provider;\n\tbaseUrl: string;\n\treasoning: boolean;\n\tinput: (\"text\" | \"image\")[];\n\tcost: {\n\t\tinput: number; // $/million tokens\n\t\toutput: number; // $/million tokens\n\t\tcacheRead: number; // $/million tokens\n\t\tcacheWrite: number; // $/million tokens\n\t};\n\tcontextWindow: number;\n\tmaxTokens: number;\n\theaders?: Record<string, string>;\n\t/** Compatibility overrides for OpenAI-compatible APIs. If not set, auto-detected from baseUrl. */\n\tcompat?: TApi extends \"openai-completions\"\n\t\t? OpenAICompletionsCompat\n\t\t: TApi extends \"openai-responses\"\n\t\t\t? OpenAIResponsesCompat\n\t\t\t: never;\n\t/**\n\t * Provider-agnostic capability declarations for this model.\n\t * Read these fields instead of pattern-matching on model IDs or provider names.\n\t */\n\tcapabilities?: ModelCapabilities;\n}\n"]}
|
|
@@ -397,8 +397,11 @@ function supportsAdaptiveThinking(modelId: string): boolean {
 function mapThinkingLevelToEffort(
 	level: SimpleStreamOptions["reasoning"],
 	modelId: string,
-): "low" | "medium" | "high" | "max" {
+): "low" | "medium" | "high" | "max" | undefined {
 	switch (level) {
+		case "adaptive":
+			// No effort override — let Claude decide based on request complexity
+			return undefined;
 		case "minimal":
 		case "low":
 			return "low";

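
The hunk above makes `"adaptive"` produce no explicit effort tier at all, while the other levels keep returning a concrete value. A standalone sketch of that shape (not the package's actual export; the levels not shown in the hunk are collapsed into a stand-in `default` branch):

```ts
type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";
type Effort = "low" | "medium" | "high" | "max";

// "adaptive" yields undefined so callers can omit output_config entirely.
function toEffort(level: ThinkingLevel | undefined): Effort | undefined {
	switch (level) {
		case "adaptive":
			return undefined; // let the model decide per request
		case "minimal":
		case "low":
			return "low";
		default:
			// The remaining levels map to medium/high/max in the real code;
			// "high" here is just a placeholder to keep the sketch self-contained.
			return level == null ? undefined : "high";
	}
}

console.log(toEffort("adaptive")); // undefined
console.log(toEffort("low"));      // "low"
```
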
@@ -685,13 +688,17 @@ function buildAdditionalModelRequestFields(
 	}
 
 	if (model.id.includes("anthropic.claude") || model.id.includes("anthropic/claude")) {
+		const adaptiveEffort = supportsAdaptiveThinking(model.id)
+			? mapThinkingLevelToEffort(options.reasoning, model.id)
+			: undefined;
 		const result: Record<string, any> = supportsAdaptiveThinking(model.id)
 			? {
 					thinking: { type: "adaptive" },
-					output_config: { effort:
+					...(adaptiveEffort != null ? { output_config: { effort: adaptiveEffort } } : {}),
 				}
 			: (() => {
 					const defaultBudgets: Record<ThinkingLevel, number> = {
+						adaptive: 16384, // Fallback for old models that don't support adaptive
 						minimal: 1024,
 						low: 2048,
 						medium: 8192,

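
The conditional spread on the new line 697 is what keeps `output_config` out of the Bedrock request entirely when no effort was resolved, instead of sending `effort: undefined`. A small sketch of the pattern in isolation (the field names are taken from the hunk; the interface is illustrative only):

```ts
interface AdaptiveThinkingFields {
	thinking: { type: "adaptive" };
	output_config?: { effort: string };
}

function buildThinkingFields(effort: string | undefined): AdaptiveThinkingFields {
	return {
		thinking: { type: "adaptive" },
		// When effort is undefined the key is simply absent from the object.
		...(effort != null ? { output_config: { effort } } : {}),
	};
}

console.log(buildThinkingFields("high"));    // { thinking: {...}, output_config: { effort: "high" } }
console.log(buildThinkingFields(undefined)); // { thinking: { type: "adaptive" } }
```
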
@@ -699,8 +706,8 @@ function buildAdditionalModelRequestFields(
 						xhigh: 16384, // Claude doesn't support xhigh, clamp to high
 					};
 
-					// Custom budgets override defaults (xhigh not in ThinkingBudgets, use high)
-					const level = options.reasoning === "xhigh" ? "high" : options.reasoning;
+					// Custom budgets override defaults (xhigh/adaptive not in ThinkingBudgets, use high)
+					const level = options.reasoning === "xhigh" || options.reasoning === "adaptive" ? "high" : options.reasoning;
 					const budget = options.thinkingBudgets?.[level] ?? defaultBudgets[options.reasoning];
 
 					return {

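
For models without adaptive support, the budget lookup above aliases both `xhigh` and `adaptive` to the `high` key, since `ThinkingBudgets` has no entries for them, and falls back to the built-in defaults otherwise. A standalone sketch of that lookup (budget numbers copied from the hunks; the `high` default is assumed, as it is not visible here):

```ts
type Level = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";
type Budgets = Partial<Record<"minimal" | "low" | "medium" | "high", number>>;

const defaults: Record<Level, number> = {
	adaptive: 16384, // fallback for models that don't support adaptive
	minimal: 1024,
	low: 2048,
	medium: 8192,
	high: 16384, // assumed; not shown in the hunk
	xhigh: 16384, // clamped to high
};

function resolveBudget(reasoning: Level, custom?: Budgets): number {
	// xhigh/adaptive have no ThinkingBudgets keys, so custom lookups use "high".
	const key = reasoning === "xhigh" || reasoning === "adaptive" ? "high" : reasoning;
	return custom?.[key] ?? defaults[reasoning];
}

console.log(resolveBudget("adaptive"));                 // 16384 (default)
console.log(resolveBudget("adaptive", { high: 4096 })); // 4096 (custom "high" budget wins)
```
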
@@ -157,8 +157,11 @@ export function supportsAdaptiveThinking(modelId: string): boolean {
 	);
 }
 
-export function mapThinkingLevelToEffort(level: string | undefined, modelId: string): AnthropicEffort {
+export function mapThinkingLevelToEffort(level: string | undefined, modelId: string): AnthropicEffort | undefined {
 	switch (level) {
+		case "adaptive":
+			// No effort override — let Claude decide based on request complexity
+			return undefined;
 		case "minimal":
 			return "low";
 		case "low":

@@ -463,7 +466,7 @@ export function buildParams(
 	if (options?.thinkingEnabled && model.reasoning) {
 		if (supportsAdaptiveThinking(model.id)) {
 			params.thinking = { type: "adaptive" };
-			if (options.effort) {
+			if (options.effort != null) {
 				params.output_config = { effort: options.effort };
 			}
 		} else {

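
The switch from a truthiness test to `options.effort != null` matters because `undefined` is now a meaningful value ("let the model decide"), and a strict null check keeps the intent explicit even if a falsy-but-present effort value were ever passed. A tiny illustration (the function names are hypothetical):

```ts
function truthyCheck(effort?: string) {
	return effort ? { output_config: { effort } } : {};
}

function strictCheck(effort?: string) {
	return effort != null ? { output_config: { effort } } : {};
}

// Both omit the field when effort is undefined...
console.log(truthyCheck(undefined)); // {}
console.log(strictCheck(undefined)); // {}
// ...but only the strict check keeps a falsy-yet-present value.
console.log(truthyCheck("")); // {}
console.log(strictCheck("")); // { output_config: { effort: "" } }
```
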
@@ -23,7 +23,7 @@ import {
 
 // Re-export types used by other modules
 export type { AnthropicEffort, AnthropicOptions };
-export { extractRetryAfterMs };
+export { extractRetryAfterMs, supportsAdaptiveThinking };
 
 let _AnthropicClass: typeof Anthropic | undefined;
 async function getAnthropicClass(): Promise<typeof Anthropic> {

@@ -197,7 +197,7 @@ export const streamSimpleAnthropic: StreamFunction<"anthropic-messages", SimpleS
 		return streamAnthropic(model, context, { ...base, thinkingEnabled: false } satisfies AnthropicOptions);
 	}
 
-	// For Opus 4.6 and Sonnet 4.6: use adaptive thinking
+	// For Opus 4.6 and Sonnet 4.6: use adaptive thinking (effort may be undefined to let Claude decide)
 	// For older models: use budget-based thinking
 	if (supportsAdaptiveThinking(model.id)) {
 		const effort = mapThinkingLevelToEffort(options.reasoning, model.id);

@@ -118,7 +118,8 @@ export const streamSimpleAzureOpenAIResponses: StreamFunction<"azure-openai-resp
 	}
 
 	const base = buildBaseOptions(model, options, apiKey);
-	const
+	const rawReasoning = options?.reasoning === "adaptive" ? "high" : options?.reasoning;
+	const reasoningEffort = supportsXhigh(model) ? rawReasoning : clampReasoning(rawReasoning);
 
 	return streamAzureOpenAIResponses(model, context, {
 		...base,

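
This hunk, and the matching Codex/Completions/Responses hunks further down, translate `"adaptive"` to `"high"` before the usual xhigh clamp, because these APIs have no adaptive mode. A self-contained sketch of that two-step resolution (the `supportsXhigh` stub and model ids are made up for illustration):

```ts
type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";

// Stub standing in for real model metadata.
const supportsXhigh = (modelId: string): boolean => modelId.includes("xhigh-capable");

function clamp(level: ThinkingLevel | undefined): Exclude<ThinkingLevel, "xhigh" | "adaptive"> | undefined {
	if (level === "xhigh" || level === "adaptive") return "high";
	return level;
}

function resolveEffort(modelId: string, reasoning?: ThinkingLevel) {
	// Step 1: these APIs have no adaptive mode, so treat "adaptive" as "high".
	const raw = reasoning === "adaptive" ? "high" : reasoning;
	// Step 2: keep "xhigh" only for models that actually support it.
	return supportsXhigh(modelId) ? raw : clamp(raw);
}

console.log(resolveEffort("some-model", "adaptive"));       // "high"
console.log(resolveEffort("some-model", "xhigh"));          // "high"
console.log(resolveEffort("xhigh-capable-model", "xhigh")); // "xhigh"
```
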
@@ -949,7 +949,7 @@ function buildRequest(
 	};
 }
 
-type ClampedThinkingLevel = Exclude<ThinkingLevel, "xhigh">;
+type ClampedThinkingLevel = Exclude<ThinkingLevel, "xhigh" | "adaptive">;
 
 function getGeminiCliThinkingLevel(effort: ClampedThinkingLevel, modelId: string): GoogleThinkingLevel {
 	if (isGemini3ProModel(modelId)) {

@@ -971,5 +971,7 @@ function getGeminiCliThinkingLevel(effort: ClampedThinkingLevel, modelId: string
 			return "MEDIUM";
 		case "high":
 			return "HIGH";
+		default:
+			return "HIGH";
 	}
 }

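
The new `default` branch is a runtime safety net: the clamped type no longer includes `"xhigh"` or `"adaptive"`, but untyped callers could still hand one in, and falling back to `"HIGH"` is safer than returning nothing. A simplified sketch (the minimal/low mapping is assumed, as only the medium/high cases are visible in the hunk):

```ts
type ClampedLevel = "minimal" | "low" | "medium" | "high";
type GoogleThinkingLevel = "LOW" | "MEDIUM" | "HIGH";

function toGoogleLevel(effort: ClampedLevel): GoogleThinkingLevel {
	switch (effort) {
		case "minimal":
		case "low":
			return "LOW"; // assumed mapping, not shown in the hunk
		case "medium":
			return "MEDIUM";
		case "high":
			return "HIGH";
		default:
			// Unreachable for well-typed callers; protects plain-JS call sites.
			return "HIGH";
	}
}

const fromUntypedCaller = "adaptive" as unknown as ClampedLevel;
console.log(toGoogleLevel(fromUntypedCaller)); // "HIGH"
```
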
@@ -431,7 +431,7 @@ function buildParams(
 	return params;
 }
 
-type ClampedThinkingLevel = Exclude<PiThinkingLevel, "xhigh">;
+type ClampedThinkingLevel = Exclude<PiThinkingLevel, "xhigh" | "adaptive">;
 
 function isGemini3ProModel(model: Model<"google-generative-ai">): boolean {
 	return /gemini-3(?:\.\d+)?-pro/.test(model.id.toLowerCase());

@@ -464,6 +464,8 @@ function getGemini3ThinkingLevel(
 			return "MEDIUM";
 		case "high":
 			return "HIGH";
+		default:
+			return "HIGH";
 	}
 }
 

@@ -398,7 +398,7 @@ function buildParams(
 	return params;
 }
 
-type ClampedThinkingLevel = Exclude<ThinkingLevel, "xhigh">;
+type ClampedThinkingLevel = Exclude<ThinkingLevel, "xhigh" | "adaptive">;
 
 function isGemini3ProModel(model: Model<"google-generative-ai">): boolean {
 	return /gemini-3(?:\.\d+)?-pro/.test(model.id.toLowerCase());

@@ -431,6 +431,8 @@ function getGemini3ThinkingLevel(
 			return "MEDIUM";
 		case "high":
 			return "HIGH";
+		default:
+			return "HIGH";
 	}
 }
 

@@ -281,7 +281,8 @@ export const streamSimpleOpenAICodexResponses: StreamFunction<"openai-codex-resp
 	}
 
 	const base = buildBaseOptions(model, options, apiKey);
-	const
+	const rawReasoning = options?.reasoning === "adaptive" ? "high" : options?.reasoning;
+	const reasoningEffort = supportsXhigh(model) ? rawReasoning : clampReasoning(rawReasoning);
 
 	return streamOpenAICodexResponses(model, context, {
 		...base,

@@ -300,7 +300,8 @@ export const streamSimpleOpenAICompletions: StreamFunction<"openai-completions",
 	}
 
 	const base = buildBaseOptions(model, options, apiKey);
-	const
+	const rawReasoning = options?.reasoning === "adaptive" ? "high" : options?.reasoning;
+	const reasoningEffort = supportsXhigh(model) ? rawReasoning : clampReasoning(rawReasoning);
 	const toolChoice = (options as OpenAICompletionsOptions | undefined)?.toolChoice;
 
 	return streamOpenAICompletions(model, context, {

@@ -118,7 +118,8 @@ export const streamSimpleOpenAIResponses: StreamFunction<"openai-responses", Sim
 	}
 
 	const base = buildBaseOptions(model, options, apiKey);
-	const
+	const rawReasoning = options?.reasoning === "adaptive" ? "high" : options?.reasoning;
+	const reasoningEffort = supportsXhigh(model) ? rawReasoning : clampReasoning(rawReasoning);
 
 	return streamOpenAIResponses(model, context, {
 		...base,

@@ -15,8 +15,10 @@ export function buildBaseOptions(model: Model<Api>, options?: SimpleStreamOption
 	};
 }
 
-export function clampReasoning(effort: ThinkingLevel | undefined): Exclude<ThinkingLevel, "xhigh"> | undefined {
-
+export function clampReasoning(effort: ThinkingLevel | undefined): Exclude<ThinkingLevel, "xhigh" | "adaptive"> | undefined {
+	if (effort === "xhigh") return "high";
+	if (effort === "adaptive") return "high";
+	return effort;
 }
 
 export function adjustMaxTokensForThinking(

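
`clampReasoning` is the single place where both `"xhigh"` and `"adaptive"` collapse to `"high"` for providers that only understand the basic levels. A quick usage sketch of a function with the same shape (standalone, not importing the package):

```ts
type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";

function clampReasoning(effort: ThinkingLevel | undefined): Exclude<ThinkingLevel, "xhigh" | "adaptive"> | undefined {
	if (effort === "xhigh") return "high";
	if (effort === "adaptive") return "high";
	return effort;
}

console.log(clampReasoning("adaptive")); // "high"
console.log(clampReasoning("xhigh"));    // "high"
console.log(clampReasoning("medium"));   // "medium"
console.log(clampReasoning(undefined));  // undefined (callers fall back, e.g. `?? "high"` in the next hunk)
```
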
@@ -34,7 +36,7 @@ export function adjustMaxTokensForThinking(
 	const budgets = { ...defaultBudgets, ...customBudgets };
 
 	const minOutputTokens = 1024;
-	const level = clampReasoning(reasoningLevel)
+	const level = clampReasoning(reasoningLevel) ?? "high";
 	let thinkingBudget = budgets[level]!;
 	const maxTokens = Math.min(baseMaxTokens + thinkingBudget, modelMaxTokens);
 

@@ -46,7 +46,7 @@ export type KnownProvider =
 	| "ollama-cloud";
 export type Provider = KnownProvider | string;
 
-export type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh";
+export type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";
 
 /** Token budgets for each thinking level (token-based providers only) */
 export interface ThinkingBudgets {

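
With `"adaptive"` added to the `ThinkingLevel` union, callers request it through the same `reasoning` option as the other levels; per the provider hunks above, Anthropic adaptive models get `thinking: { type: "adaptive" }` with no effort override, while everything else degrades it to `"high"`. A sketch of a call-site options object (the option shape is trimmed from the types in this diff; nothing here is executed against a real provider):

```ts
type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";

interface SimpleStreamOptions {
	temperature?: number;
	maxTokens?: number;
	reasoning?: ThinkingLevel;
	thinkingBudgets?: Partial<Record<"minimal" | "low" | "medium" | "high", number>>;
}

// "adaptive" is just another reasoning value from the caller's point of view;
// the provider layer decides whether it means adaptive thinking or a "high" fallback.
const options: SimpleStreamOptions = {
	reasoning: "adaptive",
	maxTokens: 4096,
};

console.log(options.reasoning); // "adaptive"
```
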
@@ -4,7 +4,7 @@
 import chalk from "chalk";
 import { APP_NAME, CONFIG_DIR_NAME, ENV_AGENT_DIR } from "../config.js";
 import { allTools } from "../core/tools/index.js";
-const VALID_THINKING_LEVELS = ["off", "minimal", "low", "medium", "high", "xhigh"];
+const VALID_THINKING_LEVELS = ["off", "minimal", "low", "medium", "high", "xhigh", "adaptive"];
 export function isValidThinkingLevel(level) {
 	return VALID_THINKING_LEVELS.includes(level);
 }

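
The final hunk simply widens the CLI-side validation list so a user-supplied `"adaptive"` thinking level passes the check. A minimal sketch of that validation (the list mirrors the hunk; the function is reproduced standalone):

```ts
const VALID_THINKING_LEVELS = ["off", "minimal", "low", "medium", "high", "xhigh", "adaptive"];

function isValidThinkingLevel(level: string): boolean {
	return VALID_THINKING_LEVELS.includes(level);
}

console.log(isValidThinkingLevel("adaptive")); // true as of 1.2.0
console.log(isValidThinkingLevel("auto"));     // false
```
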