@dexto/core 1.6.0 → 1.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/DextoAgent.cjs +79 -5
- package/dist/agent/DextoAgent.d.ts +24 -2
- package/dist/agent/DextoAgent.d.ts.map +1 -1
- package/dist/agent/DextoAgent.js +79 -5
- package/dist/agent/agent-options.d.ts +6 -1
- package/dist/agent/agent-options.d.ts.map +1 -1
- package/dist/agent/schemas.d.ts +18 -18
- package/dist/approval/manager.cjs +87 -27
- package/dist/approval/manager.d.ts +10 -1
- package/dist/approval/manager.d.ts.map +1 -1
- package/dist/approval/manager.js +87 -27
- package/dist/approval/schemas.cjs +22 -8
- package/dist/approval/schemas.d.ts +276 -102
- package/dist/approval/schemas.d.ts.map +1 -1
- package/dist/approval/schemas.js +22 -8
- package/dist/context/manager.cjs +2 -2
- package/dist/context/manager.d.ts +2 -1
- package/dist/context/manager.d.ts.map +1 -1
- package/dist/context/manager.js +2 -2
- package/dist/context/types.d.ts +3 -2
- package/dist/context/types.d.ts.map +1 -1
- package/dist/events/index.d.ts +26 -13
- package/dist/events/index.d.ts.map +1 -1
- package/dist/hooks/index.d.ts +1 -1
- package/dist/hooks/index.d.ts.map +1 -1
- package/dist/hooks/types.d.ts +1 -22
- package/dist/hooks/types.d.ts.map +1 -1
- package/dist/llm/executor/provider-options.cjs +223 -28
- package/dist/llm/executor/provider-options.d.ts +3 -37
- package/dist/llm/executor/provider-options.d.ts.map +1 -1
- package/dist/llm/executor/provider-options.js +227 -27
- package/dist/llm/executor/stream-processor.cjs +57 -34
- package/dist/llm/executor/stream-processor.d.ts +12 -4
- package/dist/llm/executor/stream-processor.d.ts.map +1 -1
- package/dist/llm/executor/stream-processor.js +55 -32
- package/dist/llm/executor/turn-executor.cjs +66 -44
- package/dist/llm/executor/turn-executor.d.ts +3 -3
- package/dist/llm/executor/turn-executor.d.ts.map +1 -1
- package/dist/llm/executor/turn-executor.js +56 -34
- package/dist/llm/formatters/vercel.cjs +15 -3
- package/dist/llm/formatters/vercel.d.ts +1 -0
- package/dist/llm/formatters/vercel.d.ts.map +1 -1
- package/dist/llm/formatters/vercel.js +15 -3
- package/dist/llm/index.cjs +8 -0
- package/dist/llm/index.d.ts +2 -1
- package/dist/llm/index.d.ts.map +1 -1
- package/dist/llm/index.js +7 -0
- package/dist/llm/providers/local/schemas.d.ts +2 -2
- package/dist/llm/providers/openrouter-model-registry.cjs +66 -11
- package/dist/llm/providers/openrouter-model-registry.d.ts +26 -0
- package/dist/llm/providers/openrouter-model-registry.d.ts.map +1 -1
- package/dist/llm/providers/openrouter-model-registry.js +65 -11
- package/dist/llm/reasoning/anthropic-betas.cjs +31 -0
- package/dist/llm/reasoning/anthropic-betas.d.ts +3 -0
- package/dist/llm/reasoning/anthropic-betas.d.ts.map +1 -0
- package/dist/llm/reasoning/anthropic-betas.js +7 -0
- package/dist/llm/reasoning/anthropic-thinking.cjs +79 -0
- package/dist/llm/reasoning/anthropic-thinking.d.ts +15 -0
- package/dist/llm/reasoning/anthropic-thinking.d.ts.map +1 -0
- package/dist/llm/reasoning/anthropic-thinking.js +52 -0
- package/dist/llm/reasoning/openai-reasoning-effort.cjs +86 -0
- package/dist/llm/reasoning/openai-reasoning-effort.d.ts +5 -0
- package/dist/llm/reasoning/openai-reasoning-effort.d.ts.map +1 -0
- package/dist/llm/reasoning/openai-reasoning-effort.js +61 -0
- package/dist/llm/reasoning/profile.cjs +113 -0
- package/dist/llm/reasoning/profile.d.ts +13 -0
- package/dist/llm/reasoning/profile.d.ts.map +1 -0
- package/dist/llm/reasoning/profile.js +92 -0
- package/dist/llm/reasoning/profiles/anthropic.cjs +61 -0
- package/dist/llm/reasoning/profiles/anthropic.d.ts +8 -0
- package/dist/llm/reasoning/profiles/anthropic.d.ts.map +1 -0
- package/dist/llm/reasoning/profiles/anthropic.js +45 -0
- package/dist/llm/reasoning/profiles/bedrock.cjs +54 -0
- package/dist/llm/reasoning/profiles/bedrock.d.ts +3 -0
- package/dist/llm/reasoning/profiles/bedrock.d.ts.map +1 -0
- package/dist/llm/reasoning/profiles/bedrock.js +36 -0
- package/dist/llm/reasoning/profiles/google.cjs +45 -0
- package/dist/llm/reasoning/profiles/google.d.ts +9 -0
- package/dist/llm/reasoning/profiles/google.d.ts.map +1 -0
- package/dist/llm/reasoning/profiles/google.js +21 -0
- package/dist/llm/reasoning/profiles/openai-compatible.cjs +39 -0
- package/dist/llm/reasoning/profiles/openai-compatible.d.ts +3 -0
- package/dist/llm/reasoning/profiles/openai-compatible.d.ts.map +1 -0
- package/dist/llm/reasoning/profiles/openai-compatible.js +16 -0
- package/dist/llm/reasoning/profiles/openai.cjs +41 -0
- package/dist/llm/reasoning/profiles/openai.d.ts +3 -0
- package/dist/llm/reasoning/profiles/openai.d.ts.map +1 -0
- package/dist/llm/reasoning/profiles/openai.js +18 -0
- package/dist/llm/reasoning/profiles/openrouter.cjs +83 -0
- package/dist/llm/reasoning/profiles/openrouter.d.ts +10 -0
- package/dist/llm/reasoning/profiles/openrouter.d.ts.map +1 -0
- package/dist/llm/reasoning/profiles/openrouter.js +59 -0
- package/dist/llm/reasoning/profiles/shared.cjs +80 -0
- package/dist/llm/reasoning/profiles/shared.d.ts +25 -0
- package/dist/llm/reasoning/profiles/shared.d.ts.map +1 -0
- package/dist/llm/reasoning/profiles/shared.js +53 -0
- package/dist/llm/reasoning/profiles/vertex.cjs +46 -0
- package/dist/llm/reasoning/profiles/vertex.d.ts +3 -0
- package/dist/llm/reasoning/profiles/vertex.d.ts.map +1 -0
- package/dist/llm/reasoning/profiles/vertex.js +23 -0
- package/dist/llm/registry/auto-update.cjs +18 -0
- package/dist/llm/registry/auto-update.d.ts.map +1 -1
- package/dist/llm/registry/auto-update.js +18 -0
- package/dist/llm/registry/index.cjs +126 -26
- package/dist/llm/registry/index.d.ts +48 -4
- package/dist/llm/registry/index.d.ts.map +1 -1
- package/dist/llm/registry/index.js +136 -28
- package/dist/llm/registry/models.generated.cjs +5198 -59
- package/dist/llm/registry/models.generated.d.ts +1893 -76
- package/dist/llm/registry/models.generated.d.ts.map +1 -1
- package/dist/llm/registry/models.generated.js +5196 -58
- package/dist/llm/registry/sync.cjs +72 -1
- package/dist/llm/registry/sync.d.ts +21 -1
- package/dist/llm/registry/sync.d.ts.map +1 -1
- package/dist/llm/registry/sync.js +72 -1
- package/dist/llm/resolver.cjs +13 -1
- package/dist/llm/resolver.d.ts.map +1 -1
- package/dist/llm/resolver.js +13 -1
- package/dist/llm/schemas.cjs +75 -14
- package/dist/llm/schemas.d.ts +84 -27
- package/dist/llm/schemas.d.ts.map +1 -1
- package/dist/llm/schemas.js +75 -14
- package/dist/llm/services/factory.cjs +55 -8
- package/dist/llm/services/factory.d.ts +1 -1
- package/dist/llm/services/factory.d.ts.map +1 -1
- package/dist/llm/services/factory.js +58 -8
- package/dist/llm/services/vercel.cjs +2 -2
- package/dist/llm/services/vercel.js +2 -2
- package/dist/llm/types.d.ts +9 -0
- package/dist/llm/types.d.ts.map +1 -1
- package/dist/logger/default-logger-factory.d.ts +12 -12
- package/dist/logger/v2/dexto-logger.cjs +35 -0
- package/dist/logger/v2/dexto-logger.d.ts +19 -0
- package/dist/logger/v2/dexto-logger.d.ts.map +1 -1
- package/dist/logger/v2/dexto-logger.js +35 -0
- package/dist/logger/v2/schemas.d.ts +6 -6
- package/dist/logger/v2/test-utils.cjs +2 -0
- package/dist/logger/v2/test-utils.d.ts.map +1 -1
- package/dist/logger/v2/test-utils.js +2 -0
- package/dist/logger/v2/types.d.ts +14 -1
- package/dist/logger/v2/types.d.ts.map +1 -1
- package/dist/mcp/schemas.d.ts +15 -15
- package/dist/memory/schemas.d.ts +4 -4
- package/dist/prompts/index.cjs +9 -0
- package/dist/prompts/index.d.ts +1 -0
- package/dist/prompts/index.d.ts.map +1 -1
- package/dist/prompts/index.js +10 -0
- package/dist/prompts/prompt-manager.cjs +2 -0
- package/dist/prompts/prompt-manager.d.ts.map +1 -1
- package/dist/prompts/prompt-manager.js +2 -0
- package/dist/prompts/providers/config-prompt-provider.cjs +11 -1
- package/dist/prompts/providers/config-prompt-provider.d.ts.map +1 -1
- package/dist/prompts/providers/config-prompt-provider.js +11 -1
- package/dist/prompts/schemas.cjs +2 -2
- package/dist/prompts/schemas.d.ts +7 -7
- package/dist/prompts/schemas.js +2 -2
- package/dist/prompts/types.d.ts +6 -2
- package/dist/prompts/types.d.ts.map +1 -1
- package/dist/systemPrompt/in-built-prompts.cjs +5 -5
- package/dist/systemPrompt/in-built-prompts.d.ts +1 -1
- package/dist/systemPrompt/in-built-prompts.d.ts.map +1 -1
- package/dist/systemPrompt/in-built-prompts.js +5 -5
- package/dist/systemPrompt/schemas.d.ts +5 -5
- package/dist/systemPrompt/types.d.ts +11 -0
- package/dist/systemPrompt/types.d.ts.map +1 -1
- package/dist/tools/display-types.d.ts +10 -0
- package/dist/tools/display-types.d.ts.map +1 -1
- package/dist/tools/index.cjs +3 -1
- package/dist/tools/index.d.ts +1 -0
- package/dist/tools/index.d.ts.map +1 -1
- package/dist/tools/index.js +1 -0
- package/dist/tools/presentation.cjs +49 -0
- package/dist/tools/presentation.d.ts +11 -0
- package/dist/tools/presentation.d.ts.map +1 -0
- package/dist/tools/presentation.js +24 -0
- package/dist/tools/tool-manager.cjs +389 -136
- package/dist/tools/tool-manager.d.ts +36 -24
- package/dist/tools/tool-manager.d.ts.map +1 -1
- package/dist/tools/tool-manager.js +389 -136
- package/dist/tools/types.d.ts +134 -55
- package/dist/tools/types.d.ts.map +1 -1
- package/dist/utils/path.cjs +10 -1
- package/dist/utils/path.d.ts +5 -2
- package/dist/utils/path.d.ts.map +1 -1
- package/dist/utils/path.js +10 -1
- package/dist/utils/service-initializer.d.ts +1 -0
- package/dist/utils/service-initializer.d.ts.map +1 -1
- package/package.json +7 -5
|
@@ -19,69 +19,264 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
|
|
|
19
19
|
var provider_options_exports = {};
|
|
20
20
|
__export(provider_options_exports, {
|
|
21
21
|
buildProviderOptions: () => buildProviderOptions,
|
|
22
|
-
|
|
22
|
+
getEffectiveReasoningBudgetTokens: () => getEffectiveReasoningBudgetTokens
|
|
23
23
|
});
|
|
24
24
|
module.exports = __toCommonJS(provider_options_exports);
|
|
25
25
|
var import_registry = require("../registry/index.js");
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
26
|
+
var import_anthropic_thinking = require("../reasoning/anthropic-thinking.js");
|
|
27
|
+
var import_anthropic_betas = require("../reasoning/anthropic-betas.js");
|
|
28
|
+
var import_profile = require("../reasoning/profile.js");
|
|
29
|
+
var import_openrouter = require("../reasoning/profiles/openrouter.js");
|
|
30
|
+
var import_openai_reasoning_effort = require("../reasoning/openai-reasoning-effort.js");
|
|
31
|
+
const ANTHROPIC_MIN_THINKING_BUDGET_TOKENS = 1024;
|
|
32
|
+
const ANTHROPIC_DEFAULT_BUDGET_TOKENS = 2048;
|
|
33
|
+
const GOOGLE_DEFAULT_BUDGET_TOKENS = 2048;
|
|
34
|
+
const BEDROCK_DEFAULT_BUDGET_TOKENS = 2048;
|
|
35
|
+
const ANTHROPIC_CACHE_CONTROL = { type: "ephemeral" };
|
|
36
|
+
function coerceBudgetTokens(tokens, minimum) {
|
|
37
|
+
if (tokens === void 0) return void 0;
|
|
38
|
+
if (!Number.isFinite(tokens)) return void 0;
|
|
39
|
+
return Math.max(minimum, Math.floor(tokens));
|
|
40
|
+
}
|
|
41
|
+
function toOpenAIReasoningEffort(reasoningVariant) {
|
|
42
|
+
return reasoningVariant === "none" || reasoningVariant === "minimal" || reasoningVariant === "low" || reasoningVariant === "medium" || reasoningVariant === "high" || reasoningVariant === "xhigh" ? reasoningVariant : void 0;
|
|
43
|
+
}
|
|
44
|
+
function toOpenAICompatibleReasoningEffort(reasoningVariant) {
|
|
45
|
+
return reasoningVariant === "none" || reasoningVariant === "low" || reasoningVariant === "medium" || reasoningVariant === "high" ? reasoningVariant : void 0;
|
|
46
|
+
}
|
|
47
|
+
function getSelectedReasoningVariant(config) {
|
|
48
|
+
const profile = (0, import_profile.getReasoningProfile)(config.provider, config.model);
|
|
49
|
+
const requested = config.reasoning?.variant;
|
|
50
|
+
if (requested !== void 0) {
|
|
51
|
+
const supported = profile.variants.some((entry) => entry.id === requested);
|
|
52
|
+
return {
|
|
53
|
+
reasoningVariant: supported ? requested : void 0,
|
|
54
|
+
hasInvalidRequestedReasoningVariant: !supported
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
return {
|
|
58
|
+
reasoningVariant: profile.defaultVariant,
|
|
59
|
+
hasInvalidRequestedReasoningVariant: false
|
|
60
|
+
};
|
|
61
|
+
}
|
|
62
|
+
function buildAnthropicProviderOptions(config) {
|
|
63
|
+
const { model, reasoningVariant, budgetTokens, capable } = config;
|
|
64
|
+
const adaptiveThinking = (0, import_anthropic_thinking.isAnthropicAdaptiveThinkingModel)(model);
|
|
65
|
+
if (adaptiveThinking) {
|
|
66
|
+
if (reasoningVariant === "disabled") {
|
|
67
|
+
return {
|
|
68
|
+
anthropic: {
|
|
69
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
70
|
+
sendReasoning: false,
|
|
71
|
+
thinking: { type: "disabled" }
|
|
72
|
+
}
|
|
73
|
+
};
|
|
74
|
+
}
|
|
75
|
+
const effort = reasoningVariant === "low" || reasoningVariant === "medium" || reasoningVariant === "high" || reasoningVariant === "max" ? reasoningVariant : void 0;
|
|
76
|
+
return {
|
|
77
|
+
anthropic: {
|
|
78
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
79
|
+
sendReasoning: true,
|
|
80
|
+
thinking: { type: "adaptive" },
|
|
81
|
+
...effort !== void 0 && { effort }
|
|
82
|
+
}
|
|
83
|
+
};
|
|
84
|
+
}
|
|
85
|
+
if (!capable) {
|
|
30
86
|
return {
|
|
31
87
|
anthropic: {
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
sendReasoning: true
|
|
88
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
89
|
+
sendReasoning: false,
|
|
90
|
+
...reasoningVariant === "disabled" ? { thinking: { type: "disabled" } } : {}
|
|
36
91
|
}
|
|
37
92
|
};
|
|
38
93
|
}
|
|
39
|
-
if (
|
|
94
|
+
if (reasoningVariant === "disabled") {
|
|
40
95
|
return {
|
|
41
|
-
|
|
42
|
-
cacheControl:
|
|
43
|
-
sendReasoning:
|
|
96
|
+
anthropic: {
|
|
97
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
98
|
+
sendReasoning: false,
|
|
99
|
+
thinking: { type: "disabled" }
|
|
44
100
|
}
|
|
45
101
|
};
|
|
46
102
|
}
|
|
47
|
-
|
|
103
|
+
const effectiveBudgetTokens = coerceBudgetTokens(
|
|
104
|
+
budgetTokens ?? ANTHROPIC_DEFAULT_BUDGET_TOKENS,
|
|
105
|
+
ANTHROPIC_MIN_THINKING_BUDGET_TOKENS
|
|
106
|
+
);
|
|
107
|
+
return {
|
|
108
|
+
anthropic: {
|
|
109
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
110
|
+
sendReasoning: true,
|
|
111
|
+
...effectiveBudgetTokens !== void 0 ? { thinking: { type: "enabled", budgetTokens: effectiveBudgetTokens } } : {}
|
|
112
|
+
}
|
|
113
|
+
};
|
|
114
|
+
}
|
|
115
|
+
function buildOpenRouterProviderOptions(config) {
|
|
116
|
+
const { provider, model, reasoningVariant, budgetTokens } = config;
|
|
117
|
+
const profile = (0, import_profile.getReasoningProfile)(provider, model);
|
|
118
|
+
if (!profile.capable) {
|
|
119
|
+
return void 0;
|
|
120
|
+
}
|
|
121
|
+
if (reasoningVariant === "disabled") {
|
|
122
|
+
return { openrouter: { include_reasoning: false } };
|
|
123
|
+
}
|
|
124
|
+
if (budgetTokens !== void 0) {
|
|
48
125
|
return {
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
126
|
+
openrouter: {
|
|
127
|
+
include_reasoning: true,
|
|
128
|
+
reasoning: { enabled: true, max_tokens: budgetTokens }
|
|
52
129
|
}
|
|
53
130
|
};
|
|
54
131
|
}
|
|
132
|
+
if (profile.paradigm === "budget") {
|
|
133
|
+
if (reasoningVariant === void 0 || reasoningVariant === "enabled") {
|
|
134
|
+
return {
|
|
135
|
+
openrouter: {
|
|
136
|
+
include_reasoning: true
|
|
137
|
+
}
|
|
138
|
+
};
|
|
139
|
+
}
|
|
140
|
+
return void 0;
|
|
141
|
+
}
|
|
142
|
+
const explicitEffort = toOpenAIReasoningEffort(reasoningVariant);
|
|
143
|
+
const effort = explicitEffort ?? (profile.paradigm === "adaptive-effort" && reasoningVariant === "max" ? "xhigh" : void 0);
|
|
144
|
+
return {
|
|
145
|
+
openrouter: {
|
|
146
|
+
include_reasoning: true,
|
|
147
|
+
...effort !== void 0 ? { reasoning: { enabled: true, effort } } : {}
|
|
148
|
+
}
|
|
149
|
+
};
|
|
150
|
+
}
|
|
151
|
+
function asRecord(value) {
|
|
152
|
+
if (!value || typeof value !== "object" || Array.isArray(value)) return void 0;
|
|
153
|
+
return value;
|
|
154
|
+
}
|
|
155
|
+
function getEffectiveReasoningBudgetTokens(providerOptions) {
|
|
156
|
+
if (providerOptions === void 0) return void 0;
|
|
157
|
+
const anthropic = asRecord(providerOptions["anthropic"]);
|
|
158
|
+
const thinking = asRecord(anthropic?.["thinking"]);
|
|
159
|
+
if (thinking?.["type"] === "enabled" && typeof thinking["budgetTokens"] === "number") {
|
|
160
|
+
return thinking["budgetTokens"];
|
|
161
|
+
}
|
|
162
|
+
const google = asRecord(providerOptions["google"]);
|
|
163
|
+
const thinkingConfig = asRecord(google?.["thinkingConfig"]);
|
|
164
|
+
if (typeof thinkingConfig?.["thinkingBudget"] === "number") {
|
|
165
|
+
return thinkingConfig["thinkingBudget"];
|
|
166
|
+
}
|
|
167
|
+
const bedrock = asRecord(providerOptions["bedrock"]);
|
|
168
|
+
const reasoningConfig = asRecord(bedrock?.["reasoningConfig"]);
|
|
169
|
+
if (typeof reasoningConfig?.["budgetTokens"] === "number") {
|
|
170
|
+
return reasoningConfig["budgetTokens"];
|
|
171
|
+
}
|
|
172
|
+
const openrouter = asRecord(providerOptions["openrouter"]);
|
|
173
|
+
const reasoning = asRecord(openrouter?.["reasoning"]);
|
|
174
|
+
if (typeof reasoning?.["max_tokens"] === "number") {
|
|
175
|
+
return reasoning["max_tokens"];
|
|
176
|
+
}
|
|
177
|
+
return void 0;
|
|
178
|
+
}
|
|
179
|
+
function buildProviderOptions(config) {
|
|
180
|
+
const { provider, model, reasoning } = config;
|
|
181
|
+
const modelLower = model.toLowerCase();
|
|
182
|
+
const { reasoningVariant, hasInvalidRequestedReasoningVariant } = getSelectedReasoningVariant(config);
|
|
183
|
+
const budgetTokens = reasoning?.budgetTokens;
|
|
184
|
+
if (hasInvalidRequestedReasoningVariant) {
|
|
185
|
+
return void 0;
|
|
186
|
+
}
|
|
187
|
+
if (provider === "anthropic") {
|
|
188
|
+
const capable = (0, import_registry.isReasoningCapableModel)(model, "anthropic");
|
|
189
|
+
return buildAnthropicProviderOptions({ model, reasoningVariant, budgetTokens, capable });
|
|
190
|
+
}
|
|
191
|
+
if (provider === "bedrock") {
|
|
192
|
+
const capable = (0, import_registry.isReasoningCapableModel)(model, "bedrock");
|
|
193
|
+
if (!capable) {
|
|
194
|
+
return { bedrock: {} };
|
|
195
|
+
}
|
|
196
|
+
const isAnthropicModel = modelLower.includes("anthropic");
|
|
197
|
+
const isNovaModel = modelLower.includes("nova");
|
|
198
|
+
if (!isAnthropicModel && !isNovaModel) {
|
|
199
|
+
return { bedrock: {} };
|
|
200
|
+
}
|
|
201
|
+
const bedrock = {};
|
|
202
|
+
if (reasoningVariant === "disabled") {
|
|
203
|
+
bedrock["reasoningConfig"] = { type: "disabled" };
|
|
204
|
+
return { bedrock };
|
|
205
|
+
}
|
|
206
|
+
if (isAnthropicModel) {
|
|
207
|
+
const effectiveBudgetTokens = coerceBudgetTokens(
|
|
208
|
+
budgetTokens ?? BEDROCK_DEFAULT_BUDGET_TOKENS,
|
|
209
|
+
1
|
|
210
|
+
);
|
|
211
|
+
if (effectiveBudgetTokens === void 0) {
|
|
212
|
+
return { bedrock: {} };
|
|
213
|
+
}
|
|
214
|
+
bedrock["reasoningConfig"] = { type: "enabled", budgetTokens: effectiveBudgetTokens };
|
|
215
|
+
if ((0, import_anthropic_thinking.supportsAnthropicInterleavedThinking)(model)) {
|
|
216
|
+
bedrock["anthropicBeta"] = [import_anthropic_betas.ANTHROPIC_INTERLEAVED_THINKING_BETA];
|
|
217
|
+
}
|
|
218
|
+
return { bedrock };
|
|
219
|
+
}
|
|
220
|
+
const maxReasoningEffort = reasoningVariant === "low" || reasoningVariant === "medium" || reasoningVariant === "high" ? reasoningVariant : void 0;
|
|
221
|
+
if (maxReasoningEffort !== void 0) {
|
|
222
|
+
bedrock["reasoningConfig"] = { type: "enabled", maxReasoningEffort };
|
|
223
|
+
}
|
|
224
|
+
return { bedrock };
|
|
225
|
+
}
|
|
226
|
+
if (provider === "vertex" && modelLower.includes("claude")) {
|
|
227
|
+
const capable = (0, import_registry.isReasoningCapableModel)(model, "vertex");
|
|
228
|
+
return buildAnthropicProviderOptions({ model, reasoningVariant, budgetTokens, capable });
|
|
229
|
+
}
|
|
55
230
|
if (provider === "google" || provider === "vertex" && !modelLower.includes("claude")) {
|
|
231
|
+
const profile = (0, import_profile.getReasoningProfile)(provider, model);
|
|
232
|
+
const includeThoughts = profile.capable && reasoningVariant !== "disabled";
|
|
233
|
+
const isThinkingLevel = profile.paradigm === "thinking-level";
|
|
234
|
+
const thinkingLevel = includeThoughts && isThinkingLevel && (reasoningVariant === "minimal" || reasoningVariant === "low" || reasoningVariant === "medium" || reasoningVariant === "high") ? reasoningVariant : void 0;
|
|
235
|
+
const thinkingBudgetTokens = coerceBudgetTokens(
|
|
236
|
+
budgetTokens ?? GOOGLE_DEFAULT_BUDGET_TOKENS,
|
|
237
|
+
1
|
|
238
|
+
);
|
|
56
239
|
return {
|
|
57
240
|
google: {
|
|
58
241
|
thinkingConfig: {
|
|
59
|
-
|
|
60
|
-
includeThoughts
|
|
242
|
+
includeThoughts,
|
|
243
|
+
...includeThoughts && isThinkingLevel && thinkingLevel !== void 0 && {
|
|
244
|
+
thinkingLevel
|
|
245
|
+
},
|
|
246
|
+
...includeThoughts && profile.paradigm === "budget" && thinkingBudgetTokens !== void 0 && {
|
|
247
|
+
thinkingBudget: thinkingBudgetTokens
|
|
248
|
+
}
|
|
61
249
|
}
|
|
62
250
|
}
|
|
63
251
|
};
|
|
64
252
|
}
|
|
65
253
|
if (provider === "openai") {
|
|
66
|
-
const
|
|
67
|
-
if (
|
|
254
|
+
const effortCandidate = toOpenAIReasoningEffort(reasoningVariant);
|
|
255
|
+
if (effortCandidate && (0, import_openai_reasoning_effort.supportsOpenAIReasoningEffort)(model, effortCandidate)) {
|
|
68
256
|
return {
|
|
69
257
|
openai: {
|
|
70
|
-
reasoningEffort:
|
|
258
|
+
reasoningEffort: effortCandidate,
|
|
259
|
+
...effortCandidate !== "none" && { reasoningSummary: "auto" }
|
|
71
260
|
}
|
|
72
261
|
};
|
|
73
262
|
}
|
|
74
263
|
}
|
|
75
|
-
|
|
76
|
-
}
|
|
77
|
-
|
|
78
|
-
if (
|
|
79
|
-
|
|
264
|
+
if ((0, import_openrouter.isOpenRouterGatewayProvider)(provider)) {
|
|
265
|
+
return buildOpenRouterProviderOptions({ provider, model, reasoningVariant, budgetTokens });
|
|
266
|
+
}
|
|
267
|
+
if (provider === "openai-compatible") {
|
|
268
|
+
const profile = (0, import_profile.getReasoningProfile)(provider, model);
|
|
269
|
+
if (!profile.capable) return void 0;
|
|
270
|
+
const reasoningEffort = toOpenAICompatibleReasoningEffort(reasoningVariant);
|
|
271
|
+
if (reasoningEffort === void 0) return void 0;
|
|
272
|
+
return {
|
|
273
|
+
openaiCompatible: { reasoningEffort }
|
|
274
|
+
};
|
|
80
275
|
}
|
|
81
276
|
return void 0;
|
|
82
277
|
}
|
|
83
278
|
// Annotate the CommonJS export names for ESM import in node:
|
|
84
279
|
0 && (module.exports = {
|
|
85
280
|
buildProviderOptions,
|
|
86
|
-
|
|
281
|
+
getEffectiveReasoningBudgetTokens
|
|
87
282
|
});
|
|
@@ -1,49 +1,15 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* Provider-specific options builder for Vercel AI SDK's streamText/generateText.
|
|
3
|
-
*
|
|
4
|
-
* Centralizes provider-specific configuration that requires explicit opt-in:
|
|
5
|
-
* - Anthropic: cacheControl for prompt caching, sendReasoning for extended thinking
|
|
6
|
-
* - Bedrock/Vertex Claude: Same as Anthropic (Claude models on these platforms)
|
|
7
|
-
* - Google: thinkingConfig for Gemini thinking models
|
|
8
|
-
* - OpenAI: reasoningEffort for o1/o3/codex/gpt-5 models
|
|
9
|
-
*
|
|
10
|
-
* Caching notes:
|
|
11
|
-
* - Anthropic: Requires explicit cacheControl option (we enable it)
|
|
12
|
-
* - OpenAI: Automatic for prompts ≥1024 tokens (no config needed)
|
|
13
|
-
* - Google: Implicit caching automatic for Gemini 2.5+ (≥1024 tokens for Flash,
|
|
14
|
-
* ≥2048 for Pro). Explicit caching requires pre-created cachedContent IDs.
|
|
15
|
-
* All providers return cached token counts in the response (cachedInputTokens).
|
|
16
3
|
*/
|
|
17
|
-
import type { LLMProvider } from '../types.js';
|
|
18
|
-
export type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
|
|
4
|
+
import type { LLMProvider, LLMReasoningConfig } from '../types.js';
|
|
19
5
|
export interface ProviderOptionsConfig {
|
|
20
6
|
provider: LLMProvider;
|
|
21
7
|
model: string;
|
|
22
|
-
|
|
8
|
+
reasoning?: LLMReasoningConfig | undefined;
|
|
23
9
|
}
|
|
10
|
+
export declare function getEffectiveReasoningBudgetTokens(providerOptions: Record<string, Record<string, unknown>> | undefined): number | undefined;
|
|
24
11
|
/**
|
|
25
12
|
* Build provider-specific options for streamText/generateText.
|
|
26
|
-
*
|
|
27
|
-
* @param config Provider, model, and optional reasoning effort configuration
|
|
28
|
-
* @returns Provider options object or undefined if no special options needed
|
|
29
13
|
*/
|
|
30
14
|
export declare function buildProviderOptions(config: ProviderOptionsConfig): Record<string, Record<string, unknown>> | undefined;
|
|
31
|
-
/**
|
|
32
|
-
* Determine the default reasoning effort for OpenAI models.
|
|
33
|
-
*
|
|
34
|
-
* OpenAI reasoning effort levels (from lowest to highest):
|
|
35
|
-
* - 'none': No reasoning, fastest responses
|
|
36
|
-
* - 'low': Minimal reasoning, fast responses
|
|
37
|
-
* - 'medium': Balanced reasoning (OpenAI's recommended daily driver)
|
|
38
|
-
* - 'high': Thorough reasoning for complex tasks
|
|
39
|
-
* - 'xhigh': Extra high reasoning for quality-critical, non-latency-sensitive tasks
|
|
40
|
-
*
|
|
41
|
-
* Default strategy:
|
|
42
|
-
* - Reasoning-capable models (codex, o1, o3, gpt-5): 'medium' - OpenAI's recommended default
|
|
43
|
-
* - Other models: undefined (no reasoning effort needed)
|
|
44
|
-
*
|
|
45
|
-
* @param model The model name
|
|
46
|
-
* @returns Reasoning effort level or undefined if not applicable
|
|
47
|
-
*/
|
|
48
|
-
export declare function getDefaultReasoningEffort(model: string): Exclude<ReasoningEffort, 'none'> | undefined;
|
|
49
15
|
//# sourceMappingURL=provider-options.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"provider-options.d.ts","sourceRoot":"","sources":["../../../src/llm/executor/provider-options.ts"],"names":[],"mappings":"AAAA
|
|
1
|
+
{"version":3,"file":"provider-options.d.ts","sourceRoot":"","sources":["../../../src/llm/executor/provider-options.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,KAAK,EAAE,WAAW,EAAE,kBAAkB,EAAE,MAAM,aAAa,CAAC;AAcnE,MAAM,WAAW,qBAAqB;IAClC,QAAQ,EAAE,WAAW,CAAC;IACtB,KAAK,EAAE,MAAM,CAAC;IACd,SAAS,CAAC,EAAE,kBAAkB,GAAG,SAAS,CAAC;CAC9C;AA4LD,wBAAgB,iCAAiC,CAC7C,eAAe,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,GAAG,SAAS,GACrE,MAAM,GAAG,SAAS,CA4BpB;AAED;;GAEG;AACH,wBAAgB,oBAAoB,CAChC,MAAM,EAAE,qBAAqB,GAC9B,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,GAAG,SAAS,CA2IrD"}
|
|
@@ -1,63 +1,263 @@
|
|
|
1
1
|
import "../../chunk-PTJYTZNU.js";
|
|
2
2
|
import { isReasoningCapableModel } from "../registry/index.js";
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
3
|
+
import {
|
|
4
|
+
isAnthropicAdaptiveThinkingModel,
|
|
5
|
+
supportsAnthropicInterleavedThinking
|
|
6
|
+
} from "../reasoning/anthropic-thinking.js";
|
|
7
|
+
import { ANTHROPIC_INTERLEAVED_THINKING_BETA } from "../reasoning/anthropic-betas.js";
|
|
8
|
+
import { getReasoningProfile } from "../reasoning/profile.js";
|
|
9
|
+
import { isOpenRouterGatewayProvider } from "../reasoning/profiles/openrouter.js";
|
|
10
|
+
import {
|
|
11
|
+
supportsOpenAIReasoningEffort
|
|
12
|
+
} from "../reasoning/openai-reasoning-effort.js";
|
|
13
|
+
const ANTHROPIC_MIN_THINKING_BUDGET_TOKENS = 1024;
|
|
14
|
+
const ANTHROPIC_DEFAULT_BUDGET_TOKENS = 2048;
|
|
15
|
+
const GOOGLE_DEFAULT_BUDGET_TOKENS = 2048;
|
|
16
|
+
const BEDROCK_DEFAULT_BUDGET_TOKENS = 2048;
|
|
17
|
+
const ANTHROPIC_CACHE_CONTROL = { type: "ephemeral" };
|
|
18
|
+
function coerceBudgetTokens(tokens, minimum) {
|
|
19
|
+
if (tokens === void 0) return void 0;
|
|
20
|
+
if (!Number.isFinite(tokens)) return void 0;
|
|
21
|
+
return Math.max(minimum, Math.floor(tokens));
|
|
22
|
+
}
|
|
23
|
+
function toOpenAIReasoningEffort(reasoningVariant) {
|
|
24
|
+
return reasoningVariant === "none" || reasoningVariant === "minimal" || reasoningVariant === "low" || reasoningVariant === "medium" || reasoningVariant === "high" || reasoningVariant === "xhigh" ? reasoningVariant : void 0;
|
|
25
|
+
}
|
|
26
|
+
function toOpenAICompatibleReasoningEffort(reasoningVariant) {
|
|
27
|
+
return reasoningVariant === "none" || reasoningVariant === "low" || reasoningVariant === "medium" || reasoningVariant === "high" ? reasoningVariant : void 0;
|
|
28
|
+
}
|
|
29
|
+
function getSelectedReasoningVariant(config) {
|
|
30
|
+
const profile = getReasoningProfile(config.provider, config.model);
|
|
31
|
+
const requested = config.reasoning?.variant;
|
|
32
|
+
if (requested !== void 0) {
|
|
33
|
+
const supported = profile.variants.some((entry) => entry.id === requested);
|
|
34
|
+
return {
|
|
35
|
+
reasoningVariant: supported ? requested : void 0,
|
|
36
|
+
hasInvalidRequestedReasoningVariant: !supported
|
|
37
|
+
};
|
|
38
|
+
}
|
|
39
|
+
return {
|
|
40
|
+
reasoningVariant: profile.defaultVariant,
|
|
41
|
+
hasInvalidRequestedReasoningVariant: false
|
|
42
|
+
};
|
|
43
|
+
}
|
|
44
|
+
function buildAnthropicProviderOptions(config) {
|
|
45
|
+
const { model, reasoningVariant, budgetTokens, capable } = config;
|
|
46
|
+
const adaptiveThinking = isAnthropicAdaptiveThinkingModel(model);
|
|
47
|
+
if (adaptiveThinking) {
|
|
48
|
+
if (reasoningVariant === "disabled") {
|
|
49
|
+
return {
|
|
50
|
+
anthropic: {
|
|
51
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
52
|
+
sendReasoning: false,
|
|
53
|
+
thinking: { type: "disabled" }
|
|
54
|
+
}
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
const effort = reasoningVariant === "low" || reasoningVariant === "medium" || reasoningVariant === "high" || reasoningVariant === "max" ? reasoningVariant : void 0;
|
|
58
|
+
return {
|
|
59
|
+
anthropic: {
|
|
60
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
61
|
+
sendReasoning: true,
|
|
62
|
+
thinking: { type: "adaptive" },
|
|
63
|
+
...effort !== void 0 && { effort }
|
|
64
|
+
}
|
|
65
|
+
};
|
|
66
|
+
}
|
|
67
|
+
if (!capable) {
|
|
7
68
|
return {
|
|
8
69
|
anthropic: {
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
sendReasoning: true
|
|
70
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
71
|
+
sendReasoning: false,
|
|
72
|
+
...reasoningVariant === "disabled" ? { thinking: { type: "disabled" } } : {}
|
|
13
73
|
}
|
|
14
74
|
};
|
|
15
75
|
}
|
|
16
|
-
if (
|
|
76
|
+
if (reasoningVariant === "disabled") {
|
|
17
77
|
return {
|
|
18
|
-
|
|
19
|
-
cacheControl:
|
|
20
|
-
sendReasoning:
|
|
78
|
+
anthropic: {
|
|
79
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
80
|
+
sendReasoning: false,
|
|
81
|
+
thinking: { type: "disabled" }
|
|
21
82
|
}
|
|
22
83
|
};
|
|
23
84
|
}
|
|
24
|
-
|
|
85
|
+
const effectiveBudgetTokens = coerceBudgetTokens(
|
|
86
|
+
budgetTokens ?? ANTHROPIC_DEFAULT_BUDGET_TOKENS,
|
|
87
|
+
ANTHROPIC_MIN_THINKING_BUDGET_TOKENS
|
|
88
|
+
);
|
|
89
|
+
return {
|
|
90
|
+
anthropic: {
|
|
91
|
+
cacheControl: ANTHROPIC_CACHE_CONTROL,
|
|
92
|
+
sendReasoning: true,
|
|
93
|
+
...effectiveBudgetTokens !== void 0 ? { thinking: { type: "enabled", budgetTokens: effectiveBudgetTokens } } : {}
|
|
94
|
+
}
|
|
95
|
+
};
|
|
96
|
+
}
|
|
97
|
+
/**
 * Builds OpenRouter gateway provider options for reasoning-capable models.
 *
 * @param {{provider: string, model: string, reasoningVariant?: string, budgetTokens?: number}} config
 * @returns {object | undefined} `{ openrouter: {...} }` options, or `undefined`
 *   when the model is not reasoning-capable or the variant cannot be mapped.
 */
function buildOpenRouterProviderOptions(config) {
  const { provider, model, reasoningVariant, budgetTokens } = config;
  const reasoningProfile = getReasoningProfile(provider, model);
  if (!reasoningProfile.capable) return undefined;

  // Explicitly disabled reasoning: ask the gateway not to include it.
  if (reasoningVariant === "disabled") {
    return { openrouter: { include_reasoning: false } };
  }

  // An explicit token budget takes precedence over any effort mapping.
  if (budgetTokens !== undefined) {
    return {
      openrouter: {
        include_reasoning: true,
        reasoning: { enabled: true, max_tokens: budgetTokens }
      }
    };
  }

  // Budget-paradigm models only accept on/off; effort strings do not apply.
  if (reasoningProfile.paradigm === "budget") {
    const isDefaultOrEnabled =
      reasoningVariant === undefined || reasoningVariant === "enabled";
    if (isDefaultOrEnabled) {
      return { openrouter: { include_reasoning: true } };
    }
    return undefined;
  }

  // Effort-paradigm models: map the variant to an OpenAI-style effort level.
  // "max" on adaptive-effort models upgrades to "xhigh" when no direct
  // mapping exists.
  const mappedEffort = toOpenAIReasoningEffort(reasoningVariant);
  let effort = mappedEffort;
  if (effort === undefined) {
    const isAdaptiveMax =
      reasoningProfile.paradigm === "adaptive-effort" && reasoningVariant === "max";
    effort = isAdaptiveMax ? "xhigh" : undefined;
  }

  const openrouter = { include_reasoning: true };
  if (effort !== undefined) {
    openrouter.reasoning = { enabled: true, effort };
  }
  return { openrouter };
}
|
|
133
|
+
/**
 * Narrows an unknown value to a plain (non-array, non-null) object.
 *
 * @param {unknown} value - Candidate value.
 * @returns {Record<string, unknown> | undefined} The value itself when it is a
 *   plain object; otherwise `undefined`.
 */
function asRecord(value) {
  if (value === null || value === undefined) return undefined;
  if (typeof value !== "object") return undefined;
  if (Array.isArray(value)) return undefined;
  return value;
}

/**
 * Extracts the effective reasoning/thinking token budget from already-built
 * provider options, checking each provider namespace in a fixed priority
 * order: anthropic, google, bedrock, openrouter.
 *
 * @param {Record<string, unknown> | undefined} providerOptions - Options as
 *   produced by the builders in this module.
 * @returns {number | undefined} The numeric budget, or `undefined` when no
 *   provider section carries one.
 */
function getEffectiveReasoningBudgetTokens(providerOptions) {
  if (providerOptions === undefined) return undefined;

  // Anthropic: the budget only counts when extended thinking is enabled.
  const anthropicThinking = asRecord(asRecord(providerOptions["anthropic"])?.["thinking"]);
  if (
    anthropicThinking?.["type"] === "enabled" &&
    typeof anthropicThinking["budgetTokens"] === "number"
  ) {
    return anthropicThinking["budgetTokens"];
  }

  const googleThinking = asRecord(asRecord(providerOptions["google"])?.["thinkingConfig"]);
  if (typeof googleThinking?.["thinkingBudget"] === "number") {
    return googleThinking["thinkingBudget"];
  }

  const bedrockReasoning = asRecord(asRecord(providerOptions["bedrock"])?.["reasoningConfig"]);
  if (typeof bedrockReasoning?.["budgetTokens"] === "number") {
    return bedrockReasoning["budgetTokens"];
  }

  const openrouterReasoning = asRecord(asRecord(providerOptions["openrouter"])?.["reasoning"]);
  if (typeof openrouterReasoning?.["max_tokens"] === "number") {
    return openrouterReasoning["max_tokens"];
  }

  return undefined;
}
|
|
161
|
+
/**
 * Dispatches per-provider reasoning option construction.
 *
 * @param {{provider: string, model: string, reasoning?: {budgetTokens?: number}}} config
 * @returns {object | undefined} Provider-namespaced options for the AI SDK
 *   call, or `undefined` when the provider/variant combination yields none.
 */
function buildProviderOptions(config) {
  const { provider, model, reasoning } = config;
  const lowerModel = model.toLowerCase();
  const { reasoningVariant, hasInvalidRequestedReasoningVariant } =
    getSelectedReasoningVariant(config);
  const budgetTokens = reasoning?.budgetTokens;

  // An unsupported requested variant produces no options at all.
  if (hasInvalidRequestedReasoningVariant) return undefined;

  if (provider === "anthropic") {
    return buildAnthropicProviderOptions({
      model,
      reasoningVariant,
      budgetTokens,
      capable: isReasoningCapableModel(model, "anthropic")
    });
  }

  if (provider === "bedrock") {
    // Non-capable models, and capable models that are neither Anthropic nor
    // Nova, get an empty bedrock namespace.
    if (!isReasoningCapableModel(model, "bedrock")) return { bedrock: {} };
    const isAnthropicModel = lowerModel.includes("anthropic");
    const isNovaModel = lowerModel.includes("nova");
    if (!isAnthropicModel && !isNovaModel) return { bedrock: {} };

    const bedrock = {};
    if (reasoningVariant === "disabled") {
      bedrock["reasoningConfig"] = { type: "disabled" };
      return { bedrock };
    }

    if (isAnthropicModel) {
      // Anthropic-on-Bedrock uses a token budget (default applied, min 1).
      const coercedBudget = coerceBudgetTokens(
        budgetTokens ?? BEDROCK_DEFAULT_BUDGET_TOKENS,
        1
      );
      if (coercedBudget === undefined) return { bedrock: {} };
      bedrock["reasoningConfig"] = { type: "enabled", budgetTokens: coercedBudget };
      if (supportsAnthropicInterleavedThinking(model)) {
        bedrock["anthropicBeta"] = [ANTHROPIC_INTERLEAVED_THINKING_BETA];
      }
      return { bedrock };
    }

    // Nova models take an effort level instead of a budget.
    switch (reasoningVariant) {
      case "low":
      case "medium":
      case "high":
        bedrock["reasoningConfig"] = {
          type: "enabled",
          maxReasoningEffort: reasoningVariant
        };
        break;
      default:
        break;
    }
    return { bedrock };
  }

  // Claude models on Vertex reuse the Anthropic option builder.
  if (provider === "vertex" && lowerModel.includes("claude")) {
    return buildAnthropicProviderOptions({
      model,
      reasoningVariant,
      budgetTokens,
      capable: isReasoningCapableModel(model, "vertex")
    });
  }

  const isGeminiStyle =
    provider === "google" || (provider === "vertex" && !lowerModel.includes("claude"));
  if (isGeminiStyle) {
    const profile = getReasoningProfile(provider, model);
    const includeThoughts = profile.capable && reasoningVariant !== "disabled";
    const usesThinkingLevel = profile.paradigm === "thinking-level";
    const isLevelVariant =
      reasoningVariant === "minimal" ||
      reasoningVariant === "low" ||
      reasoningVariant === "medium" ||
      reasoningVariant === "high";
    const thinkingLevel =
      includeThoughts && usesThinkingLevel && isLevelVariant
        ? reasoningVariant
        : undefined;
    const coercedThinkingBudget = coerceBudgetTokens(
      budgetTokens ?? GOOGLE_DEFAULT_BUDGET_TOKENS,
      1
    );

    const thinkingConfig = { includeThoughts };
    if (includeThoughts && usesThinkingLevel && thinkingLevel !== undefined) {
      thinkingConfig.thinkingLevel = thinkingLevel;
    }
    if (
      includeThoughts &&
      profile.paradigm === "budget" &&
      coercedThinkingBudget !== undefined
    ) {
      thinkingConfig.thinkingBudget = coercedThinkingBudget;
    }
    return { google: { thinkingConfig } };
  }

  if (provider === "openai") {
    const effortCandidate = toOpenAIReasoningEffort(reasoningVariant);
    if (effortCandidate && supportsOpenAIReasoningEffort(model, effortCandidate)) {
      const openai = { reasoningEffort: effortCandidate };
      // Summaries are only requested when reasoning is actually on.
      if (effortCandidate !== "none") {
        openai.reasoningSummary = "auto";
      }
      return { openai };
    }
    // NOTE(review): falls through to the generic undefined return when the
    // effort is unsupported — presumably intentional; confirm against callers.
  }

  if (isOpenRouterGatewayProvider(provider)) {
    return buildOpenRouterProviderOptions({ provider, model, reasoningVariant, budgetTokens });
  }

  if (provider === "openai-compatible") {
    const profile = getReasoningProfile(provider, model);
    if (!profile.capable) return undefined;
    const reasoningEffort = toOpenAICompatibleReasoningEffort(reasoningVariant);
    if (reasoningEffort === undefined) return undefined;
    return { openaiCompatible: { reasoningEffort } };
  }

  return undefined;
}
|
|
60
260
|
// Public module surface: the provider-options builder plus the helper that
// reads the effective reasoning token budget back out of built options.
export {
  buildProviderOptions,
  getEffectiveReasoningBudgetTokens
};
|