@juspay/neurolink 9.54.2 → 9.54.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/browser/neurolink.min.js +259 -259
  3. package/dist/cli/factories/commandFactory.js +43 -4
  4. package/dist/cli/utils/abortHandler.d.ts +22 -0
  5. package/dist/cli/utils/abortHandler.js +53 -0
  6. package/dist/core/baseProvider.d.ts +7 -1
  7. package/dist/core/baseProvider.js +19 -0
  8. package/dist/lib/core/baseProvider.d.ts +7 -1
  9. package/dist/lib/core/baseProvider.js +19 -0
  10. package/dist/lib/neurolink.js +17 -1
  11. package/dist/lib/providers/anthropic.js +1 -0
  12. package/dist/lib/providers/anthropicBaseProvider.js +1 -0
  13. package/dist/lib/providers/azureOpenai.js +1 -0
  14. package/dist/lib/providers/googleAiStudio.js +1 -0
  15. package/dist/lib/providers/googleVertex.js +1 -0
  16. package/dist/lib/providers/huggingFace.js +1 -0
  17. package/dist/lib/providers/litellm.js +1 -0
  18. package/dist/lib/providers/mistral.js +1 -0
  19. package/dist/lib/providers/openAI.js +1 -0
  20. package/dist/lib/providers/openRouter.js +1 -0
  21. package/dist/lib/providers/openaiCompatible.js +1 -0
  22. package/dist/lib/types/streamTypes.d.ts +6 -0
  23. package/dist/lib/utils/toolCallRepair.d.ts +21 -0
  24. package/dist/lib/utils/toolCallRepair.js +298 -0
  25. package/dist/neurolink.js +17 -1
  26. package/dist/providers/anthropic.js +1 -0
  27. package/dist/providers/anthropicBaseProvider.js +1 -0
  28. package/dist/providers/azureOpenai.js +1 -0
  29. package/dist/providers/googleAiStudio.js +1 -0
  30. package/dist/providers/googleVertex.js +1 -0
  31. package/dist/providers/huggingFace.js +1 -0
  32. package/dist/providers/litellm.js +1 -0
  33. package/dist/providers/mistral.js +1 -0
  34. package/dist/providers/openAI.js +1 -0
  35. package/dist/providers/openRouter.js +1 -0
  36. package/dist/providers/openaiCompatible.js +1 -0
  37. package/dist/types/streamTypes.d.ts +6 -0
  38. package/dist/utils/toolCallRepair.d.ts +21 -0
  39. package/dist/utils/toolCallRepair.js +297 -0
  40. package/package.json +1 -1
@@ -0,0 +1,298 @@
1
+ /**
2
+ * Schema-Driven Tool Call Repair (BZ-665)
3
+ *
4
+ * Implements `experimental_repairToolCall` for the Vercel AI SDK.
5
+ * When an LLM sends a wrong tool name or wrong parameter names,
6
+ * this module attempts deterministic, schema-driven repair:
7
+ *
8
+ * 1. Tool name: case-insensitive → substring → Levenshtein
9
+ * 2. Param names: compare against JSON schema properties dynamically
10
+ * 3. Type coercion: string→number, JSON string→object/array per schema
11
+ *
12
+ * Zero static alias maps. The tool's JSON schema is the only source of truth.
13
+ *
14
+ * @module utils/toolCallRepair
15
+ */
16
+ import { logger } from "./logger.js";
17
+ /**
18
+ * Create an `experimental_repairToolCall` handler for streamText/generateText.
19
+ * Fully dynamic — reads the tool schema at repair time, no configuration needed.
20
+ */
21
+ export function createToolCallRepair() {
22
+ return async ({ toolCall, tools, inputSchema, error }) => {
23
+ // Import error classes lazily to avoid circular deps at module level
24
+ const { NoSuchToolError: NoSuchTool, InvalidToolInputError: InvalidInput } = await import("ai");
25
+ if (NoSuchTool.isInstance(error)) {
26
+ return repairToolName(toolCall, Object.keys(tools));
27
+ }
28
+ if (InvalidInput.isInstance(error)) {
29
+ try {
30
+ const schema = await inputSchema({ toolName: toolCall.toolName });
31
+ return repairToolInput(toolCall, schema);
32
+ }
33
+ catch {
34
+ // inputSchema() failed — can't repair without schema
35
+ return null;
36
+ }
37
+ }
38
+ return null;
39
+ };
40
+ }
41
+ // ─── Tool Name Repair ──────────────────────────────────────────────
42
+ /**
43
+ * Attempt to match a wrong tool name against available tool names.
44
+ * Strategies (in order): case-insensitive exact → substring → Levenshtein.
45
+ */
46
+ function repairToolName(toolCall, availableTools) {
47
+ const called = toolCall.toolName;
48
+ // Guard: empty or whitespace-only tool name cannot be meaningfully repaired
49
+ if (!called || called.trim().length === 0) {
50
+ return null;
51
+ }
52
+ // 1. Case-insensitive exact match
53
+ const ciMatch = availableTools.find((t) => t.toLowerCase() === called.toLowerCase());
54
+ if (ciMatch) {
55
+ logger.debug(`[ToolCallRepair] Name repair (case): "${called}" → "${ciMatch}"`);
56
+ return { ...toolCall, toolName: ciMatch };
57
+ }
58
+ // 2. Substring match: "search_file" is substring of "search_files" or vice versa.
59
+ // Only accept when exactly one tool matches to avoid ambiguous repairs.
60
+ const calledLower = called.toLowerCase();
61
+ const subCandidates = availableTools.filter((t) => {
62
+ const tLower = t.toLowerCase();
63
+ return tLower.includes(calledLower) || calledLower.includes(tLower);
64
+ });
65
+ if (subCandidates.length === 1) {
66
+ logger.debug(`[ToolCallRepair] Name repair (substring): "${called}" → "${subCandidates[0]}"`);
67
+ return { ...toolCall, toolName: subCandidates[0] };
68
+ }
69
+ // 3. Levenshtein distance — accept if normalized distance < 0.3
70
+ // Compare by normalized score (not raw edits) so length differences don't skew selection.
71
+ let bestMatch = null;
72
+ let bestNormalized = Infinity;
73
+ for (const t of availableTools) {
74
+ const dist = levenshtein(calledLower, t.toLowerCase());
75
+ const maxLen = Math.max(called.length, t.length);
76
+ const normalized = maxLen === 0 ? 0 : dist / maxLen;
77
+ if (normalized < 0.3 && normalized < bestNormalized) {
78
+ bestNormalized = normalized;
79
+ bestMatch = t;
80
+ }
81
+ }
82
+ if (bestMatch) {
83
+ logger.debug(`[ToolCallRepair] Name repair (levenshtein ${bestNormalized.toFixed(2)}): "${called}" → "${bestMatch}"`);
84
+ return { ...toolCall, toolName: bestMatch };
85
+ }
86
+ logger.debug(`[ToolCallRepair] Could not repair tool name "${called}". Available: [${availableTools.join(", ")}]`);
87
+ return null;
88
+ }
89
+ // ─── Tool Input Repair ─────────────────────────────────────────────
90
+ /**
91
+ * Attempt to repair wrong parameter names and types using the JSON schema.
92
+ * Compares LLM-provided keys against schema properties dynamically.
93
+ *
94
+ * `toolCall.input` is a JSON string per LanguageModelV3ToolCall.
95
+ */
96
+ function repairToolInput(toolCall, schema) {
97
+ let args;
98
+ try {
99
+ args = JSON.parse(toolCall.input);
100
+ }
101
+ catch {
102
+ return null; // input is not valid JSON — can't repair
103
+ }
104
+ if (!args || typeof args !== "object" || Array.isArray(args)) {
105
+ return null;
106
+ }
107
+ const schemaProps = schema.properties;
108
+ if (!schemaProps) {
109
+ return null;
110
+ }
111
+ const expectedKeys = Object.keys(schemaProps);
112
+ const inputObj = args;
113
+ const inputKeys = Object.keys(inputObj);
114
+ const repaired = Object.create(null);
115
+ let didRepair = false;
116
+ const dropUnknown = schema.additionalProperties === false;
117
+ for (const inputKey of inputKeys) {
118
+ // Already matches a schema property — keep as-is
119
+ if (expectedKeys.includes(inputKey)) {
120
+ repaired[inputKey] = inputObj[inputKey];
121
+ continue;
122
+ }
123
+ // Try to find a matching schema property
124
+ const mapped = findMatchingKey(inputKey, expectedKeys);
125
+ if (mapped) {
126
+ // Don't overwrite an already-populated canonical key — but still mark as repaired
127
+ // so the function returns the corrected object instead of null.
128
+ if (Object.prototype.hasOwnProperty.call(repaired, mapped)) {
129
+ didRepair = true;
130
+ continue;
131
+ }
132
+ logger.debug(`[ToolCallRepair] Param repair: "${inputKey}" → "${mapped}" (tool: ${toolCall.toolName})`);
133
+ repaired[mapped] = inputObj[inputKey];
134
+ didRepair = true;
135
+ }
136
+ else if (dropUnknown) {
137
+ // Schema forbids extra properties — drop unmapped keys
138
+ logger.debug(`[ToolCallRepair] Dropping unmapped key "${inputKey}" (additionalProperties: false, tool: ${toolCall.toolName})`);
139
+ didRepair = true;
140
+ }
141
+ else {
142
+ // Unknown key — pass through (schema allows additionalProperties)
143
+ repaired[inputKey] = inputObj[inputKey];
144
+ }
145
+ }
146
+ // Type coercion based on schema types
147
+ for (const key of Object.keys(repaired)) {
148
+ const propSchema = schemaProps[key];
149
+ if (!propSchema) {
150
+ continue;
151
+ }
152
+ const coerced = coerceType(repaired[key], propSchema);
153
+ if (coerced !== repaired[key]) {
154
+ logger.debug(`[ToolCallRepair] Type coercion on "${key}": ${typeof repaired[key]} → ${typeof coerced} (tool: ${toolCall.toolName})`);
155
+ repaired[key] = coerced;
156
+ didRepair = true;
157
+ }
158
+ }
159
+ if (didRepair) {
160
+ return { ...toolCall, input: JSON.stringify(repaired) };
161
+ }
162
+ return null;
163
+ }
164
+ /**
165
+ * Find a matching schema key for a mismatched input key.
166
+ * Strategies: case-insensitive → Levenshtein (threshold ≤2 edits).
167
+ */
168
+ function findMatchingKey(inputKey, schemaKeys) {
169
+ const inputLower = inputKey.toLowerCase();
170
+ // Case-insensitive match
171
+ const ciMatch = schemaKeys.find((k) => k.toLowerCase() === inputLower);
172
+ if (ciMatch) {
173
+ return ciMatch;
174
+ }
175
+ // Levenshtein — threshold ≤2 edits
176
+ let best = null;
177
+ let bestDist = Infinity;
178
+ for (const k of schemaKeys) {
179
+ const dist = levenshtein(inputLower, k.toLowerCase());
180
+ if (dist <= 2 && dist < bestDist) {
181
+ bestDist = dist;
182
+ best = k;
183
+ }
184
+ }
185
+ return best;
186
+ }
187
+ // ─── Type Coercion ─────────────────────────────────────────────────
188
+ /**
189
+ * Coerce a value to match the expected schema type.
190
+ * Handles: string→number, JSON string→object, JSON string→array, value→[value].
191
+ */
192
+ function coerceType(value, propSchema) {
193
+ const expectedType = propSchema.type;
194
+ if (!expectedType || value === null || value === undefined) {
195
+ return value;
196
+ }
197
+ // String → Number (trim first, reject empty/whitespace, require finite result)
198
+ if (expectedType === "number" && typeof value === "string") {
199
+ const trimmed = value.trim();
200
+ if (trimmed !== "") {
201
+ const num = Number(trimmed);
202
+ if (isFinite(num)) {
203
+ return num;
204
+ }
205
+ }
206
+ }
207
+ // String → Integer (strict: reject "12abc", "3.7", etc.)
208
+ if (expectedType === "integer" && typeof value === "string") {
209
+ const trimmed = value.trim();
210
+ if (/^[+-]?\d+$/.test(trimmed)) {
211
+ const num = Number(trimmed);
212
+ if (Number.isSafeInteger(num)) {
213
+ return num;
214
+ }
215
+ }
216
+ }
217
+ // String → Boolean
218
+ if (expectedType === "boolean" && typeof value === "string") {
219
+ if (value.toLowerCase() === "true") {
220
+ return true;
221
+ }
222
+ if (value.toLowerCase() === "false") {
223
+ return false;
224
+ }
225
+ }
226
+ // JSON string → Object
227
+ if (expectedType === "object" && typeof value === "string") {
228
+ try {
229
+ const parsed = JSON.parse(value);
230
+ if (parsed && typeof parsed === "object" && !Array.isArray(parsed)) {
231
+ return parsed;
232
+ }
233
+ }
234
+ catch {
235
+ // Not valid JSON — return as-is
236
+ }
237
+ }
238
+ // JSON string → Array
239
+ if (expectedType === "array" && typeof value === "string") {
240
+ try {
241
+ const parsed = JSON.parse(value);
242
+ if (Array.isArray(parsed)) {
243
+ return parsed;
244
+ }
245
+ }
246
+ catch {
247
+ // Not valid JSON — return as-is
248
+ }
249
+ }
250
+ // Single non-string value → Array (wrap).
251
+ // Strings are excluded because they are more likely a JSON-encoded array
252
+ // that failed to parse above, and wrapping "foo" into ["foo"] is rarely correct.
253
+ if (expectedType === "array" &&
254
+ !Array.isArray(value) &&
255
+ typeof value !== "string") {
256
+ return [value];
257
+ }
258
+ return value;
259
+ }
260
+ // ─── Levenshtein Distance ──────────────────────────────────────────
261
+ /**
262
+ * Compute Levenshtein edit distance between two strings.
263
+ * Uses the iterative matrix approach — O(m*n) time, O(min(m,n)) space.
264
+ */
265
+ function levenshtein(a, b) {
266
+ if (a === b) {
267
+ return 0;
268
+ }
269
+ if (a.length === 0) {
270
+ return b.length;
271
+ }
272
+ if (b.length === 0) {
273
+ return a.length;
274
+ }
275
+ // Use shorter string as column to minimize space
276
+ if (a.length > b.length) {
277
+ [a, b] = [b, a];
278
+ }
279
+ const aLen = a.length;
280
+ const bLen = b.length;
281
+ let prev = new Array(aLen + 1);
282
+ let curr = new Array(aLen + 1);
283
+ for (let i = 0; i <= aLen; i++) {
284
+ prev[i] = i;
285
+ }
286
+ for (let j = 1; j <= bLen; j++) {
287
+ curr[0] = j;
288
+ for (let i = 1; i <= aLen; i++) {
289
+ const cost = a[i - 1] === b[j - 1] ? 0 : 1;
290
+ curr[i] = Math.min(prev[i] + 1, // deletion
291
+ curr[i - 1] + 1, // insertion
292
+ prev[i - 1] + cost);
293
+ }
294
+ [prev, curr] = [curr, prev];
295
+ }
296
+ return prev[aLen];
297
+ }
298
+ //# sourceMappingURL=toolCallRepair.js.map
package/dist/neurolink.js CHANGED
@@ -4831,15 +4831,31 @@ Current user's request: ${currentInput}`;
4831
4831
  catch {
4832
4832
  /* non-blocking */
4833
4833
  }
4834
- const fallbackRoute = ModelRouter.getFallbackRoute(originalPrompt || enhancedOptions.input.text || "", {
4834
+ // BZ-1341: Support fallback provider override via options or env vars
4835
+ const optFallbackProvider = enhancedOptions.fallbackProvider?.trim() || undefined;
4836
+ const optFallbackModel = enhancedOptions.fallbackModel?.trim() || undefined;
4837
+ const envFallbackProvider = process.env.FALLBACK_PROVIDER?.trim() || undefined;
4838
+ const envFallbackModel = process.env.FALLBACK_MODEL?.trim() || undefined;
4839
+ const modelConfigRoute = ModelRouter.getFallbackRoute(originalPrompt || enhancedOptions.input.text || "", {
4835
4840
  provider: providerName,
4836
4841
  model: enhancedOptions.model || "gpt-4o",
4837
4842
  reasoning: "primary failed",
4838
4843
  confidence: 0.5,
4839
4844
  }, { fallbackStrategy: "auto" });
4845
+ const fallbackRoute = {
4846
+ ...modelConfigRoute,
4847
+ provider: optFallbackProvider ?? envFallbackProvider ?? modelConfigRoute.provider,
4848
+ model: optFallbackModel ?? envFallbackModel ?? modelConfigRoute.model,
4849
+ };
4840
4850
  logger.warn("Retrying with fallback provider", {
4841
4851
  originalProvider: providerName,
4842
4852
  fallbackProvider: fallbackRoute.provider,
4853
+ fallbackModel: fallbackRoute.model,
4854
+ fallbackSource: optFallbackProvider || optFallbackModel
4855
+ ? "options"
4856
+ : envFallbackProvider || envFallbackModel
4857
+ ? "env"
4858
+ : "model_config",
4843
4859
  reason: errorMsg,
4844
4860
  });
4845
4861
  try {
@@ -799,6 +799,7 @@ export class AnthropicProvider extends BaseProvider {
799
799
  stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
800
800
  toolChoice: resolveToolChoice(options, tools, shouldUseTools),
801
801
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
802
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
802
803
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
803
804
  onStepFinish: ({ toolCalls, toolResults }) => {
804
805
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
@@ -94,6 +94,7 @@ export class AnthropicProviderV2 extends BaseProvider {
94
94
  toolChoice: resolveToolChoice(options, tools, shouldUseTools),
95
95
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
96
96
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
97
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
97
98
  onStepFinish: ({ toolCalls, toolResults }) => {
98
99
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
99
100
  logger.warn("[AnthropicBaseProvider] Failed to store tool executions", {
@@ -124,6 +124,7 @@ export class AzureOpenAIProvider extends BaseProvider {
124
124
  stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
125
125
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
126
126
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
127
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
127
128
  onStepFinish: (event) => {
128
129
  this.handleToolExecutionStorage([...event.toolCalls], [...event.toolResults], options, new Date()).catch((error) => {
129
130
  logger.warn("[AzureOpenaiProvider] Failed to store tool executions", {
@@ -478,6 +478,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
478
478
  toolChoice: resolveToolChoice(options, tools, shouldUseTools),
479
479
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
480
480
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
481
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
481
482
  // Gemini 3: use thinkingLevel via providerOptions
482
483
  // Gemini 2.5: use thinkingBudget via providerOptions
483
484
  ...(options.thinkingConfig?.enabled && {
@@ -994,6 +994,7 @@ export class GoogleVertexProvider extends BaseProvider {
994
994
  }),
995
995
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
996
996
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
997
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
997
998
  ...(options.thinkingConfig?.enabled && {
998
999
  providerOptions: {
999
1000
  vertex: {
@@ -139,6 +139,7 @@ export class HuggingFaceProvider extends BaseProvider {
139
139
  toolChoice: resolveToolChoice(options, (shouldUseTools ? streamOptions.tools || allTools : {}), shouldUseTools),
140
140
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
141
141
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
142
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
142
143
  onStepFinish: ({ toolCalls, toolResults }) => {
143
144
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
144
145
  logger.warn("[HuggingFaceProvider] Failed to store tool executions", {
@@ -169,6 +169,7 @@ export class LiteLLMProvider extends BaseProvider {
169
169
  }),
170
170
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
171
171
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
172
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
172
173
  onError: (event) => {
173
174
  const error = event.error;
174
175
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -67,6 +67,7 @@ export class MistralProvider extends BaseProvider {
67
67
  toolChoice: resolveToolChoice(options, tools, shouldUseTools),
68
68
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
69
69
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
70
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
70
71
  onStepFinish: ({ toolCalls, toolResults }) => {
71
72
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
72
73
  logger.warn("[MistralProvider] Failed to store tool executions", {
@@ -330,6 +330,7 @@ export class OpenAIProvider extends BaseProvider {
330
330
  stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
331
331
  toolChoice: resolvedToolChoice,
332
332
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
333
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
333
334
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
334
335
  onStepFinish: ({ toolCalls, toolResults }) => {
335
336
  logger.info("Tool execution completed", {
@@ -252,6 +252,7 @@ export class OpenRouterProvider extends BaseProvider {
252
252
  }),
253
253
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
254
254
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
255
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
255
256
  onError: (event) => {
256
257
  const error = event.error;
257
258
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -195,6 +195,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
195
195
  stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
196
196
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
197
197
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
198
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
198
199
  onStepFinish: (event) => {
199
200
  this.handleToolExecutionStorage([...event.toolCalls], [...event.toolResults], options, new Date()).catch((error) => {
200
201
  logger.warn("[OpenAiCompatibleProvider] Failed to store tool executions", {
@@ -325,6 +325,8 @@ export type StreamOptions = {
325
325
  /** AbortSignal for external cancellation of the AI call */
326
326
  abortSignal?: AbortSignal;
327
327
  disableTools?: boolean;
328
+ /** Disable the schema-driven tool call repair mechanism (BZ-665). Default: false (repair enabled). */
329
+ disableToolCallRepair?: boolean;
328
330
  maxSteps?: number;
329
331
  /**
330
332
  * Tool choice configuration for streaming generation.
@@ -432,6 +434,10 @@ export type StreamOptions = {
432
434
  * @internal Set by NeuroLink SDK — not typically used directly by consumers.
433
435
  */
434
436
  fileRegistry?: unknown;
437
+ /** BZ-1341: Override fallback provider name (takes precedence over env/model config). */
438
+ fallbackProvider?: string;
439
+ /** BZ-1341: Override fallback model name (takes precedence over env/model config). */
440
+ fallbackModel?: string;
435
441
  /** Callback invoked when streaming completes successfully. */
436
442
  onFinish?: OnFinishCallback;
437
443
  /** Callback invoked when streaming encounters an error. */
@@ -0,0 +1,21 @@
1
/**
 * Schema-Driven Tool Call Repair (BZ-665)
 *
 * Implements `experimental_repairToolCall` for the Vercel AI SDK.
 * When an LLM sends a wrong tool name or wrong parameter names,
 * this module attempts deterministic, schema-driven repair:
 *
 * 1. Tool name: case-insensitive → substring → Levenshtein
 * 2. Param names: compare against JSON schema properties dynamically
 * 3. Type coercion: string→number, JSON string→object/array per schema
 *
 * Zero static alias maps. The tool's JSON schema is the only source of truth.
 *
 * @module utils/toolCallRepair
 */
import type { ToolCallRepairFunction, ToolSet } from "ai";
/**
 * Create an `experimental_repairToolCall` handler for streamText/generateText.
 * Fully dynamic — reads the tool schema at repair time, no configuration needed.
 *
 * @returns A repair function for the `experimental_repairToolCall` option;
 *          it resolves to a corrected tool call, or `null` when no safe
 *          repair can be made.
 */
export declare function createToolCallRepair(): ToolCallRepairFunction<ToolSet>;