@juspay/neurolink 9.54.2 → 9.54.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/browser/neurolink.min.js +259 -259
  3. package/dist/cli/factories/commandFactory.js +43 -4
  4. package/dist/cli/utils/abortHandler.d.ts +22 -0
  5. package/dist/cli/utils/abortHandler.js +53 -0
  6. package/dist/core/baseProvider.d.ts +7 -1
  7. package/dist/core/baseProvider.js +19 -0
  8. package/dist/lib/core/baseProvider.d.ts +7 -1
  9. package/dist/lib/core/baseProvider.js +19 -0
  10. package/dist/lib/neurolink.js +17 -1
  11. package/dist/lib/providers/anthropic.js +1 -0
  12. package/dist/lib/providers/anthropicBaseProvider.js +1 -0
  13. package/dist/lib/providers/azureOpenai.js +1 -0
  14. package/dist/lib/providers/googleAiStudio.js +1 -0
  15. package/dist/lib/providers/googleVertex.js +1 -0
  16. package/dist/lib/providers/huggingFace.js +1 -0
  17. package/dist/lib/providers/litellm.js +1 -0
  18. package/dist/lib/providers/mistral.js +1 -0
  19. package/dist/lib/providers/openAI.js +1 -0
  20. package/dist/lib/providers/openRouter.js +1 -0
  21. package/dist/lib/providers/openaiCompatible.js +1 -0
  22. package/dist/lib/types/streamTypes.d.ts +6 -0
  23. package/dist/lib/utils/toolCallRepair.d.ts +21 -0
  24. package/dist/lib/utils/toolCallRepair.js +298 -0
  25. package/dist/neurolink.js +17 -1
  26. package/dist/providers/anthropic.js +1 -0
  27. package/dist/providers/anthropicBaseProvider.js +1 -0
  28. package/dist/providers/azureOpenai.js +1 -0
  29. package/dist/providers/googleAiStudio.js +1 -0
  30. package/dist/providers/googleVertex.js +1 -0
  31. package/dist/providers/huggingFace.js +1 -0
  32. package/dist/providers/litellm.js +1 -0
  33. package/dist/providers/mistral.js +1 -0
  34. package/dist/providers/openAI.js +1 -0
  35. package/dist/providers/openRouter.js +1 -0
  36. package/dist/providers/openaiCompatible.js +1 -0
  37. package/dist/types/streamTypes.d.ts +6 -0
  38. package/dist/utils/toolCallRepair.d.ts +21 -0
  39. package/dist/utils/toolCallRepair.js +297 -0
  40. package/package.json +1 -1
@@ -20,6 +20,7 @@ import { initializeCliParser } from "../parser.js";
20
20
  import { formatFileSize, saveAudioToFile } from "../utils/audioFileUtils.js";
21
21
  import { resolveFilePaths } from "../utils/pathResolver.js";
22
22
  import { animatedWrite } from "../utils/typewriter.js";
23
+ import { createStreamAbortHandler } from "../utils/abortHandler.js";
23
24
  import { formatVideoFileSize, getVideoMetadataSummary, saveVideoToFile, } from "../utils/videoFileUtils.js";
24
25
  import { OllamaCommandFactory } from "./ollamaCommandFactory.js";
25
26
  import { SageMakerCommandFactory } from "./sagemakerCommandFactory.js";
@@ -2176,6 +2177,11 @@ export class CLICommandFactory {
2176
2177
  let lastImageBase64;
2177
2178
  let contentReceived = false;
2178
2179
  const abortController = new AbortController();
2180
+ // BZ-667: Wire SIGINT to abort stream gracefully
2181
+ const abortHandler = createStreamAbortHandler();
2182
+ abortHandler.signal.addEventListener("abort", () => {
2183
+ abortController.abort();
2184
+ }, { once: true });
2179
2185
  // Create timeout promise for stream consumption (default: 30 seconds, respects user-provided timeout)
2180
2186
  const streamTimeout = options.timeout && typeof options.timeout === "number"
2181
2187
  ? options.timeout * 1000
@@ -2197,22 +2203,37 @@ export class CLICommandFactory {
2197
2203
  clearTimeout(timeoutId);
2198
2204
  });
2199
2205
  });
2206
+ const streamIterator = stream.stream[Symbol.asyncIterator]();
2200
2207
  try {
2201
2208
  // Process the stream with timeout handling
2202
- const streamIterator = stream.stream[Symbol.asyncIterator]();
2203
2209
  let timeoutActive = true;
2210
+ // BZ-667: Create an abort promise that rejects when the user presses Ctrl+C,
2211
+ // so we can race it against streamIterator.next() and unblock pending reads.
2212
+ const abortPromise = new Promise((_, reject) => {
2213
+ if (abortHandler.signal.aborted) {
2214
+ reject(new DOMException("Stream aborted", "AbortError"));
2215
+ return;
2216
+ }
2217
+ abortHandler.signal.addEventListener("abort", () => {
2218
+ reject(new DOMException("Stream aborted", "AbortError"));
2219
+ }, { once: true });
2220
+ });
2204
2221
  while (true) {
2205
2222
  let nextResult;
2206
2223
  if (timeoutActive && !contentReceived) {
2207
- // Race between next chunk and timeout for first chunk only
2224
+ // Race between next chunk, timeout, and abort signal
2208
2225
  nextResult = await Promise.race([
2209
2226
  streamIterator.next(),
2210
2227
  timeoutPromise,
2228
+ abortPromise,
2211
2229
  ]);
2212
2230
  }
2213
2231
  else {
2214
- // No timeout for subsequent chunks
2215
- nextResult = await streamIterator.next();
2232
+ // Race between next chunk and abort signal
2233
+ nextResult = await Promise.race([
2234
+ streamIterator.next(),
2235
+ abortPromise,
2236
+ ]);
2216
2237
  }
2217
2238
  if (nextResult.done) {
2218
2239
  break;
@@ -2266,8 +2287,26 @@ export class CLICommandFactory {
2266
2287
  }
2267
2288
  catch (error) {
2268
2289
  abortController.abort(); // Clean up timeout
2290
+ // BZ-667: Close the stream iterator so the provider connection is released.
2291
+ // Wrap in try/catch to prevent cleanup failures from masking the original error.
2292
+ try {
2293
+ await streamIterator.return?.();
2294
+ }
2295
+ catch {
2296
+ // Iterator cleanup failed — swallow so the original error propagates
2297
+ }
2298
+ abortHandler.cleanup();
2299
+ // BZ-667: Handle graceful abort — return partial content instead of throwing
2300
+ if (abortHandler.signal.aborted ||
2301
+ (error instanceof Error && error.name === "AbortError")) {
2302
+ if (!options.quiet) {
2303
+ process.stdout.write("\n");
2304
+ }
2305
+ return { content: fullContent, imageBase64: lastImageBase64 };
2306
+ }
2269
2307
  throw error;
2270
2308
  }
2309
+ abortHandler.cleanup();
2271
2310
  if (!contentReceived) {
2272
2311
  throw new Error("\n❌ No content received from stream\n" +
2273
2312
  "Check your credentials and provider configuration");
@@ -0,0 +1,22 @@
1
+ /**
2
+ * CLI Abort Handler (BZ-667)
3
+ *
4
+ * Bridges SIGINT (Ctrl+C) to an AbortController for graceful stream cancellation.
5
+ * First Ctrl+C aborts the stream and shows "Stream cancelled."
6
+ * Second Ctrl+C within 1 second force-exits the process.
7
+ *
8
+ * Uses `prependListener` so the stream handler fires BEFORE the top-level
9
+ * SIGINT handler in cli/index.ts (which calls process.exit). The listener
10
+ * remains registered until `cleanup()` removes it. On the first Ctrl+C the
11
+ * stream is cancelled gracefully; only a rapid second press exits.
12
+ *
13
+ * @module cli/utils/abortHandler
14
+ */
15
+ /**
16
+ * Create an abort handler that wires SIGINT to an AbortController.
17
+ * Call cleanup() when the stream finishes (success or error) to remove listeners.
18
+ */
19
+ export declare function createStreamAbortHandler(): {
20
+ signal: AbortSignal;
21
+ cleanup: () => void;
22
+ };
@@ -0,0 +1,53 @@
1
+ /**
2
+ * CLI Abort Handler (BZ-667)
3
+ *
4
+ * Bridges SIGINT (Ctrl+C) to an AbortController for graceful stream cancellation.
5
+ * First Ctrl+C aborts the stream and shows "Stream cancelled."
6
+ * Second Ctrl+C within 1 second force-exits the process.
7
+ *
8
+ * Uses `prependListener` so the stream handler fires BEFORE the top-level
9
+ * SIGINT handler in cli/index.ts (which calls process.exit). The listener
10
+ * remains registered until `cleanup()` removes it. On the first Ctrl+C the
11
+ * stream is cancelled gracefully; only a rapid second press exits.
12
+ *
13
+ * @module cli/utils/abortHandler
14
+ */
15
+ import chalk from "chalk";
16
+ /**
17
+ * Create an abort handler that wires SIGINT to an AbortController.
18
+ * Call cleanup() when the stream finishes (success or error) to remove listeners.
19
+ */
20
+ export function createStreamAbortHandler() {
21
+ const controller = new AbortController();
22
+ let aborted = false;
23
+ let forceExitTimer = null;
24
+ const sigintHandler = () => {
25
+ if (aborted) {
26
+ // Second Ctrl+C — force exit
27
+ if (forceExitTimer) {
28
+ clearTimeout(forceExitTimer);
29
+ }
30
+ // Let the top-level SIGINT handler in cli/index.ts handle the exit
31
+ return;
32
+ }
33
+ aborted = true;
34
+ controller.abort();
35
+ process.stderr.write(chalk.yellow("\nStream cancelled.\n"));
36
+ // Allow force exit on second Ctrl+C within 1 second
37
+ forceExitTimer = setTimeout(() => {
38
+ forceExitTimer = null;
39
+ }, 1000);
40
+ };
41
+ // Use prependListener so our handler fires before the top-level
42
+ // SIGINT handler in cli/index.ts. cleanup() removes it after the stream ends.
43
+ process.prependListener("SIGINT", sigintHandler);
44
+ const cleanup = () => {
45
+ process.removeListener("SIGINT", sigintHandler);
46
+ if (forceExitTimer) {
47
+ clearTimeout(forceExitTimer);
48
+ forceExitTimer = null;
49
+ }
50
+ };
51
+ return { signal: controller.signal, cleanup };
52
+ }
53
+ //# sourceMappingURL=abortHandler.js.map
@@ -1,4 +1,4 @@
1
- import type { LanguageModel, ModelMessage, Tool } from "ai";
1
+ import type { LanguageModel, ModelMessage, Tool, ToolCallRepairFunction, ToolSet } from "ai";
2
2
  import type { AIProviderName } from "../constants/enums.js";
3
3
  import type { EvaluationData } from "../index.js";
4
4
  import type { NeuroLink } from "../neurolink.js";
@@ -189,6 +189,12 @@ export declare abstract class BaseProvider implements AIProvider {
189
189
  * @returns The default embedding model name, or undefined if not supported
190
190
  */
191
191
  protected getDefaultEmbeddingModel(): string | undefined;
192
+ /**
193
+ * Create an `experimental_repairToolCall` handler for streamText/generateText.
194
+ * Dynamically reads the tool's JSON schema to repair wrong names and params.
195
+ * Returns undefined when repair is disabled via options.
196
+ */
197
+ protected getToolCallRepairFn(options?: StreamOptions | TextGenerationOptions): ToolCallRepairFunction<ToolSet> | undefined;
192
198
  /**
193
199
  * Provider-specific streaming implementation (only used when tools are disabled)
194
200
  */
@@ -861,6 +861,25 @@ export class BaseProvider {
861
861
  // Default implementation returns undefined - providers override this
862
862
  return undefined;
863
863
  }
864
+ // ===================
865
+ // ===================
866
+ // BZ-665: Schema-driven tool call repair
867
+ // ===================
868
+ /**
869
+ * Create an `experimental_repairToolCall` handler for streamText/generateText.
870
+ * Dynamically reads the tool's JSON schema to repair wrong names and params.
871
+ * Returns undefined when repair is disabled via options.
872
+ */
873
+ getToolCallRepairFn(options) {
874
+ if (options?.disableToolCallRepair) {
875
+ return undefined;
876
+ }
877
+ // Lazy import to avoid circular dependency at module load time
878
+ return (async (...args) => {
879
+ const { createToolCallRepair } = await import("../utils/toolCallRepair.js");
880
+ return createToolCallRepair()(...args);
881
+ });
882
+ }
864
883
  /**
865
884
  * Get AI SDK model with middleware applied
866
885
  * This method wraps the base model with any configured middleware
@@ -1,4 +1,4 @@
1
- import type { LanguageModel, ModelMessage, Tool } from "ai";
1
+ import type { LanguageModel, ModelMessage, Tool, ToolCallRepairFunction, ToolSet } from "ai";
2
2
  import type { AIProviderName } from "../constants/enums.js";
3
3
  import type { EvaluationData } from "../index.js";
4
4
  import type { NeuroLink } from "../neurolink.js";
@@ -189,6 +189,12 @@ export declare abstract class BaseProvider implements AIProvider {
189
189
  * @returns The default embedding model name, or undefined if not supported
190
190
  */
191
191
  protected getDefaultEmbeddingModel(): string | undefined;
192
+ /**
193
+ * Create an `experimental_repairToolCall` handler for streamText/generateText.
194
+ * Dynamically reads the tool's JSON schema to repair wrong names and params.
195
+ * Returns undefined when repair is disabled via options.
196
+ */
197
+ protected getToolCallRepairFn(options?: StreamOptions | TextGenerationOptions): ToolCallRepairFunction<ToolSet> | undefined;
192
198
  /**
193
199
  * Provider-specific streaming implementation (only used when tools are disabled)
194
200
  */
@@ -861,6 +861,25 @@ export class BaseProvider {
861
861
  // Default implementation returns undefined - providers override this
862
862
  return undefined;
863
863
  }
864
+ // ===================
865
+ // ===================
866
+ // BZ-665: Schema-driven tool call repair
867
+ // ===================
868
+ /**
869
+ * Create an `experimental_repairToolCall` handler for streamText/generateText.
870
+ * Dynamically reads the tool's JSON schema to repair wrong names and params.
871
+ * Returns undefined when repair is disabled via options.
872
+ */
873
+ getToolCallRepairFn(options) {
874
+ if (options?.disableToolCallRepair) {
875
+ return undefined;
876
+ }
877
+ // Lazy import to avoid circular dependency at module load time
878
+ return (async (...args) => {
879
+ const { createToolCallRepair } = await import("../utils/toolCallRepair.js");
880
+ return createToolCallRepair()(...args);
881
+ });
882
+ }
864
883
  /**
865
884
  * Get AI SDK model with middleware applied
866
885
  * This method wraps the base model with any configured middleware
@@ -4831,15 +4831,31 @@ Current user's request: ${currentInput}`;
4831
4831
  catch {
4832
4832
  /* non-blocking */
4833
4833
  }
4834
- const fallbackRoute = ModelRouter.getFallbackRoute(originalPrompt || enhancedOptions.input.text || "", {
4834
+ // BZ-1341: Support fallback provider override via options or env vars
4835
+ const optFallbackProvider = enhancedOptions.fallbackProvider?.trim() || undefined;
4836
+ const optFallbackModel = enhancedOptions.fallbackModel?.trim() || undefined;
4837
+ const envFallbackProvider = process.env.FALLBACK_PROVIDER?.trim() || undefined;
4838
+ const envFallbackModel = process.env.FALLBACK_MODEL?.trim() || undefined;
4839
+ const modelConfigRoute = ModelRouter.getFallbackRoute(originalPrompt || enhancedOptions.input.text || "", {
4835
4840
  provider: providerName,
4836
4841
  model: enhancedOptions.model || "gpt-4o",
4837
4842
  reasoning: "primary failed",
4838
4843
  confidence: 0.5,
4839
4844
  }, { fallbackStrategy: "auto" });
4845
+ const fallbackRoute = {
4846
+ ...modelConfigRoute,
4847
+ provider: optFallbackProvider ?? envFallbackProvider ?? modelConfigRoute.provider,
4848
+ model: optFallbackModel ?? envFallbackModel ?? modelConfigRoute.model,
4849
+ };
4840
4850
  logger.warn("Retrying with fallback provider", {
4841
4851
  originalProvider: providerName,
4842
4852
  fallbackProvider: fallbackRoute.provider,
4853
+ fallbackModel: fallbackRoute.model,
4854
+ fallbackSource: optFallbackProvider || optFallbackModel
4855
+ ? "options"
4856
+ : envFallbackProvider || envFallbackModel
4857
+ ? "env"
4858
+ : "model_config",
4843
4859
  reason: errorMsg,
4844
4860
  });
4845
4861
  try {
@@ -799,6 +799,7 @@ export class AnthropicProvider extends BaseProvider {
799
799
  stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
800
800
  toolChoice: resolveToolChoice(options, tools, shouldUseTools),
801
801
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
802
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
802
803
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
803
804
  onStepFinish: ({ toolCalls, toolResults }) => {
804
805
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
@@ -94,6 +94,7 @@ export class AnthropicProviderV2 extends BaseProvider {
94
94
  toolChoice: resolveToolChoice(options, tools, shouldUseTools),
95
95
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
96
96
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
97
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
97
98
  onStepFinish: ({ toolCalls, toolResults }) => {
98
99
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
99
100
  logger.warn("[AnthropicBaseProvider] Failed to store tool executions", {
@@ -124,6 +124,7 @@ export class AzureOpenAIProvider extends BaseProvider {
124
124
  stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
125
125
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
126
126
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
127
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
127
128
  onStepFinish: (event) => {
128
129
  this.handleToolExecutionStorage([...event.toolCalls], [...event.toolResults], options, new Date()).catch((error) => {
129
130
  logger.warn("[AzureOpenaiProvider] Failed to store tool executions", {
@@ -478,6 +478,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
478
478
  toolChoice: resolveToolChoice(options, tools, shouldUseTools),
479
479
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
480
480
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
481
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
481
482
  // Gemini 3: use thinkingLevel via providerOptions
482
483
  // Gemini 2.5: use thinkingBudget via providerOptions
483
484
  ...(options.thinkingConfig?.enabled && {
@@ -994,6 +994,7 @@ export class GoogleVertexProvider extends BaseProvider {
994
994
  }),
995
995
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
996
996
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
997
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
997
998
  ...(options.thinkingConfig?.enabled && {
998
999
  providerOptions: {
999
1000
  vertex: {
@@ -139,6 +139,7 @@ export class HuggingFaceProvider extends BaseProvider {
139
139
  toolChoice: resolveToolChoice(options, (shouldUseTools ? streamOptions.tools || allTools : {}), shouldUseTools),
140
140
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
141
141
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
142
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
142
143
  onStepFinish: ({ toolCalls, toolResults }) => {
143
144
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
144
145
  logger.warn("[HuggingFaceProvider] Failed to store tool executions", {
@@ -169,6 +169,7 @@ export class LiteLLMProvider extends BaseProvider {
169
169
  }),
170
170
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
171
171
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
172
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
172
173
  onError: (event) => {
173
174
  const error = event.error;
174
175
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -67,6 +67,7 @@ export class MistralProvider extends BaseProvider {
67
67
  toolChoice: resolveToolChoice(options, tools, shouldUseTools),
68
68
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
69
69
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
70
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
70
71
  onStepFinish: ({ toolCalls, toolResults }) => {
71
72
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
72
73
  logger.warn("[MistralProvider] Failed to store tool executions", {
@@ -330,6 +330,7 @@ export class OpenAIProvider extends BaseProvider {
330
330
  stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
331
331
  toolChoice: resolvedToolChoice,
332
332
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
333
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
333
334
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
334
335
  onStepFinish: ({ toolCalls, toolResults }) => {
335
336
  logger.info("Tool execution completed", {
@@ -252,6 +252,7 @@ export class OpenRouterProvider extends BaseProvider {
252
252
  }),
253
253
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
254
254
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
255
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
255
256
  onError: (event) => {
256
257
  const error = event.error;
257
258
  const errorMessage = error instanceof Error ? error.message : String(error);
@@ -195,6 +195,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
195
195
  stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
196
196
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
197
197
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
198
+ experimental_repairToolCall: this.getToolCallRepairFn(options),
198
199
  onStepFinish: (event) => {
199
200
  this.handleToolExecutionStorage([...event.toolCalls], [...event.toolResults], options, new Date()).catch((error) => {
200
201
  logger.warn("[OpenAiCompatibleProvider] Failed to store tool executions", {
@@ -325,6 +325,8 @@ export type StreamOptions = {
325
325
  /** AbortSignal for external cancellation of the AI call */
326
326
  abortSignal?: AbortSignal;
327
327
  disableTools?: boolean;
328
+ /** Disable the schema-driven tool call repair mechanism (BZ-665). Default: false (repair enabled). */
329
+ disableToolCallRepair?: boolean;
328
330
  maxSteps?: number;
329
331
  /**
330
332
  * Tool choice configuration for streaming generation.
@@ -432,6 +434,10 @@ export type StreamOptions = {
432
434
  * @internal Set by NeuroLink SDK — not typically used directly by consumers.
433
435
  */
434
436
  fileRegistry?: unknown;
437
+ /** BZ-1341: Override fallback provider name (takes precedence over env/model config). */
438
+ fallbackProvider?: string;
439
+ /** BZ-1341: Override fallback model name (takes precedence over env/model config). */
440
+ fallbackModel?: string;
435
441
  /** Callback invoked when streaming completes successfully. */
436
442
  onFinish?: OnFinishCallback;
437
443
  /** Callback invoked when streaming encounters an error. */
@@ -0,0 +1,21 @@
1
+ /**
2
+ * Schema-Driven Tool Call Repair (BZ-665)
3
+ *
4
+ * Implements `experimental_repairToolCall` for the Vercel AI SDK.
5
+ * When an LLM sends a wrong tool name or wrong parameter names,
6
+ * this module attempts deterministic, schema-driven repair:
7
+ *
8
+ * 1. Tool name: case-insensitive → substring → Levenshtein
9
+ * 2. Param names: compare against JSON schema properties dynamically
10
+ * 3. Type coercion: string→number, JSON string→object/array per schema
11
+ *
12
+ * Zero static alias maps. The tool's JSON schema is the only source of truth.
13
+ *
14
+ * @module utils/toolCallRepair
15
+ */
16
+ import type { ToolCallRepairFunction, ToolSet } from "ai";
17
+ /**
18
+ * Create an `experimental_repairToolCall` handler for streamText/generateText.
19
+ * Fully dynamic — reads the tool schema at repair time, no configuration needed.
20
+ */
21
+ export declare function createToolCallRepair(): ToolCallRepairFunction<ToolSet>;