@petrgrishin/ai-sdk-ollama 3.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,726 @@
+ import { RerankingModelV3, RerankingModelV3CallOptions, JSONSchema7, ProviderV3, LanguageModelV3, EmbeddingModelV3 } from '@ai-sdk/provider';
+ import { Ollama, Config, ChatRequest, Options as Options$1, EmbedRequest } from 'ollama';
+ import { FetchFunction } from '@ai-sdk/provider-utils';
+ import * as ai from 'ai';
+ import { generateText as generateText$1, streamText as streamText$1 } from 'ai';
+
+ /**
+  * Configuration for the Ollama reranking model
+  */
+ interface OllamaRerankingConfig {
+   provider: string;
+   baseURL: string;
+   headers: () => Record<string, string | undefined>;
+   fetch?: FetchFunction;
+ }
+ /**
+  * Settings for configuring Ollama reranking models
+  */
+ interface OllamaRerankingSettings {
+   /**
+    * Custom instruction for the reranker model.
+    * Defaults to the model's built-in instruction (usually "Please judge relevance").
+    */
+   instruction?: string;
+ }
+ /**
+  * Ollama provider options for reranking calls
+  */
+ interface OllamaRerankingProviderOptions {
+   /**
+    * Custom instruction for this specific reranking call.
+    * Overrides the instruction set in model settings.
+    */
+   instruction?: string;
+ }
+ /**
+  * Native Ollama Reranking Model
+  *
+  * **WAITING FOR OFFICIAL SUPPORT**
+  *
+  * This implementation uses Ollama's /api/rerank endpoint from PR #11389.
+  * As of December 2024, this PR has NOT been merged into Ollama.
+  *
+  * **For a working solution, use `OllamaEmbeddingRerankingModel` instead:**
+  * ```ts
+  * import { ollama } from 'ai-sdk-ollama';
+  * import { rerank } from 'ai';
+  *
+  * const result = await rerank({
+  *   model: ollama.embeddingReranking('bge-m3'),
+  *   query: 'What is machine learning?',
+  *   documents: [...],
+  * });
+  * ```
+  *
+  * This native implementation will work once Ollama adds reranking support:
+  * @see https://github.com/ollama/ollama/pull/11389
+  *
+  * @example
+  * ```ts
+  * // NOT YET WORKING - requires Ollama reranking API
+  * import { ollama } from 'ai-sdk-ollama';
+  * import { rerank } from 'ai';
+  *
+  * const { rerankedDocuments } = await rerank({
+  *   model: ollama.rerankingModel('bge-reranker-v2-m3'),
+  *   query: 'What is machine learning?',
+  *   documents: [
+  *     'Machine learning is a subset of AI...',
+  *     'The weather today is sunny...',
+  *     'Deep learning uses neural networks...',
+  *   ],
+  *   topN: 2,
+  * });
+  * ```
+  */
+ declare class OllamaRerankingModel implements RerankingModelV3 {
+   readonly specificationVersion: "v3";
+   readonly modelId: string;
+   private readonly config;
+   private readonly settings;
+   constructor(modelId: string, settings: OllamaRerankingSettings, config: OllamaRerankingConfig);
+   get provider(): string;
+   doRerank({ documents, headers, query, topN, abortSignal, providerOptions, }: RerankingModelV3CallOptions): Promise<Awaited<ReturnType<RerankingModelV3['doRerank']>>>;
+ }
+
+ /**
+  * Settings for configuring Ollama embedding-based reranking
+  */
+ interface OllamaEmbeddingRerankingSettings {
+   /**
+    * Embedding model to use for computing document similarity.
+    * If not specified, uses the modelId passed to the constructor.
+    * Recommended models: 'bge-m3', 'nomic-embed-text', 'mxbai-embed-large'
+    */
+   embeddingModel?: string;
+   /**
+    * Maximum number of texts to embed per request. Smaller batches reduce
+    * memory/latency spikes for large document sets while still avoiding one
+    * request per document. Defaults to 16.
+    */
+   maxBatchSize?: number;
+ }
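+ /*
+  * Usage sketch for the settings above (model names are illustrative, not
+  * prescriptive; any Ollama embedding model should work):
+  * ```ts
+  * const model = ollama.embeddingReranking('bge-m3', {
+  *   embeddingModel: 'nomic-embed-text', // override the similarity model
+  *   maxBatchSize: 8,                    // embed at most 8 texts per request
+  * });
+  * ```
+  */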
+
+ type OllamaWithWebSearch = Ollama;
+ /**
+  * Configuration options for the web search tool
+  */
+ interface WebSearchToolOptions {
+   /**
+    * Timeout for search requests in milliseconds
+    * @default 30000
+    */
+   timeout?: number;
+   /**
+    * Ollama client instance to use for web search.
+    * If not provided, a client must be injected at runtime.
+    */
+   client?: OllamaWithWebSearch;
+ }
+ /**
+  * Output schema for web search results
+  */
+ type WebSearchOutput = {
+   results: Array<{
+     title: string;
+     url: string;
+     snippet: string;
+     publishedDate?: string;
+   }>;
+   searchQuery: string;
+   totalResults: number;
+ };
+ declare function webSearch(options?: WebSearchToolOptions): ai.Tool<{
+   query: string;
+   maxResults?: number | undefined;
+ }, WebSearchOutput>;
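+ /*
+  * Sketch of the `client` option: constructing an explicit ollama-js client
+  * and handing it to the tool factory (the host URL is an assumption; adjust
+  * to your deployment):
+  * ```ts
+  * import { Ollama } from 'ollama';
+  *
+  * const client = new Ollama({ host: 'http://127.0.0.1:11434' });
+  * const searchTool = webSearch({ client, timeout: 15000 });
+  * ```
+  */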
+
+ type OllamaWithWebFetch = Ollama;
+ /**
+  * Configuration options for the web fetch tool
+  */
+ interface WebFetchToolOptions {
+   /**
+    * Timeout for fetch requests in milliseconds
+    * @default 30000
+    */
+   timeout?: number;
+   /**
+    * Maximum content length to return in characters
+    * @default 10000
+    */
+   maxContentLength?: number;
+   /**
+    * Ollama client instance to use for web fetch.
+    * If not provided, a client must be injected at runtime.
+    */
+   client?: OllamaWithWebFetch;
+ }
+ /**
+  * Output schema for web fetch results
+  */
+ type WebFetchOutput = {
+   content: string;
+   title?: string;
+   url: string;
+   contentLength: number;
+   error?: string;
+ };
+ /**
+  * Creates a web fetch tool that allows AI models to retrieve content from specific URLs.
+  *
+  * This tool uses Ollama's web fetch capabilities to retrieve and parse web page content,
+  * making it accessible to AI models for analysis, summarization, or answering questions
+  * about specific web pages.
+  *
+  * @param options - Configuration options for the web fetch tool
+  * @returns A tool that can be used in AI SDK generateText/streamText calls
+  *
+  * @example
+  * ```typescript
+  * import { generateText } from 'ai';
+  * import { ollama } from 'ai-sdk-ollama';
+  *
+  * const result = await generateText({
+  *   model: ollama('llama3.2'),
+  *   prompt: 'Summarize the main points from this article: https://example.com/article',
+  *   tools: {
+  *     webFetch: ollama.tools.webFetch()
+  *   }
+  * });
+  * ```
+  */
+ declare function webFetch(options?: WebFetchToolOptions): ai.Tool<{
+   url: string;
+ }, WebFetchOutput>;
+
+ /**
+  * Ollama-specific tools that leverage the provider's web search capabilities.
+  * Follows the same pattern as Google and OpenAI providers.
+  */
+ declare const ollamaTools: {
+   /**
+    * Creates a web search tool that allows models to search the internet for current information.
+    *
+    * @param options - Configuration options for the web search tool
+    * @returns A tool that can search the web and return relevant results
+    *
+    * @example
+    * ```typescript
+    * import { ollama } from 'ai-sdk-ollama';
+    * import { generateText } from 'ai';
+    *
+    * const result = await generateText({
+    *   model: ollama('llama3.2'),
+    *   prompt: 'What are the latest AI developments?',
+    *   tools: {
+    *     webSearch: ollama.tools.webSearch()
+    *   }
+    * });
+    * ```
+    */
+   readonly webSearch: typeof webSearch;
+   /**
+    * Creates a web fetch tool that allows models to retrieve content from specific URLs.
+    *
+    * @param options - Configuration options for the web fetch tool
+    * @returns A tool that can fetch web page content
+    *
+    * @example
+    * ```typescript
+    * import { ollama } from 'ai-sdk-ollama';
+    * import { generateText } from 'ai';
+    *
+    * const result = await generateText({
+    *   model: ollama('llama3.2'),
+    *   prompt: 'Summarize the content from https://example.com',
+    *   tools: {
+    *     webFetch: ollama.tools.webFetch()
+    *   }
+    * });
+    * ```
+    */
+   readonly webFetch: typeof webFetch;
+ };
+
+ /**
+  * Object Generation Reliability Utilities for Ollama
+  *
+  * This module provides utilities to make Ollama object generation more reliable
+  * and deterministic. It addresses common issues like:
+  * - Schema validation failures
+  * - Inconsistent results across multiple attempts
+  * - Timeout and fetch errors
+  * - Malformed JSON responses
+  * - Type mismatches (strings vs numbers)
+  */
+
+ /**
+  * A function that attempts to repair the raw output of the model
+  * to enable JSON parsing and validation.
+  *
+  * Similar to AI SDK's RepairTextFunction but tailored for Ollama's output patterns.
+  */
+ type RepairTextFunction = (options: {
+   text: string;
+   error: Error;
+   schema?: JSONSchema7 | unknown;
+ }) => Promise<string | null>;
+ interface ObjectGenerationOptions {
+   /**
+    * Maximum number of retry attempts for object generation
+    */
+   maxRetries?: number;
+   /**
+    * Whether to attempt schema recovery when validation fails
+    */
+   attemptRecovery?: boolean;
+   /**
+    * Whether to use fallback values for failed generations
+    */
+   useFallbacks?: boolean;
+   /**
+    * Custom fallback values for specific fields
+    */
+   fallbackValues?: Record<string, unknown>;
+   /**
+    * Timeout for object generation in milliseconds
+    */
+   generationTimeout?: number;
+   /**
+    * Whether to validate and fix type mismatches
+    */
+   fixTypeMismatches?: boolean;
+   /**
+    * Custom repair function for malformed JSON or validation errors
+    * If provided, this will be used instead of the default jsonrepair
+    */
+   repairText?: RepairTextFunction;
+   /**
+    * Whether to enable automatic JSON repair for malformed LLM outputs
+    * Default: true (enabled by default for better reliability)
+    * Handles 14+ types of JSON issues including Python constants, JSONP, comments,
+    * escaped quotes, URLs in strings, trailing commas, unquoted keys, etc.
+    * Set to false to disable all automatic repair
+    */
+   enableTextRepair?: boolean;
+ }
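+ /*
+  * Sketch of a custom RepairTextFunction wired into ObjectGenerationOptions
+  * (the markdown-fence stripping here is illustrative, not the package's
+  * built-in repair logic):
+  * ```ts
+  * const repairText: RepairTextFunction = async ({ text }) => {
+  *   // Strip markdown code fences that some models wrap around JSON.
+  *   const match = text.match(/```(?:json)?\s*([\s\S]*?)```/);
+  *   return match ? match[1].trim() : null; // null = no repair applied
+  * };
+  *
+  * const objectGenerationOptions: ObjectGenerationOptions = {
+  *   maxRetries: 3,
+  *   repairText,
+  * };
+  * ```
+  */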
+
+ interface Options extends Options$1 {
+   /**
+    * Minimum probability threshold for token selection.
+    * This parameter is supported by the Ollama API but missing from ollama-js TypeScript definitions.
+    */
+   min_p?: number;
+ }
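+ /*
+  * Sketch: passing min_p (and other Ollama sampler parameters) through chat
+  * settings; the values are illustrative:
+  * ```ts
+  * const model = ollama('llama3.2', {
+  *   options: { min_p: 0.05, temperature: 0.7 },
+  * });
+  * ```
+  */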
+
+ /**
+  * Settings for configuring the Ollama provider.
+  * Extends Ollama's Config type for consistency with the underlying client.
+  */
+ interface OllamaProviderSettings extends Pick<Config, 'headers' | 'fetch'> {
+   /**
+    * Base URL for the Ollama API (defaults to http://127.0.0.1:11434).
+    * Maps to Config.host in the Ollama client.
+    */
+   baseURL?: string;
+   /**
+    * Ollama API key for authentication with cloud services.
+    * The API key will be sent as an `Authorization: Bearer {apiKey}` header.
+    */
+   apiKey?: string;
+   /**
+    * Existing Ollama client instance to use instead of creating a new one.
+    * When provided, baseURL, headers, and fetch are ignored.
+    */
+   client?: Ollama;
+ }
+ interface OllamaProvider extends ProviderV3 {
+   /**
+    * Create a language model instance
+    */
+   (modelId: string, settings?: OllamaChatSettings): LanguageModelV3;
+   /**
+    * Create a language model instance with the `chat` method
+    */
+   chat(modelId: string, settings?: OllamaChatSettings): LanguageModelV3;
+   /**
+    * Create a language model instance with the `languageModel` method
+    */
+   languageModel(modelId: string, settings?: OllamaChatSettings): LanguageModelV3;
+   /**
+    * Create an embedding model instance
+    */
+   embedding(modelId: string, settings?: OllamaEmbeddingSettings): EmbeddingModelV3;
+   /**
+    * Create an embedding model instance with the `textEmbedding` method
+    */
+   textEmbedding(modelId: string, settings?: OllamaEmbeddingSettings): EmbeddingModelV3;
+   /**
+    * Create an embedding model instance with the `textEmbeddingModel` method
+    */
+   textEmbeddingModel(modelId: string, settings?: OllamaEmbeddingSettings): EmbeddingModelV3;
+   /**
+    * Create a reranking model instance
+    */
+   reranking(modelId: string, settings?: OllamaRerankingSettings): RerankingModelV3;
+   /**
+    * Create a reranking model instance with the `rerankingModel` method
+    *
+    * NOTE: This uses Ollama's native /api/rerank endpoint which is NOT YET AVAILABLE.
+    * Use `embeddingReranking()` for a working solution.
+    * @see https://github.com/ollama/ollama/pull/11389
+    */
+   rerankingModel(modelId: string, settings?: OllamaRerankingSettings): RerankingModelV3;
+   /**
+    * Create an embedding-based reranking model (RECOMMENDED - working now)
+    *
+    * This is a workaround that uses embedding similarity for reranking
+    * since Ollama doesn't have native reranking support yet.
+    *
+    * @param modelId - The embedding model to use (e.g., 'bge-m3', 'nomic-embed-text')
+    * @param settings - Optional settings for the reranking model
+    *
+    * @example
+    * ```ts
+    * const result = await rerank({
+    *   model: ollama.embeddingReranking('bge-m3'),
+    *   query: 'What is machine learning?',
+    *   documents: [...],
+    *   topN: 3,
+    * });
+    * ```
+    */
+   embeddingReranking(modelId: string, settings?: OllamaEmbeddingRerankingSettings): RerankingModelV3;
+   /**
+    * Ollama-specific tools that leverage web search capabilities
+    */
+   tools: {
+     webSearch: (options?: WebSearchToolOptions) => ReturnType<typeof ollamaTools.webSearch>;
+     webFetch: (options?: WebFetchToolOptions) => ReturnType<typeof ollamaTools.webFetch>;
+   };
+ }
+ interface OllamaChatSettings extends Pick<ChatRequest, 'keep_alive' | 'format' | 'tools' | 'think'> {
+   /**
+    * Additional model parameters - uses extended Options type that includes min_p
+    * This automatically includes ALL Ollama parameters including new ones like 'dimensions'
+    */
+   options?: Partial<Options>;
+   /**
+    * Enable structured output mode
+    */
+   structuredOutputs?: boolean;
+   /**
+    * Enable reliable tool calling with retry and completion mechanisms.
+    * Defaults to true whenever function tools are provided; set to false to opt out.
+    */
+   reliableToolCalling?: boolean;
+   /**
+    * Tool calling reliability options. These override the sensible defaults used by the
+    * built-in reliability layer (maxRetries=2, forceCompletion=true,
+    * normalizeParameters=true, validateResults=true).
+    */
+   toolCallingOptions?: {
+     /**
+      * Maximum number of retry attempts for tool calls
+      */
+     maxRetries?: number;
+     /**
+      * Whether to force completion when tool calls succeed but no final text is generated
+      */
+     forceCompletion?: boolean;
+     /**
+      * Whether to normalize parameter names to handle inconsistencies
+      */
+     normalizeParameters?: boolean;
+     /**
+      * Whether to validate tool results and attempt recovery
+      */
+     validateResults?: boolean;
+     /**
+      * Custom parameter normalization mappings
+      */
+     parameterMappings?: Record<string, string[]>;
+     /**
+      * Timeout for tool execution in milliseconds
+      */
+     toolTimeout?: number;
+   };
+   /**
+    * Enable reliable object generation with retry and repair mechanisms.
+    * Defaults to true whenever JSON schemas are used; set to false to opt out.
+    */
+   reliableObjectGeneration?: boolean;
+   /**
+    * Object generation reliability options. These override the sensible defaults used by the
+    * built-in reliability layer (maxRetries=3, attemptRecovery=true, useFallbacks=true,
+    * fixTypeMismatches=true, enableTextRepair=true).
+    */
+   objectGenerationOptions?: ObjectGenerationOptions;
+ }
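+ /*
+  * Sketch of the reliability knobs above on a chat model (values are
+  * illustrative; omitted fields keep the documented defaults):
+  * ```ts
+  * const model = ollama('llama3.2', {
+  *   reliableToolCalling: true,
+  *   toolCallingOptions: { maxRetries: 3, toolTimeout: 30000 },
+  *   reliableObjectGeneration: true,
+  *   objectGenerationOptions: { maxRetries: 3, enableTextRepair: true },
+  * });
+  * ```
+  */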
+ /**
+  * Settings for configuring Ollama embedding models.
+  * Uses Pick from EmbedRequest for type consistency with the Ollama API.
+  */
+ interface OllamaEmbeddingSettings extends Pick<EmbedRequest, 'dimensions'> {
+   /**
+    * Additional embedding parameters (temperature, num_ctx, etc.)
+    */
+   options?: Partial<Options>;
+ }
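+ /*
+  * Sketch: requesting truncated embeddings via `dimensions` with the AI SDK's
+  * `embed` helper (whether a given model honors `dimensions` depends on the
+  * model; the name here is illustrative):
+  * ```ts
+  * import { embed } from 'ai';
+  *
+  * const { embedding } = await embed({
+  *   model: ollama.embedding('nomic-embed-text', { dimensions: 256 }),
+  *   value: 'sunny day at the beach',
+  * });
+  * ```
+  */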
+ /**
+  * Options for configuring Ollama provider calls
+  */
+ interface OllamaProviderOptions {
+   /**
+    * Additional headers to include in requests
+    */
+   headers?: Record<string, string>;
+ }
+ /**
+  * Options for configuring Ollama chat model calls
+  */
+ interface OllamaChatProviderOptions extends OllamaProviderOptions {
+   /**
+    * Enable structured output mode for object generation
+    */
+   structuredOutputs?: boolean;
+ }
+ /**
+  * Options for configuring Ollama embedding model calls
+  */
+ interface OllamaEmbeddingProviderOptions extends OllamaProviderOptions {
+   /**
+    * Maximum number of embeddings to process in a single call
+    */
+   maxEmbeddingsPerCall?: number;
+ }
+
+ /**
+  * Create an Ollama provider instance for browser environments
+  */
+ declare function createOllama(options?: OllamaProviderSettings): OllamaProvider;
+ /**
+  * Default Ollama provider instance for browser environments
+  */
+ declare const ollama: OllamaProvider;
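+ /*
+  * Sketch: creating a custom provider instance (the URL and key are
+  * placeholders; per OllamaProviderSettings, passing `client` instead would
+  * make baseURL, headers, and fetch be ignored):
+  * ```ts
+  * const ollamaCloud = createOllama({
+  *   baseURL: 'https://ollama.example.com',
+  *   apiKey: process.env.OLLAMA_API_KEY,
+  * });
+  * const model = ollamaCloud('llama3.2');
+  * ```
+  */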
+
+ interface OllamaErrorData {
+   message: string;
+   code?: string;
+   details?: unknown;
+ }
+ declare class OllamaError extends Error {
+   readonly cause?: unknown;
+   readonly data?: OllamaErrorData;
+   constructor({ message, cause, data, }: {
+     message: string;
+     cause?: unknown;
+     data?: OllamaErrorData;
+   });
+   static isOllamaError(error: unknown): error is OllamaError;
+ }
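+ /*
+  * Sketch: narrowing unknown errors with the type guard above:
+  * ```ts
+  * try {
+  *   await generateText({ model: ollama('llama3.2'), prompt: 'hi' });
+  * } catch (error) {
+  *   if (OllamaError.isOllamaError(error)) {
+  *     console.error(error.data?.code, error.message);
+  *   } else {
+  *     throw error;
+  *   }
+  * }
+  * ```
+  */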
+
+ /**
+  * Calculate cosine similarity between two vectors.
+  *
+  * Cosine similarity measures the angle between two vectors in multi-dimensional space,
+  * returning a value between -1 and 1 where:
+  * - 1 means identical direction (most similar)
+  * - 0 means orthogonal (unrelated)
+  * - -1 means opposite direction (least similar)
+  *
+  * For normalized embedding vectors, this is equivalent to the dot product.
+  *
+  * @param a First vector
+  * @param b Second vector
+  * @returns Cosine similarity score between -1 and 1
+  * @throws Error if vectors have different dimensions
+  */
+ declare function cosineSimilarity(a: number[], b: number[]): number;
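+ /*
+  * Worked example (hand-checkable): cos(theta) = (a . b) / (|a| |b|).
+  * ```ts
+  * cosineSimilarity([1, 0, 0], [0.6, 0.8, 0]); // dot = 0.6, |a| = |b| = 1, so 0.6
+  * cosineSimilarity([1, 0], [0, 1]);           // orthogonal, so 0
+  * cosineSimilarity([1, 2], [-1, -2]);         // opposite direction, so -1
+  * ```
+  */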
+
+ /**
+  * Tool Calling Reliability Utilities for Ollama
+  *
+  * This module provides utilities to make Ollama tool calling more reliable
+  * and deterministic. It addresses common issues like:
+  * - Empty final text responses after tool execution
+  * - Inconsistent parameter names
+  * - Incomplete agent loops
+  * - Tool result validation and recovery
+  */
+
+ interface ToolCallingOptions {
+   /**
+    * Maximum number of retry attempts for tool calls
+    */
+   maxRetries?: number;
+   /**
+    * Whether to force completion when tool calls succeed but no final text is generated
+    */
+   forceCompletion?: boolean;
+   /**
+    * Whether to normalize parameter names to handle inconsistencies
+    */
+   normalizeParameters?: boolean;
+   /**
+    * Whether to validate tool results and attempt recovery
+    */
+   validateResults?: boolean;
+   /**
+    * Custom parameter normalization mappings
+    */
+   parameterMappings?: Record<string, string[]>;
+   /**
+    * Timeout for tool execution in milliseconds
+    */
+   toolTimeout?: number;
+ }
+ interface ResolvedToolCallingOptions extends Omit<ToolCallingOptions, 'maxRetries' | 'forceCompletion' | 'normalizeParameters' | 'validateResults'> {
+   maxRetries: number;
+   forceCompletion: boolean;
+   normalizeParameters: boolean;
+   validateResults: boolean;
+ }
+ interface ToolCallResult {
+   success: boolean;
+   result?: unknown;
+   error?: string;
+   normalizedInput?: Record<string, unknown>;
+ }
+ interface ReliableToolCallResult {
+   text: string;
+   toolCalls: Array<{
+     toolName: string;
+     input: Record<string, unknown>;
+   }>;
+   toolResults?: Array<{
+     toolName: string;
+     input: Record<string, unknown>;
+     normalizedInput?: Record<string, unknown>;
+     result: unknown;
+     success: boolean;
+     error?: string;
+   }>;
+   completionMethod: 'natural' | 'forced' | 'incomplete';
+   retryCount: number;
+   errors?: string[];
+ }
+ interface ToolDefinition {
+   description: string;
+   inputSchema: Record<string, unknown>;
+   execute: (params: Record<string, unknown>) => Promise<unknown>;
+ }
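+ /*
+  * Sketch of a ToolDefinition (the weather tool and its schema are invented
+  * for illustration):
+  * ```ts
+  * const getWeather: ToolDefinition = {
+  *   description: 'Get the current temperature for a city',
+  *   inputSchema: {
+  *     type: 'object',
+  *     properties: { city: { type: 'string' } },
+  *     required: ['city'],
+  *   },
+  *   execute: async (params) => {
+  *     const city = String(params.city);
+  *     return { city, tempC: 21 }; // stubbed result
+  *   },
+  * };
+  * ```
+  */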
+
+ /**
+  * generateText - Enhanced generateText with Ollama-specific reliability
+  *
+  * This wrapper provides response synthesis and enhanced tool calling reliability
+  * that addresses the core Ollama limitation: tools execute but no final text is generated.
+  */
+
+ /**
+  * Enhanced options for Ollama-specific reliability features
+  */
+ interface EnhancedOptions {
+   /**
+    * Enable response synthesis when tools are called but no text is generated
+    * @default true
+    */
+   enableSynthesis?: boolean;
+   /**
+    * Custom synthesis prompt template
+    */
+   synthesisPrompt?: string;
+   /**
+    * Maximum attempts for synthesis
+    * @default 2
+    */
+   maxSynthesisAttempts?: number;
+   /**
+    * Minimum response length to consider valid
+    * @default 10
+    */
+   minResponseLength?: number;
+   /**
+    * EXPERIMENTAL: Enable tool calling with structured output (experimental_output)
+    *
+    * The official AI SDK doesn't support combining toolChoice: 'required' with experimental_output.
+    * When enabled, this uses a two-phase approach:
+    * 1. Execute tools first (without experimental_output)
+    * 2. Generate structured output with tool results injected as context
+    *
+    * This is NOT standard AI SDK behavior - only enable if you need both features together.
+    *
+    * @default false
+    */
+   enableToolsWithStructuredOutput?: boolean;
+ }
+ /**
+  * Enhanced generateText options that extend the official AI SDK options
+  */
+ type GenerateTextOptions = Parameters<typeof generateText$1>[0] & {
+   /**
+    * Enhanced options for Ollama-specific reliability features
+    */
+   enhancedOptions?: EnhancedOptions;
+ };
+ /**
+  * Enhanced generateText function with Ollama-specific reliability improvements
+  *
+  * This function applies synthesis by default when tools execute but return empty responses.
+  * The enhancement preserves the original response prototype and all methods/getters.
+  *
+  * Type parameters are inferred from the options, preserving AI SDK's type inference.
+  */
+ declare function generateText(options: GenerateTextOptions): Promise<Awaited<ReturnType<typeof generateText$1>>>;
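+ /*
+  * Sketch: the enhanced wrapper is a drop-in for the AI SDK call plus the
+  * `enhancedOptions` field (the tool body is a stub; the `tool`/`inputSchema`
+  * usage assumes an AI SDK version matching these declarations):
+  * ```ts
+  * import { generateText, ollama } from 'ai-sdk-ollama';
+  * import { tool } from 'ai';
+  * import { z } from 'zod';
+  *
+  * const { text } = await generateText({
+  *   model: ollama('llama3.2'),
+  *   prompt: 'What is the weather in Paris?',
+  *   tools: {
+  *     weather: tool({
+  *       description: 'Get weather for a city',
+  *       inputSchema: z.object({ city: z.string() }),
+  *       execute: async ({ city }) => ({ city, tempC: 21 }),
+  *     }),
+  *   },
+  *   enhancedOptions: { enableSynthesis: true, maxSynthesisAttempts: 2 },
+  * });
+  * ```
+  */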
+
+ /**
+  * streamText - Enhanced streamText with Ollama-specific reliability
+  *
+  * This wrapper provides streaming tool calling reliability by detecting
+  * when tools execute but no text is streamed, then providing synthesis.
+  *
+  * Enhances both `textStream` and `fullStream` with synthesis support.
+  */
+
+ type AIStreamTextOptions = Parameters<typeof streamText$1>[0];
+ /**
+  * Enhanced streamText options that extend the official AI SDK options
+  * This ensures 100% compatibility - all AI SDK properties are supported
+  */
+ type StreamTextOptions = AIStreamTextOptions & {
+   /**
+    * Enhanced options for Ollama-specific reliability features
+    */
+   enhancedOptions?: {
+     /**
+      * Enable enhanced tool calling logging
+      * @default true
+      */
+     enableToolLogging?: boolean;
+     /**
+      * Enable streaming synthesis when tools execute but no text streams
+      * @default true
+      */
+     enableStreamingSynthesis?: boolean;
+     /**
+      * Minimum streamed characters before considering it successful
+      * @default 10
+      */
+     minStreamLength?: number;
+     /**
+      * Timeout in ms to wait for streaming before applying synthesis
+      * @default 3000
+      */
+     synthesisTimeout?: number;
+   };
+ };
+ /**
+  * Enhanced streamText function with Ollama-specific reliability improvements
+  * Enhances both textStream and fullStream with synthesis support
+  */
+ declare function streamText(options: StreamTextOptions): Promise<Awaited<ReturnType<typeof streamText$1>>>;
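+ /*
+  * Sketch: enhanced streaming with synthesis enabled (note that, per the
+  * signature above, this wrapper is awaited before consuming the stream):
+  * ```ts
+  * import { streamText, ollama } from 'ai-sdk-ollama';
+  *
+  * const result = await streamText({
+  *   model: ollama('llama3.2'),
+  *   prompt: 'Tell me a short story.',
+  *   enhancedOptions: { enableStreamingSynthesis: true, synthesisTimeout: 3000 },
+  * });
+  * for await (const chunk of result.textStream) {
+  *   process.stdout.write(chunk);
+  * }
+  * ```
+  */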
+
+ export { type GenerateTextOptions, type ObjectGenerationOptions, type OllamaChatProviderOptions, type OllamaChatSettings, type OllamaEmbeddingProviderOptions, type OllamaEmbeddingSettings, OllamaError, type OllamaErrorData, type OllamaProvider, type OllamaProviderOptions, type OllamaProviderSettings, OllamaRerankingModel, type OllamaRerankingProviderOptions, type OllamaRerankingSettings, type ReliableToolCallResult, type ResolvedToolCallingOptions, type StreamTextOptions, type ToolCallResult, type ToolCallingOptions, type ToolDefinition, cosineSimilarity, createOllama, generateText, ollama, streamText };