@juspay/neurolink 5.1.0 → 5.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -9
- package/README.md +123 -126
- package/dist/agent/direct-tools.d.ts +6 -6
- package/dist/cli/commands/config.d.ts +3 -3
- package/dist/cli/commands/mcp.js +8 -7
- package/dist/cli/factories/command-factory.d.ts +4 -0
- package/dist/cli/factories/command-factory.js +57 -3
- package/dist/cli/index.js +87 -140
- package/dist/core/base-provider.d.ts +423 -0
- package/dist/core/base-provider.js +365 -0
- package/dist/core/constants.d.ts +1 -1
- package/dist/core/constants.js +1 -1
- package/dist/core/dynamic-models.d.ts +6 -6
- package/dist/core/evaluation.d.ts +19 -80
- package/dist/core/evaluation.js +185 -484
- package/dist/core/factory.d.ts +3 -3
- package/dist/core/factory.js +31 -91
- package/dist/core/service-registry.d.ts +47 -0
- package/dist/core/service-registry.js +112 -0
- package/dist/core/types.d.ts +8 -1
- package/dist/factories/compatibility-factory.js +1 -1
- package/dist/factories/provider-factory.d.ts +72 -0
- package/dist/factories/provider-factory.js +144 -0
- package/dist/factories/provider-registry.d.ts +38 -0
- package/dist/factories/provider-registry.js +107 -0
- package/dist/index.d.ts +4 -3
- package/dist/index.js +2 -4
- package/dist/lib/agent/direct-tools.d.ts +6 -6
- package/dist/lib/core/base-provider.d.ts +423 -0
- package/dist/lib/core/base-provider.js +365 -0
- package/dist/lib/core/constants.d.ts +1 -1
- package/dist/lib/core/constants.js +1 -1
- package/dist/lib/core/dynamic-models.d.ts +6 -6
- package/dist/lib/core/evaluation.d.ts +19 -80
- package/dist/lib/core/evaluation.js +185 -484
- package/dist/lib/core/factory.d.ts +3 -3
- package/dist/lib/core/factory.js +30 -91
- package/dist/lib/core/service-registry.d.ts +47 -0
- package/dist/lib/core/service-registry.js +112 -0
- package/dist/lib/core/types.d.ts +8 -1
- package/dist/lib/factories/compatibility-factory.js +1 -1
- package/dist/lib/factories/provider-factory.d.ts +72 -0
- package/dist/lib/factories/provider-factory.js +144 -0
- package/dist/lib/factories/provider-registry.d.ts +38 -0
- package/dist/lib/factories/provider-registry.js +107 -0
- package/dist/lib/index.d.ts +4 -3
- package/dist/lib/index.js +2 -4
- package/dist/lib/mcp/config.js +28 -3
- package/dist/lib/mcp/function-calling.js +1 -1
- package/dist/lib/mcp/initialize-tools.d.ts +1 -1
- package/dist/lib/mcp/initialize-tools.js +45 -1
- package/dist/lib/mcp/initialize.js +16 -6
- package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/unified-registry.d.ts +4 -0
- package/dist/lib/mcp/unified-registry.js +42 -9
- package/dist/lib/neurolink.d.ts +156 -117
- package/dist/lib/neurolink.js +619 -404
- package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
- package/dist/lib/providers/amazon-bedrock.js +143 -0
- package/dist/lib/providers/analytics-helper.js +7 -4
- package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/lib/providers/anthropic-baseprovider.js +114 -0
- package/dist/lib/providers/anthropic.d.ts +19 -43
- package/dist/lib/providers/anthropic.js +82 -306
- package/dist/lib/providers/azure-openai.d.ts +20 -0
- package/dist/lib/providers/azure-openai.js +89 -0
- package/dist/lib/providers/google-ai-studio.d.ts +23 -0
- package/dist/lib/providers/google-ai-studio.js +107 -0
- package/dist/lib/providers/google-vertex.d.ts +47 -0
- package/dist/lib/providers/google-vertex.js +205 -0
- package/dist/lib/providers/huggingFace.d.ts +32 -25
- package/dist/lib/providers/huggingFace.js +97 -431
- package/dist/lib/providers/index.d.ts +9 -9
- package/dist/lib/providers/index.js +9 -9
- package/dist/lib/providers/mcp-provider.js +4 -0
- package/dist/lib/providers/mistral.d.ts +42 -0
- package/dist/lib/providers/mistral.js +160 -0
- package/dist/lib/providers/ollama.d.ts +52 -36
- package/dist/lib/providers/ollama.js +297 -520
- package/dist/lib/providers/openAI.d.ts +19 -18
- package/dist/lib/providers/openAI.js +76 -275
- package/dist/lib/sdk/tool-extension.d.ts +181 -0
- package/dist/lib/sdk/tool-extension.js +283 -0
- package/dist/lib/sdk/tool-registration.d.ts +95 -0
- package/dist/lib/sdk/tool-registration.js +167 -0
- package/dist/lib/types/generate-types.d.ts +1 -0
- package/dist/lib/types/mcp-types.d.ts +116 -0
- package/dist/lib/types/mcp-types.js +5 -0
- package/dist/lib/types/stream-types.d.ts +30 -18
- package/dist/lib/types/universal-provider-options.d.ts +87 -0
- package/dist/lib/types/universal-provider-options.js +53 -0
- package/dist/mcp/config.js +28 -3
- package/dist/mcp/function-calling.js +1 -1
- package/dist/mcp/initialize-tools.d.ts +1 -1
- package/dist/mcp/initialize-tools.js +45 -1
- package/dist/mcp/initialize.js +16 -6
- package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/unified-registry.d.ts +4 -0
- package/dist/mcp/unified-registry.js +42 -9
- package/dist/neurolink.d.ts +156 -117
- package/dist/neurolink.js +619 -404
- package/dist/providers/amazon-bedrock.d.ts +32 -0
- package/dist/providers/amazon-bedrock.js +143 -0
- package/dist/providers/analytics-helper.js +7 -4
- package/dist/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/providers/anthropic-baseprovider.js +114 -0
- package/dist/providers/anthropic.d.ts +19 -43
- package/dist/providers/anthropic.js +81 -305
- package/dist/providers/azure-openai.d.ts +20 -0
- package/dist/providers/azure-openai.js +89 -0
- package/dist/providers/google-ai-studio.d.ts +23 -0
- package/dist/providers/google-ai-studio.js +108 -0
- package/dist/providers/google-vertex.d.ts +47 -0
- package/dist/providers/google-vertex.js +205 -0
- package/dist/providers/huggingFace.d.ts +32 -25
- package/dist/providers/huggingFace.js +96 -430
- package/dist/providers/index.d.ts +9 -9
- package/dist/providers/index.js +9 -9
- package/dist/providers/mcp-provider.js +4 -0
- package/dist/providers/mistral.d.ts +42 -0
- package/dist/providers/mistral.js +160 -0
- package/dist/providers/ollama.d.ts +52 -36
- package/dist/providers/ollama.js +297 -519
- package/dist/providers/openAI.d.ts +19 -18
- package/dist/providers/openAI.js +76 -276
- package/dist/sdk/tool-extension.d.ts +181 -0
- package/dist/sdk/tool-extension.js +283 -0
- package/dist/sdk/tool-registration.d.ts +95 -0
- package/dist/sdk/tool-registration.js +168 -0
- package/dist/types/generate-types.d.ts +1 -0
- package/dist/types/mcp-types.d.ts +116 -0
- package/dist/types/mcp-types.js +5 -0
- package/dist/types/stream-types.d.ts +30 -18
- package/dist/types/universal-provider-options.d.ts +87 -0
- package/dist/types/universal-provider-options.js +53 -0
- package/package.json +15 -10
- package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
- package/dist/lib/providers/agent-enhanced-provider.js +0 -605
- package/dist/lib/providers/amazonBedrock.d.ts +0 -28
- package/dist/lib/providers/amazonBedrock.js +0 -364
- package/dist/lib/providers/azureOpenAI.d.ts +0 -42
- package/dist/lib/providers/azureOpenAI.js +0 -347
- package/dist/lib/providers/googleAIStudio.d.ts +0 -42
- package/dist/lib/providers/googleAIStudio.js +0 -364
- package/dist/lib/providers/googleVertexAI.d.ts +0 -34
- package/dist/lib/providers/googleVertexAI.js +0 -547
- package/dist/lib/providers/mistralAI.d.ts +0 -37
- package/dist/lib/providers/mistralAI.js +0 -325
- package/dist/providers/agent-enhanced-provider.d.ts +0 -93
- package/dist/providers/agent-enhanced-provider.js +0 -606
- package/dist/providers/amazonBedrock.d.ts +0 -28
- package/dist/providers/amazonBedrock.js +0 -364
- package/dist/providers/azureOpenAI.d.ts +0 -42
- package/dist/providers/azureOpenAI.js +0 -348
- package/dist/providers/googleAIStudio.d.ts +0 -42
- package/dist/providers/googleAIStudio.js +0 -364
- package/dist/providers/googleVertexAI.d.ts +0 -34
- package/dist/providers/googleVertexAI.js +0 -547
- package/dist/providers/mistralAI.d.ts +0 -37
- package/dist/providers/mistralAI.js +0 -325
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
import { createMistral } from "@ai-sdk/mistral";
|
|
2
|
+
import { streamText, Output } from "ai";
|
|
3
|
+
import { BaseProvider } from "../core/base-provider.js";
|
|
4
|
+
import { logger } from "../utils/logger.js";
|
|
5
|
+
import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
|
|
6
|
+
import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
|
|
7
|
+
// --- Configuration helpers --------------------------------------------------

/** True when a Mistral API key is present in the environment. */
const hasMistralCredentials = () => {
    return Boolean(process.env.MISTRAL_API_KEY);
};

/** Model id used when none is supplied; MISTRAL_MODEL env var overrides. */
const getDefaultMistralModel = () => {
    return process.env.MISTRAL_MODEL || "mistral-small";
};

/**
 * Read the Mistral API key from the environment.
 * @throws {Error} with step-by-step setup guidance when MISTRAL_API_KEY is unset.
 */
const getMistralApiKey = () => {
    const key = process.env.MISTRAL_API_KEY;
    if (key) {
        return key;
    }
    throw new Error(`❌ Mistral AI Provider Configuration Error\n\nMissing required environment variable: MISTRAL_API_KEY\n\n🔧 Step 1: Get Mistral AI API Key\n1. Visit: https://console.mistral.ai/\n2. Sign in or create an account\n3. Go to API Keys section\n4. Create a new API key\n\n🔧 Step 2: Set Environment Variable\nAdd to your .env file:\nMISTRAL_API_KEY=your_api_key_here\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
};
|
|
21
|
+
/**
 * Mistral AI Provider v2 - BaseProvider Implementation
 *
 * PHASE 3.6: Simple BaseProvider wrap around existing @ai-sdk/mistral implementation
 *
 * Features:
 * - Extends BaseProvider for shared functionality
 * - Uses pre-configured Mistral instance for efficiency
 * - Enhanced error handling with setup guidance
 * - Supports all Mistral models (mistral-small, mistral-medium, mistral-large)
 */
export class MistralProvider extends BaseProvider {
    mistral; // configured @ai-sdk/mistral factory
    model; // pre-built language-model instance for this provider's model name
    /**
     * @param {string} [modelName] - Mistral model id; falls back to MISTRAL_MODEL env var or "mistral-small".
     * @param {object} [sdk] - optional SDK handle forwarded to BaseProvider.
     * @throws {Error} when MISTRAL_API_KEY is not configured.
     */
    constructor(modelName, sdk) {
        super(modelName, "mistral", sdk);
        // Validate Mistral API credentials up front so misconfiguration fails fast.
        if (!hasMistralCredentials()) {
            throw new Error(`❌ Mistral AI Provider Configuration Error\n\nMissing Mistral AI API key.\n\n🔧 Required Environment Variable:\nMISTRAL_API_KEY=your_api_key_here\n\n🔧 Get API Key:\n1. Visit: https://console.mistral.ai/\n2. Sign in or create account\n3. Generate API key\n4. Add to .env file\n\n🔧 Restart Application\nRestart your application to load the new environment variables.`);
        }
        // Initialize Mistral provider
        this.mistral = createMistral({
            apiKey: getMistralApiKey(),
        });
        // Pre-initialize model for efficiency
        this.model = this.mistral(this.modelName || getDefaultMistralModel());
        logger.debug("Mistral AI BaseProvider v2 initialized", {
            modelName: this.modelName,
            provider: this.providerName,
        });
    }
    /** @returns {string} the provider identifier used by the registry. */
    getProviderName() {
        return "mistral";
    }
    /** @returns {string} the model id used when the caller supplies none. */
    getDefaultModel() {
        return getDefaultMistralModel();
    }
    /**
     * Returns the Vercel AI SDK model instance for Mistral
     */
    getAISDKModel() {
        return this.model;
    }
    // executeGenerate removed - BaseProvider handles all generation with tools
    /**
     * Stream text from the Mistral API.
     * @param {object} options - stream options ({ input: { text }, systemPrompt?, maxTokens?, temperature? }).
     * @param {object} [analysisSchema] - accepted for interface parity; not used here — TODO confirm
     *   whether structured-output streaming should be wired through this parameter.
     * @returns {Promise<{stream: AsyncGenerator, provider: string, model: string}>}
     */
    async executeStream(options, analysisSchema) {
        try {
            this.validateStreamOptions(options);
            const result = await streamText({
                model: this.model,
                prompt: options.input.text,
                system: options.systemPrompt,
                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
                temperature: options.temperature,
            });
            return {
                // Adapt the SDK's textStream into the { content } chunk shape callers expect.
                stream: (async function* () {
                    for await (const chunk of result.textStream) {
                        yield { content: chunk };
                    }
                })(),
                provider: this.providerName,
                model: this.modelName,
            };
        }
        catch (error) {
            throw this.handleProviderError(error);
        }
    }
    /**
     * Map raw transport/API errors to actionable, user-facing errors.
     * Matching is by HTTP status substring in the message, so ordering matters
     * only for messages that contain multiple markers.
     * @param {any} error - the caught error.
     * @returns {Error} an error with setup/troubleshooting guidance.
     */
    handleProviderError(error) {
        if (error.name === "TimeoutError") {
            return new TimeoutError(`Mistral AI request timed out. Consider increasing timeout or using a lighter model.`, this.defaultTimeout);
        }
        if (error.message?.includes("401") ||
            error.message?.includes("Unauthorized")) {
            return new Error(`❌ Mistral AI Authentication Error\n\nYour API key is invalid or expired.\n\n🔧 Steps to Fix:\n1. Check your MISTRAL_API_KEY in .env file\n2. Verify the API key is correct and active\n3. Generate a new API key if needed at https://console.mistral.ai/\n4. Restart your application after updating`);
        }
        if (error.message?.includes("403") ||
            error.message?.includes("Forbidden")) {
            return new Error(`❌ Mistral AI Access Denied\n\nYour account doesn't have permission to access this model.\n\n🔧 Possible Solutions:\n1. Check if your account has access to the model: ${this.modelName}\n2. Try a different model (e.g., 'mistral-small')\n3. Verify your subscription status\n4. Contact Mistral AI support if needed`);
        }
        if (error.message?.includes("429") ||
            error.message?.includes("rate limit")) {
            return new Error(`❌ Mistral AI Rate Limit Exceeded\n\n${error.message}\n\n🔧 Solutions:\n1. Wait a moment before retrying\n2. Reduce request frequency\n3. Check your usage quotas\n4. Consider upgrading your plan`);
        }
        if (error.message?.includes("400") ||
            error.message?.includes("Bad Request")) {
            return new Error(`❌ Mistral AI Invalid Request\n\n${error.message}\n\n🔧 Check:\n1. Input text is properly formatted\n2. Model name is correct: ${this.modelName}\n3. Parameters are within limits\n4. Request format matches API requirements`);
        }
        if (error.message?.includes("404") ||
            error.message?.includes("Not Found")) {
            return new Error(`❌ Mistral AI Model Not Found\n\nModel '${this.modelName}' is not available.\n\n🔧 Available Models:\n- mistral-small (fastest, cost-effective)\n- mistral-medium (balanced performance)\n- mistral-large (highest quality)\n\n🔧 Fix: Update MISTRAL_MODEL environment variable`);
        }
        return new Error(`❌ Mistral AI Provider Error\n\n${error.message || "Unknown error occurred"}\n\n🔧 Troubleshooting:\n1. Check API key and network connectivity\n2. Verify model availability\n3. Review request parameters\n4. Check Mistral AI status page`);
    }
    /**
     * Validate stream options before dispatch.
     * @param {object} options - stream options to check.
     * @throws {Error} on missing prompt or out-of-range maxTokens/temperature.
     */
    validateStreamOptions(options) {
        if (!options.input?.text?.trim()) {
            throw new Error("Prompt is required for streaming");
        }
        // FIX: use explicit null checks instead of truthiness so a value of 0 is
        // actually validated — the previous `options.maxTokens && …` guard let
        // `maxTokens: 0` slip through the 1–32768 range check silently.
        if (options.maxTokens != null &&
            (options.maxTokens < 1 || options.maxTokens > 32768)) {
            throw new Error("maxTokens must be between 1 and 32768 for Mistral AI");
        }
        if (options.temperature != null &&
            (options.temperature < 0 || options.temperature > 1)) {
            throw new Error("temperature must be between 0 and 1");
        }
    }
    /**
     * Check available Mistral models
     * @returns Array of available model names
     */
    getAvailableModels() {
        return [
            "mistral-small",
            "mistral-medium",
            "mistral-large",
            "mistral-7b-instruct",
            "mistral-8x7b-instruct",
            "mistral-8x22b-instruct",
        ];
    }
    /**
     * Get recommended model based on use case
     * @param useCase - The intended use case ("speed" | "balanced" | "quality")
     * @returns Recommended model name (defaults to "mistral-small")
     */
    getRecommendedModel(useCase) {
        switch (useCase) {
            case "speed":
                return "mistral-small";
            case "balanced":
                return "mistral-medium";
            case "quality":
                return "mistral-large";
            default:
                return "mistral-small";
        }
    }
}
export default MistralProvider;
|
|
@@ -1,54 +1,70 @@
|
|
|
1
|
+
import type { AIProviderName } from "../core/types.js";
|
|
2
|
+
import type { LanguageModelV1 } from "ai";
|
|
3
|
+
import type { StreamOptions, StreamResult } from "../types/stream-types.js";
|
|
4
|
+
import type { ZodType, ZodTypeDef } from "zod";
|
|
5
|
+
import type { Schema } from "ai";
|
|
6
|
+
import { BaseProvider } from "../core/base-provider.js";
|
|
1
7
|
/**
 * Ollama Provider v2 - BaseProvider Implementation
 *
 * PHASE 3.7: BaseProvider wrap around existing custom Ollama implementation
 *
 * Features:
 * - Extends BaseProvider for shared functionality
 * - Preserves custom OllamaLanguageModel implementation
 * - Local model management and health checking
 * - Enhanced error handling with Ollama-specific guidance
 */
export declare class OllamaProvider extends BaseProvider {
    private ollamaModel;
    private baseUrl;
    private timeout;
    constructor(modelName?: string);
    protected getProviderName(): AIProviderName;
    protected getDefaultModel(): string;
    /**
     * Returns the Vercel AI SDK model instance for Ollama
     */
    protected getAISDKModel(): LanguageModelV1;
    /**
     * Ollama tool/function calling support is currently disabled due to integration issues.
     *
     * **Current Issues:**
     * 1. The OllamaLanguageModel from @ai-sdk/provider-utils doesn't properly integrate
     *    with BaseProvider's tool calling mechanism
     * 2. Ollama models require specific prompt formatting for function calls that differs
     *    from the standardized AI SDK format
     * 3. Tool response parsing and execution flow needs custom implementation
     *
     * **What's needed to enable tool support:**
     * - Create a custom OllamaLanguageModel wrapper that handles tool schema formatting
     * - Implement Ollama-specific tool calling prompt templates
     * - Add proper response parsing for Ollama's function call format
     * - Test with models that support function calling (llama3.1, mistral, etc.)
     *
     * **Tracking:**
     * - See BaseProvider tool integration patterns in other providers
     * - Monitor Ollama function calling documentation: https://ollama.com/blog/tool-support
     * - Track AI SDK updates for better Ollama integration
     *
     * @returns false to disable tools by default
     */
    supportsTools(): boolean;
    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
    private createOllamaStream;
    protected handleProviderError(error: any): Error;
    private validateStreamOptions;
    /**
     * Check if Ollama service is healthy and accessible
     */
    private checkOllamaHealth;
    /**
     * Get available models from Ollama
     */
    getAvailableModels(): Promise<string[]>;
    /**
     * Check if a specific model is available
     */
    isModelAvailable(modelName: string): Promise<boolean>;
}
export default OllamaProvider;
|