@juspay/neurolink 5.1.0 → 5.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. package/CHANGELOG.md +21 -9
  2. package/README.md +123 -126
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/cli/commands/config.d.ts +3 -3
  5. package/dist/cli/commands/mcp.js +8 -7
  6. package/dist/cli/factories/command-factory.d.ts +4 -0
  7. package/dist/cli/factories/command-factory.js +63 -8
  8. package/dist/cli/index.js +87 -140
  9. package/dist/core/base-provider.d.ts +423 -0
  10. package/dist/core/base-provider.js +376 -0
  11. package/dist/core/constants.d.ts +2 -1
  12. package/dist/core/constants.js +2 -1
  13. package/dist/core/dynamic-models.d.ts +6 -6
  14. package/dist/core/evaluation.d.ts +19 -80
  15. package/dist/core/evaluation.js +185 -484
  16. package/dist/core/factory.d.ts +3 -3
  17. package/dist/core/factory.js +31 -91
  18. package/dist/core/service-registry.d.ts +47 -0
  19. package/dist/core/service-registry.js +112 -0
  20. package/dist/core/types.d.ts +8 -1
  21. package/dist/factories/compatibility-factory.js +1 -1
  22. package/dist/factories/provider-factory.d.ts +72 -0
  23. package/dist/factories/provider-factory.js +144 -0
  24. package/dist/factories/provider-registry.d.ts +38 -0
  25. package/dist/factories/provider-registry.js +107 -0
  26. package/dist/index.d.ts +4 -3
  27. package/dist/index.js +2 -4
  28. package/dist/lib/agent/direct-tools.d.ts +6 -6
  29. package/dist/lib/core/base-provider.d.ts +423 -0
  30. package/dist/lib/core/base-provider.js +376 -0
  31. package/dist/lib/core/constants.d.ts +2 -1
  32. package/dist/lib/core/constants.js +2 -1
  33. package/dist/lib/core/dynamic-models.d.ts +6 -6
  34. package/dist/lib/core/evaluation.d.ts +19 -80
  35. package/dist/lib/core/evaluation.js +185 -484
  36. package/dist/lib/core/factory.d.ts +3 -3
  37. package/dist/lib/core/factory.js +30 -91
  38. package/dist/lib/core/service-registry.d.ts +47 -0
  39. package/dist/lib/core/service-registry.js +112 -0
  40. package/dist/lib/core/types.d.ts +8 -1
  41. package/dist/lib/factories/compatibility-factory.js +1 -1
  42. package/dist/lib/factories/provider-factory.d.ts +72 -0
  43. package/dist/lib/factories/provider-factory.js +144 -0
  44. package/dist/lib/factories/provider-registry.d.ts +38 -0
  45. package/dist/lib/factories/provider-registry.js +107 -0
  46. package/dist/lib/index.d.ts +4 -3
  47. package/dist/lib/index.js +2 -4
  48. package/dist/lib/mcp/client.d.ts +1 -0
  49. package/dist/lib/mcp/client.js +1 -0
  50. package/dist/lib/mcp/config.js +28 -3
  51. package/dist/lib/mcp/context-manager.d.ts +1 -0
  52. package/dist/lib/mcp/context-manager.js +8 -4
  53. package/dist/lib/mcp/function-calling.d.ts +13 -0
  54. package/dist/lib/mcp/function-calling.js +134 -35
  55. package/dist/lib/mcp/initialize-tools.d.ts +1 -1
  56. package/dist/lib/mcp/initialize-tools.js +45 -1
  57. package/dist/lib/mcp/initialize.js +16 -6
  58. package/dist/lib/mcp/neurolink-mcp-client.d.ts +1 -0
  59. package/dist/lib/mcp/neurolink-mcp-client.js +21 -5
  60. package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  61. package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
  62. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
  63. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  64. package/dist/lib/mcp/unified-registry.d.ts +4 -0
  65. package/dist/lib/mcp/unified-registry.js +42 -9
  66. package/dist/lib/neurolink.d.ts +156 -117
  67. package/dist/lib/neurolink.js +619 -404
  68. package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
  69. package/dist/lib/providers/amazon-bedrock.js +143 -0
  70. package/dist/lib/providers/analytics-helper.js +7 -4
  71. package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
  72. package/dist/lib/providers/anthropic-baseprovider.js +114 -0
  73. package/dist/lib/providers/anthropic.d.ts +19 -43
  74. package/dist/lib/providers/anthropic.js +82 -306
  75. package/dist/lib/providers/azure-openai.d.ts +20 -0
  76. package/dist/lib/providers/azure-openai.js +89 -0
  77. package/dist/lib/providers/function-calling-provider.d.ts +64 -2
  78. package/dist/lib/providers/function-calling-provider.js +208 -9
  79. package/dist/lib/providers/google-ai-studio.d.ts +23 -0
  80. package/dist/lib/providers/google-ai-studio.js +107 -0
  81. package/dist/lib/providers/google-vertex.d.ts +47 -0
  82. package/dist/lib/providers/google-vertex.js +205 -0
  83. package/dist/lib/providers/huggingFace.d.ts +32 -25
  84. package/dist/lib/providers/huggingFace.js +97 -431
  85. package/dist/lib/providers/index.d.ts +9 -9
  86. package/dist/lib/providers/index.js +9 -9
  87. package/dist/lib/providers/mcp-provider.js +24 -5
  88. package/dist/lib/providers/mistral.d.ts +42 -0
  89. package/dist/lib/providers/mistral.js +160 -0
  90. package/dist/lib/providers/ollama.d.ts +52 -36
  91. package/dist/lib/providers/ollama.js +297 -520
  92. package/dist/lib/providers/openAI.d.ts +19 -18
  93. package/dist/lib/providers/openAI.js +76 -275
  94. package/dist/lib/sdk/tool-extension.d.ts +181 -0
  95. package/dist/lib/sdk/tool-extension.js +283 -0
  96. package/dist/lib/sdk/tool-registration.d.ts +95 -0
  97. package/dist/lib/sdk/tool-registration.js +167 -0
  98. package/dist/lib/services/streaming/streaming-manager.js +11 -10
  99. package/dist/lib/services/websocket/websocket-server.js +12 -11
  100. package/dist/lib/telemetry/telemetry-service.js +8 -7
  101. package/dist/lib/types/generate-types.d.ts +1 -0
  102. package/dist/lib/types/mcp-types.d.ts +116 -0
  103. package/dist/lib/types/mcp-types.js +5 -0
  104. package/dist/lib/types/stream-types.d.ts +30 -18
  105. package/dist/lib/types/universal-provider-options.d.ts +87 -0
  106. package/dist/lib/types/universal-provider-options.js +53 -0
  107. package/dist/mcp/client.d.ts +1 -0
  108. package/dist/mcp/client.js +1 -0
  109. package/dist/mcp/config.js +28 -3
  110. package/dist/mcp/context-manager.d.ts +1 -0
  111. package/dist/mcp/context-manager.js +8 -4
  112. package/dist/mcp/function-calling.d.ts +13 -0
  113. package/dist/mcp/function-calling.js +134 -35
  114. package/dist/mcp/initialize-tools.d.ts +1 -1
  115. package/dist/mcp/initialize-tools.js +45 -1
  116. package/dist/mcp/initialize.js +16 -6
  117. package/dist/mcp/neurolink-mcp-client.d.ts +1 -0
  118. package/dist/mcp/neurolink-mcp-client.js +21 -5
  119. package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  120. package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
  121. package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
  122. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  123. package/dist/mcp/unified-registry.d.ts +4 -0
  124. package/dist/mcp/unified-registry.js +42 -9
  125. package/dist/neurolink.d.ts +156 -117
  126. package/dist/neurolink.js +619 -404
  127. package/dist/providers/amazon-bedrock.d.ts +32 -0
  128. package/dist/providers/amazon-bedrock.js +143 -0
  129. package/dist/providers/analytics-helper.js +7 -4
  130. package/dist/providers/anthropic-baseprovider.d.ts +23 -0
  131. package/dist/providers/anthropic-baseprovider.js +114 -0
  132. package/dist/providers/anthropic.d.ts +19 -43
  133. package/dist/providers/anthropic.js +81 -305
  134. package/dist/providers/azure-openai.d.ts +20 -0
  135. package/dist/providers/azure-openai.js +89 -0
  136. package/dist/providers/function-calling-provider.d.ts +64 -2
  137. package/dist/providers/function-calling-provider.js +208 -9
  138. package/dist/providers/google-ai-studio.d.ts +23 -0
  139. package/dist/providers/google-ai-studio.js +108 -0
  140. package/dist/providers/google-vertex.d.ts +47 -0
  141. package/dist/providers/google-vertex.js +205 -0
  142. package/dist/providers/huggingFace.d.ts +32 -25
  143. package/dist/providers/huggingFace.js +96 -430
  144. package/dist/providers/index.d.ts +9 -9
  145. package/dist/providers/index.js +9 -9
  146. package/dist/providers/mcp-provider.js +24 -5
  147. package/dist/providers/mistral.d.ts +42 -0
  148. package/dist/providers/mistral.js +160 -0
  149. package/dist/providers/ollama.d.ts +52 -36
  150. package/dist/providers/ollama.js +297 -519
  151. package/dist/providers/openAI.d.ts +19 -18
  152. package/dist/providers/openAI.js +76 -276
  153. package/dist/sdk/tool-extension.d.ts +181 -0
  154. package/dist/sdk/tool-extension.js +283 -0
  155. package/dist/sdk/tool-registration.d.ts +95 -0
  156. package/dist/sdk/tool-registration.js +168 -0
  157. package/dist/services/streaming/streaming-manager.js +11 -10
  158. package/dist/services/websocket/websocket-server.js +12 -11
  159. package/dist/telemetry/telemetry-service.js +8 -7
  160. package/dist/types/generate-types.d.ts +1 -0
  161. package/dist/types/mcp-types.d.ts +116 -0
  162. package/dist/types/mcp-types.js +5 -0
  163. package/dist/types/stream-types.d.ts +30 -18
  164. package/dist/types/universal-provider-options.d.ts +87 -0
  165. package/dist/types/universal-provider-options.js +53 -0
  166. package/package.json +12 -5
  167. package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
  168. package/dist/lib/providers/agent-enhanced-provider.js +0 -605
  169. package/dist/lib/providers/amazonBedrock.d.ts +0 -28
  170. package/dist/lib/providers/amazonBedrock.js +0 -364
  171. package/dist/lib/providers/azureOpenAI.d.ts +0 -42
  172. package/dist/lib/providers/azureOpenAI.js +0 -347
  173. package/dist/lib/providers/googleAIStudio.d.ts +0 -42
  174. package/dist/lib/providers/googleAIStudio.js +0 -364
  175. package/dist/lib/providers/googleVertexAI.d.ts +0 -34
  176. package/dist/lib/providers/googleVertexAI.js +0 -547
  177. package/dist/lib/providers/mistralAI.d.ts +0 -37
  178. package/dist/lib/providers/mistralAI.js +0 -325
  179. package/dist/providers/agent-enhanced-provider.d.ts +0 -93
  180. package/dist/providers/agent-enhanced-provider.js +0 -606
  181. package/dist/providers/amazonBedrock.d.ts +0 -28
  182. package/dist/providers/amazonBedrock.js +0 -364
  183. package/dist/providers/azureOpenAI.d.ts +0 -42
  184. package/dist/providers/azureOpenAI.js +0 -348
  185. package/dist/providers/googleAIStudio.d.ts +0 -42
  186. package/dist/providers/googleAIStudio.js +0 -364
  187. package/dist/providers/googleVertexAI.d.ts +0 -34
  188. package/dist/providers/googleVertexAI.js +0 -547
  189. package/dist/providers/mistralAI.d.ts +0 -37
  190. package/dist/providers/mistralAI.js +0 -325
@@ -1,23 +1,29 @@
-/**
- * Ollama Provider for NeuroLink
- *
- * Local AI model deployment and management using Ollama.
- * Provides offline AI capabilities with local model hosting.
- *
- * Features:
- * - Local model deployment (privacy-first)
- * - Model management (download, list, remove)
- * - Health checking and service validation
- * - Streaming and non-streaming text generation
- */
-import { streamText, generateText, Output } from "ai";
+import { BaseProvider } from "../core/base-provider.js";
 import { logger } from "../utils/logger.js";
-import { getDefaultTimeout, TimeoutError } from "../utils/timeout.js";
+import { TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
-import { evaluateResponse } from "../core/evaluation.js";
-// Default system context
-const DEFAULT_SYSTEM_CONTEXT = {
-    systemPrompt: "You are a helpful AI assistant.",
+// Model version constants (configurable via environment)
+const DEFAULT_OLLAMA_MODEL = "llama3.1:8b";
+const FALLBACK_OLLAMA_MODEL = "llama3.2:latest"; // Used when primary model fails
+// Configuration helpers
+const getOllamaBaseUrl = () => {
+    return process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+};
+// Create AbortController with timeout for better compatibility
+const createAbortSignalWithTimeout = (timeoutMs) => {
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
+    // Clear timeout if signal is aborted through other means
+    controller.signal.addEventListener("abort", () => {
+        clearTimeout(timeoutId);
+    });
+    return controller.signal;
+};
+const getDefaultOllamaModel = () => {
+    return process.env.OLLAMA_MODEL || DEFAULT_OLLAMA_MODEL;
+};
+const getOllamaTimeout = () => {
+    return parseInt(process.env.OLLAMA_TIMEOUT || "60000", 10);
 };
 // Custom LanguageModelV1 implementation for Ollama
 class OllamaLanguageModel {
@@ -35,7 +41,7 @@ class OllamaLanguageModel {
         this.timeout = timeout;
     }
     estimateTokens(text) {
-        return Math.ceil(text.length / 4); // Rough estimation: 4 characters per token
+        return Math.ceil(text.length / 4);
     }
     convertMessagesToPrompt(messages) {
         return messages
@@ -43,569 +49,340 @@ class OllamaLanguageModel {
             if (typeof msg.content === "string") {
                 return `${msg.role}: ${msg.content}`;
             }
-            else if (Array.isArray(msg.content)) {
-                // Handle multi-part content (text, images, etc.)
-                return `${msg.role}: ${msg.content
-                    .filter((part) => part.type === "text")
-                    .map((part) => part.text)
-                    .join(" ")}`;
-            }
-            return "";
+            return `${msg.role}: ${JSON.stringify(msg.content)}`;
         })
             .join("\n");
     }
-    async checkHealth() {
-        try {
-            const controller = new AbortController();
-            const timeoutId = setTimeout(() => controller.abort(), 5000);
-            const response = await fetch(`${this.baseUrl}/api/tags`, {
-                method: "GET",
-                signal: controller.signal,
-                headers: { "Content-Type": "application/json" },
-            });
-            clearTimeout(timeoutId);
-            return response.ok;
-        }
-        catch {
-            return false;
-        }
-    }
-    async ensureModelAvailable() {
-        try {
-            const response = await fetch(`${this.baseUrl}/api/tags`);
-            if (!response.ok) {
-                throw new Error("Cannot access Ollama");
-            }
-            const data = (await response.json());
-            const models = data.models?.map((m) => m.name) || [];
-            if (!models.includes(this.modelId)) {
-                // Try to pull the model
-                const pullResponse = await fetch(`${this.baseUrl}/api/pull`, {
-                    method: "POST",
-                    headers: { "Content-Type": "application/json" },
-                    body: JSON.stringify({ name: this.modelId }),
-                });
-                if (!pullResponse.ok) {
-                    throw new Error(`Model '${this.modelId}' not available and cannot be pulled`);
-                }
-            }
-        }
-        catch (error) {
-            throw new Error(`Failed to ensure model availability: ${error instanceof Error ? error.message : String(error)}`);
-        }
-    }
     async doGenerate(options) {
-        // Health check and model availability
-        const isHealthy = await this.checkHealth();
-        if (!isHealthy) {
-            throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
+        const messages = options.messages || [];
+        const prompt = this.convertMessagesToPrompt(messages);
+        const response = await fetch(`${this.baseUrl}/api/generate`, {
+            method: "POST",
+            headers: { "Content-Type": "application/json" },
+            body: JSON.stringify({
+                model: this.modelId,
+                prompt,
+                stream: false,
+                system: messages.find((m) => m.role === "system")?.content,
+                options: {
+                    temperature: options.temperature,
+                    num_predict: options.maxTokens,
+                },
+            }),
+            signal: createAbortSignalWithTimeout(this.timeout),
+        });
+        if (!response.ok) {
+            throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
         }
-        await this.ensureModelAvailable();
-        const prompt = this.convertMessagesToPrompt(options.prompt);
-        const requestPayload = {
-            model: this.modelId,
-            prompt,
-            stream: false,
-            options: {
-                temperature: options.temperature || 0.7,
-                num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
+        const data = await response.json();
+        return {
+            text: data.response,
+            usage: {
+                promptTokens: this.estimateTokens(prompt),
+                completionTokens: this.estimateTokens(data.response),
             },
         };
-        const controller = new AbortController();
-        const timeoutId = setTimeout(() => controller.abort(), this.timeout);
-        try {
-            const response = await fetch(`${this.baseUrl}/api/generate`, {
-                method: "POST",
-                headers: { "Content-Type": "application/json" },
-                body: JSON.stringify(requestPayload),
-                signal: controller.signal,
-            });
-            clearTimeout(timeoutId);
-            if (!response.ok) {
-                if (response.status === 404) {
-                    const errorData = await response.json();
-                    if (errorData.error && errorData.error.includes("not found")) {
-                        throw new Error(`Model '${this.modelId}' not found. Please run 'ollama pull ${this.modelId}'`);
-                    }
-                }
-                throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
-            }
-            const data = (await response.json());
-            if (!data.response) {
-                throw new Error("No response received from Ollama");
-            }
-            const promptTokens = this.estimateTokens(prompt);
-            const completionTokens = this.estimateTokens(data.response);
-            return {
-                text: data.response,
-                usage: {
-                    promptTokens,
-                    completionTokens,
-                    totalTokens: promptTokens + completionTokens,
-                },
-                finishReason: "stop",
-                logprobs: undefined,
-                rawCall: { rawPrompt: prompt, rawSettings: options },
-                rawResponse: { headers: {} },
-            };
-        }
-        catch (error) {
-            clearTimeout(timeoutId);
-            const errorMessage = error instanceof Error ? error.message : String(error);
-            if (errorMessage.includes("AbortError") ||
-                errorMessage.includes("timeout")) {
-                throw new Error(`Ollama request timeout (${this.timeout}ms). The model may be large or the system is under load.`);
-            }
-            if (errorMessage.includes("ECONNREFUSED") ||
-                errorMessage.includes("fetch failed")) {
-                throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
-                    this.baseUrl);
-            }
-            throw error;
-        }
     }
     async doStream(options) {
-        // Health check and model availability
-        const isHealthy = await this.checkHealth();
-        if (!isHealthy) {
-            throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
+        const messages = options.messages || [];
+        const prompt = this.convertMessagesToPrompt(messages);
+        const response = await fetch(`${this.baseUrl}/api/generate`, {
+            method: "POST",
+            headers: { "Content-Type": "application/json" },
+            body: JSON.stringify({
+                model: this.modelId,
+                prompt,
+                stream: true,
+                system: messages.find((m) => m.role === "system")?.content,
+                options: {
+                    temperature: options.temperature,
+                    num_predict: options.maxTokens,
+                },
+            }),
+            signal: createAbortSignalWithTimeout(this.timeout),
+        });
+        if (!response.ok) {
+            throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
         }
-        await this.ensureModelAvailable();
-        const prompt = this.convertMessagesToPrompt(options.prompt);
-        const requestPayload = {
-            model: this.modelId,
-            prompt,
-            stream: true,
-            options: {
-                temperature: options.temperature || 0.7,
-                num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
-            },
+        return {
+            stream: this.parseStreamResponse(response),
         };
-        const controller = new AbortController();
-        const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+    }
+    async *parseStreamResponse(response) {
+        const reader = response.body?.getReader();
+        if (!reader) {
+            throw new Error("No response body");
+        }
+        const decoder = new TextDecoder();
+        let buffer = "";
         try {
-            const response = await fetch(`${this.baseUrl}/api/generate`, {
-                method: "POST",
-                headers: { "Content-Type": "application/json" },
-                body: JSON.stringify(requestPayload),
-                signal: controller.signal,
-            });
-            clearTimeout(timeoutId);
-            if (!response.ok) {
-                if (response.status === 404) {
-                    const errorData = await response.json();
-                    if (errorData.error && errorData.error.includes("not found")) {
-                        throw new Error(`Model '${this.modelId}' not found. Please run 'ollama pull ${this.modelId}'`);
-                    }
+            while (true) {
+                const { done, value } = await reader.read();
+                if (done) {
+                    break;
                 }
-                throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
-            }
-            if (!response.body) {
-                throw new Error("No response body received from Ollama streaming API");
-            }
-            // Create a ReadableStream that parses Ollama's streaming format
-            const stream = new ReadableStream({
-                async start(controller) {
-                    const reader = response.body.getReader();
-                    const decoder = new TextDecoder();
-                    let totalTokens = 0;
-                    try {
-                        while (true) {
-                            const { done, value } = await reader.read();
-                            if (done) {
-                                break;
+                buffer += decoder.decode(value, { stream: true });
+                const lines = buffer.split("\n");
+                buffer = lines.pop() || "";
+                for (const line of lines) {
+                    if (line.trim()) {
+                        try {
+                            const data = JSON.parse(line);
+                            if (data.response) {
+                                yield {
+                                    type: "text-delta",
+                                    textDelta: data.response,
+                                };
                             }
-                            const chunk = decoder.decode(value, { stream: true });
-                            const lines = chunk.split("\n").filter((line) => line.trim());
-                            for (const line of lines) {
-                                try {
-                                    const data = JSON.parse(line);
-                                    if (data.response) {
-                                        controller.enqueue({
-                                            type: "text-delta",
-                                            textDelta: data.response,
-                                        });
-                                        totalTokens += Math.ceil(data.response.length / 4);
-                                    }
-                                    if (data.done) {
-                                        controller.enqueue({
-                                            type: "finish",
-                                            finishReason: "stop",
-                                            usage: {
-                                                promptTokens: data.prompt_eval_count ||
-                                                    Math.ceil(prompt.length / 4),
-                                                completionTokens: data.eval_count || totalTokens,
-                                            },
-                                            logprobs: undefined,
-                                        });
-                                        controller.close();
-                                        return;
-                                    }
-                                }
-                                catch (parseError) {
-                                    // Skip invalid JSON lines
-                                }
+                            if (data.done) {
+                                yield {
+                                    type: "finish",
+                                    finishReason: "stop",
+                                    usage: {
+                                        promptTokens: this.estimateTokens(data.context || ""),
+                                        completionTokens: data.eval_count || 0,
+                                    },
+                                };
+                                return;
                             }
                         }
+                        catch (error) {
+                            // Ignore JSON parse errors for incomplete chunks
+                        }
                     }
-                    finally {
-                        reader.releaseLock();
-                    }
-                },
-            });
-            return {
-                stream,
-                rawCall: { rawPrompt: prompt, rawSettings: options },
-                rawResponse: { headers: {} },
-            };
-        }
-        catch (error) {
-            clearTimeout(timeoutId);
-            const errorMessage = error instanceof Error ? error.message : String(error);
-            if (errorMessage.includes("AbortError") ||
-                errorMessage.includes("timeout")) {
-                throw new Error(`Ollama streaming timeout (${this.timeout}ms). The model may be large or the system is under load.`);
-            }
-            if (errorMessage.includes("ECONNREFUSED") ||
-                errorMessage.includes("fetch failed")) {
-                throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
-                    this.baseUrl);
+                }
             }
-            throw error;
+        }
+        finally {
+            reader.releaseLock();
         }
     }
 }
-export class Ollama {
+/**
+ * Ollama Provider v2 - BaseProvider Implementation
+ *
+ * PHASE 3.7: BaseProvider wrap around existing custom Ollama implementation
+ *
+ * Features:
+ * - Extends BaseProvider for shared functionality
+ * - Preserves custom OllamaLanguageModel implementation
+ * - Local model management and health checking
+ * - Enhanced error handling with Ollama-specific guidance
+ */
+export class OllamaProvider extends BaseProvider {
+    ollamaModel;
     baseUrl;
-    modelName;
-    defaultTimeout;
+    timeout;
     constructor(modelName) {
-        this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
-        this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
-        // Use environment variable for backward compatibility, but convert to format used by other providers
-        const envTimeout = process.env.OLLAMA_TIMEOUT
-            ? parseInt(process.env.OLLAMA_TIMEOUT)
-            : undefined;
-        this.defaultTimeout =
-            envTimeout ||
-                parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""));
-        logger.debug("[Ollama] Initialized", {
-            baseUrl: this.baseUrl,
+        super(modelName, "ollama");
+        this.baseUrl = getOllamaBaseUrl();
+        this.timeout = getOllamaTimeout();
+        // Initialize Ollama model
+        this.ollamaModel = new OllamaLanguageModel(this.modelName || getDefaultOllamaModel(), this.baseUrl, this.timeout);
+        logger.debug("Ollama BaseProvider v2 initialized", {
             modelName: this.modelName,
-            defaultTimeout: this.defaultTimeout,
+            baseUrl: this.baseUrl,
+            timeout: this.timeout,
+            provider: this.providerName,
         });
     }
-    /**
-     * Gets the appropriate model instance
-     * @private
-     */
-    getModel(timeout) {
-        logger.debug("Ollama.getModel - Ollama model selected", {
-            modelName: this.modelName,
-            timeout: timeout || this.defaultTimeout,
-        });
-        return new OllamaLanguageModel(this.modelName, this.baseUrl, timeout || this.defaultTimeout);
+    getProviderName() {
+        return "ollama";
+    }
+    getDefaultModel() {
+        return getDefaultOllamaModel();
     }
     /**
-     * Health check - verify Ollama service is running and accessible
+     * Returns the Vercel AI SDK model instance for Ollama
      */
-    async checkHealth() {
-        const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.defaultTimeout);
-        return await model["checkHealth"]();
+    getAISDKModel() {
+        return this.ollamaModel;
     }
     /**
-     * List available models on the Ollama instance
+     * Ollama tool/function calling support is currently disabled due to integration issues.
+     *
+     * **Current Issues:**
+     * 1. The OllamaLanguageModel from @ai-sdk/provider-utils doesn't properly integrate
+     *    with BaseProvider's tool calling mechanism
+     * 2. Ollama models require specific prompt formatting for function calls that differs
+     *    from the standardized AI SDK format
+     * 3. Tool response parsing and execution flow needs custom implementation
+     *
+     * **What's needed to enable tool support:**
+     * - Create a custom OllamaLanguageModel wrapper that handles tool schema formatting
+     * - Implement Ollama-specific tool calling prompt templates
+     * - Add proper response parsing for Ollama's function call format
+     * - Test with models that support function calling (llama3.1, mistral, etc.)
+     *
+     * **Tracking:**
+     * - See BaseProvider tool integration patterns in other providers
+     * - Monitor Ollama function calling documentation: https://ollama.com/blog/tool-support
+     * - Track AI SDK updates for better Ollama integration
+     *
+     * @returns false to disable tools by default
      */
-    async listModels() {
-        const functionTag = "Ollama.listModels";
+    supportsTools() {
+        // TODO: Fix the OllamaLanguageModel integration with BaseProvider for tool support.
+        // Track progress on resolving this issue. See the detailed steps above.
+        // Issue tracking required for enabling tool support
+        return false;
+    }
+    // executeGenerate removed - BaseProvider handles all generation with tools
+    async executeStream(options, analysisSchema) {
         try {
-            logger.debug(`[${functionTag}] Listing available models`);
-            const response = await fetch(`${this.baseUrl}/api/tags`, {
-                method: "GET",
-                headers: {
-                    "Content-Type": "application/json",
-                },
+            this.validateStreamOptions(options);
+            await this.checkOllamaHealth();
+            // Direct HTTP streaming implementation for better compatibility
+            const response = await fetch(`${this.baseUrl}/api/generate`, {
+                method: "POST",
+                headers: { "Content-Type": "application/json" },
+                body: JSON.stringify({
+                    model: this.modelName || FALLBACK_OLLAMA_MODEL,
+                    prompt: options.input.text,
+                    system: options.systemPrompt,
+                    stream: true,
+                    options: {
+                        temperature: options.temperature,
+                        num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
+                    },
+                }),
+                signal: createAbortSignalWithTimeout(this.timeout),
             });
             if (!response.ok) {
-                throw new Error(`Failed to list models: ${response.status} ${response.statusText}`);
+                throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
             }
-            const data = (await response.json());
-            const modelNames = data.models?.map((model) => model.name) || [];
-            logger.debug(`[${functionTag}] Found models`, {
-                count: modelNames.length,
-                models: modelNames,
-            });
-            return modelNames;
+            // Transform to async generator to match other providers
+            const self = this;
+            const transformedStream = async function* () {
+                const generator = self.createOllamaStream(response);
+                for await (const chunk of generator) {
+                    yield chunk;
+                }
+            };
+            return {
+                stream: transformedStream(),
+                provider: this.providerName,
+                model: this.modelName,
+            };
         }
         catch (error) {
-            logger.debug(`[${functionTag}] Error listing models`, {
-                error: error instanceof Error ? error.message : String(error),
-            });
-            throw new Error(`Failed to list Ollama models: ${error instanceof Error ? error.message : String(error)}`);
+            throw this.handleProviderError(error);
         }
     }
-    /**
-     * Check if a specific model is available
-     */
-    async isModelAvailable(modelName) {
+    async *createOllamaStream(response) {
+        const reader = response.body?.getReader();
+        if (!reader) {
+            throw new Error("No response body");
+        }
+        const decoder = new TextDecoder();
+        let buffer = "";
         try {
-            const models = await this.listModels();
-            return models.includes(modelName);
+            while (true) {
+                const { done, value } = await reader.read();
+                if (done) {
+                    break;
+                }
+                buffer += decoder.decode(value, { stream: true });
+                const lines = buffer.split("\n");
+                buffer = lines.pop() || "";
+                for (const line of lines) {
+                    if (line.trim()) {
+                        try {
+                            const data = JSON.parse(line);
+                            if (data.response) {
+                                yield { content: data.response };
+                            }
+                            if (data.done) {
+                                return;
+                            }
+                        }
+                        catch (error) {
+                            // Ignore JSON parse errors for incomplete chunks
+                        }
+                    }
+                }
+            }
         }
-        catch (error) {
-            return false;
+        finally {
+            reader.releaseLock();
         }
     }
-    /**
-     * Pull/download a model to the local Ollama instance
-     */
-    async pullModel(modelName) {
-        const functionTag = "Ollama.pullModel";
-        try {
-            logger.debug(`[${functionTag}] Pulling model`, { modelName });
-            const response = await fetch(`${this.baseUrl}/api/pull`, {
-                method: "POST",
-                headers: {
-                    "Content-Type": "application/json",
-                },
-                body: JSON.stringify({
-                    name: modelName,
-                }),
-            });
-            if (!response.ok) {
-                throw new Error(`Failed to pull model: ${response.status} ${response.statusText}`);
-            }
-            // Note: Ollama pull API returns streaming responses
-            // For simplicity, we're not handling the streaming progress here
-            logger.debug(`[${functionTag}] Model pull completed`, { modelName });
+    handleProviderError(error) {
+        if (error.name === "TimeoutError") {
+            return new TimeoutError(`Ollama request timed out. The model might be loading or the request is too complex.`, this.defaultTimeout);
         }
-        catch (error) {
-            logger.debug(`[${functionTag}] Error pulling model`, {
-                modelName,
-                error: error instanceof Error ? error.message : String(error),
-            });
-            throw new Error(`Failed to pull model '${modelName}': ${error instanceof Error ? error.message : String(error)}`);
+        if (error.message?.includes("ECONNREFUSED") ||
+            error.message?.includes("fetch failed")) {
+            return new Error(`❌ Ollama Service Not Running\n\nCannot connect to Ollama at ${this.baseUrl}\n\n🔧 Steps to Fix:\n1. Install Ollama: https://ollama.ai/\n2. Start Ollama service: 'ollama serve'\n3. Verify it's running: 'curl ${this.baseUrl}/api/version'\n4. Try again`);
+        }
+        if (error.message?.includes("model") &&
+            error.message?.includes("not found")) {
+            return new Error(`❌ Ollama Model Not Found\n\nModel '${this.modelName}' is not available locally.\n\n🔧 Install Model:\n1. Run: ollama pull ${this.modelName}\n2. Or try a different model:\n - ollama pull ${FALLBACK_OLLAMA_MODEL}\n - ollama pull mistral:latest\n - ollama pull codellama:latest\n\n🔧 List Available Models:\nollama list`);
+        }
+        if (error.message?.includes("404")) {
+            return new Error(`❌ Ollama API Endpoint Not Found\n\nThe API endpoint might have changed or Ollama version is incompatible.\n\n🔧 Check:\n1. Ollama version: 'ollama --version'\n2. Update Ollama to latest version\n3. Verify API is available: 'curl ${this.baseUrl}/api/version'`);
+        }
+        return new Error(`❌ Ollama Provider Error\n\n${error.message || "Unknown error occurred"}\n\n🔧 Troubleshooting:\n1. Check if Ollama service is running\n2. Verify model is installed: 'ollama list'\n3. Check network connectivity to ${this.baseUrl}\n4. Review Ollama logs for details`);
+    }
+    validateStreamOptions(options) {
+        if (!options.input?.text?.trim()) {
+            throw new Error("Prompt is required for streaming");
+        }
+        if (options.maxTokens && options.maxTokens < 1) {
+            throw new Error("maxTokens must be greater than 0");
+        }
+        if (options.temperature &&
+            (options.temperature < 0 || options.temperature > 2)) {
+            throw new Error("temperature must be between 0 and 2");
         }
     }
     /**
-     * PRIMARY METHOD: Stream content using AI (recommended for new code)
-     * Future-ready for multi-modal capabilities with current text focus
+     * Check if Ollama service is healthy and accessible
      */
-    async stream(optionsOrPrompt, analysisSchema) {
-        const functionTag = "Ollama.stream";
-        const provider = "ollama";
-        let chunkCount = 0;
-        const startTime = Date.now();
+    async checkOllamaHealth() {
         try {
-            // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt === "string"
-                ? { input: { text: optionsOrPrompt } }
-                : optionsOrPrompt;
-            // Validate input
-            if (!options?.input?.text ||
-                typeof options.input.text !== "string" ||
-                options.input.text.trim() === "") {
-                throw new Error("Stream options must include input.text as a non-empty string");
-            }
-            // Extract parameters
-            const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
-            // Use schema from options or fallback parameter
-            const finalSchema = schema || analysisSchema;
-            // Convert timeout to milliseconds if provided as string
-            const timeoutMs = timeout
-                ? typeof timeout === "string"
-                    ? parseInt(getDefaultTimeout("ollama", "stream").replace(/[^\d]/g, ""))
-                    : timeout
-                : this.defaultTimeout;
-            logger.debug(`[${functionTag}] Stream request started`, {
-                provider,
-                modelName: this.modelName,
-                promptLength: prompt.length,
-                temperature,
-                maxTokens,
-                hasSchema: !!finalSchema,
-                timeout: timeoutMs,
+            // Use traditional AbortController for better compatibility
+            const controller = new AbortController();
+            const timeoutId = setTimeout(() => controller.abort(), 5000);
+            const response = await fetch(`${this.baseUrl}/api/version`, {
+                method: "GET",
+                signal: controller.signal,
             });
-            const model = this.getModel(timeoutMs);
-            const streamOptions = {
-                model: model,
-                prompt: prompt,
-                system: systemPrompt,
-                temperature,
-                maxTokens,
-                onError: (event) => {
-                    const error = event.error;
-                    const errorMessage = error instanceof Error ? error.message : String(error);
-                    const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.debug(`[${functionTag}] Stream error`, {
-                        provider,
-                        modelName: this.modelName,
-                        error: errorMessage,
-                        stack: errorStack,
-                        promptLength: prompt.length,
-                        chunkCount,
-                    });
-                },
-                onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream finished`, {
-                        provider,
-                        modelName: this.modelName,
-                        finishReason: event.finishReason,
-                        usage: event.usage,
-                        totalChunks: chunkCount,
-                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0,
-                    });
-                },
-                onChunk: (event) => {
-                    chunkCount++;
-                    logger.debug(`[${functionTag}] Stream chunk`, {
-                        provider,
-                        modelName: this.modelName,
-                        chunkNumber: chunkCount,
-                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type,
-                    });
-                },
-            };
-            if (finalSchema) {
-                streamOptions.experimental_output = Output.object({
-                    schema: finalSchema,
-                });
+            clearTimeout(timeoutId);
+            if (!response.ok) {
+                throw new Error(`Ollama health check failed: ${response.status}`);
             }
-            const result = streamText(streamOptions);
-            logger.debug(`[${functionTag}] Stream request completed`, {
-                provider,
-                modelName: this.modelName,
-            });
-            // Convert to StreamResult format
-            return {
-                stream: (async function* () {
-                    for await (const chunk of result.textStream) {
-                        yield { content: chunk };
-                    }
-                })(),
-                provider: "ollama",
-                model: this.modelName,
-                metadata: {
-                    streamId: `ollama-${Date.now()}`,
-                    startTime,
-                },
-            };
         }
-        catch (err) {
-            // Log timeout errors specifically
-            if (err instanceof TimeoutError) {
-                logger.debug(`[${functionTag}] Timeout error`, {
-                    provider,
-                    modelName: this.modelName,
-                    timeout: err.timeout,
-                    message: err.message,
-                });
-            }
-            else {
-                logger.debug(`[${functionTag}] Exception`, {
-                    provider,
-                    modelName: this.modelName,
-                    message: "Error in streaming content",
-                    err: String(err),
-                });
+        catch (error) {
+            if (error instanceof Error && error.message.includes("ECONNREFUSED")) {
+                throw new Error(`❌ Ollama Service Not Running\n\nCannot connect to Ollama service.\n\n🔧 Start Ollama:\n1. Run: ollama serve\n2. Or start Ollama app\n3. Verify: curl ${this.baseUrl}/api/version`);
             }
-            throw err; // Re-throw error to trigger fallback
+            throw error;
         }
     }
     /**
-     * Generate text using Ollama local models
+     * Get available models from Ollama
      */
-    async generate(optionsOrPrompt, analysisSchema) {
-        const functionTag = "Ollama.generate";
-        const provider = "ollama";
-        const startTime = Date.now();
+    async getAvailableModels() {
         try {
-            // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt === "string"
-                ? { prompt: optionsOrPrompt }
-                : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
-            // Use schema from options or fallback parameter
-            const finalSchema = schema || analysisSchema;
-            // Convert timeout to milliseconds if provided as string
-            const timeoutMs = timeout
-                ? typeof timeout === "string"
-                    ? parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""))
-                    : timeout
-                : this.defaultTimeout;
-            logger.debug(`[${functionTag}] Generate request started`, {
-                provider,
-                modelName: this.modelName,
-                promptLength: prompt.length,
-                temperature,
-                maxTokens,
-                timeout: timeoutMs,
-            });
-            const model = this.getModel(timeoutMs);
-            const generateOptions = {
-                model: model,
-                prompt: prompt,
-                system: systemPrompt,
-                temperature,
-                maxTokens,
-            };
-            if (finalSchema) {
-                generateOptions.experimental_output = Output.object({
-                    schema: finalSchema,
-                });
-            }
-            const result = await generateText(generateOptions);
-            if (result.text.includes("model not found")) {
-                throw new Error(`Model '${this.modelName}' not found. Please run 'ollama pull ${this.modelName}'`);
-            }
-            logger.debug(`[${functionTag}] Generate text completed`, {
-                provider,
-                modelName: this.modelName,
-                usage: result.usage,
-                finishReason: result.finishReason,
-                responseLength: result.text?.length || 0,
-            });
-            // Add analytics if enabled
-            if (options.enableAnalytics) {
-                result.analytics = {
-                    provider,
-                    model: this.modelName,
-                    tokens: result.usage,
-                    responseTime: Date.now() - startTime,
-                    context: options.context,
-                };
-            }
-            // Add evaluation if enabled
-            if (options.enableEvaluation) {
-                result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+            const response = await fetch(`${this.baseUrl}/api/tags`);
+            if (!response.ok) {
+                throw new Error(`Failed to fetch models: ${response.status}`);
             }
-            return {
-                content: result.text,
-                provider: "ollama",
-                model: this.modelName,
-                usage: result.usage
-                    ? {
-                        inputTokens: result.usage.promptTokens,
-                        outputTokens: result.usage.completionTokens,
-                        totalTokens: result.usage.totalTokens,
-                    }
-                    : undefined,
-                responseTime: Date.now() - startTime,
-            };
+            const data = await response.json();
+            return data.models?.map((model) => model.name) || [];
         }
-        catch (err) {
-            logger.debug(`[${functionTag}] Exception`, {
-                provider,
-                modelName: this.modelName,
-                message: "Error in generating text",
-                err: String(err),
-            });
-            throw err; // Re-throw error to trigger fallback
+        catch (error) {
+            logger.warn("Failed to fetch Ollama models:", error);
+            return [];
         }
     }
-    async gen(optionsOrPrompt, analysisSchema) {
-        return this.generate(optionsOrPrompt, analysisSchema);
+    /**
+     * Check if a specific model is available
+     */
+    async isModelAvailable(modelName) {
+        const models = await this.getAvailableModels();
+        return models.includes(modelName);
     }
 }
+export default OllamaProvider;
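
The hunks above define the surface of the rewritten provider: a BaseProvider subclass configured via OLLAMA_BASE_URL (default http://localhost:11434), OLLAMA_MODEL (default "llama3.1:8b"), and OLLAMA_TIMEOUT (default 60000 ms), with model-management helpers and a direct HTTP streaming path. As a rough usage sketch only, not taken from the package's documentation: the deep import path below and calling executeStream() directly are assumptions; in the released package the provider is normally obtained through NeuroLink's provider factory and driven through BaseProvider's shared generate/stream methods.

// Usage sketch for the OllamaProvider shown in this diff (Node ESM).
// Assumptions: the deep import path, and that executeStream() may be called directly
// (it is ordinarily invoked through BaseProvider's public streaming API).
import { OllamaProvider } from "@juspay/neurolink/dist/providers/ollama.js";

const provider = new OllamaProvider(); // model defaults to OLLAMA_MODEL or "llama3.1:8b"

const models = await provider.getAvailableModels(); // GET {OLLAMA_BASE_URL}/api/tags
console.log("Locally installed models:", models);

if (await provider.isModelAvailable("llama3.1:8b")) {
    // executeStream() validates input.text, health-checks /api/version,
    // then streams from /api/generate, yielding { content } chunks.
    const { stream } = await provider.executeStream({
        input: { text: "Summarize the benefits of local inference." },
        systemPrompt: "You are a concise assistant.",
        maxTokens: 256,
    });
    for await (const chunk of stream) {
        process.stdout.write(chunk.content);
    }
}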