@juspay/neurolink 5.1.0 → 5.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. package/CHANGELOG.md +21 -9
  2. package/README.md +123 -126
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/cli/commands/config.d.ts +3 -3
  5. package/dist/cli/commands/mcp.js +8 -7
  6. package/dist/cli/factories/command-factory.d.ts +4 -0
  7. package/dist/cli/factories/command-factory.js +63 -8
  8. package/dist/cli/index.js +87 -140
  9. package/dist/core/base-provider.d.ts +423 -0
  10. package/dist/core/base-provider.js +376 -0
  11. package/dist/core/constants.d.ts +2 -1
  12. package/dist/core/constants.js +2 -1
  13. package/dist/core/dynamic-models.d.ts +6 -6
  14. package/dist/core/evaluation.d.ts +19 -80
  15. package/dist/core/evaluation.js +185 -484
  16. package/dist/core/factory.d.ts +3 -3
  17. package/dist/core/factory.js +31 -91
  18. package/dist/core/service-registry.d.ts +47 -0
  19. package/dist/core/service-registry.js +112 -0
  20. package/dist/core/types.d.ts +8 -1
  21. package/dist/factories/compatibility-factory.js +1 -1
  22. package/dist/factories/provider-factory.d.ts +72 -0
  23. package/dist/factories/provider-factory.js +144 -0
  24. package/dist/factories/provider-registry.d.ts +38 -0
  25. package/dist/factories/provider-registry.js +107 -0
  26. package/dist/index.d.ts +4 -3
  27. package/dist/index.js +2 -4
  28. package/dist/lib/agent/direct-tools.d.ts +6 -6
  29. package/dist/lib/core/base-provider.d.ts +423 -0
  30. package/dist/lib/core/base-provider.js +376 -0
  31. package/dist/lib/core/constants.d.ts +2 -1
  32. package/dist/lib/core/constants.js +2 -1
  33. package/dist/lib/core/dynamic-models.d.ts +6 -6
  34. package/dist/lib/core/evaluation.d.ts +19 -80
  35. package/dist/lib/core/evaluation.js +185 -484
  36. package/dist/lib/core/factory.d.ts +3 -3
  37. package/dist/lib/core/factory.js +30 -91
  38. package/dist/lib/core/service-registry.d.ts +47 -0
  39. package/dist/lib/core/service-registry.js +112 -0
  40. package/dist/lib/core/types.d.ts +8 -1
  41. package/dist/lib/factories/compatibility-factory.js +1 -1
  42. package/dist/lib/factories/provider-factory.d.ts +72 -0
  43. package/dist/lib/factories/provider-factory.js +144 -0
  44. package/dist/lib/factories/provider-registry.d.ts +38 -0
  45. package/dist/lib/factories/provider-registry.js +107 -0
  46. package/dist/lib/index.d.ts +4 -3
  47. package/dist/lib/index.js +2 -4
  48. package/dist/lib/mcp/client.d.ts +1 -0
  49. package/dist/lib/mcp/client.js +1 -0
  50. package/dist/lib/mcp/config.js +28 -3
  51. package/dist/lib/mcp/context-manager.d.ts +1 -0
  52. package/dist/lib/mcp/context-manager.js +8 -4
  53. package/dist/lib/mcp/function-calling.d.ts +13 -0
  54. package/dist/lib/mcp/function-calling.js +134 -35
  55. package/dist/lib/mcp/initialize-tools.d.ts +1 -1
  56. package/dist/lib/mcp/initialize-tools.js +45 -1
  57. package/dist/lib/mcp/initialize.js +16 -6
  58. package/dist/lib/mcp/neurolink-mcp-client.d.ts +1 -0
  59. package/dist/lib/mcp/neurolink-mcp-client.js +21 -5
  60. package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  61. package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
  62. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
  63. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  64. package/dist/lib/mcp/unified-registry.d.ts +4 -0
  65. package/dist/lib/mcp/unified-registry.js +42 -9
  66. package/dist/lib/neurolink.d.ts +156 -117
  67. package/dist/lib/neurolink.js +619 -404
  68. package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
  69. package/dist/lib/providers/amazon-bedrock.js +143 -0
  70. package/dist/lib/providers/analytics-helper.js +7 -4
  71. package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
  72. package/dist/lib/providers/anthropic-baseprovider.js +114 -0
  73. package/dist/lib/providers/anthropic.d.ts +19 -43
  74. package/dist/lib/providers/anthropic.js +82 -306
  75. package/dist/lib/providers/azure-openai.d.ts +20 -0
  76. package/dist/lib/providers/azure-openai.js +89 -0
  77. package/dist/lib/providers/function-calling-provider.d.ts +64 -2
  78. package/dist/lib/providers/function-calling-provider.js +208 -9
  79. package/dist/lib/providers/google-ai-studio.d.ts +23 -0
  80. package/dist/lib/providers/google-ai-studio.js +107 -0
  81. package/dist/lib/providers/google-vertex.d.ts +47 -0
  82. package/dist/lib/providers/google-vertex.js +205 -0
  83. package/dist/lib/providers/huggingFace.d.ts +32 -25
  84. package/dist/lib/providers/huggingFace.js +97 -431
  85. package/dist/lib/providers/index.d.ts +9 -9
  86. package/dist/lib/providers/index.js +9 -9
  87. package/dist/lib/providers/mcp-provider.js +24 -5
  88. package/dist/lib/providers/mistral.d.ts +42 -0
  89. package/dist/lib/providers/mistral.js +160 -0
  90. package/dist/lib/providers/ollama.d.ts +52 -36
  91. package/dist/lib/providers/ollama.js +297 -520
  92. package/dist/lib/providers/openAI.d.ts +19 -18
  93. package/dist/lib/providers/openAI.js +76 -275
  94. package/dist/lib/sdk/tool-extension.d.ts +181 -0
  95. package/dist/lib/sdk/tool-extension.js +283 -0
  96. package/dist/lib/sdk/tool-registration.d.ts +95 -0
  97. package/dist/lib/sdk/tool-registration.js +167 -0
  98. package/dist/lib/services/streaming/streaming-manager.js +11 -10
  99. package/dist/lib/services/websocket/websocket-server.js +12 -11
  100. package/dist/lib/telemetry/telemetry-service.js +8 -7
  101. package/dist/lib/types/generate-types.d.ts +1 -0
  102. package/dist/lib/types/mcp-types.d.ts +116 -0
  103. package/dist/lib/types/mcp-types.js +5 -0
  104. package/dist/lib/types/stream-types.d.ts +30 -18
  105. package/dist/lib/types/universal-provider-options.d.ts +87 -0
  106. package/dist/lib/types/universal-provider-options.js +53 -0
  107. package/dist/mcp/client.d.ts +1 -0
  108. package/dist/mcp/client.js +1 -0
  109. package/dist/mcp/config.js +28 -3
  110. package/dist/mcp/context-manager.d.ts +1 -0
  111. package/dist/mcp/context-manager.js +8 -4
  112. package/dist/mcp/function-calling.d.ts +13 -0
  113. package/dist/mcp/function-calling.js +134 -35
  114. package/dist/mcp/initialize-tools.d.ts +1 -1
  115. package/dist/mcp/initialize-tools.js +45 -1
  116. package/dist/mcp/initialize.js +16 -6
  117. package/dist/mcp/neurolink-mcp-client.d.ts +1 -0
  118. package/dist/mcp/neurolink-mcp-client.js +21 -5
  119. package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  120. package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
  121. package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
  122. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  123. package/dist/mcp/unified-registry.d.ts +4 -0
  124. package/dist/mcp/unified-registry.js +42 -9
  125. package/dist/neurolink.d.ts +156 -117
  126. package/dist/neurolink.js +619 -404
  127. package/dist/providers/amazon-bedrock.d.ts +32 -0
  128. package/dist/providers/amazon-bedrock.js +143 -0
  129. package/dist/providers/analytics-helper.js +7 -4
  130. package/dist/providers/anthropic-baseprovider.d.ts +23 -0
  131. package/dist/providers/anthropic-baseprovider.js +114 -0
  132. package/dist/providers/anthropic.d.ts +19 -43
  133. package/dist/providers/anthropic.js +81 -305
  134. package/dist/providers/azure-openai.d.ts +20 -0
  135. package/dist/providers/azure-openai.js +89 -0
  136. package/dist/providers/function-calling-provider.d.ts +64 -2
  137. package/dist/providers/function-calling-provider.js +208 -9
  138. package/dist/providers/google-ai-studio.d.ts +23 -0
  139. package/dist/providers/google-ai-studio.js +108 -0
  140. package/dist/providers/google-vertex.d.ts +47 -0
  141. package/dist/providers/google-vertex.js +205 -0
  142. package/dist/providers/huggingFace.d.ts +32 -25
  143. package/dist/providers/huggingFace.js +96 -430
  144. package/dist/providers/index.d.ts +9 -9
  145. package/dist/providers/index.js +9 -9
  146. package/dist/providers/mcp-provider.js +24 -5
  147. package/dist/providers/mistral.d.ts +42 -0
  148. package/dist/providers/mistral.js +160 -0
  149. package/dist/providers/ollama.d.ts +52 -36
  150. package/dist/providers/ollama.js +297 -519
  151. package/dist/providers/openAI.d.ts +19 -18
  152. package/dist/providers/openAI.js +76 -276
  153. package/dist/sdk/tool-extension.d.ts +181 -0
  154. package/dist/sdk/tool-extension.js +283 -0
  155. package/dist/sdk/tool-registration.d.ts +95 -0
  156. package/dist/sdk/tool-registration.js +168 -0
  157. package/dist/services/streaming/streaming-manager.js +11 -10
  158. package/dist/services/websocket/websocket-server.js +12 -11
  159. package/dist/telemetry/telemetry-service.js +8 -7
  160. package/dist/types/generate-types.d.ts +1 -0
  161. package/dist/types/mcp-types.d.ts +116 -0
  162. package/dist/types/mcp-types.js +5 -0
  163. package/dist/types/stream-types.d.ts +30 -18
  164. package/dist/types/universal-provider-options.d.ts +87 -0
  165. package/dist/types/universal-provider-options.js +53 -0
  166. package/package.json +12 -5
  167. package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
  168. package/dist/lib/providers/agent-enhanced-provider.js +0 -605
  169. package/dist/lib/providers/amazonBedrock.d.ts +0 -28
  170. package/dist/lib/providers/amazonBedrock.js +0 -364
  171. package/dist/lib/providers/azureOpenAI.d.ts +0 -42
  172. package/dist/lib/providers/azureOpenAI.js +0 -347
  173. package/dist/lib/providers/googleAIStudio.d.ts +0 -42
  174. package/dist/lib/providers/googleAIStudio.js +0 -364
  175. package/dist/lib/providers/googleVertexAI.d.ts +0 -34
  176. package/dist/lib/providers/googleVertexAI.js +0 -547
  177. package/dist/lib/providers/mistralAI.d.ts +0 -37
  178. package/dist/lib/providers/mistralAI.js +0 -325
  179. package/dist/providers/agent-enhanced-provider.d.ts +0 -93
  180. package/dist/providers/agent-enhanced-provider.js +0 -606
  181. package/dist/providers/amazonBedrock.d.ts +0 -28
  182. package/dist/providers/amazonBedrock.js +0 -364
  183. package/dist/providers/azureOpenAI.d.ts +0 -42
  184. package/dist/providers/azureOpenAI.js +0 -348
  185. package/dist/providers/googleAIStudio.d.ts +0 -42
  186. package/dist/providers/googleAIStudio.js +0 -364
  187. package/dist/providers/googleVertexAI.d.ts +0 -34
  188. package/dist/providers/googleVertexAI.js +0 -547
  189. package/dist/providers/mistralAI.d.ts +0 -37
  190. package/dist/providers/mistralAI.js +0 -325
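The headline change in this release is an architectural refactor: providers now extend a shared BaseProvider (new dist/core/base-provider.*), backed by a provider factory and registry (dist/factories/provider-factory.*, provider-registry.*) and an SDK tool-registration layer (dist/sdk/tool-extension.*, tool-registration.*), while the old standalone provider classes (agent-enhanced-provider, amazonBedrock, azureOpenAI, googleAIStudio, googleVertexAI, mistralAI) are removed. As a rough orientation aid, the sketch below shows the provider shape this implies; the method names are taken from the compiled Ollama provider diff reproduced after it, but the BaseProvider contract, the constructor signature, and the ExampleProvider class itself are assumptions, not the package's documented API.

// Sketch only: inferred provider shape under the new BaseProvider architecture.
// "ExampleProvider" and the aiSdkModel argument are hypothetical; compare with
// the real OllamaProvider in the diff below.
import { BaseProvider } from "../core/base-provider.js";

export class ExampleProvider extends BaseProvider {
    constructor(modelName, aiSdkModel) {
        super(modelName, "example"); // model name + provider id, as in OllamaProvider below
        this.aiSdkModel = aiSdkModel;
    }
    getProviderName() {
        return "example";
    }
    getDefaultModel() {
        return "example-model:latest";
    }
    getAISDKModel() {
        // BaseProvider drives generation/streaming through a Vercel AI SDK model instance.
        return this.aiSdkModel;
    }
    supportsTools() {
        return false; // providers can opt out of tool calling, as Ollama does below
    }
}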
@@ -1,23 +1,30 @@
- /**
- * Ollama Provider for NeuroLink
- *
- * Local AI model deployment and management using Ollama.
- * Provides offline AI capabilities with local model hosting.
- *
- * Features:
- * - Local model deployment (privacy-first)
- * - Model management (download, list, remove)
- * - Health checking and service validation
- * - Streaming and non-streaming text generation
- */
- import { streamText, generateText, Output } from "ai";
+ import { streamText, Output } from "ai";
+ import { BaseProvider } from "../core/base-provider.js";
  import { logger } from "../utils/logger.js";
  import { getDefaultTimeout, TimeoutError } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
- import { evaluateResponse } from "../core/evaluation.js";
- // Default system context
- const DEFAULT_SYSTEM_CONTEXT = {
- systemPrompt: "You are a helpful AI assistant.",
+ // Model version constants (configurable via environment)
+ const DEFAULT_OLLAMA_MODEL = "llama3.1:8b";
+ const FALLBACK_OLLAMA_MODEL = "llama3.2:latest"; // Used when primary model fails
+ // Configuration helpers
+ const getOllamaBaseUrl = () => {
+ return process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+ };
+ // Create AbortController with timeout for better compatibility
+ const createAbortSignalWithTimeout = (timeoutMs) => {
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
+ // Clear timeout if signal is aborted through other means
+ controller.signal.addEventListener("abort", () => {
+ clearTimeout(timeoutId);
+ });
+ return controller.signal;
+ };
+ const getDefaultOllamaModel = () => {
+ return process.env.OLLAMA_MODEL || DEFAULT_OLLAMA_MODEL;
+ };
+ const getOllamaTimeout = () => {
+ return parseInt(process.env.OLLAMA_TIMEOUT || "60000", 10);
  };
  // Custom LanguageModelV1 implementation for Ollama
  class OllamaLanguageModel {
@@ -35,7 +42,7 @@ class OllamaLanguageModel {
  this.timeout = timeout;
  }
  estimateTokens(text) {
- return Math.ceil(text.length / 4); // Rough estimation: 4 characters per token
+ return Math.ceil(text.length / 4);
  }
  convertMessagesToPrompt(messages) {
  return messages
@@ -43,569 +50,340 @@ class OllamaLanguageModel {
  if (typeof msg.content === "string") {
  return `${msg.role}: ${msg.content}`;
  }
- else if (Array.isArray(msg.content)) {
- // Handle multi-part content (text, images, etc.)
- return `${msg.role}: ${msg.content
- .filter((part) => part.type === "text")
- .map((part) => part.text)
- .join(" ")}`;
- }
- return "";
+ return `${msg.role}: ${JSON.stringify(msg.content)}`;
  })
  .join("\n");
  }
- async checkHealth() {
- try {
- const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), 5000);
- const response = await fetch(`${this.baseUrl}/api/tags`, {
- method: "GET",
- signal: controller.signal,
- headers: { "Content-Type": "application/json" },
- });
- clearTimeout(timeoutId);
- return response.ok;
- }
- catch {
- return false;
- }
- }
- async ensureModelAvailable() {
- try {
- const response = await fetch(`${this.baseUrl}/api/tags`);
- if (!response.ok) {
- throw new Error("Cannot access Ollama");
- }
- const data = (await response.json());
- const models = data.models?.map((m) => m.name) || [];
- if (!models.includes(this.modelId)) {
- // Try to pull the model
- const pullResponse = await fetch(`${this.baseUrl}/api/pull`, {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify({ name: this.modelId }),
- });
- if (!pullResponse.ok) {
- throw new Error(`Model '${this.modelId}' not available and cannot be pulled`);
- }
- }
- }
- catch (error) {
- throw new Error(`Failed to ensure model availability: ${error instanceof Error ? error.message : String(error)}`);
- }
- }
  async doGenerate(options) {
- // Health check and model availability
- const isHealthy = await this.checkHealth();
- if (!isHealthy) {
- throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
+ const messages = options.messages || [];
+ const prompt = this.convertMessagesToPrompt(messages);
+ const response = await fetch(`${this.baseUrl}/api/generate`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: this.modelId,
+ prompt,
+ stream: false,
+ system: messages.find((m) => m.role === "system")?.content,
+ options: {
+ temperature: options.temperature,
+ num_predict: options.maxTokens,
+ },
+ }),
+ signal: createAbortSignalWithTimeout(this.timeout),
+ });
+ if (!response.ok) {
+ throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
- await this.ensureModelAvailable();
- const prompt = this.convertMessagesToPrompt(options.prompt);
- const requestPayload = {
- model: this.modelId,
- prompt,
- stream: false,
- options: {
- temperature: options.temperature || 0.7,
- num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
+ const data = await response.json();
+ return {
+ text: data.response,
+ usage: {
+ promptTokens: this.estimateTokens(prompt),
+ completionTokens: this.estimateTokens(data.response),
  },
  };
- const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), this.timeout);
- try {
- const response = await fetch(`${this.baseUrl}/api/generate`, {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify(requestPayload),
- signal: controller.signal,
- });
- clearTimeout(timeoutId);
- if (!response.ok) {
- if (response.status === 404) {
- const errorData = await response.json();
- if (errorData.error && errorData.error.includes("not found")) {
- throw new Error(`Model '${this.modelId}' not found. Please run 'ollama pull ${this.modelId}'`);
- }
- }
- throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
- }
- const data = (await response.json());
- if (!data.response) {
- throw new Error("No response received from Ollama");
- }
- const promptTokens = this.estimateTokens(prompt);
- const completionTokens = this.estimateTokens(data.response);
- return {
- text: data.response,
- usage: {
- promptTokens,
- completionTokens,
- totalTokens: promptTokens + completionTokens,
- },
- finishReason: "stop",
- logprobs: undefined,
- rawCall: { rawPrompt: prompt, rawSettings: options },
- rawResponse: { headers: {} },
- };
- }
- catch (error) {
- clearTimeout(timeoutId);
- const errorMessage = error instanceof Error ? error.message : String(error);
- if (errorMessage.includes("AbortError") ||
- errorMessage.includes("timeout")) {
- throw new Error(`Ollama request timeout (${this.timeout}ms). The model may be large or the system is under load.`);
- }
- if (errorMessage.includes("ECONNREFUSED") ||
- errorMessage.includes("fetch failed")) {
- throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
- this.baseUrl);
- }
- throw error;
- }
  }
  async doStream(options) {
- // Health check and model availability
- const isHealthy = await this.checkHealth();
- if (!isHealthy) {
- throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
+ const messages = options.messages || [];
+ const prompt = this.convertMessagesToPrompt(messages);
+ const response = await fetch(`${this.baseUrl}/api/generate`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: this.modelId,
+ prompt,
+ stream: true,
+ system: messages.find((m) => m.role === "system")?.content,
+ options: {
+ temperature: options.temperature,
+ num_predict: options.maxTokens,
+ },
+ }),
+ signal: createAbortSignalWithTimeout(this.timeout),
+ });
+ if (!response.ok) {
+ throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
- await this.ensureModelAvailable();
- const prompt = this.convertMessagesToPrompt(options.prompt);
- const requestPayload = {
- model: this.modelId,
- prompt,
- stream: true,
- options: {
- temperature: options.temperature || 0.7,
- num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
- },
+ return {
+ stream: this.parseStreamResponse(response),
  };
- const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+ }
+ async *parseStreamResponse(response) {
+ const reader = response.body?.getReader();
+ if (!reader) {
+ throw new Error("No response body");
+ }
+ const decoder = new TextDecoder();
+ let buffer = "";
  try {
- const response = await fetch(`${this.baseUrl}/api/generate`, {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify(requestPayload),
- signal: controller.signal,
- });
- clearTimeout(timeoutId);
- if (!response.ok) {
- if (response.status === 404) {
- const errorData = await response.json();
- if (errorData.error && errorData.error.includes("not found")) {
- throw new Error(`Model '${this.modelId}' not found. Please run 'ollama pull ${this.modelId}'`);
- }
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
  }
- throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
- }
- if (!response.body) {
- throw new Error("No response body received from Ollama streaming API");
- }
- // Create a ReadableStream that parses Ollama's streaming format
- const stream = new ReadableStream({
- async start(controller) {
- const reader = response.body.getReader();
- const decoder = new TextDecoder();
- let totalTokens = 0;
- try {
- while (true) {
- const { done, value } = await reader.read();
- if (done) {
- break;
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+ for (const line of lines) {
+ if (line.trim()) {
+ try {
+ const data = JSON.parse(line);
+ if (data.response) {
+ yield {
+ type: "text-delta",
+ textDelta: data.response,
+ };
  }
- const chunk = decoder.decode(value, { stream: true });
- const lines = chunk.split("\n").filter((line) => line.trim());
- for (const line of lines) {
- try {
- const data = JSON.parse(line);
- if (data.response) {
- controller.enqueue({
- type: "text-delta",
- textDelta: data.response,
- });
- totalTokens += Math.ceil(data.response.length / 4);
- }
- if (data.done) {
- controller.enqueue({
- type: "finish",
- finishReason: "stop",
- usage: {
- promptTokens: data.prompt_eval_count ||
- Math.ceil(prompt.length / 4),
- completionTokens: data.eval_count || totalTokens,
- },
- logprobs: undefined,
- });
- controller.close();
- return;
- }
- }
- catch (parseError) {
- // Skip invalid JSON lines
- }
+ if (data.done) {
+ yield {
+ type: "finish",
+ finishReason: "stop",
+ usage: {
+ promptTokens: this.estimateTokens(data.context || ""),
+ completionTokens: data.eval_count || 0,
+ },
+ };
+ return;
  }
  }
+ catch (error) {
+ // Ignore JSON parse errors for incomplete chunks
+ }
  }
- finally {
- reader.releaseLock();
- }
- },
- });
- return {
- stream,
- rawCall: { rawPrompt: prompt, rawSettings: options },
- rawResponse: { headers: {} },
- };
- }
- catch (error) {
- clearTimeout(timeoutId);
- const errorMessage = error instanceof Error ? error.message : String(error);
- if (errorMessage.includes("AbortError") ||
- errorMessage.includes("timeout")) {
- throw new Error(`Ollama streaming timeout (${this.timeout}ms). The model may be large or the system is under load.`);
- }
- if (errorMessage.includes("ECONNREFUSED") ||
- errorMessage.includes("fetch failed")) {
- throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
- this.baseUrl);
+ }
  }
- throw error;
+ }
+ finally {
+ reader.releaseLock();
  }
  }
  }
- export class Ollama {
+ /**
+ * Ollama Provider v2 - BaseProvider Implementation
+ *
+ * PHASE 3.7: BaseProvider wrap around existing custom Ollama implementation
+ *
+ * Features:
+ * - Extends BaseProvider for shared functionality
+ * - Preserves custom OllamaLanguageModel implementation
+ * - Local model management and health checking
+ * - Enhanced error handling with Ollama-specific guidance
+ */
+ export class OllamaProvider extends BaseProvider {
+ ollamaModel;
  baseUrl;
- modelName;
- defaultTimeout;
+ timeout;
  constructor(modelName) {
- this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
- this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
- // Use environment variable for backward compatibility, but convert to format used by other providers
- const envTimeout = process.env.OLLAMA_TIMEOUT
- ? parseInt(process.env.OLLAMA_TIMEOUT)
- : undefined;
- this.defaultTimeout =
- envTimeout ||
- parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""));
- logger.debug("[Ollama] Initialized", {
- baseUrl: this.baseUrl,
+ super(modelName, "ollama");
+ this.baseUrl = getOllamaBaseUrl();
+ this.timeout = getOllamaTimeout();
+ // Initialize Ollama model
+ this.ollamaModel = new OllamaLanguageModel(this.modelName || getDefaultOllamaModel(), this.baseUrl, this.timeout);
+ logger.debug("Ollama BaseProvider v2 initialized", {
  modelName: this.modelName,
- defaultTimeout: this.defaultTimeout,
+ baseUrl: this.baseUrl,
+ timeout: this.timeout,
+ provider: this.providerName,
  });
  }
- /**
- * Gets the appropriate model instance
- * @private
- */
- getModel(timeout) {
- logger.debug("Ollama.getModel - Ollama model selected", {
- modelName: this.modelName,
- timeout: timeout || this.defaultTimeout,
- });
- return new OllamaLanguageModel(this.modelName, this.baseUrl, timeout || this.defaultTimeout);
+ getProviderName() {
+ return "ollama";
+ }
+ getDefaultModel() {
+ return getDefaultOllamaModel();
  }
  /**
- * Health check - verify Ollama service is running and accessible
+ * Returns the Vercel AI SDK model instance for Ollama
  */
- async checkHealth() {
- const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.defaultTimeout);
- return await model["checkHealth"]();
+ getAISDKModel() {
+ return this.ollamaModel;
  }
  /**
- * List available models on the Ollama instance
+ * Ollama tool/function calling support is currently disabled due to integration issues.
+ *
+ * **Current Issues:**
+ * 1. The OllamaLanguageModel from @ai-sdk/provider-utils doesn't properly integrate
+ * with BaseProvider's tool calling mechanism
+ * 2. Ollama models require specific prompt formatting for function calls that differs
+ * from the standardized AI SDK format
+ * 3. Tool response parsing and execution flow needs custom implementation
+ *
+ * **What's needed to enable tool support:**
+ * - Create a custom OllamaLanguageModel wrapper that handles tool schema formatting
+ * - Implement Ollama-specific tool calling prompt templates
+ * - Add proper response parsing for Ollama's function call format
+ * - Test with models that support function calling (llama3.1, mistral, etc.)
+ *
+ * **Tracking:**
+ * - See BaseProvider tool integration patterns in other providers
+ * - Monitor Ollama function calling documentation: https://ollama.com/blog/tool-support
+ * - Track AI SDK updates for better Ollama integration
+ *
+ * @returns false to disable tools by default
  */
- async listModels() {
- const functionTag = "Ollama.listModels";
+ supportsTools() {
+ // TODO: Fix the OllamaLanguageModel integration with BaseProvider for tool support.
+ // Track progress on resolving this issue. See the detailed steps above.
+ // Issue tracking required for enabling tool support
+ return false;
+ }
+ // executeGenerate removed - BaseProvider handles all generation with tools
+ async executeStream(options, analysisSchema) {
  try {
- logger.debug(`[${functionTag}] Listing available models`);
- const response = await fetch(`${this.baseUrl}/api/tags`, {
- method: "GET",
- headers: {
- "Content-Type": "application/json",
- },
+ this.validateStreamOptions(options);
+ await this.checkOllamaHealth();
+ // Direct HTTP streaming implementation for better compatibility
+ const response = await fetch(`${this.baseUrl}/api/generate`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: this.modelName || FALLBACK_OLLAMA_MODEL,
+ prompt: options.input.text,
+ system: options.systemPrompt,
+ stream: true,
+ options: {
+ temperature: options.temperature,
+ num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
+ },
+ }),
+ signal: createAbortSignalWithTimeout(this.timeout),
  });
  if (!response.ok) {
- throw new Error(`Failed to list models: ${response.status} ${response.statusText}`);
+ throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
- const data = (await response.json());
- const modelNames = data.models?.map((model) => model.name) || [];
- logger.debug(`[${functionTag}] Found models`, {
- count: modelNames.length,
- models: modelNames,
- });
- return modelNames;
+ // Transform to async generator to match other providers
+ const self = this;
+ const transformedStream = async function* () {
+ const generator = self.createOllamaStream(response);
+ for await (const chunk of generator) {
+ yield chunk;
+ }
+ };
+ return {
+ stream: transformedStream(),
+ provider: this.providerName,
+ model: this.modelName,
+ };
  }
  catch (error) {
- logger.debug(`[${functionTag}] Error listing models`, {
- error: error instanceof Error ? error.message : String(error),
- });
- throw new Error(`Failed to list Ollama models: ${error instanceof Error ? error.message : String(error)}`);
+ throw this.handleProviderError(error);
  }
  }
- /**
- * Check if a specific model is available
- */
- async isModelAvailable(modelName) {
+ async *createOllamaStream(response) {
+ const reader = response.body?.getReader();
+ if (!reader) {
+ throw new Error("No response body");
+ }
+ const decoder = new TextDecoder();
+ let buffer = "";
  try {
- const models = await this.listModels();
- return models.includes(modelName);
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
+ }
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+ for (const line of lines) {
+ if (line.trim()) {
+ try {
+ const data = JSON.parse(line);
+ if (data.response) {
+ yield { content: data.response };
+ }
+ if (data.done) {
+ return;
+ }
+ }
+ catch (error) {
+ // Ignore JSON parse errors for incomplete chunks
+ }
+ }
+ }
+ }
  }
- catch (error) {
- return false;
+ finally {
+ reader.releaseLock();
  }
  }
- /**
- * Pull/download a model to the local Ollama instance
- */
- async pullModel(modelName) {
- const functionTag = "Ollama.pullModel";
- try {
- logger.debug(`[${functionTag}] Pulling model`, { modelName });
- const response = await fetch(`${this.baseUrl}/api/pull`, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- },
- body: JSON.stringify({
- name: modelName,
- }),
- });
- if (!response.ok) {
- throw new Error(`Failed to pull model: ${response.status} ${response.statusText}`);
- }
- // Note: Ollama pull API returns streaming responses
- // For simplicity, we're not handling the streaming progress here
- logger.debug(`[${functionTag}] Model pull completed`, { modelName });
+ handleProviderError(error) {
+ if (error.name === "TimeoutError") {
+ return new TimeoutError(`Ollama request timed out. The model might be loading or the request is too complex.`, this.defaultTimeout);
  }
- catch (error) {
- logger.debug(`[${functionTag}] Error pulling model`, {
- modelName,
- error: error instanceof Error ? error.message : String(error),
- });
- throw new Error(`Failed to pull model '${modelName}': ${error instanceof Error ? error.message : String(error)}`);
+ if (error.message?.includes("ECONNREFUSED") ||
+ error.message?.includes("fetch failed")) {
+ return new Error(`āŒ Ollama Service Not Running\n\nCannot connect to Ollama at ${this.baseUrl}\n\nšŸ”§ Steps to Fix:\n1. Install Ollama: https://ollama.ai/\n2. Start Ollama service: 'ollama serve'\n3. Verify it's running: 'curl ${this.baseUrl}/api/version'\n4. Try again`);
+ }
+ if (error.message?.includes("model") &&
+ error.message?.includes("not found")) {
+ return new Error(`āŒ Ollama Model Not Found\n\nModel '${this.modelName}' is not available locally.\n\nšŸ”§ Install Model:\n1. Run: ollama pull ${this.modelName}\n2. Or try a different model:\n - ollama pull ${FALLBACK_OLLAMA_MODEL}\n - ollama pull mistral:latest\n - ollama pull codellama:latest\n\nšŸ”§ List Available Models:\nollama list`);
+ }
+ if (error.message?.includes("404")) {
+ return new Error(`āŒ Ollama API Endpoint Not Found\n\nThe API endpoint might have changed or Ollama version is incompatible.\n\nšŸ”§ Check:\n1. Ollama version: 'ollama --version'\n2. Update Ollama to latest version\n3. Verify API is available: 'curl ${this.baseUrl}/api/version'`);
+ }
+ return new Error(`āŒ Ollama Provider Error\n\n${error.message || "Unknown error occurred"}\n\nšŸ”§ Troubleshooting:\n1. Check if Ollama service is running\n2. Verify model is installed: 'ollama list'\n3. Check network connectivity to ${this.baseUrl}\n4. Review Ollama logs for details`);
+ }
+ validateStreamOptions(options) {
+ if (!options.input?.text?.trim()) {
+ throw new Error("Prompt is required for streaming");
+ }
+ if (options.maxTokens && options.maxTokens < 1) {
+ throw new Error("maxTokens must be greater than 0");
+ }
+ if (options.temperature &&
+ (options.temperature < 0 || options.temperature > 2)) {
+ throw new Error("temperature must be between 0 and 2");
  }
  }
  /**
- * PRIMARY METHOD: Stream content using AI (recommended for new code)
- * Future-ready for multi-modal capabilities with current text focus
+ * Check if Ollama service is healthy and accessible
  */
- async stream(optionsOrPrompt, analysisSchema) {
- const functionTag = "Ollama.stream";
- const provider = "ollama";
- let chunkCount = 0;
- const startTime = Date.now();
+ async checkOllamaHealth() {
  try {
- // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === "string"
- ? { input: { text: optionsOrPrompt } }
- : optionsOrPrompt;
- // Validate input
- if (!options?.input?.text ||
- typeof options.input.text !== "string" ||
- options.input.text.trim() === "") {
- throw new Error("Stream options must include input.text as a non-empty string");
- }
- // Extract parameters
- const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
- // Use schema from options or fallback parameter
- const finalSchema = schema || analysisSchema;
- // Convert timeout to milliseconds if provided as string
- const timeoutMs = timeout
- ? typeof timeout === "string"
- ? parseInt(getDefaultTimeout("ollama", "stream").replace(/[^\d]/g, ""))
- : timeout
- : this.defaultTimeout;
- logger.debug(`[${functionTag}] Stream request started`, {
- provider,
- modelName: this.modelName,
- promptLength: prompt.length,
- temperature,
- maxTokens,
- hasSchema: !!finalSchema,
- timeout: timeoutMs,
+ // Use traditional AbortController for better compatibility
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), 5000);
+ const response = await fetch(`${this.baseUrl}/api/version`, {
+ method: "GET",
+ signal: controller.signal,
  });
- const model = this.getModel(timeoutMs);
- const streamOptions = {
- model: model,
- prompt: prompt,
- system: systemPrompt,
- temperature,
- maxTokens,
- onError: (event) => {
- const error = event.error;
- const errorMessage = error instanceof Error ? error.message : String(error);
- const errorStack = error instanceof Error ? error.stack : undefined;
- logger.debug(`[${functionTag}] Stream error`, {
- provider,
- modelName: this.modelName,
- error: errorMessage,
- stack: errorStack,
- promptLength: prompt.length,
- chunkCount,
- });
- },
- onFinish: (event) => {
- logger.debug(`[${functionTag}] Stream finished`, {
- provider,
- modelName: this.modelName,
- finishReason: event.finishReason,
- usage: event.usage,
- totalChunks: chunkCount,
- promptLength: prompt.length,
- responseLength: event.text?.length || 0,
- });
- },
- onChunk: (event) => {
- chunkCount++;
- logger.debug(`[${functionTag}] Stream chunk`, {
- provider,
- modelName: this.modelName,
- chunkNumber: chunkCount,
- chunkLength: event.chunk.text?.length || 0,
- chunkType: event.chunk.type,
- });
- },
- };
- if (finalSchema) {
- streamOptions.experimental_output = Output.object({
- schema: finalSchema,
- });
+ clearTimeout(timeoutId);
+ if (!response.ok) {
+ throw new Error(`Ollama health check failed: ${response.status}`);
  }
- const result = streamText(streamOptions);
- logger.debug(`[${functionTag}] Stream request completed`, {
- provider,
- modelName: this.modelName,
- });
- // Convert to StreamResult format
- return {
- stream: (async function* () {
- for await (const chunk of result.textStream) {
- yield { content: chunk };
- }
- })(),
- provider: "ollama",
- model: this.modelName,
- metadata: {
- streamId: `ollama-${Date.now()}`,
- startTime,
- },
- };
  }
- catch (err) {
- // Log timeout errors specifically
- if (err instanceof TimeoutError) {
- logger.debug(`[${functionTag}] Timeout error`, {
- provider,
- modelName: this.modelName,
- timeout: err.timeout,
- message: err.message,
- });
- }
- else {
- logger.debug(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in streaming content",
- err: String(err),
- });
+ catch (error) {
+ if (error instanceof Error && error.message.includes("ECONNREFUSED")) {
+ throw new Error(`āŒ Ollama Service Not Running\n\nCannot connect to Ollama service.\n\nšŸ”§ Start Ollama:\n1. Run: ollama serve\n2. Or start Ollama app\n3. Verify: curl ${this.baseUrl}/api/version`);
  }
- throw err; // Re-throw error to trigger fallback
+ throw error;
  }
  }
  /**
- * Generate text using Ollama local models
+ * Get available models from Ollama
  */
- async generate(optionsOrPrompt, analysisSchema) {
- const functionTag = "Ollama.generate";
- const provider = "ollama";
- const startTime = Date.now();
+ async getAvailableModels() {
  try {
- // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
- // Use schema from options or fallback parameter
- const finalSchema = schema || analysisSchema;
- // Convert timeout to milliseconds if provided as string
- const timeoutMs = timeout
- ? typeof timeout === "string"
- ? parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""))
- : timeout
- : this.defaultTimeout;
- logger.debug(`[${functionTag}] Generate request started`, {
- provider,
- modelName: this.modelName,
- promptLength: prompt.length,
- temperature,
- maxTokens,
- timeout: timeoutMs,
- });
- const model = this.getModel(timeoutMs);
- const generateOptions = {
- model: model,
- prompt: prompt,
- system: systemPrompt,
- temperature,
- maxTokens,
- };
- if (finalSchema) {
- generateOptions.experimental_output = Output.object({
- schema: finalSchema,
- });
- }
- const result = await generateText(generateOptions);
- if (result.text.includes("model not found")) {
- throw new Error(`Model '${this.modelName}' not found. Please run 'ollama pull ${this.modelName}'`);
- }
- logger.debug(`[${functionTag}] Generate text completed`, {
- provider,
- modelName: this.modelName,
- usage: result.usage,
- finishReason: result.finishReason,
- responseLength: result.text?.length || 0,
- });
- // Add analytics if enabled
- if (options.enableAnalytics) {
- result.analytics = {
- provider,
- model: this.modelName,
- tokens: result.usage,
- responseTime: Date.now() - startTime,
- context: options.context,
- };
- }
- // Add evaluation if enabled
- if (options.enableEvaluation) {
- result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+ const response = await fetch(`${this.baseUrl}/api/tags`);
+ if (!response.ok) {
+ throw new Error(`Failed to fetch models: ${response.status}`);
  }
- return {
- content: result.text,
- provider: "ollama",
- model: this.modelName,
- usage: result.usage
- ? {
- inputTokens: result.usage.promptTokens,
- outputTokens: result.usage.completionTokens,
- totalTokens: result.usage.totalTokens,
- }
- : undefined,
- responseTime: Date.now() - startTime,
- };
+ const data = await response.json();
+ return data.models?.map((model) => model.name) || [];
  }
- catch (err) {
- logger.debug(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in generating text",
- err: String(err),
- });
- throw err; // Re-throw error to trigger fallback
+ catch (error) {
+ logger.warn("Failed to fetch Ollama models:", error);
+ return [];
  }
  }
- async gen(optionsOrPrompt, analysisSchema) {
- return this.generate(optionsOrPrompt, analysisSchema);
+ /**
+ * Check if a specific model is available
+ */
+ async isModelAvailable(modelName) {
+ const models = await this.getAvailableModels();
+ return models.includes(modelName);
  }
  }
+ export default OllamaProvider;
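For reference, a minimal usage sketch built only from members visible in the diff above (the modelName constructor argument, checkOllamaHealth, getAvailableModels, isModelAvailable). The import path is an assumption; consult the package README for the supported entry point.

// Usage sketch (ES module with top-level await). Import path is assumed, not documented here.
import { OllamaProvider } from "@juspay/neurolink/dist/providers/ollama.js";

// Honors OLLAMA_BASE_URL (default http://localhost:11434), OLLAMA_MODEL, and OLLAMA_TIMEOUT (default 60000 ms).
const provider = new OllamaProvider("llama3.1:8b");

await provider.checkOllamaHealth();                  // throws with setup guidance if the service is down
const models = await provider.getAvailableModels();  // returns [] if the /api/tags call fails
if (await provider.isModelAvailable("llama3.1:8b")) {
    console.log("Model is installed locally:", models);
}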