@juspay/neurolink 5.0.0 → 5.2.0

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (214)
  1. package/CHANGELOG.md +20 -7
  2. package/README.md +160 -172
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/chat/sse-handler.js +5 -4
  5. package/dist/chat/websocket-chat-handler.js +9 -9
  6. package/dist/cli/commands/config.d.ts +3 -3
  7. package/dist/cli/commands/mcp.js +9 -8
  8. package/dist/cli/commands/ollama.js +3 -3
  9. package/dist/cli/factories/command-factory.d.ts +18 -0
  10. package/dist/cli/factories/command-factory.js +183 -0
  11. package/dist/cli/index.js +105 -157
  12. package/dist/cli/utils/interactive-setup.js +2 -2
  13. package/dist/core/base-provider.d.ts +423 -0
  14. package/dist/core/base-provider.js +365 -0
  15. package/dist/core/constants.d.ts +1 -1
  16. package/dist/core/constants.js +1 -1
  17. package/dist/core/dynamic-models.d.ts +6 -6
  18. package/dist/core/evaluation.d.ts +19 -80
  19. package/dist/core/evaluation.js +185 -484
  20. package/dist/core/factory.d.ts +3 -3
  21. package/dist/core/factory.js +31 -91
  22. package/dist/core/service-registry.d.ts +47 -0
  23. package/dist/core/service-registry.js +112 -0
  24. package/dist/core/types.d.ts +49 -49
  25. package/dist/core/types.js +1 -0
  26. package/dist/factories/compatibility-factory.d.ts +20 -0
  27. package/dist/factories/compatibility-factory.js +69 -0
  28. package/dist/factories/provider-factory.d.ts +72 -0
  29. package/dist/factories/provider-factory.js +144 -0
  30. package/dist/factories/provider-generate-factory.d.ts +20 -0
  31. package/dist/factories/provider-generate-factory.js +87 -0
  32. package/dist/factories/provider-registry.d.ts +38 -0
  33. package/dist/factories/provider-registry.js +107 -0
  34. package/dist/index.d.ts +8 -5
  35. package/dist/index.js +5 -5
  36. package/dist/lib/agent/direct-tools.d.ts +6 -6
  37. package/dist/lib/chat/sse-handler.js +5 -4
  38. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  39. package/dist/lib/core/base-provider.d.ts +423 -0
  40. package/dist/lib/core/base-provider.js +365 -0
  41. package/dist/lib/core/constants.d.ts +1 -1
  42. package/dist/lib/core/constants.js +1 -1
  43. package/dist/lib/core/dynamic-models.d.ts +6 -6
  44. package/dist/lib/core/evaluation.d.ts +19 -80
  45. package/dist/lib/core/evaluation.js +185 -484
  46. package/dist/lib/core/factory.d.ts +3 -3
  47. package/dist/lib/core/factory.js +30 -91
  48. package/dist/lib/core/service-registry.d.ts +47 -0
  49. package/dist/lib/core/service-registry.js +112 -0
  50. package/dist/lib/core/types.d.ts +49 -49
  51. package/dist/lib/core/types.js +1 -0
  52. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  53. package/dist/lib/factories/compatibility-factory.js +69 -0
  54. package/dist/lib/factories/provider-factory.d.ts +72 -0
  55. package/dist/lib/factories/provider-factory.js +144 -0
  56. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  57. package/dist/lib/factories/provider-generate-factory.js +87 -0
  58. package/dist/lib/factories/provider-registry.d.ts +38 -0
  59. package/dist/lib/factories/provider-registry.js +107 -0
  60. package/dist/lib/index.d.ts +8 -5
  61. package/dist/lib/index.js +5 -5
  62. package/dist/lib/mcp/client.js +5 -5
  63. package/dist/lib/mcp/config.js +28 -3
  64. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  65. package/dist/lib/mcp/external-client.js +2 -2
  66. package/dist/lib/mcp/factory.d.ts +1 -1
  67. package/dist/lib/mcp/factory.js +1 -1
  68. package/dist/lib/mcp/function-calling.js +1 -1
  69. package/dist/lib/mcp/initialize-tools.d.ts +1 -1
  70. package/dist/lib/mcp/initialize-tools.js +45 -1
  71. package/dist/lib/mcp/initialize.js +16 -6
  72. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  73. package/dist/lib/mcp/orchestrator.js +4 -4
  74. package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  75. package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
  76. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  77. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +8 -6
  78. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  79. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  80. package/dist/lib/mcp/unified-registry.d.ts +4 -0
  81. package/dist/lib/mcp/unified-registry.js +42 -9
  82. package/dist/lib/neurolink.d.ts +161 -174
  83. package/dist/lib/neurolink.js +723 -397
  84. package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
  85. package/dist/lib/providers/amazon-bedrock.js +143 -0
  86. package/dist/lib/providers/analytics-helper.js +7 -4
  87. package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
  88. package/dist/lib/providers/anthropic-baseprovider.js +114 -0
  89. package/dist/lib/providers/anthropic.d.ts +19 -39
  90. package/dist/lib/providers/anthropic.js +84 -378
  91. package/dist/lib/providers/azure-openai.d.ts +20 -0
  92. package/dist/lib/providers/azure-openai.js +89 -0
  93. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  94. package/dist/lib/providers/function-calling-provider.js +114 -64
  95. package/dist/lib/providers/google-ai-studio.d.ts +23 -0
  96. package/dist/lib/providers/google-ai-studio.js +107 -0
  97. package/dist/lib/providers/google-vertex.d.ts +47 -0
  98. package/dist/lib/providers/google-vertex.js +205 -0
  99. package/dist/lib/providers/huggingFace.d.ts +33 -27
  100. package/dist/lib/providers/huggingFace.js +103 -400
  101. package/dist/lib/providers/index.d.ts +9 -9
  102. package/dist/lib/providers/index.js +9 -9
  103. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  104. package/dist/lib/providers/mcp-provider.js +63 -18
  105. package/dist/lib/providers/mistral.d.ts +42 -0
  106. package/dist/lib/providers/mistral.js +160 -0
  107. package/dist/lib/providers/ollama.d.ts +52 -35
  108. package/dist/lib/providers/ollama.js +297 -477
  109. package/dist/lib/providers/openAI.d.ts +21 -21
  110. package/dist/lib/providers/openAI.js +81 -245
  111. package/dist/lib/sdk/tool-extension.d.ts +181 -0
  112. package/dist/lib/sdk/tool-extension.js +283 -0
  113. package/dist/lib/sdk/tool-registration.d.ts +95 -0
  114. package/dist/lib/sdk/tool-registration.js +167 -0
  115. package/dist/lib/types/generate-types.d.ts +80 -0
  116. package/dist/lib/types/generate-types.js +1 -0
  117. package/dist/lib/types/mcp-types.d.ts +116 -0
  118. package/dist/lib/types/mcp-types.js +5 -0
  119. package/dist/lib/types/stream-types.d.ts +95 -0
  120. package/dist/lib/types/stream-types.js +1 -0
  121. package/dist/lib/types/universal-provider-options.d.ts +87 -0
  122. package/dist/lib/types/universal-provider-options.js +53 -0
  123. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  124. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  125. package/dist/lib/utils/streaming-utils.js +0 -3
  126. package/dist/mcp/client.js +5 -5
  127. package/dist/mcp/config.js +28 -3
  128. package/dist/mcp/dynamic-orchestrator.js +8 -8
  129. package/dist/mcp/external-client.js +2 -2
  130. package/dist/mcp/factory.d.ts +1 -1
  131. package/dist/mcp/factory.js +1 -1
  132. package/dist/mcp/function-calling.js +1 -1
  133. package/dist/mcp/initialize-tools.d.ts +1 -1
  134. package/dist/mcp/initialize-tools.js +45 -1
  135. package/dist/mcp/initialize.js +16 -6
  136. package/dist/mcp/neurolink-mcp-client.js +10 -10
  137. package/dist/mcp/orchestrator.js +4 -4
  138. package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  139. package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
  140. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  141. package/dist/mcp/servers/ai-providers/ai-core-server.js +8 -6
  142. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  143. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  144. package/dist/mcp/unified-registry.d.ts +4 -0
  145. package/dist/mcp/unified-registry.js +42 -9
  146. package/dist/neurolink.d.ts +161 -174
  147. package/dist/neurolink.js +723 -397
  148. package/dist/providers/amazon-bedrock.d.ts +32 -0
  149. package/dist/providers/amazon-bedrock.js +143 -0
  150. package/dist/providers/analytics-helper.js +7 -4
  151. package/dist/providers/anthropic-baseprovider.d.ts +23 -0
  152. package/dist/providers/anthropic-baseprovider.js +114 -0
  153. package/dist/providers/anthropic.d.ts +19 -39
  154. package/dist/providers/anthropic.js +83 -377
  155. package/dist/providers/azure-openai.d.ts +20 -0
  156. package/dist/providers/azure-openai.js +89 -0
  157. package/dist/providers/function-calling-provider.d.ts +14 -12
  158. package/dist/providers/function-calling-provider.js +114 -64
  159. package/dist/providers/google-ai-studio.d.ts +23 -0
  160. package/dist/providers/google-ai-studio.js +108 -0
  161. package/dist/providers/google-vertex.d.ts +47 -0
  162. package/dist/providers/google-vertex.js +205 -0
  163. package/dist/providers/huggingFace.d.ts +33 -27
  164. package/dist/providers/huggingFace.js +102 -399
  165. package/dist/providers/index.d.ts +9 -9
  166. package/dist/providers/index.js +9 -9
  167. package/dist/providers/mcp-provider.d.ts +13 -8
  168. package/dist/providers/mcp-provider.js +63 -18
  169. package/dist/providers/mistral.d.ts +42 -0
  170. package/dist/providers/mistral.js +160 -0
  171. package/dist/providers/ollama.d.ts +52 -35
  172. package/dist/providers/ollama.js +297 -476
  173. package/dist/providers/openAI.d.ts +21 -21
  174. package/dist/providers/openAI.js +81 -246
  175. package/dist/sdk/tool-extension.d.ts +181 -0
  176. package/dist/sdk/tool-extension.js +283 -0
  177. package/dist/sdk/tool-registration.d.ts +95 -0
  178. package/dist/sdk/tool-registration.js +168 -0
  179. package/dist/types/generate-types.d.ts +80 -0
  180. package/dist/types/generate-types.js +1 -0
  181. package/dist/types/mcp-types.d.ts +116 -0
  182. package/dist/types/mcp-types.js +5 -0
  183. package/dist/types/stream-types.d.ts +95 -0
  184. package/dist/types/stream-types.js +1 -0
  185. package/dist/types/universal-provider-options.d.ts +87 -0
  186. package/dist/types/universal-provider-options.js +53 -0
  187. package/dist/utils/providerUtils-fixed.js +1 -1
  188. package/dist/utils/streaming-utils.d.ts +14 -2
  189. package/dist/utils/streaming-utils.js +0 -3
  190. package/package.json +15 -10
  191. package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -89
  192. package/dist/lib/providers/agent-enhanced-provider.js +0 -614
  193. package/dist/lib/providers/amazonBedrock.d.ts +0 -19
  194. package/dist/lib/providers/amazonBedrock.js +0 -334
  195. package/dist/lib/providers/azureOpenAI.d.ts +0 -39
  196. package/dist/lib/providers/azureOpenAI.js +0 -436
  197. package/dist/lib/providers/googleAIStudio.d.ts +0 -49
  198. package/dist/lib/providers/googleAIStudio.js +0 -333
  199. package/dist/lib/providers/googleVertexAI.d.ts +0 -38
  200. package/dist/lib/providers/googleVertexAI.js +0 -519
  201. package/dist/lib/providers/mistralAI.d.ts +0 -34
  202. package/dist/lib/providers/mistralAI.js +0 -294
  203. package/dist/providers/agent-enhanced-provider.d.ts +0 -89
  204. package/dist/providers/agent-enhanced-provider.js +0 -614
  205. package/dist/providers/amazonBedrock.d.ts +0 -19
  206. package/dist/providers/amazonBedrock.js +0 -334
  207. package/dist/providers/azureOpenAI.d.ts +0 -39
  208. package/dist/providers/azureOpenAI.js +0 -437
  209. package/dist/providers/googleAIStudio.d.ts +0 -49
  210. package/dist/providers/googleAIStudio.js +0 -333
  211. package/dist/providers/googleVertexAI.d.ts +0 -38
  212. package/dist/providers/googleVertexAI.js +0 -519
  213. package/dist/providers/mistralAI.d.ts +0 -34
  214. package/dist/providers/mistralAI.js +0 -294
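The bulk of this release is an internal refactor: a shared BaseProvider (dist/core/base-provider.*), new provider factory and registry modules (dist/factories/*), SDK tool registration (dist/sdk/tool-extension.*, tool-registration.*), and rewritten provider modules (amazon-bedrock, azure-openai, google-ai-studio, google-vertex, mistral) replacing the removed legacy classes (amazonBedrock, azureOpenAI, googleAIStudio, googleVertexAI, mistralAI, agent-enhanced-provider). The Ollama provider diff reproduced below shows the pattern each provider now follows; as a rough sketch of that subclass shape (method names taken from the compiled Ollama module below, while the import path and the createMyLanguageModel helper are hypothetical, and other providers may differ):

// Sketch only: BaseProvider subclass shape inferred from the Ollama diff below.
// The package-relative import path is an assumption, not a documented entry point.
import { BaseProvider } from "@juspay/neurolink/dist/core/base-provider.js";

class MyLocalProvider extends BaseProvider {
  constructor(modelName) {
    super(modelName, "my-local"); // super(modelName, providerId), as in OllamaProvider
  }
  getProviderName() {
    return "my-local";
  }
  getDefaultModel() {
    return "llama3.1:8b";
  }
  getAISDKModel() {
    // Return a Vercel AI SDK LanguageModelV1-style model instance; per the diff,
    // BaseProvider drives generation (including tool calls) through this object.
    return createMyLanguageModel(this.modelName); // hypothetical helper
  }
  supportsTools() {
    return false; // OllamaProvider below also disables tool calling for now
  }
}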
@@ -1,23 +1,30 @@
- /**
- * Ollama Provider for NeuroLink
- *
- * Local AI model deployment and management using Ollama.
- * Provides offline AI capabilities with local model hosting.
- *
- * Features:
- * - Local model deployment (privacy-first)
- * - Model management (download, list, remove)
- * - Health checking and service validation
- * - Streaming and non-streaming text generation
- */
- import { streamText, generateText, Output } from "ai";
+ import { streamText, Output } from "ai";
+ import { BaseProvider } from "../core/base-provider.js";
  import { logger } from "../utils/logger.js";
  import { getDefaultTimeout, TimeoutError } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
- import { evaluateResponse } from "../core/evaluation.js";
- // Default system context
- const DEFAULT_SYSTEM_CONTEXT = {
- systemPrompt: "You are a helpful AI assistant.",
+ // Model version constants (configurable via environment)
+ const DEFAULT_OLLAMA_MODEL = "llama3.1:8b";
+ const FALLBACK_OLLAMA_MODEL = "llama3.2:latest"; // Used when primary model fails
+ // Configuration helpers
+ const getOllamaBaseUrl = () => {
+ return process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+ };
+ // Create AbortController with timeout for better compatibility
+ const createAbortSignalWithTimeout = (timeoutMs) => {
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
+ // Clear timeout if signal is aborted through other means
+ controller.signal.addEventListener("abort", () => {
+ clearTimeout(timeoutId);
+ });
+ return controller.signal;
+ };
+ const getDefaultOllamaModel = () => {
+ return process.env.OLLAMA_MODEL || DEFAULT_OLLAMA_MODEL;
+ };
+ const getOllamaTimeout = () => {
+ return parseInt(process.env.OLLAMA_TIMEOUT || "60000", 10);
  };
  // Custom LanguageModelV1 implementation for Ollama
  class OllamaLanguageModel {
@@ -35,7 +42,7 @@ class OllamaLanguageModel {
  this.timeout = timeout;
  }
  estimateTokens(text) {
- return Math.ceil(text.length / 4); // Rough estimation: 4 characters per token
+ return Math.ceil(text.length / 4);
  }
  convertMessagesToPrompt(messages) {
  return messages
@@ -43,526 +50,340 @@ class OllamaLanguageModel {
  if (typeof msg.content === "string") {
  return `${msg.role}: ${msg.content}`;
  }
- else if (Array.isArray(msg.content)) {
- // Handle multi-part content (text, images, etc.)
- return `${msg.role}: ${msg.content
- .filter((part) => part.type === "text")
- .map((part) => part.text)
- .join(" ")}`;
- }
- return "";
+ return `${msg.role}: ${JSON.stringify(msg.content)}`;
  })
  .join("\n");
  }
- async checkHealth() {
- try {
- const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), 5000);
- const response = await fetch(`${this.baseUrl}/api/tags`, {
- method: "GET",
- signal: controller.signal,
- headers: { "Content-Type": "application/json" },
- });
- clearTimeout(timeoutId);
- return response.ok;
- }
- catch {
- return false;
- }
- }
- async ensureModelAvailable() {
- try {
- const response = await fetch(`${this.baseUrl}/api/tags`);
- if (!response.ok) {
- throw new Error("Cannot access Ollama");
- }
- const data = (await response.json());
- const models = data.models?.map((m) => m.name) || [];
- if (!models.includes(this.modelId)) {
- // Try to pull the model
- const pullResponse = await fetch(`${this.baseUrl}/api/pull`, {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify({ name: this.modelId }),
- });
- if (!pullResponse.ok) {
- throw new Error(`Model '${this.modelId}' not available and cannot be pulled`);
- }
- }
- }
- catch (error) {
- throw new Error(`Failed to ensure model availability: ${error instanceof Error ? error.message : String(error)}`);
- }
- }
  async doGenerate(options) {
- // Health check and model availability
- const isHealthy = await this.checkHealth();
- if (!isHealthy) {
- throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
+ const messages = options.messages || [];
+ const prompt = this.convertMessagesToPrompt(messages);
+ const response = await fetch(`${this.baseUrl}/api/generate`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: this.modelId,
+ prompt,
+ stream: false,
+ system: messages.find((m) => m.role === "system")?.content,
+ options: {
+ temperature: options.temperature,
+ num_predict: options.maxTokens,
+ },
+ }),
+ signal: createAbortSignalWithTimeout(this.timeout),
+ });
+ if (!response.ok) {
+ throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
- await this.ensureModelAvailable();
- const prompt = this.convertMessagesToPrompt(options.prompt);
- const requestPayload = {
- model: this.modelId,
- prompt,
- stream: false,
- options: {
- temperature: options.temperature || 0.7,
- num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
+ const data = await response.json();
+ return {
+ text: data.response,
+ usage: {
+ promptTokens: this.estimateTokens(prompt),
+ completionTokens: this.estimateTokens(data.response),
  },
  };
- const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), this.timeout);
- try {
- const response = await fetch(`${this.baseUrl}/api/generate`, {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify(requestPayload),
- signal: controller.signal,
- });
- clearTimeout(timeoutId);
- if (!response.ok) {
- if (response.status === 404) {
- const errorData = await response.json();
- if (errorData.error && errorData.error.includes("not found")) {
- throw new Error(`Model '${this.modelId}' not found. Please run 'ollama pull ${this.modelId}'`);
- }
- }
- throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
- }
- const data = (await response.json());
- if (!data.response) {
- throw new Error("No response received from Ollama");
- }
- const promptTokens = this.estimateTokens(prompt);
- const completionTokens = this.estimateTokens(data.response);
- return {
- text: data.response,
- usage: {
- promptTokens,
- completionTokens,
- totalTokens: promptTokens + completionTokens,
- },
- finishReason: "stop",
- logprobs: undefined,
- rawCall: { rawPrompt: prompt, rawSettings: options },
- rawResponse: { headers: {} },
- };
- }
- catch (error) {
- clearTimeout(timeoutId);
- const errorMessage = error instanceof Error ? error.message : String(error);
- if (errorMessage.includes("AbortError") ||
- errorMessage.includes("timeout")) {
- throw new Error(`Ollama request timeout (${this.timeout}ms). The model may be large or the system is under load.`);
- }
- if (errorMessage.includes("ECONNREFUSED") ||
- errorMessage.includes("fetch failed")) {
- throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
- this.baseUrl);
- }
- throw error;
- }
  }
  async doStream(options) {
- // Health check and model availability
- const isHealthy = await this.checkHealth();
- if (!isHealthy) {
- throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
+ const messages = options.messages || [];
+ const prompt = this.convertMessagesToPrompt(messages);
+ const response = await fetch(`${this.baseUrl}/api/generate`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: this.modelId,
+ prompt,
+ stream: true,
+ system: messages.find((m) => m.role === "system")?.content,
+ options: {
+ temperature: options.temperature,
+ num_predict: options.maxTokens,
+ },
+ }),
+ signal: createAbortSignalWithTimeout(this.timeout),
+ });
+ if (!response.ok) {
+ throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
- await this.ensureModelAvailable();
- const prompt = this.convertMessagesToPrompt(options.prompt);
- const requestPayload = {
- model: this.modelId,
- prompt,
- stream: true,
- options: {
- temperature: options.temperature || 0.7,
- num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
- },
+ return {
+ stream: this.parseStreamResponse(response),
  };
- const controller = new AbortController();
- const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+ }
+ async *parseStreamResponse(response) {
+ const reader = response.body?.getReader();
+ if (!reader) {
+ throw new Error("No response body");
+ }
+ const decoder = new TextDecoder();
+ let buffer = "";
  try {
- const response = await fetch(`${this.baseUrl}/api/generate`, {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify(requestPayload),
- signal: controller.signal,
- });
- clearTimeout(timeoutId);
- if (!response.ok) {
- if (response.status === 404) {
- const errorData = await response.json();
- if (errorData.error && errorData.error.includes("not found")) {
- throw new Error(`Model '${this.modelId}' not found. Please run 'ollama pull ${this.modelId}'`);
- }
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
  }
- throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
- }
- if (!response.body) {
- throw new Error("No response body received from Ollama streaming API");
- }
- // Create a ReadableStream that parses Ollama's streaming format
- const stream = new ReadableStream({
- async start(controller) {
- const reader = response.body.getReader();
- const decoder = new TextDecoder();
- let totalTokens = 0;
- try {
- while (true) {
- const { done, value } = await reader.read();
- if (done) {
- break;
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+ for (const line of lines) {
+ if (line.trim()) {
+ try {
+ const data = JSON.parse(line);
+ if (data.response) {
+ yield {
+ type: "text-delta",
+ textDelta: data.response,
+ };
  }
- const chunk = decoder.decode(value, { stream: true });
- const lines = chunk.split("\n").filter((line) => line.trim());
- for (const line of lines) {
- try {
- const data = JSON.parse(line);
- if (data.response) {
- controller.enqueue({
- type: "text-delta",
- textDelta: data.response,
- });
- totalTokens += Math.ceil(data.response.length / 4);
- }
- if (data.done) {
- controller.enqueue({
- type: "finish",
- finishReason: "stop",
- usage: {
- promptTokens: data.prompt_eval_count ||
- Math.ceil(prompt.length / 4),
- completionTokens: data.eval_count || totalTokens,
- },
- logprobs: undefined,
- });
- controller.close();
- return;
- }
- }
- catch (parseError) {
- // Skip invalid JSON lines
- }
+ if (data.done) {
+ yield {
+ type: "finish",
+ finishReason: "stop",
+ usage: {
+ promptTokens: this.estimateTokens(data.context || ""),
+ completionTokens: data.eval_count || 0,
+ },
+ };
+ return;
  }
  }
+ catch (error) {
+ // Ignore JSON parse errors for incomplete chunks
+ }
  }
- finally {
- reader.releaseLock();
- }
- },
- });
- return {
- stream,
- rawCall: { rawPrompt: prompt, rawSettings: options },
- rawResponse: { headers: {} },
- };
- }
- catch (error) {
- clearTimeout(timeoutId);
- const errorMessage = error instanceof Error ? error.message : String(error);
- if (errorMessage.includes("AbortError") ||
- errorMessage.includes("timeout")) {
- throw new Error(`Ollama streaming timeout (${this.timeout}ms). The model may be large or the system is under load.`);
- }
- if (errorMessage.includes("ECONNREFUSED") ||
- errorMessage.includes("fetch failed")) {
- throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
- this.baseUrl);
+ }
  }
- throw error;
+ }
+ finally {
+ reader.releaseLock();
  }
  }
  }
- export class Ollama {
+ /**
+ * Ollama Provider v2 - BaseProvider Implementation
+ *
+ * PHASE 3.7: BaseProvider wrap around existing custom Ollama implementation
+ *
+ * Features:
+ * - Extends BaseProvider for shared functionality
+ * - Preserves custom OllamaLanguageModel implementation
+ * - Local model management and health checking
+ * - Enhanced error handling with Ollama-specific guidance
+ */
+ export class OllamaProvider extends BaseProvider {
+ ollamaModel;
  baseUrl;
- modelName;
- defaultTimeout;
+ timeout;
  constructor(modelName) {
- this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
- this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
- // Use environment variable for backward compatibility, but convert to format used by other providers
- const envTimeout = process.env.OLLAMA_TIMEOUT
- ? parseInt(process.env.OLLAMA_TIMEOUT)
- : undefined;
- this.defaultTimeout =
- envTimeout ||
- parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""));
- logger.debug("[Ollama] Initialized", {
- baseUrl: this.baseUrl,
+ super(modelName, "ollama");
+ this.baseUrl = getOllamaBaseUrl();
+ this.timeout = getOllamaTimeout();
+ // Initialize Ollama model
+ this.ollamaModel = new OllamaLanguageModel(this.modelName || getDefaultOllamaModel(), this.baseUrl, this.timeout);
+ logger.debug("Ollama BaseProvider v2 initialized", {
  modelName: this.modelName,
- defaultTimeout: this.defaultTimeout,
+ baseUrl: this.baseUrl,
+ timeout: this.timeout,
+ provider: this.providerName,
  });
  }
- /**
- * Gets the appropriate model instance
- * @private
- */
- getModel(timeout) {
- logger.debug("Ollama.getModel - Ollama model selected", {
- modelName: this.modelName,
- timeout: timeout || this.defaultTimeout,
- });
- return new OllamaLanguageModel(this.modelName, this.baseUrl, timeout || this.defaultTimeout);
+ getProviderName() {
+ return "ollama";
+ }
+ getDefaultModel() {
+ return getDefaultOllamaModel();
  }
  /**
- * Health check - verify Ollama service is running and accessible
+ * Returns the Vercel AI SDK model instance for Ollama
  */
- async checkHealth() {
- const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.defaultTimeout);
- return await model["checkHealth"]();
+ getAISDKModel() {
+ return this.ollamaModel;
  }
  /**
- * List available models on the Ollama instance
+ * Ollama tool/function calling support is currently disabled due to integration issues.
+ *
+ * **Current Issues:**
+ * 1. The OllamaLanguageModel from @ai-sdk/provider-utils doesn't properly integrate
+ * with BaseProvider's tool calling mechanism
+ * 2. Ollama models require specific prompt formatting for function calls that differs
+ * from the standardized AI SDK format
+ * 3. Tool response parsing and execution flow needs custom implementation
+ *
+ * **What's needed to enable tool support:**
+ * - Create a custom OllamaLanguageModel wrapper that handles tool schema formatting
+ * - Implement Ollama-specific tool calling prompt templates
+ * - Add proper response parsing for Ollama's function call format
+ * - Test with models that support function calling (llama3.1, mistral, etc.)
+ *
+ * **Tracking:**
+ * - See BaseProvider tool integration patterns in other providers
+ * - Monitor Ollama function calling documentation: https://ollama.com/blog/tool-support
+ * - Track AI SDK updates for better Ollama integration
+ *
+ * @returns false to disable tools by default
  */
- async listModels() {
- const functionTag = "Ollama.listModels";
+ supportsTools() {
+ // TODO: Fix the OllamaLanguageModel integration with BaseProvider for tool support.
+ // Track progress on resolving this issue. See the detailed steps above.
+ // Issue tracking required for enabling tool support
+ return false;
+ }
+ // executeGenerate removed - BaseProvider handles all generation with tools
+ async executeStream(options, analysisSchema) {
  try {
- logger.debug(`[${functionTag}] Listing available models`);
- const response = await fetch(`${this.baseUrl}/api/tags`, {
- method: "GET",
- headers: {
- "Content-Type": "application/json",
- },
+ this.validateStreamOptions(options);
+ await this.checkOllamaHealth();
+ // Direct HTTP streaming implementation for better compatibility
+ const response = await fetch(`${this.baseUrl}/api/generate`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: this.modelName || FALLBACK_OLLAMA_MODEL,
+ prompt: options.input.text,
+ system: options.systemPrompt,
+ stream: true,
+ options: {
+ temperature: options.temperature,
+ num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
+ },
+ }),
+ signal: createAbortSignalWithTimeout(this.timeout),
  });
  if (!response.ok) {
- throw new Error(`Failed to list models: ${response.status} ${response.statusText}`);
+ throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
- const data = (await response.json());
- const modelNames = data.models?.map((model) => model.name) || [];
- logger.debug(`[${functionTag}] Found models`, {
- count: modelNames.length,
- models: modelNames,
- });
- return modelNames;
+ // Transform to async generator to match other providers
+ const self = this;
+ const transformedStream = async function* () {
+ const generator = self.createOllamaStream(response);
+ for await (const chunk of generator) {
+ yield chunk;
+ }
+ };
+ return {
+ stream: transformedStream(),
+ provider: this.providerName,
+ model: this.modelName,
+ };
  }
  catch (error) {
- logger.debug(`[${functionTag}] Error listing models`, {
- error: error instanceof Error ? error.message : String(error),
- });
- throw new Error(`Failed to list Ollama models: ${error instanceof Error ? error.message : String(error)}`);
+ throw this.handleProviderError(error);
  }
  }
- /**
- * Check if a specific model is available
- */
- async isModelAvailable(modelName) {
+ async *createOllamaStream(response) {
+ const reader = response.body?.getReader();
+ if (!reader) {
+ throw new Error("No response body");
+ }
+ const decoder = new TextDecoder();
+ let buffer = "";
  try {
- const models = await this.listModels();
- return models.includes(modelName);
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
+ }
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+ for (const line of lines) {
+ if (line.trim()) {
+ try {
+ const data = JSON.parse(line);
+ if (data.response) {
+ yield { content: data.response };
+ }
+ if (data.done) {
+ return;
+ }
+ }
+ catch (error) {
+ // Ignore JSON parse errors for incomplete chunks
+ }
+ }
+ }
+ }
  }
- catch (error) {
- return false;
+ finally {
+ reader.releaseLock();
+ }
+ }
+ handleProviderError(error) {
+ if (error.name === "TimeoutError") {
+ return new TimeoutError(`Ollama request timed out. The model might be loading or the request is too complex.`, this.defaultTimeout);
+ }
+ if (error.message?.includes("ECONNREFUSED") ||
+ error.message?.includes("fetch failed")) {
+ return new Error(`❌ Ollama Service Not Running\n\nCannot connect to Ollama at ${this.baseUrl}\n\n🔧 Steps to Fix:\n1. Install Ollama: https://ollama.ai/\n2. Start Ollama service: 'ollama serve'\n3. Verify it's running: 'curl ${this.baseUrl}/api/version'\n4. Try again`);
+ }
+ if (error.message?.includes("model") &&
+ error.message?.includes("not found")) {
+ return new Error(`❌ Ollama Model Not Found\n\nModel '${this.modelName}' is not available locally.\n\n🔧 Install Model:\n1. Run: ollama pull ${this.modelName}\n2. Or try a different model:\n - ollama pull ${FALLBACK_OLLAMA_MODEL}\n - ollama pull mistral:latest\n - ollama pull codellama:latest\n\n🔧 List Available Models:\nollama list`);
+ }
+ if (error.message?.includes("404")) {
+ return new Error(`❌ Ollama API Endpoint Not Found\n\nThe API endpoint might have changed or Ollama version is incompatible.\n\n🔧 Check:\n1. Ollama version: 'ollama --version'\n2. Update Ollama to latest version\n3. Verify API is available: 'curl ${this.baseUrl}/api/version'`);
+ }
+ return new Error(`❌ Ollama Provider Error\n\n${error.message || "Unknown error occurred"}\n\n🔧 Troubleshooting:\n1. Check if Ollama service is running\n2. Verify model is installed: 'ollama list'\n3. Check network connectivity to ${this.baseUrl}\n4. Review Ollama logs for details`);
+ }
+ validateStreamOptions(options) {
+ if (!options.input?.text?.trim()) {
+ throw new Error("Prompt is required for streaming");
+ }
+ if (options.maxTokens && options.maxTokens < 1) {
+ throw new Error("maxTokens must be greater than 0");
+ }
+ if (options.temperature &&
+ (options.temperature < 0 || options.temperature > 2)) {
+ throw new Error("temperature must be between 0 and 2");
  }
  }
  /**
- * Pull/download a model to the local Ollama instance
+ * Check if Ollama service is healthy and accessible
  */
- async pullModel(modelName) {
- const functionTag = "Ollama.pullModel";
+ async checkOllamaHealth() {
  try {
- logger.debug(`[${functionTag}] Pulling model`, { modelName });
- const response = await fetch(`${this.baseUrl}/api/pull`, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- },
- body: JSON.stringify({
- name: modelName,
- }),
+ // Use traditional AbortController for better compatibility
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), 5000);
+ const response = await fetch(`${this.baseUrl}/api/version`, {
+ method: "GET",
+ signal: controller.signal,
  });
+ clearTimeout(timeoutId);
  if (!response.ok) {
- throw new Error(`Failed to pull model: ${response.status} ${response.statusText}`);
+ throw new Error(`Ollama health check failed: ${response.status}`);
  }
- // Note: Ollama pull API returns streaming responses
- // For simplicity, we're not handling the streaming progress here
- logger.debug(`[${functionTag}] Model pull completed`, { modelName });
  }
  catch (error) {
- logger.debug(`[${functionTag}] Error pulling model`, {
- modelName,
- error: error instanceof Error ? error.message : String(error),
- });
- throw new Error(`Failed to pull model '${modelName}': ${error instanceof Error ? error.message : String(error)}`);
+ if (error instanceof Error && error.message.includes("ECONNREFUSED")) {
+ throw new Error(`❌ Ollama Service Not Running\n\nCannot connect to Ollama service.\n\n🔧 Start Ollama:\n1. Run: ollama serve\n2. Or start Ollama app\n3. Verify: curl ${this.baseUrl}/api/version`);
+ }
+ throw error;
  }
  }
  /**
- * Generate text using Ollama local models
+ * Get available models from Ollama
  */
- async generateText(optionsOrPrompt, analysisSchema) {
- const functionTag = "Ollama.generateText";
- const provider = "ollama";
- const startTime = Date.now();
+ async getAvailableModels() {
  try {
- // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
- // Use schema from options or fallback parameter
- const finalSchema = schema || analysisSchema;
- // Convert timeout to milliseconds if provided as string
- const timeoutMs = timeout
- ? typeof timeout === "string"
- ? parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""))
- : timeout
- : this.defaultTimeout;
- logger.debug(`[${functionTag}] Generate request started`, {
- provider,
- modelName: this.modelName,
- promptLength: prompt.length,
- temperature,
- maxTokens,
- timeout: timeoutMs,
- });
- const model = this.getModel(timeoutMs);
- const generateOptions = {
- model: model,
- prompt: prompt,
- system: systemPrompt,
- temperature,
- maxTokens,
- };
- if (finalSchema) {
- generateOptions.experimental_output = Output.object({
- schema: finalSchema,
- });
- }
- const result = await generateText(generateOptions);
- if (result.text.includes("model not found")) {
- throw new Error(`Model '${this.modelName}' not found. Please run 'ollama pull ${this.modelName}'`);
- }
- logger.debug(`[${functionTag}] Generate text completed`, {
- provider,
- modelName: this.modelName,
- usage: result.usage,
- finishReason: result.finishReason,
- responseLength: result.text?.length || 0,
- });
- // Add analytics if enabled
- if (options.enableAnalytics) {
- result.analytics = {
- provider,
- model: this.modelName,
- tokens: result.usage,
- responseTime: Date.now() - startTime,
- context: options.context,
- };
- }
- // Add evaluation if enabled
- if (options.enableEvaluation) {
- result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+ const response = await fetch(`${this.baseUrl}/api/tags`);
+ if (!response.ok) {
+ throw new Error(`Failed to fetch models: ${response.status}`);
  }
- return result;
+ const data = await response.json();
+ return data.models?.map((model) => model.name) || [];
  }
- catch (err) {
- logger.debug(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in generating text",
- err: String(err),
- });
- throw err; // Re-throw error to trigger fallback
+ catch (error) {
+ logger.warn("Failed to fetch Ollama models:", error);
+ return [];
  }
  }
  /**
- * Generate streaming text using Ollama local models
+ * Check if a specific model is available
  */
- async streamText(optionsOrPrompt, analysisSchema) {
- const functionTag = "Ollama.streamText";
- const provider = "ollama";
- let chunkCount = 0;
- try {
- // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
- // Use schema from options or fallback parameter
- const finalSchema = schema || analysisSchema;
- // Convert timeout to milliseconds if provided as string
- const timeoutMs = timeout
- ? typeof timeout === "string"
- ? parseInt(getDefaultTimeout("ollama", "stream").replace(/[^\d]/g, ""))
- : timeout
- : this.defaultTimeout;
- logger.debug(`[${functionTag}] Stream request started`, {
- provider,
- modelName: this.modelName,
- promptLength: prompt.length,
- temperature,
- maxTokens,
- hasSchema: !!finalSchema,
- timeout: timeoutMs,
- });
- const model = this.getModel(timeoutMs);
- const streamOptions = {
- model: model,
- prompt: prompt,
- system: systemPrompt,
- temperature,
- maxTokens,
- onError: (event) => {
- const error = event.error;
- const errorMessage = error instanceof Error ? error.message : String(error);
- const errorStack = error instanceof Error ? error.stack : undefined;
- logger.debug(`[${functionTag}] Stream text error`, {
- provider,
- modelName: this.modelName,
- error: errorMessage,
- stack: errorStack,
- promptLength: prompt.length,
- chunkCount,
- });
- },
- onFinish: (event) => {
- logger.debug(`[${functionTag}] Stream text finished`, {
- provider,
- modelName: this.modelName,
- finishReason: event.finishReason,
- usage: event.usage,
- totalChunks: chunkCount,
- promptLength: prompt.length,
- responseLength: event.text?.length || 0,
- });
- },
- onChunk: (event) => {
- chunkCount++;
- logger.debug(`[${functionTag}] Stream text chunk`, {
- provider,
- modelName: this.modelName,
- chunkNumber: chunkCount,
- chunkLength: event.chunk.text?.length || 0,
- chunkType: event.chunk.type,
- });
- },
- };
- if (finalSchema) {
- streamOptions.experimental_output = Output.object({
- schema: finalSchema,
- });
- }
- const result = streamText(streamOptions);
- return result;
- }
- catch (err) {
- logger.debug(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in streaming text",
- err: String(err),
- promptLength: typeof optionsOrPrompt === "string"
- ? optionsOrPrompt.length
- : optionsOrPrompt.prompt.length,
- });
- throw err; // Re-throw error to trigger fallback
- }
- }
- async generate(optionsOrPrompt, analysisSchema) {
- return this.generateText(optionsOrPrompt, analysisSchema);
- }
- async gen(optionsOrPrompt, analysisSchema) {
- return this.generateText(optionsOrPrompt, analysisSchema);
+ async isModelAvailable(modelName) {
+ const models = await this.getAvailableModels();
+ return models.includes(modelName);
  }
  }
+ export default OllamaProvider;
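
For reference, a usage sketch against the new OllamaProvider, using only members visible in the diff above (the constructor, isModelAvailable, and executeStream returning a stream of { content } chunks). The import path is an assumption, and executeStream may be an internal hook rather than the intended public entry point; the NeuroLink SDK or CLI front end would normally sit on top of it.

// Sketch, in an ESM module with top-level await; assumes a local Ollama at the default URL.
import { OllamaProvider } from "@juspay/neurolink/dist/providers/ollama.js"; // assumed path

const provider = new OllamaProvider("llama3.1:8b");
if (await provider.isModelAvailable("llama3.1:8b")) {
  const { stream } = await provider.executeStream({
    input: { text: "Say hello from a local model." },
    systemPrompt: "You are a helpful AI assistant.",
    maxTokens: 128,
    temperature: 0.3,
  });
  for await (const chunk of stream) {
    process.stdout.write(chunk.content); // chunks are { content: string }
  }
}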