@juspay/neurolink 1.6.0 → 1.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
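A diff like this can be reproduced locally with npm 7+, which ships a built-in diff command: npm diff --diff=@juspay/neurolink@1.6.0 --diff=@juspay/neurolink@1.10.0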
Files changed (182)
  1. package/CHANGELOG.md +200 -7
  2. package/README.md +101 -18
  3. package/dist/agent/direct-tools.d.ts +1203 -0
  4. package/dist/agent/direct-tools.js +387 -0
  5. package/dist/cli/commands/agent-generate.d.ts +2 -0
  6. package/dist/cli/commands/agent-generate.js +70 -0
  7. package/dist/cli/commands/config.d.ts +6 -6
  8. package/dist/cli/commands/config.js +326 -273
  9. package/dist/cli/commands/mcp.d.ts +2 -1
  10. package/dist/cli/commands/mcp.js +874 -146
  11. package/dist/cli/commands/ollama.d.ts +1 -1
  12. package/dist/cli/commands/ollama.js +153 -143
  13. package/dist/cli/index.js +687 -325
  14. package/dist/cli/utils/complete-setup.d.ts +19 -0
  15. package/dist/cli/utils/complete-setup.js +81 -0
  16. package/dist/cli/utils/env-manager.d.ts +44 -0
  17. package/dist/cli/utils/env-manager.js +226 -0
  18. package/dist/cli/utils/interactive-setup.d.ts +48 -0
  19. package/dist/cli/utils/interactive-setup.js +302 -0
  20. package/dist/core/dynamic-models.d.ts +208 -0
  21. package/dist/core/dynamic-models.js +250 -0
  22. package/dist/core/factory.d.ts +13 -6
  23. package/dist/core/factory.js +177 -62
  24. package/dist/core/types.d.ts +4 -2
  25. package/dist/core/types.js +4 -4
  26. package/dist/index.d.ts +16 -16
  27. package/dist/index.js +16 -16
  28. package/dist/lib/agent/direct-tools.d.ts +1203 -0
  29. package/dist/lib/agent/direct-tools.js +387 -0
  30. package/dist/lib/core/dynamic-models.d.ts +208 -0
  31. package/dist/lib/core/dynamic-models.js +250 -0
  32. package/dist/lib/core/factory.d.ts +13 -6
  33. package/dist/lib/core/factory.js +177 -62
  34. package/dist/lib/core/types.d.ts +4 -2
  35. package/dist/lib/core/types.js +4 -4
  36. package/dist/lib/index.d.ts +16 -16
  37. package/dist/lib/index.js +16 -16
  38. package/dist/lib/mcp/auto-discovery.d.ts +120 -0
  39. package/dist/lib/mcp/auto-discovery.js +793 -0
  40. package/dist/lib/mcp/client.d.ts +66 -0
  41. package/dist/lib/mcp/client.js +245 -0
  42. package/dist/lib/mcp/config.d.ts +31 -0
  43. package/dist/lib/mcp/config.js +74 -0
  44. package/dist/lib/mcp/context-manager.d.ts +4 -4
  45. package/dist/lib/mcp/context-manager.js +24 -18
  46. package/dist/lib/mcp/factory.d.ts +28 -11
  47. package/dist/lib/mcp/factory.js +36 -29
  48. package/dist/lib/mcp/function-calling.d.ts +51 -0
  49. package/dist/lib/mcp/function-calling.js +510 -0
  50. package/dist/lib/mcp/index.d.ts +190 -0
  51. package/dist/lib/mcp/index.js +156 -0
  52. package/dist/lib/mcp/initialize-tools.d.ts +28 -0
  53. package/dist/lib/mcp/initialize-tools.js +209 -0
  54. package/dist/lib/mcp/initialize.d.ts +17 -0
  55. package/dist/lib/mcp/initialize.js +51 -0
  56. package/dist/lib/mcp/logging.d.ts +71 -0
  57. package/dist/lib/mcp/logging.js +183 -0
  58. package/dist/lib/mcp/manager.d.ts +67 -0
  59. package/dist/lib/mcp/manager.js +176 -0
  60. package/dist/lib/mcp/neurolink-mcp-client.d.ts +96 -0
  61. package/dist/lib/mcp/neurolink-mcp-client.js +417 -0
  62. package/dist/lib/mcp/orchestrator.d.ts +3 -3
  63. package/dist/lib/mcp/orchestrator.js +46 -43
  64. package/dist/lib/mcp/registry.d.ts +12 -4
  65. package/dist/lib/mcp/registry.js +64 -37
  66. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
  67. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
  68. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +142 -102
  69. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
  70. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
  71. package/dist/lib/mcp/servers/utilities/utility-server.d.ts +8 -0
  72. package/dist/lib/mcp/servers/utilities/utility-server.js +326 -0
  73. package/dist/lib/mcp/tool-integration.d.ts +67 -0
  74. package/dist/lib/mcp/tool-integration.js +179 -0
  75. package/dist/lib/mcp/unified-registry.d.ts +269 -0
  76. package/dist/lib/mcp/unified-registry.js +1411 -0
  77. package/dist/lib/neurolink.d.ts +68 -6
  78. package/dist/lib/neurolink.js +304 -42
  79. package/dist/lib/providers/agent-enhanced-provider.d.ts +59 -0
  80. package/dist/lib/providers/agent-enhanced-provider.js +242 -0
  81. package/dist/lib/providers/amazonBedrock.d.ts +3 -3
  82. package/dist/lib/providers/amazonBedrock.js +54 -50
  83. package/dist/lib/providers/anthropic.d.ts +2 -2
  84. package/dist/lib/providers/anthropic.js +92 -84
  85. package/dist/lib/providers/azureOpenAI.d.ts +2 -2
  86. package/dist/lib/providers/azureOpenAI.js +97 -86
  87. package/dist/lib/providers/function-calling-provider.d.ts +70 -0
  88. package/dist/lib/providers/function-calling-provider.js +359 -0
  89. package/dist/lib/providers/googleAIStudio.d.ts +10 -5
  90. package/dist/lib/providers/googleAIStudio.js +60 -38
  91. package/dist/lib/providers/googleVertexAI.d.ts +3 -3
  92. package/dist/lib/providers/googleVertexAI.js +96 -86
  93. package/dist/lib/providers/huggingFace.d.ts +3 -3
  94. package/dist/lib/providers/huggingFace.js +70 -63
  95. package/dist/lib/providers/index.d.ts +11 -11
  96. package/dist/lib/providers/index.js +18 -18
  97. package/dist/lib/providers/mcp-provider.d.ts +62 -0
  98. package/dist/lib/providers/mcp-provider.js +183 -0
  99. package/dist/lib/providers/mistralAI.d.ts +3 -3
  100. package/dist/lib/providers/mistralAI.js +42 -36
  101. package/dist/lib/providers/ollama.d.ts +4 -4
  102. package/dist/lib/providers/ollama.js +128 -98
  103. package/dist/lib/providers/openAI.d.ts +7 -3
  104. package/dist/lib/providers/openAI.js +45 -33
  105. package/dist/lib/utils/logger.js +2 -2
  106. package/dist/lib/utils/providerUtils-fixed.d.ts +8 -0
  107. package/dist/lib/utils/providerUtils-fixed.js +75 -0
  108. package/dist/lib/utils/providerUtils.d.ts +8 -1
  109. package/dist/lib/utils/providerUtils.js +63 -32
  110. package/dist/mcp/auto-discovery.d.ts +120 -0
  111. package/dist/mcp/auto-discovery.js +794 -0
  112. package/dist/mcp/client.d.ts +66 -0
  113. package/dist/mcp/client.js +245 -0
  114. package/dist/mcp/config.d.ts +31 -0
  115. package/dist/mcp/config.js +74 -0
  116. package/dist/mcp/context-manager.d.ts +4 -4
  117. package/dist/mcp/context-manager.js +24 -18
  118. package/dist/mcp/factory.d.ts +28 -11
  119. package/dist/mcp/factory.js +36 -29
  120. package/dist/mcp/function-calling.d.ts +51 -0
  121. package/dist/mcp/function-calling.js +510 -0
  122. package/dist/mcp/index.d.ts +190 -0
  123. package/dist/mcp/index.js +156 -0
  124. package/dist/mcp/initialize-tools.d.ts +28 -0
  125. package/dist/mcp/initialize-tools.js +210 -0
  126. package/dist/mcp/initialize.d.ts +17 -0
  127. package/dist/mcp/initialize.js +51 -0
  128. package/dist/mcp/logging.d.ts +71 -0
  129. package/dist/mcp/logging.js +183 -0
  130. package/dist/mcp/manager.d.ts +67 -0
  131. package/dist/mcp/manager.js +176 -0
  132. package/dist/mcp/neurolink-mcp-client.d.ts +96 -0
  133. package/dist/mcp/neurolink-mcp-client.js +417 -0
  134. package/dist/mcp/orchestrator.d.ts +3 -3
  135. package/dist/mcp/orchestrator.js +46 -43
  136. package/dist/mcp/registry.d.ts +12 -4
  137. package/dist/mcp/registry.js +64 -37
  138. package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
  139. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
  140. package/dist/mcp/servers/ai-providers/ai-core-server.js +142 -102
  141. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
  142. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
  143. package/dist/mcp/servers/utilities/utility-server.d.ts +8 -0
  144. package/dist/mcp/servers/utilities/utility-server.js +326 -0
  145. package/dist/mcp/tool-integration.d.ts +67 -0
  146. package/dist/mcp/tool-integration.js +179 -0
  147. package/dist/mcp/unified-registry.d.ts +269 -0
  148. package/dist/mcp/unified-registry.js +1411 -0
  149. package/dist/neurolink.d.ts +68 -6
  150. package/dist/neurolink.js +304 -42
  151. package/dist/providers/agent-enhanced-provider.d.ts +59 -0
  152. package/dist/providers/agent-enhanced-provider.js +242 -0
  153. package/dist/providers/amazonBedrock.d.ts +3 -3
  154. package/dist/providers/amazonBedrock.js +54 -50
  155. package/dist/providers/anthropic.d.ts +2 -2
  156. package/dist/providers/anthropic.js +92 -84
  157. package/dist/providers/azureOpenAI.d.ts +2 -2
  158. package/dist/providers/azureOpenAI.js +97 -86
  159. package/dist/providers/function-calling-provider.d.ts +70 -0
  160. package/dist/providers/function-calling-provider.js +359 -0
  161. package/dist/providers/googleAIStudio.d.ts +10 -5
  162. package/dist/providers/googleAIStudio.js +60 -38
  163. package/dist/providers/googleVertexAI.d.ts +3 -3
  164. package/dist/providers/googleVertexAI.js +96 -86
  165. package/dist/providers/huggingFace.d.ts +3 -3
  166. package/dist/providers/huggingFace.js +70 -63
  167. package/dist/providers/index.d.ts +11 -11
  168. package/dist/providers/index.js +18 -18
  169. package/dist/providers/mcp-provider.d.ts +62 -0
  170. package/dist/providers/mcp-provider.js +183 -0
  171. package/dist/providers/mistralAI.d.ts +3 -3
  172. package/dist/providers/mistralAI.js +42 -36
  173. package/dist/providers/ollama.d.ts +4 -4
  174. package/dist/providers/ollama.js +128 -98
  175. package/dist/providers/openAI.d.ts +7 -3
  176. package/dist/providers/openAI.js +45 -33
  177. package/dist/utils/logger.js +2 -2
  178. package/dist/utils/providerUtils-fixed.d.ts +8 -0
  179. package/dist/utils/providerUtils-fixed.js +75 -0
  180. package/dist/utils/providerUtils.d.ts +8 -1
  181. package/dist/utils/providerUtils.js +63 -32
  182. package/package.json +182 -160
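The hunks below are from the Ollama provider implementation (ollama.js, shipped both under dist/ and dist/lib/), followed by a short hunk from the OpenAI provider declarations (openAI.d.ts). Most of the churn is formatter-driven requoting (single to double quotes, trailing commas, expanded one-liners); the substantive changes are new 404 "model not found" handling on both Ollama request paths and a new getModel() method exposed for function calling.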
@@ -10,20 +10,20 @@
  * - Health checking and service validation
  * - Streaming and non-streaming text generation
  */
-import { streamText, generateText, Output } from 'ai';
-import { logger } from '../utils/logger.js';
+import { streamText, generateText, Output } from "ai";
+import { logger } from "../utils/logger.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
-    systemPrompt: 'You are a helpful AI assistant.'
+    systemPrompt: "You are a helpful AI assistant.",
 };
 // Custom LanguageModelV1 implementation for Ollama
 class OllamaLanguageModel {
-    specificationVersion = 'v1';
-    provider = 'ollama';
+    specificationVersion = "v1";
+    provider = "ollama";
     modelId;
     maxTokens;
     supportsStreaming = true;
-    defaultObjectGenerationMode = 'json';
+    defaultObjectGenerationMode = "json";
     baseUrl;
     timeout;
     constructor(modelId, baseUrl, timeout) {
@@ -36,29 +36,29 @@ class OllamaLanguageModel {
     }
     convertMessagesToPrompt(messages) {
         return messages
-            .map(msg => {
-            if (typeof msg.content === 'string') {
+            .map((msg) => {
+            if (typeof msg.content === "string") {
                 return `${msg.role}: ${msg.content}`;
             }
             else if (Array.isArray(msg.content)) {
                 // Handle multi-part content (text, images, etc.)
                 return `${msg.role}: ${msg.content
-                    .filter((part) => part.type === 'text')
+                    .filter((part) => part.type === "text")
                     .map((part) => part.text)
-                    .join(' ')}`;
+                    .join(" ")}`;
             }
-            return '';
+            return "";
         })
-            .join('\n');
+            .join("\n");
     }
     async checkHealth() {
         try {
             const controller = new AbortController();
             const timeoutId = setTimeout(() => controller.abort(), 5000);
             const response = await fetch(`${this.baseUrl}/api/tags`, {
-                method: 'GET',
+                method: "GET",
                 signal: controller.signal,
-                headers: { 'Content-Type': 'application/json' }
+                headers: { "Content-Type": "application/json" },
             });
             clearTimeout(timeoutId);
             return response.ok;
@@ -70,16 +70,17 @@ class OllamaLanguageModel {
     async ensureModelAvailable() {
         try {
             const response = await fetch(`${this.baseUrl}/api/tags`);
-            if (!response.ok)
-                throw new Error('Cannot access Ollama');
-            const data = await response.json();
-            const models = data.models?.map(m => m.name) || [];
+            if (!response.ok) {
+                throw new Error("Cannot access Ollama");
+            }
+            const data = (await response.json());
+            const models = data.models?.map((m) => m.name) || [];
             if (!models.includes(this.modelId)) {
                 // Try to pull the model
                 const pullResponse = await fetch(`${this.baseUrl}/api/pull`, {
-                    method: 'POST',
-                    headers: { 'Content-Type': 'application/json' },
-                    body: JSON.stringify({ name: this.modelId })
+                    method: "POST",
+                    headers: { "Content-Type": "application/json" },
+                    body: JSON.stringify({ name: this.modelId }),
                 });
                 if (!pullResponse.ok) {
                     throw new Error(`Model '${this.modelId}' not available and cannot be pulled`);
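ensureModelAvailable builds on two documented Ollama REST endpoints: GET /api/tags lists locally installed models, and POST /api/pull downloads one by name. A minimal standalone sketch of the same check against a default local install (the model name is illustrative):

    // List installed models via GET /api/tags, then pull the target model if missing.
    async function ensureModel(baseUrl: string, model: string): Promise<void> {
        const tags = await fetch(`${baseUrl}/api/tags`);
        if (!tags.ok) {
            throw new Error("Cannot access Ollama");
        }
        const { models = [] } = (await tags.json()) as { models?: { name: string }[] };
        if (!models.some((m) => m.name === model)) {
            // POST /api/pull streams NDJSON progress; here we only check that it succeeded
            const pull = await fetch(`${baseUrl}/api/pull`, {
                method: "POST",
                headers: { "Content-Type": "application/json" },
                body: JSON.stringify({ name: model }),
            });
            if (!pull.ok) {
                throw new Error(`Model '${model}' is not available and cannot be pulled`);
            }
        }
    }

    await ensureModel("http://localhost:11434", "llama2");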
@@ -94,7 +95,7 @@ class OllamaLanguageModel {
         // Health check and model availability
         const isHealthy = await this.checkHealth();
         if (!isHealthy) {
-            throw new Error('Ollama service is not running or accessible. Please ensure Ollama is installed and running.');
+            throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
         }
         await this.ensureModelAvailable();
         const prompt = this.convertMessagesToPrompt(options.prompt);
@@ -105,24 +106,30 @@ class OllamaLanguageModel {
             options: {
                 temperature: options.temperature || 0.7,
                 num_predict: options.maxTokens || 500,
-            }
+            },
         };
         const controller = new AbortController();
         const timeoutId = setTimeout(() => controller.abort(), this.timeout);
         try {
             const response = await fetch(`${this.baseUrl}/api/generate`, {
-                method: 'POST',
-                headers: { 'Content-Type': 'application/json' },
+                method: "POST",
+                headers: { "Content-Type": "application/json" },
                 body: JSON.stringify(requestPayload),
-                signal: controller.signal
+                signal: controller.signal,
             });
             clearTimeout(timeoutId);
             if (!response.ok) {
+                if (response.status === 404) {
+                    const errorData = await response.json();
+                    if (errorData.error && errorData.error.includes("not found")) {
+                        throw new Error(`Model '${this.modelId}' not found. Please run 'ollama pull ${this.modelId}'`);
+                    }
+                }
                 throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
             }
-            const data = await response.json();
+            const data = (await response.json());
             if (!data.response) {
-                throw new Error('No response received from Ollama');
+                throw new Error("No response received from Ollama");
             }
             const promptTokens = this.estimateTokens(prompt);
             const completionTokens = this.estimateTokens(data.response);
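Both request paths post the same payload shape to Ollama's /api/generate endpoint; num_predict is Ollama's name for the max-tokens cap. For reference, a minimal non-streaming call (values are illustrative):

    // Non-streaming /api/generate: one JSON body back with the full completion and token counts.
    const res = await fetch("http://localhost:11434/api/generate", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            model: "llama2",
            prompt: "user: Say hello",
            stream: false,
            options: { temperature: 0.7, num_predict: 500 },
        }),
    });
    const data = (await res.json()) as {
        response: string;
        prompt_eval_count?: number;
        eval_count?: number;
    };
    console.log(data.response);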
@@ -131,22 +138,25 @@ class OllamaLanguageModel {
             usage: {
                 promptTokens,
                 completionTokens,
-                totalTokens: promptTokens + completionTokens
+                totalTokens: promptTokens + completionTokens,
             },
-            finishReason: 'stop',
+            finishReason: "stop",
             logprobs: undefined,
             rawCall: { rawPrompt: prompt, rawSettings: options },
-            rawResponse: { headers: {} }
+            rawResponse: { headers: {} },
         };
     }
     catch (error) {
         clearTimeout(timeoutId);
         const errorMessage = error instanceof Error ? error.message : String(error);
-        if (errorMessage.includes('AbortError') || errorMessage.includes('timeout')) {
+        if (errorMessage.includes("AbortError") ||
+            errorMessage.includes("timeout")) {
             throw new Error(`Ollama request timeout (${this.timeout}ms). The model may be large or the system is under load.`);
         }
-        if (errorMessage.includes('ECONNREFUSED') || errorMessage.includes('fetch failed')) {
-            throw new Error('Cannot connect to Ollama service. Please ensure Ollama is installed and running on ' + this.baseUrl);
+        if (errorMessage.includes("ECONNREFUSED") ||
+            errorMessage.includes("fetch failed")) {
+            throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
+                this.baseUrl);
         }
         throw error;
     }
@@ -155,7 +165,7 @@ class OllamaLanguageModel {
         // Health check and model availability
         const isHealthy = await this.checkHealth();
         if (!isHealthy) {
-            throw new Error('Ollama service is not running or accessible. Please ensure Ollama is installed and running.');
+            throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
         }
         await this.ensureModelAvailable();
         const prompt = this.convertMessagesToPrompt(options.prompt);
@@ -166,23 +176,29 @@ class OllamaLanguageModel {
             options: {
                 temperature: options.temperature || 0.7,
                 num_predict: options.maxTokens || 500,
-            }
+            },
         };
         const controller = new AbortController();
         const timeoutId = setTimeout(() => controller.abort(), this.timeout);
         try {
             const response = await fetch(`${this.baseUrl}/api/generate`, {
-                method: 'POST',
-                headers: { 'Content-Type': 'application/json' },
+                method: "POST",
+                headers: { "Content-Type": "application/json" },
                 body: JSON.stringify(requestPayload),
-                signal: controller.signal
+                signal: controller.signal,
             });
             clearTimeout(timeoutId);
             if (!response.ok) {
+                if (response.status === 404) {
+                    const errorData = await response.json();
+                    if (errorData.error && errorData.error.includes("not found")) {
+                        throw new Error(`Model '${this.modelId}' not found. Please run 'ollama pull ${this.modelId}'`);
+                    }
+                }
                 throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
             }
             if (!response.body) {
-                throw new Error('No response body received from Ollama streaming API');
+                throw new Error("No response body received from Ollama streaming API");
             }
             // Create a ReadableStream that parses Ollama's streaming format
             const stream = new ReadableStream({
@@ -193,29 +209,31 @@ class OllamaLanguageModel {
                 try {
                     while (true) {
                         const { done, value } = await reader.read();
-                        if (done)
+                        if (done) {
                             break;
+                        }
                         const chunk = decoder.decode(value, { stream: true });
-                        const lines = chunk.split('\n').filter(line => line.trim());
+                        const lines = chunk.split("\n").filter((line) => line.trim());
                         for (const line of lines) {
                             try {
                                 const data = JSON.parse(line);
                                 if (data.response) {
                                     controller.enqueue({
-                                        type: 'text-delta',
-                                        textDelta: data.response
+                                        type: "text-delta",
+                                        textDelta: data.response,
                                     });
                                     totalTokens += Math.ceil(data.response.length / 4);
                                 }
                                 if (data.done) {
                                     controller.enqueue({
-                                        type: 'finish',
-                                        finishReason: 'stop',
+                                        type: "finish",
+                                        finishReason: "stop",
                                         usage: {
-                                            promptTokens: data.prompt_eval_count || Math.ceil(prompt.length / 4),
-                                            completionTokens: data.eval_count || totalTokens
+                                            promptTokens: data.prompt_eval_count ||
+                                                Math.ceil(prompt.length / 4),
+                                            completionTokens: data.eval_count || totalTokens,
                                         },
-                                        logprobs: undefined
+                                        logprobs: undefined,
                                     });
                                     controller.close();
                                     return;
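The parser above handles Ollama's wire format: with streaming enabled, /api/generate emits newline-delimited JSON, where each line carries an incremental response fragment and the final line carries done: true plus prompt_eval_count/eval_count token totals. A pared-down reader showing just that protocol (model name is illustrative):

    // Print fragments as they arrive; the final NDJSON line has done: true and token counts.
    const res = await fetch("http://localhost:11434/api/generate", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model: "llama2", prompt: "Tell me a joke", stream: true }),
    });
    const reader = res.body!.getReader();
    const decoder = new TextDecoder();
    while (true) {
        const { done, value } = await reader.read();
        if (done) {
            break;
        }
        for (const line of decoder.decode(value, { stream: true }).split("\n")) {
            if (!line.trim()) {
                continue;
            }
            try {
                const data = JSON.parse(line);
                if (data.response) {
                    process.stdout.write(data.response);
                }
                if (data.done) {
                    console.log(`\n[${data.eval_count} completion tokens]`);
                }
            }
            catch {
                // A JSON line can be split across network chunks; skip partial lines.
            }
        }
    }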
@@ -230,22 +248,25 @@ class OllamaLanguageModel {
                 finally {
                     reader.releaseLock();
                 }
-            }
+            },
         });
         return {
             stream,
             rawCall: { rawPrompt: prompt, rawSettings: options },
-            rawResponse: { headers: {} }
+            rawResponse: { headers: {} },
         };
     }
     catch (error) {
         clearTimeout(timeoutId);
         const errorMessage = error instanceof Error ? error.message : String(error);
-        if (errorMessage.includes('AbortError') || errorMessage.includes('timeout')) {
+        if (errorMessage.includes("AbortError") ||
+            errorMessage.includes("timeout")) {
             throw new Error(`Ollama streaming timeout (${this.timeout}ms). The model may be large or the system is under load.`);
         }
-        if (errorMessage.includes('ECONNREFUSED') || errorMessage.includes('fetch failed')) {
-            throw new Error('Cannot connect to Ollama service. Please ensure Ollama is installed and running on ' + this.baseUrl);
+        if (errorMessage.includes("ECONNREFUSED") ||
+            errorMessage.includes("fetch failed")) {
+            throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
+                this.baseUrl);
         }
         throw error;
     }
@@ -256,13 +277,13 @@ export class Ollama {
     modelName;
     timeout;
     constructor(modelName) {
-        this.baseUrl = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
-        this.modelName = modelName || process.env.OLLAMA_MODEL || 'llama2';
-        this.timeout = parseInt(process.env.OLLAMA_TIMEOUT || '60000'); // 60 seconds default
-        logger.debug('[Ollama] Initialized', {
+        this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+        this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
+        this.timeout = parseInt(process.env.OLLAMA_TIMEOUT || "60000"); // 60 seconds default
+        logger.debug("[Ollama] Initialized", {
             baseUrl: this.baseUrl,
             modelName: this.modelName,
-            timeout: this.timeout
+            timeout: this.timeout,
         });
     }
     /**
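The public Ollama class is configured entirely through environment variables, with defaults matching a stock local install: OLLAMA_BASE_URL (http://localhost:11434), OLLAMA_MODEL (llama2), and OLLAMA_TIMEOUT in milliseconds (60000). A usage sketch (the import path is an assumption; an explicit constructor argument overrides OLLAMA_MODEL):

    import { Ollama } from "@juspay/neurolink"; // hypothetical import path

    process.env.OLLAMA_TIMEOUT = "120000"; // give large models two minutes
    const ollama = new Ollama("mistral"); // overrides OLLAMA_MODEL
    const result = await ollama.generateText("Summarize this changelog in one line.");
    console.log(result?.text);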
@@ -270,8 +291,8 @@ export class Ollama {
      * @private
      */
     getModel() {
-        logger.debug('Ollama.getModel - Ollama model selected', {
-            modelName: this.modelName
+        logger.debug("Ollama.getModel - Ollama model selected", {
+            modelName: this.modelName,
         });
         return new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
     }
@@ -280,35 +301,35 @@ export class Ollama {
      */
     async checkHealth() {
         const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
-        return await model['checkHealth']();
+        return await model["checkHealth"]();
     }
     /**
      * List available models on the Ollama instance
      */
     async listModels() {
-        const functionTag = 'Ollama.listModels';
+        const functionTag = "Ollama.listModels";
        try {
             logger.debug(`[${functionTag}] Listing available models`);
             const response = await fetch(`${this.baseUrl}/api/tags`, {
-                method: 'GET',
+                method: "GET",
                 headers: {
-                    'Content-Type': 'application/json'
-                }
+                    "Content-Type": "application/json",
+                },
             });
             if (!response.ok) {
                 throw new Error(`Failed to list models: ${response.status} ${response.statusText}`);
             }
-            const data = await response.json();
-            const modelNames = data.models?.map(model => model.name) || [];
+            const data = (await response.json());
+            const modelNames = data.models?.map((model) => model.name) || [];
             logger.debug(`[${functionTag}] Found models`, {
                 count: modelNames.length,
-                models: modelNames
+                models: modelNames,
             });
             return modelNames;
         }
         catch (error) {
             logger.debug(`[${functionTag}] Error listing models`, {
-                error: error instanceof Error ? error.message : String(error)
+                error: error instanceof Error ? error.message : String(error),
             });
             throw new Error(`Failed to list Ollama models: ${error instanceof Error ? error.message : String(error)}`);
         }
@@ -329,17 +350,17 @@ export class Ollama {
      * Pull/download a model to the local Ollama instance
      */
     async pullModel(modelName) {
-        const functionTag = 'Ollama.pullModel';
+        const functionTag = "Ollama.pullModel";
         try {
             logger.debug(`[${functionTag}] Pulling model`, { modelName });
             const response = await fetch(`${this.baseUrl}/api/pull`, {
-                method: 'POST',
+                method: "POST",
                 headers: {
-                    'Content-Type': 'application/json'
+                    "Content-Type": "application/json",
                 },
                 body: JSON.stringify({
-                    name: modelName
-                })
+                    name: modelName,
+                }),
             });
             if (!response.ok) {
                 throw new Error(`Failed to pull model: ${response.status} ${response.statusText}`);
@@ -351,7 +372,7 @@ export class Ollama {
         catch (error) {
             logger.debug(`[${functionTag}] Error pulling model`, {
                 modelName,
-                error: error instanceof Error ? error.message : String(error)
+                error: error instanceof Error ? error.message : String(error),
             });
             throw new Error(`Failed to pull model '${modelName}': ${error instanceof Error ? error.message : String(error)}`);
         }
@@ -360,14 +381,14 @@ export class Ollama {
      * Generate text using Ollama local models
      */
     async generateText(optionsOrPrompt, analysisSchema) {
-        const functionTag = 'Ollama.generateText';
-        const provider = 'ollama';
+        const functionTag = "Ollama.generateText";
+        const provider = "ollama";
         try {
             // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt === 'string'
+            const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Generate request started`, {
@@ -375,7 +396,7 @@ export class Ollama {
                 modelName: this.modelName,
                 promptLength: prompt.length,
                 temperature,
-                maxTokens
+                maxTokens,
             });
             const model = this.getModel();
             const generateOptions = {
@@ -383,18 +404,23 @@ export class Ollama {
                 prompt: prompt,
                 system: systemPrompt,
                 temperature,
-                maxTokens
+                maxTokens,
             };
             if (finalSchema) {
-                generateOptions.experimental_output = Output.object({ schema: finalSchema });
+                generateOptions.experimental_output = Output.object({
+                    schema: finalSchema,
+                });
             }
             const result = await generateText(generateOptions);
+            if (result.text.includes("model not found")) {
+                throw new Error(`Model '${this.modelName}' not found. Please run 'ollama pull ${this.modelName}'`);
+            }
             logger.debug(`[${functionTag}] Generate text completed`, {
                 provider,
                 modelName: this.modelName,
                 usage: result.usage,
                 finishReason: result.finishReason,
-                responseLength: result.text?.length || 0
+                responseLength: result.text?.length || 0,
             });
             return result;
         }
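When a schema is supplied (on the options object or as the trailing analysisSchema parameter), it is routed through the ai SDK's Output.object, so the validated value comes back on the result's experimental_output field. A sketch, reusing the hypothetical import above:

    import { z } from "zod";
    import { Ollama } from "@juspay/neurolink"; // hypothetical import path

    const ollama = new Ollama();
    const result = await ollama.generateText({
        prompt: "Classify the sentiment of: 'This release is fantastic!'",
        maxTokens: 200,
        schema: z.object({ sentiment: z.enum(["positive", "negative", "neutral"]) }),
    });
    // Structured output produced via Output.object lands on experimental_output
    console.log(result?.experimental_output);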
@@ -402,8 +428,8 @@ export class Ollama {
             logger.debug(`[${functionTag}] Exception`, {
                 provider,
                 modelName: this.modelName,
-                message: 'Error in generating text',
-                err: String(err)
+                message: "Error in generating text",
+                err: String(err),
             });
             throw err; // Re-throw error to trigger fallback
         }
@@ -412,15 +438,15 @@ export class Ollama {
      * Generate streaming text using Ollama local models
      */
     async streamText(optionsOrPrompt, analysisSchema) {
-        const functionTag = 'Ollama.streamText';
-        const provider = 'ollama';
+        const functionTag = "Ollama.streamText";
+        const provider = "ollama";
         let chunkCount = 0;
         try {
             // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt === 'string'
+            const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Stream request started`, {
@@ -429,7 +455,7 @@ export class Ollama {
                 promptLength: prompt.length,
                 temperature,
                 maxTokens,
-                hasSchema: !!finalSchema
+                hasSchema: !!finalSchema,
             });
             const model = this.getModel();
             const streamOptions = {
@@ -448,7 +474,7 @@ export class Ollama {
                         error: errorMessage,
                         stack: errorStack,
                         promptLength: prompt.length,
-                        chunkCount
+                        chunkCount,
                     });
                 },
                 onFinish: (event) => {
@@ -459,7 +485,7 @@ export class Ollama {
                         usage: event.usage,
                         totalChunks: chunkCount,
                         promptLength: prompt.length,
-                        responseLength: event.text?.length || 0
+                        responseLength: event.text?.length || 0,
                     });
                 },
                 onChunk: (event) => {
@@ -469,12 +495,14 @@ export class Ollama {
                         modelName: this.modelName,
                         chunkNumber: chunkCount,
                         chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type
+                        chunkType: event.chunk.type,
                     });
-                }
+                },
             };
             if (finalSchema) {
-                streamOptions.experimental_output = Output.object({ schema: finalSchema });
+                streamOptions.experimental_output = Output.object({
+                    schema: finalSchema,
+                });
             }
             const result = streamText(streamOptions);
             return result;
@@ -483,9 +511,11 @@ export class Ollama {
             logger.debug(`[${functionTag}] Exception`, {
                 provider,
                 modelName: this.modelName,
-                message: 'Error in streaming text',
+                message: "Error in streaming text",
                 err: String(err),
-                promptLength: typeof optionsOrPrompt === 'string' ? optionsOrPrompt.length : optionsOrPrompt.prompt.length
+                promptLength: typeof optionsOrPrompt === "string"
+                    ? optionsOrPrompt.length
+                    : optionsOrPrompt.prompt.length,
             });
             throw err; // Re-throw error to trigger fallback
         }
@@ -1,10 +1,14 @@
-import type { ZodType, ZodTypeDef } from 'zod';
-import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+import type { ZodType, ZodTypeDef } from "zod";
+import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult, type LanguageModelV1 } from "ai";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
 export declare class OpenAI implements AIProvider {
     private modelName;
     private model;
     constructor(modelName?: string | null);
+    /**
+     * Get the underlying model for function calling
+     */
+    getModel(): LanguageModelV1;
     streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
     generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
 }
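getModel() surfaces the provider's underlying LanguageModelV1, which is what lets the new function-calling and MCP layers hand the raw model straight to the ai SDK. A sketch of that pattern (import path hypothetical; tool and maxSteps are standard ai SDK features):

    import { generateText, tool } from "ai";
    import { z } from "zod";
    import { OpenAI } from "@juspay/neurolink"; // hypothetical import path

    const openai = new OpenAI("gpt-4o");
    const { text } = await generateText({
        model: openai.getModel(), // LanguageModelV1, usable anywhere the ai SDK expects a model
        prompt: "What is 38 * 41? Use the calculator.",
        tools: {
            calculator: tool({
                description: "Multiply two integers",
                parameters: z.object({ a: z.number(), b: z.number() }),
                execute: async ({ a, b }) => String(a * b),
            }),
        },
        maxSteps: 2, // allow one tool round-trip before the final answer
    });
    console.log(text);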