@juspay/neurolink 1.6.0 → 1.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
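A diff like this can be reproduced locally with npm's built-in diff command (npm v7+), using the version specifiers from the header above:

    npm diff --diff=@juspay/neurolink@1.6.0 --diff=@juspay/neurolink@1.9.0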
Files changed (176)
  1. package/CHANGELOG.md +193 -7
  2. package/README.md +100 -17
  3. package/dist/agent/direct-tools.d.ts +1203 -0
  4. package/dist/agent/direct-tools.js +387 -0
  5. package/dist/cli/commands/agent-generate.d.ts +2 -0
  6. package/dist/cli/commands/agent-generate.js +70 -0
  7. package/dist/cli/commands/config.d.ts +6 -6
  8. package/dist/cli/commands/config.js +326 -273
  9. package/dist/cli/commands/mcp.d.ts +2 -1
  10. package/dist/cli/commands/mcp.js +874 -146
  11. package/dist/cli/commands/ollama.d.ts +1 -1
  12. package/dist/cli/commands/ollama.js +153 -143
  13. package/dist/cli/index.js +589 -323
  14. package/dist/cli/utils/complete-setup.d.ts +19 -0
  15. package/dist/cli/utils/complete-setup.js +81 -0
  16. package/dist/cli/utils/env-manager.d.ts +44 -0
  17. package/dist/cli/utils/env-manager.js +226 -0
  18. package/dist/cli/utils/interactive-setup.d.ts +48 -0
  19. package/dist/cli/utils/interactive-setup.js +302 -0
  20. package/dist/core/dynamic-models.d.ts +208 -0
  21. package/dist/core/dynamic-models.js +250 -0
  22. package/dist/core/factory.d.ts +13 -6
  23. package/dist/core/factory.js +176 -61
  24. package/dist/core/types.d.ts +4 -2
  25. package/dist/core/types.js +4 -4
  26. package/dist/index.d.ts +16 -16
  27. package/dist/index.js +16 -16
  28. package/dist/lib/agent/direct-tools.d.ts +1203 -0
  29. package/dist/lib/agent/direct-tools.js +387 -0
  30. package/dist/lib/core/dynamic-models.d.ts +208 -0
  31. package/dist/lib/core/dynamic-models.js +250 -0
  32. package/dist/lib/core/factory.d.ts +13 -6
  33. package/dist/lib/core/factory.js +176 -61
  34. package/dist/lib/core/types.d.ts +4 -2
  35. package/dist/lib/core/types.js +4 -4
  36. package/dist/lib/index.d.ts +16 -16
  37. package/dist/lib/index.js +16 -16
  38. package/dist/lib/mcp/auto-discovery.d.ts +120 -0
  39. package/dist/lib/mcp/auto-discovery.js +793 -0
  40. package/dist/lib/mcp/client.d.ts +66 -0
  41. package/dist/lib/mcp/client.js +245 -0
  42. package/dist/lib/mcp/config.d.ts +31 -0
  43. package/dist/lib/mcp/config.js +74 -0
  44. package/dist/lib/mcp/context-manager.d.ts +4 -4
  45. package/dist/lib/mcp/context-manager.js +24 -18
  46. package/dist/lib/mcp/factory.d.ts +28 -11
  47. package/dist/lib/mcp/factory.js +36 -29
  48. package/dist/lib/mcp/function-calling.d.ts +51 -0
  49. package/dist/lib/mcp/function-calling.js +510 -0
  50. package/dist/lib/mcp/index.d.ts +190 -0
  51. package/dist/lib/mcp/index.js +156 -0
  52. package/dist/lib/mcp/initialize-tools.d.ts +28 -0
  53. package/dist/lib/mcp/initialize-tools.js +209 -0
  54. package/dist/lib/mcp/initialize.d.ts +17 -0
  55. package/dist/lib/mcp/initialize.js +51 -0
  56. package/dist/lib/mcp/logging.d.ts +71 -0
  57. package/dist/lib/mcp/logging.js +183 -0
  58. package/dist/lib/mcp/manager.d.ts +67 -0
  59. package/dist/lib/mcp/manager.js +176 -0
  60. package/dist/lib/mcp/neurolink-mcp-client.d.ts +96 -0
  61. package/dist/lib/mcp/neurolink-mcp-client.js +417 -0
  62. package/dist/lib/mcp/orchestrator.d.ts +3 -3
  63. package/dist/lib/mcp/orchestrator.js +46 -43
  64. package/dist/lib/mcp/registry.d.ts +2 -2
  65. package/dist/lib/mcp/registry.js +42 -33
  66. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
  67. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
  68. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +142 -102
  69. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
  70. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
  71. package/dist/lib/mcp/servers/utilities/utility-server.d.ts +8 -0
  72. package/dist/lib/mcp/servers/utilities/utility-server.js +326 -0
  73. package/dist/lib/mcp/tool-integration.d.ts +67 -0
  74. package/dist/lib/mcp/tool-integration.js +179 -0
  75. package/dist/lib/mcp/unified-registry.d.ts +269 -0
  76. package/dist/lib/mcp/unified-registry.js +1411 -0
  77. package/dist/lib/neurolink.d.ts +68 -6
  78. package/dist/lib/neurolink.js +304 -42
  79. package/dist/lib/providers/agent-enhanced-provider.d.ts +59 -0
  80. package/dist/lib/providers/agent-enhanced-provider.js +242 -0
  81. package/dist/lib/providers/amazonBedrock.d.ts +3 -3
  82. package/dist/lib/providers/amazonBedrock.js +54 -50
  83. package/dist/lib/providers/anthropic.d.ts +2 -2
  84. package/dist/lib/providers/anthropic.js +92 -84
  85. package/dist/lib/providers/azureOpenAI.d.ts +2 -2
  86. package/dist/lib/providers/azureOpenAI.js +97 -86
  87. package/dist/lib/providers/function-calling-provider.d.ts +70 -0
  88. package/dist/lib/providers/function-calling-provider.js +359 -0
  89. package/dist/lib/providers/googleAIStudio.d.ts +10 -5
  90. package/dist/lib/providers/googleAIStudio.js +60 -38
  91. package/dist/lib/providers/googleVertexAI.d.ts +3 -3
  92. package/dist/lib/providers/googleVertexAI.js +96 -86
  93. package/dist/lib/providers/huggingFace.d.ts +3 -3
  94. package/dist/lib/providers/huggingFace.js +70 -63
  95. package/dist/lib/providers/index.d.ts +11 -11
  96. package/dist/lib/providers/index.js +18 -18
  97. package/dist/lib/providers/mcp-provider.d.ts +62 -0
  98. package/dist/lib/providers/mcp-provider.js +183 -0
  99. package/dist/lib/providers/mistralAI.d.ts +3 -3
  100. package/dist/lib/providers/mistralAI.js +42 -36
  101. package/dist/lib/providers/ollama.d.ts +4 -4
  102. package/dist/lib/providers/ollama.js +113 -98
  103. package/dist/lib/providers/openAI.d.ts +7 -3
  104. package/dist/lib/providers/openAI.js +45 -33
  105. package/dist/lib/utils/logger.js +2 -2
  106. package/dist/lib/utils/providerUtils.js +53 -31
  107. package/dist/mcp/auto-discovery.d.ts +120 -0
  108. package/dist/mcp/auto-discovery.js +794 -0
  109. package/dist/mcp/client.d.ts +66 -0
  110. package/dist/mcp/client.js +245 -0
  111. package/dist/mcp/config.d.ts +31 -0
  112. package/dist/mcp/config.js +74 -0
  113. package/dist/mcp/context-manager.d.ts +4 -4
  114. package/dist/mcp/context-manager.js +24 -18
  115. package/dist/mcp/factory.d.ts +28 -11
  116. package/dist/mcp/factory.js +36 -29
  117. package/dist/mcp/function-calling.d.ts +51 -0
  118. package/dist/mcp/function-calling.js +510 -0
  119. package/dist/mcp/index.d.ts +190 -0
  120. package/dist/mcp/index.js +156 -0
  121. package/dist/mcp/initialize-tools.d.ts +28 -0
  122. package/dist/mcp/initialize-tools.js +210 -0
  123. package/dist/mcp/initialize.d.ts +17 -0
  124. package/dist/mcp/initialize.js +51 -0
  125. package/dist/mcp/logging.d.ts +71 -0
  126. package/dist/mcp/logging.js +183 -0
  127. package/dist/mcp/manager.d.ts +67 -0
  128. package/dist/mcp/manager.js +176 -0
  129. package/dist/mcp/neurolink-mcp-client.d.ts +96 -0
  130. package/dist/mcp/neurolink-mcp-client.js +417 -0
  131. package/dist/mcp/orchestrator.d.ts +3 -3
  132. package/dist/mcp/orchestrator.js +46 -43
  133. package/dist/mcp/registry.d.ts +2 -2
  134. package/dist/mcp/registry.js +42 -33
  135. package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
  136. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
  137. package/dist/mcp/servers/ai-providers/ai-core-server.js +142 -102
  138. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
  139. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
  140. package/dist/mcp/servers/utilities/utility-server.d.ts +8 -0
  141. package/dist/mcp/servers/utilities/utility-server.js +326 -0
  142. package/dist/mcp/tool-integration.d.ts +67 -0
  143. package/dist/mcp/tool-integration.js +179 -0
  144. package/dist/mcp/unified-registry.d.ts +269 -0
  145. package/dist/mcp/unified-registry.js +1411 -0
  146. package/dist/neurolink.d.ts +68 -6
  147. package/dist/neurolink.js +304 -42
  148. package/dist/providers/agent-enhanced-provider.d.ts +59 -0
  149. package/dist/providers/agent-enhanced-provider.js +242 -0
  150. package/dist/providers/amazonBedrock.d.ts +3 -3
  151. package/dist/providers/amazonBedrock.js +54 -50
  152. package/dist/providers/anthropic.d.ts +2 -2
  153. package/dist/providers/anthropic.js +92 -84
  154. package/dist/providers/azureOpenAI.d.ts +2 -2
  155. package/dist/providers/azureOpenAI.js +97 -86
  156. package/dist/providers/function-calling-provider.d.ts +70 -0
  157. package/dist/providers/function-calling-provider.js +359 -0
  158. package/dist/providers/googleAIStudio.d.ts +10 -5
  159. package/dist/providers/googleAIStudio.js +60 -38
  160. package/dist/providers/googleVertexAI.d.ts +3 -3
  161. package/dist/providers/googleVertexAI.js +96 -86
  162. package/dist/providers/huggingFace.d.ts +3 -3
  163. package/dist/providers/huggingFace.js +70 -63
  164. package/dist/providers/index.d.ts +11 -11
  165. package/dist/providers/index.js +18 -18
  166. package/dist/providers/mcp-provider.d.ts +62 -0
  167. package/dist/providers/mcp-provider.js +183 -0
  168. package/dist/providers/mistralAI.d.ts +3 -3
  169. package/dist/providers/mistralAI.js +42 -36
  170. package/dist/providers/ollama.d.ts +4 -4
  171. package/dist/providers/ollama.js +113 -98
  172. package/dist/providers/openAI.d.ts +7 -3
  173. package/dist/providers/openAI.js +45 -33
  174. package/dist/utils/logger.js +2 -2
  175. package/dist/utils/providerUtils.js +53 -31
  176. package/package.json +175 -161
@@ -10,20 +10,20 @@
  * - Health checking and service validation
  * - Streaming and non-streaming text generation
  */
- import { streamText, generateText, Output } from 'ai';
- import { logger } from '../utils/logger.js';
+ import { streamText, generateText, Output } from "ai";
+ import { logger } from "../utils/logger.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
- systemPrompt: 'You are a helpful AI assistant.'
+ systemPrompt: "You are a helpful AI assistant.",
  };
  // Custom LanguageModelV1 implementation for Ollama
  class OllamaLanguageModel {
- specificationVersion = 'v1';
- provider = 'ollama';
+ specificationVersion = "v1";
+ provider = "ollama";
  modelId;
  maxTokens;
  supportsStreaming = true;
- defaultObjectGenerationMode = 'json';
+ defaultObjectGenerationMode = "json";
  baseUrl;
  timeout;
  constructor(modelId, baseUrl, timeout) {
@@ -36,29 +36,29 @@ class OllamaLanguageModel {
  }
  convertMessagesToPrompt(messages) {
  return messages
- .map(msg => {
- if (typeof msg.content === 'string') {
+ .map((msg) => {
+ if (typeof msg.content === "string") {
  return `${msg.role}: ${msg.content}`;
  }
  else if (Array.isArray(msg.content)) {
  // Handle multi-part content (text, images, etc.)
  return `${msg.role}: ${msg.content
- .filter((part) => part.type === 'text')
+ .filter((part) => part.type === "text")
  .map((part) => part.text)
- .join(' ')}`;
+ .join(" ")}`;
  }
- return '';
+ return "";
  })
- .join('\n');
+ .join("\n");
  }
  async checkHealth() {
  try {
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), 5000);
  const response = await fetch(`${this.baseUrl}/api/tags`, {
- method: 'GET',
+ method: "GET",
  signal: controller.signal,
- headers: { 'Content-Type': 'application/json' }
+ headers: { "Content-Type": "application/json" },
  });
  clearTimeout(timeoutId);
  return response.ok;
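The checkHealth method above is a standard abortable-fetch liveness probe against GET /api/tags with a fixed 5-second budget. The same pattern in isolation (function name is illustrative, endpoint and timeout taken from the diff):

    async function isOllamaUp(baseUrl = "http://localhost:11434") {
      const controller = new AbortController();
      const timeoutId = setTimeout(() => controller.abort(), 5000); // same 5 s budget as above
      try {
        // /api/tags only answers when the Ollama daemon is running
        const response = await fetch(`${baseUrl}/api/tags`, { signal: controller.signal });
        return response.ok;
      } catch {
        return false; // abort or network failure counts as unhealthy
      } finally {
        clearTimeout(timeoutId);
      }
    }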
@@ -70,16 +70,17 @@ class OllamaLanguageModel {
  async ensureModelAvailable() {
  try {
  const response = await fetch(`${this.baseUrl}/api/tags`);
- if (!response.ok)
- throw new Error('Cannot access Ollama');
- const data = await response.json();
- const models = data.models?.map(m => m.name) || [];
+ if (!response.ok) {
+ throw new Error("Cannot access Ollama");
+ }
+ const data = (await response.json());
+ const models = data.models?.map((m) => m.name) || [];
  if (!models.includes(this.modelId)) {
  // Try to pull the model
  const pullResponse = await fetch(`${this.baseUrl}/api/pull`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({ name: this.modelId })
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ name: this.modelId }),
  });
  if (!pullResponse.ok) {
  throw new Error(`Model '${this.modelId}' not available and cannot be pulled`);
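ensureModelAvailable condenses to two Ollama REST calls: list what is installed via GET /api/tags, then pull on a miss via POST /api/pull. A stripped-down sketch of that flow (error handling omitted; endpoints and payload shape as shown in the diff):

    async function ensureModel(baseUrl, name) {
      // 1. Ask the daemon which models are already installed.
      const tags = await (await fetch(`${baseUrl}/api/tags`)).json();
      const installed = tags.models?.map((m) => m.name) || [];
      // 2. Pull the model only if it is missing.
      if (!installed.includes(name)) {
        await fetch(`${baseUrl}/api/pull`, {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({ name }),
        });
      }
    }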
@@ -94,7 +95,7 @@
  // Health check and model availability
  const isHealthy = await this.checkHealth();
  if (!isHealthy) {
- throw new Error('Ollama service is not running or accessible. Please ensure Ollama is installed and running.');
+ throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
  }
  await this.ensureModelAvailable();
  const prompt = this.convertMessagesToPrompt(options.prompt);
@@ -105,24 +106,24 @@
  options: {
  temperature: options.temperature || 0.7,
  num_predict: options.maxTokens || 500,
- }
+ },
  };
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), this.timeout);
  try {
  const response = await fetch(`${this.baseUrl}/api/generate`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
  body: JSON.stringify(requestPayload),
- signal: controller.signal
+ signal: controller.signal,
  });
  clearTimeout(timeoutId);
  if (!response.ok) {
  throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
- const data = await response.json();
+ const data = (await response.json());
  if (!data.response) {
- throw new Error('No response received from Ollama');
+ throw new Error("No response received from Ollama");
  }
  const promptTokens = this.estimateTokens(prompt);
  const completionTokens = this.estimateTokens(data.response);
@@ -131,22 +132,25 @@
  usage: {
  promptTokens,
  completionTokens,
- totalTokens: promptTokens + completionTokens
+ totalTokens: promptTokens + completionTokens,
  },
- finishReason: 'stop',
+ finishReason: "stop",
  logprobs: undefined,
  rawCall: { rawPrompt: prompt, rawSettings: options },
- rawResponse: { headers: {} }
+ rawResponse: { headers: {} },
  };
  }
  catch (error) {
  clearTimeout(timeoutId);
  const errorMessage = error instanceof Error ? error.message : String(error);
- if (errorMessage.includes('AbortError') || errorMessage.includes('timeout')) {
+ if (errorMessage.includes("AbortError") ||
+ errorMessage.includes("timeout")) {
  throw new Error(`Ollama request timeout (${this.timeout}ms). The model may be large or the system is under load.`);
  }
- if (errorMessage.includes('ECONNREFUSED') || errorMessage.includes('fetch failed')) {
- throw new Error('Cannot connect to Ollama service. Please ensure Ollama is installed and running on ' + this.baseUrl);
+ if (errorMessage.includes("ECONNREFUSED") ||
+ errorMessage.includes("fetch failed")) {
+ throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
+ this.baseUrl);
  }
  throw error;
  }
@@ -155,7 +159,7 @@ class OllamaLanguageModel {
  // Health check and model availability
  const isHealthy = await this.checkHealth();
  if (!isHealthy) {
- throw new Error('Ollama service is not running or accessible. Please ensure Ollama is installed and running.');
+ throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
  }
  await this.ensureModelAvailable();
  const prompt = this.convertMessagesToPrompt(options.prompt);
@@ -166,23 +170,23 @@
  options: {
  temperature: options.temperature || 0.7,
  num_predict: options.maxTokens || 500,
- }
+ },
  };
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), this.timeout);
  try {
  const response = await fetch(`${this.baseUrl}/api/generate`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
  body: JSON.stringify(requestPayload),
- signal: controller.signal
+ signal: controller.signal,
  });
  clearTimeout(timeoutId);
  if (!response.ok) {
  throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
  if (!response.body) {
- throw new Error('No response body received from Ollama streaming API');
+ throw new Error("No response body received from Ollama streaming API");
  }
  // Create a ReadableStream that parses Ollama's streaming format
  const stream = new ReadableStream({
@@ -193,29 +197,31 @@
  try {
  while (true) {
  const { done, value } = await reader.read();
- if (done)
+ if (done) {
  break;
+ }
  const chunk = decoder.decode(value, { stream: true });
- const lines = chunk.split('\n').filter(line => line.trim());
+ const lines = chunk.split("\n").filter((line) => line.trim());
  for (const line of lines) {
  try {
  const data = JSON.parse(line);
  if (data.response) {
  controller.enqueue({
- type: 'text-delta',
- textDelta: data.response
+ type: "text-delta",
+ textDelta: data.response,
  });
  totalTokens += Math.ceil(data.response.length / 4);
  }
  if (data.done) {
  controller.enqueue({
- type: 'finish',
- finishReason: 'stop',
+ type: "finish",
+ finishReason: "stop",
  usage: {
- promptTokens: data.prompt_eval_count || Math.ceil(prompt.length / 4),
- completionTokens: data.eval_count || totalTokens
+ promptTokens: data.prompt_eval_count ||
+ Math.ceil(prompt.length / 4),
+ completionTokens: data.eval_count || totalTokens,
  },
- logprobs: undefined
+ logprobs: undefined,
  });
  controller.close();
  return;
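Ollama's /api/generate streams newline-delimited JSON: each line carries a response text fragment, and the final line sets done: true along with token counters (prompt_eval_count, eval_count), which the loop above translates into the AI SDK's text-delta and finish stream parts. Reduced to plain Node 18+ (and, like the code above, assuming chunks split on line boundaries):

    const res = await fetch("http://localhost:11434/api/generate", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ model: "llama2", prompt: "Hello", stream: true }),
    });
    const decoder = new TextDecoder();
    for await (const chunk of res.body) {
      for (const line of decoder.decode(chunk, { stream: true }).split("\n")) {
        if (!line.trim()) continue;
        const data = JSON.parse(line);
        if (data.response) process.stdout.write(data.response); // incremental text
        if (data.done) console.log("\ntokens:", data.eval_count); // final usage line
      }
    }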
@@ -230,22 +236,25 @@
  finally {
  reader.releaseLock();
  }
- }
+ },
  });
  return {
  stream,
  rawCall: { rawPrompt: prompt, rawSettings: options },
- rawResponse: { headers: {} }
+ rawResponse: { headers: {} },
  };
  }
  catch (error) {
  clearTimeout(timeoutId);
  const errorMessage = error instanceof Error ? error.message : String(error);
- if (errorMessage.includes('AbortError') || errorMessage.includes('timeout')) {
+ if (errorMessage.includes("AbortError") ||
+ errorMessage.includes("timeout")) {
  throw new Error(`Ollama streaming timeout (${this.timeout}ms). The model may be large or the system is under load.`);
  }
- if (errorMessage.includes('ECONNREFUSED') || errorMessage.includes('fetch failed')) {
- throw new Error('Cannot connect to Ollama service. Please ensure Ollama is installed and running on ' + this.baseUrl);
+ if (errorMessage.includes("ECONNREFUSED") ||
+ errorMessage.includes("fetch failed")) {
+ throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
+ this.baseUrl);
  }
  throw error;
  }
@@ -256,13 +265,13 @@ export class Ollama {
  modelName;
  timeout;
  constructor(modelName) {
- this.baseUrl = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
- this.modelName = modelName || process.env.OLLAMA_MODEL || 'llama2';
- this.timeout = parseInt(process.env.OLLAMA_TIMEOUT || '60000'); // 60 seconds default
- logger.debug('[Ollama] Initialized', {
+ this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+ this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
+ this.timeout = parseInt(process.env.OLLAMA_TIMEOUT || "60000"); // 60 seconds default
+ logger.debug("[Ollama] Initialized", {
  baseUrl: this.baseUrl,
  modelName: this.modelName,
- timeout: this.timeout
+ timeout: this.timeout,
  });
  }
  /**
@@ -270,8 +279,8 @@ export class Ollama {
  * @private
  */
  getModel() {
- logger.debug('Ollama.getModel - Ollama model selected', {
- modelName: this.modelName
+ logger.debug("Ollama.getModel - Ollama model selected", {
+ modelName: this.modelName,
  });
  return new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
  }
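The constructor above wires all connectivity through three environment variables, each with a default: OLLAMA_BASE_URL (http://localhost:11434), OLLAMA_MODEL (llama2), and OLLAMA_TIMEOUT (60000 ms). A configuration sketch (the root import path is an assumption, not confirmed by this diff):

    import { Ollama } from "@juspay/neurolink"; // hypothetical import path

    process.env.OLLAMA_TIMEOUT = "120000"; // allow two minutes for large local models
    const ollama = new Ollama("mistral"); // explicit argument overrides OLLAMA_MODEL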
@@ -280,35 +289,35 @@
  */
  async checkHealth() {
  const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
- return await model['checkHealth']();
+ return await model["checkHealth"]();
  }
  /**
  * List available models on the Ollama instance
  */
  async listModels() {
- const functionTag = 'Ollama.listModels';
+ const functionTag = "Ollama.listModels";
  try {
  logger.debug(`[${functionTag}] Listing available models`);
  const response = await fetch(`${this.baseUrl}/api/tags`, {
- method: 'GET',
+ method: "GET",
  headers: {
- 'Content-Type': 'application/json'
- }
+ "Content-Type": "application/json",
+ },
  });
  if (!response.ok) {
  throw new Error(`Failed to list models: ${response.status} ${response.statusText}`);
  }
- const data = await response.json();
- const modelNames = data.models?.map(model => model.name) || [];
+ const data = (await response.json());
+ const modelNames = data.models?.map((model) => model.name) || [];
  logger.debug(`[${functionTag}] Found models`, {
  count: modelNames.length,
- models: modelNames
+ models: modelNames,
  });
  return modelNames;
  }
  catch (error) {
  logger.debug(`[${functionTag}] Error listing models`, {
- error: error instanceof Error ? error.message : String(error)
+ error: error instanceof Error ? error.message : String(error),
  });
  throw new Error(`Failed to list Ollama models: ${error instanceof Error ? error.message : String(error)}`);
  }
@@ -329,17 +338,17 @@ export class Ollama {
  * Pull/download a model to the local Ollama instance
  */
  async pullModel(modelName) {
- const functionTag = 'Ollama.pullModel';
+ const functionTag = "Ollama.pullModel";
  try {
  logger.debug(`[${functionTag}] Pulling model`, { modelName });
  const response = await fetch(`${this.baseUrl}/api/pull`, {
- method: 'POST',
+ method: "POST",
  headers: {
- 'Content-Type': 'application/json'
+ "Content-Type": "application/json",
  },
  body: JSON.stringify({
- name: modelName
- })
+ name: modelName,
+ }),
  });
  if (!response.ok) {
  throw new Error(`Failed to pull model: ${response.status} ${response.statusText}`);
@@ -351,7 +360,7 @@
  catch (error) {
  logger.debug(`[${functionTag}] Error pulling model`, {
  modelName,
- error: error instanceof Error ? error.message : String(error)
+ error: error instanceof Error ? error.message : String(error),
  });
  throw new Error(`Failed to pull model '${modelName}': ${error instanceof Error ? error.message : String(error)}`);
  }
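Together, checkHealth, listModels, and pullModel give a complete local-model management flow; a short sketch of how a caller might chain them (model tags purely illustrative):

    const ollama = new Ollama();
    if (await ollama.checkHealth()) {
      const models = await ollama.listModels(); // e.g. ["llama2:latest"]
      if (!models.includes("mistral:latest")) {
        await ollama.pullModel("mistral"); // downloads via POST /api/pull
      }
    }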
@@ -360,14 +369,14 @@
  * Generate text using Ollama local models
  */
  async generateText(optionsOrPrompt, analysisSchema) {
- const functionTag = 'Ollama.generateText';
- const provider = 'ollama';
+ const functionTag = "Ollama.generateText";
+ const provider = "ollama";
  try {
  // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === 'string'
+ const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -375,7 +384,7 @@
  modelName: this.modelName,
  promptLength: prompt.length,
  temperature,
- maxTokens
+ maxTokens,
  });
  const model = this.getModel();
  const generateOptions = {
@@ -383,10 +392,12 @@
  prompt: prompt,
  system: systemPrompt,
  temperature,
- maxTokens
+ maxTokens,
  };
  if (finalSchema) {
- generateOptions.experimental_output = Output.object({ schema: finalSchema });
+ generateOptions.experimental_output = Output.object({
+ schema: finalSchema,
+ });
  }
  const result = await generateText(generateOptions);
  logger.debug(`[${functionTag}] Generate text completed`, {
@@ -394,7 +405,7 @@
  modelName: this.modelName,
  usage: result.usage,
  finishReason: result.finishReason,
- responseLength: result.text?.length || 0
+ responseLength: result.text?.length || 0,
  });
  return result;
  }
@@ -402,8 +413,8 @@
  logger.debug(`[${functionTag}] Exception`, {
  provider,
  modelName: this.modelName,
- message: 'Error in generating text',
- err: String(err)
+ message: "Error in generating text",
+ err: String(err),
  });
  throw err; // Re-throw error to trigger fallback
  }
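As the parameter parsing above shows, generateText accepts either a bare prompt string or an options object, with an optional Zod schema routed through the AI SDK's experimental Output.object for structured results. A usage sketch (option names taken from the destructuring above; the schema shape is illustrative):

    import { z } from "zod";

    // String form: defaults apply (temperature 0.7, maxTokens 500).
    const quick = await ollama.generateText("Summarize this changelog.");

    // Options form with structured JSON output.
    const analyzed = await ollama.generateText({
      prompt: "Rate the sentiment of: 'Great release!'",
      temperature: 0.2,
      maxTokens: 200,
      systemPrompt: "Respond with JSON only.",
      schema: z.object({ sentiment: z.string(), score: z.number() }),
    });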
@@ -412,15 +423,15 @@
  * Generate streaming text using Ollama local models
  */
  async streamText(optionsOrPrompt, analysisSchema) {
- const functionTag = 'Ollama.streamText';
- const provider = 'ollama';
+ const functionTag = "Ollama.streamText";
+ const provider = "ollama";
  let chunkCount = 0;
  try {
  // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === 'string'
+ const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -429,7 +440,7 @@
  promptLength: prompt.length,
  temperature,
  maxTokens,
- hasSchema: !!finalSchema
+ hasSchema: !!finalSchema,
  });
  const model = this.getModel();
  const streamOptions = {
@@ -448,7 +459,7 @@
  error: errorMessage,
  stack: errorStack,
  promptLength: prompt.length,
- chunkCount
+ chunkCount,
  });
  },
  onFinish: (event) => {
@@ -459,7 +470,7 @@
  usage: event.usage,
  totalChunks: chunkCount,
  promptLength: prompt.length,
- responseLength: event.text?.length || 0
+ responseLength: event.text?.length || 0,
  });
  },
  onChunk: (event) => {
@@ -469,12 +480,14 @@
  modelName: this.modelName,
  chunkNumber: chunkCount,
  chunkLength: event.chunk.text?.length || 0,
- chunkType: event.chunk.type
+ chunkType: event.chunk.type,
  });
- }
+ },
  };
  if (finalSchema) {
- streamOptions.experimental_output = Output.object({ schema: finalSchema });
+ streamOptions.experimental_output = Output.object({
+ schema: finalSchema,
+ });
  }
  const result = streamText(streamOptions);
  return result;
@@ -483,9 +496,11 @@
  logger.debug(`[${functionTag}] Exception`, {
  provider,
  modelName: this.modelName,
- message: 'Error in streaming text',
+ message: "Error in streaming text",
  err: String(err),
- promptLength: typeof optionsOrPrompt === 'string' ? optionsOrPrompt.length : optionsOrPrompt.prompt.length
+ promptLength: typeof optionsOrPrompt === "string"
+ ? optionsOrPrompt.length
+ : optionsOrPrompt.prompt.length,
  });
  throw err; // Re-throw error to trigger fallback
  }
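streamText mirrors the same string-or-options calling convention and returns the AI SDK streaming result, which exposes an async-iterable textStream. A consumption sketch (the null check follows the provider type declarations shown below, which allow a null result):

    const result = await ollama.streamText({ prompt: "Tell me a short story." });
    if (result) {
      for await (const delta of result.textStream) {
        process.stdout.write(delta); // chunks arrive as Ollama generates them
      }
    }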
@@ -1,10 +1,14 @@
- import type { ZodType, ZodTypeDef } from 'zod';
- import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+ import type { ZodType, ZodTypeDef } from "zod";
+ import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult, type LanguageModelV1 } from "ai";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
  export declare class OpenAI implements AIProvider {
  private modelName;
  private model;
  constructor(modelName?: string | null);
+ /**
+ * Get the underlying model for function calling
+ */
+ getModel(): LanguageModelV1;
  streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
  generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
  }
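The newly declared getModel() exposes the provider's underlying LanguageModelV1, so callers can hand the model straight to the AI SDK itself, which is what the function-calling modules added elsewhere in this release appear to build on. A sketch (model name illustrative, and assuming the class is importable from the package):

    import { generateText } from "ai";

    const provider = new OpenAI("gpt-4o");
    const { text } = await generateText({
      model: provider.getModel(), // raw LanguageModelV1 for the AI SDK
      prompt: "Write a one-line release note for 1.9.0.",
    });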