@juspay/neurolink 1.5.3 → 1.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (176)
  1. package/CHANGELOG.md +241 -1
  2. package/README.md +113 -20
  3. package/dist/agent/direct-tools.d.ts +1203 -0
  4. package/dist/agent/direct-tools.js +387 -0
  5. package/dist/cli/commands/agent-generate.d.ts +2 -0
  6. package/dist/cli/commands/agent-generate.js +70 -0
  7. package/dist/cli/commands/config.d.ts +76 -9
  8. package/dist/cli/commands/config.js +358 -233
  9. package/dist/cli/commands/mcp.d.ts +2 -1
  10. package/dist/cli/commands/mcp.js +874 -146
  11. package/dist/cli/commands/ollama.d.ts +8 -0
  12. package/dist/cli/commands/ollama.js +333 -0
  13. package/dist/cli/index.js +591 -327
  14. package/dist/cli/utils/complete-setup.d.ts +19 -0
  15. package/dist/cli/utils/complete-setup.js +81 -0
  16. package/dist/cli/utils/env-manager.d.ts +44 -0
  17. package/dist/cli/utils/env-manager.js +226 -0
  18. package/dist/cli/utils/interactive-setup.d.ts +48 -0
  19. package/dist/cli/utils/interactive-setup.js +302 -0
  20. package/dist/core/dynamic-models.d.ts +208 -0
  21. package/dist/core/dynamic-models.js +250 -0
  22. package/dist/core/factory.d.ts +13 -6
  23. package/dist/core/factory.js +180 -50
  24. package/dist/core/types.d.ts +8 -3
  25. package/dist/core/types.js +7 -4
  26. package/dist/index.d.ts +16 -16
  27. package/dist/index.js +16 -16
  28. package/dist/lib/agent/direct-tools.d.ts +1203 -0
  29. package/dist/lib/agent/direct-tools.js +387 -0
  30. package/dist/lib/core/dynamic-models.d.ts +208 -0
  31. package/dist/lib/core/dynamic-models.js +250 -0
  32. package/dist/lib/core/factory.d.ts +13 -6
  33. package/dist/lib/core/factory.js +180 -50
  34. package/dist/lib/core/types.d.ts +8 -3
  35. package/dist/lib/core/types.js +7 -4
  36. package/dist/lib/index.d.ts +16 -16
  37. package/dist/lib/index.js +16 -16
  38. package/dist/lib/mcp/auto-discovery.d.ts +120 -0
  39. package/dist/lib/mcp/auto-discovery.js +793 -0
  40. package/dist/lib/mcp/client.d.ts +66 -0
  41. package/dist/lib/mcp/client.js +245 -0
  42. package/dist/lib/mcp/config.d.ts +31 -0
  43. package/dist/lib/mcp/config.js +74 -0
  44. package/dist/lib/mcp/context-manager.d.ts +4 -4
  45. package/dist/lib/mcp/context-manager.js +24 -18
  46. package/dist/lib/mcp/factory.d.ts +28 -11
  47. package/dist/lib/mcp/factory.js +36 -29
  48. package/dist/lib/mcp/function-calling.d.ts +51 -0
  49. package/dist/lib/mcp/function-calling.js +510 -0
  50. package/dist/lib/mcp/index.d.ts +190 -0
  51. package/dist/lib/mcp/index.js +156 -0
  52. package/dist/lib/mcp/initialize-tools.d.ts +28 -0
  53. package/dist/lib/mcp/initialize-tools.js +209 -0
  54. package/dist/lib/mcp/initialize.d.ts +17 -0
  55. package/dist/lib/mcp/initialize.js +51 -0
  56. package/dist/lib/mcp/logging.d.ts +71 -0
  57. package/dist/lib/mcp/logging.js +183 -0
  58. package/dist/lib/mcp/manager.d.ts +67 -0
  59. package/dist/lib/mcp/manager.js +176 -0
  60. package/dist/lib/mcp/neurolink-mcp-client.d.ts +96 -0
  61. package/dist/lib/mcp/neurolink-mcp-client.js +417 -0
  62. package/dist/lib/mcp/orchestrator.d.ts +3 -3
  63. package/dist/lib/mcp/orchestrator.js +46 -43
  64. package/dist/lib/mcp/registry.d.ts +2 -2
  65. package/dist/lib/mcp/registry.js +42 -33
  66. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
  67. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +205 -66
  68. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +143 -99
  69. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
  70. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +404 -251
  71. package/dist/lib/mcp/servers/utilities/utility-server.d.ts +8 -0
  72. package/dist/lib/mcp/servers/utilities/utility-server.js +326 -0
  73. package/dist/lib/mcp/tool-integration.d.ts +67 -0
  74. package/dist/lib/mcp/tool-integration.js +179 -0
  75. package/dist/lib/mcp/unified-registry.d.ts +269 -0
  76. package/dist/lib/mcp/unified-registry.js +1411 -0
  77. package/dist/lib/neurolink.d.ts +68 -6
  78. package/dist/lib/neurolink.js +314 -42
  79. package/dist/lib/providers/agent-enhanced-provider.d.ts +59 -0
  80. package/dist/lib/providers/agent-enhanced-provider.js +242 -0
  81. package/dist/lib/providers/amazonBedrock.d.ts +3 -3
  82. package/dist/lib/providers/amazonBedrock.js +54 -50
  83. package/dist/lib/providers/anthropic.d.ts +2 -2
  84. package/dist/lib/providers/anthropic.js +92 -84
  85. package/dist/lib/providers/azureOpenAI.d.ts +2 -2
  86. package/dist/lib/providers/azureOpenAI.js +97 -86
  87. package/dist/lib/providers/function-calling-provider.d.ts +70 -0
  88. package/dist/lib/providers/function-calling-provider.js +359 -0
  89. package/dist/lib/providers/googleAIStudio.d.ts +10 -5
  90. package/dist/lib/providers/googleAIStudio.js +60 -38
  91. package/dist/lib/providers/googleVertexAI.d.ts +3 -3
  92. package/dist/lib/providers/googleVertexAI.js +96 -86
  93. package/dist/lib/providers/huggingFace.d.ts +31 -0
  94. package/dist/lib/providers/huggingFace.js +362 -0
  95. package/dist/lib/providers/index.d.ts +14 -8
  96. package/dist/lib/providers/index.js +18 -12
  97. package/dist/lib/providers/mcp-provider.d.ts +62 -0
  98. package/dist/lib/providers/mcp-provider.js +183 -0
  99. package/dist/lib/providers/mistralAI.d.ts +32 -0
  100. package/dist/lib/providers/mistralAI.js +223 -0
  101. package/dist/lib/providers/ollama.d.ts +51 -0
  102. package/dist/lib/providers/ollama.js +508 -0
  103. package/dist/lib/providers/openAI.d.ts +7 -3
  104. package/dist/lib/providers/openAI.js +45 -33
  105. package/dist/lib/utils/logger.js +2 -2
  106. package/dist/lib/utils/providerUtils.js +59 -22
  107. package/dist/mcp/auto-discovery.d.ts +120 -0
  108. package/dist/mcp/auto-discovery.js +794 -0
  109. package/dist/mcp/client.d.ts +66 -0
  110. package/dist/mcp/client.js +245 -0
  111. package/dist/mcp/config.d.ts +31 -0
  112. package/dist/mcp/config.js +74 -0
  113. package/dist/mcp/context-manager.d.ts +4 -4
  114. package/dist/mcp/context-manager.js +24 -18
  115. package/dist/mcp/factory.d.ts +28 -11
  116. package/dist/mcp/factory.js +36 -29
  117. package/dist/mcp/function-calling.d.ts +51 -0
  118. package/dist/mcp/function-calling.js +510 -0
  119. package/dist/mcp/index.d.ts +190 -0
  120. package/dist/mcp/index.js +156 -0
  121. package/dist/mcp/initialize-tools.d.ts +28 -0
  122. package/dist/mcp/initialize-tools.js +210 -0
  123. package/dist/mcp/initialize.d.ts +17 -0
  124. package/dist/mcp/initialize.js +51 -0
  125. package/dist/mcp/logging.d.ts +71 -0
  126. package/dist/mcp/logging.js +183 -0
  127. package/dist/mcp/manager.d.ts +67 -0
  128. package/dist/mcp/manager.js +176 -0
  129. package/dist/mcp/neurolink-mcp-client.d.ts +96 -0
  130. package/dist/mcp/neurolink-mcp-client.js +417 -0
  131. package/dist/mcp/orchestrator.d.ts +3 -3
  132. package/dist/mcp/orchestrator.js +46 -43
  133. package/dist/mcp/registry.d.ts +2 -2
  134. package/dist/mcp/registry.js +42 -33
  135. package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
  136. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +205 -66
  137. package/dist/mcp/servers/ai-providers/ai-core-server.js +143 -99
  138. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
  139. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +404 -253
  140. package/dist/mcp/servers/utilities/utility-server.d.ts +8 -0
  141. package/dist/mcp/servers/utilities/utility-server.js +326 -0
  142. package/dist/mcp/tool-integration.d.ts +67 -0
  143. package/dist/mcp/tool-integration.js +179 -0
  144. package/dist/mcp/unified-registry.d.ts +269 -0
  145. package/dist/mcp/unified-registry.js +1411 -0
  146. package/dist/neurolink.d.ts +68 -6
  147. package/dist/neurolink.js +314 -42
  148. package/dist/providers/agent-enhanced-provider.d.ts +59 -0
  149. package/dist/providers/agent-enhanced-provider.js +242 -0
  150. package/dist/providers/amazonBedrock.d.ts +3 -3
  151. package/dist/providers/amazonBedrock.js +54 -50
  152. package/dist/providers/anthropic.d.ts +2 -2
  153. package/dist/providers/anthropic.js +92 -84
  154. package/dist/providers/azureOpenAI.d.ts +2 -2
  155. package/dist/providers/azureOpenAI.js +97 -86
  156. package/dist/providers/function-calling-provider.d.ts +70 -0
  157. package/dist/providers/function-calling-provider.js +359 -0
  158. package/dist/providers/googleAIStudio.d.ts +10 -5
  159. package/dist/providers/googleAIStudio.js +60 -38
  160. package/dist/providers/googleVertexAI.d.ts +3 -3
  161. package/dist/providers/googleVertexAI.js +96 -86
  162. package/dist/providers/huggingFace.d.ts +31 -0
  163. package/dist/providers/huggingFace.js +362 -0
  164. package/dist/providers/index.d.ts +14 -8
  165. package/dist/providers/index.js +18 -12
  166. package/dist/providers/mcp-provider.d.ts +62 -0
  167. package/dist/providers/mcp-provider.js +183 -0
  168. package/dist/providers/mistralAI.d.ts +32 -0
  169. package/dist/providers/mistralAI.js +223 -0
  170. package/dist/providers/ollama.d.ts +51 -0
  171. package/dist/providers/ollama.js +508 -0
  172. package/dist/providers/openAI.d.ts +7 -3
  173. package/dist/providers/openAI.js +45 -33
  174. package/dist/utils/logger.js +2 -2
  175. package/dist/utils/providerUtils.js +59 -22
  176. package/package.json +28 -4
@@ -0,0 +1,508 @@
+ /**
+  * Ollama Provider for NeuroLink
+  *
+  * Local AI model deployment and management using Ollama.
+  * Provides offline AI capabilities with local model hosting.
+  *
+  * Features:
+  * - Local model deployment (privacy-first)
+  * - Model management (download, list, remove)
+  * - Health checking and service validation
+  * - Streaming and non-streaming text generation
+  */
+ import { streamText, generateText, Output } from "ai";
+ import { logger } from "../utils/logger.js";
+ // Default system context
+ const DEFAULT_SYSTEM_CONTEXT = {
+     systemPrompt: "You are a helpful AI assistant.",
+ };
+ // Custom LanguageModelV1 implementation for Ollama
+ class OllamaLanguageModel {
+     specificationVersion = "v1";
+     provider = "ollama";
+     modelId;
+     maxTokens;
+     supportsStreaming = true;
+     defaultObjectGenerationMode = "json";
+     baseUrl;
+     timeout;
+     constructor(modelId, baseUrl, timeout) {
+         this.modelId = modelId;
+         this.baseUrl = baseUrl;
+         this.timeout = timeout;
+     }
+     estimateTokens(text) {
+         return Math.ceil(text.length / 4); // Rough estimation: 4 characters per token
+     }
+     convertMessagesToPrompt(messages) {
+         return messages
+             .map((msg) => {
+             if (typeof msg.content === "string") {
+                 return `${msg.role}: ${msg.content}`;
+             }
+             else if (Array.isArray(msg.content)) {
+                 // Handle multi-part content (text, images, etc.)
+                 return `${msg.role}: ${msg.content
+                     .filter((part) => part.type === "text")
+                     .map((part) => part.text)
+                     .join(" ")}`;
+             }
+             return "";
+         })
+             .join("\n");
+     }
+     async checkHealth() {
+         try {
+             const controller = new AbortController();
+             const timeoutId = setTimeout(() => controller.abort(), 5000);
+             const response = await fetch(`${this.baseUrl}/api/tags`, {
+                 method: "GET",
+                 signal: controller.signal,
+                 headers: { "Content-Type": "application/json" },
+             });
+             clearTimeout(timeoutId);
+             return response.ok;
+         }
+         catch {
+             return false;
+         }
+     }
+     async ensureModelAvailable() {
+         try {
+             const response = await fetch(`${this.baseUrl}/api/tags`);
+             if (!response.ok) {
+                 throw new Error("Cannot access Ollama");
+             }
+             const data = (await response.json());
+             const models = data.models?.map((m) => m.name) || [];
+             if (!models.includes(this.modelId)) {
+                 // Try to pull the model
+                 const pullResponse = await fetch(`${this.baseUrl}/api/pull`, {
+                     method: "POST",
+                     headers: { "Content-Type": "application/json" },
+                     body: JSON.stringify({ name: this.modelId }),
+                 });
+                 if (!pullResponse.ok) {
+                     throw new Error(`Model '${this.modelId}' not available and cannot be pulled`);
+                 }
+             }
+         }
+         catch (error) {
+             throw new Error(`Failed to ensure model availability: ${error instanceof Error ? error.message : String(error)}`);
+         }
+     }
+     async doGenerate(options) {
+         // Health check and model availability
+         const isHealthy = await this.checkHealth();
+         if (!isHealthy) {
+             throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
+         }
+         await this.ensureModelAvailable();
+         const prompt = this.convertMessagesToPrompt(options.prompt);
+         const requestPayload = {
+             model: this.modelId,
+             prompt,
+             stream: false,
+             options: {
+                 temperature: options.temperature || 0.7,
+                 num_predict: options.maxTokens || 500,
+             },
+         };
+         const controller = new AbortController();
+         const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+         try {
+             const response = await fetch(`${this.baseUrl}/api/generate`, {
+                 method: "POST",
+                 headers: { "Content-Type": "application/json" },
+                 body: JSON.stringify(requestPayload),
+                 signal: controller.signal,
+             });
+             clearTimeout(timeoutId);
+             if (!response.ok) {
+                 throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
+             }
+             const data = (await response.json());
+             if (!data.response) {
+                 throw new Error("No response received from Ollama");
+             }
+             const promptTokens = this.estimateTokens(prompt);
+             const completionTokens = this.estimateTokens(data.response);
+             return {
+                 text: data.response,
+                 usage: {
+                     promptTokens,
+                     completionTokens,
+                     totalTokens: promptTokens + completionTokens,
+                 },
+                 finishReason: "stop",
+                 logprobs: undefined,
+                 rawCall: { rawPrompt: prompt, rawSettings: options },
+                 rawResponse: { headers: {} },
+             };
+         }
+         catch (error) {
+             clearTimeout(timeoutId);
+             const errorMessage = error instanceof Error ? error.message : String(error);
+             if (errorMessage.includes("AbortError") ||
+                 errorMessage.includes("timeout")) {
+                 throw new Error(`Ollama request timeout (${this.timeout}ms). The model may be large or the system is under load.`);
+             }
+             if (errorMessage.includes("ECONNREFUSED") ||
+                 errorMessage.includes("fetch failed")) {
+                 throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
+                     this.baseUrl);
+             }
+             throw error;
+         }
+     }
+     async doStream(options) {
+         // Health check and model availability
+         const isHealthy = await this.checkHealth();
+         if (!isHealthy) {
+             throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
+         }
+         await this.ensureModelAvailable();
+         const prompt = this.convertMessagesToPrompt(options.prompt);
+         const requestPayload = {
+             model: this.modelId,
+             prompt,
+             stream: true,
+             options: {
+                 temperature: options.temperature || 0.7,
+                 num_predict: options.maxTokens || 500,
+             },
+         };
+         const controller = new AbortController();
+         const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+         try {
+             const response = await fetch(`${this.baseUrl}/api/generate`, {
+                 method: "POST",
+                 headers: { "Content-Type": "application/json" },
+                 body: JSON.stringify(requestPayload),
+                 signal: controller.signal,
+             });
+             clearTimeout(timeoutId);
+             if (!response.ok) {
+                 throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
+             }
+             if (!response.body) {
+                 throw new Error("No response body received from Ollama streaming API");
+             }
+             // Create a ReadableStream that parses Ollama's streaming format
+             const stream = new ReadableStream({
+                 async start(controller) {
+                     const reader = response.body.getReader();
+                     const decoder = new TextDecoder();
+                     let totalTokens = 0;
+                     try {
+                         while (true) {
+                             const { done, value } = await reader.read();
+                             if (done) {
+                                 break;
+                             }
+                             const chunk = decoder.decode(value, { stream: true });
+                             const lines = chunk.split("\n").filter((line) => line.trim());
+                             for (const line of lines) {
+                                 try {
+                                     const data = JSON.parse(line);
+                                     if (data.response) {
+                                         controller.enqueue({
+                                             type: "text-delta",
+                                             textDelta: data.response,
+                                         });
+                                         totalTokens += Math.ceil(data.response.length / 4);
+                                     }
+                                     if (data.done) {
+                                         controller.enqueue({
+                                             type: "finish",
+                                             finishReason: "stop",
+                                             usage: {
+                                                 promptTokens: data.prompt_eval_count ||
+                                                     Math.ceil(prompt.length / 4),
+                                                 completionTokens: data.eval_count || totalTokens,
+                                             },
+                                             logprobs: undefined,
+                                         });
+                                         controller.close();
+                                         return;
+                                     }
+                                 }
+                                 catch (parseError) {
+                                     // Skip invalid JSON lines
+                                 }
+                             }
+                         }
+                     }
+                     finally {
+                         reader.releaseLock();
+                     }
+                 },
+             });
+             return {
+                 stream,
+                 rawCall: { rawPrompt: prompt, rawSettings: options },
+                 rawResponse: { headers: {} },
+             };
+         }
+         catch (error) {
+             clearTimeout(timeoutId);
+             const errorMessage = error instanceof Error ? error.message : String(error);
+             if (errorMessage.includes("AbortError") ||
+                 errorMessage.includes("timeout")) {
+                 throw new Error(`Ollama streaming timeout (${this.timeout}ms). The model may be large or the system is under load.`);
+             }
+             if (errorMessage.includes("ECONNREFUSED") ||
+                 errorMessage.includes("fetch failed")) {
+                 throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
+                     this.baseUrl);
+             }
+             throw error;
+         }
+     }
+ }
+ export class Ollama {
+     baseUrl;
+     modelName;
+     timeout;
+     constructor(modelName) {
+         this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+         this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
+         this.timeout = parseInt(process.env.OLLAMA_TIMEOUT || "60000"); // 60 seconds default
+         logger.debug("[Ollama] Initialized", {
+             baseUrl: this.baseUrl,
+             modelName: this.modelName,
+             timeout: this.timeout,
+         });
+     }
+     /**
+      * Gets the appropriate model instance
+      * @private
+      */
+     getModel() {
+         logger.debug("Ollama.getModel - Ollama model selected", {
+             modelName: this.modelName,
+         });
+         return new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
+     }
+     /**
+      * Health check - verify Ollama service is running and accessible
+      */
+     async checkHealth() {
+         const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
+         return await model["checkHealth"]();
+     }
+     /**
+      * List available models on the Ollama instance
+      */
+     async listModels() {
+         const functionTag = "Ollama.listModels";
+         try {
+             logger.debug(`[${functionTag}] Listing available models`);
+             const response = await fetch(`${this.baseUrl}/api/tags`, {
+                 method: "GET",
+                 headers: {
+                     "Content-Type": "application/json",
+                 },
+             });
+             if (!response.ok) {
+                 throw new Error(`Failed to list models: ${response.status} ${response.statusText}`);
+             }
+             const data = (await response.json());
+             const modelNames = data.models?.map((model) => model.name) || [];
+             logger.debug(`[${functionTag}] Found models`, {
+                 count: modelNames.length,
+                 models: modelNames,
+             });
+             return modelNames;
+         }
+         catch (error) {
+             logger.debug(`[${functionTag}] Error listing models`, {
+                 error: error instanceof Error ? error.message : String(error),
+             });
+             throw new Error(`Failed to list Ollama models: ${error instanceof Error ? error.message : String(error)}`);
+         }
+     }
+     /**
+      * Check if a specific model is available
+      */
+     async isModelAvailable(modelName) {
+         try {
+             const models = await this.listModels();
+             return models.includes(modelName);
+         }
+         catch (error) {
+             return false;
+         }
+     }
+     /**
+      * Pull/download a model to the local Ollama instance
+      */
+     async pullModel(modelName) {
+         const functionTag = "Ollama.pullModel";
+         try {
+             logger.debug(`[${functionTag}] Pulling model`, { modelName });
+             const response = await fetch(`${this.baseUrl}/api/pull`, {
+                 method: "POST",
+                 headers: {
+                     "Content-Type": "application/json",
+                 },
+                 body: JSON.stringify({
+                     name: modelName,
+                 }),
+             });
+             if (!response.ok) {
+                 throw new Error(`Failed to pull model: ${response.status} ${response.statusText}`);
+             }
+             // Note: Ollama pull API returns streaming responses
+             // For simplicity, we're not handling the streaming progress here
+             logger.debug(`[${functionTag}] Model pull completed`, { modelName });
+         }
+         catch (error) {
+             logger.debug(`[${functionTag}] Error pulling model`, {
+                 modelName,
+                 error: error instanceof Error ? error.message : String(error),
+             });
+             throw new Error(`Failed to pull model '${modelName}': ${error instanceof Error ? error.message : String(error)}`);
+         }
+     }
+     /**
+      * Generate text using Ollama local models
+      */
+     async generateText(optionsOrPrompt, analysisSchema) {
+         const functionTag = "Ollama.generateText";
+         const provider = "ollama";
+         try {
+             // Parse parameters - support both string and options object
+             const options = typeof optionsOrPrompt === "string"
+                 ? { prompt: optionsOrPrompt }
+                 : optionsOrPrompt;
+             const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+             // Use schema from options or fallback parameter
+             const finalSchema = schema || analysisSchema;
+             logger.debug(`[${functionTag}] Generate request started`, {
+                 provider,
+                 modelName: this.modelName,
+                 promptLength: prompt.length,
+                 temperature,
+                 maxTokens,
+             });
+             const model = this.getModel();
+             const generateOptions = {
+                 model: model,
+                 prompt: prompt,
+                 system: systemPrompt,
+                 temperature,
+                 maxTokens,
+             };
+             if (finalSchema) {
+                 generateOptions.experimental_output = Output.object({
+                     schema: finalSchema,
+                 });
+             }
+             const result = await generateText(generateOptions);
+             logger.debug(`[${functionTag}] Generate text completed`, {
+                 provider,
+                 modelName: this.modelName,
+                 usage: result.usage,
+                 finishReason: result.finishReason,
+                 responseLength: result.text?.length || 0,
+             });
+             return result;
+         }
+         catch (err) {
+             logger.debug(`[${functionTag}] Exception`, {
+                 provider,
+                 modelName: this.modelName,
+                 message: "Error in generating text",
+                 err: String(err),
+             });
+             throw err; // Re-throw error to trigger fallback
+         }
+     }
+     /**
+      * Generate streaming text using Ollama local models
+      */
+     async streamText(optionsOrPrompt, analysisSchema) {
+         const functionTag = "Ollama.streamText";
+         const provider = "ollama";
+         let chunkCount = 0;
+         try {
+             // Parse parameters - support both string and options object
+             const options = typeof optionsOrPrompt === "string"
+                 ? { prompt: optionsOrPrompt }
+                 : optionsOrPrompt;
+             const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+             // Use schema from options or fallback parameter
+             const finalSchema = schema || analysisSchema;
+             logger.debug(`[${functionTag}] Stream request started`, {
+                 provider,
+                 modelName: this.modelName,
+                 promptLength: prompt.length,
+                 temperature,
+                 maxTokens,
+                 hasSchema: !!finalSchema,
+             });
+             const model = this.getModel();
+             const streamOptions = {
+                 model: model,
+                 prompt: prompt,
+                 system: systemPrompt,
+                 temperature,
+                 maxTokens,
+                 onError: (event) => {
+                     const error = event.error;
+                     const errorMessage = error instanceof Error ? error.message : String(error);
+                     const errorStack = error instanceof Error ? error.stack : undefined;
+                     logger.debug(`[${functionTag}] Stream text error`, {
+                         provider,
+                         modelName: this.modelName,
+                         error: errorMessage,
+                         stack: errorStack,
+                         promptLength: prompt.length,
+                         chunkCount,
+                     });
+                 },
+                 onFinish: (event) => {
+                     logger.debug(`[${functionTag}] Stream text finished`, {
+                         provider,
+                         modelName: this.modelName,
+                         finishReason: event.finishReason,
+                         usage: event.usage,
+                         totalChunks: chunkCount,
+                         promptLength: prompt.length,
+                         responseLength: event.text?.length || 0,
+                     });
+                 },
+                 onChunk: (event) => {
+                     chunkCount++;
+                     logger.debug(`[${functionTag}] Stream text chunk`, {
+                         provider,
+                         modelName: this.modelName,
+                         chunkNumber: chunkCount,
+                         chunkLength: event.chunk.text?.length || 0,
+                         chunkType: event.chunk.type,
+                     });
+                 },
+             };
+             if (finalSchema) {
+                 streamOptions.experimental_output = Output.object({
+                     schema: finalSchema,
+                 });
+             }
+             const result = streamText(streamOptions);
+             return result;
+         }
+         catch (err) {
+             logger.debug(`[${functionTag}] Exception`, {
+                 provider,
+                 modelName: this.modelName,
+                 message: "Error in streaming text",
+                 err: String(err),
+                 promptLength: typeof optionsOrPrompt === "string"
+                     ? optionsOrPrompt.length
+                     : optionsOrPrompt.prompt.length,
+             });
+             throw err; // Re-throw error to trigger fallback
+         }
+     }
+ }
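
The 508-line hunk above is the new Ollama provider, shipped identically as `dist/providers/ollama.js` and `dist/lib/providers/ollama.js`. A minimal usage sketch, based only on the code in this hunk — the deep import path is an assumption, since the package may re-export `Ollama` from its main entry point:

// Sketch only: the import path is an assumption inferred from the dist layout above.
import { Ollama } from "@juspay/neurolink/dist/providers/ollama.js";

async function main() {
    // The constructor reads OLLAMA_BASE_URL (default http://localhost:11434),
    // OLLAMA_MODEL (default "llama2"), and OLLAMA_TIMEOUT (default 60000 ms).
    const ollama = new Ollama("llama2");

    if (!(await ollama.checkHealth())) {
        throw new Error("Ollama service is not running");
    }
    if (!(await ollama.isModelAvailable("llama2"))) {
        await ollama.pullModel("llama2"); // downloads the model if missing
    }

    // generateText accepts either a plain prompt string or an options object.
    const result = await ollama.generateText({
        prompt: "Summarize the benefits of local inference.",
        temperature: 0.7,
        maxTokens: 500,
    });
    console.log(result.text);
}

main().catch(console.error);

Note that `doGenerate` and `doStream` also run the health check and `ensureModelAvailable` on every call, so the explicit checks above are optional belt-and-braces rather than required setup.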
@@ -1,10 +1,14 @@
- import type { ZodType, ZodTypeDef } from 'zod';
- import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+ import type { ZodType, ZodTypeDef } from "zod";
+ import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult, type LanguageModelV1 } from "ai";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
  export declare class OpenAI implements AIProvider {
      private modelName;
      private model;
      constructor(modelName?: string | null);
+     /**
+      * Get the underlying model for function calling
+      */
+     getModel(): LanguageModelV1;
      streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
      generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
  }
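
The new `getModel(): LanguageModelV1` accessor in this hunk exposes the provider's underlying Vercel AI SDK model, which is what makes the function-calling support elsewhere in this release possible: callers can hand the raw model to the AI SDK's `generateText`/`streamText` together with tool definitions. A minimal sketch under stated assumptions — the import path, model name, and `weather` tool are illustrative, not part of the package:

// Sketch only: import path and tool definition are assumptions for illustration.
import { generateText, tool } from "ai";
import { z } from "zod";
import { OpenAI } from "@juspay/neurolink/dist/providers/openAI.js";

async function demo() {
    const provider = new OpenAI("gpt-4o"); // model name is an example
    // getModel() returns the raw LanguageModelV1, usable directly with the AI SDK.
    const result = await generateText({
        model: provider.getModel(),
        prompt: "What is the weather in Paris?",
        tools: {
            weather: tool({
                description: "Get the weather for a city",
                parameters: z.object({ city: z.string() }),
                execute: async ({ city }) => ({ city, tempC: 21 }),
            }),
        },
    });
    console.log(result.text);
}

demo().catch(console.error);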