@juspay/neurolink 3.0.1 → 4.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (232)
  1. package/CHANGELOG.md +66 -6
  2. package/README.md +318 -27
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/chat/client-utils.d.ts +92 -0
  5. package/dist/chat/client-utils.js +298 -0
  6. package/dist/chat/index.d.ts +27 -0
  7. package/dist/chat/index.js +41 -0
  8. package/dist/chat/session-storage.d.ts +77 -0
  9. package/dist/chat/session-storage.js +233 -0
  10. package/dist/chat/session.d.ts +95 -0
  11. package/dist/chat/session.js +257 -0
  12. package/dist/chat/sse-handler.d.ts +49 -0
  13. package/dist/chat/sse-handler.js +266 -0
  14. package/dist/chat/types.d.ts +73 -0
  15. package/dist/chat/types.js +5 -0
  16. package/dist/chat/websocket-chat-handler.d.ts +36 -0
  17. package/dist/chat/websocket-chat-handler.js +262 -0
  18. package/dist/cli/commands/config.js +12 -12
  19. package/dist/cli/commands/mcp.js +3 -4
  20. package/dist/cli/index.d.ts +0 -7
  21. package/dist/cli/index.js +247 -28
  22. package/dist/config/configManager.d.ts +60 -0
  23. package/dist/config/configManager.js +300 -0
  24. package/dist/config/types.d.ts +136 -0
  25. package/dist/config/types.js +43 -0
  26. package/dist/core/analytics.d.ts +23 -0
  27. package/dist/core/analytics.js +131 -0
  28. package/dist/core/constants.d.ts +41 -0
  29. package/dist/core/constants.js +50 -0
  30. package/dist/core/defaults.d.ts +18 -0
  31. package/dist/core/defaults.js +29 -0
  32. package/dist/core/evaluation-config.d.ts +29 -0
  33. package/dist/core/evaluation-config.js +144 -0
  34. package/dist/core/evaluation-providers.d.ts +30 -0
  35. package/dist/core/evaluation-providers.js +187 -0
  36. package/dist/core/evaluation.d.ts +117 -0
  37. package/dist/core/evaluation.js +528 -0
  38. package/dist/core/factory.js +33 -25
  39. package/dist/core/types.d.ts +165 -6
  40. package/dist/core/types.js +3 -4
  41. package/dist/index.d.ts +9 -4
  42. package/dist/index.js +25 -4
  43. package/dist/lib/agent/direct-tools.d.ts +6 -6
  44. package/dist/lib/chat/client-utils.d.ts +92 -0
  45. package/dist/lib/chat/client-utils.js +298 -0
  46. package/dist/lib/chat/index.d.ts +27 -0
  47. package/dist/lib/chat/index.js +41 -0
  48. package/dist/lib/chat/session-storage.d.ts +77 -0
  49. package/dist/lib/chat/session-storage.js +233 -0
  50. package/dist/lib/chat/session.d.ts +95 -0
  51. package/dist/lib/chat/session.js +257 -0
  52. package/dist/lib/chat/sse-handler.d.ts +49 -0
  53. package/dist/lib/chat/sse-handler.js +266 -0
  54. package/dist/lib/chat/types.d.ts +73 -0
  55. package/dist/lib/chat/types.js +5 -0
  56. package/dist/lib/chat/websocket-chat-handler.d.ts +36 -0
  57. package/dist/lib/chat/websocket-chat-handler.js +262 -0
  58. package/dist/lib/config/configManager.d.ts +60 -0
  59. package/dist/lib/config/configManager.js +300 -0
  60. package/dist/lib/config/types.d.ts +136 -0
  61. package/dist/lib/config/types.js +43 -0
  62. package/dist/lib/core/analytics.d.ts +23 -0
  63. package/dist/lib/core/analytics.js +131 -0
  64. package/dist/lib/core/constants.d.ts +41 -0
  65. package/dist/lib/core/constants.js +50 -0
  66. package/dist/lib/core/defaults.d.ts +18 -0
  67. package/dist/lib/core/defaults.js +29 -0
  68. package/dist/lib/core/evaluation-config.d.ts +29 -0
  69. package/dist/lib/core/evaluation-config.js +144 -0
  70. package/dist/lib/core/evaluation-providers.d.ts +30 -0
  71. package/dist/lib/core/evaluation-providers.js +187 -0
  72. package/dist/lib/core/evaluation.d.ts +117 -0
  73. package/dist/lib/core/evaluation.js +528 -0
  74. package/dist/lib/core/factory.js +33 -26
  75. package/dist/lib/core/types.d.ts +165 -6
  76. package/dist/lib/core/types.js +3 -4
  77. package/dist/lib/index.d.ts +9 -4
  78. package/dist/lib/index.js +25 -4
  79. package/dist/lib/mcp/contracts/mcpContract.d.ts +118 -0
  80. package/dist/lib/mcp/contracts/mcpContract.js +5 -0
  81. package/dist/lib/mcp/dynamic-chain-executor.d.ts +201 -0
  82. package/dist/lib/mcp/dynamic-chain-executor.js +489 -0
  83. package/dist/lib/mcp/dynamic-orchestrator.d.ts +109 -0
  84. package/dist/lib/mcp/dynamic-orchestrator.js +351 -0
  85. package/dist/lib/mcp/error-manager.d.ts +254 -0
  86. package/dist/lib/mcp/error-manager.js +501 -0
  87. package/dist/lib/mcp/error-recovery.d.ts +158 -0
  88. package/dist/lib/mcp/error-recovery.js +405 -0
  89. package/dist/lib/mcp/function-calling.js +11 -3
  90. package/dist/lib/mcp/health-monitor.d.ts +256 -0
  91. package/dist/lib/mcp/health-monitor.js +621 -0
  92. package/dist/lib/mcp/logging.js +5 -0
  93. package/dist/lib/mcp/neurolink-mcp-client.js +2 -1
  94. package/dist/lib/mcp/orchestrator.d.ts +136 -5
  95. package/dist/lib/mcp/orchestrator.js +332 -16
  96. package/dist/lib/mcp/registry.d.ts +71 -16
  97. package/dist/lib/mcp/registry.js +104 -6
  98. package/dist/lib/mcp/semaphore-manager.d.ts +137 -0
  99. package/dist/lib/mcp/semaphore-manager.js +329 -0
  100. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  101. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +5 -4
  102. package/dist/lib/mcp/session-manager.d.ts +186 -0
  103. package/dist/lib/mcp/session-manager.js +400 -0
  104. package/dist/lib/mcp/session-persistence.d.ts +93 -0
  105. package/dist/lib/mcp/session-persistence.js +298 -0
  106. package/dist/lib/mcp/tool-integration.js +1 -1
  107. package/dist/lib/mcp/tool-registry.d.ts +55 -34
  108. package/dist/lib/mcp/tool-registry.js +111 -97
  109. package/dist/lib/mcp/transport-manager.d.ts +153 -0
  110. package/dist/lib/mcp/transport-manager.js +330 -0
  111. package/dist/lib/mcp/unified-mcp.js +6 -1
  112. package/dist/lib/mcp/unified-registry.d.ts +54 -5
  113. package/dist/lib/mcp/unified-registry.js +139 -6
  114. package/dist/lib/neurolink.d.ts +101 -0
  115. package/dist/lib/neurolink.js +147 -1
  116. package/dist/lib/providers/agent-enhanced-provider.d.ts +11 -2
  117. package/dist/lib/providers/agent-enhanced-provider.js +86 -15
  118. package/dist/lib/providers/amazonBedrock.d.ts +9 -1
  119. package/dist/lib/providers/amazonBedrock.js +26 -2
  120. package/dist/lib/providers/analytics-helper.d.ts +53 -0
  121. package/dist/lib/providers/analytics-helper.js +151 -0
  122. package/dist/lib/providers/anthropic.d.ts +11 -1
  123. package/dist/lib/providers/anthropic.js +29 -4
  124. package/dist/lib/providers/azureOpenAI.d.ts +3 -1
  125. package/dist/lib/providers/azureOpenAI.js +28 -4
  126. package/dist/lib/providers/function-calling-provider.d.ts +9 -1
  127. package/dist/lib/providers/function-calling-provider.js +14 -1
  128. package/dist/lib/providers/googleAIStudio.d.ts +15 -1
  129. package/dist/lib/providers/googleAIStudio.js +32 -2
  130. package/dist/lib/providers/googleVertexAI.d.ts +9 -1
  131. package/dist/lib/providers/googleVertexAI.js +31 -2
  132. package/dist/lib/providers/huggingFace.d.ts +3 -1
  133. package/dist/lib/providers/huggingFace.js +26 -3
  134. package/dist/lib/providers/mcp-provider.d.ts +9 -1
  135. package/dist/lib/providers/mcp-provider.js +12 -0
  136. package/dist/lib/providers/mistralAI.d.ts +3 -1
  137. package/dist/lib/providers/mistralAI.js +25 -2
  138. package/dist/lib/providers/ollama.d.ts +3 -1
  139. package/dist/lib/providers/ollama.js +27 -4
  140. package/dist/lib/providers/openAI.d.ts +15 -1
  141. package/dist/lib/providers/openAI.js +32 -2
  142. package/dist/lib/proxy/proxy-fetch.js +8 -7
  143. package/dist/lib/services/streaming/streaming-manager.d.ts +29 -0
  144. package/dist/lib/services/streaming/streaming-manager.js +244 -0
  145. package/dist/lib/services/types.d.ts +155 -0
  146. package/dist/lib/services/types.js +2 -0
  147. package/dist/lib/services/websocket/websocket-server.d.ts +34 -0
  148. package/dist/lib/services/websocket/websocket-server.js +304 -0
  149. package/dist/lib/telemetry/index.d.ts +15 -0
  150. package/dist/lib/telemetry/index.js +22 -0
  151. package/dist/lib/telemetry/telemetry-service.d.ts +47 -0
  152. package/dist/lib/telemetry/telemetry-service.js +259 -0
  153. package/dist/lib/utils/streaming-utils.d.ts +67 -0
  154. package/dist/lib/utils/streaming-utils.js +201 -0
  155. package/dist/mcp/contracts/mcpContract.d.ts +118 -0
  156. package/dist/mcp/contracts/mcpContract.js +5 -0
  157. package/dist/mcp/dynamic-chain-executor.d.ts +201 -0
  158. package/dist/mcp/dynamic-chain-executor.js +489 -0
  159. package/dist/mcp/dynamic-orchestrator.d.ts +109 -0
  160. package/dist/mcp/dynamic-orchestrator.js +351 -0
  161. package/dist/mcp/error-manager.d.ts +254 -0
  162. package/dist/mcp/error-manager.js +501 -0
  163. package/dist/mcp/error-recovery.d.ts +158 -0
  164. package/dist/mcp/error-recovery.js +405 -0
  165. package/dist/mcp/function-calling.js +11 -3
  166. package/dist/mcp/health-monitor.d.ts +256 -0
  167. package/dist/mcp/health-monitor.js +621 -0
  168. package/dist/mcp/logging.js +5 -0
  169. package/dist/mcp/neurolink-mcp-client.js +2 -1
  170. package/dist/mcp/orchestrator.d.ts +136 -5
  171. package/dist/mcp/orchestrator.js +332 -16
  172. package/dist/mcp/plugins/core/neurolink-mcp.json +15 -15
  173. package/dist/mcp/registry.d.ts +71 -16
  174. package/dist/mcp/registry.js +104 -6
  175. package/dist/mcp/semaphore-manager.d.ts +137 -0
  176. package/dist/mcp/semaphore-manager.js +329 -0
  177. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  178. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +5 -4
  179. package/dist/mcp/session-manager.d.ts +186 -0
  180. package/dist/mcp/session-manager.js +400 -0
  181. package/dist/mcp/session-persistence.d.ts +93 -0
  182. package/dist/mcp/session-persistence.js +299 -0
  183. package/dist/mcp/tool-integration.js +1 -1
  184. package/dist/mcp/tool-registry.d.ts +55 -34
  185. package/dist/mcp/tool-registry.js +111 -97
  186. package/dist/mcp/transport-manager.d.ts +153 -0
  187. package/dist/mcp/transport-manager.js +331 -0
  188. package/dist/mcp/unified-mcp.js +6 -1
  189. package/dist/mcp/unified-registry.d.ts +54 -5
  190. package/dist/mcp/unified-registry.js +139 -6
  191. package/dist/neurolink.d.ts +101 -0
  192. package/dist/neurolink.js +147 -1
  193. package/dist/providers/agent-enhanced-provider.d.ts +11 -2
  194. package/dist/providers/agent-enhanced-provider.js +86 -15
  195. package/dist/providers/amazonBedrock.d.ts +9 -1
  196. package/dist/providers/amazonBedrock.js +26 -2
  197. package/dist/providers/analytics-helper.d.ts +53 -0
  198. package/dist/providers/analytics-helper.js +151 -0
  199. package/dist/providers/anthropic.d.ts +11 -1
  200. package/dist/providers/anthropic.js +29 -4
  201. package/dist/providers/azureOpenAI.d.ts +3 -1
  202. package/dist/providers/azureOpenAI.js +29 -4
  203. package/dist/providers/function-calling-provider.d.ts +9 -1
  204. package/dist/providers/function-calling-provider.js +14 -1
  205. package/dist/providers/googleAIStudio.d.ts +15 -1
  206. package/dist/providers/googleAIStudio.js +32 -2
  207. package/dist/providers/googleVertexAI.d.ts +9 -1
  208. package/dist/providers/googleVertexAI.js +31 -2
  209. package/dist/providers/huggingFace.d.ts +3 -1
  210. package/dist/providers/huggingFace.js +26 -3
  211. package/dist/providers/mcp-provider.d.ts +9 -1
  212. package/dist/providers/mcp-provider.js +12 -0
  213. package/dist/providers/mistralAI.d.ts +3 -1
  214. package/dist/providers/mistralAI.js +25 -2
  215. package/dist/providers/ollama.d.ts +3 -1
  216. package/dist/providers/ollama.js +27 -4
  217. package/dist/providers/openAI.d.ts +15 -1
  218. package/dist/providers/openAI.js +33 -2
  219. package/dist/proxy/proxy-fetch.js +8 -7
  220. package/dist/services/streaming/streaming-manager.d.ts +29 -0
  221. package/dist/services/streaming/streaming-manager.js +244 -0
  222. package/dist/services/types.d.ts +155 -0
  223. package/dist/services/types.js +2 -0
  224. package/dist/services/websocket/websocket-server.d.ts +34 -0
  225. package/dist/services/websocket/websocket-server.js +304 -0
  226. package/dist/telemetry/index.d.ts +15 -0
  227. package/dist/telemetry/index.js +22 -0
  228. package/dist/telemetry/telemetry-service.d.ts +47 -0
  229. package/dist/telemetry/telemetry-service.js +261 -0
  230. package/dist/utils/streaming-utils.d.ts +67 -0
  231. package/dist/utils/streaming-utils.js +201 -0
  232. package/package.json +245 -228
@@ -7,6 +7,8 @@
  import { AIProviderName } from "../core/types.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { evaluateResponse } from "../core/evaluation.js";
  export class AzureOpenAIProvider {
  name = AIProviderName.AZURE;
  apiKey;
@@ -69,12 +71,13 @@ export class AzureOpenAIProvider {
  async generateText(optionsOrPrompt, schema) {
  const functionTag = "AzureOpenAIProvider.generateText";
  const provider = "azure";
+ const startTime = Date.now();
  logger.debug(`[${functionTag}] Starting text generation`);
  // Parse parameters with backward compatibility
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, "generate"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, "generate"), } = options;
  logger.debug(`[${functionTag}] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}, Timeout: ${timeout}`);
  const messages = [];
  if (systemPrompt) {
@@ -101,7 +104,7 @@ export class AzureOpenAIProvider {
  timeoutController?.cleanup();
  logger.debug(`[${functionTag}] Success. Generated ${data.usage.completion_tokens} tokens`);
  const content = data.choices[0]?.message?.content || "";
- return {
+ const result = {
  content,
  provider: this.name,
  model: data.model,
@@ -112,6 +115,21 @@ export class AzureOpenAIProvider {
  },
  finishReason: data.choices[0]?.finish_reason || "stop",
  };
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ result.analytics = {
+ provider: this.name,
+ model: data.model,
+ tokens: result.usage,
+ responseTime: Date.now() - startTime,
+ context: options.context,
+ };
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(options.prompt, content, options.context);
+ }
+ return result;
  }
  catch (error) {
  // Always cleanup timeout
@@ -148,7 +166,7 @@ export class AzureOpenAIProvider {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, "stream"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, "stream"), } = options;
  logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
  const messages = [];
  if (systemPrompt) {
@@ -262,7 +280,7 @@ export class AzureOpenAIProvider {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are a helpful AI assistant.", } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", } = options;
  logger.debug(`[AzureOpenAIProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
  const messages = [];
  if (systemPrompt) {
@@ -409,4 +427,10 @@ export class AzureOpenAIProvider {
  "content-filtering",
  ];
  }
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
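
The hunks above show the v4 pattern applied to every provider in this diff: generateText() now records a startTime, and two opt-in flags attach extra data to the result. A minimal usage sketch, assuming `provider` is an already-constructed AzureOpenAIProvider instance and using only the option and field names visible in the diff (their exact types are not shown here):

    // Sketch only — option/field names come from the diff; types are assumptions.
    const result = await provider.generateText({
      prompt: "Summarize the incident report",
      enableAnalytics: true,  // populates result.analytics
      enableEvaluation: true, // populates result.evaluation via evaluateResponse()
      context: { requestId: "r-42" }, // hypothetical payload, echoed into analytics
    });
    if (result?.analytics) {
      // responseTime is Date.now() - startTime, measured inside generateText()
      console.log(result.analytics.responseTime, result.analytics.tokens);
    }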
@@ -3,7 +3,7 @@
  * Integrates MCP tools directly with AI SDK's function calling capabilities
  * This is the missing piece that enables true AI function calling!
  */
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  import { type GenerateTextResult, type StreamTextResult, type ToolSet, type Schema } from "ai";
  import type { ZodType, ZodTypeDef } from "zod";
  /**
@@ -48,6 +48,14 @@ export declare class FunctionCallingProvider implements AIProvider {
  * Stream text with function calling support
  */
  streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ */
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ */
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
  }
  /**
  * Create a function-calling enhanced version of any AI provider
@@ -7,6 +7,7 @@ import { generateText as aiGenerateText, Output, } from "ai";
  import { getAvailableFunctionTools, executeFunctionCall, isFunctionCallingAvailable, } from "../mcp/function-calling.js";
  import { createExecutionContext } from "../mcp/context-manager.js";
  import { mcpLogger } from "../mcp/logging.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  /**
  * Enhanced provider that enables real function calling with MCP tools
  */
@@ -106,7 +107,7 @@ export class FunctionCallingProvider {
  prompt: options.prompt,
  system: options.systemPrompt || "You are a helpful AI assistant.",
  temperature: options.temperature || 0.7,
- maxTokens: options.maxTokens || 500,
+ maxTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
  tools: toolsWithExecution,
  toolChoice: "auto", // Let the AI decide when to use tools
  maxSteps: 5, // CRITICAL: Enable multi-turn tool execution
@@ -337,6 +338,18 @@ These functions provide accurate, real-time data. Use them actively to enhance y
  return this.baseProvider.streamText(options, analysisSchema);
  }
  }
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ */
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ */
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
  /**
  * Create a function-calling enhanced version of any AI provider
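
The generate() and gen() methods added throughout this release are thin aliases: both forward their two arguments straight to generateText(), so the three calls below are equivalent. A sketch, assuming `provider` implements the updated AIProvider surface:

    const a = await provider.generateText("Hello"); // original method
    const b = await provider.generate("Hello");     // alias for CLI-SDK consistency
    const c = await provider.gen("Hello");          // short alias, same code path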
@@ -1,6 +1,6 @@
  import type { ZodType, ZodTypeDef } from "zod";
  import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult, type LanguageModelV1 } from "ai";
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  export declare class GoogleAIStudio implements AIProvider {
  private modelName;
  /**
@@ -32,4 +32,18 @@ export declare class GoogleAIStudio implements AIProvider {
  * @returns Promise resolving to GenerateTextResult or null if operation fails
  */
  generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+ * @param analysisSchema - Optional schema for output validation
+ * @returns Promise resolving to GenerateTextResult or null
+ */
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+ * @param analysisSchema - Optional schema for output validation
+ * @returns Promise resolving to GenerateTextResult or null
+ */
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
  }
@@ -2,7 +2,9 @@ import { createGoogleGenerativeAI } from "@ai-sdk/google";
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { createProxyFetch } from "../proxy/proxy-fetch.js";
+ import { evaluateResponse } from "../core/evaluation.js";
  // CRITICAL: Setup environment variables early for AI SDK compatibility
  // The AI SDK specifically looks for GOOGLE_GENERATIVE_AI_API_KEY
  // We need to ensure this is set before any AI SDK operations
@@ -113,7 +115,7 @@ export class GoogleAIStudio {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "stream"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -219,12 +221,13 @@ export class GoogleAIStudio {
  async generateText(optionsOrPrompt, analysisSchema) {
  const functionTag = "GoogleAIStudio.generateText";
  const provider = "google-ai";
+ const startTime = Date.now();
  try {
  // Parse parameters - support both string and options object
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "generate"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -272,6 +275,15 @@ export class GoogleAIStudio {
  responseLength: result.text?.length || 0,
  timeout,
  });
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ const { createAnalytics } = await import("./analytics-helper.js");
+ result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
+ }
  return result;
  }
  finally {
@@ -300,4 +312,22 @@ export class GoogleAIStudio {
  throw err; // Re-throw error to trigger fallback
  }
  }
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+ * @param analysisSchema - Optional schema for output validation
+ * @returns Promise resolving to GenerateTextResult or null
+ */
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+ * @param analysisSchema - Optional schema for output validation
+ * @returns Promise resolving to GenerateTextResult or null
+ */
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
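
GoogleAIStudio is the only provider in this diff that forwards the full six-argument form to evaluateResponse() and loads its analytics builder lazily via a dynamic import of ./analytics-helper.js. A hedged sketch of that call shape — the parameter names are read off the hunk above; their types are not shown in this diff:

    // Extended evaluation call as it appears in GoogleAIStudio.generateText()
    const evaluation = await evaluateResponse(
      prompt,                      // original user prompt
      result.text,                 // generated text to score
      options.context,             // caller-supplied context object
      options.evaluationDomain,    // optional domain hint
      options.toolUsageContext,    // optional record of tool usage
      options.conversationHistory, // optional prior turns
    );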
@@ -1,6 +1,6 @@
  import type { ZodType, ZodTypeDef } from "zod";
  import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  export declare class GoogleVertexAI implements AIProvider {
  private modelName;
  /**
@@ -27,4 +27,12 @@ export declare class GoogleVertexAI implements AIProvider {
  * @returns Promise resolving to GenerateTextResult or null if operation fails
  */
  generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ */
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ */
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
  }
@@ -24,7 +24,9 @@ async function getCreateVertexAnthropic() {
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { createProxyFetch } from "../proxy/proxy-fetch.js";
+ import { evaluateResponse } from "../core/evaluation.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -289,7 +291,7 @@ export class GoogleVertexAI {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -392,12 +394,13 @@ export class GoogleVertexAI {
  async generateText(optionsOrPrompt, analysisSchema) {
  const functionTag = "GoogleVertexAI.generateText";
  const provider = "vertex";
+ const startTime = Date.now();
  try {
  // Parse parameters - support both string and options object
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -440,6 +443,20 @@ export class GoogleVertexAI {
  responseLength: result.text?.length || 0,
  timeout,
  });
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ result.analytics = {
+ provider,
+ model: this.modelName,
+ tokens: result.usage,
+ responseTime: Date.now() - startTime,
+ context: options.context,
+ };
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+ }
  return result;
  }
  finally {
@@ -469,4 +486,16 @@ export class GoogleVertexAI {
  throw err; // Re-throw error to trigger fallback
  }
  }
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ */
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ */
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
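
GoogleVertexAI (above), HuggingFace, MistralAI, and Ollama all build the analytics payload inline with the same five fields, while GoogleAIStudio delegates to createAnalytics(). The shared shape, written as a TypeScript sketch — field names are taken from the hunks; the types are inferred, not confirmed by this diff (the published type presumably lives in core/types.d.ts, which is not shown here):

    interface AnalyticsDataSketch {
      provider: string;                  // e.g. "vertex", "huggingface"
      model: string;                     // this.modelName at call time
      tokens: unknown;                   // result.usage, passed through as-is
      responseTime: number;              // Date.now() - startTime, in ms
      context?: Record<string, unknown>; // options.context, echoed back
    }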
@@ -1,6 +1,6 @@
  import type { ZodType, ZodTypeDef } from "zod";
  import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  export declare class HuggingFace implements AIProvider {
  private modelName;
  private client;
@@ -28,4 +28,6 @@ export declare class HuggingFace implements AIProvider {
  * @returns Promise resolving to GenerateTextResult or null if operation fails
  */
  generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
  }
@@ -2,6 +2,8 @@ import { HfInference } from "@huggingface/inference";
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { evaluateResponse } from "../core/evaluation.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -107,7 +109,7 @@ class HuggingFaceLanguageModel {
  inputs: prompt,
  parameters: {
  temperature: options.temperature || 0.7,
- max_new_tokens: options.maxTokens || 500,
+ max_new_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
  return_full_text: false,
  do_sample: (options.temperature || 0.7) > 0,
  },
@@ -229,7 +231,7 @@ export class HuggingFace {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -332,12 +334,13 @@ export class HuggingFace {
  async generateText(optionsOrPrompt, analysisSchema) {
  const functionTag = "HuggingFace.generateText";
  const provider = "huggingface";
+ const startTime = Date.now();
  try {
  // Parse parameters - support both string and options object
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -379,6 +382,20 @@ export class HuggingFace {
  responseLength: result.text?.length || 0,
  timeout,
  });
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ result.analytics = {
+ provider,
+ model: this.modelName,
+ tokens: result.usage,
+ responseTime: Date.now() - startTime,
+ context: options.context,
+ };
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+ }
  return result;
  }
  finally {
@@ -407,4 +424,10 @@ export class HuggingFace {
  throw err; // Re-throw error to trigger fallback
  }
  }
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
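
Note the two default styles in these hunks: destructured options use `maxTokens = DEFAULT_MAX_TOKENS` (which applies only when the property is undefined), while direct-access sites such as `max_new_tokens` were switched from `||` to `??`. The switch matters whenever a falsy value is intentional:

    // With ||, an explicit 0 is silently replaced by the default;
    // with ??, only null/undefined fall back to DEFAULT_MAX_TOKENS.
    const viaOr = 0 || 1000;      // 1000 — caller's value lost
    const viaNullish = 0 ?? 1000; // 0    — caller's value kept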
@@ -2,7 +2,7 @@
  * NeuroLink MCP-Aware AI Provider
  * Integrates MCP tools with AI providers following Lighthouse's pattern
  */
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  import type { StreamTextResult, ToolSet, Schema, GenerateTextResult } from "ai";
  import type { ZodType, ZodTypeDef } from "zod";
  /**
@@ -51,6 +51,14 @@ export declare class MCPAwareProvider implements AIProvider {
  * Clean up session
  */
  cleanup(): Promise<void>;
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ */
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ */
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
  }
  /**
  * Create an MCP-aware provider
@@ -192,6 +192,18 @@ Please provide a response based on this information.`;
  this.mcpInitialized = false;
  }
  }
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ */
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ */
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
  /**
  * Create an MCP-aware provider
@@ -1,6 +1,6 @@
  import type { ZodType, ZodTypeDef } from "zod";
  import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  export declare class MistralAI implements AIProvider {
  private modelName;
  private client;
@@ -28,5 +28,7 @@ export declare class MistralAI implements AIProvider {
  * @returns Promise resolving to GenerateTextResult or null if operation fails
  */
  generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
  }
  export default MistralAI;
@@ -2,6 +2,8 @@ import { createMistral } from "@ai-sdk/mistral";
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { evaluateResponse } from "../core/evaluation.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -89,7 +91,7 @@ export class MistralAI {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -192,12 +194,13 @@ export class MistralAI {
  async generateText(optionsOrPrompt, analysisSchema) {
  const functionTag = "MistralAI.generateText";
  const provider = "mistral";
+ const startTime = Date.now();
  try {
  // Parse parameters - support both string and options object
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -239,6 +242,20 @@ export class MistralAI {
  responseLength: result.text?.length || 0,
  timeout,
  });
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ result.analytics = {
+ provider,
+ model: this.modelName,
+ tokens: result.usage,
+ responseTime: Date.now() - startTime,
+ context: options.context,
+ };
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+ }
  return result;
  }
  finally {
@@ -267,5 +284,11 @@ export class MistralAI {
  throw err; // Re-throw error to trigger fallback
  }
  }
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
  export default MistralAI;
@@ -10,7 +10,7 @@
  * - Health checking and service validation
  * - Streaming and non-streaming text generation
  */
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  import type { GenerateTextResult, StreamTextResult, ToolSet } from "ai";
  import type { ZodType, ZodTypeDef } from "zod";
  import type { Schema } from "ai";
@@ -48,4 +48,6 @@ export declare class Ollama implements AIProvider {
  * Generate streaming text using Ollama local models
  */
  streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
  }
@@ -13,6 +13,8 @@
  import { streamText, generateText, Output } from "ai";
  import { logger } from "../utils/logger.js";
  import { getDefaultTimeout } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { evaluateResponse } from "../core/evaluation.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -106,7 +108,7 @@ class OllamaLanguageModel {
  stream: false,
  options: {
  temperature: options.temperature || 0.7,
- num_predict: options.maxTokens || 500,
+ num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
  },
  };
  const controller = new AbortController();
@@ -176,7 +178,7 @@ class OllamaLanguageModel {
  stream: true,
  options: {
  temperature: options.temperature || 0.7,
- num_predict: options.maxTokens || 500,
+ num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
  },
  };
  const controller = new AbortController();
@@ -391,12 +393,13 @@ export class Ollama {
  async generateText(optionsOrPrompt, analysisSchema) {
  const functionTag = "Ollama.generateText";
  const provider = "ollama";
+ const startTime = Date.now();
  try {
  // Parse parameters - support both string and options object
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  // Convert timeout to milliseconds if provided as string
@@ -437,6 +440,20 @@ export class Ollama {
  finishReason: result.finishReason,
  responseLength: result.text?.length || 0,
  });
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ result.analytics = {
+ provider,
+ model: this.modelName,
+ tokens: result.usage,
+ responseTime: Date.now() - startTime,
+ context: options.context,
+ };
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+ }
  return result;
  }
  catch (err) {
@@ -461,7 +478,7 @@ export class Ollama {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  // Convert timeout to milliseconds if provided as string
@@ -542,4 +559,10 @@ export class Ollama {
  throw err; // Re-throw error to trigger fallback
  }
  }
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
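
Taken together, the provider changes replace every scattered `maxTokens || 500` / `maxTokens = 1000` literal with one DEFAULT_MAX_TOKENS imported from core/constants.js. The constant's value is not visible in this diff; the pattern is simply:

    // core/constants.js (sketch — actual value not shown in this diff)
    export const DEFAULT_MAX_TOKENS = 1000; // assumed placeholder value

    // each provider then imports the single source of truth:
    import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
    const { maxTokens = DEFAULT_MAX_TOKENS } = options;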