@juspay/neurolink 3.0.1 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (192)
  1. package/CHANGELOG.md +57 -6
  2. package/README.md +235 -2
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/chat/client-utils.d.ts +92 -0
  5. package/dist/chat/client-utils.js +298 -0
  6. package/dist/chat/index.d.ts +27 -0
  7. package/dist/chat/index.js +41 -0
  8. package/dist/chat/session-storage.d.ts +77 -0
  9. package/dist/chat/session-storage.js +233 -0
  10. package/dist/chat/session.d.ts +95 -0
  11. package/dist/chat/session.js +257 -0
  12. package/dist/chat/sse-handler.d.ts +49 -0
  13. package/dist/chat/sse-handler.js +266 -0
  14. package/dist/chat/types.d.ts +73 -0
  15. package/dist/chat/types.js +5 -0
  16. package/dist/chat/websocket-chat-handler.d.ts +36 -0
  17. package/dist/chat/websocket-chat-handler.js +262 -0
  18. package/dist/cli/commands/config.js +12 -12
  19. package/dist/cli/commands/mcp.js +3 -4
  20. package/dist/cli/index.d.ts +0 -7
  21. package/dist/cli/index.js +247 -28
  22. package/dist/config/configManager.d.ts +60 -0
  23. package/dist/config/configManager.js +300 -0
  24. package/dist/config/types.d.ts +136 -0
  25. package/dist/config/types.js +43 -0
  26. package/dist/core/analytics.d.ts +23 -0
  27. package/dist/core/analytics.js +131 -0
  28. package/dist/core/constants.d.ts +41 -0
  29. package/dist/core/constants.js +50 -0
  30. package/dist/core/defaults.d.ts +18 -0
  31. package/dist/core/defaults.js +29 -0
  32. package/dist/core/evaluation-config.d.ts +29 -0
  33. package/dist/core/evaluation-config.js +144 -0
  34. package/dist/core/evaluation-providers.d.ts +30 -0
  35. package/dist/core/evaluation-providers.js +187 -0
  36. package/dist/core/evaluation.d.ts +117 -0
  37. package/dist/core/evaluation.js +528 -0
  38. package/dist/core/factory.js +33 -25
  39. package/dist/core/types.d.ts +165 -6
  40. package/dist/core/types.js +3 -4
  41. package/dist/index.d.ts +9 -4
  42. package/dist/index.js +25 -4
  43. package/dist/lib/agent/direct-tools.d.ts +6 -6
  44. package/dist/lib/chat/client-utils.d.ts +92 -0
  45. package/dist/lib/chat/client-utils.js +298 -0
  46. package/dist/lib/chat/index.d.ts +27 -0
  47. package/dist/lib/chat/index.js +41 -0
  48. package/dist/lib/chat/session-storage.d.ts +77 -0
  49. package/dist/lib/chat/session-storage.js +233 -0
  50. package/dist/lib/chat/session.d.ts +95 -0
  51. package/dist/lib/chat/session.js +257 -0
  52. package/dist/lib/chat/sse-handler.d.ts +49 -0
  53. package/dist/lib/chat/sse-handler.js +266 -0
  54. package/dist/lib/chat/types.d.ts +73 -0
  55. package/dist/lib/chat/types.js +5 -0
  56. package/dist/lib/chat/websocket-chat-handler.d.ts +36 -0
  57. package/dist/lib/chat/websocket-chat-handler.js +262 -0
  58. package/dist/lib/config/configManager.d.ts +60 -0
  59. package/dist/lib/config/configManager.js +300 -0
  60. package/dist/lib/config/types.d.ts +136 -0
  61. package/dist/lib/config/types.js +43 -0
  62. package/dist/lib/core/analytics.d.ts +23 -0
  63. package/dist/lib/core/analytics.js +131 -0
  64. package/dist/lib/core/constants.d.ts +41 -0
  65. package/dist/lib/core/constants.js +50 -0
  66. package/dist/lib/core/defaults.d.ts +18 -0
  67. package/dist/lib/core/defaults.js +29 -0
  68. package/dist/lib/core/evaluation-config.d.ts +29 -0
  69. package/dist/lib/core/evaluation-config.js +144 -0
  70. package/dist/lib/core/evaluation-providers.d.ts +30 -0
  71. package/dist/lib/core/evaluation-providers.js +187 -0
  72. package/dist/lib/core/evaluation.d.ts +117 -0
  73. package/dist/lib/core/evaluation.js +528 -0
  74. package/dist/lib/core/factory.js +33 -26
  75. package/dist/lib/core/types.d.ts +165 -6
  76. package/dist/lib/core/types.js +3 -4
  77. package/dist/lib/index.d.ts +9 -4
  78. package/dist/lib/index.js +25 -4
  79. package/dist/lib/mcp/contracts/mcpContract.d.ts +118 -0
  80. package/dist/lib/mcp/contracts/mcpContract.js +5 -0
  81. package/dist/lib/mcp/function-calling.js +11 -3
  82. package/dist/lib/mcp/logging.js +5 -0
  83. package/dist/lib/mcp/neurolink-mcp-client.js +2 -1
  84. package/dist/lib/mcp/orchestrator.js +18 -9
  85. package/dist/lib/mcp/registry.d.ts +49 -16
  86. package/dist/lib/mcp/registry.js +80 -6
  87. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +5 -4
  88. package/dist/lib/mcp/tool-integration.js +1 -1
  89. package/dist/lib/mcp/tool-registry.d.ts +55 -34
  90. package/dist/lib/mcp/tool-registry.js +111 -97
  91. package/dist/lib/mcp/unified-mcp.js +6 -1
  92. package/dist/lib/mcp/unified-registry.d.ts +12 -4
  93. package/dist/lib/mcp/unified-registry.js +17 -4
  94. package/dist/lib/neurolink.d.ts +26 -0
  95. package/dist/lib/neurolink.js +43 -1
  96. package/dist/lib/providers/agent-enhanced-provider.d.ts +11 -2
  97. package/dist/lib/providers/agent-enhanced-provider.js +86 -15
  98. package/dist/lib/providers/amazonBedrock.d.ts +9 -1
  99. package/dist/lib/providers/amazonBedrock.js +26 -2
  100. package/dist/lib/providers/analytics-helper.d.ts +53 -0
  101. package/dist/lib/providers/analytics-helper.js +151 -0
  102. package/dist/lib/providers/anthropic.d.ts +11 -1
  103. package/dist/lib/providers/anthropic.js +29 -4
  104. package/dist/lib/providers/azureOpenAI.d.ts +3 -1
  105. package/dist/lib/providers/azureOpenAI.js +28 -4
  106. package/dist/lib/providers/function-calling-provider.d.ts +9 -1
  107. package/dist/lib/providers/function-calling-provider.js +14 -1
  108. package/dist/lib/providers/googleAIStudio.d.ts +15 -1
  109. package/dist/lib/providers/googleAIStudio.js +32 -2
  110. package/dist/lib/providers/googleVertexAI.d.ts +9 -1
  111. package/dist/lib/providers/googleVertexAI.js +31 -2
  112. package/dist/lib/providers/huggingFace.d.ts +3 -1
  113. package/dist/lib/providers/huggingFace.js +26 -3
  114. package/dist/lib/providers/mcp-provider.d.ts +9 -1
  115. package/dist/lib/providers/mcp-provider.js +12 -0
  116. package/dist/lib/providers/mistralAI.d.ts +3 -1
  117. package/dist/lib/providers/mistralAI.js +25 -2
  118. package/dist/lib/providers/ollama.d.ts +3 -1
  119. package/dist/lib/providers/ollama.js +27 -4
  120. package/dist/lib/providers/openAI.d.ts +15 -1
  121. package/dist/lib/providers/openAI.js +32 -2
  122. package/dist/lib/proxy/proxy-fetch.js +8 -7
  123. package/dist/lib/services/streaming/streaming-manager.d.ts +29 -0
  124. package/dist/lib/services/streaming/streaming-manager.js +244 -0
  125. package/dist/lib/services/types.d.ts +155 -0
  126. package/dist/lib/services/types.js +2 -0
  127. package/dist/lib/services/websocket/websocket-server.d.ts +34 -0
  128. package/dist/lib/services/websocket/websocket-server.js +304 -0
  129. package/dist/lib/telemetry/index.d.ts +15 -0
  130. package/dist/lib/telemetry/index.js +22 -0
  131. package/dist/lib/telemetry/telemetry-service.d.ts +47 -0
  132. package/dist/lib/telemetry/telemetry-service.js +259 -0
  133. package/dist/lib/utils/streaming-utils.d.ts +67 -0
  134. package/dist/lib/utils/streaming-utils.js +201 -0
  135. package/dist/mcp/contracts/mcpContract.d.ts +118 -0
  136. package/dist/mcp/contracts/mcpContract.js +5 -0
  137. package/dist/mcp/function-calling.js +11 -3
  138. package/dist/mcp/logging.js +5 -0
  139. package/dist/mcp/neurolink-mcp-client.js +2 -1
  140. package/dist/mcp/orchestrator.js +18 -9
  141. package/dist/mcp/registry.d.ts +49 -16
  142. package/dist/mcp/registry.js +80 -6
  143. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  144. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +5 -4
  145. package/dist/mcp/tool-integration.js +1 -1
  146. package/dist/mcp/tool-registry.d.ts +55 -34
  147. package/dist/mcp/tool-registry.js +111 -97
  148. package/dist/mcp/unified-mcp.js +6 -1
  149. package/dist/mcp/unified-registry.d.ts +12 -4
  150. package/dist/mcp/unified-registry.js +17 -4
  151. package/dist/neurolink.d.ts +26 -0
  152. package/dist/neurolink.js +43 -1
  153. package/dist/providers/agent-enhanced-provider.d.ts +11 -2
  154. package/dist/providers/agent-enhanced-provider.js +86 -15
  155. package/dist/providers/amazonBedrock.d.ts +9 -1
  156. package/dist/providers/amazonBedrock.js +26 -2
  157. package/dist/providers/analytics-helper.d.ts +53 -0
  158. package/dist/providers/analytics-helper.js +151 -0
  159. package/dist/providers/anthropic.d.ts +11 -1
  160. package/dist/providers/anthropic.js +29 -4
  161. package/dist/providers/azureOpenAI.d.ts +3 -1
  162. package/dist/providers/azureOpenAI.js +29 -4
  163. package/dist/providers/function-calling-provider.d.ts +9 -1
  164. package/dist/providers/function-calling-provider.js +14 -1
  165. package/dist/providers/googleAIStudio.d.ts +15 -1
  166. package/dist/providers/googleAIStudio.js +32 -2
  167. package/dist/providers/googleVertexAI.d.ts +9 -1
  168. package/dist/providers/googleVertexAI.js +31 -2
  169. package/dist/providers/huggingFace.d.ts +3 -1
  170. package/dist/providers/huggingFace.js +26 -3
  171. package/dist/providers/mcp-provider.d.ts +9 -1
  172. package/dist/providers/mcp-provider.js +12 -0
  173. package/dist/providers/mistralAI.d.ts +3 -1
  174. package/dist/providers/mistralAI.js +25 -2
  175. package/dist/providers/ollama.d.ts +3 -1
  176. package/dist/providers/ollama.js +27 -4
  177. package/dist/providers/openAI.d.ts +15 -1
  178. package/dist/providers/openAI.js +33 -2
  179. package/dist/proxy/proxy-fetch.js +8 -7
  180. package/dist/services/streaming/streaming-manager.d.ts +29 -0
  181. package/dist/services/streaming/streaming-manager.js +244 -0
  182. package/dist/services/types.d.ts +155 -0
  183. package/dist/services/types.js +2 -0
  184. package/dist/services/websocket/websocket-server.d.ts +34 -0
  185. package/dist/services/websocket/websocket-server.js +304 -0
  186. package/dist/telemetry/index.d.ts +15 -0
  187. package/dist/telemetry/index.js +22 -0
  188. package/dist/telemetry/telemetry-service.d.ts +47 -0
  189. package/dist/telemetry/telemetry-service.js +261 -0
  190. package/dist/utils/streaming-utils.d.ts +67 -0
  191. package/dist/utils/streaming-utils.js +201 -0
  192. package/package.json +18 -2
@@ -2,6 +2,8 @@ import { createMistral } from "@ai-sdk/mistral";
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { evaluateResponse } from "../core/evaluation.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -89,7 +91,7 @@ export class MistralAI {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream request started`, {
@@ -192,12 +194,13 @@ export class MistralAI {
  async generateText(optionsOrPrompt, analysisSchema) {
  const functionTag = "MistralAI.generateText";
  const provider = "mistral";
+ const startTime = Date.now();
  try {
  // Parse parameters - support both string and options object
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate request started`, {
@@ -239,6 +242,20 @@ export class MistralAI {
  responseLength: result.text?.length || 0,
  timeout,
  });
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ result.analytics = {
+ provider,
+ model: this.modelName,
+ tokens: result.usage,
+ responseTime: Date.now() - startTime,
+ context: options.context,
+ };
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+ }
  return result;
  }
  finally {
@@ -267,5 +284,11 @@ export class MistralAI {
  throw err; // Re-throw error to trigger fallback
  }
  }
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
  export default MistralAI;
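
The generate() and gen() methods added above are thin aliases for generateText(), and the new enableAnalytics / enableEvaluation flags attach extra fields to the result. A minimal usage sketch, assuming a configured MistralAI instance (the import path below is illustrative, not a documented entry point):

  import MistralAI from "@juspay/neurolink/dist/providers/mistralAI.js"; // hypothetical path

  const mistral = new MistralAI();
  // gen() and generate() both delegate to generateText()
  const result = await mistral.gen({
    prompt: "Summarize this diff in one sentence.",
    enableAnalytics: true,  // attaches result.analytics (provider, model, tokens, responseTime, context)
    enableEvaluation: true, // attaches result.evaluation via evaluateResponse()
  });
  if (result) {
    console.log(result.text, result.analytics?.responseTime);
  }
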
@@ -10,7 +10,7 @@
  * - Health checking and service validation
  * - Streaming and non-streaming text generation
  */
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  import type { GenerateTextResult, StreamTextResult, ToolSet } from "ai";
  import type { ZodType, ZodTypeDef } from "zod";
  import type { Schema } from "ai";
@@ -48,4 +48,6 @@ export declare class Ollama implements AIProvider {
  * Generate streaming text using Ollama local models
  */
  streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
  }
@@ -13,6 +13,8 @@
  import { streamText, generateText, Output } from "ai";
  import { logger } from "../utils/logger.js";
  import { getDefaultTimeout, TimeoutError } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { evaluateResponse } from "../core/evaluation.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -106,7 +108,7 @@ class OllamaLanguageModel {
  stream: false,
  options: {
  temperature: options.temperature || 0.7,
- num_predict: options.maxTokens || 500,
+ num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
  },
  };
  const controller = new AbortController();
@@ -176,7 +178,7 @@ class OllamaLanguageModel {
  stream: true,
  options: {
  temperature: options.temperature || 0.7,
- num_predict: options.maxTokens || 500,
+ num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
  },
  };
  const controller = new AbortController();
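
Note the operator change from || 500 to ?? DEFAULT_MAX_TOKENS in both hunks above: nullish coalescing only falls back when maxTokens is null or undefined, so an explicit falsy value such as 0 is now passed through instead of being silently replaced. A quick illustration (the constant's value here is assumed for the example; the real one lives in core/constants.js):

  const DEFAULT_MAX_TOKENS = 1000; // assumed value for illustration

  const withOr = (maxTokens?: number) => maxTokens || DEFAULT_MAX_TOKENS;
  const withNullish = (maxTokens?: number) => maxTokens ?? DEFAULT_MAX_TOKENS;

  withOr(0);              // 1000 - falsy 0 is overridden
  withNullish(0);         // 0    - explicit 0 is respected
  withNullish(undefined); // 1000 - default still applies
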
@@ -391,12 +393,13 @@ export class Ollama {
  async generateText(optionsOrPrompt, analysisSchema) {
  const functionTag = "Ollama.generateText";
  const provider = "ollama";
+ const startTime = Date.now();
  try {
  // Parse parameters - support both string and options object
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  // Convert timeout to milliseconds if provided as string
@@ -437,6 +440,20 @@ export class Ollama {
  finishReason: result.finishReason,
  responseLength: result.text?.length || 0,
  });
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ result.analytics = {
+ provider,
+ model: this.modelName,
+ tokens: result.usage,
+ responseTime: Date.now() - startTime,
+ context: options.context,
+ };
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+ }
  return result;
  }
  catch (err) {
@@ -461,7 +478,7 @@ export class Ollama {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  // Convert timeout to milliseconds if provided as string
@@ -542,4 +559,10 @@ export class Ollama {
  throw err; // Re-throw error to trigger fallback
  }
  }
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
@@ -1,6 +1,6 @@
  import type { ZodType, ZodTypeDef } from "zod";
  import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult, type LanguageModelV1 } from "ai";
- import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
  export declare class OpenAI implements AIProvider {
  private modelName;
  private model;
@@ -11,4 +11,18 @@ export declare class OpenAI implements AIProvider {
  getModel(): LanguageModelV1;
  streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
  generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+ * @param analysisSchema - Optional schema for output validation
+ * @returns Promise resolving to GenerateTextResult or null
+ */
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+ * @param analysisSchema - Optional schema for output validation
+ * @returns Promise resolving to GenerateTextResult or null
+ */
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
  }
@@ -2,6 +2,9 @@ import { openai } from "@ai-sdk/openai";
  import { streamText, generateText, Output, } from "ai";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, getDefaultTimeout, TimeoutError, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { evaluateResponse } from "../core/evaluation.js";
+ import { createAnalytics } from "../core/analytics.js";
  // Default system context
  const DEFAULT_SYSTEM_CONTEXT = {
  systemPrompt: "You are a helpful AI assistant.",
@@ -60,7 +63,7 @@ export class OpenAI {
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Stream text started`, {
@@ -152,12 +155,13 @@ export class OpenAI {
  async generateText(optionsOrPrompt, analysisSchema) {
  const functionTag = "OpenAI.generateText";
  const provider = "openai";
+ const startTime = Date.now();
  try {
  // Parse parameters - support both string and options object
  const options = typeof optionsOrPrompt === "string"
  ? { prompt: optionsOrPrompt }
  : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
+ const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
  // Use schema from options or fallback parameter
  const finalSchema = schema || analysisSchema;
  logger.debug(`[${functionTag}] Generate text started`, {
@@ -198,6 +202,15 @@ export class OpenAI {
  responseLength: result.text?.length || 0,
  timeout,
  });
+ // Add analytics if enabled
+ if (options.enableAnalytics) {
+ const { createAnalytics } = await import("./analytics-helper.js");
+ result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
+ }
+ // Add evaluation if enabled
+ if (options.enableEvaluation) {
+ result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
+ }
  return result;
  }
  finally {
@@ -226,4 +239,22 @@ export class OpenAI {
  throw err; // Re-throw error to trigger fallback
  }
  }
+ /**
+ * Alias for generateText() - CLI-SDK consistency
+ * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+ * @param analysisSchema - Optional schema for output validation
+ * @returns Promise resolving to GenerateTextResult or null
+ */
+ async generate(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
+ /**
+ * Short alias for generateText() - CLI-SDK consistency
+ * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+ * @param analysisSchema - Optional schema for output validation
+ * @returns Promise resolving to GenerateTextResult or null
+ */
+ async gen(optionsOrPrompt, analysisSchema) {
+ return this.generateText(optionsOrPrompt, analysisSchema);
+ }
  }
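
Unlike the Mistral and Ollama providers above, which build the analytics object inline, the OpenAI path delegates to createAnalytics() from analytics-helper.js (loaded via dynamic import, despite the unused static import of core/analytics.js) and forwards extended evaluation parameters. A hedged sketch of what a caller sees, assuming an OpenAI instance named openai (field names are taken from the diff; exact shapes live in core/types.d.ts):

  const result = await openai.generate({
    prompt: "Classify this support ticket.",
    enableAnalytics: true,
    enableEvaluation: true,
    evaluationDomain: "support", // forwarded to evaluateResponse()
    conversationHistory: [],     // forwarded to evaluateResponse()
  });
  // result.analytics comes from createAnalytics(provider, model, result, elapsedMs, context)
  // result.evaluation comes from evaluateResponse(prompt, text, context, domain, toolUsage, history)
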
@@ -2,6 +2,7 @@
  * Proxy-aware fetch implementation for AI SDK providers
  * Implements the proven Vercel AI SDK proxy pattern using undici
  */
+ import { logger } from "../utils/logger.js";
  /**
  * Create a proxy-aware fetch function
  * This implements the community-validated approach for Vercel AI SDK
@@ -11,12 +12,12 @@ export function createProxyFetch() {
  const httpProxy = process.env.HTTP_PROXY || process.env.http_proxy;
  // If no proxy configured, return standard fetch
  if (!httpsProxy && !httpProxy) {
- console.log("[Proxy Fetch] No proxy environment variables found - using standard fetch");
+ logger.debug("[Proxy Fetch] No proxy environment variables found - using standard fetch");
  return fetch;
  }
- console.log(`[Proxy Fetch] Configuring proxy with undici ProxyAgent:`);
- console.log(`[Proxy Fetch] HTTP_PROXY: ${httpProxy || "not set"}`);
- console.log(`[Proxy Fetch] HTTPS_PROXY: ${httpsProxy || "not set"}`);
+ logger.debug(`[Proxy Fetch] Configuring proxy with undici ProxyAgent`);
+ logger.debug(`[Proxy Fetch] HTTP_PROXY: ${httpProxy || "not set"}`);
+ logger.debug(`[Proxy Fetch] HTTPS_PROXY: ${httpsProxy || "not set"}`);
  // Return proxy-aware fetch function
  return async (input, init) => {
  try {
@@ -30,7 +31,7 @@ export function createProxyFetch() {
  : new URL(input.url);
  const proxyUrl = url.protocol === "https:" ? httpsProxy : httpProxy;
  if (proxyUrl) {
- console.log(`[Proxy Fetch] Creating ProxyAgent for ${url.hostname} via ${proxyUrl}`);
+ logger.debug(`[Proxy Fetch] Creating ProxyAgent for ${url.hostname} via ${proxyUrl}`);
  // Create ProxyAgent
  const dispatcher = new ProxyAgent(proxyUrl);
  // Use undici fetch with dispatcher
@@ -38,12 +39,12 @@ export function createProxyFetch() {
  ...init,
  dispatcher: dispatcher,
  });
- console.log(`[Proxy Fetch] ✅ Request proxied successfully to ${url.hostname}`);
+ logger.debug(`[Proxy Fetch] ✅ Request proxied successfully to ${url.hostname}`);
  return response; // Type assertion to avoid complex type issues
  }
  }
  catch (error) {
- console.warn(`[Proxy Fetch] Proxy failed (${error.message}), falling back to direct connection`);
+ logger.warn(`[Proxy Fetch] Proxy failed (${error.message}), falling back to direct connection`);
  }
  // Fallback to standard fetch
  return fetch(input, init);
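
The proxy behavior is driven entirely by the HTTP_PROXY / HTTPS_PROXY environment variables, and the returned function is a drop-in fetch replacement that falls back to a direct connection on proxy failure. A minimal wiring sketch, assuming a provider factory that accepts a custom fetch (the @ai-sdk provider settings expose a fetch option; the file path is assumed):

  import { createOpenAI } from "@ai-sdk/openai";
  import { createProxyFetch } from "./proxy-fetch.js"; // path assumed

  // Run with e.g. HTTPS_PROXY=http://proxy.internal:3128
  const openai = createOpenAI({
    fetch: createProxyFetch(), // returns plain fetch when no proxy vars are set
  });
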
@@ -0,0 +1,29 @@
+ import { EventEmitter } from "events";
+ import type { StreamingSession, StreamingConfig, StreamingPoolConfig, StreamingMetrics, StreamingHealthStatus, BufferConfig } from "../types.js";
+ export declare class StreamingManager extends EventEmitter {
+ private activeSessions;
+ private streamingPools;
+ private metrics;
+ private healthCheckInterval?;
+ private startTime;
+ constructor();
+ createStreamingSession(config: StreamingConfig): Promise<StreamingSession>;
+ terminateStreamingSession(sessionId: string): Promise<void>;
+ pauseStreamingSession(sessionId: string): Promise<void>;
+ resumeStreamingSession(sessionId: string): Promise<void>;
+ optimizeStreamingLatency(sessionId: string): Promise<void>;
+ enableStreamingCompression(sessionId: string): Promise<void>;
+ configureStreamingBuffering(sessionId: string, bufferConfig: BufferConfig): Promise<void>;
+ createStreamingPool(poolId: string, config: StreamingPoolConfig): Promise<void>;
+ balanceStreamingLoad(poolId: string): Promise<void>;
+ scaleStreamingCapacity(poolId: string, scale: number): Promise<void>;
+ getStreamingMetrics(sessionId?: string): StreamingMetrics;
+ getStreamingHealthStatus(): StreamingHealthStatus;
+ private updateGlobalMetrics;
+ private startHealthMonitoring;
+ private roundRobinBalance;
+ private leastConnectionsBalance;
+ private weightedBalance;
+ private adaptiveBalance;
+ destroy(): void;
+ }
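
A short usage sketch against the declaration above; the StreamingConfig fields shown are inferred from the implementation that follows (latencyTarget, bufferSize, maxChunkSize, streamingMode, compressionEnabled), not from documented types:

  const manager = new StreamingManager();

  const session = await manager.createStreamingSession({
    provider: "openai",
    bufferSize: 4096,
    maxChunkSize: 1024,
    latencyTarget: 200, // ms; consulted by optimizeStreamingLatency()
    streamingMode: "real-time",
    compressionEnabled: false,
  });

  await manager.optimizeStreamingLatency(session.id);
  console.log(manager.getStreamingMetrics(session.id));
  manager.destroy(); // clears sessions and stops the 30s health-check interval
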
@@ -0,0 +1,244 @@
+ import { EventEmitter } from "events";
+ import { randomUUID } from "crypto";
+ export class StreamingManager extends EventEmitter {
+ activeSessions = new Map();
+ streamingPools = new Map();
+ metrics;
+ healthCheckInterval;
+ startTime;
+ constructor() {
+ super();
+ this.startTime = Date.now();
+ this.metrics = {
+ activeSessions: 0,
+ totalBytesTransferred: 0,
+ averageLatency: 0,
+ throughputBps: 0,
+ errorRate: 0,
+ connectionCount: 0,
+ uptime: 0,
+ };
+ this.startHealthMonitoring();
+ }
+ // Stream Lifecycle Management
+ async createStreamingSession(config) {
+ const sessionId = randomUUID();
+ const session = {
+ id: sessionId,
+ connectionId: config.provider, // Temporary, should be actual connection ID
+ provider: config.provider,
+ status: "active",
+ startTime: Date.now(),
+ lastActivity: Date.now(),
+ config,
+ metrics: {
+ bytesTransferred: 0,
+ messagesCount: 0,
+ averageLatency: 0,
+ errorCount: 0,
+ },
+ };
+ this.activeSessions.set(sessionId, session);
+ this.updateGlobalMetrics();
+ console.log(`[Streaming Manager] Created session ${sessionId} for provider ${config.provider}`);
+ this.emit("session-created", session);
+ return session;
+ }
+ async terminateStreamingSession(sessionId) {
+ const session = this.activeSessions.get(sessionId);
+ if (!session) {
+ throw new Error(`Session ${sessionId} not found`);
+ }
+ session.status = "terminated";
+ this.activeSessions.delete(sessionId);
+ this.updateGlobalMetrics();
+ console.log(`[Streaming Manager] Terminated session ${sessionId}`);
+ this.emit("session-terminated", session);
+ }
+ async pauseStreamingSession(sessionId) {
+ const session = this.activeSessions.get(sessionId);
+ if (!session) {
+ throw new Error(`Session ${sessionId} not found`);
+ }
+ if (session.status === "active") {
+ session.status = "paused";
+ console.log(`[Streaming Manager] Paused session ${sessionId}`);
+ this.emit("session-paused", session);
+ }
+ }
+ async resumeStreamingSession(sessionId) {
+ const session = this.activeSessions.get(sessionId);
+ if (!session) {
+ throw new Error(`Session ${sessionId} not found`);
+ }
+ if (session.status === "paused") {
+ session.status = "active";
+ session.lastActivity = Date.now();
+ console.log(`[Streaming Manager] Resumed session ${sessionId}`);
+ this.emit("session-resumed", session);
+ }
+ }
+ // Stream Optimization
+ async optimizeStreamingLatency(sessionId) {
+ const session = this.activeSessions.get(sessionId);
+ if (!session) {
+ return;
+ }
+ // Adaptive optimization based on current metrics
+ const currentLatency = session.metrics.averageLatency;
+ const targetLatency = session.config.latencyTarget;
+ if (currentLatency > targetLatency * 1.2) {
+ // Increase buffer size for better throughput
+ session.config.bufferSize = Math.min(session.config.bufferSize * 1.5, 16384);
+ session.config.streamingMode = "buffered";
+ }
+ else if (currentLatency < targetLatency * 0.8) {
+ // Decrease buffer size for better latency
+ session.config.bufferSize = Math.max(session.config.bufferSize * 0.8, 1024);
+ session.config.streamingMode = "real-time";
+ }
+ console.log(`[Streaming Manager] Optimized session ${sessionId}: latency=${currentLatency}ms, mode=${session.config.streamingMode}`);
+ }
+ async enableStreamingCompression(sessionId) {
+ const session = this.activeSessions.get(sessionId);
+ if (!session) {
+ return;
+ }
+ session.config.compressionEnabled = true;
+ console.log(`[Streaming Manager] Enabled compression for session ${sessionId}`);
+ }
+ async configureStreamingBuffering(sessionId, bufferConfig) {
+ const session = this.activeSessions.get(sessionId);
+ if (!session) {
+ return;
+ }
+ session.config.bufferSize = bufferConfig.maxSize;
+ session.config.maxChunkSize = Math.min(session.config.maxChunkSize, bufferConfig.flushThreshold);
+ console.log(`[Streaming Manager] Updated buffer config for session ${sessionId}:`, bufferConfig);
+ }
+ // Multi-Stream Coordination
+ async createStreamingPool(poolId, config) {
+ const pool = {
+ id: poolId,
+ maxSessions: config.maxConcurrentSessions,
+ activeSessions: new Set(),
+ config,
+ loadBalancer: config.loadBalancing,
+ };
+ this.streamingPools.set(poolId, pool);
+ console.log(`[Streaming Manager] Created pool ${poolId} with max ${config.maxConcurrentSessions} sessions`);
+ }
+ async balanceStreamingLoad(poolId) {
+ const pool = this.streamingPools.get(poolId);
+ if (!pool) {
+ return;
+ }
+ const activeSessions = Array.from(pool.activeSessions)
+ .map((sessionId) => this.activeSessions.get(sessionId))
+ .filter((session) => session && session.status === "active");
+ // Implement load balancing strategies
+ switch (pool.loadBalancer) {
+ case "round-robin":
+ this.roundRobinBalance(activeSessions);
+ break;
+ case "least-connections":
+ this.leastConnectionsBalance(activeSessions);
+ break;
+ case "weighted":
+ this.weightedBalance(activeSessions);
+ break;
+ case "adaptive":
+ this.adaptiveBalance(activeSessions);
+ break;
+ }
+ }
+ async scaleStreamingCapacity(poolId, scale) {
+ const pool = this.streamingPools.get(poolId);
+ if (!pool) {
+ return;
+ }
+ const newMaxSessions = Math.max(1, Math.floor(pool.maxSessions * scale));
+ pool.maxSessions = newMaxSessions;
+ pool.config.maxConcurrentSessions = newMaxSessions;
+ console.log(`[Streaming Manager] Scaled pool ${poolId} to ${newMaxSessions} max sessions (${scale}x)`);
+ }
+ // Performance Monitoring
+ getStreamingMetrics(sessionId) {
+ if (sessionId) {
+ const session = this.activeSessions.get(sessionId);
+ if (session) {
+ return {
+ sessionId,
+ activeSessions: 1,
+ totalBytesTransferred: session.metrics.bytesTransferred,
+ averageLatency: session.metrics.averageLatency,
+ throughputBps: session.metrics.bytesTransferred /
+ ((Date.now() - session.startTime) / 1000),
+ errorRate: session.metrics.errorCount /
+ Math.max(session.metrics.messagesCount, 1),
+ connectionCount: 1,
+ uptime: Date.now() - session.startTime,
+ };
+ }
+ }
+ return { ...this.metrics, uptime: Date.now() - this.startTime };
+ }
+ getStreamingHealthStatus() {
+ const metrics = this.getStreamingMetrics();
+ const issues = [];
+ let status = "healthy";
+ if (metrics.errorRate > 0.1) {
+ issues.push(`High error rate: ${(metrics.errorRate * 100).toFixed(1)}%`);
+ status = "degraded";
+ }
+ if (metrics.averageLatency > 1000) {
+ issues.push(`High latency: ${metrics.averageLatency}ms`);
+ status = status === "healthy" ? "degraded" : "unhealthy";
+ }
+ if (metrics.activeSessions === 0 && this.activeSessions.size > 0) {
+ issues.push("Session count mismatch");
+ status = "unhealthy";
+ }
+ return {
+ status,
+ activeSessions: metrics.activeSessions,
+ errorRate: metrics.errorRate,
+ averageLatency: metrics.averageLatency,
+ lastHealthCheck: Date.now(),
+ issues,
+ };
+ }
+ // Private helper methods
+ updateGlobalMetrics() {
+ this.metrics.activeSessions = this.activeSessions.size;
+ this.metrics.connectionCount = this.activeSessions.size;
+ }
+ startHealthMonitoring() {
+ this.healthCheckInterval = setInterval(() => {
+ const health = this.getStreamingHealthStatus();
+ if (health.status !== "healthy") {
+ console.warn("[Streaming Manager] Health check:", health);
+ this.emit("health-warning", health);
+ }
+ }, 30000); // Check every 30 seconds
+ }
+ roundRobinBalance(sessions) {
+ // Round-robin implementation
+ }
+ leastConnectionsBalance(sessions) {
+ // Least connections implementation
+ }
+ weightedBalance(sessions) {
+ // Weighted load balancing implementation
+ }
+ adaptiveBalance(sessions) {
+ // Adaptive load balancing implementation
+ }
+ destroy() {
+ if (this.healthCheckInterval) {
+ clearInterval(this.healthCheckInterval);
+ }
+ this.activeSessions.clear();
+ this.streamingPools.clear();
+ }
+ }
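
The pool API coordinates multiple sessions, though in this release the four balancing strategies are empty stubs, so balanceStreamingLoad() selects a strategy but performs no actual rebalancing. A sketch of the pool lifecycle (StreamingPoolConfig fields inferred from the code above):

  await manager.createStreamingPool("pool-1", {
    maxConcurrentSessions: 8,
    loadBalancing: "adaptive", // "round-robin" | "least-connections" | "weighted" | "adaptive"
  });

  await manager.scaleStreamingCapacity("pool-1", 2); // maxSessions becomes floor(8 * 2) = 16, min 1
  await manager.balanceStreamingLoad("pool-1");      // dispatches to a stub strategy method
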