@juspay/neurolink 7.33.3 → 7.34.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135)
  1. package/CHANGELOG.md +15 -0
  2. package/README.md +37 -0
  3. package/dist/cli/commands/config.d.ts +3 -4
  4. package/dist/cli/commands/config.js +2 -3
  5. package/dist/cli/errorHandler.d.ts +1 -0
  6. package/dist/cli/errorHandler.js +28 -0
  7. package/dist/cli/factories/commandFactory.d.ts +23 -0
  8. package/dist/cli/factories/commandFactory.js +375 -60
  9. package/dist/cli/factories/ollamaCommandFactory.js +7 -1
  10. package/dist/cli/index.d.ts +1 -1
  11. package/dist/cli/index.js +9 -164
  12. package/dist/cli/loop/optionsSchema.d.ts +15 -0
  13. package/dist/cli/loop/optionsSchema.js +59 -0
  14. package/dist/cli/loop/session.d.ts +15 -0
  15. package/dist/cli/loop/session.js +252 -0
  16. package/dist/cli/parser.d.ts +1 -0
  17. package/dist/cli/parser.js +158 -0
  18. package/dist/cli/utils/ollamaUtils.js +6 -0
  19. package/dist/config/{conversationMemoryConfig.d.ts → conversationMemory.d.ts} +1 -1
  20. package/dist/core/baseProvider.js +43 -4
  21. package/dist/core/constants.d.ts +12 -3
  22. package/dist/core/constants.js +22 -6
  23. package/dist/core/conversationMemoryFactory.d.ts +23 -0
  24. package/dist/core/conversationMemoryFactory.js +144 -0
  25. package/dist/core/conversationMemoryInitializer.d.ts +14 -0
  26. package/dist/core/conversationMemoryInitializer.js +127 -0
  27. package/dist/core/conversationMemoryManager.d.ts +3 -2
  28. package/dist/core/conversationMemoryManager.js +4 -3
  29. package/dist/core/factory.js +19 -0
  30. package/dist/core/redisConversationMemoryManager.d.ts +73 -0
  31. package/dist/core/redisConversationMemoryManager.js +483 -0
  32. package/dist/core/types.d.ts +1 -1
  33. package/dist/factories/providerRegistry.js +2 -0
  34. package/dist/lib/config/{conversationMemoryConfig.d.ts → conversationMemory.d.ts} +1 -1
  35. package/dist/lib/core/baseProvider.js +43 -4
  36. package/dist/lib/core/constants.d.ts +12 -3
  37. package/dist/lib/core/constants.js +22 -6
  38. package/dist/lib/core/conversationMemoryFactory.d.ts +23 -0
  39. package/dist/lib/core/conversationMemoryFactory.js +144 -0
  40. package/dist/lib/core/conversationMemoryInitializer.d.ts +14 -0
  41. package/dist/lib/core/conversationMemoryInitializer.js +127 -0
  42. package/dist/lib/core/conversationMemoryManager.d.ts +3 -2
  43. package/dist/lib/core/conversationMemoryManager.js +4 -3
  44. package/dist/lib/core/factory.js +19 -0
  45. package/dist/lib/core/redisConversationMemoryManager.d.ts +73 -0
  46. package/dist/lib/core/redisConversationMemoryManager.js +483 -0
  47. package/dist/lib/core/types.d.ts +1 -1
  48. package/dist/lib/factories/providerRegistry.js +2 -0
  49. package/dist/lib/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  50. package/dist/lib/neurolink.d.ts +15 -9
  51. package/dist/lib/neurolink.js +218 -67
  52. package/dist/lib/providers/amazonBedrock.d.ts +4 -4
  53. package/dist/lib/providers/amazonBedrock.js +2 -2
  54. package/dist/lib/providers/anthropic.d.ts +4 -4
  55. package/dist/lib/providers/anthropic.js +3 -12
  56. package/dist/lib/providers/anthropicBaseProvider.js +1 -2
  57. package/dist/lib/providers/azureOpenai.d.ts +4 -4
  58. package/dist/lib/providers/azureOpenai.js +49 -8
  59. package/dist/lib/providers/googleAiStudio.d.ts +4 -4
  60. package/dist/lib/providers/googleAiStudio.js +2 -2
  61. package/dist/lib/providers/googleVertex.js +2 -2
  62. package/dist/lib/providers/huggingFace.d.ts +4 -4
  63. package/dist/lib/providers/huggingFace.js +1 -2
  64. package/dist/lib/providers/litellm.d.ts +1 -1
  65. package/dist/lib/providers/litellm.js +1 -2
  66. package/dist/lib/providers/mistral.d.ts +4 -4
  67. package/dist/lib/providers/mistral.js +4 -4
  68. package/dist/lib/providers/ollama.js +7 -8
  69. package/dist/lib/providers/openAI.d.ts +4 -4
  70. package/dist/lib/providers/openAI.js +2 -2
  71. package/dist/lib/providers/openaiCompatible.js +5 -2
  72. package/dist/lib/providers/sagemaker/language-model.d.ts +5 -0
  73. package/dist/lib/providers/sagemaker/language-model.js +9 -1
  74. package/dist/lib/session/globalSessionState.d.ts +27 -0
  75. package/dist/lib/session/globalSessionState.js +77 -0
  76. package/dist/lib/types/{conversationTypes.d.ts → conversation.d.ts} +32 -0
  77. package/dist/lib/types/generateTypes.d.ts +1 -1
  78. package/dist/lib/types/streamTypes.d.ts +1 -1
  79. package/dist/lib/utils/conversationMemory.d.ts +22 -0
  80. package/dist/lib/utils/conversationMemory.js +121 -0
  81. package/dist/lib/utils/conversationMemoryUtils.d.ts +1 -1
  82. package/dist/lib/utils/conversationMemoryUtils.js +2 -2
  83. package/dist/lib/utils/messageBuilder.d.ts +1 -1
  84. package/dist/lib/utils/messageBuilder.js +1 -1
  85. package/dist/lib/utils/providerHealth.js +7 -3
  86. package/dist/lib/utils/redis.d.ts +42 -0
  87. package/dist/lib/utils/redis.js +263 -0
  88. package/dist/lib/utils/tokenLimits.d.ts +2 -2
  89. package/dist/lib/utils/tokenLimits.js +10 -3
  90. package/dist/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  91. package/dist/neurolink.d.ts +15 -9
  92. package/dist/neurolink.js +218 -67
  93. package/dist/providers/amazonBedrock.d.ts +4 -4
  94. package/dist/providers/amazonBedrock.js +2 -2
  95. package/dist/providers/anthropic.d.ts +4 -4
  96. package/dist/providers/anthropic.js +3 -12
  97. package/dist/providers/anthropicBaseProvider.js +1 -2
  98. package/dist/providers/azureOpenai.d.ts +4 -4
  99. package/dist/providers/azureOpenai.js +49 -8
  100. package/dist/providers/googleAiStudio.d.ts +4 -4
  101. package/dist/providers/googleAiStudio.js +2 -2
  102. package/dist/providers/googleVertex.js +2 -2
  103. package/dist/providers/huggingFace.d.ts +4 -4
  104. package/dist/providers/huggingFace.js +1 -2
  105. package/dist/providers/litellm.d.ts +1 -1
  106. package/dist/providers/litellm.js +1 -2
  107. package/dist/providers/mistral.d.ts +4 -4
  108. package/dist/providers/mistral.js +4 -4
  109. package/dist/providers/ollama.js +7 -8
  110. package/dist/providers/openAI.d.ts +4 -4
  111. package/dist/providers/openAI.js +2 -2
  112. package/dist/providers/openaiCompatible.js +5 -2
  113. package/dist/providers/sagemaker/language-model.d.ts +5 -0
  114. package/dist/providers/sagemaker/language-model.js +9 -1
  115. package/dist/session/globalSessionState.d.ts +27 -0
  116. package/dist/session/globalSessionState.js +77 -0
  117. package/dist/types/{conversationTypes.d.ts → conversation.d.ts} +32 -0
  118. package/dist/types/generateTypes.d.ts +1 -1
  119. package/dist/types/streamTypes.d.ts +1 -1
  120. package/dist/utils/conversationMemory.d.ts +22 -0
  121. package/dist/utils/conversationMemory.js +121 -0
  122. package/dist/utils/conversationMemoryUtils.d.ts +1 -1
  123. package/dist/utils/conversationMemoryUtils.js +2 -2
  124. package/dist/utils/messageBuilder.d.ts +1 -1
  125. package/dist/utils/messageBuilder.js +1 -1
  126. package/dist/utils/providerHealth.js +7 -3
  127. package/dist/utils/redis.d.ts +42 -0
  128. package/dist/utils/redis.js +263 -0
  129. package/dist/utils/tokenLimits.d.ts +2 -2
  130. package/dist/utils/tokenLimits.js +10 -3
  131. package/package.json +3 -1
  132. /package/dist/config/{conversationMemoryConfig.js → conversationMemory.js} +0 -0
  133. /package/dist/lib/config/{conversationMemoryConfig.js → conversationMemory.js} +0 -0
  134. /package/dist/lib/types/{conversationTypes.js → conversation.js} +0 -0
  135. /package/dist/types/{conversationTypes.js → conversation.js} +0 -0

Selected hunks follow. Paths are relative to package/dist/; per the file list above, each dist/lib/ mirror carries the same change.

providers/azureOpenai.js

@@ -6,6 +6,7 @@ import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } f
  import { logger } from "../utils/logger.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  export class AzureOpenAIProvider extends BaseProvider {
  apiKey;
  resourceName;
@@ -19,9 +20,11 @@ export class AzureOpenAIProvider extends BaseProvider {
  this.resourceName = endpoint
  .replace("https://", "")
  .replace(/\/+$/, "") // Remove trailing slashes
- .replace(".openai.azure.com", "");
+ .replace(".openai.azure.com", "")
+ .replace(".cognitiveservices.azure.com", "");
  this.deployment =
  modelName ||
+ process.env.AZURE_OPENAI_MODEL ||
  process.env.AZURE_OPENAI_DEPLOYMENT ||
  process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
  "gpt-4o";
@@ -34,6 +37,7 @@ export class AzureOpenAIProvider extends BaseProvider {
  validateApiKey(createAzureEndpointConfig());
  }
  // Create the Azure provider instance with proxy support
+ // Let the Azure SDK handle all URL construction automatically
  this.azureProvider = createAzure({
  resourceName: this.resourceName,
  apiKey: this.apiKey,
@@ -73,20 +77,57 @@ export class AzureOpenAIProvider extends BaseProvider {
  // executeGenerate removed - BaseProvider handles all generation with tools
  async executeStream(options, _analysisSchema) {
  try {
+ // Get ALL available tools (direct + MCP + external from options) - EXACTLY like BaseProvider
+ const shouldUseTools = !options.disableTools && this.supportsTools();
+ const baseTools = shouldUseTools ? await this.getAllTools() : {};
+ const tools = shouldUseTools
+ ? {
+ ...baseTools,
+ ...(options.tools || {}), // Include external tools passed from NeuroLink
+ }
+ : undefined;
+ // DEBUG: Log detailed tool information
+ logger.debug("Azure Stream - Tool Loading Debug", {
+ shouldUseTools,
+ baseToolsProvided: !!baseTools,
+ baseToolCount: baseTools ? Object.keys(baseTools).length : 0,
+ finalToolCount: tools ? Object.keys(tools).length : 0,
+ toolNames: tools ? Object.keys(tools).slice(0, 10) : [],
+ disableTools: options.disableTools,
+ supportsTools: this.supportsTools(),
+ externalToolsCount: options.tools
+ ? Object.keys(options.tools).length
+ : 0,
+ });
+ if (tools && Object.keys(tools).length > 0) {
+ logger.debug("Azure Stream - First 5 Tools Detail", {
+ tools: Object.keys(tools)
+ .slice(0, 5)
+ .map((name) => ({
+ name,
+ description: tools[name]?.description?.substring(0, 100),
+ })),
+ });
+ }
  // Build message array from options
  const messages = buildMessagesArray(options);
  const stream = await streamText({
  model: this.azureProvider(this.deployment),
  messages: messages,
- maxTokens: options.maxTokens || 1000,
- temperature: options.temperature || 0.7,
+ ...(options.maxTokens !== null && options.maxTokens !== undefined
+ ? { maxTokens: options.maxTokens }
+ : {}),
+ ...(options.temperature !== null && options.temperature !== undefined
+ ? { temperature: options.temperature }
+ : {}),
+ tools,
+ toolChoice: shouldUseTools ? "auto" : "none",
+ maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  });
+ // Transform string stream to content object stream using BaseProvider method
+ const transformedStream = this.createTextStream(stream);
  return {
- stream: (async function* () {
- for await (const chunk of stream.textStream) {
- yield { content: chunk };
- }
- })(),
+ stream: transformedStream,
  provider: "azure",
  model: this.deployment,
  metadata: {
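
In short: the Azure constructor now accepts both Azure endpoint flavors, adds AZURE_OPENAI_MODEL to the deployment fallback chain, and the stream call only sends maxTokens/temperature when the caller actually set them. A minimal sketch of the new endpoint normalization (the resource name here is made up, not from the diff):

    // Sketch of the constructor's normalization chain shown above.
    const toResourceName = (endpoint: string): string =>
      endpoint
        .replace("https://", "")
        .replace(/\/+$/, "") // strip trailing slashes
        .replace(".openai.azure.com", "")
        .replace(".cognitiveservices.azure.com", "");

    toResourceName("https://my-resource.openai.azure.com/");           // "my-resource"
    toResourceName("https://my-resource.cognitiveservices.azure.com"); // "my-resource"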

providers/googleAiStudio.d.ts

@@ -9,13 +9,13 @@ import { BaseProvider } from "../core/baseProvider.js";
  */
  export declare class GoogleAIStudioProvider extends BaseProvider {
  constructor(modelName?: string, sdk?: unknown);
- protected getProviderName(): AIProviderName;
- protected getDefaultModel(): string;
+ getProviderName(): AIProviderName;
+ getDefaultModel(): string;
  /**
  * 🔧 PHASE 2: Return AI SDK model instance for tool calling
  */
- protected getAISDKModel(): LanguageModelV1;
- protected handleProviderError(error: unknown): Error;
+ getAISDKModel(): LanguageModelV1;
+ handleProviderError(error: unknown): Error;
  protected executeStream(options: StreamOptions, _analysisSchema?: ZodUnknownSchema | Schema<unknown>): Promise<StreamResult>;
  private executeAudioStreamViaGeminiLive;
  private getApiKey;
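
The practical effect of dropping `protected` (a sketch; the import path is illustrative, not from the diff):

    import { GoogleAIStudioProvider } from "@juspay/neurolink"; // illustrative path
    const provider = new GoogleAIStudioProvider();
    // In 7.33.3 these were compile errors ("Property ... is protected");
    // in 7.34.0 they are public and callable from outside the class hierarchy.
    const name = provider.getProviderName();
    const model = provider.getDefaultModel();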

providers/googleAiStudio.js

@@ -5,7 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
  import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
- import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
  // Create Google GenAI client
@@ -96,7 +96,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
  model,
  messages: messages,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools,
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",
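
This maxTokens change recurs across nearly every provider below: a hard default cap is replaced by pass-through. A sketch of the difference (8192 is a placeholder; the actual DEFAULT_MAX_TOKENS value is not shown in this diff):

    const DEFAULT_MAX_TOKENS = 8192; // placeholder value

    // 7.33.3: an unset maxTokens silently became the default cap
    const before = (maxTokens?: number) => maxTokens || DEFAULT_MAX_TOKENS;

    // 7.34.0: undefined is forwarded, so the model/provider default applies
    const after = (maxTokens?: number) => maxTokens;

    before(undefined); // 8192; output capped
    after(undefined);  // undefined; no explicit cap sent to the model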

providers/googleVertex.js

@@ -4,7 +4,7 @@ import { streamText, Output, } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
- import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { ModelConfigurationManager } from "../core/modelConfiguration.js";
  import { validateApiKey, createVertexProjectConfig, createGoogleAuthConfig, } from "../utils/providerConfig.js";
  import fs from "fs";
@@ -945,7 +945,7 @@ export class GoogleVertexProvider extends BaseProvider {
  // This avoids hardcoded model-specific logic and repeated config lookups
  const shouldSetMaxTokens = this.shouldSetMaxTokensCached(modelName);
  const maxTokens = shouldSetMaxTokens
- ? options.maxTokens || DEFAULT_MAX_TOKENS
+ ? options.maxTokens // No default limit
  : undefined;
  // Build complete stream options with proper typing
  let streamOptions = {

providers/huggingFace.d.ts

@@ -68,12 +68,12 @@ export declare class HuggingFaceProvider extends BaseProvider {
  /**
  * Enhanced error handling with HuggingFace-specific guidance
  */
- protected handleProviderError(error: unknown): Error;
- protected getProviderName(): AIProviderName;
- protected getDefaultModel(): string;
+ handleProviderError(error: unknown): Error;
+ getProviderName(): AIProviderName;
+ getDefaultModel(): string;
  /**
  * Returns the Vercel AI SDK model instance for HuggingFace
  */
- protected getAISDKModel(): LanguageModelV1;
+ getAISDKModel(): LanguageModelV1;
  }
  export default HuggingFaceProvider;

providers/huggingFace.js

@@ -3,7 +3,6 @@ import { streamText, } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
- import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { validateApiKey, createHuggingFaceConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
@@ -120,7 +119,7 @@ export class HuggingFaceProvider extends BaseProvider {
  model: this.model,
  messages: messages,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools: streamOptions.tools, // Tools format conversion handled by prepareStreamOptions
  toolChoice: streamOptions.toolChoice, // Tool choice handled by prepareStreamOptions
  abortSignal: timeoutController?.controller.signal,

providers/litellm.d.ts

@@ -19,7 +19,7 @@ export declare class LiteLLMProvider extends BaseProvider {
  * Returns the Vercel AI SDK model instance for LiteLLM
  */
  protected getAISDKModel(): LanguageModelV1;
- protected handleProviderError(error: unknown): Error;
+ handleProviderError(error: unknown): Error;
  /**
  * LiteLLM supports tools for compatible models
  */

providers/litellm.js

@@ -3,7 +3,6 @@ import { streamText } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
- import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { getProviderModel } from "../utils/providerConfig.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
@@ -127,7 +126,7 @@ export class LiteLLMProvider extends BaseProvider {
  model: this.model,
  messages: messages,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools: options.tools,
  toolChoice: "auto",
  abortSignal: timeoutController?.controller.signal,

providers/mistral.d.ts

@@ -11,13 +11,13 @@ export declare class MistralProvider extends BaseProvider {
  private model;
  constructor(modelName?: string, sdk?: unknown);
  protected executeStream(options: StreamOptions, _analysisSchema?: ValidationSchema): Promise<StreamResult>;
- protected getProviderName(): AIProviderName;
- protected getDefaultModel(): string;
+ getProviderName(): AIProviderName;
+ getDefaultModel(): string;
  /**
  * Returns the Vercel AI SDK model instance for Mistral
  */
- protected getAISDKModel(): LanguageModelV1;
- protected handleProviderError(error: unknown): Error;
+ getAISDKModel(): LanguageModelV1;
+ handleProviderError(error: unknown): Error;
  /**
  * Validate provider configuration
  */

providers/mistral.js

@@ -3,7 +3,7 @@ import { streamText } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
- import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
@@ -54,7 +54,7 @@ export class MistralProvider extends BaseProvider {
  model: this.model,
  messages: messages,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools,
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",
@@ -111,8 +111,8 @@ export class MistralProvider extends BaseProvider {
  message.includes("Invalid API key")) {
  return new Error("Invalid Mistral API key. Please check your MISTRAL_API_KEY environment variable.");
  }
- if (message.includes("rate limit")) {
- return new Error("Mistral rate limit exceeded. Please try again later.");
+ if (message.includes("Rate limit exceeded")) {
+ return new Error("Mistral rate limit exceeded");
  }
  return new Error(`Mistral error: ${message}`);
  }
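
Note that `includes` is case-sensitive, so this narrows the match to the exact wording the new check expects. A quick illustration (the message text is assumed, based on the new check):

    const message = "Rate limit exceeded for requests";
    const oldMatch = message.includes("rate limit");          // false; lowercase check misses it
    const newMatch = message.includes("Rate limit exceeded"); // true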

providers/ollama.js

@@ -1,6 +1,5 @@
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
- import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { modelConfig } from "../core/modelConfiguration.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  import { TimeoutError } from "../utils/timeout.js";
@@ -88,10 +87,10 @@ class OllamaLanguageModel {
  return {
  text: data.response,
  usage: {
- promptTokens: data.prompt_eval_count || this.estimateTokens(prompt),
- completionTokens: data.eval_count || this.estimateTokens(data.response),
- totalTokens: (data.prompt_eval_count || this.estimateTokens(prompt)) +
- (data.eval_count || this.estimateTokens(data.response)),
+ promptTokens: data.prompt_eval_count ?? this.estimateTokens(prompt),
+ completionTokens: data.eval_count ?? this.estimateTokens(String(data.response ?? "")),
+ totalTokens: (data.prompt_eval_count ?? this.estimateTokens(prompt)) +
+ (data.eval_count ?? this.estimateTokens(String(data.response ?? ""))),
  },
  finishReason: "stop",
  rawCall: {
@@ -271,7 +270,7 @@ export class OllamaProvider extends BaseProvider {
  * @returns true for supported models, false for unsupported models
  */
  supportsTools() {
- const modelName = this.modelName.toLowerCase();
+ const modelName = (this.modelName ?? getDefaultOllamaModel()).toLowerCase();
  // Get tool-capable models from configuration
  const ollamaConfig = modelConfig.getProviderConfiguration("ollama");
  const toolCapableModels = ollamaConfig?.modelBehavior?.toolCapableModels || [];
@@ -340,7 +339,7 @@ export class OllamaProvider extends BaseProvider {
  tool_choice: "auto",
  stream: true,
  temperature: options.temperature,
- max_tokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ max_tokens: options.maxTokens,
  }),
  signal: createAbortSignalWithTimeout(this.timeout),
  });
@@ -381,7 +380,7 @@ export class OllamaProvider extends BaseProvider {
  stream: true,
  options: {
  temperature: options.temperature,
- num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
+ num_predict: options.maxTokens,
  },
  }),
  signal: createAbortSignalWithTimeout(this.timeout),
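
The switch from || to ?? in the usage accounting matters when Ollama legitimately reports zero tokens; only null/undefined should fall back to the estimate:

    const promptEvalCount = 0; // a real zero from the Ollama response
    const estimate = 42;       // hypothetical estimateTokens() result

    const withOr = promptEvalCount || estimate;      // 42; 7.33.3 discarded the real zero
    const withNullish = promptEvalCount ?? estimate; // 0; 7.34.0 keeps it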

providers/openAI.d.ts

@@ -11,13 +11,13 @@ import type { NeuroLink } from "../neurolink.js";
  export declare class OpenAIProvider extends BaseProvider {
  private model;
  constructor(modelName?: string, neurolink?: NeuroLink);
- protected getProviderName(): AIProviderName;
- protected getDefaultModel(): string;
+ getProviderName(): AIProviderName;
+ getDefaultModel(): string;
  /**
  * Returns the Vercel AI SDK model instance for OpenAI
  */
- protected getAISDKModel(): LanguageModelV1;
- protected handleProviderError(error: unknown): Error;
+ getAISDKModel(): LanguageModelV1;
+ handleProviderError(error: unknown): Error;
  /**
  * executeGenerate method removed - generation is now handled by BaseProvider.
  * For details on the changes and migration steps, refer to the BaseProvider documentation

providers/openAI.js

@@ -5,7 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
  import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
- import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
@@ -97,7 +97,7 @@ export class OpenAIProvider extends BaseProvider {
  model: this.model,
  messages: messages,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools,
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",

providers/openaiCompatible.js

@@ -3,7 +3,6 @@ import { streamText } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
- import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  // Constants
@@ -163,7 +162,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
  prompt: options.input.text,
  system: options.systemPrompt,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools: options.tools,
  toolChoice: "auto",
  abortSignal: timeoutController?.controller.signal,
@@ -207,12 +206,16 @@ export class OpenAICompatibleProvider extends BaseProvider {
  const modelsUrl = new URL("/v1/models", this.config.baseURL).toString();
  logger.debug(`Fetching available models from: ${modelsUrl}`);
  const proxyFetch = createProxyFetch();
+ const controller = new AbortController();
+ const t = setTimeout(() => controller.abort(), 5000);
  const response = await proxyFetch(modelsUrl, {
  headers: {
  Authorization: `Bearer ${this.config.apiKey}`,
  "Content-Type": "application/json",
  },
+ signal: controller.signal,
  });
+ clearTimeout(t);
  if (!response.ok) {
  logger.warn(`Models endpoint returned ${response.status}: ${response.statusText}`);
  return this.getFallbackModels();
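
The models fetch now aborts after 5 seconds instead of hanging indefinitely. A self-contained sketch of the same pattern (fetchWithTimeout is a hypothetical helper, not from the package; the diff clears the timer after a successful response, while a try/finally variant also clears it when the fetch throws):

    async function fetchWithTimeout(url: string, init: RequestInit = {}, ms = 5000): Promise<Response> {
      const controller = new AbortController();
      const timer = setTimeout(() => controller.abort(), ms); // cancel if the request outlives the budget
      try {
        return await fetch(url, { ...init, signal: controller.signal });
      } finally {
        clearTimeout(timer);
      }
    }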

providers/sagemaker/language-model.d.ts

@@ -9,6 +9,11 @@ import type { SageMakerConfig, SageMakerModelConfig } from "./types.js";
  import type { ConnectivityResult } from "../../types/typeAliases.js";
  /**
  * SageMaker Language Model implementing LanguageModelV1 interface
+ *
+ * Token Limit Behavior:
+ * - When maxTokens is undefined, SageMaker uses the model's default token limits
+ * - When maxTokens is specified, it sets max_new_tokens parameter explicitly
+ * - This aligns with the unlimited-by-default token policy across all providers
  */
  export declare class SageMakerLanguageModel implements LanguageModelV1 {
  readonly specificationVersion = "v1";

providers/sagemaker/language-model.js

@@ -75,6 +75,11 @@ const DEFAULT_MAX_CONCURRENCY = 10;
  const DEFAULT_MIN_CONCURRENCY = 1;
  /**
  * SageMaker Language Model implementing LanguageModelV1 interface
+ *
+ * Token Limit Behavior:
+ * - When maxTokens is undefined, SageMaker uses the model's default token limits
+ * - When maxTokens is specified, it sets max_new_tokens parameter explicitly
+ * - This aligns with the unlimited-by-default token policy across all providers
  */
  export class SageMakerLanguageModel {
  specificationVersion = "v1";
@@ -345,7 +350,10 @@ export class SageMakerLanguageModel {
  const request = {
  inputs: promptText,
  parameters: {
- max_new_tokens: options.maxTokens || 512,
+ // Only include max_new_tokens if explicitly specified; let SageMaker use model defaults otherwise
+ ...(options.maxTokens !== undefined
+ ? { max_new_tokens: options.maxTokens }
+ : {}),
  temperature: options.temperature || 0.7,
  top_p: options.topP || 0.9,
  stop: options.stopSequences || [],
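
Spreading a conditional object means the key is absent from the request payload entirely, rather than sent as undefined or pinned to 512. Sketch with made-up values:

    const buildParameters = (maxTokens?: number) => ({
      ...(maxTokens !== undefined ? { max_new_tokens: maxTokens } : {}),
      temperature: 0.7,
    });

    JSON.stringify(buildParameters());    // {"temperature":0.7}; SageMaker applies model defaults
    JSON.stringify(buildParameters(256)); // {"max_new_tokens":256,"temperature":0.7}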

session/globalSessionState.d.ts (new file)

@@ -0,0 +1,27 @@
+ import { NeuroLink } from "../neurolink.js";
+ import type { ConversationMemoryConfig } from "../types/conversation.js";
+ type SessionVariableValue = string | number | boolean;
+ interface LoopSessionState {
+ neurolinkInstance: NeuroLink;
+ sessionId: string;
+ isActive: boolean;
+ conversationMemoryConfig?: ConversationMemoryConfig;
+ sessionVariables: Record<string, SessionVariableValue>;
+ }
+ export declare class GlobalSessionManager {
+ private static instance;
+ private loopSession;
+ static getInstance(): GlobalSessionManager;
+ setLoopSession(config?: ConversationMemoryConfig): string;
+ getLoopSession(): LoopSessionState | null;
+ clearLoopSession(): void;
+ getOrCreateNeuroLink(): NeuroLink;
+ getCurrentSessionId(): string | undefined;
+ setSessionVariable(key: string, value: SessionVariableValue): void;
+ getSessionVariable(key: string): SessionVariableValue | undefined;
+ getSessionVariables(): Record<string, SessionVariableValue>;
+ unsetSessionVariable(key: string): boolean;
+ clearSessionVariables(): void;
+ }
+ export declare const globalSession: GlobalSessionManager;
+ export {};

session/globalSessionState.js (new file)

@@ -0,0 +1,77 @@
+ import { nanoid } from "nanoid";
+ import { NeuroLink } from "../neurolink.js";
+ export class GlobalSessionManager {
+ static instance;
+ loopSession = null;
+ static getInstance() {
+ if (!GlobalSessionManager.instance) {
+ GlobalSessionManager.instance = new GlobalSessionManager();
+ }
+ return GlobalSessionManager.instance;
+ }
+ setLoopSession(config) {
+ const sessionId = `NL_${nanoid()}`;
+ const neurolinkOptions = {};
+ if (config?.enabled) {
+ neurolinkOptions.conversationMemory = {
+ enabled: true,
+ maxSessions: config.maxSessions,
+ maxTurnsPerSession: config.maxTurnsPerSession,
+ };
+ }
+ this.loopSession = {
+ neurolinkInstance: new NeuroLink(neurolinkOptions),
+ sessionId,
+ isActive: true,
+ conversationMemoryConfig: config,
+ sessionVariables: {},
+ };
+ return sessionId;
+ }
+ getLoopSession() {
+ return this.loopSession?.isActive ? this.loopSession : null;
+ }
+ clearLoopSession() {
+ if (this.loopSession) {
+ this.loopSession.isActive = false;
+ this.loopSession = null;
+ }
+ }
+ getOrCreateNeuroLink() {
+ const session = this.getLoopSession();
+ return session ? session.neurolinkInstance : new NeuroLink();
+ }
+ getCurrentSessionId() {
+ return this.getLoopSession()?.sessionId;
+ }
+ // Session variable management
+ setSessionVariable(key, value) {
+ const session = this.getLoopSession();
+ if (session) {
+ session.sessionVariables[key] = value;
+ }
+ }
+ getSessionVariable(key) {
+ const session = this.getLoopSession();
+ return session?.sessionVariables[key];
+ }
+ getSessionVariables() {
+ const session = this.getLoopSession();
+ return session?.sessionVariables || {};
+ }
+ unsetSessionVariable(key) {
+ const session = this.getLoopSession();
+ if (session && key in session.sessionVariables) {
+ delete session.sessionVariables[key];
+ return true;
+ }
+ return false;
+ }
+ clearSessionVariables() {
+ const session = this.getLoopSession();
+ if (session) {
+ session.sessionVariables = {};
+ }
+ }
+ }
+ export const globalSession = GlobalSessionManager.getInstance();
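
A hedged usage sketch of the new singleton, which backs the CLI loop session; the config field values are illustrative, and the import path is as published in dist:

    import { globalSession } from "./session/globalSessionState.js";

    const sessionId = globalSession.setLoopSession({
      enabled: true,
      maxSessions: 10,        // illustrative values
      maxTurnsPerSession: 50,
    });
    globalSession.setSessionVariable("region", "us-east-1");
    const neurolink = globalSession.getOrCreateNeuroLink(); // reuses the session's NeuroLink instance
    // ... run loop turns against neurolink, keyed by sessionId ...
    globalSession.clearLoopSession(); // deactivates; getLoopSession() now returns null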

types/conversation.d.ts

@@ -103,3 +103,35 @@ export declare class ConversationMemoryError extends Error {
  details?: Record<string, unknown> | undefined;
  constructor(message: string, code: "STORAGE_ERROR" | "CONFIG_ERROR" | "SESSION_NOT_FOUND" | "CLEANUP_ERROR", details?: Record<string, unknown> | undefined);
  }
+ /**
+ * Session identifier for Redis storage operations
+ */
+ export type SessionIdentifier = {
+ sessionId: string;
+ userId?: string;
+ };
+ /**
+ * Redis storage configuration
+ */
+ export type RedisStorageConfig = {
+ /** Redis host (default: 'localhost') */
+ host?: string;
+ /** Redis port (default: 6379) */
+ port?: number;
+ /** Redis password (optional) */
+ password?: string;
+ /** Redis database number (default: 0) */
+ db?: number;
+ /** Key prefix for Redis keys (default: 'neurolink:conversation:') */
+ keyPrefix?: string;
+ /** Time-to-live in seconds (default: 86400, 24 hours) */
+ ttl?: number;
+ /** Additional Redis connection options */
+ connectionOptions?: {
+ connectTimeout?: number;
+ lazyConnect?: boolean;
+ retryDelayOnFailover?: number;
+ maxRetriesPerRequest?: number;
+ [key: string]: string | number | boolean | undefined;
+ };
+ };
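
Filling in the documented defaults, a RedisStorageConfig literal for the new Redis-backed conversation memory would look like this (the connectionOptions values are illustrative, not defaults from the package):

    import type { RedisStorageConfig } from "./types/conversation.js"; // path as published in dist

    const redisConfig: RedisStorageConfig = {
      host: "localhost",                    // default
      port: 6379,                           // default
      db: 0,                                // default
      keyPrefix: "neurolink:conversation:", // default
      ttl: 86400,                           // default: 24 hours
      connectionOptions: {
        connectTimeout: 10000,              // illustrative
        lazyConnect: true,                  // illustrative
        maxRetriesPerRequest: 3,            // illustrative
      },
    };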

types/generateTypes.d.ts

@@ -3,7 +3,7 @@ import type { ValidationSchema, StandardRecord, ZodUnknownSchema } from "./typeA
  import type { AIProviderName } from "./providers.js";
  import type { AnalyticsData, TokenUsage } from "./analytics.js";
  import type { EvaluationData } from "./evaluation.js";
- import type { ChatMessage, ConversationMemoryConfig } from "./conversationTypes.js";
+ import type { ChatMessage, ConversationMemoryConfig } from "./conversation.js";
  import type { MiddlewareFactoryOptions } from "./middlewareTypes.js";
  import type { JsonValue } from "./common.js";
  /**

types/streamTypes.d.ts

@@ -4,7 +4,7 @@ import type { AIProviderName, ProviderConfig } from "./providers.js";
  import type { AnalyticsData, TokenUsage } from "./analytics.js";
  import type { EvaluationData } from "./evaluation.js";
  import type { UnknownRecord, JsonValue } from "./common.js";
- import type { ChatMessage } from "./conversationTypes.js";
+ import type { ChatMessage } from "./conversation.js";
  import type { MiddlewareFactoryOptions } from "./middlewareTypes.js";
  /**
  * Progress tracking and metadata for streaming operations

utils/conversationMemory.d.ts (new file)

@@ -0,0 +1,22 @@
+ /**
+ * Conversation Memory Utilities
+ * Handles configuration merging and conversation memory operations
+ */
+ import type { ConversationMemoryConfig, ChatMessage } from "../types/conversation.js";
+ import type { ConversationMemoryManager } from "../core/conversationMemoryManager.js";
+ import type { RedisConversationMemoryManager } from "../core/redisConversationMemoryManager.js";
+ import type { TextGenerationOptions, TextGenerationResult } from "../core/types.js";
+ /**
+ * Apply conversation memory defaults to user configuration
+ * Merges user config with environment variables and default values
+ */
+ export declare function applyConversationMemoryDefaults(userConfig?: Partial<ConversationMemoryConfig>): ConversationMemoryConfig;
+ /**
+ * Get conversation history as message array, summarizing if needed.
+ */
+ export declare function getConversationMessages(conversationMemory: ConversationMemoryManager | RedisConversationMemoryManager | null | undefined, options: TextGenerationOptions): Promise<ChatMessage[]>;
+ /**
+ * Store conversation turn for future context
+ * Saves user messages and AI responses for conversation memory
+ */
+ export declare function storeConversationTurn(conversationMemory: ConversationMemoryManager | RedisConversationMemoryManager | null | undefined, originalOptions: TextGenerationOptions, result: TextGenerationResult): Promise<void>;
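
How these three helpers appear intended to compose around a generation call, as a hedged sketch (memoryManager, options, and result are placeholders standing in for a memory manager instance and the generation inputs/outputs):

    import { applyConversationMemoryDefaults, getConversationMessages, storeConversationTurn } from "./utils/conversationMemory.js";

    // 1. Merge user config with env vars and defaults
    const config = applyConversationMemoryDefaults({ enabled: true });
    // 2. Before generating: pull prior turns (summarized if needed) to prepend as context
    const history = await getConversationMessages(memoryManager, options);
    // 3. After generating: persist the user message and AI response for the next turn
    await storeConversationTurn(memoryManager, options, result);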