@juspay/neurolink 8.3.0 → 8.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/CHANGELOG.md +12 -0
  2. package/README.md +1 -0
  3. package/dist/adapters/providerImageAdapter.d.ts +1 -1
  4. package/dist/adapters/providerImageAdapter.js +62 -0
  5. package/dist/agent/directTools.d.ts +0 -72
  6. package/dist/agent/directTools.js +3 -74
  7. package/dist/cli/commands/config.d.ts +18 -18
  8. package/dist/cli/factories/commandFactory.js +1 -0
  9. package/dist/constants/enums.d.ts +1 -0
  10. package/dist/constants/enums.js +3 -1
  11. package/dist/constants/tokens.d.ts +3 -0
  12. package/dist/constants/tokens.js +3 -0
  13. package/dist/core/baseProvider.d.ts +56 -53
  14. package/dist/core/baseProvider.js +107 -1095
  15. package/dist/core/constants.d.ts +3 -0
  16. package/dist/core/constants.js +6 -3
  17. package/dist/core/modelConfiguration.js +10 -0
  18. package/dist/core/modules/GenerationHandler.d.ts +63 -0
  19. package/dist/core/modules/GenerationHandler.js +230 -0
  20. package/dist/core/modules/MessageBuilder.d.ts +39 -0
  21. package/dist/core/modules/MessageBuilder.js +179 -0
  22. package/dist/core/modules/StreamHandler.d.ts +52 -0
  23. package/dist/core/modules/StreamHandler.js +103 -0
  24. package/dist/core/modules/TelemetryHandler.d.ts +64 -0
  25. package/dist/core/modules/TelemetryHandler.js +170 -0
  26. package/dist/core/modules/ToolsManager.d.ts +98 -0
  27. package/dist/core/modules/ToolsManager.js +521 -0
  28. package/dist/core/modules/Utilities.d.ts +88 -0
  29. package/dist/core/modules/Utilities.js +329 -0
  30. package/dist/factories/providerRegistry.js +1 -1
  31. package/dist/lib/adapters/providerImageAdapter.d.ts +1 -1
  32. package/dist/lib/adapters/providerImageAdapter.js +62 -0
  33. package/dist/lib/agent/directTools.d.ts +0 -72
  34. package/dist/lib/agent/directTools.js +3 -74
  35. package/dist/lib/constants/enums.d.ts +1 -0
  36. package/dist/lib/constants/enums.js +3 -1
  37. package/dist/lib/constants/tokens.d.ts +3 -0
  38. package/dist/lib/constants/tokens.js +3 -0
  39. package/dist/lib/core/baseProvider.d.ts +56 -53
  40. package/dist/lib/core/baseProvider.js +107 -1095
  41. package/dist/lib/core/constants.d.ts +3 -0
  42. package/dist/lib/core/constants.js +6 -3
  43. package/dist/lib/core/modelConfiguration.js +10 -0
  44. package/dist/lib/core/modules/GenerationHandler.d.ts +63 -0
  45. package/dist/lib/core/modules/GenerationHandler.js +231 -0
  46. package/dist/lib/core/modules/MessageBuilder.d.ts +39 -0
  47. package/dist/lib/core/modules/MessageBuilder.js +180 -0
  48. package/dist/lib/core/modules/StreamHandler.d.ts +52 -0
  49. package/dist/lib/core/modules/StreamHandler.js +104 -0
  50. package/dist/lib/core/modules/TelemetryHandler.d.ts +64 -0
  51. package/dist/lib/core/modules/TelemetryHandler.js +171 -0
  52. package/dist/lib/core/modules/ToolsManager.d.ts +98 -0
  53. package/dist/lib/core/modules/ToolsManager.js +522 -0
  54. package/dist/lib/core/modules/Utilities.d.ts +88 -0
  55. package/dist/lib/core/modules/Utilities.js +330 -0
  56. package/dist/lib/factories/providerRegistry.js +1 -1
  57. package/dist/lib/mcp/servers/agent/directToolsServer.js +0 -1
  58. package/dist/lib/memory/mem0Initializer.d.ts +32 -1
  59. package/dist/lib/memory/mem0Initializer.js +55 -2
  60. package/dist/lib/models/modelRegistry.js +44 -0
  61. package/dist/lib/neurolink.d.ts +1 -1
  62. package/dist/lib/neurolink.js +43 -10
  63. package/dist/lib/providers/amazonBedrock.js +59 -10
  64. package/dist/lib/providers/anthropic.js +2 -30
  65. package/dist/lib/providers/azureOpenai.js +2 -24
  66. package/dist/lib/providers/googleAiStudio.js +2 -24
  67. package/dist/lib/providers/googleVertex.js +2 -45
  68. package/dist/lib/providers/huggingFace.js +3 -31
  69. package/dist/lib/providers/litellm.d.ts +1 -1
  70. package/dist/lib/providers/litellm.js +110 -44
  71. package/dist/lib/providers/mistral.js +5 -32
  72. package/dist/lib/providers/ollama.d.ts +1 -0
  73. package/dist/lib/providers/ollama.js +476 -129
  74. package/dist/lib/providers/openAI.js +2 -28
  75. package/dist/lib/providers/openaiCompatible.js +3 -31
  76. package/dist/lib/types/content.d.ts +16 -113
  77. package/dist/lib/types/content.js +16 -2
  78. package/dist/lib/types/conversation.d.ts +3 -17
  79. package/dist/lib/types/generateTypes.d.ts +2 -2
  80. package/dist/lib/types/index.d.ts +2 -0
  81. package/dist/lib/types/index.js +2 -0
  82. package/dist/lib/types/multimodal.d.ts +282 -0
  83. package/dist/lib/types/multimodal.js +101 -0
  84. package/dist/lib/types/streamTypes.d.ts +2 -2
  85. package/dist/lib/utils/imageProcessor.d.ts +1 -1
  86. package/dist/lib/utils/messageBuilder.js +25 -2
  87. package/dist/lib/utils/multimodalOptionsBuilder.d.ts +1 -1
  88. package/dist/lib/utils/pdfProcessor.d.ts +9 -0
  89. package/dist/lib/utils/pdfProcessor.js +67 -9
  90. package/dist/mcp/servers/agent/directToolsServer.js +0 -1
  91. package/dist/memory/mem0Initializer.d.ts +32 -1
  92. package/dist/memory/mem0Initializer.js +55 -2
  93. package/dist/models/modelRegistry.js +44 -0
  94. package/dist/neurolink.d.ts +1 -1
  95. package/dist/neurolink.js +43 -10
  96. package/dist/providers/amazonBedrock.js +59 -10
  97. package/dist/providers/anthropic.js +2 -30
  98. package/dist/providers/azureOpenai.js +2 -24
  99. package/dist/providers/googleAiStudio.js +2 -24
  100. package/dist/providers/googleVertex.js +2 -45
  101. package/dist/providers/huggingFace.js +3 -31
  102. package/dist/providers/litellm.d.ts +1 -1
  103. package/dist/providers/litellm.js +110 -44
  104. package/dist/providers/mistral.js +5 -32
  105. package/dist/providers/ollama.d.ts +1 -0
  106. package/dist/providers/ollama.js +476 -129
  107. package/dist/providers/openAI.js +2 -28
  108. package/dist/providers/openaiCompatible.js +3 -31
  109. package/dist/types/content.d.ts +16 -113
  110. package/dist/types/content.js +16 -2
  111. package/dist/types/conversation.d.ts +3 -17
  112. package/dist/types/generateTypes.d.ts +2 -2
  113. package/dist/types/index.d.ts +2 -0
  114. package/dist/types/index.js +2 -0
  115. package/dist/types/multimodal.d.ts +282 -0
  116. package/dist/types/multimodal.js +100 -0
  117. package/dist/types/streamTypes.d.ts +2 -2
  118. package/dist/utils/imageProcessor.d.ts +1 -1
  119. package/dist/utils/messageBuilder.js +25 -2
  120. package/dist/utils/multimodalOptionsBuilder.d.ts +1 -1
  121. package/dist/utils/pdfProcessor.d.ts +9 -0
  122. package/dist/utils/pdfProcessor.js +67 -9
  123. package/package.json +5 -2
@@ -48,6 +48,9 @@ export declare const PROVIDER_MAX_TOKENS: {
     azure: {
         default: number;
     };
+    mistral: {
+        default: number;
+    };
     ollama: {
         default: number;
     };
@@ -39,7 +39,7 @@ export const PROVIDER_MAX_TOKENS = {
         default: 64000,
     },
     openai: {
-        default: 500000,
+        default: 128000,
     },
     "google-ai": {
         default: 64000,
@@ -51,13 +51,16 @@ export const PROVIDER_MAX_TOKENS = {
         default: 64000,
     },
     azure: {
-        default: 32000,
+        default: 128000,
+    },
+    mistral: {
+        default: 128000,
     },
     ollama: {
         default: 64000,
     },
     litellm: {
-        default: 500000,
+        default: 128000,
     },
     default: 64000,
 };
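The two hunks above lower the optimistic 500000-token defaults for openai and litellm, raise azure, and add an entry for mistral. How these caps are consumed is not shown in this diff; the following TypeScript sketch only illustrates the lookup-with-fallback pattern implied by the table shape (the helper name resolveMaxTokens is an assumption, not part of the package):

    // Sketch of the table shape shown in the hunks above (subset).
    const PROVIDER_MAX_TOKENS = {
      openai: { default: 128000 },
      azure: { default: 128000 },
      mistral: { default: 128000 },
      ollama: { default: 64000 },
      litellm: { default: 128000 },
      default: 64000,
    } as const;

    // Hypothetical helper: cap a requested token budget at the provider default,
    // falling back to the table-wide default for unknown providers.
    function resolveMaxTokens(provider: string, requested?: number): number {
      const entry = (PROVIDER_MAX_TOKENS as Record<string, { default: number } | number>)[provider];
      const cap = typeof entry === "object" ? entry.default : PROVIDER_MAX_TOKENS.default;
      return requested ? Math.min(requested, cap) : cap;
    }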
@@ -320,6 +320,16 @@ export class ModelConfigurationManager {
             MODEL_NAMES.OLLAMA.BALANCED,
             MODEL_NAMES.OLLAMA.QUALITY,
         ]),
+        toolCapableModels: this.getConfigArray("OLLAMA_TOOL_CAPABLE_MODELS", [
+            "llama3.1",
+            "mistral",
+            "hermes3",
+            "qwen2.5",
+            "codellama",
+            "dolphin",
+            "openchat",
+            "solar",
+        ]),
        specialHandling: this.getConfigObject("OLLAMA_SPECIAL_HANDLING", {
            baseUrl: this.getConfigValue("OLLAMA_BASE_URL", "http://localhost:11434"),
        }),
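The new toolCapableModels list is read through getConfigArray, so it can presumably be overridden via the OLLAMA_TOOL_CAPABLE_MODELS setting; neither the override format nor the downstream check is visible in this hunk (the related logic lands in the reworked ollama.js). A hedged sketch of how such a list might gate tool calling, with a hypothetical helper and a prefix-matching rule that is an assumption:

    // Hypothetical helper; the real check ships inside the updated ollama.js provider.
    function isToolCapable(modelName: string, toolCapableModels: string[]): boolean {
      const normalized = modelName.toLowerCase();
      // Assume family-prefix matching, e.g. "qwen2.5:14b" matches "qwen2.5".
      return toolCapableModels.some((family) => normalized.startsWith(family.toLowerCase()));
    }

    // Example against the defaults listed in the hunk above:
    isToolCapable("qwen2.5:14b", ["llama3.1", "mistral", "hermes3", "qwen2.5"]); // true
    isToolCapable("phi3:mini", ["llama3.1", "mistral", "hermes3", "qwen2.5"]);   // false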
@@ -0,0 +1,63 @@
+/**
+ * Generation Handler Module
+ *
+ * Handles text generation execution, result formatting, and tool information extraction.
+ * Extracted from BaseProvider to follow Single Responsibility Principle.
+ *
+ * Responsibilities:
+ * - Generation execution with AI SDK
+ * - Tool information extraction
+ * - Result formatting and enhancement
+ * - Response analysis and logging
+ *
+ * @module core/modules/GenerationHandler
+ */
+import type { LanguageModelV1, CoreMessage, Tool } from "ai";
+import { generateText } from "ai";
+import type { TextGenerationOptions, EnhancedGenerateResult, AIProviderName, StandardRecord } from "../../types/index.js";
+/**
+ * GenerationHandler class - Handles text generation operations for AI providers
+ */
+export declare class GenerationHandler {
+    private readonly providerName;
+    private readonly modelName;
+    private readonly supportsToolsFn;
+    private readonly getTelemetryConfigFn;
+    private readonly handleToolStorageFn;
+    constructor(providerName: AIProviderName, modelName: string, supportsToolsFn: () => boolean, getTelemetryConfigFn: (options: TextGenerationOptions, type: string) => {
+        isEnabled: boolean;
+        functionId?: string;
+        metadata?: Record<string, string | number | boolean>;
+    } | undefined, handleToolStorageFn: (toolCalls: unknown[], toolResults: unknown[], options: TextGenerationOptions, timestamp: Date) => Promise<void>);
+    /**
+     * Execute the generation with AI SDK
+     */
+    executeGeneration(model: LanguageModelV1, messages: CoreMessage[], tools: Record<string, Tool>, options: TextGenerationOptions): Promise<Awaited<ReturnType<typeof generateText>>>;
+    /**
+     * Log generation completion information
+     */
+    logGenerationComplete(generateResult: Awaited<ReturnType<typeof generateText>>): void;
+    /**
+     * Extract tool information from generation result
+     */
+    extractToolInformation(generateResult: Awaited<ReturnType<typeof generateText>>): {
+        toolsUsed: string[];
+        toolExecutions: Array<{
+            name: string;
+            input: StandardRecord;
+            output: unknown;
+        }>;
+    };
+    /**
+     * Format the enhanced result
+     */
+    formatEnhancedResult(generateResult: Awaited<ReturnType<typeof generateText>>, tools: Record<string, Tool>, toolsUsed: string[], toolExecutions: Array<{
+        name: string;
+        input: StandardRecord;
+        output: unknown;
+    }>, options: TextGenerationOptions): EnhancedGenerateResult;
+    /**
+     * Analyze AI response structure and log detailed debugging information
+     */
+    analyzeAIResponse(result: Record<string, unknown>): void;
+}
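Based on the constructor and method signatures declared above, a provider supplies its own capability, telemetry, and storage callbacks and then runs the generate/extract/format sequence. A minimal TypeScript sketch, assuming the deep dist import path resolves and with stubbed callbacks and illustrative provider/model values (none of this wiring is taken from the package itself):

    import type { LanguageModelV1, CoreMessage, Tool } from "ai";
    // Deep import path shown for illustration; the module lives at dist/core/modules/GenerationHandler.js.
    import { GenerationHandler } from "@juspay/neurolink/dist/core/modules/GenerationHandler.js";

    async function generateWithHandler(
      model: LanguageModelV1,
      messages: CoreMessage[],
      tools: Record<string, Tool>,
      options: Parameters<GenerationHandler["executeGeneration"]>[3],
    ) {
      // Callbacks are normally provided by the owning provider (BaseProvider); stubbed here.
      const handler = new GenerationHandler(
        "openai",          // providerName (assumed to be a valid AIProviderName)
        "gpt-4o-mini",     // modelName (illustrative)
        () => true,        // supportsToolsFn
        () => undefined,   // getTelemetryConfigFn: telemetry disabled
        async () => {},    // handleToolStorageFn: no-op storage
      );

      const result = await handler.executeGeneration(model, messages, tools, options);
      handler.logGenerationComplete(result);
      const { toolsUsed, toolExecutions } = handler.extractToolInformation(result);
      return handler.formatEnhancedResult(result, tools, toolsUsed, toolExecutions, options);
    }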
@@ -0,0 +1,230 @@
+/**
+ * Generation Handler Module
+ *
+ * Handles text generation execution, result formatting, and tool information extraction.
+ * Extracted from BaseProvider to follow Single Responsibility Principle.
+ *
+ * Responsibilities:
+ * - Generation execution with AI SDK
+ * - Tool information extraction
+ * - Result formatting and enhancement
+ * - Response analysis and logging
+ *
+ * @module core/modules/GenerationHandler
+ */
+import { generateText, Output } from "ai";
+import { logger } from "../../utils/logger.js";
+import { DEFAULT_MAX_STEPS } from "../constants.js";
+/**
+ * GenerationHandler class - Handles text generation operations for AI providers
+ */
+export class GenerationHandler {
+    providerName;
+    modelName;
+    supportsToolsFn;
+    getTelemetryConfigFn;
+    handleToolStorageFn;
+    constructor(providerName, modelName, supportsToolsFn, getTelemetryConfigFn, handleToolStorageFn) {
+        this.providerName = providerName;
+        this.modelName = modelName;
+        this.supportsToolsFn = supportsToolsFn;
+        this.getTelemetryConfigFn = getTelemetryConfigFn;
+        this.handleToolStorageFn = handleToolStorageFn;
+    }
+    /**
+     * Execute the generation with AI SDK
+     */
+    async executeGeneration(model, messages, tools, options) {
+        const shouldUseTools = !options.disableTools && this.supportsToolsFn();
+        const useStructuredOutput = !!options.schema &&
+            (options.output?.format === "json" ||
+                options.output?.format === "structured");
+        return await generateText({
+            model,
+            messages,
+            tools,
+            maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+            toolChoice: shouldUseTools ? "auto" : "none",
+            temperature: options.temperature,
+            maxTokens: options.maxTokens,
+            ...(useStructuredOutput &&
+                options.schema && {
+                experimental_output: Output.object({ schema: options.schema }),
+            }),
+            experimental_telemetry: this.getTelemetryConfigFn(options, "generate"),
+            onStepFinish: ({ toolCalls, toolResults }) => {
+                logger.info("Tool execution completed", { toolResults, toolCalls });
+                // Handle tool execution storage
+                this.handleToolStorageFn(toolCalls, toolResults, options, new Date()).catch((error) => {
+                    logger.warn("[GenerationHandler] Failed to store tool executions", {
+                        provider: this.providerName,
+                        error: error instanceof Error ? error.message : String(error),
+                    });
+                });
+            },
+        });
+    }
+    /**
+     * Log generation completion information
+     */
+    logGenerationComplete(generateResult) {
+        logger.debug(`generateText completed`, {
+            provider: this.providerName,
+            model: this.modelName,
+            responseLength: generateResult.text?.length || 0,
+            toolResultsCount: generateResult.toolResults?.length || 0,
+            finishReason: generateResult.finishReason,
+            usage: generateResult.usage,
+            timestamp: Date.now(),
+        });
+    }
+    /**
+     * Extract tool information from generation result
+     */
+    extractToolInformation(generateResult) {
+        const toolsUsed = [];
+        const toolExecutions = [];
+        // Extract tool names from tool calls
+        if (generateResult.toolCalls && generateResult.toolCalls.length > 0) {
+            toolsUsed.push(...generateResult.toolCalls.map((tc) => {
+                return tc.toolName || tc.name || "unknown";
+            }));
+        }
+        // Extract from steps
+        if (generateResult.steps &&
+            Array.isArray(generateResult.steps)) {
+            const toolCallArgsMap = new Map();
+            for (const step of generateResult
+                .steps || []) {
+                // Collect tool calls and their arguments
+                if (step?.toolCalls && Array.isArray(step.toolCalls)) {
+                    for (const toolCall of step.toolCalls) {
+                        const tcRecord = toolCall;
+                        const toolName = tcRecord.toolName ||
+                            tcRecord.name ||
+                            "unknown";
+                        const toolId = tcRecord.toolCallId ||
+                            tcRecord.id ||
+                            toolName;
+                        toolsUsed.push(toolName);
+                        let callArgs = {};
+                        if (tcRecord.args) {
+                            callArgs = tcRecord.args;
+                        }
+                        else if (tcRecord.arguments) {
+                            callArgs = tcRecord.arguments;
+                        }
+                        else if (tcRecord.parameters) {
+                            callArgs = tcRecord.parameters;
+                        }
+                        toolCallArgsMap.set(toolId, callArgs);
+                        toolCallArgsMap.set(toolName, callArgs);
+                    }
+                }
+                // Process tool results
+                if (step?.toolResults && Array.isArray(step.toolResults)) {
+                    for (const toolResult of step.toolResults) {
+                        const trRecord = toolResult;
+                        const toolName = trRecord.toolName || "unknown";
+                        const toolId = trRecord.toolCallId || trRecord.id;
+                        let toolArgs = {};
+                        if (trRecord.args) {
+                            toolArgs = trRecord.args;
+                        }
+                        else if (trRecord.arguments) {
+                            toolArgs = trRecord.arguments;
+                        }
+                        else if (trRecord.parameters) {
+                            toolArgs = trRecord.parameters;
+                        }
+                        else if (trRecord.input) {
+                            toolArgs = trRecord.input;
+                        }
+                        else {
+                            toolArgs = toolCallArgsMap.get(toolId || toolName) || {};
+                        }
+                        toolExecutions.push({
+                            name: toolName,
+                            input: toolArgs,
+                            output: trRecord.result ?? "success",
+                        });
+                    }
+                }
+            }
+        }
+        return { toolsUsed: [...new Set(toolsUsed)], toolExecutions };
+    }
+    /**
+     * Format the enhanced result
+     */
+    formatEnhancedResult(generateResult, tools, toolsUsed, toolExecutions, options) {
+        // Structured output check
+        const useStructuredOutput = !!options.schema &&
+            (options.output?.format === "json" ||
+                options.output?.format === "structured");
+        const content = useStructuredOutput
+            ? JSON.stringify(generateResult.experimental_output)
+            : generateResult.text;
+        return {
+            content,
+            usage: {
+                input: generateResult.usage?.promptTokens || 0,
+                output: generateResult.usage?.completionTokens || 0,
+                total: generateResult.usage?.totalTokens || 0,
+            },
+            provider: this.providerName,
+            model: this.modelName,
+            toolCalls: generateResult.toolCalls
+                ? generateResult.toolCalls.map((tc) => ({
+                    toolCallId: tc.toolCallId || "unknown",
+                    toolName: tc.toolName || "unknown",
+                    args: tc.args || {},
+                }))
+                : [],
+            toolResults: generateResult.toolResults || [],
+            toolsUsed,
+            toolExecutions,
+            availableTools: Object.keys(tools).map((name) => {
+                const tool = tools[name];
+                return {
+                    name,
+                    description: tool.description || "No description available",
+                    parameters: tool.parameters || {},
+                    server: tool.serverId || "direct",
+                };
+            }),
+        };
+    }
+    /**
+     * Analyze AI response structure and log detailed debugging information
+     */
+    analyzeAIResponse(result) {
+        logger.debug("NeuroLink Raw AI Response Analysis", {
+            provider: this.providerName,
+            model: this.modelName,
+            responseTextLength: result.text?.length || 0,
+            responsePreview: result.text?.substring(0, 500) + "...",
+            finishReason: result.finishReason,
+            usage: result.usage,
+        });
+        // Tool calls analysis
+        const toolCallsAnalysis = {
+            hasToolCalls: !!result.toolCalls,
+            toolCallsLength: result.toolCalls?.length || 0,
+            toolCalls: result.toolCalls?.map((toolCall, index) => {
+                const tcRecord = toolCall;
+                const toolName = tcRecord.toolName || tcRecord.name || "unknown";
+                return {
+                    index: index + 1,
+                    toolName,
+                    toolId: tcRecord.toolCallId || tcRecord.id || "none",
+                    hasArgs: !!tcRecord.args,
+                    argsKeys: tcRecord.args && typeof tcRecord.args === "object"
+                        ? Object.keys(tcRecord.args)
+                        : [],
+                };
+            }) || [],
+        };
+        logger.debug("Tool Calls Analysis", toolCallsAnalysis);
+    }
+}
@@ -0,0 +1,39 @@
+/**
+ * Message Builder Module
+ *
+ * Handles all message construction logic for AI providers.
+ * Extracted from BaseProvider to follow Single Responsibility Principle.
+ *
+ * Responsibilities:
+ * - Building messages from text generation options
+ * - Building messages from stream options
+ * - Multimodal input detection
+ * - Message format conversion (to CoreMessage[])
+ *
+ * @module core/modules/MessageBuilder
+ */
+import type { CoreMessage } from "ai";
+import type { TextGenerationOptions, AIProviderName } from "../../types/index.js";
+import type { StreamOptions } from "../../types/streamTypes.js";
+/**
+ * MessageBuilder class - Handles message construction for AI providers
+ */
+export declare class MessageBuilder {
+    private readonly providerName;
+    private readonly modelName;
+    constructor(providerName: AIProviderName, modelName: string);
+    /**
+     * Build messages array for generation
+     * Detects multimodal input and routes to appropriate message builder
+     */
+    buildMessages(options: TextGenerationOptions): Promise<CoreMessage[]>;
+    /**
+     * Build messages array for streaming operations
+     * This is a protected helper method that providers can use to build messages
+     * with automatic multimodal detection, eliminating code duplication
+     *
+     * @param options - Stream options or text generation options
+     * @returns Promise resolving to CoreMessage array ready for AI SDK
+     */
+    buildMessagesForStream(options: StreamOptions | TextGenerationOptions): Promise<CoreMessage[]>;
+}
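A usage sketch based only on the declaration above: the builder detects multimodal fields (images, content, csvFiles, pdfFiles, files) on the input and routes to the multimodal path, otherwise it builds standard messages. The import path, provider/model values, and the exact shape of the image entries are assumptions; the accepted image formats are defined by the new multimodal.d.ts types, which are not reproduced here.

    // Deep import path shown for illustration; the module lives at dist/core/modules/MessageBuilder.js.
    import { MessageBuilder } from "@juspay/neurolink/dist/core/modules/MessageBuilder.js";

    const builder = new MessageBuilder("openai", "gpt-4o-mini");

    // Text-only input -> standard message building.
    const textMessages = await builder.buildMessages({
      input: { text: "Summarize this repository" },
    });

    // Input with an image attached -> multimodal detection kicks in.
    const multimodalMessages = await builder.buildMessagesForStream({
      input: { text: "Describe this chart", images: ["./chart.png"] }, // image format is a placeholder
    });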
@@ -0,0 +1,179 @@
+/**
+ * Message Builder Module
+ *
+ * Handles all message construction logic for AI providers.
+ * Extracted from BaseProvider to follow Single Responsibility Principle.
+ *
+ * Responsibilities:
+ * - Building messages from text generation options
+ * - Building messages from stream options
+ * - Multimodal input detection
+ * - Message format conversion (to CoreMessage[])
+ *
+ * @module core/modules/MessageBuilder
+ */
+import { logger } from "../../utils/logger.js";
+import { buildMessagesArray, buildMultimodalMessagesArray, } from "../../utils/messageBuilder.js";
+/**
+ * MessageBuilder class - Handles message construction for AI providers
+ */
+export class MessageBuilder {
+    providerName;
+    modelName;
+    constructor(providerName, modelName) {
+        this.providerName = providerName;
+        this.modelName = modelName;
+    }
+    /**
+     * Build messages array for generation
+     * Detects multimodal input and routes to appropriate message builder
+     */
+    async buildMessages(options) {
+        const hasMultimodalInput = (opts) => {
+            const input = opts.input;
+            const hasImages = !!input?.images?.length;
+            const hasContent = !!input?.content?.length;
+            const hasCSVFiles = !!input?.csvFiles?.length;
+            const hasPdfFiles = !!input?.pdfFiles?.length;
+            const hasFiles = !!input?.files?.length;
+            return hasImages || hasContent || hasCSVFiles || hasPdfFiles || hasFiles;
+        };
+        let messages;
+        if (hasMultimodalInput(options)) {
+            if (process.env.NEUROLINK_DEBUG === "true") {
+                logger.debug("Detected multimodal input, using multimodal message builder");
+            }
+            const input = options.input;
+            const multimodalOptions = {
+                input: {
+                    text: options.prompt || options.input?.text || "",
+                    images: input?.images,
+                    content: input?.content,
+                    csvFiles: input?.csvFiles,
+                    pdfFiles: input?.pdfFiles,
+                    files: input?.files,
+                },
+                csvOptions: options.csvOptions,
+                provider: options.provider,
+                model: options.model,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt,
+                enableAnalytics: options.enableAnalytics,
+                enableEvaluation: options.enableEvaluation,
+                context: options.context,
+                conversationHistory: options.conversationMessages,
+            };
+            messages = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+        }
+        else {
+            if (process.env.NEUROLINK_DEBUG === "true") {
+                logger.debug("No multimodal input detected, using standard message builder");
+            }
+            messages = await buildMessagesArray(options);
+        }
+        // Convert messages to Vercel AI SDK format
+        return messages.map((msg) => {
+            if (typeof msg.content === "string") {
+                return {
+                    role: msg.role,
+                    content: msg.content,
+                };
+            }
+            else {
+                return {
+                    role: msg.role,
+                    content: msg.content.map((item) => {
+                        if (item.type === "text") {
+                            return { type: "text", text: item.text || "" };
+                        }
+                        else if (item.type === "image") {
+                            return { type: "image", image: item.image || "" };
+                        }
+                        return item;
+                    }),
+                };
+            }
+        });
+    }
+    /**
+     * Build messages array for streaming operations
+     * This is a protected helper method that providers can use to build messages
+     * with automatic multimodal detection, eliminating code duplication
+     *
+     * @param options - Stream options or text generation options
+     * @returns Promise resolving to CoreMessage array ready for AI SDK
+     */
+    async buildMessagesForStream(options) {
+        // Detect multimodal input
+        const hasMultimodalInput = (opts) => {
+            const input = opts.input;
+            const hasImages = !!input?.images?.length;
+            const hasContent = !!input?.content?.length;
+            const hasCSVFiles = !!input?.csvFiles?.length;
+            const hasPdfFiles = !!input?.pdfFiles?.length;
+            const hasFiles = !!input?.files?.length;
+            return hasImages || hasContent || hasCSVFiles || hasPdfFiles || hasFiles;
+        };
+        let messages;
+        if (hasMultimodalInput(options)) {
+            if (process.env.NEUROLINK_DEBUG === "true") {
+                logger.debug(`${this.providerName}: Detected multimodal input, using multimodal message builder`);
+            }
+            const input = options.input;
+            const multimodalOptions = {
+                input: {
+                    text: options.prompt ||
+                        options.input?.text ||
+                        "",
+                    images: input?.images,
+                    content: input?.content,
+                    csvFiles: input?.csvFiles,
+                    pdfFiles: input?.pdfFiles,
+                    files: input?.files,
+                },
+                csvOptions: options.csvOptions,
+                provider: options.provider,
+                model: options.model,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt,
+                enableAnalytics: options.enableAnalytics,
+                enableEvaluation: options.enableEvaluation,
+                context: options.context,
+                conversationHistory: options
+                    .conversationMessages,
+            };
+            messages = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+        }
+        else {
+            if (process.env.NEUROLINK_DEBUG === "true") {
+                logger.debug(`${this.providerName}: No multimodal input detected, using standard message builder`);
+            }
+            messages = await buildMessagesArray(options);
+        }
+        // Convert messages to Vercel AI SDK format
+        return messages.map((msg) => {
+            if (typeof msg.content === "string") {
+                return {
+                    role: msg.role,
+                    content: msg.content,
+                };
+            }
+            else {
+                return {
+                    role: msg.role,
+                    content: msg.content.map((item) => {
+                        if (item.type === "text") {
+                            return { type: "text", text: item.text || "" };
+                        }
+                        else if (item.type === "image") {
+                            return { type: "image", image: item.image || "" };
+                        }
+                        return item;
+                    }),
+                };
+            }
+        });
+    }
+}
@@ -0,0 +1,52 @@
+/**
+ * Stream Handler Module
+ *
+ * Handles streaming-related validation, result creation, and analytics.
+ * Extracted from BaseProvider to follow Single Responsibility Principle.
+ *
+ * Responsibilities:
+ * - Stream options validation
+ * - Text stream creation
+ * - Stream result formatting
+ * - Stream analytics creation
+ *
+ * @module core/modules/StreamHandler
+ */
+import type { StreamOptions, StreamResult } from "../../types/streamTypes.js";
+import type { UnknownRecord } from "../../types/common.js";
+import type { AIProviderName } from "../../types/index.js";
+/**
+ * StreamHandler class - Handles streaming operations for AI providers
+ */
+export declare class StreamHandler {
+    private readonly providerName;
+    private readonly modelName;
+    constructor(providerName: AIProviderName, modelName: string);
+    /**
+     * Validate stream options - consolidates validation from 7/10 providers
+     */
+    validateStreamOptions(options: StreamOptions): void;
+    /**
+     * Create text stream transformation - consolidates identical logic from 7/10 providers
+     */
+    createTextStream(result: {
+        textStream: AsyncIterable<string>;
+    }): AsyncGenerator<{
+        content: string;
+    }>;
+    /**
+     * Create standardized stream result - consolidates result structure
+     */
+    createStreamResult(stream: AsyncGenerator<{
+        content: string;
+    }>, additionalProps?: Partial<StreamResult>): StreamResult;
+    /**
+     * Create stream analytics - consolidates analytics from 4/10 providers
+     */
+    createStreamAnalytics(result: UnknownRecord, startTime: number, options: StreamOptions): Promise<UnknownRecord | undefined>;
+    /**
+     * Validate streaming-only options (called before executeStream)
+     * Simpler validation for options object structure
+     */
+    validateStreamOptionsOnly(options: StreamOptions): void;
+}
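From the declaration above, a provider's stream path would roughly be: validate the options, wrap the AI SDK text stream into { content } chunks, and return a standardized StreamResult. A hedged TypeScript sketch of that sequence; the import paths, provider/model values, and the shape of the SDK result argument are assumptions rather than code from the package:

    // Deep import paths shown for illustration; the modules live under dist/core/modules and dist/types.
    import { StreamHandler } from "@juspay/neurolink/dist/core/modules/StreamHandler.js";
    import type { StreamOptions } from "@juspay/neurolink/dist/types/streamTypes.js";

    async function streamWithHandler(
      options: StreamOptions,
      sdkResult: { textStream: AsyncIterable<string> }, // e.g. the result of the AI SDK's streamText()
    ) {
      const handler = new StreamHandler("openai", "gpt-4o-mini"); // illustrative provider/model
      handler.validateStreamOptions(options);

      // Wrap the raw text stream into { content } chunks and a standardized result.
      const chunks = handler.createTextStream(sdkResult);
      return handler.createStreamResult(chunks);
    }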