@juspay/neurolink 8.19.0 → 8.20.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/adapters/providerImageAdapter.d.ts +12 -0
  3. package/dist/adapters/providerImageAdapter.js +30 -3
  4. package/dist/cli/loop/optionsSchema.js +4 -0
  5. package/dist/config/conversationMemory.d.ts +17 -1
  6. package/dist/config/conversationMemory.js +37 -10
  7. package/dist/core/baseProvider.js +23 -13
  8. package/dist/core/conversationMemoryFactory.js +0 -3
  9. package/dist/core/conversationMemoryInitializer.js +1 -9
  10. package/dist/core/conversationMemoryManager.d.ts +31 -8
  11. package/dist/core/conversationMemoryManager.js +174 -80
  12. package/dist/core/modules/GenerationHandler.d.ts +5 -0
  13. package/dist/core/modules/GenerationHandler.js +56 -9
  14. package/dist/core/redisConversationMemoryManager.d.ts +28 -13
  15. package/dist/core/redisConversationMemoryManager.js +211 -121
  16. package/dist/lib/adapters/providerImageAdapter.d.ts +12 -0
  17. package/dist/lib/adapters/providerImageAdapter.js +30 -3
  18. package/dist/lib/config/conversationMemory.d.ts +17 -1
  19. package/dist/lib/config/conversationMemory.js +37 -10
  20. package/dist/lib/core/baseProvider.js +23 -13
  21. package/dist/lib/core/conversationMemoryFactory.js +0 -3
  22. package/dist/lib/core/conversationMemoryInitializer.js +1 -9
  23. package/dist/lib/core/conversationMemoryManager.d.ts +31 -8
  24. package/dist/lib/core/conversationMemoryManager.js +174 -80
  25. package/dist/lib/core/modules/GenerationHandler.d.ts +5 -0
  26. package/dist/lib/core/modules/GenerationHandler.js +56 -9
  27. package/dist/lib/core/redisConversationMemoryManager.d.ts +28 -13
  28. package/dist/lib/core/redisConversationMemoryManager.js +211 -121
  29. package/dist/lib/mcp/servers/agent/directToolsServer.js +5 -0
  30. package/dist/lib/mcp/toolRegistry.js +5 -0
  31. package/dist/lib/neurolink.js +29 -22
  32. package/dist/lib/types/conversation.d.ts +58 -9
  33. package/dist/lib/types/generateTypes.d.ts +1 -0
  34. package/dist/lib/types/sdkTypes.d.ts +1 -1
  35. package/dist/lib/types/streamTypes.d.ts +1 -0
  36. package/dist/lib/utils/conversationMemory.d.ts +43 -1
  37. package/dist/lib/utils/conversationMemory.js +181 -5
  38. package/dist/lib/utils/conversationMemoryUtils.js +16 -1
  39. package/dist/lib/utils/fileDetector.d.ts +25 -0
  40. package/dist/lib/utils/fileDetector.js +433 -10
  41. package/dist/lib/utils/messageBuilder.js +6 -2
  42. package/dist/lib/utils/redis.js +0 -5
  43. package/dist/mcp/servers/agent/directToolsServer.js +5 -0
  44. package/dist/mcp/toolRegistry.js +5 -0
  45. package/dist/neurolink.js +29 -22
  46. package/dist/types/conversation.d.ts +58 -9
  47. package/dist/types/generateTypes.d.ts +1 -0
  48. package/dist/types/sdkTypes.d.ts +1 -1
  49. package/dist/types/streamTypes.d.ts +1 -0
  50. package/dist/utils/conversationMemory.d.ts +43 -1
  51. package/dist/utils/conversationMemory.js +181 -5
  52. package/dist/utils/conversationMemoryUtils.js +16 -1
  53. package/dist/utils/fileDetector.d.ts +25 -0
  54. package/dist/utils/fileDetector.js +433 -10
  55. package/dist/utils/messageBuilder.js +6 -2
  56. package/dist/utils/redis.js +0 -5
  57. package/package.json +1 -1
@@ -11,14 +11,10 @@ export type ConversationMemoryConfig = {
11
11
  enabled: boolean;
12
12
  /** Maximum number of sessions to keep in memory (default: 50) */
13
13
  maxSessions?: number;
14
- /** Maximum number of conversation turns to keep per session (default: 20) */
15
- maxTurnsPerSession?: number;
16
14
  /** Enable automatic summarization */
17
15
  enableSummarization?: boolean;
18
- /** Turn count to trigger summarization */
19
- summarizationThresholdTurns?: number;
20
- /** Target turn count for the summary */
21
- summarizationTargetTurns?: number;
16
+ /** Token threshold to trigger summarization (optional - defaults to 80% of model context) */
17
+ tokenThreshold?: number;
22
18
  /** Provider to use for summarization */
23
19
  summarizationProvider?: string;
24
20
  /** Model to use for summarization */
@@ -29,6 +25,12 @@ export type ConversationMemoryConfig = {
29
25
  mem0Config?: Mem0Config;
30
26
  /** Redis configuration (optional) - overrides environment variables */
31
27
  redisConfig?: RedisStorageConfig;
28
+ /** @deprecated Use tokenThreshold instead - Maximum number of conversation turns to keep per session (default: 20) */
29
+ maxTurnsPerSession?: number;
30
+ /** @deprecated Use tokenThreshold instead - Turn count to trigger summarization */
31
+ summarizationThresholdTurns?: number;
32
+ /** @deprecated Use tokenThreshold instead - Target turn count for the summary */
33
+ summarizationTargetTurns?: number;
32
34
  };
33
35
  /**
34
36
  * Complete memory for a conversation session
@@ -47,6 +49,16 @@ export type SessionMemory = {
47
49
  createdAt: number;
48
50
  /** When this session was last active */
49
51
  lastActivity: number;
52
+ /** Pointer to last summarized message ID (NEW - for token-based memory) */
53
+ summarizedUpToMessageId?: string;
54
+ /** Stored summary message that condenses conversation history up to summarizedUpToMessageId */
55
+ summarizedMessage?: string;
56
+ /** Per-session token threshold override (NEW - for token-based memory) */
57
+ tokenThreshold?: number;
58
+ /** Cached token count for performance (NEW - for token-based memory) */
59
+ lastTokenCount?: number;
60
+ /** When token count was last calculated (NEW - for token-based memory) */
61
+ lastCountedAt?: number;
50
62
  /** Optional session metadata */
51
63
  metadata?: {
52
64
  /** User role or permissions */
@@ -70,13 +82,13 @@ export type ConversationMemoryStats = {
70
82
  * Chat message format for conversation history
71
83
  */
72
84
  export type ChatMessage = {
85
+ /** Unique message identifier (required for token-based memory) */
86
+ id: string;
73
87
  /** Role/type of the message */
74
88
  role: "user" | "assistant" | "system" | "tool_call" | "tool_result";
75
89
  /** Content of the message */
76
90
  content: string;
77
- /** Message ID (optional) - for new format */
78
- id?: string;
79
- /** Timestamp (optional) - for new format */
91
+ /** Timestamp (ISO string) */
80
92
  timestamp?: string;
81
93
  /** Tool name (optional) - for tool_call/tool_result messages */
82
94
  tool?: string;
@@ -90,6 +102,17 @@ export type ChatMessage = {
90
102
  type?: string;
91
103
  error?: string;
92
104
  };
105
+ /** Message metadata (NEW - for token-based memory) */
106
+ metadata?: {
107
+ /** Is this a summary message? */
108
+ isSummary?: boolean;
109
+ /** First message ID that this summary covers */
110
+ summarizesFrom?: string;
111
+ /** Last message ID that this summary covers */
112
+ summarizesTo?: string;
113
+ /** Was this message truncated due to token limits? */
114
+ truncated?: boolean;
115
+ };
93
116
  };
94
117
  /**
95
118
  * Multimodal message types - Re-exported from multimodal.ts
@@ -152,6 +175,18 @@ export type SessionIdentifier = {
152
175
  sessionId: string;
153
176
  userId?: string;
154
177
  };
178
+ /**
179
+ * Options for storing a conversation turn
180
+ */
181
+ export type StoreConversationTurnOptions = {
182
+ sessionId: string;
183
+ userId?: string;
184
+ userMessage: string;
185
+ aiResponse: string;
186
+ startTimeStamp?: Date;
187
+ providerDetails?: ProviderDetails;
188
+ enableSummarization?: boolean;
189
+ };
155
190
  /**
156
191
  * Lightweight session metadata for efficient session listing
157
192
  * Contains only essential information without heavy message arrays
@@ -179,6 +214,16 @@ export type ConversationBase = {
179
214
  createdAt: string;
180
215
  /** When this conversation was last updated */
181
216
  updatedAt: string;
217
+ /** Pointer to last summarized message (token-based memory) */
218
+ summarizedUpToMessageId?: string;
219
+ /** Stored summary message that condenses conversation history up to summarizedUpToMessageId */
220
+ summarizedMessage?: string;
221
+ /** Per-session token threshold override */
222
+ tokenThreshold?: number;
223
+ /** Cached token count for efficiency */
224
+ lastTokenCount?: number;
225
+ /** Timestamp of last token count */
226
+ lastCountedAt?: number;
182
227
  };
183
228
  /**
184
229
  * Redis conversation storage object format
@@ -250,3 +295,7 @@ export type RedisStorageConfig = {
250
295
  [key: string]: string | number | boolean | undefined;
251
296
  };
252
297
  };
298
+ export type ProviderDetails = {
299
+ provider: string;
300
+ model: string;
301
+ };
@@ -350,6 +350,7 @@ export type TextGenerationOptions = {
350
350
  formatStyle?: "raw" | "markdown" | "json";
351
351
  includeHeaders?: boolean;
352
352
  };
353
+ enableSummarization?: boolean;
353
354
  };
354
355
  /**
355
356
  * Text generation result (consolidated from core types)
@@ -22,4 +22,4 @@ export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonA
22
22
  export type { EvaluationData, EvaluationContext, EnhancedEvaluationResult, EvaluationRequest, EvaluationCriteria, } from "./evaluation.js";
23
23
  export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
24
24
  export type { DomainType, DomainConfig, DomainTemplate, DomainConfigOptions, DomainEvaluationCriteria, DomainValidationRule, } from "./domainTypes.js";
25
- export type { ConversationMemoryConfig, SessionMemory, ConversationMemoryStats, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";
25
+ export type { ConversationMemoryConfig, SessionMemory, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";
@@ -299,6 +299,7 @@ export type StreamOptions = {
299
299
  };
300
300
  conversationMessages?: ChatMessage[];
301
301
  middleware?: MiddlewareFactoryOptions;
302
+ enableSummarization?: boolean;
302
303
  };
303
304
  /**
304
305
  * Stream function result type - Primary output format for streaming
@@ -2,7 +2,7 @@
2
2
  * Conversation Memory Utilities
3
3
  * Handles configuration merging and conversation memory operations
4
4
  */
5
- import type { ConversationMemoryConfig, ChatMessage } from "../types/conversation.js";
5
+ import type { ConversationMemoryConfig, ChatMessage, SessionMemory } from "../types/conversation.js";
6
6
  import type { ConversationMemoryManager } from "../core/conversationMemoryManager.js";
7
7
  import type { RedisConversationMemoryManager } from "../core/redisConversationMemoryManager.js";
8
8
  import type { TextGenerationOptions, TextGenerationResult } from "../types/generateTypes.js";
@@ -20,3 +20,45 @@ export declare function getConversationMessages(conversationMemory: Conversation
20
20
  * Saves user messages and AI responses for conversation memory
21
21
  */
22
22
  export declare function storeConversationTurn(conversationMemory: ConversationMemoryManager | RedisConversationMemoryManager | null | undefined, originalOptions: TextGenerationOptions, result: TextGenerationResult, startTimeStamp?: Date | undefined): Promise<void>;
23
+ /**
24
+ * Build context messages from pointer onwards (token-based memory)
25
+ * Returns summary message (if exists) + all messages after the summarized pointer
26
+ * @param session - Session memory with pointer
27
+ * @returns Context messages to send to LLM
28
+ */
29
+ export declare function buildContextFromPointer(session: SessionMemory): ChatMessage[];
30
+ /**
31
+ * Create summarization prompt from message history
32
+ * Used by both in-memory and Redis conversation managers
33
+ * @param history - Messages to summarize
34
+ * @param previousSummary - Optional previous summary to build upon
35
+ */
36
+ export declare function createSummarizationPrompt(history: ChatMessage[], previousSummary?: string): string;
37
+ /**
38
+ * Calculate token threshold based on model's output token limit
39
+ * Uses existing provider token limits as proxy for context window
40
+ * @param provider - AI provider name
41
+ * @param model - Model name
42
+ * @returns Token threshold (80% of model's token limit)
43
+ */
44
+ export declare function calculateTokenThreshold(provider: string, model: string): number;
45
+ /**
46
+ * Get effective token threshold for a session
47
+ * Priority: session override > env var > model-based (80%) > fallback
48
+ * @param provider - AI provider name
49
+ * @param model - Model name
50
+ * @param envOverride - Environment variable override
51
+ * @param sessionOverride - Per-session token threshold override
52
+ * @returns Effective token threshold
53
+ */
54
+ export declare function getEffectiveTokenThreshold(provider: string, model: string, envOverride?: number, sessionOverride?: number): number;
55
+ /**
56
+ * Generate summary using configured provider and model
57
+ * Centralized summarization logic used by both ConversationMemoryManager and RedisConversationMemoryManager
58
+ * @param messages - Messages to summarize
59
+ * @param config - Conversation memory configuration containing provider/model settings
60
+ * @param previousSummary - Optional previous summary to build upon
61
+ * @param logPrefix - Prefix for log messages (e.g., "[ConversationMemory]" or "[RedisConversationMemoryManager]")
62
+ * @returns Summary text or null if generation fails
63
+ */
64
+ export declare function generateSummary(messages: ChatMessage[], config: ConversationMemoryConfig, logPrefix?: string, previousSummary?: string): Promise<string | null>;
@@ -2,8 +2,10 @@
2
2
  * Conversation Memory Utilities
3
3
  * Handles configuration merging and conversation memory operations
4
4
  */
5
- import { getConversationMemoryDefaults } from "../config/conversationMemory.js";
5
+ import { getConversationMemoryDefaults, MEMORY_THRESHOLD_PERCENTAGE, DEFAULT_FALLBACK_THRESHOLD, } from "../config/conversationMemory.js";
6
+ import { TokenUtils } from "../constants/tokens.js";
6
7
  import { logger } from "./logger.js";
8
+ import { NeuroLink } from "../neurolink.js";
7
9
  /**
8
10
  * Apply conversation memory defaults to user configuration
9
11
  * Merges user config with environment variables and default values
@@ -34,8 +36,8 @@ export async function getConversationMessages(conversationMemory, options) {
34
36
  try {
35
37
  // Extract userId from context
36
38
  const userId = options.context?.userId;
37
- // Remove duplicate summarization logic - it should be handled in ConversationMemoryManager
38
- const messages = await conversationMemory.buildContextMessages(sessionId, userId);
39
+ const enableSummarization = options.enableSummarization ?? undefined;
40
+ const messages = await conversationMemory.buildContextMessages(sessionId, userId, enableSummarization);
39
41
  logger.debug("[conversationMemoryUtils] Conversation messages retrieved successfully", {
40
42
  sessionId,
41
43
  messageCount: messages.length,
@@ -100,9 +102,24 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
100
102
  return;
101
103
  }
102
104
  const userMessage = originalOptions.originalPrompt || originalOptions.prompt || "";
103
- const aiResponse = result.content;
105
+ const aiResponse = result.content ?? "";
106
+ let providerDetails = undefined;
107
+ if (result.provider && result.model) {
108
+ providerDetails = {
109
+ provider: result.provider,
110
+ model: result.model,
111
+ };
112
+ }
104
113
  try {
105
- await conversationMemory.storeConversationTurn(sessionId, userId, userMessage, aiResponse, startTimeStamp);
114
+ await conversationMemory.storeConversationTurn({
115
+ sessionId,
116
+ userId,
117
+ userMessage,
118
+ aiResponse,
119
+ startTimeStamp,
120
+ providerDetails,
121
+ enableSummarization: originalOptions.enableSummarization,
122
+ });
106
123
  logger.debug("[conversationMemoryUtils] Conversation turn stored successfully", {
107
124
  sessionId,
108
125
  userId,
@@ -121,3 +138,162 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
121
138
  });
122
139
  }
123
140
  }
141
+ /**
142
+ * Build context messages from pointer onwards (token-based memory)
143
+ * Returns summary message (if exists) + all messages after the summarized pointer
144
+ * @param session - Session memory with pointer
145
+ * @returns Context messages to send to LLM
146
+ */
147
+ export function buildContextFromPointer(session) {
148
+ if (!session.summarizedUpToMessageId || !session.summarizedMessage) {
149
+ return session.messages;
150
+ }
151
+ // TODO: find a better way to write this
152
+ const pointerIndex = session.messages.findIndex((msg) => msg.id === session.summarizedUpToMessageId);
153
+ if (pointerIndex === -1) {
154
+ logger.warn("Pointer message not found, returning all messages", {
155
+ sessionId: session.sessionId,
156
+ pointer: session.summarizedUpToMessageId,
157
+ totalMessages: session.messages.length,
158
+ });
159
+ return session.messages;
160
+ }
161
+ const messagesAfterPointer = session.messages.slice(pointerIndex + 1);
162
+ // Construct context: summary message + recent messages
163
+ const summaryMessage = {
164
+ id: `summary-${session.summarizedUpToMessageId}`,
165
+ role: "system",
166
+ content: `Previous conversation summary: ${session.summarizedMessage}`,
167
+ timestamp: new Date().toISOString(),
168
+ metadata: {
169
+ isSummary: true,
170
+ summarizesTo: session.summarizedUpToMessageId,
171
+ },
172
+ };
173
+ logger.debug("Building context with summary", {
174
+ sessionId: session.sessionId,
175
+ pointerIndex,
176
+ messagesAfterPointer: messagesAfterPointer.length,
177
+ totalMessages: session.messages.length,
178
+ summaryLength: session.summarizedMessage.length,
179
+ });
180
+ return [summaryMessage, ...messagesAfterPointer];
181
+ }
182
+ /**
183
+ * Create summarization prompt from message history
184
+ * Used by both in-memory and Redis conversation managers
185
+ * @param history - Messages to summarize
186
+ * @param previousSummary - Optional previous summary to build upon
187
+ */
188
+ export function createSummarizationPrompt(history, previousSummary) {
189
+ const formattedHistory = history
190
+ .map((msg) => `${msg.role}: ${msg.content}`)
191
+ .join("\n\n");
192
+ const previousSummarySection = previousSummary
193
+ ? `Previous Summary:
194
+ ---
195
+ ${previousSummary}
196
+ ---
197
+
198
+ `
199
+ : "";
200
+ return `
201
+ You are a context summarization AI. Your task is to condense the following conversation history for another AI assistant.
202
+ ${previousSummary ? "Build upon the previous summary and incorporate the new conversation turns below." : ""}
203
+ The summary must be a concise, third-person narrative that retains all critical information, including key entities, technical details, decisions made, and any specific dates or times mentioned.
204
+ Ensure the summary flows logically and is ready to be used as context for the next turn in the conversation.
205
+
206
+ ${previousSummarySection}Conversation History to Summarize:
207
+ ---
208
+ ${formattedHistory}
209
+ ---
210
+ `.trim();
211
+ }
212
+ /**
213
+ * Calculate token threshold based on model's output token limit
214
+ * Uses existing provider token limits as proxy for context window
215
+ * @param provider - AI provider name
216
+ * @param model - Model name
217
+ * @returns Token threshold (80% of model's token limit)
218
+ */
219
+ export function calculateTokenThreshold(provider, model) {
220
+ try {
221
+ // Get model's token limit from existing TokenUtils
222
+ const modelTokenLimit = TokenUtils.getProviderTokenLimit(provider, model);
223
+ // Return 80% of token limit for conversation memory
224
+ // This is conservative since output limits are typically smaller than input limits
225
+ return Math.floor(modelTokenLimit * MEMORY_THRESHOLD_PERCENTAGE);
226
+ }
227
+ catch (error) {
228
+ logger.warn("Failed to calculate model threshold, using fallback", {
229
+ provider,
230
+ model,
231
+ error: error instanceof Error ? error.message : String(error),
232
+ });
233
+ return DEFAULT_FALLBACK_THRESHOLD;
234
+ }
235
+ }
236
+ /**
237
+ * Get effective token threshold for a session
238
+ * Priority: session override > env var > model-based (80%) > fallback
239
+ * @param provider - AI provider name
240
+ * @param model - Model name
241
+ * @param envOverride - Environment variable override
242
+ * @param sessionOverride - Per-session token threshold override
243
+ * @returns Effective token threshold
244
+ */
245
+ export function getEffectiveTokenThreshold(provider, model, envOverride, sessionOverride) {
246
+ // Priority 1: Session-level override
247
+ if (sessionOverride && sessionOverride > 0) {
248
+ return sessionOverride;
249
+ }
250
+ // Priority 2: Environment variable override
251
+ if (envOverride && envOverride > 0) {
252
+ return envOverride;
253
+ }
254
+ // Priority 3: Model-based calculation (80% of context window)
255
+ try {
256
+ return calculateTokenThreshold(provider, model);
257
+ }
258
+ catch (error) {
259
+ logger.warn("Failed to calculate effective threshold, using fallback", {
260
+ provider,
261
+ model,
262
+ error: error instanceof Error ? error.message : String(error),
263
+ });
264
+ // Priority 4: Fallback for unknown models
265
+ return DEFAULT_FALLBACK_THRESHOLD;
266
+ }
267
+ }
268
+ /**
269
+ * Generate summary using configured provider and model
270
+ * Centralized summarization logic used by both ConversationMemoryManager and RedisConversationMemoryManager
271
+ * @param messages - Messages to summarize
272
+ * @param config - Conversation memory configuration containing provider/model settings
273
+ * @param previousSummary - Optional previous summary to build upon
274
+ * @param logPrefix - Prefix for log messages (e.g., "[ConversationMemory]" or "[RedisConversationMemoryManager]")
275
+ * @returns Summary text or null if generation fails
276
+ */
277
+ export async function generateSummary(messages, config, logPrefix = "[ConversationMemory]", previousSummary) {
278
+ const summarizationPrompt = createSummarizationPrompt(messages, previousSummary);
279
+ const summarizer = new NeuroLink({
280
+ conversationMemory: { enabled: false },
281
+ });
282
+ try {
283
+ if (!config.summarizationProvider || !config.summarizationModel) {
284
+ logger.error(`${logPrefix} Missing summarization provider`);
285
+ return null;
286
+ }
287
+ const summaryResult = await summarizer.generate({
288
+ input: { text: summarizationPrompt },
289
+ provider: config.summarizationProvider,
290
+ model: config.summarizationModel,
291
+ disableTools: true,
292
+ });
293
+ return summaryResult.content || null;
294
+ }
295
+ catch (error) {
296
+ logger.error(`${logPrefix} Error generating summary`, { error });
297
+ return null;
298
+ }
299
+ }
@@ -58,8 +58,23 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
58
58
  if (typeof sessionId !== "string" || !sessionId) {
59
59
  return;
60
60
  }
61
+ let providerDetails = undefined;
62
+ if (result.provider && result.model) {
63
+ providerDetails = {
64
+ provider: result.provider,
65
+ model: result.model,
66
+ };
67
+ }
61
68
  try {
62
- await conversationMemory.storeConversationTurn(sessionId, userId, originalOptions.originalPrompt || originalOptions.prompt || "", result.content, startTimeStamp);
69
+ await conversationMemory.storeConversationTurn({
70
+ sessionId,
71
+ userId,
72
+ userMessage: originalOptions.originalPrompt || originalOptions.prompt || "",
73
+ aiResponse: result.content,
74
+ startTimeStamp,
75
+ providerDetails,
76
+ enableSummarization: originalOptions.enableSummarization,
77
+ });
63
78
  logger.debug("Conversation turn stored", {
64
79
  sessionId,
65
80
  userId,
@@ -31,6 +31,31 @@ export declare class FileDetector {
31
31
  * @returns Processed file result with type and content
32
32
  */
33
33
  static detectAndProcess(input: FileInput, options?: FileDetectorOptions): Promise<FileProcessingResult>;
34
+ /**
35
+ * Try fallback parsing for a specific file type
36
+ * Used when file detection returns "unknown" but we want to try parsing anyway
37
+ */
38
+ private static tryFallbackParsing;
39
+ /**
40
+ * Check if content is valid text (UTF-8, mostly printable)
41
+ */
42
+ private static isValidText;
43
+ /**
44
+ * Guess the MIME type for text content based on content patterns
45
+ */
46
+ private static guessTextMimeType;
47
+ /**
48
+ * Strict YAML detection for guessTextMimeType
49
+ * Similar to ContentHeuristicStrategy but requires at least 2 indicators
50
+ * to avoid false positives from simple key: value patterns
51
+ */
52
+ private static looksLikeYAMLStrict;
53
+ /**
54
+ * Strict XML detection for guessTextMimeType
55
+ * Ensures content has proper XML declaration or valid tag structure with closing tags
56
+ * Prevents false positives from arbitrary content starting with <
57
+ */
58
+ private static looksLikeXMLStrict;
34
59
  /**
35
60
  * Detect file type using multi-strategy approach
36
61
  * Stops at first strategy with confidence >= threshold (default: 80%)