@juspay/neurolink 7.36.0 → 7.37.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/cli/commands/config.d.ts +18 -18
  3. package/dist/cli/factories/commandFactory.d.ts +24 -0
  4. package/dist/cli/factories/commandFactory.js +297 -245
  5. package/dist/config/taskClassificationConfig.d.ts +51 -0
  6. package/dist/config/taskClassificationConfig.js +148 -0
  7. package/dist/core/baseProvider.d.ts +40 -3
  8. package/dist/core/baseProvider.js +689 -352
  9. package/dist/core/constants.d.ts +2 -30
  10. package/dist/core/constants.js +15 -43
  11. package/dist/factories/providerFactory.js +23 -6
  12. package/dist/index.d.ts +3 -2
  13. package/dist/index.js +4 -3
  14. package/dist/lib/config/taskClassificationConfig.d.ts +51 -0
  15. package/dist/lib/config/taskClassificationConfig.js +148 -0
  16. package/dist/lib/core/baseProvider.d.ts +40 -3
  17. package/dist/lib/core/baseProvider.js +689 -352
  18. package/dist/lib/core/constants.d.ts +2 -30
  19. package/dist/lib/core/constants.js +15 -43
  20. package/dist/lib/factories/providerFactory.js +23 -6
  21. package/dist/lib/index.d.ts +3 -2
  22. package/dist/lib/index.js +4 -3
  23. package/dist/lib/mcp/externalServerManager.js +2 -2
  24. package/dist/lib/mcp/registry.js +2 -2
  25. package/dist/lib/mcp/servers/agent/directToolsServer.js +19 -10
  26. package/dist/lib/mcp/toolRegistry.js +4 -8
  27. package/dist/lib/neurolink.d.ts +82 -27
  28. package/dist/lib/neurolink.js +672 -713
  29. package/dist/lib/providers/amazonBedrock.js +2 -2
  30. package/dist/lib/providers/googleVertex.d.ts +3 -23
  31. package/dist/lib/providers/googleVertex.js +14 -342
  32. package/dist/lib/providers/openAI.d.ts +23 -0
  33. package/dist/lib/providers/openAI.js +313 -6
  34. package/dist/lib/providers/sagemaker/language-model.d.ts +2 -2
  35. package/dist/lib/sdk/toolRegistration.js +18 -1
  36. package/dist/lib/types/common.d.ts +98 -0
  37. package/dist/lib/types/index.d.ts +2 -0
  38. package/dist/lib/types/index.js +2 -0
  39. package/dist/lib/types/streamTypes.d.ts +13 -6
  40. package/dist/lib/types/taskClassificationTypes.d.ts +52 -0
  41. package/dist/lib/types/taskClassificationTypes.js +5 -0
  42. package/dist/lib/types/typeAliases.d.ts +3 -2
  43. package/dist/lib/utils/modelRouter.d.ts +107 -0
  44. package/dist/lib/utils/modelRouter.js +292 -0
  45. package/dist/lib/utils/parameterValidation.js +6 -25
  46. package/dist/lib/utils/promptRedaction.d.ts +29 -0
  47. package/dist/lib/utils/promptRedaction.js +62 -0
  48. package/dist/lib/utils/schemaConversion.d.ts +14 -0
  49. package/dist/lib/utils/schemaConversion.js +140 -0
  50. package/dist/lib/utils/taskClassificationUtils.d.ts +55 -0
  51. package/dist/lib/utils/taskClassificationUtils.js +149 -0
  52. package/dist/lib/utils/taskClassifier.d.ts +23 -0
  53. package/dist/lib/utils/taskClassifier.js +94 -0
  54. package/dist/lib/utils/transformationUtils.js +143 -5
  55. package/dist/mcp/externalServerManager.js +2 -2
  56. package/dist/mcp/registry.js +2 -2
  57. package/dist/mcp/servers/agent/directToolsServer.js +19 -10
  58. package/dist/mcp/toolRegistry.js +4 -8
  59. package/dist/neurolink.d.ts +82 -27
  60. package/dist/neurolink.js +672 -713
  61. package/dist/providers/amazonBedrock.js +2 -2
  62. package/dist/providers/googleVertex.d.ts +3 -23
  63. package/dist/providers/googleVertex.js +14 -342
  64. package/dist/providers/openAI.d.ts +23 -0
  65. package/dist/providers/openAI.js +313 -6
  66. package/dist/providers/sagemaker/language-model.d.ts +2 -2
  67. package/dist/sdk/toolRegistration.js +18 -1
  68. package/dist/types/common.d.ts +98 -0
  69. package/dist/types/index.d.ts +2 -0
  70. package/dist/types/index.js +2 -0
  71. package/dist/types/streamTypes.d.ts +13 -6
  72. package/dist/types/taskClassificationTypes.d.ts +52 -0
  73. package/dist/types/taskClassificationTypes.js +5 -0
  74. package/dist/types/typeAliases.d.ts +3 -2
  75. package/dist/utils/modelRouter.d.ts +107 -0
  76. package/dist/utils/modelRouter.js +292 -0
  77. package/dist/utils/parameterValidation.js +6 -25
  78. package/dist/utils/promptRedaction.d.ts +29 -0
  79. package/dist/utils/promptRedaction.js +62 -0
  80. package/dist/utils/schemaConversion.d.ts +14 -0
  81. package/dist/utils/schemaConversion.js +140 -0
  82. package/dist/utils/taskClassificationUtils.d.ts +55 -0
  83. package/dist/utils/taskClassificationUtils.js +149 -0
  84. package/dist/utils/taskClassifier.d.ts +23 -0
  85. package/dist/utils/taskClassifier.js +94 -0
  86. package/dist/utils/transformationUtils.js +143 -5
  87. package/package.json +3 -2
@@ -953,7 +953,9 @@ export class CLICommandFactory {
  temperature: enhancedOptions.temperature,
  maxTokens: enhancedOptions.maxTokens,
  systemPrompt: enhancedOptions.systemPrompt,
- timeout: enhancedOptions.timeout,
+ timeout: enhancedOptions.timeout
+ ? enhancedOptions.timeout * 1000
+ : undefined,
  disableTools: enhancedOptions.disableTools,
  enableAnalytics: enhancedOptions.enableAnalytics,
  enableEvaluation: enhancedOptions.enableEvaluation,
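
Note: this change appears to reinterpret the CLI --timeout value as seconds and convert it to the millisecond value forwarded to the SDK call, instead of passing the raw number through; the same conversion appears again in the stream helpers below. A minimal TypeScript sketch of that conversion (the helper name is illustrative, not a neurolink export):

    // Sketch only: convert an optional CLI timeout given in seconds into the
    // millisecond value passed along in the request options; undefined means
    // "no timeout was requested".
    function timeoutToMillis(timeoutSeconds?: number): number | undefined {
      return timeoutSeconds ? timeoutSeconds * 1000 : undefined;
    }
    // e.g. `--timeout 30` on the CLI becomes 30000 ms in the request options.
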
@@ -1001,6 +1003,288 @@ export class CLICommandFactory {
  handleError(error, "Generation");
  }
  }
+ /**
+ * Process context for streaming
+ */
+ static async processStreamContext(argv, options) {
+ let inputText = argv.input;
+ let contextMetadata;
+ if (options.context && options.contextConfig) {
+ const processedContextResult = ContextFactory.processContext(options.context, options.contextConfig);
+ // Integrate context into prompt if configured
+ if (processedContextResult.processedContext) {
+ inputText = processedContextResult.processedContext + inputText;
+ }
+ // Add context metadata for analytics
+ contextMetadata = {
+ ...ContextFactory.extractAnalyticsContext(options.context),
+ contextMode: processedContextResult.config.mode,
+ contextTruncated: processedContextResult.metadata.truncated,
+ };
+ if (options.debug) {
+ logger.debug("Context processed for streaming:", {
+ mode: processedContextResult.config.mode,
+ truncated: processedContextResult.metadata.truncated,
+ processingTime: processedContextResult.metadata.processingTime,
+ });
+ }
+ }
+ return { inputText, contextMetadata };
+ }
+ /**
+ * Execute dry-run streaming simulation
+ */
+ static async executeDryRunStream(options, contextMetadata) {
+ if (!options.quiet) {
+ logger.always(chalk.blue("šŸ”„ Dry-run streaming..."));
+ }
+ // Simulate streaming output
+ const chunks = [
+ "Mock ",
+ "streaming ",
+ "response ",
+ "for ",
+ "testing ",
+ "purposes",
+ ];
+ let fullContent = "";
+ for (const chunk of chunks) {
+ process.stdout.write(chunk);
+ fullContent += chunk;
+ await new Promise((resolve) => setTimeout(resolve, 50)); // Simulate streaming delay
+ }
+ if (!options.quiet) {
+ process.stdout.write("\n");
+ }
+ // Mock analytics and evaluation for dry-run
+ if (options.enableAnalytics) {
+ const mockAnalytics = {
+ provider: options.provider || "auto",
+ model: options.model || "test-model",
+ requestDuration: 300,
+ tokenUsage: {
+ input: 10,
+ output: 15,
+ total: 25,
+ },
+ timestamp: new Date().toISOString(),
+ context: contextMetadata,
+ };
+ const mockGenerateResult = {
+ success: true,
+ content: fullContent,
+ analytics: mockAnalytics,
+ model: mockAnalytics.model,
+ toolsUsed: [],
+ };
+ const analyticsDisplay = this.formatAnalyticsForTextMode(mockGenerateResult);
+ logger.always(analyticsDisplay);
+ }
+ if (options.enableEvaluation) {
+ logger.always(chalk.blue("\nšŸ“Š Response Evaluation (Dry-run):"));
+ logger.always(` Relevance: 8/10`);
+ logger.always(` Accuracy: 9/10`);
+ logger.always(` Completeness: 8/10`);
+ logger.always(` Overall: 8.3/10`);
+ logger.always(` Reasoning: Test evaluation response`);
+ }
+ if (options.output) {
+ fs.writeFileSync(options.output, fullContent);
+ if (!options.quiet) {
+ logger.always(`\nOutput saved to ${options.output}`);
+ }
+ }
+ if (options.debug) {
+ logger.debug("\n" + chalk.yellow("Debug Information (Dry-run Streaming):"));
+ logger.debug("Provider:", options.provider || "auto");
+ logger.debug("Model:", options.model || "test-model");
+ logger.debug("Mode: DRY-RUN (no actual API calls made)");
+ }
+ if (!globalSession.getCurrentSessionId()) {
+ process.exit(0);
+ }
+ }
+ /**
+ * Execute real streaming with timeout handling
+ */
+ static async executeRealStream(argv, options, inputText, contextMetadata) {
+ const sdk = globalSession.getOrCreateNeuroLink();
+ const sessionVariables = globalSession.getSessionVariables();
+ const enhancedOptions = { ...options, ...sessionVariables };
+ const sessionId = globalSession.getCurrentSessionId();
+ const context = sessionId
+ ? { ...contextMetadata, sessionId }
+ : contextMetadata;
+ // Process CLI images if provided
+ const imageBuffers = CLICommandFactory.processCliImages(argv.image);
+ const stream = await sdk.stream({
+ input: imageBuffers
+ ? { text: inputText, images: imageBuffers }
+ : { text: inputText },
+ provider: enhancedOptions.provider,
+ model: enhancedOptions.model,
+ temperature: enhancedOptions.temperature,
+ maxTokens: enhancedOptions.maxTokens,
+ systemPrompt: enhancedOptions.systemPrompt,
+ timeout: enhancedOptions.timeout
+ ? enhancedOptions.timeout * 1000
+ : undefined,
+ disableTools: enhancedOptions.disableTools,
+ enableAnalytics: enhancedOptions.enableAnalytics,
+ enableEvaluation: enhancedOptions.enableEvaluation,
+ evaluationDomain: enhancedOptions.evaluationDomain,
+ toolUsageContext: enhancedOptions.toolUsageContext,
+ context: context,
+ factoryConfig: enhancedOptions.domain
+ ? {
+ domainType: enhancedOptions.domain,
+ enhancementType: "domain-configuration",
+ validateDomainData: true,
+ }
+ : undefined,
+ });
+ const fullContent = await this.processStreamWithTimeout(stream, options);
+ await this.displayStreamResults(stream, fullContent, options);
+ return fullContent;
+ }
+ /**
+ * Process stream with timeout handling
+ */
+ static async processStreamWithTimeout(stream, options) {
+ let fullContent = "";
+ let contentReceived = false;
+ const abortController = new AbortController();
+ // Create timeout promise for stream consumption (30 seconds)
+ const timeoutPromise = new Promise((_, reject) => {
+ const timeoutId = setTimeout(() => {
+ if (!contentReceived) {
+ const timeoutError = new Error("\nāŒ Stream timeout - no content received within 30 seconds\n" +
+ "This usually indicates authentication or network issues\n\n" +
+ "šŸ”§ Try these steps:\n" +
+ "1. Check your provider credentials are configured correctly\n" +
+ `2. Test generate mode: neurolink generate "test" --provider ${options.provider}\n` +
+ `3. Use debug mode: neurolink stream "test" --provider ${options.provider} --debug`);
+ reject(timeoutError);
+ }
+ }, 30000);
+ // Clean up timeout when aborted
+ abortController.signal.addEventListener("abort", () => {
+ clearTimeout(timeoutId);
+ });
+ });
+ try {
+ // Process the stream with timeout handling
+ const streamIterator = stream.stream[Symbol.asyncIterator]();
+ let timeoutActive = true;
+ while (true) {
+ let nextResult;
+ if (timeoutActive && !contentReceived) {
+ // Race between next chunk and timeout for first chunk only
+ nextResult = await Promise.race([
+ streamIterator.next(),
+ timeoutPromise,
+ ]);
+ }
+ else {
+ // No timeout for subsequent chunks
+ nextResult = await streamIterator.next();
+ }
+ if (nextResult.done) {
+ break;
+ }
+ if (!contentReceived) {
+ contentReceived = true;
+ timeoutActive = false;
+ abortController.abort(); // Cancel timeout
+ }
+ if (options.delay && options.delay > 0) {
+ // Demo mode - add delay between chunks
+ await new Promise((resolve) => setTimeout(resolve, options.delay));
+ }
+ const evt = nextResult.value;
+ const isText = (o) => !!o &&
+ typeof o === "object" &&
+ typeof o.content === "string";
+ const isAudio = (o) => !!o &&
+ typeof o === "object" &&
+ o.type === "audio";
+ if (isText(evt)) {
+ process.stdout.write(evt.content);
+ fullContent += evt.content;
+ }
+ else if (isAudio(evt)) {
+ if (options.debug && !options.quiet) {
+ process.stdout.write("[audio-chunk]");
+ }
+ }
+ }
+ }
+ catch (error) {
+ abortController.abort(); // Clean up timeout
+ throw error;
+ }
+ if (!contentReceived) {
+ throw new Error("\nāŒ No content received from stream\n" +
+ "Check your credentials and provider configuration");
+ }
+ if (!options.quiet) {
+ process.stdout.write("\n");
+ }
+ return fullContent;
+ }
+ /**
+ * Display analytics and evaluation results
+ */
+ static async displayStreamResults(stream, fullContent, options) {
+ // Display analytics after streaming
+ if (options.enableAnalytics && stream.analytics) {
+ const resolvedAnalytics = await (stream.analytics instanceof Promise
+ ? stream.analytics
+ : Promise.resolve(stream.analytics));
+ const streamAnalytics = {
+ success: true,
+ content: fullContent,
+ analytics: resolvedAnalytics,
+ model: stream.model,
+ toolsUsed: stream.toolCalls?.map((tc) => tc.toolName) || [],
+ };
+ const analyticsDisplay = this.formatAnalyticsForTextMode(streamAnalytics);
+ logger.always(analyticsDisplay);
+ }
+ // Display evaluation after streaming
+ if (options.enableEvaluation && stream.evaluation) {
+ const resolvedEvaluation = await (stream.evaluation instanceof Promise
+ ? stream.evaluation
+ : Promise.resolve(stream.evaluation));
+ logger.always(chalk.blue("\nšŸ“Š Response Evaluation:"));
+ logger.always(` Relevance: ${resolvedEvaluation.relevance}/10`);
+ logger.always(` Accuracy: ${resolvedEvaluation.accuracy}/10`);
+ logger.always(` Completeness: ${resolvedEvaluation.completeness}/10`);
+ logger.always(` Overall: ${resolvedEvaluation.overall}/10`);
+ if (resolvedEvaluation.reasoning) {
+ logger.always(` Reasoning: ${resolvedEvaluation.reasoning}`);
+ }
+ }
+ }
+ /**
+ * Handle stream output file writing and debug output
+ */
+ static async handleStreamOutput(options, fullContent) {
+ // Handle output file if specified
+ if (options.output) {
+ fs.writeFileSync(options.output, fullContent);
+ if (!options.quiet) {
+ logger.always(`\nOutput saved to ${options.output}`);
+ }
+ }
+ // Debug output for streaming
+ if (options.debug) {
+ await this.logStreamDebugInfo({
+ provider: options.provider,
+ model: options.model,
+ });
+ }
+ }
  /**
  * Log debug information for stream result
  */
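
Note: the new processStreamWithTimeout helper applies a deadline only to the first chunk: the initial streamIterator.next() races a 30-second timeout promise, and once any content arrives the timeout is cancelled and later chunks are awaited without a deadline. A condensed, self-contained TypeScript sketch of that pattern (names and types here are illustrative, not neurolink's):

    // Sketch: apply a deadline to the first read only, then drain freely.
    async function drainWithFirstChunkTimeout(
      iter: AsyncIterator<{ content?: string }>,
      ms = 30000,
    ): Promise<string> {
      let out = "";
      let timer: ReturnType<typeof setTimeout> | undefined;
      const timeout = new Promise<never>((_, reject) => {
        timer = setTimeout(() => reject(new Error("no content received within timeout")), ms);
      });
      let first = true;
      while (true) {
        // Only the very first read races the timeout promise.
        const next = first ? await Promise.race([iter.next(), timeout]) : await iter.next();
        if (first) {
          clearTimeout(timer); // a chunk (or end of stream) arrived in time
          first = false;
        }
        if (next.done) {
          break;
        }
        if (typeof next.value?.content === "string") {
          out += next.value.content;
        }
      }
      return out;
    }
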
@@ -1057,252 +1341,14 @@ export class CLICommandFactory {
  if (options.delay) {
  await new Promise((resolve) => setTimeout(resolve, options.delay));
  }
- // Process context if provided (same as generate command)
- let inputText = argv.input;
- let contextMetadata;
- if (options.context && options.contextConfig) {
- const processedContextResult = ContextFactory.processContext(options.context, options.contextConfig);
- // Integrate context into prompt if configured
- if (processedContextResult.processedContext) {
- inputText = processedContextResult.processedContext + inputText;
- }
- // Add context metadata for analytics
- contextMetadata = {
- ...ContextFactory.extractAnalyticsContext(options.context),
- contextMode: processedContextResult.config.mode,
- contextTruncated: processedContextResult.metadata.truncated,
- };
- if (options.debug) {
- logger.debug("Context processed for streaming:", {
- mode: processedContextResult.config.mode,
- truncated: processedContextResult.metadata.truncated,
- processingTime: processedContextResult.metadata.processingTime,
- });
- }
- }
+ const { inputText, contextMetadata } = await this.processStreamContext(argv, options);
  // Handle dry-run mode for testing
  if (options.dryRun) {
- if (!options.quiet) {
- logger.always(chalk.blue("šŸ”„ Dry-run streaming..."));
- }
- // Simulate streaming output
- const chunks = [
- "Mock ",
- "streaming ",
- "response ",
- "for ",
- "testing ",
- "purposes",
- ];
- let fullContent = "";
- for (const chunk of chunks) {
- process.stdout.write(chunk);
- fullContent += chunk;
- await new Promise((resolve) => setTimeout(resolve, 50)); // Simulate streaming delay
- }
- if (!options.quiet) {
- process.stdout.write("\n");
- }
- // Mock analytics and evaluation for dry-run
- if (options.enableAnalytics) {
- const mockAnalytics = {
- provider: options.provider || "auto",
- model: options.model || "test-model",
- requestDuration: 300,
- tokenUsage: {
- input: 10,
- output: 15,
- total: 25,
- },
- timestamp: new Date().toISOString(),
- context: contextMetadata,
- };
- const mockGenerateResult = {
- success: true,
- content: fullContent,
- analytics: mockAnalytics,
- model: mockAnalytics.model,
- toolsUsed: [],
- };
- const analyticsDisplay = this.formatAnalyticsForTextMode(mockGenerateResult);
- logger.always(analyticsDisplay);
- }
- if (options.enableEvaluation) {
- logger.always(chalk.blue("\nšŸ“Š Response Evaluation (Dry-run):"));
- logger.always(` Relevance: 8/10`);
- logger.always(` Accuracy: 9/10`);
- logger.always(` Completeness: 8/10`);
- logger.always(` Overall: 8.3/10`);
- logger.always(` Reasoning: Test evaluation response`);
- }
- if (options.output) {
- fs.writeFileSync(options.output, fullContent);
- if (!options.quiet) {
- logger.always(`\nOutput saved to ${options.output}`);
- }
- }
- if (options.debug) {
- logger.debug("\n" + chalk.yellow("Debug Information (Dry-run Streaming):"));
- logger.debug("Provider:", options.provider || "auto");
- logger.debug("Model:", options.model || "test-model");
- logger.debug("Mode: DRY-RUN (no actual API calls made)");
- }
- if (!globalSession.getCurrentSessionId()) {
- process.exit(0);
- }
- }
- const sdk = globalSession.getOrCreateNeuroLink();
- const sessionVariables = globalSession.getSessionVariables();
- const enhancedOptions = { ...options, ...sessionVariables };
- const sessionId = globalSession.getCurrentSessionId();
- const context = sessionId
- ? { ...contextMetadata, sessionId }
- : contextMetadata;
- // Process CLI images if provided
- const imageBuffers = CLICommandFactory.processCliImages(argv.image);
- const stream = await sdk.stream({
- input: imageBuffers
- ? { text: inputText, images: imageBuffers }
- : { text: inputText },
- provider: enhancedOptions.provider,
- model: enhancedOptions.model,
- temperature: enhancedOptions.temperature,
- maxTokens: enhancedOptions.maxTokens,
- systemPrompt: enhancedOptions.systemPrompt,
- timeout: enhancedOptions.timeout,
- disableTools: enhancedOptions.disableTools,
- enableAnalytics: enhancedOptions.enableAnalytics,
- enableEvaluation: enhancedOptions.enableEvaluation,
- evaluationDomain: enhancedOptions.evaluationDomain,
- toolUsageContext: enhancedOptions.toolUsageContext,
- context: context,
- factoryConfig: enhancedOptions.domain
- ? {
- domainType: enhancedOptions.domain,
- enhancementType: "domain-configuration",
- validateDomainData: true,
- }
- : undefined,
- });
- let fullContent = "";
- let contentReceived = false;
- const abortController = new AbortController();
- // Create timeout promise for stream consumption (30 seconds)
- const timeoutPromise = new Promise((_, reject) => {
- const timeoutId = setTimeout(() => {
- if (!contentReceived) {
- const timeoutError = new Error("\nāŒ Stream timeout - no content received within 30 seconds\n" +
- "This usually indicates authentication or network issues\n\n" +
- "šŸ”§ Try these steps:\n" +
- "1. Check your provider credentials are configured correctly\n" +
- `2. Test generate mode: neurolink generate "test" --provider ${options.provider}\n` +
- `3. Use debug mode: neurolink stream "test" --provider ${options.provider} --debug`);
- reject(timeoutError);
- }
- }, 30000);
- // Clean up timeout when aborted
- abortController.signal.addEventListener("abort", () => {
- clearTimeout(timeoutId);
- });
- });
- try {
- // Process the stream with timeout handling
- const streamIterator = stream.stream[Symbol.asyncIterator]();
- let timeoutActive = true;
- while (true) {
- let nextResult;
- if (timeoutActive && !contentReceived) {
- // Race between next chunk and timeout for first chunk only
- nextResult = await Promise.race([
- streamIterator.next(),
- timeoutPromise,
- ]);
- }
- else {
- // No timeout for subsequent chunks
- nextResult = await streamIterator.next();
- }
- if (nextResult.done) {
- break;
- }
- if (!contentReceived) {
- contentReceived = true;
- timeoutActive = false;
- abortController.abort(); // Cancel timeout
- }
- if (options.delay && options.delay > 0) {
- // Demo mode - add delay between chunks
- await new Promise((resolve) => setTimeout(resolve, options.delay));
- }
- const evt = nextResult.value;
- const isText = (o) => !!o &&
- typeof o === "object" &&
- typeof o.content === "string";
- const isAudio = (o) => !!o &&
- typeof o === "object" &&
- o.type === "audio";
- if (isText(evt)) {
- process.stdout.write(evt.content);
- fullContent += evt.content;
- }
- else if (isAudio(evt)) {
- if (options.debug && !options.quiet) {
- process.stdout.write("[audio-chunk]");
- }
- }
- }
- }
- catch (error) {
- abortController.abort(); // Clean up timeout
- throw error;
- }
- if (!contentReceived) {
- throw new Error("\nāŒ No content received from stream\n" +
- "Check your credentials and provider configuration");
- }
- if (!options.quiet) {
- process.stdout.write("\n");
- }
- // šŸ”§ NEW: Display analytics and evaluation after streaming (similar to generate command)
- if (options.enableAnalytics && stream.analytics) {
- const resolvedAnalytics = await (stream.analytics instanceof Promise
- ? stream.analytics
- : Promise.resolve(stream.analytics));
- const streamAnalytics = {
- success: true,
- content: fullContent,
- analytics: resolvedAnalytics,
- model: stream.model,
- toolsUsed: stream.toolCalls?.map((tc) => tc.toolName) || [],
- };
- const analyticsDisplay = this.formatAnalyticsForTextMode(streamAnalytics);
- logger.always(analyticsDisplay);
- }
- // šŸ”§ NEW: Display evaluation after streaming
- if (options.enableEvaluation && stream.evaluation) {
- const resolvedEvaluation = await (stream.evaluation instanceof Promise
- ? stream.evaluation
- : Promise.resolve(stream.evaluation));
- logger.always(chalk.blue("\nšŸ“Š Response Evaluation:"));
- logger.always(` Relevance: ${resolvedEvaluation.relevance}/10`);
- logger.always(` Accuracy: ${resolvedEvaluation.accuracy}/10`);
- logger.always(` Completeness: ${resolvedEvaluation.completeness}/10`);
- logger.always(` Overall: ${resolvedEvaluation.overall}/10`);
- if (resolvedEvaluation.reasoning) {
- logger.always(` Reasoning: ${resolvedEvaluation.reasoning}`);
- }
- }
- // Handle output file if specified
- if (options.output) {
- fs.writeFileSync(options.output, fullContent);
- if (!options.quiet) {
- logger.always(`\nOutput saved to ${options.output}`);
- }
- }
- // šŸ”§ NEW: Debug output for streaming (similar to generate command)
- if (options.debug) {
- await this.logStreamDebugInfo(stream);
+ await this.executeDryRunStream(options, contextMetadata);
+ return;
  }
+ const fullContent = await this.executeRealStream(argv, options, inputText, contextMetadata);
+ await this.handleStreamOutput(options, fullContent);
  if (!globalSession.getCurrentSessionId()) {
  process.exit(0);
  }
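
Note: besides extracting the helpers, this refactor appears to change dry-run behavior slightly. The old inline code only exited after the dry-run simulation when no session was active, so with an active session it fell through to the real sdk.stream() call; the new code returns immediately after executeDryRunStream(), so dry-run never reaches the real streaming path.
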
@@ -1385,7 +1431,9 @@ export class CLICommandFactory {
  temperature: enhancedOptions.temperature,
  maxTokens: enhancedOptions.maxTokens,
  systemPrompt: enhancedOptions.systemPrompt,
- timeout: enhancedOptions.timeout,
+ timeout: enhancedOptions.timeout
+ ? enhancedOptions.timeout * 1000
+ : undefined,
  disableTools: enhancedOptions.disableTools,
  evaluationDomain: enhancedOptions.evaluationDomain,
  toolUsageContext: enhancedOptions.toolUsageContext,
@@ -1631,6 +1679,10 @@ export class CLICommandFactory {
  success = true;
  }
  else {
+ // sessionId is guaranteed to exist when isAllSessions is false
+ if (!argv.sessionId) {
+ throw new Error("Session ID is required for clearing specific session");
+ }
  success = await sdk.clearConversationSession(argv.sessionId);
  }
  if (spinner) {
@@ -0,0 +1,51 @@
+ /**
+ * Task Classification Configuration
+ * Contains patterns, keywords, and scoring weights for task classification
+ */
+ /**
+ * Regular expression patterns that indicate fast response tasks
+ */
+ export declare const FAST_PATTERNS: RegExp[];
+ /**
+ * Regular expression patterns that indicate reasoning tasks
+ */
+ export declare const REASONING_PATTERNS: RegExp[];
+ /**
+ * Keywords that indicate fast tasks regardless of context
+ */
+ export declare const FAST_KEYWORDS: string[];
+ /**
+ * Keywords that indicate reasoning tasks regardless of context
+ */
+ export declare const REASONING_KEYWORDS: string[];
+ /**
+ * Scoring weights for different classification factors
+ */
+ export declare const SCORING_WEIGHTS: {
+ readonly SHORT_PROMPT_BONUS: 2;
+ readonly LONG_PROMPT_BONUS: 1;
+ readonly PATTERN_MATCH_SCORE: 3;
+ readonly KEYWORD_MATCH_SCORE: 1;
+ readonly MULTIPLE_QUESTIONS_BONUS: 1;
+ readonly MULTI_SENTENCE_BONUS: 1;
+ readonly TECHNICAL_DOMAIN_BONUS: 1;
+ readonly SIMPLE_DEFINITION_BONUS: 2;
+ };
+ /**
+ * Classification thresholds and constraints
+ */
+ export declare const CLASSIFICATION_THRESHOLDS: {
+ readonly SHORT_PROMPT_LENGTH: 50;
+ readonly LONG_PROMPT_LENGTH: 200;
+ readonly SIMPLE_DEFINITION_LENGTH: 100;
+ readonly MIN_CONFIDENCE: 0.6;
+ readonly MAX_CONFIDENCE: 0.95;
+ readonly DEFAULT_CONFIDENCE: 0.5;
+ };
+ /**
+ * Domain-specific patterns for enhanced classification
+ */
+ export declare const DOMAIN_PATTERNS: {
+ readonly TECHNICAL: RegExp;
+ readonly SIMPLE_DEFINITION: RegExp;
+ };
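
Note: this hunk adds the type declarations for the new task-classification configuration; the matching runtime values live in taskClassificationConfig.js, and the scoring logic in taskClassifier.js / taskClassificationUtils.js, none of which are shown here. A hypothetical TypeScript sketch of how these exports could be combined into a "reasoning" score. The weighting below is an assumption for illustration only, not the package's actual algorithm, and deep-importing from dist may not be a supported entry point:

    import {
      REASONING_PATTERNS,
      REASONING_KEYWORDS,
      SCORING_WEIGHTS,
      CLASSIFICATION_THRESHOLDS,
    } from "@juspay/neurolink/dist/config/taskClassificationConfig.js"; // illustrative path

    // Hypothetical scoring: pattern hits weigh more than keyword hits, and
    // long prompts get a bonus; the real classifier may combine these differently.
    function scoreReasoning(prompt: string): number {
      let score = 0;
      for (const pattern of REASONING_PATTERNS) {
        if (prompt.search(pattern) !== -1) {
          score += SCORING_WEIGHTS.PATTERN_MATCH_SCORE;
        }
      }
      for (const keyword of REASONING_KEYWORDS) {
        if (prompt.toLowerCase().includes(keyword.toLowerCase())) {
          score += SCORING_WEIGHTS.KEYWORD_MATCH_SCORE;
        }
      }
      if (prompt.length > CLASSIFICATION_THRESHOLDS.LONG_PROMPT_LENGTH) {
        score += SCORING_WEIGHTS.LONG_PROMPT_BONUS;
      }
      return score;
    }
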