@juspay/neurolink 4.2.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/CHANGELOG.md +47 -2
  2. package/README.md +51 -60
  3. package/dist/chat/sse-handler.js +5 -4
  4. package/dist/chat/websocket-chat-handler.js +9 -9
  5. package/dist/cli/commands/mcp.js +1 -1
  6. package/dist/cli/commands/ollama.js +3 -3
  7. package/dist/cli/factories/command-factory.d.ts +14 -0
  8. package/dist/cli/factories/command-factory.js +129 -0
  9. package/dist/cli/index.js +27 -29
  10. package/dist/cli/utils/interactive-setup.js +2 -2
  11. package/dist/core/evaluation.d.ts +9 -9
  12. package/dist/core/evaluation.js +14 -14
  13. package/dist/core/types.d.ts +41 -48
  14. package/dist/core/types.js +1 -0
  15. package/dist/factories/compatibility-factory.d.ts +20 -0
  16. package/dist/factories/compatibility-factory.js +69 -0
  17. package/dist/factories/provider-generate-factory.d.ts +20 -0
  18. package/dist/factories/provider-generate-factory.js +87 -0
  19. package/dist/index.d.ts +4 -2
  20. package/dist/index.js +3 -1
  21. package/dist/lib/chat/sse-handler.js +5 -4
  22. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  23. package/dist/lib/core/evaluation.d.ts +9 -9
  24. package/dist/lib/core/evaluation.js +14 -14
  25. package/dist/lib/core/types.d.ts +41 -48
  26. package/dist/lib/core/types.js +1 -0
  27. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  28. package/dist/lib/factories/compatibility-factory.js +69 -0
  29. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  30. package/dist/lib/factories/provider-generate-factory.js +87 -0
  31. package/dist/lib/index.d.ts +4 -2
  32. package/dist/lib/index.js +3 -1
  33. package/dist/lib/mcp/client.js +5 -5
  34. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  35. package/dist/lib/mcp/external-client.js +2 -2
  36. package/dist/lib/mcp/factory.d.ts +1 -1
  37. package/dist/lib/mcp/factory.js +1 -1
  38. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  39. package/dist/lib/mcp/orchestrator.js +4 -4
  40. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  41. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
  42. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  43. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  44. package/dist/lib/neurolink.d.ts +21 -73
  45. package/dist/lib/neurolink.js +230 -119
  46. package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
  47. package/dist/lib/providers/agent-enhanced-provider.js +87 -96
  48. package/dist/lib/providers/amazonBedrock.d.ts +17 -8
  49. package/dist/lib/providers/amazonBedrock.js +60 -30
  50. package/dist/lib/providers/anthropic.d.ts +14 -10
  51. package/dist/lib/providers/anthropic.js +84 -154
  52. package/dist/lib/providers/azureOpenAI.d.ts +9 -6
  53. package/dist/lib/providers/azureOpenAI.js +70 -159
  54. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  55. package/dist/lib/providers/function-calling-provider.js +114 -64
  56. package/dist/lib/providers/googleAIStudio.d.ts +12 -19
  57. package/dist/lib/providers/googleAIStudio.js +65 -34
  58. package/dist/lib/providers/googleVertexAI.d.ts +11 -15
  59. package/dist/lib/providers/googleVertexAI.js +146 -118
  60. package/dist/lib/providers/huggingFace.d.ts +10 -11
  61. package/dist/lib/providers/huggingFace.js +61 -24
  62. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  63. package/dist/lib/providers/mcp-provider.js +59 -18
  64. package/dist/lib/providers/mistralAI.d.ts +14 -11
  65. package/dist/lib/providers/mistralAI.js +60 -29
  66. package/dist/lib/providers/ollama.d.ts +9 -8
  67. package/dist/lib/providers/ollama.js +134 -91
  68. package/dist/lib/providers/openAI.d.ts +11 -12
  69. package/dist/lib/providers/openAI.js +132 -97
  70. package/dist/lib/types/generate-types.d.ts +79 -0
  71. package/dist/lib/types/generate-types.js +1 -0
  72. package/dist/lib/types/stream-types.d.ts +83 -0
  73. package/dist/lib/types/stream-types.js +1 -0
  74. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  75. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  76. package/dist/lib/utils/streaming-utils.js +0 -3
  77. package/dist/mcp/client.js +5 -5
  78. package/dist/mcp/dynamic-orchestrator.js +8 -8
  79. package/dist/mcp/external-client.js +2 -2
  80. package/dist/mcp/factory.d.ts +1 -1
  81. package/dist/mcp/factory.js +1 -1
  82. package/dist/mcp/neurolink-mcp-client.js +10 -10
  83. package/dist/mcp/orchestrator.js +4 -4
  84. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  85. package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
  86. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  87. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  88. package/dist/neurolink.d.ts +21 -73
  89. package/dist/neurolink.js +230 -119
  90. package/dist/providers/agent-enhanced-provider.d.ts +12 -8
  91. package/dist/providers/agent-enhanced-provider.js +87 -95
  92. package/dist/providers/amazonBedrock.d.ts +17 -8
  93. package/dist/providers/amazonBedrock.js +60 -30
  94. package/dist/providers/anthropic.d.ts +14 -10
  95. package/dist/providers/anthropic.js +84 -154
  96. package/dist/providers/azureOpenAI.d.ts +9 -6
  97. package/dist/providers/azureOpenAI.js +70 -159
  98. package/dist/providers/function-calling-provider.d.ts +14 -12
  99. package/dist/providers/function-calling-provider.js +114 -64
  100. package/dist/providers/googleAIStudio.d.ts +12 -19
  101. package/dist/providers/googleAIStudio.js +65 -34
  102. package/dist/providers/googleVertexAI.d.ts +11 -15
  103. package/dist/providers/googleVertexAI.js +146 -118
  104. package/dist/providers/huggingFace.d.ts +10 -11
  105. package/dist/providers/huggingFace.js +61 -24
  106. package/dist/providers/mcp-provider.d.ts +13 -8
  107. package/dist/providers/mcp-provider.js +59 -18
  108. package/dist/providers/mistralAI.d.ts +14 -11
  109. package/dist/providers/mistralAI.js +60 -29
  110. package/dist/providers/ollama.d.ts +9 -8
  111. package/dist/providers/ollama.js +133 -90
  112. package/dist/providers/openAI.d.ts +11 -12
  113. package/dist/providers/openAI.js +132 -97
  114. package/dist/types/generate-types.d.ts +79 -0
  115. package/dist/types/generate-types.js +1 -0
  116. package/dist/types/stream-types.d.ts +83 -0
  117. package/dist/types/stream-types.js +1 -0
  118. package/dist/utils/providerUtils-fixed.js +1 -1
  119. package/dist/utils/streaming-utils.d.ts +14 -2
  120. package/dist/utils/streaming-utils.js +0 -3
  121. package/package.json +2 -3
  122. package/dist/cli/commands/agent-generate.d.ts +0 -1
  123. package/dist/cli/commands/agent-generate.js +0 -67
@@ -2,12 +2,11 @@
2
2
  * Agent-Enhanced Provider for NeuroLink CLI
3
3
  * Integrates direct tools with AI providers for true agent functionality
4
4
  */
5
- import { generateText, streamText, } from "ai";
5
+ import { generateText as aiGenerate } from "ai";
6
6
  import { google } from "@ai-sdk/google";
7
7
  import { openai } from "@ai-sdk/openai";
8
8
  import { anthropic } from "@ai-sdk/anthropic";
9
9
  import { getToolsForCategory, } from "../agent/direct-tools.js";
10
- import { StreamingEnhancer, StreamingMonitor, } from "../utils/streaming-utils.js";
11
10
  import { UnifiedMCPSystem } from "../mcp/unified-mcp.js";
12
11
  import { mcpLogger } from "../mcp/logging.js";
13
12
  import { parseTimeout } from "../utils/timeout.js";
@@ -258,7 +257,37 @@ export class AgentEnhancedProvider {
258
257
  }
259
258
  return { ...directTools, ...mcpTools };
260
259
  }
261
- async generateText(optionsOrPrompt) {
260
+ /**
261
+ * PRIMARY METHOD: Stream content using AI (recommended for new code)
262
+ * Future-ready for multi-modal capabilities with current text focus
263
+ */
264
+ async stream(optionsOrPrompt, analysisSchema) {
265
+ const functionTag = "AgentEnhancedProvider.stream";
266
+ const startTime = Date.now();
267
+ // Parse parameters - support both string and options object
268
+ const options = typeof optionsOrPrompt === "string"
269
+ ? { input: { text: optionsOrPrompt } }
270
+ : optionsOrPrompt;
271
+ // Validate input
272
+ if (!options?.input?.text ||
273
+ typeof options.input.text !== "string" ||
274
+ options.input.text.trim() === "") {
275
+ throw new Error("Stream options must include input.text as a non-empty string");
276
+ }
277
+ // Convert StreamOptions for internal use
278
+ const convertedOptions = {
279
+ prompt: options.input.text,
280
+ provider: options.provider,
281
+ model: options.model,
282
+ temperature: options.temperature,
283
+ maxTokens: options.maxTokens,
284
+ systemPrompt: options.systemPrompt,
285
+ timeout: options.timeout,
286
+ };
287
+ // Use stream method to get streaming result
288
+ return await this.stream(options);
289
+ }
290
+ async generate(optionsOrPrompt) {
262
291
  const startTime = Date.now();
263
292
  const options = typeof optionsOrPrompt === "string"
264
293
  ? { prompt: optionsOrPrompt }
@@ -284,7 +313,7 @@ export class AgentEnhancedProvider {
284
313
  }
285
314
  }
286
315
  // The AI SDK with maxSteps automatically handles tool calling and result integration
287
- const result = await generateText({
316
+ const result = await aiGenerate({
288
317
  model: this.model,
289
318
  prompt: systemPrompt
290
319
  ? `System: ${systemPrompt}\n\nUser: ${prompt}`
@@ -345,9 +374,21 @@ export class AgentEnhancedProvider {
345
374
  });
346
375
  // Return result with the formatted text
347
376
  return {
348
- ...result,
349
- text: finalText,
350
- finishReason: "stop",
377
+ content: finalText,
378
+ provider: this.getProviderName(),
379
+ model: this.getModelName(),
380
+ usage: result.usage
381
+ ? {
382
+ inputTokens: result.usage.promptTokens,
383
+ outputTokens: result.usage.completionTokens,
384
+ totalTokens: result.usage.totalTokens,
385
+ }
386
+ : undefined,
387
+ responseTime: 0,
388
+ toolsUsed: [],
389
+ toolExecutions: [],
390
+ enhancedWithTools: false,
391
+ availableTools: [],
351
392
  };
352
393
  }
353
394
  catch (error) {
@@ -357,9 +398,21 @@ export class AgentEnhancedProvider {
357
398
  // Fallback: return raw tool results
358
399
  const fallbackText = `Tool execution completed. Raw results: ${JSON.stringify(result.toolResults, null, 2)}`;
359
400
  return {
360
- ...result,
361
- text: fallbackText,
362
- finishReason: "stop",
401
+ content: fallbackText,
402
+ provider: this.getProviderName(),
403
+ model: this.getModelName(),
404
+ usage: result.usage
405
+ ? {
406
+ inputTokens: result.usage.promptTokens,
407
+ outputTokens: result.usage.completionTokens,
408
+ totalTokens: result.usage.totalTokens,
409
+ }
410
+ : undefined,
411
+ responseTime: 0,
412
+ toolsUsed: [],
413
+ toolExecutions: [],
414
+ enhancedWithTools: false,
415
+ availableTools: [],
363
416
  };
364
417
  }
365
418
  }
@@ -372,85 +425,26 @@ export class AgentEnhancedProvider {
372
425
  result.evaluation = await evaluateResponse(prompt, result.text, options.context);
373
426
  }
374
427
  // Return the full result - the AI SDK has already handled tool execution and integration
375
- return result;
376
- }
377
- catch (error) {
378
- console.error("[AgentEnhancedProvider] generateText error:", error);
379
- throw error;
380
- }
381
- }
382
- async streamText(optionsOrPrompt) {
383
- const options = typeof optionsOrPrompt === "string"
384
- ? { prompt: optionsOrPrompt }
385
- : optionsOrPrompt;
386
- const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt, timeout,
387
- // Phase 2: Enhanced streaming options
388
- enableProgressTracking, progressCallback, includeStreamingMetadata, streamingBufferSize, enableStreamingHeaders, customStreamingConfig, } = options;
389
- // Phase 2.1: Setup streaming enhancements
390
- const streamId = `agent_stream_${Date.now()}`;
391
- const streamingConfig = StreamingEnhancer.createStreamingConfig(options);
392
- if (enableProgressTracking) {
393
- StreamingMonitor.registerStream(streamId);
394
- }
395
- // Get combined tools (direct + MCP) if enabled
396
- const tools = this.config.enableTools ? await this.getCombinedTools() : {};
397
- try {
398
- // Parse timeout if provided
399
- let abortSignal;
400
- if (timeout) {
401
- const timeoutMs = typeof timeout === "string" ? parseTimeout(timeout) : timeout;
402
- if (timeoutMs !== undefined) {
403
- abortSignal = AbortSignal.timeout(timeoutMs);
404
- }
405
- }
406
- const result = await streamText({
407
- model: this.model,
408
- prompt: systemPrompt
409
- ? `System: ${systemPrompt}\n\nUser: ${prompt}`
410
- : prompt,
411
- tools,
412
- maxSteps: this.config.maxSteps,
413
- temperature,
414
- maxTokens,
415
- toolChoice: this.shouldForceToolUsage(prompt) ? "required" : "auto",
416
- abortSignal, // Pass abort signal for timeout support
417
- });
418
- // Phase 2.1: Apply streaming enhancements if enabled
419
- if (streamingConfig.progressTracking && result.textStream) {
420
- const enhancedCallback = streamingConfig.callback
421
- ? (progress) => {
422
- StreamingMonitor.updateStream(streamId, progress);
423
- streamingConfig.callback(progress);
424
- if (progress.phase === "complete") {
425
- StreamingMonitor.completeStream(streamId);
426
- }
428
+ return {
429
+ content: result.text,
430
+ provider: this.getProviderName(),
431
+ model: this.getModelName(),
432
+ usage: result.usage
433
+ ? {
434
+ inputTokens: result.usage.promptTokens,
435
+ outputTokens: result.usage.completionTokens,
436
+ totalTokens: result.usage.totalTokens,
427
437
  }
428
- : undefined;
429
- // Enhance the stream with progress tracking
430
- const enhancedStream = StreamingEnhancer.addProgressTracking(result.textStream, enhancedCallback, { streamId, bufferSize: streamingConfig.bufferSize });
431
- // Return enhanced result with tracking
432
- return {
433
- ...result,
434
- textStream: enhancedStream,
435
- // Phase 2.1: Add streaming metadata
436
- streamingMetadata: streamingConfig.metadata
437
- ? {
438
- streamId,
439
- provider: this.getProviderName(),
440
- model: this.getModelName(),
441
- enabledFeatures: {
442
- progressTracking: true,
443
- metadata: streamingConfig.metadata,
444
- headers: streamingConfig.headers,
445
- },
446
- }
447
- : undefined,
448
- };
449
- }
450
- return result;
438
+ : undefined,
439
+ responseTime: 0,
440
+ toolsUsed: [],
441
+ toolExecutions: [],
442
+ enhancedWithTools: false,
443
+ availableTools: [],
444
+ };
451
445
  }
452
446
  catch (error) {
453
- console.error("[AgentEnhancedProvider] streamText error:", error);
447
+ console.error("[AgentEnhancedProvider] generate error:", error);
454
448
  throw error;
455
449
  }
456
450
  }
@@ -500,12 +494,12 @@ export class AgentEnhancedProvider {
500
494
  for (const prompt of testPrompts) {
501
495
  try {
502
496
  logger.debug(`Testing: "${prompt}"`);
503
- const result = await this.generateText(prompt);
497
+ const result = await this.generate(prompt);
504
498
  if (!result) {
505
499
  results.push({
506
500
  prompt,
507
501
  success: false,
508
- error: "No result returned from generateText",
502
+ error: "No result returned from generate",
509
503
  });
510
504
  logger.warn(`❌ No result returned`);
511
505
  continue;
@@ -519,9 +513,9 @@ export class AgentEnhancedProvider {
519
513
  prompt,
520
514
  success,
521
515
  toolsCalled,
522
- response: result.text.substring(0, 100) + "...",
516
+ response: result.content.substring(0, 100) + "...",
523
517
  });
524
- logger.debug(`✅ Tools called: ${toolsCalled}, Response: ${result.text.substring(0, 50)}...`);
518
+ logger.debug(`✅ Tools called: ${toolsCalled}, Response: ${result.content.substring(0, 50)}...`);
525
519
  }
526
520
  catch (error) {
527
521
  results.push({
@@ -565,16 +559,13 @@ export class AgentEnhancedProvider {
565
559
  return providers;
566
560
  }
567
561
  /**
568
- * Alias for generateText() - CLI-SDK consistency
562
+ * Alias for generate() - CLI-SDK consistency
569
563
  */
570
- async generate(optionsOrPrompt, analysisSchema) {
571
- return this.generateText(optionsOrPrompt);
572
- }
573
564
  /**
574
- * Short alias for generateText() - CLI-SDK consistency
565
+ * Short alias for generate() - CLI-SDK consistency
575
566
  */
576
567
  async gen(optionsOrPrompt, analysisSchema) {
577
- return this.generateText(optionsOrPrompt);
568
+ return this.generate(optionsOrPrompt);
578
569
  }
579
570
  }
580
571
  /**
@@ -1,19 +1,28 @@
1
1
  import type { ZodType, ZodTypeDef } from "zod";
2
- import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
3
- import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
2
+ import { type Schema } from "ai";
3
+ import type { GenerateResult } from "../types/generate-types.js";
4
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
5
+ import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
4
6
  export declare class AmazonBedrock implements AIProvider {
5
7
  private modelName;
6
8
  private model;
7
9
  private bedrock;
8
10
  constructor(modelName?: string | null);
9
- streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
10
- generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
11
11
  /**
12
- * Alias for generateText() - CLI-SDK consistency
12
+ * LEGACY METHOD: Use stream() instead for new code
13
+ * @deprecated Use stream() method instead
13
14
  */
14
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
15
15
  /**
16
- * Short alias for generateText() - CLI-SDK consistency
16
+ * PRIMARY METHOD: Stream content using AI (recommended for new code)
17
+ * Future-ready for multi-modal capabilities with current text focus
17
18
  */
18
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
19
+ stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
20
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
21
+ /**
22
+ * Alias for generate() - CLI-SDK consistency
23
+ */
24
+ /**
25
+ * Short alias for generate() - CLI-SDK consistency
26
+ */
27
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
19
28
  }
@@ -123,22 +123,38 @@ export class AmazonBedrock {
123
123
  throw err;
124
124
  }
125
125
  }
126
- async streamText(optionsOrPrompt, analysisSchema) {
127
- const functionTag = "AmazonBedrock.streamText";
126
+ /**
127
+ * LEGACY METHOD: Use stream() instead for new code
128
+ * @deprecated Use stream() method instead
129
+ */
130
+ /**
131
+ * PRIMARY METHOD: Stream content using AI (recommended for new code)
132
+ * Future-ready for multi-modal capabilities with current text focus
133
+ */
134
+ async stream(optionsOrPrompt, analysisSchema) {
135
+ const functionTag = "AmazonBedrock.stream";
128
136
  const provider = "bedrock";
129
137
  let chunkCount = 0;
138
+ const startTime = Date.now();
130
139
  try {
131
140
  // Parse parameters - support both string and options object
132
141
  const options = typeof optionsOrPrompt === "string"
133
- ? { prompt: optionsOrPrompt }
142
+ ? { input: { text: optionsOrPrompt } }
134
143
  : optionsOrPrompt;
135
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
144
+ // Validate input
145
+ if (!options?.input?.text ||
146
+ typeof options.input.text !== "string" ||
147
+ options.input.text.trim() === "") {
148
+ throw new Error("Stream options must include input.text as a non-empty string");
149
+ }
150
+ // Extract prompt and other parameters
151
+ const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
136
152
  // Use schema from options or fallback parameter
137
153
  const finalSchema = schema || analysisSchema;
138
154
  logger.debug(`[${functionTag}] Stream request started`, {
139
155
  provider,
140
156
  modelName: this.modelName,
141
- promptLength: prompt.length,
157
+ promptLength: prompt?.length || 0,
142
158
  temperature,
143
159
  maxTokens,
144
160
  timeout,
@@ -159,10 +175,9 @@ export class AmazonBedrock {
159
175
  const error = event.error;
160
176
  const errorMessage = error instanceof Error ? error.message : String(error);
161
177
  const errorStack = error instanceof Error ? error.stack : undefined;
162
- logger.error(`[${functionTag}] Stream text error`, {
178
+ logger.debug(`[${functionTag}] Stream error`, {
163
179
  provider,
164
180
  modelName: this.modelName,
165
- region: getAWSRegion(),
166
181
  error: errorMessage,
167
182
  stack: errorStack,
168
183
  promptLength: prompt.length,
@@ -170,10 +185,9 @@ export class AmazonBedrock {
170
185
  });
171
186
  },
172
187
  onFinish: (event) => {
173
- logger.debug(`[${functionTag}] Stream text finished`, {
188
+ logger.debug(`[${functionTag}] Stream finished`, {
174
189
  provider,
175
190
  modelName: this.modelName,
176
- region: getAWSRegion(),
177
191
  finishReason: event.finishReason,
178
192
  usage: event.usage,
179
193
  totalChunks: chunkCount,
@@ -183,7 +197,7 @@ export class AmazonBedrock {
183
197
  },
184
198
  onChunk: (event) => {
185
199
  chunkCount++;
186
- logger.debug(`[${functionTag}] Stream text chunk`, {
200
+ logger.debug(`[${functionTag}] Stream chunk`, {
187
201
  provider,
188
202
  modelName: this.modelName,
189
203
  chunkNumber: chunkCount,
@@ -197,42 +211,49 @@ export class AmazonBedrock {
197
211
  schema: finalSchema,
198
212
  });
199
213
  }
200
- // Direct streamText call - let the real error bubble up
201
214
  const result = streamText(streamOptions);
202
- logger.debug(`[${functionTag}] Stream text call successful`, {
215
+ logger.debug(`[${functionTag}] Stream request completed`, {
203
216
  provider,
204
217
  modelName: this.modelName,
205
- promptLength: prompt.length,
206
218
  });
207
- // For streaming, we can't clean up immediately, but the timeout will auto-clean
208
- // The user should handle the stream and any timeout errors
209
- return result;
219
+ // Convert to StreamResult format
220
+ return {
221
+ stream: (async function* () {
222
+ for await (const chunk of result.textStream) {
223
+ yield { content: chunk };
224
+ }
225
+ })(),
226
+ provider: "bedrock",
227
+ model: this.modelName,
228
+ metadata: {
229
+ streamId: `bedrock-${Date.now()}`,
230
+ startTime,
231
+ },
232
+ };
210
233
  }
211
234
  catch (err) {
212
235
  // Log timeout errors specifically
213
236
  if (err instanceof TimeoutError) {
214
- logger.error(`[${functionTag}] Timeout error`, {
237
+ logger.debug(`[${functionTag}] Timeout error`, {
215
238
  provider,
216
239
  modelName: this.modelName,
217
- region: getAWSRegion(),
218
240
  timeout: err.timeout,
219
241
  message: err.message,
220
242
  });
221
243
  }
222
244
  else {
223
- logger.error(`[${functionTag}] Exception`, {
245
+ logger.debug(`[${functionTag}] Exception`, {
224
246
  provider,
225
247
  modelName: this.modelName,
226
- region: getAWSRegion(),
227
- message: "Error in streaming text",
248
+ message: "Error in streaming content",
228
249
  err: String(err),
229
250
  });
230
251
  }
231
252
  throw err; // Re-throw error to trigger fallback
232
253
  }
233
254
  }
234
- async generateText(optionsOrPrompt, analysisSchema) {
235
- const functionTag = "AmazonBedrock.generateText";
255
+ async generate(optionsOrPrompt, analysisSchema) {
256
+ const functionTag = "AmazonBedrock.generate";
236
257
  const provider = "bedrock";
237
258
  const startTime = Date.now();
238
259
  try {
@@ -290,7 +311,19 @@ export class AmazonBedrock {
290
311
  if (options.enableEvaluation) {
291
312
  result.evaluation = await evaluateResponse(prompt, result.text, options.context);
292
313
  }
293
- return result;
314
+ return {
315
+ content: result.text,
316
+ provider: "bedrock",
317
+ model: this.modelName || "claude-3-sonnet",
318
+ usage: result.usage
319
+ ? {
320
+ inputTokens: result.usage.promptTokens,
321
+ outputTokens: result.usage.completionTokens,
322
+ totalTokens: result.usage.totalTokens,
323
+ }
324
+ : undefined,
325
+ responseTime: 0,
326
+ };
294
327
  }
295
328
  finally {
296
329
  // Always cleanup timeout
@@ -320,15 +353,12 @@ export class AmazonBedrock {
320
353
  }
321
354
  }
322
355
  /**
323
- * Alias for generateText() - CLI-SDK consistency
356
+ * Alias for generate() - CLI-SDK consistency
324
357
  */
325
- async generate(optionsOrPrompt, analysisSchema) {
326
- return this.generateText(optionsOrPrompt, analysisSchema);
327
- }
328
358
  /**
329
- * Short alias for generateText() - CLI-SDK consistency
359
+ * Short alias for generate() - CLI-SDK consistency
330
360
  */
331
361
  async gen(optionsOrPrompt, analysisSchema) {
332
- return this.generateText(optionsOrPrompt, analysisSchema);
362
+ return this.generate(optionsOrPrompt, analysisSchema);
333
363
  }
334
364
  }
@@ -4,9 +4,10 @@
4
4
  * Direct integration with Anthropic's Claude models via their native API.
5
5
  * Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, and Claude 3 Opus.
6
6
  */
7
- import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
7
+ import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
8
8
  import type { ZodType, ZodTypeDef } from "zod";
9
9
  import type { Schema } from "ai";
10
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
10
11
  import { AIProviderName } from "../core/types.js";
11
12
  export declare class AnthropicProvider implements AIProvider {
12
13
  readonly name: AIProviderName;
@@ -17,10 +18,17 @@ export declare class AnthropicProvider implements AIProvider {
17
18
  private getApiKey;
18
19
  private getModel;
19
20
  private makeRequest;
20
- generateText(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
21
- streamText(optionsOrPrompt: StreamTextOptions | string, schema?: any): Promise<any>;
21
+ /**
22
+ * PRIMARY METHOD: Stream content using AI (recommended for new code)
23
+ * Future-ready for multi-modal capabilities with current text focus
24
+ */
25
+ stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
26
+ generate(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
27
+ /**
28
+ * LEGACY METHOD: Use stream() instead for new code
29
+ * @deprecated Use stream() method instead
30
+ */
22
31
  private createAsyncIterable;
23
- generateTextStream(optionsOrPrompt: StreamTextOptions | string): AsyncGenerator<any, void, unknown>;
24
32
  testConnection(): Promise<{
25
33
  success: boolean;
26
34
  error?: string;
@@ -34,11 +42,7 @@ export declare class AnthropicProvider implements AIProvider {
34
42
  supportsSchema(): boolean;
35
43
  getCapabilities(): string[];
36
44
  /**
37
- * Alias for generateText() - CLI-SDK consistency
38
- */
39
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
40
- /**
41
- * Short alias for generateText() - CLI-SDK consistency
45
+ * Short alias for generate() - CLI-SDK consistency
42
46
  */
43
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
47
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
44
48
  }