@juspay/neurolink 1.5.1 → 1.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +49 -0
  2. package/README.md +1 -1
  3. package/dist/cli/commands/config.d.ts +35 -35
  4. package/dist/cli/index.js +63 -19
  5. package/dist/core/factory.js +12 -11
  6. package/dist/lib/core/factory.d.ts +40 -0
  7. package/dist/lib/core/factory.js +162 -0
  8. package/dist/lib/core/types.d.ts +111 -0
  9. package/dist/lib/core/types.js +68 -0
  10. package/dist/lib/index.d.ts +56 -0
  11. package/dist/lib/index.js +62 -0
  12. package/dist/lib/mcp/context-manager.d.ts +164 -0
  13. package/dist/lib/mcp/context-manager.js +273 -0
  14. package/dist/lib/mcp/factory.d.ts +144 -0
  15. package/dist/lib/mcp/factory.js +141 -0
  16. package/dist/lib/mcp/orchestrator.d.ts +170 -0
  17. package/dist/lib/mcp/orchestrator.js +372 -0
  18. package/dist/lib/mcp/registry.d.ts +188 -0
  19. package/dist/lib/mcp/registry.js +373 -0
  20. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
  21. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
  22. package/dist/lib/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
  23. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +303 -0
  24. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
  25. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +428 -0
  26. package/dist/lib/neurolink.d.ts +53 -0
  27. package/dist/lib/neurolink.js +155 -0
  28. package/dist/lib/providers/amazonBedrock.d.ts +11 -0
  29. package/dist/lib/providers/amazonBedrock.js +256 -0
  30. package/dist/lib/providers/anthropic.d.ts +34 -0
  31. package/dist/lib/providers/anthropic.js +308 -0
  32. package/dist/lib/providers/azureOpenAI.d.ts +37 -0
  33. package/dist/lib/providers/azureOpenAI.js +339 -0
  34. package/dist/lib/providers/googleAIStudio.d.ts +30 -0
  35. package/dist/lib/providers/googleAIStudio.js +216 -0
  36. package/dist/lib/providers/googleVertexAI.d.ts +30 -0
  37. package/dist/lib/providers/googleVertexAI.js +409 -0
  38. package/dist/lib/providers/index.d.ts +30 -0
  39. package/dist/lib/providers/index.js +25 -0
  40. package/dist/lib/providers/openAI.d.ts +10 -0
  41. package/dist/lib/providers/openAI.js +169 -0
  42. package/dist/lib/utils/logger.d.ts +12 -0
  43. package/dist/lib/utils/logger.js +25 -0
  44. package/dist/lib/utils/providerUtils.d.ts +17 -0
  45. package/dist/lib/utils/providerUtils.js +73 -0
  46. package/dist/mcp/servers/ai-providers/ai-core-server.js +11 -10
  47. package/dist/neurolink.js +13 -12
  48. package/dist/providers/amazonBedrock.js +22 -21
  49. package/dist/providers/anthropic.js +21 -20
  50. package/dist/providers/azureOpenAI.js +21 -20
  51. package/dist/providers/googleAIStudio.js +13 -12
  52. package/dist/providers/googleVertexAI.js +27 -26
  53. package/dist/providers/openAI.js +12 -11
  54. package/dist/utils/logger.d.ts +12 -0
  55. package/dist/utils/logger.js +25 -0
  56. package/dist/utils/providerUtils.d.ts +0 -3
  57. package/dist/utils/providerUtils.js +3 -2
  58. package/package.json +1 -1
@@ -0,0 +1,17 @@
1
+ /**
2
+ * Get the best available provider based on preferences and availability
3
+ * @param requestedProvider - Optional preferred provider name
4
+ * @returns The best provider name to use
5
+ */
6
+ export declare function getBestProvider(requestedProvider?: string): string;
7
+ /**
8
+ * Get available provider names
9
+ * @returns Array of available provider names
10
+ */
11
+ export declare function getAvailableProviders(): string[];
12
+ /**
13
+ * Validate provider name
14
+ * @param provider - Provider name to validate
15
+ * @returns True if provider name is valid
16
+ */
17
+ export declare function isValidProvider(provider: string): boolean;
@@ -0,0 +1,73 @@
1
+ /**
2
+ * Utility functions for AI provider management
3
+ */
4
+ import { logger } from './logger.js';
5
+ /**
6
+ * Get the best available provider based on preferences and availability
7
+ * @param requestedProvider - Optional preferred provider name
8
+ * @returns The best provider name to use
9
+ */
10
+ export function getBestProvider(requestedProvider) {
11
+ // If a specific provider is requested, return it
12
+ if (requestedProvider) {
13
+ return requestedProvider;
14
+ }
15
+ // Default fallback order based on environment variables - OpenAI first since it's most reliable
16
+ const providers = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai'];
17
+ // Check which providers have their required environment variables
18
+ for (const provider of providers) {
19
+ if (isProviderConfigured(provider)) {
20
+ logger.debug(`[getBestProvider] Selected provider: ${provider}`);
21
+ return provider;
22
+ }
23
+ }
24
+ // Default to bedrock if nothing is configured
25
+ logger.warn('[getBestProvider] No providers configured, defaulting to bedrock');
26
+ return 'bedrock';
27
+ }
28
+ /**
29
+ * Check if a provider has the minimum required configuration
30
+ * @param provider - Provider name to check
31
+ * @returns True if the provider appears to be configured
32
+ */
33
+ function isProviderConfigured(provider) {
34
+ switch (provider.toLowerCase()) {
35
+ case 'bedrock':
36
+ case 'amazon':
37
+ case 'aws':
38
+ return !!(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY);
39
+ case 'vertex':
40
+ case 'google':
41
+ case 'gemini':
42
+ return !!(process.env.GOOGLE_VERTEX_PROJECT || process.env.GOOGLE_APPLICATION_CREDENTIALS);
43
+ case 'openai':
44
+ case 'gpt':
45
+ return !!process.env.OPENAI_API_KEY;
46
+ case 'anthropic':
47
+ case 'claude':
48
+ return !!process.env.ANTHROPIC_API_KEY;
49
+ case 'azure':
50
+ case 'azure-openai':
51
+ return !!process.env.AZURE_OPENAI_API_KEY;
52
+ case 'google-ai':
53
+ case 'google-studio':
54
+ return !!(process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY);
55
+ default:
56
+ return false;
57
+ }
58
+ }
59
+ /**
60
+ * Get available provider names
61
+ * @returns Array of available provider names
62
+ */
63
+ export function getAvailableProviders() {
64
+ return ['bedrock', 'vertex', 'openai', 'anthropic', 'azure', 'google-ai'];
65
+ }
66
+ /**
67
+ * Validate provider name
68
+ * @param provider - Provider name to validate
69
+ * @returns True if provider name is valid
70
+ */
71
+ export function isValidProvider(provider) {
72
+ return getAvailableProviders().includes(provider.toLowerCase());
73
+ }
@@ -7,6 +7,7 @@ import { z } from 'zod';
7
7
  import { createMCPServer } from '../../factory.js';
8
8
  import { AIProviderFactory } from '../../../core/factory.js';
9
9
  import { getBestProvider, getAvailableProviders } from '../../../utils/providerUtils.js';
10
+ import { logger } from '../../../utils/logger.js';
10
11
  import { analyzeAIUsageTool, benchmarkProviderPerformanceTool, optimizePromptParametersTool } from './ai-analysis-tools.js';
11
12
  import { generateTestCasesTool, refactorCodeTool, generateDocumentationTool, debugAIOutputTool } from './ai-workflow-tools.js';
12
13
  /**
@@ -68,7 +69,7 @@ aiCoreServer.registerTool({
68
69
  execute: async (params, context) => {
69
70
  const startTime = Date.now();
70
71
  try {
71
- console.log(`[AI-Core] Starting text generation: "${params.prompt.substring(0, 50)}..."`);
72
+ logger.debug(`[AI-Core] Starting text generation: "${params.prompt.substring(0, 50)}..."`);
72
73
  // Use existing AIProviderFactory with best provider selection
73
74
  const selectedProvider = params.provider || getBestProvider(params.provider);
74
75
  const provider = AIProviderFactory.createBestProvider(selectedProvider);
@@ -84,7 +85,7 @@ aiCoreServer.registerTool({
84
85
  throw new Error('AI provider returned null result');
85
86
  }
86
87
  const executionTime = Date.now() - startTime;
87
- console.log(`[AI-Core] Text generation successful in ${executionTime}ms using ${selectedProvider}`);
88
+ logger.debug(`[AI-Core] Text generation successful in ${executionTime}ms using ${selectedProvider}`);
88
89
  return {
89
90
  success: true,
90
91
  data: {
@@ -111,7 +112,7 @@ aiCoreServer.registerTool({
111
112
  catch (error) {
112
113
  const executionTime = Date.now() - startTime;
113
114
  const errorMessage = error instanceof Error ? error.message : String(error);
114
- console.error(`[AI-Core] Text generation failed: ${errorMessage}`);
115
+ logger.debug(`[AI-Core] Text generation failed: ${errorMessage}`);
115
116
  return {
116
117
  success: false,
117
118
  error: errorMessage,
@@ -139,7 +140,7 @@ aiCoreServer.registerTool({
139
140
  execute: async (params, context) => {
140
141
  const startTime = Date.now();
141
142
  try {
142
- console.log(`[AI-Core] Selecting provider with requirements:`, params.requirements);
143
+ logger.debug(`[AI-Core] Selecting provider with requirements:`, params.requirements);
143
144
  // Use existing provider selection logic
144
145
  const availableProviders = getAvailableProviders();
145
146
  const selectedProvider = getBestProvider(params.preferred);
@@ -152,7 +153,7 @@ aiCoreServer.registerTool({
152
153
  });
153
154
  const capabilities = getProviderCapabilities(selectedProvider);
154
155
  const executionTime = Date.now() - startTime;
155
- console.log(`[AI-Core] Selected provider: ${selectedProvider} in ${executionTime}ms`);
156
+ logger.debug(`[AI-Core] Selected provider: ${selectedProvider} in ${executionTime}ms`);
156
157
  return {
157
158
  success: true,
158
159
  data: {
@@ -179,7 +180,7 @@ aiCoreServer.registerTool({
179
180
  catch (error) {
180
181
  const executionTime = Date.now() - startTime;
181
182
  const errorMessage = error instanceof Error ? error.message : String(error);
182
- console.error(`[AI-Core] Provider selection failed: ${errorMessage}`);
183
+ logger.debug(`[AI-Core] Provider selection failed: ${errorMessage}`);
183
184
  return {
184
185
  success: false,
185
186
  error: errorMessage,
@@ -210,7 +211,7 @@ aiCoreServer.registerTool({
210
211
  execute: async (params, context) => {
211
212
  const startTime = Date.now();
212
213
  try {
213
- console.log(`[AI-Core] Checking provider status for: ${params.provider || 'all providers'}`);
214
+ logger.debug(`[AI-Core] Checking provider status for: ${params.provider || 'all providers'}`);
214
215
  const availableProviders = getAvailableProviders();
215
216
  const providerStatuses = [];
216
217
  const providersToCheck = params.provider ? [params.provider] : availableProviders;
@@ -240,7 +241,7 @@ aiCoreServer.registerTool({
240
241
  }
241
242
  }
242
243
  const executionTime = Date.now() - startTime;
243
- console.log(`[AI-Core] Provider status check completed in ${executionTime}ms`);
244
+ logger.debug(`[AI-Core] Provider status check completed in ${executionTime}ms`);
244
245
  return {
245
246
  success: true,
246
247
  data: {
@@ -268,7 +269,7 @@ aiCoreServer.registerTool({
268
269
  catch (error) {
269
270
  const executionTime = Date.now() - startTime;
270
271
  const errorMessage = error instanceof Error ? error.message : String(error);
271
- console.error(`[AI-Core] Provider status check failed: ${errorMessage}`);
272
+ logger.debug(`[AI-Core] Provider status check failed: ${errorMessage}`);
272
273
  return {
273
274
  success: false,
274
275
  error: errorMessage,
@@ -299,4 +300,4 @@ aiCoreServer.registerTool(refactorCodeTool);
299
300
  aiCoreServer.registerTool(generateDocumentationTool);
300
301
  aiCoreServer.registerTool(debugAIOutputTool);
301
302
  // Log successful server creation
302
- console.log('[AI-Core] NeuroLink AI Core Server v1.2.0 created with 10 tools:', Object.keys(aiCoreServer.tools));
303
+ logger.debug('[AI-Core] NeuroLink AI Core Server v1.2.0 created with 10 tools:', Object.keys(aiCoreServer.tools));
package/dist/neurolink.js CHANGED
@@ -6,6 +6,7 @@
6
6
  */
7
7
  import { AIProviderFactory, createBestAIProvider } from './index.js';
8
8
  import { getBestProvider } from './utils/providerUtils.js';
9
+ import { logger } from './utils/logger.js';
9
10
  export class NeuroLink {
10
11
  /**
11
12
  * Generate text using the best available AI provider with automatic fallback
@@ -14,13 +15,13 @@ export class NeuroLink {
14
15
  const startTime = Date.now();
15
16
  const functionTag = 'NeuroLink.generateText';
16
17
  // Define fallback provider priority order
17
- const providerPriority = ['openai', 'vertex', 'bedrock', 'google-ai'];
18
+ const providerPriority = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai'];
18
19
  const requestedProvider = options.provider === 'auto' ? undefined : options.provider;
19
20
  // If specific provider requested, try that first, then fallback to priority order
20
21
  const tryProviders = requestedProvider
21
22
  ? [requestedProvider, ...providerPriority.filter(p => p !== requestedProvider)]
22
23
  : providerPriority;
23
- console.log(`[${functionTag}] Starting text generation with fallback`, {
24
+ logger.debug(`[${functionTag}] Starting text generation with fallback`, {
24
25
  requestedProvider: requestedProvider || 'auto',
25
26
  tryProviders,
26
27
  promptLength: options.prompt.length
@@ -28,7 +29,7 @@ export class NeuroLink {
28
29
  let lastError = null;
29
30
  for (const providerName of tryProviders) {
30
31
  try {
31
- console.log(`[${functionTag}] Attempting provider`, { provider: providerName });
32
+ logger.debug(`[${functionTag}] Attempting provider`, { provider: providerName });
32
33
  const provider = AIProviderFactory.createProvider(providerName);
33
34
  const result = await provider.generateText({
34
35
  prompt: options.prompt,
@@ -40,7 +41,7 @@ export class NeuroLink {
40
41
  throw new Error('No response received from AI provider');
41
42
  }
42
43
  const responseTime = Date.now() - startTime;
43
- console.log(`[${functionTag}] Provider succeeded`, {
44
+ logger.debug(`[${functionTag}] Provider succeeded`, {
44
45
  provider: providerName,
45
46
  responseTime,
46
47
  usage: result.usage
@@ -55,7 +56,7 @@ export class NeuroLink {
55
56
  catch (error) {
56
57
  const errorMessage = error instanceof Error ? error.message : String(error);
57
58
  lastError = error instanceof Error ? error : new Error(errorMessage);
58
- console.warn(`[${functionTag}] Provider failed, trying next`, {
59
+ logger.debug(`[${functionTag}] Provider failed, trying next`, {
59
60
  provider: providerName,
60
61
  error: errorMessage,
61
62
  remainingProviders: tryProviders.slice(tryProviders.indexOf(providerName) + 1)
@@ -65,7 +66,7 @@ export class NeuroLink {
65
66
  }
66
67
  }
67
68
  // All providers failed
68
- console.error(`[${functionTag}] All providers failed`, {
69
+ logger.debug(`[${functionTag}] All providers failed`, {
69
70
  triedProviders: tryProviders,
70
71
  lastError: lastError?.message
71
72
  });
@@ -77,13 +78,13 @@ export class NeuroLink {
77
78
  async generateTextStream(options) {
78
79
  const functionTag = 'NeuroLink.generateTextStream';
79
80
  // Define fallback provider priority order
80
- const providerPriority = ['openai', 'vertex', 'bedrock', 'google-ai'];
81
+ const providerPriority = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai'];
81
82
  const requestedProvider = options.provider === 'auto' ? undefined : options.provider;
82
83
  // If specific provider requested, try that first, then fallback to priority order
83
84
  const tryProviders = requestedProvider
84
85
  ? [requestedProvider, ...providerPriority.filter(p => p !== requestedProvider)]
85
86
  : providerPriority;
86
- console.log(`[${functionTag}] Starting stream generation with fallback`, {
87
+ logger.debug(`[${functionTag}] Starting stream generation with fallback`, {
87
88
  requestedProvider: requestedProvider || 'auto',
88
89
  tryProviders,
89
90
  promptLength: options.prompt.length
@@ -91,7 +92,7 @@ export class NeuroLink {
91
92
  let lastError = null;
92
93
  for (const providerName of tryProviders) {
93
94
  try {
94
- console.log(`[${functionTag}] Attempting provider`, { provider: providerName });
95
+ logger.debug(`[${functionTag}] Attempting provider`, { provider: providerName });
95
96
  const provider = AIProviderFactory.createProvider(providerName);
96
97
  const result = await provider.streamText({
97
98
  prompt: options.prompt,
@@ -102,7 +103,7 @@ export class NeuroLink {
102
103
  if (!result) {
103
104
  throw new Error('No stream response received from AI provider');
104
105
  }
105
- console.log(`[${functionTag}] Provider succeeded`, { provider: providerName });
106
+ logger.debug(`[${functionTag}] Provider succeeded`, { provider: providerName });
106
107
  // Convert the AI SDK stream to our expected format
107
108
  async function* convertStream() {
108
109
  if (result && result.textStream) {
@@ -116,7 +117,7 @@ export class NeuroLink {
116
117
  catch (error) {
117
118
  const errorMessage = error instanceof Error ? error.message : String(error);
118
119
  lastError = error instanceof Error ? error : new Error(errorMessage);
119
- console.warn(`[${functionTag}] Provider failed, trying next`, {
120
+ logger.debug(`[${functionTag}] Provider failed, trying next`, {
120
121
  provider: providerName,
121
122
  error: errorMessage,
122
123
  remainingProviders: tryProviders.slice(tryProviders.indexOf(providerName) + 1)
@@ -126,7 +127,7 @@ export class NeuroLink {
126
127
  }
127
128
  }
128
129
  // All providers failed
129
- console.error(`[${functionTag}] All providers failed`, {
130
+ logger.debug(`[${functionTag}] All providers failed`, {
130
131
  triedProviders: tryProviders,
131
132
  lastError: lastError?.message
132
133
  });
@@ -1,5 +1,6 @@
1
1
  import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
2
2
  import { streamText, generateText, Output } from 'ai';
3
+ import { logger } from '../utils/logger.js';
3
4
  // Default system context
4
5
  const DEFAULT_SYSTEM_CONTEXT = {
5
6
  systemPrompt: 'You are a helpful AI assistant.'
@@ -42,7 +43,7 @@ export class AmazonBedrock {
42
43
  const functionTag = 'AmazonBedrock.constructor';
43
44
  this.modelName = modelName || getBedrockModelId();
44
45
  try {
45
- console.log(`[${functionTag}] Function called`, {
46
+ logger.debug(`[${functionTag}] Function called`, {
46
47
  modelName: this.modelName,
47
48
  envBedrockModel: process.env.BEDROCK_MODEL,
48
49
  envBedrockModelId: process.env.BEDROCK_MODEL_ID,
@@ -54,7 +55,7 @@ export class AmazonBedrock {
54
55
  secretAccessKey: getAWSSecretAccessKey(),
55
56
  region: getAWSRegion()
56
57
  };
57
- console.log(`[${functionTag}] AWS config validation`, {
58
+ logger.debug(`[${functionTag}] AWS config validation`, {
58
59
  hasAccessKeyId: !!awsConfig.accessKeyId,
59
60
  hasSecretAccessKey: !!awsConfig.secretAccessKey,
60
61
  region: awsConfig.region || 'MISSING',
@@ -66,49 +67,49 @@ export class AmazonBedrock {
66
67
  const sessionToken = getAWSSessionToken();
67
68
  if (sessionToken) {
68
69
  awsConfig.sessionToken = sessionToken;
69
- console.log(`[${functionTag}] Session token added`, {
70
+ logger.debug(`[${functionTag}] Session token added`, {
70
71
  environment: 'dev'
71
72
  });
72
73
  }
73
74
  else {
74
- console.warn(`[${functionTag}] Session token missing`, {
75
+ logger.warn(`[${functionTag}] Session token missing`, {
75
76
  environment: 'dev'
76
77
  });
77
78
  }
78
79
  }
79
- console.log(`[${functionTag}] AWS config created`, {
80
+ logger.debug(`[${functionTag}] AWS config created`, {
80
81
  region: awsConfig.region,
81
82
  hasSessionToken: !!awsConfig.sessionToken
82
83
  });
83
- console.log(`[${functionTag}] Bedrock provider creating`, {
84
+ logger.debug(`[${functionTag}] Bedrock provider creating`, {
84
85
  modelName: this.modelName
85
86
  });
86
87
  // Create custom Bedrock provider instance with environment-based configuration
87
88
  this.bedrock = createAmazonBedrock(awsConfig);
88
- console.log(`[${functionTag}] Bedrock provider initialized`, {
89
+ logger.debug(`[${functionTag}] Bedrock provider initialized`, {
89
90
  modelName: this.modelName
90
91
  });
91
- console.log(`[${functionTag}] Model instance creating`, {
92
+ logger.debug(`[${functionTag}] Model instance creating`, {
92
93
  modelName: this.modelName
93
94
  });
94
95
  this.model = this.bedrock(this.modelName);
95
- console.log(`[${functionTag}] Model instance created`, {
96
+ logger.debug(`[${functionTag}] Model instance created`, {
96
97
  modelName: this.modelName
97
98
  });
98
- console.log(`[${functionTag}] Function result`, {
99
+ logger.debug(`[${functionTag}] Function result`, {
99
100
  modelName: this.modelName,
100
101
  region: awsConfig.region,
101
102
  hasSessionToken: !!awsConfig.sessionToken,
102
103
  success: true
103
104
  });
104
- console.log(`[${functionTag}] Initialization completed`, {
105
+ logger.debug(`[${functionTag}] Initialization completed`, {
105
106
  modelName: this.modelName,
106
107
  region: awsConfig.region,
107
108
  hasSessionToken: !!awsConfig.sessionToken
108
109
  });
109
110
  }
110
111
  catch (err) {
111
- console.error(`[${functionTag}] Initialization failed`, {
112
+ logger.error(`[${functionTag}] Initialization failed`, {
112
113
  message: 'Error in initializing Amazon Bedrock',
113
114
  modelName: this.modelName,
114
115
  region: getAWSRegion(),
@@ -130,7 +131,7 @@ export class AmazonBedrock {
130
131
  const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
131
132
  // Use schema from options or fallback parameter
132
133
  const finalSchema = schema || analysisSchema;
133
- console.log(`[${functionTag}] Stream request started`, {
134
+ logger.debug(`[${functionTag}] Stream request started`, {
134
135
  provider,
135
136
  modelName: this.modelName,
136
137
  promptLength: prompt.length,
@@ -147,7 +148,7 @@ export class AmazonBedrock {
147
148
  const error = event.error;
148
149
  const errorMessage = error instanceof Error ? error.message : String(error);
149
150
  const errorStack = error instanceof Error ? error.stack : undefined;
150
- console.error(`[${functionTag}] Stream text error`, {
151
+ logger.error(`[${functionTag}] Stream text error`, {
151
152
  provider,
152
153
  modelName: this.modelName,
153
154
  region: getAWSRegion(),
@@ -158,7 +159,7 @@ export class AmazonBedrock {
158
159
  });
159
160
  },
160
161
  onFinish: (event) => {
161
- console.log(`[${functionTag}] Stream text finished`, {
162
+ logger.debug(`[${functionTag}] Stream text finished`, {
162
163
  provider,
163
164
  modelName: this.modelName,
164
165
  region: getAWSRegion(),
@@ -171,7 +172,7 @@ export class AmazonBedrock {
171
172
  },
172
173
  onChunk: (event) => {
173
174
  chunkCount++;
174
- console.debug(`[${functionTag}] Stream text chunk`, {
175
+ logger.debug(`[${functionTag}] Stream text chunk`, {
175
176
  provider,
176
177
  modelName: this.modelName,
177
178
  chunkNumber: chunkCount,
@@ -185,7 +186,7 @@ export class AmazonBedrock {
185
186
  }
186
187
  // Direct streamText call - let the real error bubble up
187
188
  const result = streamText(streamOptions);
188
- console.log(`[${functionTag}] Stream text call successful`, {
189
+ logger.debug(`[${functionTag}] Stream text call successful`, {
189
190
  provider,
190
191
  modelName: this.modelName,
191
192
  promptLength: prompt.length
@@ -193,7 +194,7 @@ export class AmazonBedrock {
193
194
  return result;
194
195
  }
195
196
  catch (err) {
196
- console.error(`[${functionTag}] Exception`, {
197
+ logger.error(`[${functionTag}] Exception`, {
197
198
  provider,
198
199
  modelName: this.modelName,
199
200
  region: getAWSRegion(),
@@ -214,7 +215,7 @@ export class AmazonBedrock {
214
215
  const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
215
216
  // Use schema from options or fallback parameter
216
217
  const finalSchema = schema || analysisSchema;
217
- console.log(`[${functionTag}] Generate text started`, {
218
+ logger.debug(`[${functionTag}] Generate text started`, {
218
219
  provider,
219
220
  modelName: this.modelName,
220
221
  region: getAWSRegion(),
@@ -233,7 +234,7 @@ export class AmazonBedrock {
233
234
  generateOptions.experimental_output = Output.object({ schema: finalSchema });
234
235
  }
235
236
  const result = await generateText(generateOptions);
236
- console.log(`[${functionTag}] Generate text completed`, {
237
+ logger.debug(`[${functionTag}] Generate text completed`, {
237
238
  provider,
238
239
  modelName: this.modelName,
239
240
  usage: result.usage,
@@ -243,7 +244,7 @@ export class AmazonBedrock {
243
244
  return result;
244
245
  }
245
246
  catch (err) {
246
- console.error(`[${functionTag}] Exception`, {
247
+ logger.error(`[${functionTag}] Exception`, {
247
248
  provider,
248
249
  modelName: this.modelName,
249
250
  message: 'Error in generating text',
@@ -5,6 +5,7 @@
5
5
  * Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, and Claude 3 Opus.
6
6
  */
7
7
  import { AIProviderName } from '../core/types.js';
8
+ import { logger } from '../utils/logger.js';
8
9
  export class AnthropicProvider {
9
10
  name = AIProviderName.ANTHROPIC;
10
11
  apiKey;
@@ -14,7 +15,7 @@ export class AnthropicProvider {
14
15
  this.apiKey = this.getApiKey();
15
16
  this.baseURL = process.env.ANTHROPIC_BASE_URL || 'https://api.anthropic.com';
16
17
  this.defaultModel = process.env.ANTHROPIC_MODEL || 'claude-3-5-sonnet-20241022';
17
- console.log(`[AnthropicProvider] Initialized with model: ${this.defaultModel}`);
18
+ logger.debug(`[AnthropicProvider] Initialized with model: ${this.defaultModel}`);
18
19
  }
19
20
  getApiKey() {
20
21
  const apiKey = process.env.ANTHROPIC_API_KEY;
@@ -34,8 +35,8 @@ export class AnthropicProvider {
34
35
  'anthropic-version': '2023-06-01',
35
36
  'anthropic-dangerous-direct-browser-access': 'true' // Required for browser usage
36
37
  };
37
- console.log(`[AnthropicProvider.makeRequest] ${stream ? 'Streaming' : 'Non-streaming'} request to ${url}`);
38
- console.log(`[AnthropicProvider.makeRequest] Model: ${body.model}, Max tokens: ${body.max_tokens}`);
38
+ logger.debug(`[AnthropicProvider.makeRequest] ${stream ? 'Streaming' : 'Non-streaming'} request to ${url}`);
39
+ logger.debug(`[AnthropicProvider.makeRequest] Model: ${body.model}, Max tokens: ${body.max_tokens}`);
39
40
  const response = await fetch(url, {
40
41
  method: 'POST',
41
42
  headers,
@@ -43,19 +44,19 @@ export class AnthropicProvider {
43
44
  });
44
45
  if (!response.ok) {
45
46
  const errorText = await response.text();
46
- console.error(`[AnthropicProvider.makeRequest] API error ${response.status}: ${errorText}`);
47
+ logger.error(`[AnthropicProvider.makeRequest] API error ${response.status}: ${errorText}`);
47
48
  throw new Error(`Anthropic API error ${response.status}: ${errorText}`);
48
49
  }
49
50
  return response;
50
51
  }
51
52
  async generateText(optionsOrPrompt, schema) {
52
- console.log('[AnthropicProvider.generateText] Starting text generation');
53
+ logger.debug('[AnthropicProvider.generateText] Starting text generation');
53
54
  // Parse parameters with backward compatibility
54
55
  const options = typeof optionsOrPrompt === 'string'
55
56
  ? { prompt: optionsOrPrompt }
56
57
  : optionsOrPrompt;
57
58
  const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
58
- console.log(`[AnthropicProvider.generateText] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}`);
59
+ logger.debug(`[AnthropicProvider.generateText] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}`);
59
60
  const requestBody = {
60
61
  model: this.getModel(),
61
62
  max_tokens: maxTokens,
@@ -71,7 +72,7 @@ export class AnthropicProvider {
71
72
  try {
72
73
  const response = await this.makeRequest('messages', requestBody);
73
74
  const data = await response.json();
74
- console.log(`[AnthropicProvider.generateText] Success. Generated ${data.usage.output_tokens} tokens`);
75
+ logger.debug(`[AnthropicProvider.generateText] Success. Generated ${data.usage.output_tokens} tokens`);
75
76
  const content = data.content.map(block => block.text).join('');
76
77
  return {
77
78
  content,
@@ -86,18 +87,18 @@ export class AnthropicProvider {
86
87
  };
87
88
  }
88
89
  catch (error) {
89
- console.error('[AnthropicProvider.generateText] Error:', error);
90
+ logger.error('[AnthropicProvider.generateText] Error:', error);
90
91
  throw error;
91
92
  }
92
93
  }
93
94
  async streamText(optionsOrPrompt, schema) {
94
- console.log('[AnthropicProvider.streamText] Starting text streaming');
95
+ logger.debug('[AnthropicProvider.streamText] Starting text streaming');
95
96
  // Parse parameters with backward compatibility
96
97
  const options = typeof optionsOrPrompt === 'string'
97
98
  ? { prompt: optionsOrPrompt }
98
99
  : optionsOrPrompt;
99
100
  const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
100
- console.log(`[AnthropicProvider.streamText] Streaming prompt: "${prompt.substring(0, 100)}..."`);
101
+ logger.debug(`[AnthropicProvider.streamText] Streaming prompt: "${prompt.substring(0, 100)}..."`);
101
102
  const requestBody = {
102
103
  model: this.getModel(),
103
104
  max_tokens: maxTokens,
@@ -125,7 +126,7 @@ export class AnthropicProvider {
125
126
  };
126
127
  }
127
128
  catch (error) {
128
- console.error('[AnthropicProvider.streamText] Error:', error);
129
+ logger.error('[AnthropicProvider.streamText] Error:', error);
129
130
  throw error;
130
131
  }
131
132
  }
@@ -156,7 +157,7 @@ export class AnthropicProvider {
156
157
  }
157
158
  }
158
159
  catch (parseError) {
159
- console.warn('[AnthropicProvider.createAsyncIterable] Failed to parse chunk:', parseError);
160
+ logger.warn('[AnthropicProvider.createAsyncIterable] Failed to parse chunk:', parseError);
160
161
  continue;
161
162
  }
162
163
  }
@@ -168,13 +169,13 @@ export class AnthropicProvider {
168
169
  }
169
170
  }
170
171
  async *generateTextStream(optionsOrPrompt) {
171
- console.log('[AnthropicProvider.generateTextStream] Starting text streaming');
172
+ logger.debug('[AnthropicProvider.generateTextStream] Starting text streaming');
172
173
  // Parse parameters with backward compatibility
173
174
  const options = typeof optionsOrPrompt === 'string'
174
175
  ? { prompt: optionsOrPrompt }
175
176
  : optionsOrPrompt;
176
177
  const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
177
- console.log(`[AnthropicProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
178
+ logger.debug(`[AnthropicProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
178
179
  const requestBody = {
179
180
  model: this.getModel(),
180
181
  max_tokens: maxTokens,
@@ -223,7 +224,7 @@ export class AnthropicProvider {
223
224
  }
224
225
  }
225
226
  catch (parseError) {
226
- console.warn('[AnthropicProvider.generateTextStream] Failed to parse chunk:', parseError);
227
+ logger.warn('[AnthropicProvider.generateTextStream] Failed to parse chunk:', parseError);
227
228
  continue;
228
229
  }
229
230
  }
@@ -233,15 +234,15 @@ export class AnthropicProvider {
233
234
  finally {
234
235
  reader.releaseLock();
235
236
  }
236
- console.log('[AnthropicProvider.generateTextStream] Streaming completed');
237
+ logger.debug('[AnthropicProvider.generateTextStream] Streaming completed');
237
238
  }
238
239
  catch (error) {
239
- console.error('[AnthropicProvider.generateTextStream] Error:', error);
240
+ logger.error('[AnthropicProvider.generateTextStream] Error:', error);
240
241
  throw error;
241
242
  }
242
243
  }
243
244
  async testConnection() {
244
- console.log('[AnthropicProvider.testConnection] Testing connection to Anthropic API');
245
+ logger.debug('[AnthropicProvider.testConnection] Testing connection to Anthropic API');
245
246
  const startTime = Date.now();
246
247
  try {
247
248
  await this.generateText({
@@ -249,7 +250,7 @@ export class AnthropicProvider {
249
250
  maxTokens: 5
250
251
  });
251
252
  const responseTime = Date.now() - startTime;
252
- console.log(`[AnthropicProvider.testConnection] Connection test successful (${responseTime}ms)`);
253
+ logger.debug(`[AnthropicProvider.testConnection] Connection test successful (${responseTime}ms)`);
253
254
  return {
254
255
  success: true,
255
256
  responseTime
@@ -257,7 +258,7 @@ export class AnthropicProvider {
257
258
  }
258
259
  catch (error) {
259
260
  const responseTime = Date.now() - startTime;
260
- console.error(`[AnthropicProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
261
+ logger.error(`[AnthropicProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
261
262
  return {
262
263
  success: false,
263
264
  error: error instanceof Error ? error.message : 'Unknown error',