@juspay/neurolink 1.5.1 → 1.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +49 -0
  2. package/README.md +1 -1
  3. package/dist/cli/commands/config.d.ts +35 -35
  4. package/dist/cli/index.js +63 -19
  5. package/dist/core/factory.js +12 -11
  6. package/dist/lib/core/factory.d.ts +40 -0
  7. package/dist/lib/core/factory.js +162 -0
  8. package/dist/lib/core/types.d.ts +111 -0
  9. package/dist/lib/core/types.js +68 -0
  10. package/dist/lib/index.d.ts +56 -0
  11. package/dist/lib/index.js +62 -0
  12. package/dist/lib/mcp/context-manager.d.ts +164 -0
  13. package/dist/lib/mcp/context-manager.js +273 -0
  14. package/dist/lib/mcp/factory.d.ts +144 -0
  15. package/dist/lib/mcp/factory.js +141 -0
  16. package/dist/lib/mcp/orchestrator.d.ts +170 -0
  17. package/dist/lib/mcp/orchestrator.js +372 -0
  18. package/dist/lib/mcp/registry.d.ts +188 -0
  19. package/dist/lib/mcp/registry.js +373 -0
  20. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
  21. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
  22. package/dist/lib/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
  23. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +303 -0
  24. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
  25. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +428 -0
  26. package/dist/lib/neurolink.d.ts +53 -0
  27. package/dist/lib/neurolink.js +155 -0
  28. package/dist/lib/providers/amazonBedrock.d.ts +11 -0
  29. package/dist/lib/providers/amazonBedrock.js +256 -0
  30. package/dist/lib/providers/anthropic.d.ts +34 -0
  31. package/dist/lib/providers/anthropic.js +308 -0
  32. package/dist/lib/providers/azureOpenAI.d.ts +37 -0
  33. package/dist/lib/providers/azureOpenAI.js +339 -0
  34. package/dist/lib/providers/googleAIStudio.d.ts +30 -0
  35. package/dist/lib/providers/googleAIStudio.js +216 -0
  36. package/dist/lib/providers/googleVertexAI.d.ts +30 -0
  37. package/dist/lib/providers/googleVertexAI.js +409 -0
  38. package/dist/lib/providers/index.d.ts +30 -0
  39. package/dist/lib/providers/index.js +25 -0
  40. package/dist/lib/providers/openAI.d.ts +10 -0
  41. package/dist/lib/providers/openAI.js +169 -0
  42. package/dist/lib/utils/logger.d.ts +12 -0
  43. package/dist/lib/utils/logger.js +25 -0
  44. package/dist/lib/utils/providerUtils.d.ts +17 -0
  45. package/dist/lib/utils/providerUtils.js +73 -0
  46. package/dist/mcp/servers/ai-providers/ai-core-server.js +11 -10
  47. package/dist/neurolink.js +13 -12
  48. package/dist/providers/amazonBedrock.js +22 -21
  49. package/dist/providers/anthropic.js +21 -20
  50. package/dist/providers/azureOpenAI.js +21 -20
  51. package/dist/providers/googleAIStudio.js +13 -12
  52. package/dist/providers/googleVertexAI.js +27 -26
  53. package/dist/providers/openAI.js +12 -11
  54. package/dist/utils/logger.d.ts +12 -0
  55. package/dist/utils/logger.js +25 -0
  56. package/dist/utils/providerUtils.d.ts +0 -3
  57. package/dist/utils/providerUtils.js +3 -2
  58. package/package.json +1 -1
package/dist/lib/providers/amazonBedrock.js
@@ -0,0 +1,256 @@
+ import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
+ import { streamText, generateText, Output } from 'ai';
+ import { logger } from '../utils/logger.js';
+ // Default system context
+ const DEFAULT_SYSTEM_CONTEXT = {
+     systemPrompt: 'You are a helpful AI assistant.'
+ };
+ // Configuration helpers
+ const getBedrockModelId = () => {
+     return process.env.BEDROCK_MODEL ||
+         process.env.BEDROCK_MODEL_ID ||
+         'arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0';
+ };
+ const getAWSAccessKeyId = () => {
+     const keyId = process.env.AWS_ACCESS_KEY_ID;
+     if (!keyId) {
+         throw new Error('AWS_ACCESS_KEY_ID environment variable is not set');
+     }
+     return keyId;
+ };
+ const getAWSSecretAccessKey = () => {
+     const secretKey = process.env.AWS_SECRET_ACCESS_KEY;
+     if (!secretKey) {
+         throw new Error('AWS_SECRET_ACCESS_KEY environment variable is not set');
+     }
+     return secretKey;
+ };
+ const getAWSRegion = () => {
+     return process.env.AWS_REGION || 'us-east-2';
+ };
+ const getAWSSessionToken = () => {
+     return process.env.AWS_SESSION_TOKEN;
+ };
+ const getAppEnvironment = () => {
+     return process.env.PUBLIC_APP_ENVIRONMENT || 'dev';
+ };
+ // Amazon Bedrock class with enhanced error handling using createAmazonBedrock
+ export class AmazonBedrock {
+     modelName;
+     model;
+     bedrock;
+     constructor(modelName) {
+         const functionTag = 'AmazonBedrock.constructor';
+         this.modelName = modelName || getBedrockModelId();
+         try {
+             logger.debug(`[${functionTag}] Function called`, {
+                 modelName: this.modelName,
+                 envBedrockModel: process.env.BEDROCK_MODEL,
+                 envBedrockModelId: process.env.BEDROCK_MODEL_ID,
+                 fallbackModel: 'arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0'
+             });
+             // Configure AWS credentials for custom Bedrock instance
+             const awsConfig = {
+                 accessKeyId: getAWSAccessKeyId(),
+                 secretAccessKey: getAWSSecretAccessKey(),
+                 region: getAWSRegion()
+             };
+             logger.debug(`[${functionTag}] AWS config validation`, {
+                 hasAccessKeyId: !!awsConfig.accessKeyId,
+                 hasSecretAccessKey: !!awsConfig.secretAccessKey,
+                 region: awsConfig.region || 'MISSING',
+                 accessKeyIdLength: awsConfig.accessKeyId?.length || 0,
+                 hasSessionToken: !!process.env.AWS_SESSION_TOKEN
+             });
+             // Add session token for development environment
+             if (getAppEnvironment() === 'dev') {
+                 const sessionToken = getAWSSessionToken();
+                 if (sessionToken) {
+                     awsConfig.sessionToken = sessionToken;
+                     logger.debug(`[${functionTag}] Session token added`, {
+                         environment: 'dev'
+                     });
+                 }
+                 else {
+                     logger.warn(`[${functionTag}] Session token missing`, {
+                         environment: 'dev'
+                     });
+                 }
+             }
+             logger.debug(`[${functionTag}] AWS config created`, {
+                 region: awsConfig.region,
+                 hasSessionToken: !!awsConfig.sessionToken
+             });
+             logger.debug(`[${functionTag}] Bedrock provider creating`, {
+                 modelName: this.modelName
+             });
+             // Create custom Bedrock provider instance with environment-based configuration
+             this.bedrock = createAmazonBedrock(awsConfig);
+             logger.debug(`[${functionTag}] Bedrock provider initialized`, {
+                 modelName: this.modelName
+             });
+             logger.debug(`[${functionTag}] Model instance creating`, {
+                 modelName: this.modelName
+             });
+             this.model = this.bedrock(this.modelName);
+             logger.debug(`[${functionTag}] Model instance created`, {
+                 modelName: this.modelName
+             });
+             logger.debug(`[${functionTag}] Function result`, {
+                 modelName: this.modelName,
+                 region: awsConfig.region,
+                 hasSessionToken: !!awsConfig.sessionToken,
+                 success: true
+             });
+             logger.debug(`[${functionTag}] Initialization completed`, {
+                 modelName: this.modelName,
+                 region: awsConfig.region,
+                 hasSessionToken: !!awsConfig.sessionToken
+             });
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Initialization failed`, {
+                 message: 'Error in initializing Amazon Bedrock',
+                 modelName: this.modelName,
+                 region: getAWSRegion(),
+                 error: err instanceof Error ? err.message : String(err),
+                 stack: err instanceof Error ? err.stack : undefined
+             });
+             throw err;
+         }
+     }
+     async streamText(optionsOrPrompt, analysisSchema) {
+         const functionTag = 'AmazonBedrock.streamText';
+         const provider = 'bedrock';
+         let chunkCount = 0;
+         try {
+             // Parse parameters - support both string and options object
+             const options = typeof optionsOrPrompt === 'string'
+                 ? { prompt: optionsOrPrompt }
+                 : optionsOrPrompt;
+             const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+             // Use schema from options or fallback parameter
+             const finalSchema = schema || analysisSchema;
+             logger.debug(`[${functionTag}] Stream request started`, {
+                 provider,
+                 modelName: this.modelName,
+                 promptLength: prompt.length,
+                 temperature,
+                 maxTokens
+             });
+             const streamOptions = {
+                 model: this.model,
+                 prompt: prompt,
+                 system: systemPrompt,
+                 temperature,
+                 maxTokens,
+                 onError: (event) => {
+                     const error = event.error;
+                     const errorMessage = error instanceof Error ? error.message : String(error);
+                     const errorStack = error instanceof Error ? error.stack : undefined;
+                     logger.error(`[${functionTag}] Stream text error`, {
+                         provider,
+                         modelName: this.modelName,
+                         region: getAWSRegion(),
+                         error: errorMessage,
+                         stack: errorStack,
+                         promptLength: prompt.length,
+                         chunkCount
+                     });
+                 },
+                 onFinish: (event) => {
+                     logger.debug(`[${functionTag}] Stream text finished`, {
+                         provider,
+                         modelName: this.modelName,
+                         region: getAWSRegion(),
+                         finishReason: event.finishReason,
+                         usage: event.usage,
+                         totalChunks: chunkCount,
+                         promptLength: prompt.length,
+                         responseLength: event.text?.length || 0
+                     });
+                 },
+                 onChunk: (event) => {
+                     chunkCount++;
+                     logger.debug(`[${functionTag}] Stream text chunk`, {
+                         provider,
+                         modelName: this.modelName,
+                         chunkNumber: chunkCount,
+                         chunkLength: event.chunk.text?.length || 0,
+                         chunkType: event.chunk.type
+                     });
+                 }
+             };
+             if (finalSchema) {
+                 streamOptions.experimental_output = Output.object({ schema: finalSchema });
+             }
+             // Direct streamText call - let the real error bubble up
+             const result = streamText(streamOptions);
+             logger.debug(`[${functionTag}] Stream text call successful`, {
+                 provider,
+                 modelName: this.modelName,
+                 promptLength: prompt.length
+             });
+             return result;
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Exception`, {
+                 provider,
+                 modelName: this.modelName,
+                 region: getAWSRegion(),
+                 message: 'Error in streaming text',
+                 err: String(err)
+             });
+             throw err; // Re-throw error to trigger fallback
+         }
+     }
+     async generateText(optionsOrPrompt, analysisSchema) {
+         const functionTag = 'AmazonBedrock.generateText';
+         const provider = 'bedrock';
+         try {
+             // Parse parameters - support both string and options object
+             const options = typeof optionsOrPrompt === 'string'
+                 ? { prompt: optionsOrPrompt }
+                 : optionsOrPrompt;
+             const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+             // Use schema from options or fallback parameter
+             const finalSchema = schema || analysisSchema;
+             logger.debug(`[${functionTag}] Generate text started`, {
+                 provider,
+                 modelName: this.modelName,
+                 region: getAWSRegion(),
+                 promptLength: prompt.length,
+                 temperature,
+                 maxTokens
+             });
+             const generateOptions = {
+                 model: this.model,
+                 prompt: prompt,
+                 system: systemPrompt,
+                 temperature,
+                 maxTokens
+             };
+             if (finalSchema) {
+                 generateOptions.experimental_output = Output.object({ schema: finalSchema });
+             }
+             const result = await generateText(generateOptions);
+             logger.debug(`[${functionTag}] Generate text completed`, {
+                 provider,
+                 modelName: this.modelName,
+                 usage: result.usage,
+                 finishReason: result.finishReason,
+                 responseLength: result.text?.length || 0
+             });
+             return result;
+         }
+         catch (err) {
+             logger.error(`[${functionTag}] Exception`, {
+                 provider,
+                 modelName: this.modelName,
+                 message: 'Error in generating text',
+                 err: String(err)
+             });
+             throw err; // Re-throw error to trigger fallback instead of returning null
+         }
+     }
+ }
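
For orientation, a minimal consumption sketch of the AmazonBedrock class added above. This is not part of the diff: the deep import path is an assumption (the package may also re-export the class from its main entry), and it presumes AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are set, since the constructor reads those variables and throws otherwise.

// Hypothetical usage sketch; the import path is assumed, not taken from the diff.
import { AmazonBedrock } from '@juspay/neurolink/dist/lib/providers/amazonBedrock.js';

// The model argument is optional and falls back to BEDROCK_MODEL / BEDROCK_MODEL_ID,
// then to the hard-coded inference-profile ARN shown above.
const bedrock = new AmazonBedrock();

// generateText accepts either a bare prompt string or an options object.
const result = await bedrock.generateText({
    prompt: 'Summarize this release in one sentence.',
    temperature: 0.2, // defaults to 0.7
    maxTokens: 200    // defaults to 500
});
console.log(result.text); // the Vercel AI SDK generateText result object
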
package/dist/lib/providers/anthropic.d.ts
@@ -0,0 +1,34 @@
+ /**
+  * Anthropic AI Provider (Direct API)
+  *
+  * Direct integration with Anthropic's Claude models via their native API.
+  * Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, and Claude 3 Opus.
+  */
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+ import { AIProviderName } from '../core/types.js';
+ export declare class AnthropicProvider implements AIProvider {
+     readonly name: AIProviderName;
+     private apiKey;
+     private baseURL;
+     private defaultModel;
+     constructor();
+     private getApiKey;
+     private getModel;
+     private makeRequest;
+     generateText(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
+     streamText(optionsOrPrompt: StreamTextOptions | string, schema?: any): Promise<any>;
+     private createAsyncIterable;
+     generateTextStream(optionsOrPrompt: StreamTextOptions | string): AsyncGenerator<any, void, unknown>;
+     testConnection(): Promise<{
+         success: boolean;
+         error?: string;
+         responseTime?: number;
+     }>;
+     isConfigured(): boolean;
+     getRequiredConfig(): string[];
+     getOptionalConfig(): string[];
+     getModels(): string[];
+     supportsStreaming(): boolean;
+     supportsSchema(): boolean;
+     getCapabilities(): string[];
+ }
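
Because AnthropicProvider implements the shared AIProvider interface declared above, callers can probe configuration and connectivity generically. A hedged sketch (import path assumed; method shapes taken from the declaration):

// Hypothetical usage sketch against the declared AIProvider surface.
import { AnthropicProvider } from '@juspay/neurolink/dist/lib/providers/anthropic.js';

const provider = new AnthropicProvider(); // note: the constructor throws if ANTHROPIC_API_KEY is unset
console.log(provider.getRequiredConfig()); // ['ANTHROPIC_API_KEY']

const check = await provider.testConnection(); // issues a tiny test generation
console.log(check.success
    ? `Reachable in ${check.responseTime}ms`
    : `Failed: ${check.error}`);
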
package/dist/lib/providers/anthropic.js
@@ -0,0 +1,308 @@
+ /**
+  * Anthropic AI Provider (Direct API)
+  *
+  * Direct integration with Anthropic's Claude models via their native API.
+  * Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, and Claude 3 Opus.
+  */
+ import { AIProviderName } from '../core/types.js';
+ import { logger } from '../utils/logger.js';
+ export class AnthropicProvider {
+     name = AIProviderName.ANTHROPIC;
+     apiKey;
+     baseURL;
+     defaultModel;
+     constructor() {
+         this.apiKey = this.getApiKey();
+         this.baseURL = process.env.ANTHROPIC_BASE_URL || 'https://api.anthropic.com';
+         this.defaultModel = process.env.ANTHROPIC_MODEL || 'claude-3-5-sonnet-20241022';
+         logger.debug(`[AnthropicProvider] Initialized with model: ${this.defaultModel}`);
+     }
+     getApiKey() {
+         const apiKey = process.env.ANTHROPIC_API_KEY;
+         if (!apiKey) {
+             throw new Error('ANTHROPIC_API_KEY environment variable is required');
+         }
+         return apiKey;
+     }
+     getModel() {
+         return this.defaultModel;
+     }
+     async makeRequest(endpoint, body, stream = false) {
+         const url = `${this.baseURL}/v1/${endpoint}`;
+         const headers = {
+             'Content-Type': 'application/json',
+             'x-api-key': this.apiKey,
+             'anthropic-version': '2023-06-01',
+             'anthropic-dangerous-direct-browser-access': 'true' // Required for browser usage
+         };
+         logger.debug(`[AnthropicProvider.makeRequest] ${stream ? 'Streaming' : 'Non-streaming'} request to ${url}`);
+         logger.debug(`[AnthropicProvider.makeRequest] Model: ${body.model}, Max tokens: ${body.max_tokens}`);
+         const response = await fetch(url, {
+             method: 'POST',
+             headers,
+             body: JSON.stringify(body)
+         });
+         if (!response.ok) {
+             const errorText = await response.text();
+             logger.error(`[AnthropicProvider.makeRequest] API error ${response.status}: ${errorText}`);
+             throw new Error(`Anthropic API error ${response.status}: ${errorText}`);
+         }
+         return response;
+     }
+     async generateText(optionsOrPrompt, schema) {
+         logger.debug('[AnthropicProvider.generateText] Starting text generation');
+         // Parse parameters with backward compatibility
+         const options = typeof optionsOrPrompt === 'string'
+             ? { prompt: optionsOrPrompt }
+             : optionsOrPrompt;
+         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
+         logger.debug(`[AnthropicProvider.generateText] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}`);
+         const requestBody = {
+             model: this.getModel(),
+             max_tokens: maxTokens,
+             messages: [
+                 {
+                     role: 'user',
+                     content: prompt
+                 }
+             ],
+             temperature,
+             system: systemPrompt
+         };
+         try {
+             const response = await this.makeRequest('messages', requestBody);
+             const data = await response.json();
+             logger.debug(`[AnthropicProvider.generateText] Success. Generated ${data.usage.output_tokens} tokens`);
+             const content = data.content.map(block => block.text).join('');
+             return {
+                 content,
+                 provider: this.name,
+                 model: data.model,
+                 usage: {
+                     promptTokens: data.usage.input_tokens,
+                     completionTokens: data.usage.output_tokens,
+                     totalTokens: data.usage.input_tokens + data.usage.output_tokens
+                 },
+                 finishReason: data.stop_reason
+             };
+         }
+         catch (error) {
+             logger.error('[AnthropicProvider.generateText] Error:', error);
+             throw error;
+         }
+     }
+     async streamText(optionsOrPrompt, schema) {
+         logger.debug('[AnthropicProvider.streamText] Starting text streaming');
+         // Parse parameters with backward compatibility
+         const options = typeof optionsOrPrompt === 'string'
+             ? { prompt: optionsOrPrompt }
+             : optionsOrPrompt;
+         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
+         logger.debug(`[AnthropicProvider.streamText] Streaming prompt: "${prompt.substring(0, 100)}..."`);
+         const requestBody = {
+             model: this.getModel(),
+             max_tokens: maxTokens,
+             messages: [
+                 {
+                     role: 'user',
+                     content: prompt
+                 }
+             ],
+             temperature,
+             system: systemPrompt,
+             stream: true
+         };
+         try {
+             const response = await this.makeRequest('messages', requestBody, true);
+             if (!response.body) {
+                 throw new Error('No response body received');
+             }
+             // Return a StreamTextResult-like object
+             return {
+                 textStream: this.createAsyncIterable(response.body),
+                 text: '',
+                 usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
+                 finishReason: 'end_turn'
+             };
+         }
+         catch (error) {
+             logger.error('[AnthropicProvider.streamText] Error:', error);
+             throw error;
+         }
+     }
+     async *createAsyncIterable(body) {
+         const reader = body.getReader();
+         const decoder = new TextDecoder();
+         let buffer = '';
+         try {
+             while (true) {
+                 const { done, value } = await reader.read();
+                 if (done)
+                     break;
+                 buffer += decoder.decode(value, { stream: true });
+                 const lines = buffer.split('\n');
+                 buffer = lines.pop() || '';
+                 for (const line of lines) {
+                     if (line.trim() === '')
+                         continue;
+                     if (line.startsWith('data: ')) {
+                         const data = line.slice(6);
+                         if (data.trim() === '[DONE]')
+                             continue;
+                         try {
+                             const chunk = JSON.parse(data);
+                             // Extract text content from different chunk types
+                             if (chunk.type === 'content_block_delta' && chunk.delta?.text) {
+                                 yield chunk.delta.text;
+                             }
+                         }
+                         catch (parseError) {
+                             logger.warn('[AnthropicProvider.createAsyncIterable] Failed to parse chunk:', parseError);
+                             continue;
+                         }
+                     }
+                 }
+             }
+         }
+         finally {
+             reader.releaseLock();
+         }
+     }
+     async *generateTextStream(optionsOrPrompt) {
+         logger.debug('[AnthropicProvider.generateTextStream] Starting text streaming');
+         // Parse parameters with backward compatibility
+         const options = typeof optionsOrPrompt === 'string'
+             ? { prompt: optionsOrPrompt }
+             : optionsOrPrompt;
+         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
+         logger.debug(`[AnthropicProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
+         const requestBody = {
+             model: this.getModel(),
+             max_tokens: maxTokens,
+             messages: [
+                 {
+                     role: 'user',
+                     content: prompt
+                 }
+             ],
+             temperature,
+             system: systemPrompt,
+             stream: true
+         };
+         try {
+             const response = await this.makeRequest('messages', requestBody, true);
+             if (!response.body) {
+                 throw new Error('No response body received');
+             }
+             const reader = response.body.getReader();
+             const decoder = new TextDecoder();
+             let buffer = '';
+             try {
+                 while (true) {
+                     const { done, value } = await reader.read();
+                     if (done)
+                         break;
+                     buffer += decoder.decode(value, { stream: true });
+                     const lines = buffer.split('\n');
+                     buffer = lines.pop() || '';
+                     for (const line of lines) {
+                         if (line.trim() === '')
+                             continue;
+                         if (line.startsWith('data: ')) {
+                             const data = line.slice(6);
+                             if (data.trim() === '[DONE]')
+                                 continue;
+                             try {
+                                 const chunk = JSON.parse(data);
+                                 // Extract text content from different chunk types
+                                 if (chunk.type === 'content_block_delta' && chunk.delta?.text) {
+                                     yield {
+                                         content: chunk.delta.text,
+                                         provider: this.name,
+                                         model: this.getModel()
+                                     };
+                                 }
+                             }
+                             catch (parseError) {
+                                 logger.warn('[AnthropicProvider.generateTextStream] Failed to parse chunk:', parseError);
+                                 continue;
+                             }
+                         }
+                     }
+                 }
+             }
+             finally {
+                 reader.releaseLock();
+             }
+             logger.debug('[AnthropicProvider.generateTextStream] Streaming completed');
+         }
+         catch (error) {
+             logger.error('[AnthropicProvider.generateTextStream] Error:', error);
+             throw error;
+         }
+     }
+     async testConnection() {
+         logger.debug('[AnthropicProvider.testConnection] Testing connection to Anthropic API');
+         const startTime = Date.now();
+         try {
+             await this.generateText({
+                 prompt: 'Hello',
+                 maxTokens: 5
+             });
+             const responseTime = Date.now() - startTime;
+             logger.debug(`[AnthropicProvider.testConnection] Connection test successful (${responseTime}ms)`);
+             return {
+                 success: true,
+                 responseTime
+             };
+         }
+         catch (error) {
+             const responseTime = Date.now() - startTime;
+             logger.error(`[AnthropicProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
+             return {
+                 success: false,
+                 error: error instanceof Error ? error.message : 'Unknown error',
+                 responseTime
+             };
+         }
+     }
+     isConfigured() {
+         try {
+             this.getApiKey();
+             return true;
+         }
+         catch {
+             return false;
+         }
+     }
+     getRequiredConfig() {
+         return ['ANTHROPIC_API_KEY'];
+     }
+     getOptionalConfig() {
+         return ['ANTHROPIC_MODEL', 'ANTHROPIC_BASE_URL'];
+     }
+     getModels() {
+         return [
+             'claude-3-5-sonnet-20241022',
+             'claude-3-5-haiku-20241022',
+             'claude-3-opus-20240229',
+             'claude-3-sonnet-20240229',
+             'claude-3-haiku-20240307'
+         ];
+     }
+     supportsStreaming() {
+         return true;
+     }
+     supportsSchema() {
+         return false; // Anthropic doesn't have native JSON schema support like OpenAI
+     }
+     getCapabilities() {
+         return [
+             'text-generation',
+             'streaming',
+             'conversation',
+             'system-prompts',
+             'long-context' // Claude models support up to 200k tokens
+         ];
+     }
+ }
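
The two streaming paths above differ in yield shape: createAsyncIterable (which backs the textStream property returned by streamText) yields bare text deltas, while generateTextStream yields { content, provider, model } objects. A hedged consumption sketch, with the import path assumed:

// Hypothetical usage sketch of both streaming surfaces.
import { AnthropicProvider } from '@juspay/neurolink/dist/lib/providers/anthropic.js';

const claude = new AnthropicProvider();

// streamText: iterate the textStream property for raw string deltas.
const stream = await claude.streamText('Write a haiku about diffs');
for await (const delta of stream.textStream) {
    process.stdout.write(delta);
}

// generateTextStream: an async generator yielding chunk objects.
for await (const chunk of claude.generateTextStream('Write a haiku about diffs')) {
    process.stdout.write(chunk.content);
}
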
package/dist/lib/providers/azureOpenAI.d.ts
@@ -0,0 +1,37 @@
+ /**
+  * Azure OpenAI Provider
+  *
+  * Enterprise-grade OpenAI integration through Microsoft Azure.
+  * Supports all OpenAI models with enhanced security and compliance.
+  */
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+ import { AIProviderName } from '../core/types.js';
+ export declare class AzureOpenAIProvider implements AIProvider {
+     readonly name: AIProviderName;
+     private apiKey;
+     private endpoint;
+     private deploymentId;
+     private apiVersion;
+     constructor();
+     private getApiKey;
+     private getEndpoint;
+     private getDeploymentId;
+     private getApiUrl;
+     private makeRequest;
+     generateText(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
+     streamText(optionsOrPrompt: StreamTextOptions | string, schema?: any): Promise<any>;
+     private createAsyncIterable;
+     generateTextStream(optionsOrPrompt: StreamTextOptions | string): AsyncGenerator<any, void, unknown>;
+     testConnection(): Promise<{
+         success: boolean;
+         error?: string;
+         responseTime?: number;
+     }>;
+     isConfigured(): boolean;
+     getRequiredConfig(): string[];
+     getOptionalConfig(): string[];
+     getModels(): string[];
+     supportsStreaming(): boolean;
+     supportsSchema(): boolean;
+     getCapabilities(): string[];
+ }
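
The Azure declaration mirrors the Anthropic one, so the providers are interchangeable behind the AIProvider interface. A hedged sketch of provider-agnostic selection (import paths assumed; the Azure implementation and its environment variables are not shown in this section, but getRequiredConfig() reports them at runtime):

// Hypothetical sketch: pick the first provider whose configuration is present.
import { AzureOpenAIProvider } from '@juspay/neurolink/dist/lib/providers/azureOpenAI.js';
import { AnthropicProvider } from '@juspay/neurolink/dist/lib/providers/anthropic.js';

function pickProvider() {
    for (const Ctor of [AzureOpenAIProvider, AnthropicProvider]) {
        try {
            // Constructors may throw when unconfigured, hence the try/catch per candidate.
            const candidate = new Ctor();
            if (candidate.isConfigured()) {
                return candidate;
            }
        }
        catch {
            // Missing config for this candidate; try the next one.
        }
    }
    throw new Error('No configured AI provider found');
}

const provider = pickProvider();
console.log(`Using ${provider.name}; required config: ${provider.getRequiredConfig().join(', ')}`);
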