@juspay/neurolink 1.2.3 → 1.3.0
This diff shows the published contents of the two package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
- package/CHANGELOG.md +108 -0
- package/README.md +213 -1138
- package/dist/cli/commands/config.d.ts +373 -0
- package/dist/cli/commands/config.js +532 -0
- package/dist/cli/commands/mcp.d.ts +7 -0
- package/dist/cli/commands/mcp.js +434 -0
- package/dist/cli/index.d.ts +9 -0
- package/dist/cli/index.js +451 -169
- package/dist/core/factory.js +10 -2
- package/dist/core/types.d.ts +3 -1
- package/dist/core/types.js +2 -0
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/mcp/context-manager.d.ts +164 -0
- package/dist/mcp/context-manager.js +273 -0
- package/dist/mcp/factory.d.ts +144 -0
- package/dist/mcp/factory.js +141 -0
- package/dist/mcp/orchestrator.d.ts +170 -0
- package/dist/mcp/orchestrator.js +372 -0
- package/dist/mcp/registry.d.ts +188 -0
- package/dist/mcp/registry.js +373 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +280 -0
- package/dist/neurolink.d.ts +2 -2
- package/dist/neurolink.js +1 -1
- package/dist/providers/anthropic.d.ts +34 -0
- package/dist/providers/anthropic.js +307 -0
- package/dist/providers/azureOpenAI.d.ts +37 -0
- package/dist/providers/azureOpenAI.js +338 -0
- package/dist/providers/index.d.ts +4 -0
- package/dist/providers/index.js +5 -1
- package/dist/utils/providerUtils.js +8 -2
- package/package.json +163 -97
package/dist/mcp/servers/ai-providers/ai-core-server.js
ADDED

@@ -0,0 +1,280 @@
+/**
+ * NeuroLink AI Core Server
+ * Wraps existing AI provider functionality as MCP tools for orchestration
+ * Integrates AIProviderFactory with Factory-First MCP architecture
+ */
+import { z } from 'zod';
+import { createMCPServer } from '../../factory.js';
+import { AIProviderFactory } from '../../../core/factory.js';
+import { getBestProvider, getAvailableProviders } from '../../../utils/providerUtils.js';
+/**
+ * AI Core Server - Central hub for AI provider tools
+ * Provides text generation, provider selection, and AI capabilities
+ */
+export const aiCoreServer = createMCPServer({
+    id: 'neurolink-ai-core',
+    title: 'NeuroLink AI Core',
+    description: 'Core AI provider tools with automatic fallback and orchestration',
+    category: 'ai-providers',
+    version: '1.0.0',
+    capabilities: [
+        'text-generation',
+        'provider-selection',
+        'automatic-fallback',
+        'usage-tracking',
+        'multi-provider-support'
+    ]
+});
+/**
+ * Text Generation Input Schema
+ */
+const TextGenerationSchema = z.object({
+    prompt: z.string().min(1, 'Prompt is required'),
+    provider: z.enum(['openai', 'bedrock', 'vertex', 'anthropic']).optional(),
+    model: z.string().optional(),
+    temperature: z.number().min(0).max(2).optional(),
+    maxTokens: z.number().positive().optional(),
+    systemPrompt: z.string().optional()
+});
+/**
+ * Provider Selection Input Schema
+ */
+const ProviderSelectionSchema = z.object({
+    preferred: z.string().optional(),
+    requirements: z.object({
+        multimodal: z.boolean().optional(),
+        streaming: z.boolean().optional(),
+        maxTokens: z.number().optional(),
+        costEfficient: z.boolean().optional()
+    }).optional()
+});
+/**
+ * Register Text Generation Tool
+ * Core tool that leverages existing AIProviderFactory for text generation
+ */
+aiCoreServer.registerTool({
+    name: 'generate-text',
+    description: 'Generate text using AI providers with automatic fallback and provider selection',
+    category: 'text-generation',
+    inputSchema: TextGenerationSchema,
+    isImplemented: true,
+    execute: async (params, context) => {
+        const startTime = Date.now();
+        try {
+            console.log(`[AI-Core] Starting text generation: "${params.prompt.substring(0, 50)}..."`);
+            // Use existing AIProviderFactory with best provider selection
+            const selectedProvider = params.provider || getBestProvider(params.provider);
+            const provider = AIProviderFactory.createBestProvider(selectedProvider);
+            // Generate text using existing NeuroLink patterns
+            const result = await provider.generateText({
+                prompt: params.prompt,
+                model: params.model,
+                temperature: params.temperature,
+                maxTokens: params.maxTokens,
+                systemPrompt: params.systemPrompt
+            });
+            if (!result) {
+                throw new Error('AI provider returned null result');
+            }
+            const executionTime = Date.now() - startTime;
+            console.log(`[AI-Core] Text generation successful in ${executionTime}ms using ${selectedProvider}`);
+            return {
+                success: true,
+                data: {
+                    text: result.text,
+                    model: params.model || 'default',
+                    provider: selectedProvider,
+                    generatedAt: new Date().toISOString()
+                },
+                usage: {
+                    tokens: result.usage?.totalTokens,
+                    provider: selectedProvider,
+                    model: params.model || 'default',
+                    executionTime
+                },
+                metadata: {
+                    toolName: 'generate-text',
+                    serverId: 'neurolink-ai-core',
+                    sessionId: context.sessionId,
+                    timestamp: Date.now(),
+                    executionTime
+                }
+            };
+        }
+        catch (error) {
+            const executionTime = Date.now() - startTime;
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            console.error(`[AI-Core] Text generation failed: ${errorMessage}`);
+            return {
+                success: false,
+                error: errorMessage,
+                metadata: {
+                    toolName: 'generate-text',
+                    serverId: 'neurolink-ai-core',
+                    sessionId: context.sessionId,
+                    timestamp: Date.now(),
+                    executionTime
+                }
+            };
+        }
+    }
+});
+/**
+ * Register Provider Selection Tool
+ * Intelligent provider selection based on requirements and availability
+ */
+aiCoreServer.registerTool({
+    name: 'select-provider',
+    description: 'Select the best available AI provider based on requirements and availability',
+    category: 'provider-management',
+    inputSchema: ProviderSelectionSchema,
+    isImplemented: true,
+    execute: async (params, context) => {
+        const startTime = Date.now();
+        try {
+            console.log(`[AI-Core] Selecting provider with requirements:`, params.requirements);
+            // Use existing provider selection logic
+            const availableProviders = getAvailableProviders();
+            const selectedProvider = getBestProvider(params.preferred);
+            // Get provider capabilities (mock for now, can be enhanced)
+            const getProviderCapabilities = (provider) => ({
+                multimodal: provider === 'openai' || provider === 'vertex',
+                streaming: provider === 'openai' || provider === 'anthropic',
+                maxTokens: provider === 'anthropic' ? 100000 : 4000,
+                costEfficient: provider === 'openai' || provider === 'vertex'
+            });
+            const capabilities = getProviderCapabilities(selectedProvider);
+            const executionTime = Date.now() - startTime;
+            console.log(`[AI-Core] Selected provider: ${selectedProvider} in ${executionTime}ms`);
+            return {
+                success: true,
+                data: {
+                    provider: selectedProvider,
+                    available: availableProviders,
+                    capabilities,
+                    reason: params.preferred
+                        ? `Preferred provider ${params.preferred} selected`
+                        : 'Best available provider selected',
+                    selectedAt: new Date().toISOString()
+                },
+                usage: {
+                    executionTime
+                },
+                metadata: {
+                    toolName: 'select-provider',
+                    serverId: 'neurolink-ai-core',
+                    sessionId: context.sessionId,
+                    timestamp: Date.now(),
+                    executionTime
+                }
+            };
+        }
+        catch (error) {
+            const executionTime = Date.now() - startTime;
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            console.error(`[AI-Core] Provider selection failed: ${errorMessage}`);
+            return {
+                success: false,
+                error: errorMessage,
+                metadata: {
+                    toolName: 'select-provider',
+                    serverId: 'neurolink-ai-core',
+                    sessionId: context.sessionId,
+                    timestamp: Date.now(),
+                    executionTime
+                }
+            };
+        }
+    }
+});
+/**
+ * Register Provider Status Tool
+ * Check health and availability of AI providers
+ */
+aiCoreServer.registerTool({
+    name: 'check-provider-status',
+    description: 'Check the health and availability status of AI providers',
+    category: 'provider-management',
+    inputSchema: z.object({
+        provider: z.string().optional(),
+        includeCapabilities: z.boolean().default(true)
+    }),
+    isImplemented: true,
+    execute: async (params, context) => {
+        const startTime = Date.now();
+        try {
+            console.log(`[AI-Core] Checking provider status for: ${params.provider || 'all providers'}`);
+            const availableProviders = getAvailableProviders();
+            const providerStatuses = [];
+            const providersToCheck = params.provider ? [params.provider] : availableProviders;
+            for (const provider of providersToCheck) {
+                try {
+                    // Quick health check (can be enhanced with actual API calls)
+                    const isAvailable = availableProviders.includes(provider);
+                    providerStatuses.push({
+                        provider,
+                        status: isAvailable ? 'available' : 'unavailable',
+                        capabilities: params.includeCapabilities ? {
+                            textGeneration: true,
+                            multimodal: provider === 'openai' || provider === 'vertex',
+                            streaming: provider === 'openai' || provider === 'anthropic',
+                            maxTokens: provider === 'anthropic' ? 100000 : 4000
+                        } : undefined,
+                        lastChecked: new Date().toISOString()
+                    });
+                }
+                catch (error) {
+                    providerStatuses.push({
+                        provider,
+                        status: 'error',
+                        error: error instanceof Error ? error.message : String(error),
+                        lastChecked: new Date().toISOString()
+                    });
+                }
+            }
+            const executionTime = Date.now() - startTime;
+            console.log(`[AI-Core] Provider status check completed in ${executionTime}ms`);
+            return {
+                success: true,
+                data: {
+                    providers: providerStatuses,
+                    summary: {
+                        total: providerStatuses.length,
+                        available: providerStatuses.filter(p => p.status === 'available').length,
+                        unavailable: providerStatuses.filter(p => p.status === 'unavailable').length,
+                        errors: providerStatuses.filter(p => p.status === 'error').length
+                    },
+                    checkedAt: new Date().toISOString()
+                },
+                usage: {
+                    executionTime
+                },
+                metadata: {
+                    toolName: 'check-provider-status',
+                    serverId: 'neurolink-ai-core',
+                    sessionId: context.sessionId,
+                    timestamp: Date.now(),
+                    executionTime
+                }
+            };
+        }
+        catch (error) {
+            const executionTime = Date.now() - startTime;
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            console.error(`[AI-Core] Provider status check failed: ${errorMessage}`);
+            return {
+                success: false,
+                error: errorMessage,
+                metadata: {
+                    toolName: 'check-provider-status',
+                    serverId: 'neurolink-ai-core',
+                    sessionId: context.sessionId,
+                    timestamp: Date.now(),
+                    executionTime
+                }
+            };
+        }
+    }
+});
+// Log successful server creation
+console.log('[AI-Core] NeuroLink AI Core Server created with tools:', Object.keys(aiCoreServer.tools));
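A minimal usage sketch for the tools registered above, assuming (as the closing log line implies) that createMCPServer exposes registered tools on a `tools` map keyed by tool name and that `execute` is called directly with params and a context object; neither detail is confirmed by this diff:

    import { aiCoreServer } from './dist/mcp/servers/ai-providers/ai-core-server.js';

    // Hypothetical direct invocation of the 'generate-text' tool.
    const generateText = aiCoreServer.tools['generate-text'];
    const result = await generateText.execute(
        { prompt: 'Summarize the NeuroLink 1.3.0 release in one sentence.', temperature: 0.7 },
        { sessionId: 'demo-session' } // context shape assumed from the metadata.sessionId usage above
    );
    console.log(result.success ? result.data.text : result.error);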
package/dist/neurolink.d.ts
CHANGED

@@ -7,7 +7,7 @@
 import type { AIProviderName } from './core/types.js';
 export interface TextGenerationOptions {
     prompt: string;
-    provider?: 'openai' | 'bedrock' | 'vertex' | 'auto';
+    provider?: 'openai' | 'bedrock' | 'vertex' | 'anthropic' | 'azure' | 'auto';
     temperature?: number;
     maxTokens?: number;
     systemPrompt?: string;
@@ -15,7 +15,7 @@ export interface TextGenerationOptions {
 }
 export interface StreamTextOptions {
     prompt: string;
-    provider?: 'openai' | 'bedrock' | 'vertex' | 'auto';
+    provider?: 'openai' | 'bedrock' | 'vertex' | 'anthropic' | 'azure' | 'auto';
     temperature?: number;
     maxTokens?: number;
     systemPrompt?: string;
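These widened unions are the user-visible surface of the two new providers. A sketch of what they allow, assuming `TextGenerationOptions` is re-exported from the package root (this diff does not show the root exports):

    import type { TextGenerationOptions } from '@juspay/neurolink';

    // 'anthropic' and 'azure' now type-check; in 1.2.3 only
    // 'openai' | 'bedrock' | 'vertex' | 'auto' were accepted.
    const options: TextGenerationOptions = {
        prompt: 'Hello, Claude',
        provider: 'anthropic',
        maxTokens: 200
    };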
package/dist/providers/anthropic.d.ts
ADDED

@@ -0,0 +1,34 @@
+/**
+ * Anthropic AI Provider (Direct API)
+ *
+ * Direct integration with Anthropic's Claude models via their native API.
+ * Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, and Claude 3 Opus.
+ */
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+import { AIProviderName } from '../core/types.js';
+export declare class AnthropicProvider implements AIProvider {
+    readonly name: AIProviderName;
+    private apiKey;
+    private baseURL;
+    private defaultModel;
+    constructor();
+    private getApiKey;
+    private getModel;
+    private makeRequest;
+    generateText(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
+    streamText(optionsOrPrompt: StreamTextOptions | string, schema?: any): Promise<any>;
+    private createAsyncIterable;
+    generateTextStream(optionsOrPrompt: StreamTextOptions | string): AsyncGenerator<any, void, unknown>;
+    testConnection(): Promise<{
+        success: boolean;
+        error?: string;
+        responseTime?: number;
+    }>;
+    isConfigured(): boolean;
+    getRequiredConfig(): string[];
+    getOptionalConfig(): string[];
+    getModels(): string[];
+    supportsStreaming(): boolean;
+    supportsSchema(): boolean;
+    getCapabilities(): string[];
+}
package/dist/providers/anthropic.js
ADDED

@@ -0,0 +1,307 @@
+/**
+ * Anthropic AI Provider (Direct API)
+ *
+ * Direct integration with Anthropic's Claude models via their native API.
+ * Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, and Claude 3 Opus.
+ */
+import { AIProviderName } from '../core/types.js';
+export class AnthropicProvider {
+    name = AIProviderName.ANTHROPIC;
+    apiKey;
+    baseURL;
+    defaultModel;
+    constructor() {
+        this.apiKey = this.getApiKey();
+        this.baseURL = process.env.ANTHROPIC_BASE_URL || 'https://api.anthropic.com';
+        this.defaultModel = process.env.ANTHROPIC_MODEL || 'claude-3-5-sonnet-20241022';
+        console.log(`[AnthropicProvider] Initialized with model: ${this.defaultModel}`);
+    }
+    getApiKey() {
+        const apiKey = process.env.ANTHROPIC_API_KEY;
+        if (!apiKey) {
+            throw new Error('ANTHROPIC_API_KEY environment variable is required');
+        }
+        return apiKey;
+    }
+    getModel() {
+        return this.defaultModel;
+    }
+    async makeRequest(endpoint, body, stream = false) {
+        const url = `${this.baseURL}/v1/${endpoint}`;
+        const headers = {
+            'Content-Type': 'application/json',
+            'x-api-key': this.apiKey,
+            'anthropic-version': '2023-06-01',
+            'anthropic-dangerous-direct-browser-access': 'true' // Required for browser usage
+        };
+        console.log(`[AnthropicProvider.makeRequest] ${stream ? 'Streaming' : 'Non-streaming'} request to ${url}`);
+        console.log(`[AnthropicProvider.makeRequest] Model: ${body.model}, Max tokens: ${body.max_tokens}`);
+        const response = await fetch(url, {
+            method: 'POST',
+            headers,
+            body: JSON.stringify(body)
+        });
+        if (!response.ok) {
+            const errorText = await response.text();
+            console.error(`[AnthropicProvider.makeRequest] API error ${response.status}: ${errorText}`);
+            throw new Error(`Anthropic API error ${response.status}: ${errorText}`);
+        }
+        return response;
+    }
+    async generateText(optionsOrPrompt, schema) {
+        console.log('[AnthropicProvider.generateText] Starting text generation');
+        // Parse parameters with backward compatibility
+        const options = typeof optionsOrPrompt === 'string'
+            ? { prompt: optionsOrPrompt }
+            : optionsOrPrompt;
+        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
+        console.log(`[AnthropicProvider.generateText] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}`);
+        const requestBody = {
+            model: this.getModel(),
+            max_tokens: maxTokens,
+            messages: [
+                {
+                    role: 'user',
+                    content: prompt
+                }
+            ],
+            temperature,
+            system: systemPrompt
+        };
+        try {
+            const response = await this.makeRequest('messages', requestBody);
+            const data = await response.json();
+            console.log(`[AnthropicProvider.generateText] Success. Generated ${data.usage.output_tokens} tokens`);
+            const content = data.content.map(block => block.text).join('');
+            return {
+                content,
+                provider: this.name,
+                model: data.model,
+                usage: {
+                    promptTokens: data.usage.input_tokens,
+                    completionTokens: data.usage.output_tokens,
+                    totalTokens: data.usage.input_tokens + data.usage.output_tokens
+                },
+                finishReason: data.stop_reason
+            };
+        }
+        catch (error) {
+            console.error('[AnthropicProvider.generateText] Error:', error);
+            throw error;
+        }
+    }
+    async streamText(optionsOrPrompt, schema) {
+        console.log('[AnthropicProvider.streamText] Starting text streaming');
+        // Parse parameters with backward compatibility
+        const options = typeof optionsOrPrompt === 'string'
+            ? { prompt: optionsOrPrompt }
+            : optionsOrPrompt;
+        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
+        console.log(`[AnthropicProvider.streamText] Streaming prompt: "${prompt.substring(0, 100)}..."`);
+        const requestBody = {
+            model: this.getModel(),
+            max_tokens: maxTokens,
+            messages: [
+                {
+                    role: 'user',
+                    content: prompt
+                }
+            ],
+            temperature,
+            system: systemPrompt,
+            stream: true
+        };
+        try {
+            const response = await this.makeRequest('messages', requestBody, true);
+            if (!response.body) {
+                throw new Error('No response body received');
+            }
+            // Return a StreamTextResult-like object
+            return {
+                textStream: this.createAsyncIterable(response.body),
+                text: '',
+                usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
+                finishReason: 'end_turn'
+            };
+        }
+        catch (error) {
+            console.error('[AnthropicProvider.streamText] Error:', error);
+            throw error;
+        }
+    }
+    async *createAsyncIterable(body) {
+        const reader = body.getReader();
+        const decoder = new TextDecoder();
+        let buffer = '';
+        try {
+            while (true) {
+                const { done, value } = await reader.read();
+                if (done)
+                    break;
+                buffer += decoder.decode(value, { stream: true });
+                const lines = buffer.split('\n');
+                buffer = lines.pop() || '';
+                for (const line of lines) {
+                    if (line.trim() === '')
+                        continue;
+                    if (line.startsWith('data: ')) {
+                        const data = line.slice(6);
+                        if (data.trim() === '[DONE]')
+                            continue;
+                        try {
+                            const chunk = JSON.parse(data);
+                            // Extract text content from different chunk types
+                            if (chunk.type === 'content_block_delta' && chunk.delta?.text) {
+                                yield chunk.delta.text;
+                            }
+                        }
+                        catch (parseError) {
+                            console.warn('[AnthropicProvider.createAsyncIterable] Failed to parse chunk:', parseError);
+                            continue;
+                        }
+                    }
+                }
+            }
+        }
+        finally {
+            reader.releaseLock();
+        }
+    }
+    async *generateTextStream(optionsOrPrompt) {
+        console.log('[AnthropicProvider.generateTextStream] Starting text streaming');
+        // Parse parameters with backward compatibility
+        const options = typeof optionsOrPrompt === 'string'
+            ? { prompt: optionsOrPrompt }
+            : optionsOrPrompt;
+        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.' } = options;
+        console.log(`[AnthropicProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
+        const requestBody = {
+            model: this.getModel(),
+            max_tokens: maxTokens,
+            messages: [
+                {
+                    role: 'user',
+                    content: prompt
+                }
+            ],
+            temperature,
+            system: systemPrompt,
+            stream: true
+        };
+        try {
+            const response = await this.makeRequest('messages', requestBody, true);
+            if (!response.body) {
+                throw new Error('No response body received');
+            }
+            const reader = response.body.getReader();
+            const decoder = new TextDecoder();
+            let buffer = '';
+            try {
+                while (true) {
+                    const { done, value } = await reader.read();
+                    if (done)
+                        break;
+                    buffer += decoder.decode(value, { stream: true });
+                    const lines = buffer.split('\n');
+                    buffer = lines.pop() || '';
+                    for (const line of lines) {
+                        if (line.trim() === '')
+                            continue;
+                        if (line.startsWith('data: ')) {
+                            const data = line.slice(6);
+                            if (data.trim() === '[DONE]')
+                                continue;
+                            try {
+                                const chunk = JSON.parse(data);
+                                // Extract text content from different chunk types
+                                if (chunk.type === 'content_block_delta' && chunk.delta?.text) {
+                                    yield {
+                                        content: chunk.delta.text,
+                                        provider: this.name,
+                                        model: this.getModel()
+                                    };
+                                }
+                            }
+                            catch (parseError) {
+                                console.warn('[AnthropicProvider.generateTextStream] Failed to parse chunk:', parseError);
+                                continue;
+                            }
+                        }
+                    }
+                }
+            }
+            finally {
+                reader.releaseLock();
+            }
+            console.log('[AnthropicProvider.generateTextStream] Streaming completed');
+        }
+        catch (error) {
+            console.error('[AnthropicProvider.generateTextStream] Error:', error);
+            throw error;
+        }
+    }
+    async testConnection() {
+        console.log('[AnthropicProvider.testConnection] Testing connection to Anthropic API');
+        const startTime = Date.now();
+        try {
+            await this.generateText({
+                prompt: 'Hello',
+                maxTokens: 5
+            });
+            const responseTime = Date.now() - startTime;
+            console.log(`[AnthropicProvider.testConnection] Connection test successful (${responseTime}ms)`);
+            return {
+                success: true,
+                responseTime
+            };
+        }
+        catch (error) {
+            const responseTime = Date.now() - startTime;
+            console.error(`[AnthropicProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
+            return {
+                success: false,
+                error: error instanceof Error ? error.message : 'Unknown error',
+                responseTime
+            };
+        }
+    }
+    isConfigured() {
+        try {
+            this.getApiKey();
+            return true;
+        }
+        catch {
+            return false;
+        }
+    }
+    getRequiredConfig() {
+        return ['ANTHROPIC_API_KEY'];
+    }
+    getOptionalConfig() {
+        return ['ANTHROPIC_MODEL', 'ANTHROPIC_BASE_URL'];
+    }
+    getModels() {
+        return [
+            'claude-3-5-sonnet-20241022',
+            'claude-3-5-haiku-20241022',
+            'claude-3-opus-20240229',
+            'claude-3-sonnet-20240229',
+            'claude-3-haiku-20240307'
+        ];
+    }
+    supportsStreaming() {
+        return true;
+    }
+    supportsSchema() {
+        return false; // Anthropic doesn't have native JSON schema support like OpenAI
+    }
+    getCapabilities() {
+        return [
+            'text-generation',
+            'streaming',
+            'conversation',
+            'system-prompts',
+            'long-context' // Claude models support up to 200k tokens
+        ];
+    }
+}
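A short usage sketch for the provider above. The deep-dist import path is an assumption; providers/index.js also changed in this release and may re-export the class from a shorter path:

    // Requires ANTHROPIC_API_KEY in the environment; the constructor throws otherwise.
    import { AnthropicProvider } from '@juspay/neurolink/dist/providers/anthropic.js';

    const claude = new AnthropicProvider();
    const { content, usage } = await claude.generateText({
        prompt: 'Name three Claude 3 models.',
        maxTokens: 100
    });
    console.log(content, `(${usage.totalTokens} tokens)`);

    // generateTextStream yields { content, provider, model } chunks.
    for await (const chunk of claude.generateTextStream('Write a haiku about fallback.')) {
        process.stdout.write(chunk.content);
    }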
package/dist/providers/azureOpenAI.d.ts
ADDED

@@ -0,0 +1,37 @@
+/**
+ * Azure OpenAI Provider
+ *
+ * Enterprise-grade OpenAI integration through Microsoft Azure.
+ * Supports all OpenAI models with enhanced security and compliance.
+ */
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+import { AIProviderName } from '../core/types.js';
+export declare class AzureOpenAIProvider implements AIProvider {
+    readonly name: AIProviderName;
+    private apiKey;
+    private endpoint;
+    private deploymentId;
+    private apiVersion;
+    constructor();
+    private getApiKey;
+    private getEndpoint;
+    private getDeploymentId;
+    private getApiUrl;
+    private makeRequest;
+    generateText(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
+    streamText(optionsOrPrompt: StreamTextOptions | string, schema?: any): Promise<any>;
+    private createAsyncIterable;
+    generateTextStream(optionsOrPrompt: StreamTextOptions | string): AsyncGenerator<any, void, unknown>;
+    testConnection(): Promise<{
+        success: boolean;
+        error?: string;
+        responseTime?: number;
+    }>;
+    isConfigured(): boolean;
+    getRequiredConfig(): string[];
+    getOptionalConfig(): string[];
+    getModels(): string[];
+    supportsStreaming(): boolean;
+    supportsSchema(): boolean;
+    getCapabilities(): string[];
+}
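The Azure declaration mirrors the Anthropic one, so a configuration probe reads the same in sketch form. The import path is assumed as above, and the constructor may throw when required variables are missing, as the Anthropic sibling does; the exact environment variable names live in azureOpenAI.js, which this extract does not show:

    import { AzureOpenAIProvider } from '@juspay/neurolink/dist/providers/azureOpenAI.js';

    try {
        const azure = new AzureOpenAIProvider();
        if (!azure.isConfigured()) {
            console.error('Missing config:', azure.getRequiredConfig().join(', '));
        } else {
            const { success, responseTime, error } = await azure.testConnection();
            console.log(success ? `Azure OpenAI reachable in ${responseTime}ms` : `Failed: ${error}`);
        }
    } catch (err) {
        console.error('Provider construction failed:', err);
    }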