@juspay/neurolink 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +815 -0
- package/dist/core/factory.d.ts +40 -0
- package/dist/core/factory.js +149 -0
- package/dist/core/types.d.ts +77 -0
- package/dist/core/types.js +55 -0
- package/dist/index.d.ts +54 -0
- package/dist/index.js +60 -0
- package/dist/providers/amazonBedrock.d.ts +11 -0
- package/dist/providers/amazonBedrock.js +229 -0
- package/dist/providers/googleVertexAI.d.ts +30 -0
- package/dist/providers/googleVertexAI.js +283 -0
- package/dist/providers/index.d.ts +24 -0
- package/dist/providers/index.js +19 -0
- package/dist/providers/openAI.d.ts +10 -0
- package/dist/providers/openAI.js +145 -0
- package/dist/utils/providerUtils.d.ts +20 -0
- package/dist/utils/providerUtils.js +63 -0
- package/package.json +82 -0
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import type { AIProvider, AIProviderName, SupportedModelName } from './types.js';
|
|
2
|
+
declare const componentIdentifier = "aiProviderFactory";
/**
 * Factory for creating AI provider instances with centralized configuration
 */
export declare class AIProviderFactory {
    /**
     * Create a provider instance for the specified provider type.
     * Names are matched case-insensitively; the aliases 'google'/'gemini'
     * (vertex), 'amazon'/'aws' (bedrock) and 'gpt' (openai) are also accepted.
     * @param providerName - Name of the provider ('vertex', 'bedrock', 'openai')
     * @param modelName - Optional model name override
     * @returns AIProvider instance
     * @throws Error when the provider name is not one of the recognized values
     */
    static createProvider(providerName: string, modelName?: string | null): AIProvider;
    /**
     * Create a provider instance with specific provider enum and model.
     * Delegates to `createProvider`, so the same aliasing/error rules apply.
     * @param provider - Provider enum value
     * @param model - Specific model enum value
     * @returns AIProvider instance
     */
    static createProviderWithModel(provider: AIProviderName, model: SupportedModelName): AIProvider;
    /**
     * Create the best available provider automatically.
     * @param requestedProvider - Optional preferred provider
     * @param modelName - Optional model name override
     * @returns AIProvider instance
     */
    static createBestProvider(requestedProvider?: string, modelName?: string | null): AIProvider;
    /**
     * Create primary and fallback provider instances.
     * Both providers are constructed eagerly; if either constructor throws,
     * the whole call fails.
     * @param primaryProvider - Primary provider name
     * @param fallbackProvider - Fallback provider name
     * @param modelName - Optional model name override
     * @returns Object with primary and fallback providers
     */
    static createProviderWithFallback(primaryProvider: string, fallbackProvider: string, modelName?: string | null): {
        primary: AIProvider;
        fallback: AIProvider;
    };
}
export { componentIdentifier };
|
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
import { GoogleVertexAI, AmazonBedrock, OpenAI } from '../providers/index.js';
|
|
2
|
+
import { getBestProvider } from '../utils/providerUtils.js';
|
|
3
|
+
const componentIdentifier = 'aiProviderFactory';
/**
 * Factory for creating AI provider instances with centralized configuration.
 *
 * Every creator logs its progress to the console and re-throws construction
 * errors so that callers can react (e.g. fall back to another provider).
 */
export class AIProviderFactory {
    /**
     * Create a provider instance for the specified provider type.
     * Names are matched case-insensitively; 'google'/'gemini', 'amazon'/'aws'
     * and 'gpt' are accepted as aliases.
     * @param providerName - Name of the provider ('vertex', 'bedrock', 'openai')
     * @param modelName - Optional model name override
     * @returns AIProvider instance
     * @throws Error when the provider name is not recognized
     */
    static createProvider(providerName, modelName) {
        const functionTag = 'AIProviderFactory.createProvider';
        console.log(`[${functionTag}] Provider creation started`, {
            providerName,
            modelName: modelName || 'default'
        });
        try {
            const normalized = providerName.toLowerCase();
            let provider;
            if (normalized === 'vertex' || normalized === 'google' || normalized === 'gemini') {
                provider = new GoogleVertexAI(modelName);
            }
            else if (normalized === 'bedrock' || normalized === 'amazon' || normalized === 'aws') {
                provider = new AmazonBedrock(modelName);
            }
            else if (normalized === 'openai' || normalized === 'gpt') {
                provider = new OpenAI(modelName);
            }
            else {
                throw new Error(`Unknown provider: ${providerName}. Supported providers: vertex, bedrock, openai`);
            }
            console.log(`[${functionTag}] Provider creation succeeded`, {
                providerName,
                modelName: modelName || 'default',
                providerType: provider.constructor.name
            });
            return provider;
        }
        catch (err) {
            console.error(`[${functionTag}] Provider creation failed`, {
                providerName,
                modelName: modelName || 'default',
                error: err instanceof Error ? err.message : String(err)
            });
            throw err;
        }
    }
    /**
     * Create a provider instance with specific provider enum and model.
     * Thin wrapper around `createProvider` with extra logging.
     * @param provider - Provider enum value
     * @param model - Specific model enum value
     * @returns AIProvider instance
     */
    static createProviderWithModel(provider, model) {
        const functionTag = 'AIProviderFactory.createProviderWithModel';
        console.log(`[${functionTag}] Provider model creation started`, {
            provider,
            model
        });
        try {
            const instance = this.createProvider(provider, model);
            console.log(`[${functionTag}] Provider model creation succeeded`, {
                provider,
                model,
                providerType: instance.constructor.name
            });
            return instance;
        }
        catch (err) {
            console.error(`[${functionTag}] Provider model creation failed`, {
                provider,
                model,
                error: err instanceof Error ? err.message : String(err)
            });
            throw err;
        }
    }
    /**
     * Create the best available provider automatically.
     * Selection is delegated to `getBestProvider`; the chosen name is then
     * instantiated through `createProvider`.
     * @param requestedProvider - Optional preferred provider
     * @param modelName - Optional model name override
     * @returns AIProvider instance
     */
    static createBestProvider(requestedProvider, modelName) {
        const functionTag = 'AIProviderFactory.createBestProvider';
        try {
            const bestProvider = getBestProvider(requestedProvider);
            console.log(`[${functionTag}] Best provider selected`, {
                requestedProvider: requestedProvider || 'auto',
                selectedProvider: bestProvider,
                modelName: modelName || 'default'
            });
            return this.createProvider(bestProvider, modelName);
        }
        catch (err) {
            console.error(`[${functionTag}] Best provider selection failed`, {
                requestedProvider: requestedProvider || 'auto',
                error: err instanceof Error ? err.message : String(err)
            });
            throw err;
        }
    }
    /**
     * Create primary and fallback provider instances.
     * Both are constructed eagerly; a failure in either constructor aborts
     * the whole call.
     * @param primaryProvider - Primary provider name
     * @param fallbackProvider - Fallback provider name
     * @param modelName - Optional model name override
     * @returns Object with primary and fallback providers
     */
    static createProviderWithFallback(primaryProvider, fallbackProvider, modelName) {
        const functionTag = 'AIProviderFactory.createProviderWithFallback';
        console.log(`[${functionTag}] Fallback provider setup started`, {
            primaryProvider,
            fallbackProvider,
            modelName: modelName || 'default'
        });
        try {
            const primary = this.createProvider(primaryProvider, modelName);
            const fallback = this.createProvider(fallbackProvider, modelName);
            console.log(`[${functionTag}] Fallback provider setup succeeded`, {
                primaryProvider,
                fallbackProvider,
                modelName: modelName || 'default'
            });
            return { primary, fallback };
        }
        catch (err) {
            console.error(`[${functionTag}] Fallback provider setup failed`, {
                primaryProvider,
                fallbackProvider,
                error: err instanceof Error ? err.message : String(err)
            });
            throw err;
        }
    }
}
export { componentIdentifier };
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
import type { ZodType, ZodTypeDef } from 'zod';
|
|
2
|
+
import type { StreamTextResult, ToolSet, Schema, GenerateTextResult } from 'ai';
|
|
3
|
+
/**
 * Supported AI Provider Names
 */
export declare enum AIProviderName {
    BEDROCK = "bedrock",
    OPENAI = "openai",
    VERTEX = "vertex"
}
/**
 * Supported Models for Amazon Bedrock
 */
export declare enum BedrockModels {
    CLAUDE_3_SONNET = "anthropic.claude-3-sonnet-20240229-v1:0",
    CLAUDE_3_HAIKU = "anthropic.claude-3-haiku-20240307-v1:0",
    CLAUDE_3_5_SONNET = "anthropic.claude-3-5-sonnet-20240620-v1:0",
    // NOTE(review): this value embeds a specific AWS account's inference-profile
    // ARN (225681119357) and will not resolve for other accounts — confirm
    // whether a region-generic model id should be used instead.
    CLAUDE_3_7_SONNET = "arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0"
}
/**
 * Supported Models for OpenAI
 */
export declare enum OpenAIModels {
    GPT_4 = "gpt-4",
    GPT_4_TURBO = "gpt-4-turbo",
    GPT_4O = "gpt-4o",
    GPT_4O_MINI = "gpt-4o-mini",
    GPT_3_5_TURBO = "gpt-3.5-turbo"
}
/**
 * Supported Models for Google Vertex AI
 */
export declare enum VertexModels {
    CLAUDE_4_0_SONNET = "claude-sonnet-4@20250514",
    GEMINI_2_5_FLASH = "gemini-2.5-flash-preview-05-20"
}
/**
 * Union type of all supported model names
 */
export type SupportedModelName = BedrockModels | OpenAIModels | VertexModels;
/**
 * Provider configuration specifying provider and its available models
 */
export interface ProviderConfig {
    provider: AIProviderName;
    models: SupportedModelName[];
}
/**
 * Options for AI requests with unified provider configuration
 */
export interface StreamingOptions {
    providers: ProviderConfig[];
    temperature?: number;
    maxTokens?: number;
    systemPrompt?: string;
}
/**
 * AI Provider interface
 *
 * Both methods accept an optional schema for structured output and resolve
 * with the AI SDK result object.
 */
export interface AIProvider {
    streamText(prompt: string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
    generateText(prompt: string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
}
/**
 * Provider attempt result for iteration tracking
 */
export interface ProviderAttempt {
    provider: AIProviderName;
    model: SupportedModelName;
    success: boolean;
    error?: string;
    stack?: string;
}
/**
 * Default provider configurations (ordered by preference: bedrock, vertex, openai)
 */
export declare const DEFAULT_PROVIDER_CONFIGS: ProviderConfig[];
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
/**
 * Supported AI Provider Names.
 * Plain string-keyed objects; string enums have no reverse mapping, so these
 * behave identically to the TypeScript enum emit they replace.
 */
export const AIProviderName = {
    BEDROCK: 'bedrock',
    OPENAI: 'openai',
    VERTEX: 'vertex'
};
/**
 * Supported Models for Amazon Bedrock
 */
export const BedrockModels = {
    CLAUDE_3_SONNET: 'anthropic.claude-3-sonnet-20240229-v1:0',
    CLAUDE_3_HAIKU: 'anthropic.claude-3-haiku-20240307-v1:0',
    CLAUDE_3_5_SONNET: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
    CLAUDE_3_7_SONNET: 'arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0'
};
/**
 * Supported Models for OpenAI
 */
export const OpenAIModels = {
    GPT_4: 'gpt-4',
    GPT_4_TURBO: 'gpt-4-turbo',
    GPT_4O: 'gpt-4o',
    GPT_4O_MINI: 'gpt-4o-mini',
    GPT_3_5_TURBO: 'gpt-3.5-turbo'
};
/**
 * Supported Models for Google Vertex AI
 */
export const VertexModels = {
    CLAUDE_4_0_SONNET: 'claude-sonnet-4@20250514',
    GEMINI_2_5_FLASH: 'gemini-2.5-flash-preview-05-20'
};
/**
 * Default provider configurations, in order of preference.
 */
export const DEFAULT_PROVIDER_CONFIGS = [
    {
        provider: AIProviderName.BEDROCK,
        models: [BedrockModels.CLAUDE_3_7_SONNET, BedrockModels.CLAUDE_3_5_SONNET]
    },
    {
        provider: AIProviderName.VERTEX,
        models: [VertexModels.CLAUDE_4_0_SONNET, VertexModels.GEMINI_2_5_FLASH]
    },
    {
        provider: AIProviderName.OPENAI,
        models: [OpenAIModels.GPT_4O, OpenAIModels.GPT_4O_MINI]
    }
];
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
/**
 * NeuroLink AI Toolkit
 *
 * A unified AI provider interface with support for multiple providers,
 * automatic fallback, streaming, and tool integration.
 *
 * Extracted from lighthouse project's proven AI functionality.
 */
import { AIProviderFactory } from './core/factory.js';
export { AIProviderFactory };
// NOTE(review): AIProviderName is declared as a runtime enum in core/types,
// but is re-exported here type-only (and index.js does not re-export its
// value) — confirm consumers are not expected to use the enum at runtime.
export type { AIProvider, AIProviderName, ProviderConfig, StreamingOptions, ProviderAttempt, SupportedModelName } from './core/types.js';
export { BedrockModels, OpenAIModels, VertexModels, DEFAULT_PROVIDER_CONFIGS } from './core/types.js';
export { GoogleVertexAI, AmazonBedrock, OpenAI } from './providers/index.js';
export type { ProviderName } from './providers/index.js';
export { PROVIDERS, AVAILABLE_PROVIDERS } from './providers/index.js';
export { getBestProvider, getAvailableProviders, isValidProvider } from './utils/providerUtils.js';
export declare const VERSION = "1.0.0";
/**
 * Quick start factory function
 *
 * @param providerName - Provider to instantiate; defaults to 'bedrock' when omitted
 * @param modelName - Optional model name override
 *
 * @example
 * ```typescript
 * import { createAIProvider } from 'neurolink';
 *
 * const provider = createAIProvider('bedrock');
 * const result = await provider.streamText('Hello, AI!');
 * ```
 */
export declare function createAIProvider(providerName?: string, modelName?: string): import("./core/types.js").AIProvider;
/**
 * Create provider with automatic fallback
 *
 * @param primaryProvider - Defaults to 'bedrock' when omitted
 * @param fallbackProvider - Defaults to 'vertex' when omitted
 * @param modelName - Optional model name override
 *
 * @example
 * ```typescript
 * import { createAIProviderWithFallback } from 'neurolink';
 *
 * const { primary, fallback } = createAIProviderWithFallback('bedrock', 'vertex');
 * ```
 */
export declare function createAIProviderWithFallback(primaryProvider?: string, fallbackProvider?: string, modelName?: string): {
    primary: import("./core/types.js").AIProvider;
    fallback: import("./core/types.js").AIProvider;
};
/**
 * Create the best available provider based on configuration
 *
 * @param requestedProvider - Optional preferred provider
 * @param modelName - Optional model name override
 *
 * @example
 * ```typescript
 * import { createBestAIProvider } from 'neurolink';
 *
 * const provider = createBestAIProvider();
 * ```
 */
export declare function createBestAIProvider(requestedProvider?: string, modelName?: string): import("./core/types.js").AIProvider;
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* NeuroLink AI Toolkit
|
|
3
|
+
*
|
|
4
|
+
* A unified AI provider interface with support for multiple providers,
|
|
5
|
+
* automatic fallback, streaming, and tool integration.
|
|
6
|
+
*
|
|
7
|
+
* Extracted from lighthouse project's proven AI functionality.
|
|
8
|
+
*/
|
|
9
|
+
// Core exports
|
|
10
|
+
import { AIProviderFactory } from './core/factory.js';
|
|
11
|
+
export { AIProviderFactory };
|
|
12
|
+
// Model enums
|
|
13
|
+
export { BedrockModels, OpenAIModels, VertexModels, DEFAULT_PROVIDER_CONFIGS } from './core/types.js';
|
|
14
|
+
// Provider exports
|
|
15
|
+
export { GoogleVertexAI, AmazonBedrock, OpenAI } from './providers/index.js';
|
|
16
|
+
export { PROVIDERS, AVAILABLE_PROVIDERS } from './providers/index.js';
|
|
17
|
+
// Utility exports
|
|
18
|
+
export { getBestProvider, getAvailableProviders, isValidProvider } from './utils/providerUtils.js';
|
|
19
|
+
// Version
export const VERSION = '1.0.0';
/**
 * Quick start factory function.
 *
 * Delegates to {@link AIProviderFactory.createProvider}. When `providerName`
 * is omitted (or an empty string), 'bedrock' is used.
 *
 * @example
 * ```typescript
 * import { createAIProvider } from 'neurolink';
 *
 * const provider = createAIProvider('bedrock');
 * const result = await provider.streamText('Hello, AI!');
 * ```
 */
export function createAIProvider(providerName, modelName) {
    // '||' (not '??') so an empty-string name also falls back to 'bedrock'.
    const name = providerName || 'bedrock';
    return AIProviderFactory.createProvider(name, modelName);
}
|
|
35
|
+
/**
 * Create provider with automatic fallback.
 *
 * Delegates to {@link AIProviderFactory.createProviderWithFallback} with
 * defaults of 'bedrock' (primary) and 'vertex' (fallback); '||' means an
 * empty-string argument also falls back to the default.
 *
 * @example
 * ```typescript
 * import { createAIProviderWithFallback } from 'neurolink';
 *
 * const { primary, fallback } = createAIProviderWithFallback('bedrock', 'vertex');
 * ```
 */
export function createAIProviderWithFallback(primaryProvider, fallbackProvider, modelName) {
    const primary = primaryProvider || 'bedrock';
    const fallback = fallbackProvider || 'vertex';
    return AIProviderFactory.createProviderWithFallback(primary, fallback, modelName);
}
|
|
48
|
+
/**
 * Create the best available provider based on configuration.
 *
 * Pure pass-through to {@link AIProviderFactory.createBestProvider}.
 *
 * @example
 * ```typescript
 * import { createBestAIProvider } from 'neurolink';
 *
 * const provider = createBestAIProvider();
 * ```
 */
export function createBestAIProvider(requestedProvider, modelName) {
    const provider = AIProviderFactory.createBestProvider(requestedProvider, modelName);
    return provider;
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import type { ZodType, ZodTypeDef } from 'zod';
|
|
2
|
+
import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
|
|
3
|
+
import type { AIProvider } from '../core/types.js';
|
|
4
|
+
/**
 * AIProvider implementation backed by Amazon Bedrock (via the AI SDK).
 * The constructor reads AWS credentials from environment variables and
 * throws when required values are missing.
 */
export declare class AmazonBedrock implements AIProvider {
    private modelName;
    private model;
    private bedrock;
    /** @param modelName - Optional Bedrock model id; defaults to the BEDROCK_MODEL_ID env var */
    constructor(modelName?: string | null);
    /**
     * Stream a completion for the prompt; errors are re-thrown to the caller.
     * NOTE(review): the implementation never returns null (it throws on
     * failure) — confirm whether the `| null` in the return type can be removed.
     */
    streamText(prompt: string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
    /** Generate a full completion for the prompt; errors are re-thrown to the caller. */
    generateText(prompt: string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
}
|
|
@@ -0,0 +1,229 @@
|
|
|
1
|
+
import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
|
|
2
|
+
import { streamText, generateText, Output } from 'ai';
|
|
3
|
+
// Default system context applied to every Bedrock request.
const DEFAULT_SYSTEM_CONTEXT = {
    systemPrompt: 'You are a helpful AI assistant.'
};
// Configuration helpers — every value is read from process.env at call time.
// Read a required environment variable, throwing a descriptive error when unset.
const requireEnv = (name) => {
    const value = process.env[name];
    if (!value) {
        throw new Error(`${name} environment variable is not set`);
    }
    return value;
};
// NOTE(review): the fallback ARN uses the literal placeholder 'account' as the
// AWS account id and will not resolve — confirm a real default or make
// BEDROCK_MODEL_ID mandatory.
const getBedrockModelId = () => process.env.BEDROCK_MODEL_ID ||
    'arn:aws:bedrock:us-east-2:account:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0';
const getAWSAccessKeyId = () => requireEnv('AWS_ACCESS_KEY_ID');
const getAWSSecretAccessKey = () => requireEnv('AWS_SECRET_ACCESS_KEY');
const getAWSRegion = () => process.env.AWS_REGION || 'us-east-2';
const getAWSSessionToken = () => process.env.AWS_SESSION_TOKEN;
const getAppEnvironment = () => process.env.PUBLIC_APP_ENVIRONMENT || 'dev';
|
|
34
|
+
// Amazon Bedrock class with enhanced error handling using createAmazonBedrock
/**
 * AIProvider implementation backed by the AI SDK's Amazon Bedrock provider.
 *
 * The constructor reads AWS credentials/region from the environment (via the
 * helpers above) and eagerly creates both the Bedrock provider and the model
 * instance; any configuration error is logged and re-thrown.
 */
export class AmazonBedrock {
    // Model id in use (constructor argument or BEDROCK_MODEL_ID fallback).
    modelName;
    // AI SDK language-model instance produced by this.bedrock(this.modelName).
    model;
    // Custom Bedrock provider created by createAmazonBedrock(awsConfig).
    bedrock;
    /**
     * @param modelName - Optional Bedrock model id; falls back to getBedrockModelId()
     * @throws when AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY are unset, or
     *         provider/model construction fails
     */
    constructor(modelName) {
        const functionTag = 'AmazonBedrock.constructor';
        this.modelName = modelName || getBedrockModelId();
        try {
            console.log(`[${functionTag}] Function called`, { modelName: this.modelName });
            // Configure AWS credentials for custom Bedrock instance
            const awsConfig = {
                accessKeyId: getAWSAccessKeyId(),
                secretAccessKey: getAWSSecretAccessKey(),
                region: getAWSRegion()
            };
            console.log(`[${functionTag}] AWS config validation`, {
                hasAccessKeyId: !!awsConfig.accessKeyId,
                hasSecretAccessKey: !!awsConfig.secretAccessKey,
                region: awsConfig.region || 'MISSING'
            });
            // Add session token for development environment only; in 'dev' a
            // missing token is logged as a warning but is not fatal.
            if (getAppEnvironment() === 'dev') {
                const sessionToken = getAWSSessionToken();
                if (sessionToken) {
                    awsConfig.sessionToken = sessionToken;
                    console.log(`[${functionTag}] Session token added`, {
                        environment: 'dev'
                    });
                }
                else {
                    console.warn(`[${functionTag}] Session token missing`, {
                        environment: 'dev'
                    });
                }
            }
            console.log(`[${functionTag}] AWS config created`, {
                region: awsConfig.region,
                hasSessionToken: !!awsConfig.sessionToken
            });
            console.log(`[${functionTag}] Bedrock provider creating`, {
                modelName: this.modelName
            });
            // Create custom Bedrock provider instance with environment-based configuration
            this.bedrock = createAmazonBedrock(awsConfig);
            console.log(`[${functionTag}] Bedrock provider initialized`, {
                modelName: this.modelName
            });
            console.log(`[${functionTag}] Model instance creating`, {
                modelName: this.modelName
            });
            this.model = this.bedrock(this.modelName);
            console.log(`[${functionTag}] Model instance created`, {
                modelName: this.modelName
            });
            console.log(`[${functionTag}] Function result`, {
                modelName: this.modelName,
                region: awsConfig.region,
                hasSessionToken: !!awsConfig.sessionToken,
                success: true
            });
            console.log(`[${functionTag}] Initialization completed`, {
                modelName: this.modelName,
                region: awsConfig.region,
                hasSessionToken: !!awsConfig.sessionToken
            });
        }
        catch (err) {
            console.error(`[${functionTag}] Initialization failed`, {
                message: 'Error in initializing Amazon Bedrock',
                modelName: this.modelName,
                region: getAWSRegion(),
                error: err instanceof Error ? err.message : String(err),
                stack: err instanceof Error ? err.stack : undefined
            });
            throw err;
        }
    }
    /**
     * Stream a completion for `prompt`.
     *
     * When `analysisSchema` is provided, structured output is requested via
     * the AI SDK's experimental `Output.object`.
     *
     * Note: the AI SDK `streamText` call is not awaited here; stream errors
     * are reported through the `onError` callback (and presumably when the
     * caller consumes the result — confirm against the AI SDK version in use).
     *
     * @param prompt - User prompt text
     * @param analysisSchema - Optional zod / AI SDK schema for structured output
     * @returns the AI SDK stream result; synchronous failures are re-thrown
     */
    async streamText(prompt, analysisSchema) {
        const functionTag = 'AmazonBedrock.streamText';
        const provider = 'bedrock';
        // Counts chunks across the stream lifetime; captured by the callbacks below.
        let chunkCount = 0;
        try {
            console.log(`[${functionTag}] Stream request started`, {
                provider,
                modelName: this.modelName,
                promptLength: prompt.length
            });
            const streamOptions = {
                model: this.model,
                prompt: prompt,
                system: DEFAULT_SYSTEM_CONTEXT.systemPrompt,
                // Stream-level errors do not throw here; they are logged via this callback.
                onError: (event) => {
                    const error = event.error;
                    const errorMessage = error instanceof Error ? error.message : String(error);
                    const errorStack = error instanceof Error ? error.stack : undefined;
                    console.error(`[${functionTag}] Stream text error`, {
                        provider,
                        modelName: this.modelName,
                        region: getAWSRegion(),
                        error: errorMessage,
                        stack: errorStack,
                        promptLength: prompt.length,
                        chunkCount
                    });
                },
                onFinish: (event) => {
                    console.log(`[${functionTag}] Stream text finished`, {
                        provider,
                        modelName: this.modelName,
                        region: getAWSRegion(),
                        finishReason: event.finishReason,
                        usage: event.usage,
                        totalChunks: chunkCount,
                        promptLength: prompt.length,
                        responseLength: event.text?.length || 0
                    });
                },
                onChunk: (event) => {
                    chunkCount++;
                    console.debug(`[${functionTag}] Stream text chunk`, {
                        provider,
                        modelName: this.modelName,
                        chunkNumber: chunkCount,
                        chunkLength: event.chunk.text?.length || 0,
                        chunkType: event.chunk.type
                    });
                }
            };
            if (analysisSchema) {
                streamOptions.experimental_output = Output.object({ schema: analysisSchema });
            }
            console.log(`[${functionTag}] Stream text started`, {
                provider,
                modelName: this.modelName,
                region: getAWSRegion(),
                promptLength: prompt.length
            });
            // Direct streamText call - let the real error bubble up
            const result = streamText(streamOptions);
            console.log(`[${functionTag}] Stream text call successful`, {
                provider,
                modelName: this.modelName,
                promptLength: prompt.length
            });
            return result;
        }
        catch (err) {
            console.error(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
                region: getAWSRegion(),
                message: 'Error in streaming text',
                err: String(err)
            });
            throw err; // Re-throw error to trigger fallback
        }
    }
    /**
     * Generate a full (non-streaming) completion for `prompt`.
     *
     * When `analysisSchema` is provided, structured output is requested via
     * the AI SDK's experimental `Output.object`.
     *
     * @param prompt - User prompt text
     * @param analysisSchema - Optional zod / AI SDK schema for structured output
     * @returns the AI SDK generate result; failures are re-thrown
     */
    async generateText(prompt, analysisSchema) {
        const functionTag = 'AmazonBedrock.generateText';
        const provider = 'bedrock';
        try {
            const generateOptions = {
                model: this.model,
                prompt: prompt,
                system: DEFAULT_SYSTEM_CONTEXT.systemPrompt
            };
            if (analysisSchema) {
                generateOptions.experimental_output = Output.object({ schema: analysisSchema });
            }
            console.log(`[${functionTag}] Generate text started`, {
                provider,
                modelName: this.modelName,
                region: getAWSRegion(),
                promptLength: prompt.length
            });
            const result = await generateText(generateOptions);
            console.log(`[${functionTag}] Generate text completed`, {
                provider,
                modelName: this.modelName,
                usage: result.usage,
                finishReason: result.finishReason
            });
            return result;
        }
        catch (err) {
            console.error(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
                message: 'Error in generating text',
                err: String(err)
            });
            throw err; // Re-throw error to trigger fallback instead of returning null
        }
    }
}
|