@juspay/neurolink 1.5.0 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +85 -0
- package/LICENSE +21 -0
- package/README.md +4 -2
- package/dist/cli/commands/config.d.ts +35 -35
- package/dist/cli/index.js +63 -19
- package/dist/core/factory.js +12 -11
- package/dist/lib/core/factory.d.ts +40 -0
- package/dist/lib/core/factory.js +162 -0
- package/dist/lib/core/types.d.ts +111 -0
- package/dist/lib/core/types.js +68 -0
- package/dist/lib/index.d.ts +56 -0
- package/dist/lib/index.js +62 -0
- package/dist/lib/mcp/context-manager.d.ts +164 -0
- package/dist/lib/mcp/context-manager.js +273 -0
- package/dist/lib/mcp/factory.d.ts +144 -0
- package/dist/lib/mcp/factory.js +141 -0
- package/dist/lib/mcp/orchestrator.d.ts +170 -0
- package/dist/lib/mcp/orchestrator.js +372 -0
- package/dist/lib/mcp/registry.d.ts +188 -0
- package/dist/lib/mcp/registry.js +373 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +303 -0
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +428 -0
- package/dist/lib/neurolink.d.ts +53 -0
- package/dist/lib/neurolink.js +155 -0
- package/dist/lib/providers/amazonBedrock.d.ts +11 -0
- package/dist/lib/providers/amazonBedrock.js +256 -0
- package/dist/lib/providers/anthropic.d.ts +34 -0
- package/dist/lib/providers/anthropic.js +308 -0
- package/dist/lib/providers/azureOpenAI.d.ts +37 -0
- package/dist/lib/providers/azureOpenAI.js +339 -0
- package/dist/lib/providers/googleAIStudio.d.ts +30 -0
- package/dist/lib/providers/googleAIStudio.js +216 -0
- package/dist/lib/providers/googleVertexAI.d.ts +30 -0
- package/dist/lib/providers/googleVertexAI.js +409 -0
- package/dist/lib/providers/index.d.ts +30 -0
- package/dist/lib/providers/index.js +25 -0
- package/dist/lib/providers/openAI.d.ts +10 -0
- package/dist/lib/providers/openAI.js +169 -0
- package/dist/lib/utils/logger.d.ts +12 -0
- package/dist/lib/utils/logger.js +25 -0
- package/dist/lib/utils/providerUtils.d.ts +17 -0
- package/dist/lib/utils/providerUtils.js +73 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +11 -10
- package/dist/neurolink.js +13 -12
- package/dist/providers/amazonBedrock.js +22 -21
- package/dist/providers/anthropic.js +21 -20
- package/dist/providers/azureOpenAI.js +21 -20
- package/dist/providers/googleAIStudio.js +13 -12
- package/dist/providers/googleVertexAI.js +27 -26
- package/dist/providers/openAI.js +12 -11
- package/dist/utils/logger.d.ts +12 -0
- package/dist/utils/logger.js +25 -0
- package/dist/utils/providerUtils.d.ts +0 -3
- package/dist/utils/providerUtils.js +3 -2
- package/package.json +3 -17
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
import { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider, GoogleAIStudio } from '../providers/index.js';
|
|
2
|
+
import { getBestProvider } from '../utils/providerUtils.js';
|
|
3
|
+
import { logger } from '../utils/logger.js';
|
|
4
|
+
// Identifier used to tag this module's structured log output.
const componentIdentifier = 'aiProviderFactory';
/**
 * Lookup of every accepted provider alias (lower-case) to a builder function
 * that constructs the matching provider instance. Aliases on the same row
 * resolve to the same constructor. The Anthropic and Azure builders take no
 * model argument because their constructors accept none.
 */
const providerBuilders = {
    vertex: (modelName) => new GoogleVertexAI(modelName),
    google: (modelName) => new GoogleVertexAI(modelName),
    gemini: (modelName) => new GoogleVertexAI(modelName),
    bedrock: (modelName) => new AmazonBedrock(modelName),
    amazon: (modelName) => new AmazonBedrock(modelName),
    aws: (modelName) => new AmazonBedrock(modelName),
    openai: (modelName) => new OpenAI(modelName),
    gpt: (modelName) => new OpenAI(modelName),
    anthropic: () => new AnthropicProvider(),
    claude: () => new AnthropicProvider(),
    azure: () => new AzureOpenAIProvider(),
    'azure-openai': () => new AzureOpenAIProvider(),
    'google-ai': (modelName) => new GoogleAIStudio(modelName),
    'google-studio': (modelName) => new GoogleAIStudio(modelName),
};
/**
 * Factory for creating AI provider instances with centralized configuration.
 * Every method logs start/success/failure at debug level and rethrows the
 * original error so callers keep full control over error handling.
 */
export class AIProviderFactory {
    /**
     * Create a provider instance for the specified provider type.
     *
     * @param providerName - Name or alias of the provider (e.g. 'vertex',
     *   'bedrock', 'openai', 'anthropic', 'azure', 'google-ai'); matched
     *   case-insensitively.
     * @param modelName - Optional model name override (ignored by providers
     *   whose constructors take no model).
     * @returns AIProvider instance
     * @throws Error when the provider name is not recognized, or whatever the
     *   underlying provider constructor throws.
     */
    static createProvider(providerName, modelName) {
        const functionTag = 'AIProviderFactory.createProvider';
        logger.debug(`[${functionTag}] Provider creation started`, {
            providerName,
            modelName: modelName || 'default'
        });
        try {
            const aliasKey = providerName.toLowerCase();
            // Own-property check keeps inherited Object.prototype keys
            // (e.g. 'toString') from being mistaken for valid providers.
            const build = Object.prototype.hasOwnProperty.call(providerBuilders, aliasKey)
                ? providerBuilders[aliasKey]
                : undefined;
            if (!build) {
                throw new Error(`Unknown provider: ${providerName}. Supported providers: vertex, bedrock, openai, anthropic, azure, google-ai`);
            }
            const provider = build(modelName);
            logger.debug(`[${functionTag}] Provider creation succeeded`, {
                providerName,
                modelName: modelName || 'default',
                providerType: provider.constructor.name
            });
            return provider;
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.debug(`[${functionTag}] Provider creation failed`, {
                providerName,
                modelName: modelName || 'default',
                error: errorMessage
            });
            throw error;
        }
    }
    /**
     * Create a provider instance with specific provider enum and model.
     * Thin delegating wrapper around createProvider that adds its own
     * start/success/failure logging.
     *
     * @param provider - Provider enum value
     * @param model - Specific model enum value
     * @returns AIProvider instance
     */
    static createProviderWithModel(provider, model) {
        const functionTag = 'AIProviderFactory.createProviderWithModel';
        logger.debug(`[${functionTag}] Provider model creation started`, {
            provider,
            model
        });
        try {
            const instance = this.createProvider(provider, model);
            logger.debug(`[${functionTag}] Provider model creation succeeded`, {
                provider,
                model,
                providerType: instance.constructor.name
            });
            return instance;
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.debug(`[${functionTag}] Provider model creation failed`, {
                provider,
                model,
                error: errorMessage
            });
            throw error;
        }
    }
    /**
     * Create the best available provider automatically.
     * Selection itself is delegated to getBestProvider; this method only
     * instantiates whatever that helper picks.
     *
     * @param requestedProvider - Optional preferred provider
     * @param modelName - Optional model name override
     * @returns AIProvider instance
     */
    static createBestProvider(requestedProvider, modelName) {
        const functionTag = 'AIProviderFactory.createBestProvider';
        try {
            const bestProvider = getBestProvider(requestedProvider);
            logger.debug(`[${functionTag}] Best provider selected`, {
                requestedProvider: requestedProvider || 'auto',
                selectedProvider: bestProvider,
                modelName: modelName || 'default'
            });
            return this.createProvider(bestProvider, modelName);
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.debug(`[${functionTag}] Best provider selection failed`, {
                requestedProvider: requestedProvider || 'auto',
                error: errorMessage
            });
            throw error;
        }
    }
    /**
     * Create primary and fallback provider instances. Both are constructed
     * eagerly; if either constructor throws, the whole call fails.
     *
     * @param primaryProvider - Primary provider name
     * @param fallbackProvider - Fallback provider name
     * @param modelName - Optional model name override (applied to both)
     * @returns Object with primary and fallback providers
     */
    static createProviderWithFallback(primaryProvider, fallbackProvider, modelName) {
        const functionTag = 'AIProviderFactory.createProviderWithFallback';
        logger.debug(`[${functionTag}] Fallback provider setup started`, {
            primaryProvider,
            fallbackProvider,
            modelName: modelName || 'default'
        });
        try {
            const primaryInstance = this.createProvider(primaryProvider, modelName);
            const fallbackInstance = this.createProvider(fallbackProvider, modelName);
            logger.debug(`[${functionTag}] Fallback provider setup succeeded`, {
                primaryProvider,
                fallbackProvider,
                modelName: modelName || 'default'
            });
            return { primary: primaryInstance, fallback: fallbackInstance };
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.debug(`[${functionTag}] Fallback provider setup failed`, {
                primaryProvider,
                fallbackProvider,
                error: errorMessage
            });
            throw error;
        }
    }
}
export { componentIdentifier };
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
import type { ZodType, ZodTypeDef } from 'zod';
import type { StreamTextResult, ToolSet, Schema, GenerateTextResult } from 'ai';
/**
 * Supported AI Provider Names.
 * String values are the canonical identifiers accepted by the provider factory.
 */
export declare enum AIProviderName {
    BEDROCK = "bedrock",
    OPENAI = "openai",
    VERTEX = "vertex",
    ANTHROPIC = "anthropic",
    AZURE = "azure",
    GOOGLE_AI = "google-ai"
}
/**
 * Supported Models for Amazon Bedrock.
 * Values are Bedrock model identifiers.
 */
export declare enum BedrockModels {
    CLAUDE_3_SONNET = "anthropic.claude-3-sonnet-20240229-v1:0",
    CLAUDE_3_HAIKU = "anthropic.claude-3-haiku-20240307-v1:0",
    CLAUDE_3_5_SONNET = "anthropic.claude-3-5-sonnet-20240620-v1:0",
    // NOTE(review): this is a hard-coded, account-specific inference-profile ARN
    // (account 225681119357, us-east-2) — confirm it is usable by consumers
    // outside that AWS account.
    CLAUDE_3_7_SONNET = "arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0"
}
/**
 * Supported Models for OpenAI.
 */
export declare enum OpenAIModels {
    GPT_4 = "gpt-4",
    GPT_4_TURBO = "gpt-4-turbo",
    GPT_4O = "gpt-4o",
    GPT_4O_MINI = "gpt-4o-mini",
    GPT_3_5_TURBO = "gpt-3.5-turbo"
}
/**
 * Supported Models for Google Vertex AI.
 */
export declare enum VertexModels {
    CLAUDE_4_0_SONNET = "claude-sonnet-4@20250514",
    GEMINI_2_5_FLASH = "gemini-2.5-flash-preview-05-20"
}
/**
 * Supported Models for Google AI Studio.
 */
export declare enum GoogleAIModels {
    GEMINI_1_5_PRO_LATEST = "gemini-1.5-pro-latest",
    GEMINI_1_5_FLASH_LATEST = "gemini-1.5-flash-latest",
    GEMINI_2_0_FLASH_EXP = "gemini-2.0-flash-exp",
    GEMINI_1_0_PRO = "gemini-1.0-pro"
}
/**
 * Union type of all supported model names across every provider.
 */
export type SupportedModelName = BedrockModels | OpenAIModels | VertexModels | GoogleAIModels;
/**
 * Provider configuration specifying a provider and its available models.
 * Model order is meaningful in DEFAULT_PROVIDER_CONFIGS (preferred first).
 */
export interface ProviderConfig {
    provider: AIProviderName;
    models: SupportedModelName[];
}
/**
 * Options for AI requests with unified provider configuration.
 */
export interface StreamingOptions {
    /** Ordered list of provider configurations to use. */
    providers: ProviderConfig[];
    temperature?: number;
    maxTokens?: number;
    systemPrompt?: string;
}
/**
 * Text generation options interface.
 * `schema` enables structured output via a Zod or AI-SDK schema.
 */
export interface TextGenerationOptions {
    prompt: string;
    model?: string;
    temperature?: number;
    maxTokens?: number;
    systemPrompt?: string;
    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
}
/**
 * Stream text options interface.
 * Shape-identical to TextGenerationOptions; kept separate so the two call
 * styles can diverge independently.
 */
export interface StreamTextOptions {
    prompt: string;
    model?: string;
    temperature?: number;
    maxTokens?: number;
    systemPrompt?: string;
    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
}
/**
 * AI Provider interface with flexible parameter support.
 * Both methods accept either a full options object or a bare prompt string,
 * and may resolve to null on failure.
 */
export interface AIProvider {
    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
}
/**
 * Provider attempt result for iteration tracking (one record per
 * provider/model tried during fallback).
 */
export interface ProviderAttempt {
    provider: AIProviderName;
    model: SupportedModelName;
    success: boolean;
    error?: string;
    stack?: string;
}
/**
 * Default provider configurations, tried in array order.
 */
export declare const DEFAULT_PROVIDER_CONFIGS: ProviderConfig[];
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
/**
 * Supported AI provider names.
 * Plain object standing in for the compiled TypeScript string enum; the
 * values are the canonical provider identifiers used by the factory.
 */
export const AIProviderName = {
    BEDROCK: "bedrock",
    OPENAI: "openai",
    VERTEX: "vertex",
    ANTHROPIC: "anthropic",
    AZURE: "azure",
    GOOGLE_AI: "google-ai",
};
/**
 * Supported models for Amazon Bedrock.
 * CLAUDE_3_7_SONNET is an inference-profile ARN rather than a plain model id.
 */
export const BedrockModels = {
    CLAUDE_3_SONNET: "anthropic.claude-3-sonnet-20240229-v1:0",
    CLAUDE_3_HAIKU: "anthropic.claude-3-haiku-20240307-v1:0",
    CLAUDE_3_5_SONNET: "anthropic.claude-3-5-sonnet-20240620-v1:0",
    CLAUDE_3_7_SONNET: "arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
};
/**
 * Supported models for OpenAI.
 */
export const OpenAIModels = {
    GPT_4: "gpt-4",
    GPT_4_TURBO: "gpt-4-turbo",
    GPT_4O: "gpt-4o",
    GPT_4O_MINI: "gpt-4o-mini",
    GPT_3_5_TURBO: "gpt-3.5-turbo",
};
/**
 * Supported models for Google Vertex AI.
 */
export const VertexModels = {
    CLAUDE_4_0_SONNET: "claude-sonnet-4@20250514",
    GEMINI_2_5_FLASH: "gemini-2.5-flash-preview-05-20",
};
/**
 * Supported models for Google AI Studio.
 */
export const GoogleAIModels = {
    GEMINI_1_5_PRO_LATEST: "gemini-1.5-pro-latest",
    GEMINI_1_5_FLASH_LATEST: "gemini-1.5-flash-latest",
    GEMINI_2_0_FLASH_EXP: "gemini-2.0-flash-exp",
    GEMINI_1_0_PRO: "gemini-1.0-pro",
};
/**
 * Default provider configurations, tried in array order; within each entry
 * the models array lists the preferred model first.
 */
export const DEFAULT_PROVIDER_CONFIGS = [
    {
        provider: AIProviderName.BEDROCK,
        models: [BedrockModels.CLAUDE_3_7_SONNET, BedrockModels.CLAUDE_3_5_SONNET],
    },
    {
        provider: AIProviderName.VERTEX,
        models: [VertexModels.CLAUDE_4_0_SONNET, VertexModels.GEMINI_2_5_FLASH],
    },
    {
        provider: AIProviderName.OPENAI,
        models: [OpenAIModels.GPT_4O, OpenAIModels.GPT_4O_MINI],
    },
];
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
/**
 * NeuroLink AI Toolkit
 *
 * A unified AI provider interface with support for multiple providers,
 * automatic fallback, streaming, and tool integration.
 *
 * Extracted from lighthouse project's proven AI functionality.
 */
import { AIProviderFactory } from './core/factory.js';
export { AIProviderFactory };
export type { AIProvider, AIProviderName, ProviderConfig, StreamingOptions, ProviderAttempt, SupportedModelName } from './core/types.js';
// NOTE(review): GoogleAIModels is declared in ./core/types.js but not
// re-exported here — confirm whether the omission is intentional.
export { BedrockModels, OpenAIModels, VertexModels, DEFAULT_PROVIDER_CONFIGS } from './core/types.js';
// NOTE(review): GoogleAIStudio is used internally by the factory but is not
// part of the public surface here — confirm intentional.
export { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider } from './providers/index.js';
export type { ProviderName } from './providers/index.js';
export { PROVIDERS, AVAILABLE_PROVIDERS } from './providers/index.js';
export { getBestProvider, getAvailableProviders, isValidProvider } from './utils/providerUtils.js';
export { NeuroLink } from './neurolink.js';
export type { TextGenerationOptions, StreamTextOptions, TextGenerationResult } from './neurolink.js';
// NOTE(review): the package is versioned 1.5.x while this constant reads
// "1.0.0" — confirm whether it should track package.json.
export declare const VERSION = "1.0.0";
/**
 * Quick start factory function. Falls back to the 'bedrock' provider when
 * no provider name is supplied.
 *
 * @example
 * ```typescript
 * import { createAIProvider } from 'neurolink';
 *
 * const provider = createAIProvider('bedrock');
 * const result = await provider.streamText('Hello, AI!');
 * ```
 */
export declare function createAIProvider(providerName?: string, modelName?: string): import("./index.js").AIProvider;
/**
 * Create provider with automatic fallback. Defaults: primary 'bedrock',
 * fallback 'vertex'.
 *
 * @example
 * ```typescript
 * import { createAIProviderWithFallback } from 'neurolink';
 *
 * const { primary, fallback } = createAIProviderWithFallback('bedrock', 'vertex');
 * ```
 */
export declare function createAIProviderWithFallback(primaryProvider?: string, fallbackProvider?: string, modelName?: string): {
    primary: import("./index.js").AIProvider;
    fallback: import("./index.js").AIProvider;
};
/**
 * Create the best available provider based on configuration.
 *
 * @example
 * ```typescript
 * import { createBestAIProvider } from 'neurolink';
 *
 * const provider = createBestAIProvider();
 * ```
 */
export declare function createBestAIProvider(requestedProvider?: string, modelName?: string): import("./index.js").AIProvider;
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
/**
 * NeuroLink AI Toolkit
 *
 * A unified AI provider interface with support for multiple providers,
 * automatic fallback, streaming, and tool integration.
 *
 * Extracted from lighthouse project's proven AI functionality.
 */
// Core exports
import { AIProviderFactory } from './core/factory.js';
export { AIProviderFactory };
// Model enums
// NOTE(review): GoogleAIModels exists in ./core/types.js but is not
// re-exported here — confirm whether the omission is intentional.
export { BedrockModels, OpenAIModels, VertexModels, DEFAULT_PROVIDER_CONFIGS } from './core/types.js';
// Provider exports
// NOTE(review): GoogleAIStudio is constructible via the factory but not
// re-exported here — confirm intentional.
export { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider } from './providers/index.js';
export { PROVIDERS, AVAILABLE_PROVIDERS } from './providers/index.js';
// Utility exports
export { getBestProvider, getAvailableProviders, isValidProvider } from './utils/providerUtils.js';
// Main NeuroLink wrapper class
export { NeuroLink } from './neurolink.js';
// Version
// NOTE(review): package is versioned 1.5.x while this reads '1.0.0' —
// confirm whether it should track package.json.
export const VERSION = '1.0.0';
|
|
23
|
+
/**
 * Quick start factory function.
 *
 * Builds a provider via AIProviderFactory. An absent or falsy provider name
 * (including the empty string) falls back to 'bedrock'.
 *
 * @example
 * ```typescript
 * import { createAIProvider } from 'neurolink';
 *
 * const provider = createAIProvider('bedrock');
 * const result = await provider.streamText('Hello, AI!');
 * ```
 */
export function createAIProvider(providerName, modelName) {
    // Truthiness check (not a nullish check) so '' also takes the default.
    const resolvedProvider = providerName ? providerName : 'bedrock';
    return AIProviderFactory.createProvider(resolvedProvider, modelName);
}
|
|
37
|
+
/**
 * Create provider with automatic fallback.
 *
 * Falsy arguments default to 'bedrock' (primary) and 'vertex' (fallback);
 * both instances are built eagerly by the factory.
 *
 * @example
 * ```typescript
 * import { createAIProviderWithFallback } from 'neurolink';
 *
 * const { primary, fallback } = createAIProviderWithFallback('bedrock', 'vertex');
 * ```
 */
export function createAIProviderWithFallback(primaryProvider, fallbackProvider, modelName) {
    // Truthiness checks (not nullish) so '' also takes the defaults.
    const primaryName = primaryProvider ? primaryProvider : 'bedrock';
    const fallbackName = fallbackProvider ? fallbackProvider : 'vertex';
    return AIProviderFactory.createProviderWithFallback(primaryName, fallbackName, modelName);
}
|
|
50
|
+
/**
 * Create the best available provider based on configuration.
 *
 * Selection is delegated entirely to AIProviderFactory.createBestProvider;
 * this wrapper only exposes it as a free function.
 *
 * @example
 * ```typescript
 * import { createBestAIProvider } from 'neurolink';
 *
 * const provider = createBestAIProvider();
 * ```
 */
export function createBestAIProvider(requestedProvider, modelName) {
    const bestProvider = AIProviderFactory.createBestProvider(requestedProvider, modelName);
    return bestProvider;
}
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
/**
 * NeuroLink MCP Context Management System
 * Unified context creation and management for all tool executions
 * Ensures rich context flows through tool chain with session tracking
 */
import type { NeuroLinkExecutionContext } from './factory.js';
/**
 * Context creation request interface.
 * All fields are optional; unknown extra fields are carried through as
 * custom fields (see extractCustomFields).
 */
export interface ContextRequest {
    sessionId?: string;
    userId?: string;
    aiProvider?: string;
    modelId?: string;
    temperature?: number;
    maxTokens?: number;
    organizationId?: string;
    projectId?: string;
    environmentType?: 'development' | 'staging' | 'production';
    frameworkType?: 'react' | 'vue' | 'svelte' | 'next' | 'nuxt' | 'sveltekit';
    permissions?: string[];
    securityLevel?: 'public' | 'private' | 'organization';
    // NOTE(review): `any` here disables checking on custom fields; consider
    // `unknown` in a future major version (breaking for readers of fields).
    [key: string]: any;
}
/**
 * Context manager for creating and managing execution contexts
 * Provides rich context for all tool executions with session tracking
 */
export declare class ContextManager {
    // Internal state; implementations live in context-manager.js.
    private sessionCounter;
    private activeContexts;
    /**
     * Create a new execution context with rich information
     *
     * @param request Context creation request with optional fields
     * @returns Complete execution context ready for tool chain
     */
    createContext(request?: ContextRequest): NeuroLinkExecutionContext;
    /**
     * Add a tool to the execution chain
     *
     * @param context Execution context to modify
     * @param toolName Name of the tool being executed
     */
    addToToolChain(context: NeuroLinkExecutionContext, toolName: string): void;
    /**
     * Get the current tool chain for a context
     *
     * @param context Execution context
     * @returns Array of tool names in execution order
     */
    getToolChain(context: NeuroLinkExecutionContext): string[];
    /**
     * Set parent tool for nested tool execution
     *
     * @param context Execution context to modify
     * @param parentToolId ID of the parent tool
     */
    setParentTool(context: NeuroLinkExecutionContext, parentToolId: string): void;
    /**
     * Create child context for nested tool execution
     *
     * @param parentContext Parent execution context
     * @param childToolName Name of the child tool
     * @returns New child context with inherited properties
     */
    createChildContext(parentContext: NeuroLinkExecutionContext, childToolName: string): NeuroLinkExecutionContext;
    /**
     * Get context by session ID
     *
     * @param sessionId Session identifier
     * @returns Execution context or undefined if not found
     */
    getContext(sessionId: string): NeuroLinkExecutionContext | undefined;
    /**
     * Update context with new information
     *
     * @param sessionId Session identifier
     * @param updates Partial context updates
     */
    updateContext(sessionId: string, updates: Partial<NeuroLinkExecutionContext>): void;
    /**
     * Remove context from active tracking
     *
     * @param sessionId Session identifier
     */
    removeContext(sessionId: string): void;
    /**
     * Get all active contexts (for debugging/monitoring)
     *
     * @returns Array of all active contexts
     */
    getActiveContexts(): NeuroLinkExecutionContext[];
    /**
     * Clear all active contexts
     */
    clearAllContexts(): void;
    /**
     * Get context statistics
     *
     * @returns Context usage statistics
     */
    getStats(): {
        activeContexts: number;
        totalSessionsCreated: number;
        averageToolChainLength: number;
    };
    /**
     * Generate unique session ID
     *
     * @returns Unique session identifier
     */
    private generateSessionId;
    /**
     * Extract custom fields from request (excluding known fields)
     *
     * @param request Context creation request
     * @returns Custom fields object
     */
    private extractCustomFields;
}
/**
 * Default context manager instance
 * Can be used across the application for consistent context management
 */
export declare const defaultContextManager: ContextManager;
/**
 * Utility function to create context with defaults
 *
 * @param request Optional context request
 * @returns Execution context with sensible defaults
 */
export declare function createExecutionContext(request?: ContextRequest): NeuroLinkExecutionContext;
/**
 * Utility function to add tool to default context manager
 *
 * @param context Execution context
 * @param toolName Tool name to add
 */
export declare function addToolToChain(context: NeuroLinkExecutionContext, toolName: string): void;
/**
 * Context validation utilities
 */
export declare class ContextValidator {
    /**
     * Validate context has required fields for tool execution
     *
     * @param context Execution context to validate
     * @returns Validation result with details
     */
    static validateContext(context: NeuroLinkExecutionContext): {
        isValid: boolean;
        errors: string[];
        warnings: string[];
    };
    /**
     * Validate context permissions for tool execution
     *
     * @param context Execution context
     * @param requiredPermissions Permissions required by tool
     * @returns Whether context has required permissions
     */
    static hasPermissions(context: NeuroLinkExecutionContext, requiredPermissions: string[]): boolean;
}
|