@juspay/neurolink 8.32.0 → 8.34.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -1
- package/README.md +284 -75
- package/dist/action/actionExecutor.d.ts +29 -0
- package/dist/action/actionExecutor.js +290 -0
- package/dist/action/actionInputs.d.ts +25 -0
- package/dist/action/actionInputs.js +293 -0
- package/dist/action/githubIntegration.d.ts +21 -0
- package/dist/action/githubIntegration.js +187 -0
- package/dist/action/index.d.ts +8 -0
- package/dist/action/index.js +11 -0
- package/dist/index.d.ts +145 -13
- package/dist/index.js +145 -13
- package/dist/lib/action/actionExecutor.d.ts +29 -0
- package/dist/lib/action/actionExecutor.js +291 -0
- package/dist/lib/action/actionInputs.d.ts +25 -0
- package/dist/lib/action/actionInputs.js +294 -0
- package/dist/lib/action/githubIntegration.d.ts +21 -0
- package/dist/lib/action/githubIntegration.js +188 -0
- package/dist/lib/action/index.d.ts +8 -0
- package/dist/lib/action/index.js +12 -0
- package/dist/lib/index.d.ts +145 -13
- package/dist/lib/index.js +145 -13
- package/dist/lib/mcp/externalServerManager.js +41 -7
- package/dist/lib/neurolink.d.ts +172 -0
- package/dist/lib/neurolink.js +172 -0
- package/dist/lib/types/actionTypes.d.ts +205 -0
- package/dist/lib/types/actionTypes.js +7 -0
- package/dist/lib/types/index.d.ts +1 -0
- package/dist/lib/utils/errorHandling.d.ts +8 -0
- package/dist/lib/utils/errorHandling.js +29 -0
- package/dist/mcp/externalServerManager.js +41 -7
- package/dist/neurolink.d.ts +172 -0
- package/dist/neurolink.js +172 -0
- package/dist/types/actionTypes.d.ts +205 -0
- package/dist/types/actionTypes.js +6 -0
- package/dist/types/index.d.ts +1 -0
- package/dist/utils/errorHandling.d.ts +8 -0
- package/dist/utils/errorHandling.js +29 -0
- package/package.json +11 -3
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
// src/lib/action/githubIntegration.ts
|
|
2
|
+
/**
|
|
3
|
+
* GitHub API integration for comments, outputs, and job summary
|
|
4
|
+
* @module action/githubIntegration
|
|
5
|
+
*/
|
|
6
|
+
import * as core from "@actions/core";
|
|
7
|
+
import * as github from "@actions/github";
|
|
8
|
+
/**
|
|
9
|
+
* Build comment body for PR/issue
|
|
10
|
+
*/
|
|
11
|
+
/**
 * Build the Markdown body for the PR/issue comment.
 *
 * Layout: hidden HTML marker (used later to find/update the comment),
 * a heading, the AI response, a divider, and a footer line with model,
 * provider, cost, and token metadata when available.
 *
 * @param {object} inputs - Action inputs; only `commentTag` is read here.
 * @param {object} result - Execution result (response, model, provider, cost, usage).
 * @returns {string} Complete comment body joined with newlines.
 */
function buildCommentBody(inputs, result) {
  const lines = [
    `<!-- ${inputs.commentTag} -->`,
    "## 🤖 NeuroLink AI Response\n",
    result.response,
    "\n---",
  ];
  // Collect whichever metadata fields are present for the footer.
  // NOTE(review): a cost of exactly 0 is omitted by this truthy check — confirm intended.
  const meta = [];
  if (result.model) {
    meta.push(`\`${result.model}\``);
  }
  if (result.provider) {
    meta.push(`via ${result.provider}`);
  }
  if (result.cost) {
    meta.push(`$${result.cost.toFixed(6)}`);
  }
  if (result.usage?.totalTokens) {
    meta.push(`${result.usage.totalTokens} tokens`);
  }
  const suffix = meta.length > 0 ? ` using ${meta.join(" | ")}` : "";
  lines.push(`<sub>Generated by [NeuroLink](https://github.com/juspay/neurolink)${suffix}</sub>`);
  return lines.join("\n");
}
|
|
34
|
+
/**
|
|
35
|
+
* Find existing comment by tag
|
|
36
|
+
*/
|
|
37
|
+
/**
 * Locate a previously posted NeuroLink comment on an issue/PR.
 *
 * Scans all comments for the hidden `<!-- tag -->` marker embedded by
 * buildCommentBody and returns the id of the first match.
 *
 * @param {object} octokit - Authenticated Octokit client.
 * @param {string} owner - Repository owner.
 * @param {string} repo - Repository name.
 * @param {number} issueNumber - Issue or PR number.
 * @param {string} commentTag - Tag used inside the HTML marker.
 * @returns {Promise<number | undefined>} Matching comment id, or undefined if none.
 */
async function findExistingComment(octokit, owner, repo, issueNumber, commentTag) {
  const marker = `<!-- ${commentTag} -->`;
  const response = await octokit.rest.issues.listComments({
    owner,
    repo,
    issue_number: issueNumber,
  });
  for (const comment of response.data) {
    if (comment.body?.includes(marker)) {
      return comment.id;
    }
  }
  return undefined;
}
|
|
46
|
+
/**
|
|
47
|
+
* Post result as comment on PR or issue
|
|
48
|
+
*/
|
|
49
|
+
/**
 * Post the execution result as a comment on the current PR or issue.
 *
 * No-op success when commenting is disabled. Requires a GitHub token and a
 * PR/issue context; otherwise logs a warning and returns a failure result.
 * When `updateExistingComment` is set, an existing tagged comment is updated
 * in place instead of creating a new one. API errors are caught and reported
 * as a failure result rather than thrown.
 *
 * @param {object} inputs - Action inputs (postComment, githubToken, updateExistingComment, commentTag).
 * @param {object} result - Execution result used to build the comment body.
 * @returns {Promise<{success: boolean, commentId?: number, commentUrl?: string, error?: string}>}
 */
export async function postResultComment(inputs, result) {
  if (!inputs.postComment) {
    return { success: true };
  }
  const token = inputs.githubToken;
  if (!token) {
    core.warning("post_comment enabled but no github_token provided");
    return { success: false, error: "No GitHub token" };
  }
  const { payload, repo } = github.context;
  const issueNumber = payload.pull_request?.number || payload.issue?.number;
  if (!issueNumber) {
    core.warning("post_comment enabled but not in PR or issue context");
    return { success: false, error: "Not in PR or issue context" };
  }
  const octokit = github.getOctokit(token);
  const body = buildCommentBody(inputs, result);
  try {
    // Update-in-place path: reuse an existing tagged comment if one exists.
    if (inputs.updateExistingComment) {
      const existingId = await findExistingComment(octokit, repo.owner, repo.repo, issueNumber, inputs.commentTag);
      if (existingId) {
        const { data: updated } = await octokit.rest.issues.updateComment({
          owner: repo.owner,
          repo: repo.repo,
          comment_id: existingId,
          body,
        });
        core.info(`Updated existing comment #${existingId}`);
        return { success: true, commentId: updated.id, commentUrl: updated.html_url };
      }
    }
    // Default path: create a fresh comment.
    const { data: created } = await octokit.rest.issues.createComment({
      owner: repo.owner,
      repo: repo.repo,
      issue_number: issueNumber,
      body,
    });
    core.info(`Created comment #${created.id}`);
    return { success: true, commentId: created.id, commentUrl: created.html_url };
  }
  catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    core.warning(`Failed to post comment: ${message}`);
    return { success: false, error: message };
  }
}
|
|
97
|
+
/**
|
|
98
|
+
* Write job summary
|
|
99
|
+
*/
|
|
100
|
+
/**
 * Write a GitHub Actions job summary for the execution.
 *
 * Emits a metrics table (provider, model, token counts, cost, timing), an
 * optional quality-evaluation score, and the response text truncated to
 * 5000 characters. Write failures are logged as warnings, not thrown —
 * job summaries are unavailable in some environments (e.g. local testing).
 *
 * @param {object} inputs - Action inputs (currently unused; kept for interface stability).
 * @param {object} result - Execution result with response, usage, cost, evaluation.
 * @returns {Promise<void>}
 */
export async function writeJobSummary(inputs, result) {
  const headerRow = [
    { data: "Metric", header: true },
    { data: "Value", header: true },
  ];
  const metricRows = [
    ["Provider", result.provider || "auto"],
    ["Model", result.model || "auto"],
    ["Input Tokens", String(result.usage?.promptTokens || "N/A")],
    ["Output Tokens", String(result.usage?.completionTokens || "N/A")],
    ["Total Tokens", String(result.usage?.totalTokens || "N/A")],
    ["Cost", result.cost ? `$${result.cost.toFixed(6)}` : "N/A"],
    ["Execution Time", result.executionTime ? `${result.executionTime}ms` : "N/A"],
  ];
  const report = core.summary
    .addHeading("NeuroLink Execution Summary", 2)
    .addTable([headerRow, ...metricRows]);
  if (result.evaluation) {
    report.addHeading("Quality Evaluation", 3);
    report.addRaw(`Score: ${result.evaluation.overallScore}/100`);
  }
  report.addHeading("Response", 3);
  // Keep the summary within reasonable size limits.
  const limit = 5000;
  const shown = result.response.length > limit
    ? result.response.substring(0, limit) + "..."
    : result.response;
  report.addRaw(shown);
  try {
    await report.write();
  }
  catch (error) {
    // Job summary may not be available in all environments (e.g. local testing).
    core.warning(`Unable to write job summary: ${error instanceof Error ? error.message : String(error)}`);
  }
}
|
|
136
|
+
/**
|
|
137
|
+
* Set all action outputs
|
|
138
|
+
*/
|
|
139
|
+
/**
 * Publish execution results as GitHub Action outputs.
 *
 * `response` and `response_json` are always set; every other output is
 * emitted only when its value is present.
 *
 * @param {object} result - Execution result (response, usage, cost, evaluation, ...).
 * @param {object} [commentResult] - Optional comment-posting result (commentId).
 * @returns {void}
 */
export function setActionOutputs(result, commentResult) {
  core.setOutput("response", result.response);
  core.setOutput("response_json", JSON.stringify(result.responseJson || {}));
  // Optional outputs, emitted only when truthy.
  // NOTE(review): a value of exactly 0 (e.g. zero cost or 0ms) is skipped by
  // these truthy checks — confirm that is intended.
  const optionalOutputs = [
    ["provider", result.provider],
    ["model", result.model],
    ["tokens_used", result.usage?.totalTokens],
    ["prompt_tokens", result.usage?.promptTokens],
    ["completion_tokens", result.usage?.completionTokens],
    ["cost", result.cost],
    ["execution_time", result.executionTime],
    ["evaluation_score", result.evaluation?.overallScore],
    ["comment_id", commentResult?.commentId],
  ];
  for (const [name, value] of optionalOutputs) {
    if (value) {
      core.setOutput(name, value.toString());
    }
  }
}
|
|
170
|
+
/**
|
|
171
|
+
* Get all outputs as typed object (snake_case to match action.yml outputs)
|
|
172
|
+
*/
|
|
173
|
+
/**
 * Collect all outputs as a typed object (snake_case keys matching action.yml).
 *
 * Numeric fields are stringified; missing fields come through as undefined.
 *
 * @param {object} result - Execution result (response, usage, cost, evaluation, ...).
 * @param {object} [commentResult] - Optional comment-posting result (commentId).
 * @returns {object} Output map keyed by action.yml output names.
 */
export function getActionOutputs(result, commentResult) {
  // Stringify a possibly-missing numeric/ID value (undefined stays undefined).
  const asString = (value) => value?.toString();
  const usage = result.usage;
  return {
    response: result.response,
    response_json: JSON.stringify(result.responseJson || {}),
    provider: result.provider,
    model: result.model,
    tokens_used: asString(usage?.totalTokens),
    prompt_tokens: asString(usage?.promptTokens),
    completion_tokens: asString(usage?.completionTokens),
    cost: asString(result.cost),
    execution_time: asString(result.executionTime),
    evaluation_score: asString(result.evaluation?.overallScore),
    comment_id: asString(commentResult?.commentId),
  };
}
|
|
188
|
+
//# sourceMappingURL=githubIntegration.js.map
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* GitHub Action module exports
|
|
3
|
+
* @module action
|
|
4
|
+
*/
|
|
5
|
+
export { parseActionInputs, validateProviderKey, buildEnvironmentVariables, validateActionInputs, maskSecrets, } from "./actionInputs.js";
|
|
6
|
+
export { buildCliArgs, installNeurolink, executeNeurolink, runNeurolink, transformCliResponse, } from "./actionExecutor.js";
|
|
7
|
+
export { postResultComment, writeJobSummary, setActionOutputs, getActionOutputs, } from "./githubIntegration.js";
|
|
8
|
+
export type { ActionInputs, ActionExecutionResult, ActionCommentResult, ActionOutput, ActionProviderKeys, ActionAWSConfig, ActionGoogleCloudConfig, ActionThinkingConfig, ActionMultimodalInputs, ActionTokenUsage, ActionEvaluation, ActionInputValidation, CliResponse, CliTokenUsage, } from "../types/actionTypes.js";
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
// src/lib/action/index.ts
|
|
2
|
+
/**
|
|
3
|
+
* GitHub Action module exports
|
|
4
|
+
* @module action
|
|
5
|
+
*/
|
|
6
|
+
// Input handling
|
|
7
|
+
export { parseActionInputs, validateProviderKey, buildEnvironmentVariables, validateActionInputs, maskSecrets, } from "./actionInputs.js";
|
|
8
|
+
// Execution
|
|
9
|
+
export { buildCliArgs, installNeurolink, executeNeurolink, runNeurolink, transformCliResponse, } from "./actionExecutor.js";
|
|
10
|
+
// GitHub integration
|
|
11
|
+
export { postResultComment, writeJobSummary, setActionOutputs, getActionOutputs, } from "./githubIntegration.js";
|
|
12
|
+
//# sourceMappingURL=index.js.map
|
package/dist/lib/index.d.ts
CHANGED
|
@@ -1,10 +1,35 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* NeuroLink AI Toolkit
|
|
3
3
|
*
|
|
4
|
-
* A unified AI provider interface with support for
|
|
5
|
-
* automatic fallback, streaming,
|
|
4
|
+
* A unified AI provider interface with support for 13+ providers,
|
|
5
|
+
* automatic fallback, streaming, MCP tool integration, HITL security,
|
|
6
|
+
* Redis persistence, and enterprise-grade middleware.
|
|
6
7
|
*
|
|
7
|
-
*
|
|
8
|
+
* NeuroLink provides comprehensive AI functionality with battle-tested
|
|
9
|
+
* patterns extracted from production systems at Juspay.
|
|
10
|
+
*
|
|
11
|
+
* @packageDocumentation
|
|
12
|
+
* @module @juspay/neurolink
|
|
13
|
+
* @category Core
|
|
14
|
+
*
|
|
15
|
+
* @example
|
|
16
|
+
* ```typescript
|
|
17
|
+
* import { NeuroLink } from '@juspay/neurolink';
|
|
18
|
+
*
|
|
19
|
+
* // Create NeuroLink instance
|
|
20
|
+
* const neurolink = new NeuroLink();
|
|
21
|
+
*
|
|
22
|
+
* // Generate with any provider
|
|
23
|
+
* const result = await neurolink.generate({
|
|
24
|
+
* input: { text: 'Explain quantum computing' },
|
|
25
|
+
* provider: 'vertex',
|
|
26
|
+
* model: 'gemini-3-flash'
|
|
27
|
+
* });
|
|
28
|
+
*
|
|
29
|
+
* console.log(result.content);
|
|
30
|
+
* ```
|
|
31
|
+
*
|
|
32
|
+
* @since 1.0.0
|
|
8
33
|
*/
|
|
9
34
|
import { AIProviderFactory } from "./core/factory.js";
|
|
10
35
|
export { AIProviderFactory };
|
|
@@ -29,37 +54,117 @@ export type { NeuroLinkMiddleware, MiddlewareContext, MiddlewareFactoryOptions,
|
|
|
29
54
|
export { MiddlewareFactory } from "./middleware/factory.js";
|
|
30
55
|
export declare const VERSION = "1.0.0";
|
|
31
56
|
/**
|
|
32
|
-
* Quick start factory function
|
|
57
|
+
* Quick start factory function for creating AI provider instances.
|
|
33
58
|
*
|
|
34
|
-
*
|
|
59
|
+
* Creates a configured AI provider instance ready for immediate use.
|
|
60
|
+
* Supports all 13 providers: OpenAI, Anthropic, Google AI Studio,
|
|
61
|
+
* Google Vertex, AWS Bedrock, AWS SageMaker, Azure OpenAI, Hugging Face,
|
|
62
|
+
* LiteLLM, Mistral, Ollama, OpenAI Compatible, and OpenRouter.
|
|
63
|
+
*
|
|
64
|
+
* @category Factory
|
|
65
|
+
*
|
|
66
|
+
* @param providerName - The AI provider name (e.g., 'bedrock', 'vertex', 'openai')
|
|
67
|
+
* @param modelName - Optional model name to override provider default
|
|
68
|
+
* @returns Promise resolving to configured AI provider instance
|
|
69
|
+
*
|
|
70
|
+
* @example Basic usage
|
|
35
71
|
* ```typescript
|
|
36
72
|
* import { createAIProvider } from '@juspay/neurolink';
|
|
37
73
|
*
|
|
38
74
|
* const provider = await createAIProvider('bedrock');
|
|
39
75
|
* const result = await provider.stream({ input: { text: 'Hello, AI!' } });
|
|
40
76
|
* ```
|
|
77
|
+
*
|
|
78
|
+
* @example With custom model
|
|
79
|
+
* ```typescript
|
|
80
|
+
* const provider = await createAIProvider('vertex', 'gemini-3-flash');
|
|
81
|
+
* ```
|
|
82
|
+
*
|
|
83
|
+
* @see {@link AIProviderFactory.createProvider}
|
|
84
|
+
* @see {@link NeuroLink} for the main SDK class
|
|
85
|
+
* @since 1.0.0
|
|
41
86
|
*/
|
|
42
87
|
export declare function createAIProvider(providerName?: string, modelName?: string): Promise<import("./types/providers.js").AIProvider>;
|
|
43
88
|
/**
|
|
44
|
-
* Create provider with automatic fallback
|
|
89
|
+
* Create provider with automatic fallback for production resilience.
|
|
45
90
|
*
|
|
46
|
-
*
|
|
91
|
+
* Creates both primary and fallback provider instances for high-availability
|
|
92
|
+
* deployments. Automatically switches to fallback on primary provider failure.
|
|
93
|
+
*
|
|
94
|
+
* @category Factory
|
|
95
|
+
*
|
|
96
|
+
* @param primaryProvider - Primary AI provider name (default: 'bedrock')
|
|
97
|
+
* @param fallbackProvider - Fallback AI provider name (default: 'vertex')
|
|
98
|
+
* @param modelName - Optional model name for both providers
|
|
99
|
+
* @returns Promise resolving to object with primary and fallback providers
|
|
100
|
+
*
|
|
101
|
+
* @example Production failover setup
|
|
47
102
|
* ```typescript
|
|
48
103
|
* import { createAIProviderWithFallback } from '@juspay/neurolink';
|
|
49
104
|
*
|
|
50
105
|
* const { primary, fallback } = await createAIProviderWithFallback('bedrock', 'vertex');
|
|
106
|
+
*
|
|
107
|
+
* try {
|
|
108
|
+
* const result = await primary.generate({ input: { text: 'Hello!' } });
|
|
109
|
+
* } catch (error) {
|
|
110
|
+
* // Automatically use fallback
|
|
111
|
+
* const result = await fallback.generate({ input: { text: 'Hello!' } });
|
|
112
|
+
* }
|
|
51
113
|
* ```
|
|
114
|
+
*
|
|
115
|
+
* @example Multi-region setup
|
|
116
|
+
* ```typescript
|
|
117
|
+
* const { primary, fallback } = await createAIProviderWithFallback(
|
|
118
|
+
* 'vertex', // Primary: US region
|
|
119
|
+
* 'bedrock', // Fallback: Global
|
|
120
|
+
* 'claude-3-sonnet'
|
|
121
|
+
* );
|
|
122
|
+
* ```
|
|
123
|
+
*
|
|
124
|
+
* @see {@link AIProviderFactory.createProviderWithFallback}
|
|
125
|
+
* @since 1.0.0
|
|
52
126
|
*/
|
|
53
127
|
export declare function createAIProviderWithFallback(primaryProvider?: string, fallbackProvider?: string, modelName?: string): Promise<import("./types/typeAliases.js").ProviderPairResult<import("./types/providers.js").AIProvider>>;
|
|
54
128
|
/**
|
|
55
|
-
* Create the best available provider based on configuration
|
|
129
|
+
* Create the best available provider based on environment configuration.
|
|
56
130
|
*
|
|
57
|
-
*
|
|
131
|
+
* Intelligently selects the best provider based on available API keys
|
|
132
|
+
* in environment variables. Automatically detects and configures the
|
|
133
|
+
* optimal provider without manual configuration.
|
|
134
|
+
*
|
|
135
|
+
* @category Factory
|
|
136
|
+
*
|
|
137
|
+
* @param requestedProvider - Optional preferred provider name
|
|
138
|
+
* @param modelName - Optional model name
|
|
139
|
+
* @returns Promise resolving to the best configured provider
|
|
140
|
+
*
|
|
141
|
+
* @example Automatic provider selection
|
|
58
142
|
* ```typescript
|
|
59
143
|
* import { createBestAIProvider } from '@juspay/neurolink';
|
|
60
144
|
*
|
|
145
|
+
* // Automatically uses provider with configured API key
|
|
61
146
|
* const provider = await createBestAIProvider();
|
|
147
|
+
* const result = await provider.generate({ input: { text: 'Hello!' } });
|
|
148
|
+
* ```
|
|
149
|
+
*
|
|
150
|
+
* @example With provider preference
|
|
151
|
+
* ```typescript
|
|
152
|
+
* // Tries to use OpenAI, falls back to available provider
|
|
153
|
+
* const provider = await createBestAIProvider('openai');
|
|
62
154
|
* ```
|
|
155
|
+
*
|
|
156
|
+
* @remarks
|
|
157
|
+
* Environment variables checked (in order):
|
|
158
|
+
* - OPENAI_API_KEY
|
|
159
|
+
* - ANTHROPIC_API_KEY
|
|
160
|
+
* - GOOGLE_API_KEY
|
|
161
|
+
* - VERTEX_PROJECT_ID + credentials
|
|
162
|
+
* - AWS credentials for Bedrock
|
|
163
|
+
* - And more...
|
|
164
|
+
*
|
|
165
|
+
* @see {@link AIProviderFactory.createBestProvider}
|
|
166
|
+
* @see {@link getBestProvider} for provider detection utility
|
|
167
|
+
* @since 1.0.0
|
|
63
168
|
*/
|
|
64
169
|
export declare function createBestAIProvider(requestedProvider?: string, modelName?: string): Promise<import("./types/providers.js").AIProvider>;
|
|
65
170
|
/**
|
|
@@ -97,19 +202,46 @@ export declare function getTelemetryStatus(): Promise<{
|
|
|
97
202
|
}>;
|
|
98
203
|
export type { TextGenerationOptions, TextGenerationResult, AnalyticsData, EvaluationData, } from "./types/index.js";
|
|
99
204
|
/**
|
|
100
|
-
*
|
|
101
|
-
* Provides standalone generateText function for existing code that uses it
|
|
205
|
+
* Legacy generateText function for backward compatibility.
|
|
102
206
|
*
|
|
103
|
-
*
|
|
207
|
+
* Provides standalone text generation function for existing code.
|
|
208
|
+
* For new code, use {@link NeuroLink.generate} instead which provides
|
|
209
|
+
* more features including streaming, tools, and structured output.
|
|
210
|
+
*
|
|
211
|
+
* @category Legacy
|
|
212
|
+
* @deprecated Use {@link NeuroLink.generate} for new code
|
|
213
|
+
*
|
|
214
|
+
* @param options - Text generation options
|
|
215
|
+
* @param options.prompt - Input prompt text
|
|
216
|
+
* @param options.provider - AI provider name (e.g., 'bedrock', 'openai')
|
|
217
|
+
* @param options.model - Model name to use
|
|
218
|
+
* @param options.temperature - Sampling temperature (0-2)
|
|
219
|
+
* @param options.maxTokens - Maximum tokens to generate
|
|
220
|
+
* @returns Promise resolving to text generation result with content and metadata
|
|
221
|
+
*
|
|
222
|
+
* @example Basic text generation
|
|
104
223
|
* ```typescript
|
|
105
224
|
* import { generateText } from '@juspay/neurolink';
|
|
106
225
|
*
|
|
107
226
|
* const result = await generateText({
|
|
108
|
-
* prompt: '
|
|
227
|
+
* prompt: 'Explain quantum computing in simple terms',
|
|
109
228
|
* provider: 'bedrock',
|
|
110
229
|
* model: 'claude-3-sonnet'
|
|
111
230
|
* });
|
|
112
231
|
* console.log(result.content);
|
|
113
232
|
* ```
|
|
233
|
+
*
|
|
234
|
+
* @example With temperature control
|
|
235
|
+
* ```typescript
|
|
236
|
+
* const result = await generateText({
|
|
237
|
+
* prompt: 'Write a creative story',
|
|
238
|
+
* provider: 'openai',
|
|
239
|
+
* temperature: 1.5,
|
|
240
|
+
* maxTokens: 500
|
|
241
|
+
* });
|
|
242
|
+
* ```
|
|
243
|
+
*
|
|
244
|
+
* @see {@link NeuroLink.generate} for modern API with more features
|
|
245
|
+
* @since 1.0.0
|
|
114
246
|
*/
|
|
115
247
|
export declare function generateText(options: import("./types/index.js").TextGenerationOptions): Promise<import("./types/index.js").TextGenerationResult>;
|
package/dist/lib/index.js
CHANGED
|
@@ -1,10 +1,35 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* NeuroLink AI Toolkit
|
|
3
3
|
*
|
|
4
|
-
* A unified AI provider interface with support for
|
|
5
|
-
* automatic fallback, streaming,
|
|
4
|
+
* A unified AI provider interface with support for 13+ providers,
|
|
5
|
+
* automatic fallback, streaming, MCP tool integration, HITL security,
|
|
6
|
+
* Redis persistence, and enterprise-grade middleware.
|
|
6
7
|
*
|
|
7
|
-
*
|
|
8
|
+
* NeuroLink provides comprehensive AI functionality with battle-tested
|
|
9
|
+
* patterns extracted from production systems at Juspay.
|
|
10
|
+
*
|
|
11
|
+
* @packageDocumentation
|
|
12
|
+
* @module @juspay/neurolink
|
|
13
|
+
* @category Core
|
|
14
|
+
*
|
|
15
|
+
* @example
|
|
16
|
+
* ```typescript
|
|
17
|
+
* import { NeuroLink } from '@juspay/neurolink';
|
|
18
|
+
*
|
|
19
|
+
* // Create NeuroLink instance
|
|
20
|
+
* const neurolink = new NeuroLink();
|
|
21
|
+
*
|
|
22
|
+
* // Generate with any provider
|
|
23
|
+
* const result = await neurolink.generate({
|
|
24
|
+
* input: { text: 'Explain quantum computing' },
|
|
25
|
+
* provider: 'vertex',
|
|
26
|
+
* model: 'gemini-3-flash'
|
|
27
|
+
* });
|
|
28
|
+
*
|
|
29
|
+
* console.log(result.content);
|
|
30
|
+
* ```
|
|
31
|
+
*
|
|
32
|
+
* @since 1.0.0
|
|
8
33
|
*/
|
|
9
34
|
// Core exports
|
|
10
35
|
import { AIProviderFactory } from "./core/factory.js";
|
|
@@ -28,41 +53,121 @@ export { MiddlewareFactory } from "./middleware/factory.js";
|
|
|
28
53
|
// Version
|
|
29
54
|
export const VERSION = "1.0.0";
|
|
30
55
|
/**
|
|
31
|
-
* Quick start factory function
|
|
56
|
+
* Quick start factory function for creating AI provider instances.
|
|
32
57
|
*
|
|
33
|
-
*
|
|
58
|
+
* Creates a configured AI provider instance ready for immediate use.
|
|
59
|
+
* Supports all 13 providers: OpenAI, Anthropic, Google AI Studio,
|
|
60
|
+
* Google Vertex, AWS Bedrock, AWS SageMaker, Azure OpenAI, Hugging Face,
|
|
61
|
+
* LiteLLM, Mistral, Ollama, OpenAI Compatible, and OpenRouter.
|
|
62
|
+
*
|
|
63
|
+
* @category Factory
|
|
64
|
+
*
|
|
65
|
+
* @param providerName - The AI provider name (e.g., 'bedrock', 'vertex', 'openai')
|
|
66
|
+
* @param modelName - Optional model name to override provider default
|
|
67
|
+
* @returns Promise resolving to configured AI provider instance
|
|
68
|
+
*
|
|
69
|
+
* @example Basic usage
|
|
34
70
|
* ```typescript
|
|
35
71
|
* import { createAIProvider } from '@juspay/neurolink';
|
|
36
72
|
*
|
|
37
73
|
* const provider = await createAIProvider('bedrock');
|
|
38
74
|
* const result = await provider.stream({ input: { text: 'Hello, AI!' } });
|
|
39
75
|
* ```
|
|
76
|
+
*
|
|
77
|
+
* @example With custom model
|
|
78
|
+
* ```typescript
|
|
79
|
+
* const provider = await createAIProvider('vertex', 'gemini-3-flash');
|
|
80
|
+
* ```
|
|
81
|
+
*
|
|
82
|
+
* @see {@link AIProviderFactory.createProvider}
|
|
83
|
+
* @see {@link NeuroLink} for the main SDK class
|
|
84
|
+
* @since 1.0.0
|
|
40
85
|
*/
|
|
41
86
|
export async function createAIProvider(providerName, modelName) {
  // Fall back to the default provider when none (or an empty string) is given.
  const resolvedProvider = providerName || "bedrock";
  return await AIProviderFactory.createProvider(resolvedProvider, modelName);
}
|
|
44
89
|
/**
|
|
45
|
-
* Create provider with automatic fallback
|
|
90
|
+
* Create provider with automatic fallback for production resilience.
|
|
46
91
|
*
|
|
47
|
-
*
|
|
92
|
+
* Creates both primary and fallback provider instances for high-availability
|
|
93
|
+
* deployments. Automatically switches to fallback on primary provider failure.
|
|
94
|
+
*
|
|
95
|
+
* @category Factory
|
|
96
|
+
*
|
|
97
|
+
* @param primaryProvider - Primary AI provider name (default: 'bedrock')
|
|
98
|
+
* @param fallbackProvider - Fallback AI provider name (default: 'vertex')
|
|
99
|
+
* @param modelName - Optional model name for both providers
|
|
100
|
+
* @returns Promise resolving to object with primary and fallback providers
|
|
101
|
+
*
|
|
102
|
+
* @example Production failover setup
|
|
48
103
|
* ```typescript
|
|
49
104
|
* import { createAIProviderWithFallback } from '@juspay/neurolink';
|
|
50
105
|
*
|
|
51
106
|
* const { primary, fallback } = await createAIProviderWithFallback('bedrock', 'vertex');
|
|
107
|
+
*
|
|
108
|
+
* try {
|
|
109
|
+
* const result = await primary.generate({ input: { text: 'Hello!' } });
|
|
110
|
+
* } catch (error) {
|
|
111
|
+
* // Automatically use fallback
|
|
112
|
+
* const result = await fallback.generate({ input: { text: 'Hello!' } });
|
|
113
|
+
* }
|
|
52
114
|
* ```
|
|
115
|
+
*
|
|
116
|
+
* @example Multi-region setup
|
|
117
|
+
* ```typescript
|
|
118
|
+
* const { primary, fallback } = await createAIProviderWithFallback(
|
|
119
|
+
* 'vertex', // Primary: US region
|
|
120
|
+
* 'bedrock', // Fallback: Global
|
|
121
|
+
* 'claude-3-sonnet'
|
|
122
|
+
* );
|
|
123
|
+
* ```
|
|
124
|
+
*
|
|
125
|
+
* @see {@link AIProviderFactory.createProviderWithFallback}
|
|
126
|
+
* @since 1.0.0
|
|
53
127
|
*/
|
|
54
128
|
export async function createAIProviderWithFallback(primaryProvider, fallbackProvider, modelName) {
  // Defaults: 'bedrock' primary, 'vertex' fallback (empty strings also fall back).
  const resolvedPrimary = primaryProvider || "bedrock";
  const resolvedFallback = fallbackProvider || "vertex";
  return await AIProviderFactory.createProviderWithFallback(resolvedPrimary, resolvedFallback, modelName);
}
|
|
57
131
|
/**
|
|
58
|
-
* Create the best available provider based on configuration
|
|
132
|
+
* Create the best available provider based on environment configuration.
|
|
59
133
|
*
|
|
60
|
-
*
|
|
134
|
+
* Intelligently selects the best provider based on available API keys
|
|
135
|
+
* in environment variables. Automatically detects and configures the
|
|
136
|
+
* optimal provider without manual configuration.
|
|
137
|
+
*
|
|
138
|
+
* @category Factory
|
|
139
|
+
*
|
|
140
|
+
* @param requestedProvider - Optional preferred provider name
|
|
141
|
+
* @param modelName - Optional model name
|
|
142
|
+
* @returns Promise resolving to the best configured provider
|
|
143
|
+
*
|
|
144
|
+
* @example Automatic provider selection
|
|
61
145
|
* ```typescript
|
|
62
146
|
* import { createBestAIProvider } from '@juspay/neurolink';
|
|
63
147
|
*
|
|
148
|
+
* // Automatically uses provider with configured API key
|
|
64
149
|
* const provider = await createBestAIProvider();
|
|
150
|
+
* const result = await provider.generate({ input: { text: 'Hello!' } });
|
|
151
|
+
* ```
|
|
152
|
+
*
|
|
153
|
+
* @example With provider preference
|
|
154
|
+
* ```typescript
|
|
155
|
+
* // Tries to use OpenAI, falls back to available provider
|
|
156
|
+
* const provider = await createBestAIProvider('openai');
|
|
65
157
|
* ```
|
|
158
|
+
*
|
|
159
|
+
* @remarks
|
|
160
|
+
* Environment variables checked (in order):
|
|
161
|
+
* - OPENAI_API_KEY
|
|
162
|
+
* - ANTHROPIC_API_KEY
|
|
163
|
+
* - GOOGLE_API_KEY
|
|
164
|
+
* - VERTEX_PROJECT_ID + credentials
|
|
165
|
+
* - AWS credentials for Bedrock
|
|
166
|
+
* - And more...
|
|
167
|
+
*
|
|
168
|
+
* @see {@link AIProviderFactory.createBestProvider}
|
|
169
|
+
* @see {@link getBestProvider} for provider detection utility
|
|
170
|
+
* @since 1.0.0
|
|
66
171
|
*/
|
|
67
172
|
export async function createBestAIProvider(requestedProvider, modelName) {
|
|
68
173
|
return await AIProviderFactory.createBestProvider(requestedProvider, modelName);
|
|
@@ -121,20 +226,47 @@ export async function getTelemetryStatus() {
|
|
|
121
226
|
return getStatus();
|
|
122
227
|
}
|
|
123
228
|
/**
|
|
124
|
-
*
|
|
125
|
-
* Provides standalone generateText function for existing code that uses it
|
|
229
|
+
* Legacy generateText function for backward compatibility.
|
|
126
230
|
*
|
|
127
|
-
*
|
|
231
|
+
* Provides standalone text generation function for existing code.
|
|
232
|
+
* For new code, use {@link NeuroLink.generate} instead which provides
|
|
233
|
+
* more features including streaming, tools, and structured output.
|
|
234
|
+
*
|
|
235
|
+
* @category Legacy
|
|
236
|
+
* @deprecated Use {@link NeuroLink.generate} for new code
|
|
237
|
+
*
|
|
238
|
+
* @param options - Text generation options
|
|
239
|
+
* @param options.prompt - Input prompt text
|
|
240
|
+
* @param options.provider - AI provider name (e.g., 'bedrock', 'openai')
|
|
241
|
+
* @param options.model - Model name to use
|
|
242
|
+
* @param options.temperature - Sampling temperature (0-2)
|
|
243
|
+
* @param options.maxTokens - Maximum tokens to generate
|
|
244
|
+
* @returns Promise resolving to text generation result with content and metadata
|
|
245
|
+
*
|
|
246
|
+
* @example Basic text generation
|
|
128
247
|
* ```typescript
|
|
129
248
|
* import { generateText } from '@juspay/neurolink';
|
|
130
249
|
*
|
|
131
250
|
* const result = await generateText({
|
|
132
|
-
* prompt: '
|
|
251
|
+
* prompt: 'Explain quantum computing in simple terms',
|
|
133
252
|
* provider: 'bedrock',
|
|
134
253
|
* model: 'claude-3-sonnet'
|
|
135
254
|
* });
|
|
136
255
|
* console.log(result.content);
|
|
137
256
|
* ```
|
|
257
|
+
*
|
|
258
|
+
* @example With temperature control
|
|
259
|
+
* ```typescript
|
|
260
|
+
* const result = await generateText({
|
|
261
|
+
* prompt: 'Write a creative story',
|
|
262
|
+
* provider: 'openai',
|
|
263
|
+
* temperature: 1.5,
|
|
264
|
+
* maxTokens: 500
|
|
265
|
+
* });
|
|
266
|
+
* ```
|
|
267
|
+
*
|
|
268
|
+
* @see {@link NeuroLink.generate} for modern API with more features
|
|
269
|
+
* @since 1.0.0
|
|
138
270
|
*/
|
|
139
271
|
export async function generateText(options) {
|
|
140
272
|
// Create instance on-demand without auto-instantiation
|