@juspay/neurolink 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +525 -24
- package/dist/cli/index.js +308 -0
- package/dist/core/types.d.ts +25 -3
- package/dist/index.d.ts +2 -0
- package/dist/index.js +1 -0
- package/dist/neurolink.d.ts +53 -0
- package/dist/neurolink.js +101 -0
- package/dist/providers/amazonBedrock.d.ts +3 -3
- package/dist/providers/amazonBedrock.js +51 -25
- package/dist/providers/googleVertexAI.d.ts +3 -3
- package/dist/providers/googleVertexAI.js +156 -31
- package/dist/providers/openAI.d.ts +3 -3
- package/dist/providers/openAI.js +42 -19
- package/dist/utils/providerUtils.js +2 -2
- package/package.json +21 -5
package/dist/cli/index.js
ADDED

@@ -0,0 +1,308 @@
+#!/usr/bin/env node
+/**
+ * NeuroLink CLI - Enhanced Simplified Approach
+ *
+ * Professional CLI experience with minimal maintenance overhead.
+ * Features: Spinners, colors, batch processing, provider testing, rich help
+ * Implementation: ~300 lines using simple JS utility functions
+ */
+import { NeuroLink } from '@juspay/neurolink';
+import yargs from 'yargs';
+import { hideBin } from 'yargs/helpers';
+import ora from 'ora';
+import chalk from 'chalk';
+import fs from 'fs';
+import { fileURLToPath } from 'url';
+import { dirname } from 'path';
+// Get current directory for ESM
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+// Utility Functions (Simple, Zero Maintenance)
+function formatOutput(result, format = 'text') {
+    if (format === 'json') {
+        return JSON.stringify(result, null, 2);
+    }
+    // Smart text formatting
+    if (result?.content) {
+        return result.content;
+    }
+    if (typeof result === 'string') {
+        return result;
+    }
+    return JSON.stringify(result, null, 2);
+}
+function handleError(error, context) {
+    console.error(chalk.red(`❌ ${context} failed: ${error.message}`));
+    // Smart hints for common errors (just string matching!)
+    if (error.message.includes('API key')) {
+        console.error(chalk.yellow('💡 Set API key: export OPENAI_API_KEY=sk-...'));
+        console.error(chalk.yellow('💡 Or set: export AWS_REGION=us-east-1'));
+        console.error(chalk.yellow('💡 Or set: export GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json'));
+    }
+    if (error.message.includes('rate limit')) {
+        console.error(chalk.yellow('💡 Try again in a few moments or use --provider vertex'));
+    }
+    if (error.message.includes('not authorized')) {
+        console.error(chalk.yellow('💡 Check your account permissions for the selected model'));
+        console.error(chalk.yellow('💡 For AWS Bedrock: Use inference profile ARNs'));
+    }
+    process.exit(1);
+}
+function validateConfig() {
+    const hasOpenAI = !!process.env.OPENAI_API_KEY;
+    const hasAWS = !!(process.env.AWS_REGION || process.env.AWS_ACCESS_KEY_ID);
+    const hasGoogle = !!(process.env.GOOGLE_APPLICATION_CREDENTIALS || process.env.GOOGLE_SERVICE_ACCOUNT_KEY);
+    if (!hasOpenAI && !hasAWS && !hasGoogle) {
+        console.error(chalk.red('⚠️ No AI provider credentials found'));
+        console.error(chalk.yellow('💡 Set one of:'));
+        console.error(chalk.yellow('  • OPENAI_API_KEY=sk-...'));
+        console.error(chalk.yellow('  • AWS_REGION=us-east-1 (+ AWS credentials)'));
+        console.error(chalk.yellow('  • GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json'));
+        console.error(chalk.blue('\n📖 See: https://github.com/juspay/neurolink#setup'));
+        process.exit(1);
+    }
+}
+// Initialize SDK
+const sdk = new NeuroLink();
+// Enhanced CLI with Professional UX
+const cli = yargs(hideBin(process.argv))
+    .scriptName('neurolink')
+    .usage(chalk.blue('🧠 $0 <command> [options]'))
+    .middleware([validateConfig])
+    .version()
+    .help()
+    .strict()
+    .demandCommand(1, chalk.red('❌ Specify a command'))
+    .epilogue(chalk.blue('💡 For more info: https://github.com/juspay/neurolink'))
+    // Generate Text Command - Core functionality with professional UX
+    .command('generate-text <prompt>', 'Generate text using AI providers', (yargs) => yargs
+        .positional('prompt', {
+            type: 'string',
+            description: 'Text prompt for AI generation'
+        })
+        .option('provider', {
+            choices: ['auto', 'openai', 'bedrock', 'vertex'],
+            default: 'auto',
+            description: 'AI provider to use (auto-selects best available)'
+        })
+        .option('temperature', {
+            type: 'number',
+            default: 0.7,
+            description: 'Creativity level (0.0 = focused, 1.0 = creative)'
+        })
+        .option('max-tokens', {
+            type: 'number',
+            default: 500,
+            description: 'Maximum tokens to generate'
+        })
+        .option('format', {
+            choices: ['text', 'json'],
+            default: 'text',
+            description: 'Output format'
+        })
+        .example('$0 generate-text "Hello world"', 'Basic text generation')
+        .example('$0 generate-text "Write a story" --provider openai', 'Use specific provider')
+        .example('$0 generate-text "Technical doc" --format json', 'Get JSON output'), async (argv) => {
+        const spinner = ora('🤖 Generating text...').start();
+        try {
+            if (!argv.prompt) {
+                throw new Error('Prompt is required');
+            }
+            const result = await sdk.generateText({
+                prompt: argv.prompt,
+                provider: argv.provider === 'auto' ? undefined : argv.provider,
+                temperature: argv.temperature,
+                maxTokens: argv.maxTokens
+            });
+            spinner.succeed(chalk.green('✅ Text generated successfully!'));
+            console.log(formatOutput(result, argv.format));
+            // Show usage info for text format
+            if (argv.format === 'text' && result.usage) {
+                console.log(chalk.blue(`ℹ️ ${result.usage.totalTokens} tokens used`));
+            }
+        }
+        catch (error) {
+            spinner.fail();
+            handleError(error, 'Text generation');
+        }
+    })
+    // Stream Text Command - Real-time generation
+    .command('stream <prompt>', 'Stream text generation in real-time', (yargs) => yargs
+        .positional('prompt', {
+            type: 'string',
+            description: 'Text prompt for streaming'
+        })
+        .option('provider', {
+            choices: ['auto', 'openai', 'bedrock', 'vertex'],
+            default: 'auto',
+            description: 'AI provider to use'
+        })
+        .option('temperature', {
+            type: 'number',
+            default: 0.7,
+            description: 'Creativity level'
+        })
+        .example('$0 stream "Tell me a story"', 'Stream a story in real-time')
+        .example('$0 stream "Explain AI" --provider vertex', 'Stream with specific provider'), async (argv) => {
+        console.log(chalk.blue(`🌊 Streaming from ${argv.provider} provider...\n`));
+        try {
+            if (!argv.prompt) {
+                throw new Error('Prompt is required');
+            }
+            const stream = await sdk.generateTextStream({
+                prompt: argv.prompt,
+                provider: argv.provider === 'auto' ? undefined : argv.provider,
+                temperature: argv.temperature
+            });
+            for await (const chunk of stream) {
+                process.stdout.write(chunk.content);
+            }
+            console.log('\n');
+        }
+        catch (error) {
+            handleError(error, 'Text streaming');
+        }
+    })
+    // Batch Processing Command - Power user feature with simple implementation
+    .command('batch <file>', 'Process multiple prompts from a file', (yargs) => yargs
+        .positional('file', {
+            type: 'string',
+            description: 'File with prompts (one per line)'
+        })
+        .option('output', {
+            type: 'string',
+            description: 'Output file for results (default: stdout)'
+        })
+        .option('delay', {
+            type: 'number',
+            default: 1000,
+            description: 'Delay between requests in milliseconds'
+        })
+        .option('provider', {
+            choices: ['auto', 'openai', 'bedrock', 'vertex'],
+            default: 'auto',
+            description: 'AI provider to use'
+        })
+        .example('$0 batch prompts.txt', 'Process prompts from file')
+        .example('$0 batch prompts.txt --output results.json', 'Save results to file')
+        .example('$0 batch prompts.txt --delay 2000', 'Add 2s delay between requests'), async (argv) => {
+        try {
+            // Validate file argument
+            if (!argv.file) {
+                throw new Error('File path is required');
+            }
+            // Read and validate input file
+            if (!fs.existsSync(argv.file)) {
+                throw new Error(`File not found: ${argv.file}`);
+            }
+            const prompts = fs.readFileSync(argv.file, 'utf8')
+                .split('\n')
+                .map(line => line.trim())
+                .filter(Boolean);
+            if (prompts.length === 0) {
+                throw new Error('No prompts found in file');
+            }
+            const results = [];
+            console.log(chalk.blue(`📦 Processing ${prompts.length} prompts...\n`));
+            // Sequential processing with progress tracking
+            for (let i = 0; i < prompts.length; i++) {
+                const spinner = ora(`Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 50)}...`).start();
+                try {
+                    const result = await sdk.generateText({
+                        prompt: prompts[i],
+                        provider: argv.provider === 'auto' ? undefined : argv.provider
+                    });
+                    results.push({
+                        prompt: prompts[i],
+                        response: result.content
+                    });
+                    spinner.succeed(`${i + 1}/${prompts.length} completed`);
+                }
+                catch (error) {
+                    results.push({
+                        prompt: prompts[i],
+                        error: error.message
+                    });
+                    spinner.fail(`${i + 1}/${prompts.length} failed: ${error.message}`);
+                }
+                // Add delay between requests (except for last one)
+                if (argv.delay && i < prompts.length - 1) {
+                    await new Promise(resolve => setTimeout(resolve, argv.delay));
+                }
+            }
+            // Output results
+            const output = JSON.stringify(results, null, 2);
+            if (argv.output) {
+                fs.writeFileSync(argv.output, output);
+                console.log(chalk.green(`\n✅ Results saved to ${argv.output}`));
+            }
+            else {
+                console.log('\n' + output);
+            }
+        }
+        catch (error) {
+            handleError(error, 'Batch processing');
+        }
+    })
+    // Provider Status Command - Testing and diagnostics
+    .command('status', 'Check AI provider connectivity and performance', (yargs) => yargs
+        .option('verbose', {
+            type: 'boolean',
+            default: false,
+            alias: 'v',
+            description: 'Show detailed information'
+        })
+        .example('$0 status', 'Check all providers')
+        .example('$0 status --verbose', 'Show detailed status information'), async (argv) => {
+        console.log(chalk.blue('🔍 Checking AI provider status...\n'));
+        const providers = ['openai', 'bedrock', 'vertex'];
+        const results = [];
+        for (const provider of providers) {
+            const spinner = ora(`Testing ${provider}`).start();
+            try {
+                const start = Date.now();
+                await sdk.generateText({
+                    prompt: 'test',
+                    provider,
+                    maxTokens: 1
+                });
+                const duration = Date.now() - start;
+                results.push({
+                    provider,
+                    status: 'working',
+                    responseTime: duration
+                });
+                spinner.succeed(`${provider}: ${chalk.green('✅ Working')} (${duration}ms)`);
+            }
+            catch (error) {
+                results.push({
+                    provider,
+                    status: 'failed',
+                    error: error.message
+                });
+                spinner.fail(`${provider}: ${chalk.red('❌ Failed')} - ${error.message}`);
+            }
+        }
+        // Show summary
+        const working = results.filter(r => r.status === 'working').length;
+        const total = results.length;
+        console.log(chalk.blue(`\n📊 Summary: ${working}/${total} providers working`));
+        if (argv.verbose) {
+            console.log(chalk.blue('\n📋 Detailed Results:'));
+            console.log(JSON.stringify(results, null, 2));
+        }
+    })
+    // Get Best Provider Command - Auto-selection testing
+    .command('get-best-provider', 'Show the best available AI provider', () => { }, async () => {
+        const spinner = ora('🎯 Finding best provider...').start();
+        try {
+            const provider = await sdk.getBestProvider();
+            spinner.succeed(chalk.green(`✅ Best provider: ${provider}`));
+        }
+        catch (error) {
+            spinner.fail();
+            handleError(error, 'Provider selection');
+        }
+    });
+// Execute CLI
+cli.parse();
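The commands above are thin wrappers over the `NeuroLink` SDK class (declared in `neurolink.d.ts` below). As a rough sketch of the programmatic equivalent of `neurolink generate-text` — illustrative only, with an invented prompt, not part of the package:

```js
// Programmatic equivalent of: neurolink generate-text "..." --provider openai --max-tokens 200
// Sketch based on the API surface shown in this diff.
import { NeuroLink } from '@juspay/neurolink';

const sdk = new NeuroLink();
const result = await sdk.generateText({
  prompt: 'Write a haiku about type safety',
  provider: 'openai',   // omit (or pass 'auto') to auto-select the best provider
  temperature: 0.7,
  maxTokens: 200
});
console.log(result.content);
if (result.usage) console.log(`${result.usage.totalTokens} tokens used`);
```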
package/dist/core/types.d.ts
CHANGED

@@ -55,11 +55,33 @@ export interface StreamingOptions {
     systemPrompt?: string;
 }
 /**
- *
+ * Text generation options interface
+ */
+export interface TextGenerationOptions {
+    prompt: string;
+    model?: string;
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
+}
+/**
+ * Stream text options interface
+ */
+export interface StreamTextOptions {
+    prompt: string;
+    model?: string;
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
+}
+/**
+ * AI Provider interface with flexible parameter support
  */
 export interface AIProvider {
-    streamText(
-    generateText(
+    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
 }
 /**
  * Provider attempt result for iteration tracking
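Both `AIProvider` methods now accept either a bare prompt string or a full options object. A minimal sketch of the two calling styles — the provider instance and prompt are made up for illustration:

```js
// Any AIProvider implementation from this package works here; AmazonBedrock
// is used purely as an example (requires AWS credentials in the environment).
import { AmazonBedrock } from '@juspay/neurolink';

const provider = new AmazonBedrock();

// Legacy style: bare string (an optional schema can follow as a second argument).
const quick = await provider.generateText('Summarize the changelog');

// New style: options object carrying generation parameters inline.
const tuned = await provider.generateText({
  prompt: 'Summarize the changelog',
  temperature: 0.2,
  maxTokens: 200
});
```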
package/dist/index.d.ts
CHANGED

@@ -14,6 +14,8 @@ export { GoogleVertexAI, AmazonBedrock, OpenAI } from './providers/index.js';
 export type { ProviderName } from './providers/index.js';
 export { PROVIDERS, AVAILABLE_PROVIDERS } from './providers/index.js';
 export { getBestProvider, getAvailableProviders, isValidProvider } from './utils/providerUtils.js';
+export { NeuroLink } from './neurolink.js';
+export type { TextGenerationOptions, StreamTextOptions, TextGenerationResult } from './neurolink.js';
 export declare const VERSION = "1.0.0";
 /**
  * Quick start factory function
package/dist/index.js
CHANGED

@@ -16,6 +16,7 @@ export { GoogleVertexAI, AmazonBedrock, OpenAI } from './providers/index.js';
 export { PROVIDERS, AVAILABLE_PROVIDERS } from './providers/index.js';
 // Utility exports
 export { getBestProvider, getAvailableProviders, isValidProvider } from './utils/providerUtils.js';
+// Main NeuroLink wrapper class export
+export { NeuroLink } from './neurolink.js';
 // Version
 export const VERSION = '1.0.0';
 /**
package/dist/neurolink.d.ts
ADDED

@@ -0,0 +1,53 @@
+/**
+ * NeuroLink - Unified AI Interface
+ *
+ * Simple wrapper around the AI provider system to provide a clean API
+ * for CLI and other consumers.
+ */
+import type { AIProviderName } from './core/types.js';
+export interface TextGenerationOptions {
+    prompt: string;
+    provider?: 'openai' | 'bedrock' | 'vertex' | 'auto';
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: any;
+}
+export interface StreamTextOptions {
+    prompt: string;
+    provider?: 'openai' | 'bedrock' | 'vertex' | 'auto';
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+}
+export interface TextGenerationResult {
+    content: string;
+    provider?: string;
+    model?: string;
+    usage?: {
+        promptTokens?: number;
+        completionTokens?: number;
+        totalTokens?: number;
+    };
+    responseTime?: number;
+}
+export declare class NeuroLink {
+    /**
+     * Generate text using the best available AI provider
+     */
+    generateText(options: TextGenerationOptions): Promise<TextGenerationResult>;
+    /**
+     * Generate streaming text using the best available AI provider
+     */
+    generateTextStream(options: StreamTextOptions): Promise<AsyncIterable<{
+        content: string;
+    }>>;
+    /**
+     * Get the best available AI provider
+     */
+    getBestProvider(): Promise<string>;
+    /**
+     * Test a specific provider
+     */
+    testProvider(providerName: AIProviderName, testPrompt?: string): Promise<boolean>;
+}
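Per the declaration above, `generateTextStream` resolves to an async iterable of `{ content }` chunks, so consumers drain it with `for await`. A minimal sketch (prompt text invented):

```js
import { NeuroLink } from '@juspay/neurolink';

const sdk = new NeuroLink();
const stream = await sdk.generateTextStream({ prompt: 'Explain backpressure briefly' });
for await (const chunk of stream) {
  process.stdout.write(chunk.content);   // print text as it arrives
}
process.stdout.write('\n');
```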
package/dist/neurolink.js
ADDED

@@ -0,0 +1,101 @@
+/**
+ * NeuroLink - Unified AI Interface
+ *
+ * Simple wrapper around the AI provider system to provide a clean API
+ * for CLI and other consumers.
+ */
+import { AIProviderFactory, createBestAIProvider } from './index.js';
+import { getBestProvider } from './utils/providerUtils.js';
+export class NeuroLink {
+    /**
+     * Generate text using the best available AI provider
+     */
+    async generateText(options) {
+        const startTime = Date.now();
+        try {
+            let provider;
+            let providerName;
+            if (options.provider && options.provider !== 'auto') {
+                provider = AIProviderFactory.createProvider(options.provider);
+                providerName = options.provider;
+            }
+            else {
+                provider = createBestAIProvider();
+                providerName = await getBestProvider();
+            }
+            const result = await provider.generateText({
+                prompt: options.prompt,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt
+            }, options.schema);
+            if (!result) {
+                throw new Error('No response received from AI provider');
+            }
+            const responseTime = Date.now() - startTime;
+            return {
+                content: result.text,
+                provider: providerName,
+                usage: result.usage,
+                responseTime
+            };
+        }
+        catch (error) {
+            throw new Error(`Failed to generate text: ${error instanceof Error ? error.message : 'Unknown error'}`);
+        }
+    }
+    /**
+     * Generate streaming text using the best available AI provider
+     */
+    async generateTextStream(options) {
+        try {
+            let provider;
+            if (options.provider && options.provider !== 'auto') {
+                provider = AIProviderFactory.createProvider(options.provider);
+            }
+            else {
+                provider = createBestAIProvider();
+            }
+            const result = await provider.streamText({
+                prompt: options.prompt,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt
+            });
+            if (!result) {
+                throw new Error('No stream response received from AI provider');
+            }
+            // Convert the AI SDK stream to our expected format
+            async function* convertStream() {
+                if (result && result.textStream) {
+                    for await (const chunk of result.textStream) {
+                        yield { content: chunk };
+                    }
+                }
+            }
+            return convertStream();
+        }
+        catch (error) {
+            throw new Error(`Failed to stream text: ${error instanceof Error ? error.message : 'Unknown error'}`);
+        }
+    }
+    /**
+     * Get the best available AI provider
+     */
+    async getBestProvider() {
+        return await getBestProvider();
+    }
+    /**
+     * Test a specific provider
+     */
+    async testProvider(providerName, testPrompt = 'test') {
+        try {
+            const provider = AIProviderFactory.createProvider(providerName);
+            await provider.generateText(testPrompt);
+            return true;
+        }
+        catch (error) {
+            return false;
+        }
+    }
+}
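The `getBestProvider()` / `testProvider()` pair above is what the CLI's `status` and `get-best-provider` commands build on. A small sketch of probing providers directly — the provider list simply mirrors the CLI's:

```js
import { NeuroLink } from '@juspay/neurolink';

const sdk = new NeuroLink();
for (const name of ['openai', 'bedrock', 'vertex']) {
  const ok = await sdk.testProvider(name);   // swallows errors and returns a boolean
  console.log(`${name}: ${ok ? 'working' : 'failed'}`);
}
console.log('best:', await sdk.getBestProvider());
```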
package/dist/providers/amazonBedrock.d.ts
CHANGED

@@ -1,11 +1,11 @@
 import type { ZodType, ZodTypeDef } from 'zod';
 import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
-import type { AIProvider } from '../core/types.js';
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
 export declare class AmazonBedrock implements AIProvider {
     private modelName;
     private model;
     private bedrock;
     constructor(modelName?: string | null);
-    streamText(
-    generateText(
+    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
 }
package/dist/providers/amazonBedrock.js
CHANGED

@@ -6,7 +6,9 @@ const DEFAULT_SYSTEM_CONTEXT = {
 };
 // Configuration helpers
 const getBedrockModelId = () => {
-    return process.env.
+    return process.env.BEDROCK_MODEL ||
+        process.env.BEDROCK_MODEL_ID ||
+        'arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0';
 };
 const getAWSAccessKeyId = () => {
     const keyId = process.env.AWS_ACCESS_KEY_ID;
@@ -40,7 +42,12 @@ export class AmazonBedrock {
         const functionTag = 'AmazonBedrock.constructor';
         this.modelName = modelName || getBedrockModelId();
         try {
-            console.log(`[${functionTag}] Function called`, {
+            console.log(`[${functionTag}] Function called`, {
+                modelName: this.modelName,
+                envBedrockModel: process.env.BEDROCK_MODEL,
+                envBedrockModelId: process.env.BEDROCK_MODEL_ID,
+                fallbackModel: 'arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0'
+            });
             // Configure AWS credentials for custom Bedrock instance
             const awsConfig = {
                 accessKeyId: getAWSAccessKeyId(),
@@ -50,7 +57,9 @@ export class AmazonBedrock {
             console.log(`[${functionTag}] AWS config validation`, {
                 hasAccessKeyId: !!awsConfig.accessKeyId,
                 hasSecretAccessKey: !!awsConfig.secretAccessKey,
-                region: awsConfig.region || 'MISSING'
+                region: awsConfig.region || 'MISSING',
+                accessKeyIdLength: awsConfig.accessKeyId?.length || 0,
+                hasSessionToken: !!process.env.AWS_SESSION_TOKEN
             });
             // Add session token for development environment
             if (getAppEnvironment() === 'dev') {
@@ -109,20 +118,31 @@ export class AmazonBedrock {
             throw err;
         }
     }
-    async streamText(
+    async streamText(optionsOrPrompt, analysisSchema) {
        const functionTag = 'AmazonBedrock.streamText';
        const provider = 'bedrock';
        let chunkCount = 0;
        try {
+            // Parse parameters - support both string and options object
+            const options = typeof optionsOrPrompt === 'string'
+                ? { prompt: optionsOrPrompt }
+                : optionsOrPrompt;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            // Use schema from options or fallback parameter
+            const finalSchema = schema || analysisSchema;
            console.log(`[${functionTag}] Stream request started`, {
                provider,
                modelName: this.modelName,
-                promptLength: prompt.length
+                promptLength: prompt.length,
+                temperature,
+                maxTokens
            });
            const streamOptions = {
                model: this.model,
                prompt: prompt,
-                system:
+                system: systemPrompt,
+                temperature,
+                maxTokens,
                onError: (event) => {
                    const error = event.error;
                    const errorMessage = error instanceof Error ? error.message : String(error);
@@ -160,15 +180,9 @@ export class AmazonBedrock {
                    });
                }
            };
-            if (
-                streamOptions.experimental_output = Output.object({ schema:
+            if (finalSchema) {
+                streamOptions.experimental_output = Output.object({ schema: finalSchema });
            }
-            console.log(`[${functionTag}] Stream text started`, {
-                provider,
-                modelName: this.modelName,
-                region: getAWSRegion(),
-                promptLength: prompt.length
-            });
            // Direct streamText call - let the real error bubble up
            const result = streamText(streamOptions);
            console.log(`[${functionTag}] Stream text call successful`, {
@@ -189,30 +203,42 @@ export class AmazonBedrock {
            throw err; // Re-throw error to trigger fallback
        }
    }
-    async generateText(
+    async generateText(optionsOrPrompt, analysisSchema) {
        const functionTag = 'AmazonBedrock.generateText';
        const provider = 'bedrock';
        try {
-
-
-                prompt:
-
-            };
-
-
-            }
+            // Parse parameters - support both string and options object
+            const options = typeof optionsOrPrompt === 'string'
+                ? { prompt: optionsOrPrompt }
+                : optionsOrPrompt;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            // Use schema from options or fallback parameter
+            const finalSchema = schema || analysisSchema;
            console.log(`[${functionTag}] Generate text started`, {
                provider,
                modelName: this.modelName,
                region: getAWSRegion(),
-                promptLength: prompt.length
+                promptLength: prompt.length,
+                temperature,
+                maxTokens
            });
+            const generateOptions = {
+                model: this.model,
+                prompt: prompt,
+                system: systemPrompt,
+                temperature,
+                maxTokens
+            };
+            if (finalSchema) {
+                generateOptions.experimental_output = Output.object({ schema: finalSchema });
+            }
            const result = await generateText(generateOptions);
            console.log(`[${functionTag}] Generate text completed`, {
                provider,
                modelName: this.modelName,
                usage: result.usage,
-                finishReason: result.finishReason
+                finishReason: result.finishReason,
+                responseLength: result.text?.length || 0
            });
            return result;
        }