@juspay/neurolink 1.5.1 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +49 -0
- package/README.md +1 -1
- package/dist/cli/commands/config.d.ts +35 -35
- package/dist/cli/index.js +63 -19
- package/dist/core/factory.js +12 -11
- package/dist/lib/core/factory.d.ts +40 -0
- package/dist/lib/core/factory.js +162 -0
- package/dist/lib/core/types.d.ts +111 -0
- package/dist/lib/core/types.js +68 -0
- package/dist/lib/index.d.ts +56 -0
- package/dist/lib/index.js +62 -0
- package/dist/lib/mcp/context-manager.d.ts +164 -0
- package/dist/lib/mcp/context-manager.js +273 -0
- package/dist/lib/mcp/factory.d.ts +144 -0
- package/dist/lib/mcp/factory.js +141 -0
- package/dist/lib/mcp/orchestrator.d.ts +170 -0
- package/dist/lib/mcp/orchestrator.js +372 -0
- package/dist/lib/mcp/registry.d.ts +188 -0
- package/dist/lib/mcp/registry.js +373 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +303 -0
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +428 -0
- package/dist/lib/neurolink.d.ts +53 -0
- package/dist/lib/neurolink.js +155 -0
- package/dist/lib/providers/amazonBedrock.d.ts +11 -0
- package/dist/lib/providers/amazonBedrock.js +256 -0
- package/dist/lib/providers/anthropic.d.ts +34 -0
- package/dist/lib/providers/anthropic.js +308 -0
- package/dist/lib/providers/azureOpenAI.d.ts +37 -0
- package/dist/lib/providers/azureOpenAI.js +339 -0
- package/dist/lib/providers/googleAIStudio.d.ts +30 -0
- package/dist/lib/providers/googleAIStudio.js +216 -0
- package/dist/lib/providers/googleVertexAI.d.ts +30 -0
- package/dist/lib/providers/googleVertexAI.js +409 -0
- package/dist/lib/providers/index.d.ts +30 -0
- package/dist/lib/providers/index.js +25 -0
- package/dist/lib/providers/openAI.d.ts +10 -0
- package/dist/lib/providers/openAI.js +169 -0
- package/dist/lib/utils/logger.d.ts +12 -0
- package/dist/lib/utils/logger.js +25 -0
- package/dist/lib/utils/providerUtils.d.ts +17 -0
- package/dist/lib/utils/providerUtils.js +73 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +11 -10
- package/dist/neurolink.js +13 -12
- package/dist/providers/amazonBedrock.js +22 -21
- package/dist/providers/anthropic.js +21 -20
- package/dist/providers/azureOpenAI.js +21 -20
- package/dist/providers/googleAIStudio.js +13 -12
- package/dist/providers/googleVertexAI.js +27 -26
- package/dist/providers/openAI.js +12 -11
- package/dist/utils/logger.d.ts +12 -0
- package/dist/utils/logger.js +25 -0
- package/dist/utils/providerUtils.d.ts +0 -3
- package/dist/utils/providerUtils.js +3 -2
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,54 @@
 # @juspay/neurolink
 
+## 1.5.2
+
+### Patch Changes
+
+- **🔧 Production-Ready CLI Logging System**: Fixed critical logging system for clean production output
+  - **Issue**: CLI showed excessive debug output during normal operation, breaking demo presentations
+  - **Root Cause**: Mixed console.log statements bypassed conditional logger system
+  - **Solution**: Systematic replacement of all console.log with logger.debug across codebase
+  - **Impact**: **Clean CLI output by default** with conditional debug available via `NEUROLINK_DEBUG=true`
+
+- **🔄 Enhanced Provider Fallback Logic**: Fixed incomplete provider fallback coverage
+  - **Issue**: Provider fallback only attempted 4 of 6 providers (missing Anthropic & Azure)
+  - **Root Cause**: Incomplete provider array in NeuroLink class fallback logic
+  - **Solution**: Updated to include all 6 providers: `['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai']`
+  - **Impact**: **100% provider coverage** with comprehensive fallback for maximum reliability
+
+- **🧹 Console Statement Cleanup**: Systematic cleanup of debug output across entire codebase
+  - **Files Updated**: `src/lib/neurolink.ts`, `src/lib/core/factory.ts`, `src/lib/providers/openAI.ts`, `src/lib/mcp/servers/ai-providers/ai-core-server.ts`
+  - **Pattern**: Replaced 200+ `console.log()` statements with `logger.debug()` calls
+  - **Result**: Professional CLI behavior suitable for production deployment and demos
+
+### Technical Changes
+
+- **Production CLI Output**: Clean spinner → success → content (zero debug noise)
+- **Debug Mode Available**: Full debug logging with `NEUROLINK_DEBUG=true` environment variable
+- **Complete Provider Support**: All 6 AI providers now included in automatic fallback
+- **Error Handling**: Provider-level error logs preserved for troubleshooting
+- **Conditional Logging**: Debug messages only appear when explicitly enabled
+- **Demo Ready**: CLI output suitable for presentations and production use
+
+### CLI Behavior
+
+```bash
+# Production/Demo Mode (Clean Output)
+node dist/cli/cli/index.js generate-text "test" --max-tokens 5
+# Output: 🤖 Generating text... → ✅ Text generated successfully! [content]
+
+# Debug Mode (Full Logging)
+NEUROLINK_DEBUG=true node dist/cli/cli/index.js generate-text "test" --max-tokens 5
+# Output: [debug logs] + spinner + success + content
+```
+
+### Backward Compatibility
+
+- **100% API Compatible**: No breaking changes to public interfaces
+- **Environment Variables**: `NEUROLINK_DEBUG=true` works as documented
+- **Provider Selection**: All existing provider configurations continue working
+- **CLI Commands**: All commands maintain same functionality with cleaner output
+
 ## 1.5.1
 
 ### Patch Changes
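The changelog above describes a logger utility whose debug output is gated on `NEUROLINK_DEBUG`. As a rough sketch of that pattern (not the actual contents of `dist/utils/logger.js`, which this diff only lists by line count), such a conditional logger could look like this:

```typescript
// Sketch of a NEUROLINK_DEBUG-gated logger (illustrative; names and output format are assumptions).
const isDebugEnabled = (): boolean => process.env.NEUROLINK_DEBUG === 'true';

export const logger = {
  // Suppressed unless NEUROLINK_DEBUG=true, keeping default CLI output clean.
  debug(message: string, meta?: Record<string, unknown>): void {
    if (isDebugEnabled()) {
      console.error(`[neurolink:debug] ${message}`, meta ?? '');
    }
  },
  // Always printed so provider-level failures stay visible for troubleshooting.
  error(message: string, meta?: Record<string, unknown>): void {
    console.error(`[neurolink:error] ${message}`, meta ?? '');
  },
};
```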
package/dist/cli/commands/config.d.ts
CHANGED
@@ -18,8 +18,8 @@ declare const ConfigSchema: z.ZodObject<{
     apiKey?: string | undefined;
     baseURL?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
     baseURL?: string | undefined;
 }>>;
 bedrock: z.ZodOptional<z.ZodObject<{
@@ -73,8 +73,8 @@ declare const ConfigSchema: z.ZodObject<{
     model: string;
     apiKey?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
 }>>;
 azure: z.ZodOptional<z.ZodObject<{
     apiKey: z.ZodOptional<z.ZodString>;
@@ -87,8 +87,8 @@ declare const ConfigSchema: z.ZodObject<{
     endpoint?: string | undefined;
     deploymentId?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
     endpoint?: string | undefined;
     deploymentId?: string | undefined;
 }>>;
@@ -99,8 +99,8 @@ declare const ConfigSchema: z.ZodObject<{
     model: string;
     apiKey?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
 }>>;
 huggingface: z.ZodOptional<z.ZodObject<{
     apiKey: z.ZodOptional<z.ZodString>;
@@ -109,15 +109,10 @@ declare const ConfigSchema: z.ZodObject<{
     model: string;
     apiKey?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
 }>>;
 }, "strip", z.ZodTypeAny, {
-    openai?: {
-        model: string;
-        apiKey?: string | undefined;
-        baseURL?: string | undefined;
-    } | undefined;
     bedrock?: {
         model: string;
         region?: string | undefined;
@@ -125,6 +120,11 @@ declare const ConfigSchema: z.ZodObject<{
         secretAccessKey?: string | undefined;
         sessionToken?: string | undefined;
     } | undefined;
+    openai?: {
+        model: string;
+        apiKey?: string | undefined;
+        baseURL?: string | undefined;
+    } | undefined;
     vertex?: {
         model: string;
         location: string;
@@ -153,11 +153,6 @@ declare const ConfigSchema: z.ZodObject<{
         apiKey?: string | undefined;
     } | undefined;
 }, {
-    openai?: {
-        apiKey?: string | undefined;
-        model?: string | undefined;
-        baseURL?: string | undefined;
-    } | undefined;
     bedrock?: {
         model?: string | undefined;
         region?: string | undefined;
@@ -165,6 +160,11 @@ declare const ConfigSchema: z.ZodObject<{
         secretAccessKey?: string | undefined;
         sessionToken?: string | undefined;
     } | undefined;
+    openai?: {
+        model?: string | undefined;
+        apiKey?: string | undefined;
+        baseURL?: string | undefined;
+    } | undefined;
     vertex?: {
         model?: string | undefined;
         projectId?: string | undefined;
@@ -175,22 +175,22 @@ declare const ConfigSchema: z.ZodObject<{
         privateKey?: string | undefined;
     } | undefined;
     anthropic?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     azure?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
         endpoint?: string | undefined;
         deploymentId?: string | undefined;
     } | undefined;
     'google-ai'?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     huggingface?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
 }>>;
 profiles: z.ZodDefault<z.ZodRecord<z.ZodString, z.ZodAny>>;
@@ -217,13 +217,8 @@ declare const ConfigSchema: z.ZodObject<{
     cacheStrategy?: "file" | "memory" | "redis" | undefined;
 }>>;
 }, "strip", z.ZodTypeAny, {
-    defaultProvider: "
+    defaultProvider: "bedrock" | "openai" | "vertex" | "anthropic" | "azure" | "google-ai" | "auto" | "huggingface";
     providers: {
-    openai?: {
-        model: string;
-        apiKey?: string | undefined;
-        baseURL?: string | undefined;
-    } | undefined;
     bedrock?: {
         model: string;
         region?: string | undefined;
@@ -231,6 +226,11 @@ declare const ConfigSchema: z.ZodObject<{
         secretAccessKey?: string | undefined;
         sessionToken?: string | undefined;
     } | undefined;
+    openai?: {
+        model: string;
+        apiKey?: string | undefined;
+        baseURL?: string | undefined;
+    } | undefined;
     vertex?: {
         model: string;
         location: string;
@@ -269,13 +269,8 @@ declare const ConfigSchema: z.ZodObject<{
         cacheStrategy: "file" | "memory" | "redis";
     };
 }, {
-    defaultProvider?: "
+    defaultProvider?: "bedrock" | "openai" | "vertex" | "anthropic" | "azure" | "google-ai" | "auto" | "huggingface" | undefined;
     providers?: {
-    openai?: {
-        apiKey?: string | undefined;
-        model?: string | undefined;
-        baseURL?: string | undefined;
-    } | undefined;
     bedrock?: {
         model?: string | undefined;
         region?: string | undefined;
@@ -283,6 +278,11 @@ declare const ConfigSchema: z.ZodObject<{
         secretAccessKey?: string | undefined;
         sessionToken?: string | undefined;
     } | undefined;
+    openai?: {
+        model?: string | undefined;
+        apiKey?: string | undefined;
+        baseURL?: string | undefined;
+    } | undefined;
     vertex?: {
         model?: string | undefined;
         projectId?: string | undefined;
@@ -293,22 +293,22 @@ declare const ConfigSchema: z.ZodObject<{
         privateKey?: string | undefined;
     } | undefined;
     anthropic?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     azure?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
         endpoint?: string | undefined;
         deploymentId?: string | undefined;
     } | undefined;
     'google-ai'?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     huggingface?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     } | undefined;
     profiles?: Record<string, any> | undefined;
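The hunks above only reorder optional keys inside the inferred Zod input and output types; the accepted configuration shape is unchanged. As a hypothetical illustration (field names come from the declarations above, values are placeholders):

```typescript
// Hypothetical object matching the ConfigSchema input type shown above.
// Only the declaration order of optional keys changed between 1.5.1 and 1.5.2.
const config = {
  defaultProvider: 'auto' as const,
  providers: {
    openai: { model: 'gpt-4o', apiKey: process.env.OPENAI_API_KEY },
    bedrock: { model: 'claude-3-sonnet', region: 'us-east-1' }, // placeholder model id
    'google-ai': { apiKey: process.env.GOOGLE_AI_API_KEY },
  },
  profiles: {},
};
```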
package/dist/cli/index.js
CHANGED
@@ -6,7 +6,7 @@
  * Features: Spinners, colors, batch processing, provider testing, rich help
  * Implementation: ~300 lines using simple JS utility functions
  */
-import { NeuroLink } from '
+import { NeuroLink } from '../lib/neurolink.js';
 import yargs from 'yargs'; // Keep default import
 import { hideBin } from 'yargs/helpers';
 import ora from 'ora';
@@ -49,6 +49,7 @@ function handleError(error, context) {
     let isAuthError = false;
     let genericMessage = specificErrorMessage; // Initialize genericMessage with the specific one
     if (originalErrorMessageLowerCase.includes('api_key') ||
+        originalErrorMessageLowerCase.includes('google_ai_api_key') ||
         originalErrorMessageLowerCase.includes('aws_access_key_id') ||
         originalErrorMessageLowerCase.includes('aws_secret_access_key') ||
         originalErrorMessageLowerCase.includes('aws_session_token') ||
@@ -61,6 +62,7 @@ function handleError(error, context) {
     }
     else if ( // Fallback to checking the full stringified error if direct message didn't match
     errorStringLowerCase.includes('api_key') ||
+        errorStringLowerCase.includes('google_ai_api_key') ||
         errorStringLowerCase.includes('aws_access_key_id') ||
         errorStringLowerCase.includes('aws_secret_access_key') ||
         errorStringLowerCase.includes('aws_session_token') ||
@@ -96,9 +98,10 @@ function handleError(error, context) {
     console.error(chalk.red(`❌ ${context} failed: ${genericMessage}`));
     // Smart hints for common errors (just string matching!)
     if (genericMessage.toLowerCase().includes('api key') || genericMessage.toLowerCase().includes('credential')) {
-        console.error(chalk.yellow('💡 Set API key: export
-        console.error(chalk.yellow('💡 Or set
-        console.error(chalk.yellow('💡 Or set
+        console.error(chalk.yellow('💡 Set Google AI Studio API key (RECOMMENDED): export GOOGLE_AI_API_KEY=AIza-...'));
+        console.error(chalk.yellow('💡 Or set OpenAI API key: export OPENAI_API_KEY=sk-...'));
+        console.error(chalk.yellow('💡 Or set AWS Bedrock credentials: export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... AWS_REGION=us-east-1'));
+        console.error(chalk.yellow('💡 Or set Google Vertex AI credentials: export GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json'));
         console.error(chalk.yellow('💡 Or set Anthropic API key: export ANTHROPIC_API_KEY=sk-ant-...'));
         console.error(chalk.yellow('💡 Or set Azure OpenAI credentials: export AZURE_OPENAI_API_KEY=... AZURE_OPENAI_ENDPOINT=...'));
     }
@@ -112,14 +115,16 @@ function handleError(error, context) {
     process.exit(1);
 }
 function validateConfig() {
+    const hasGoogleAI = !!process.env.GOOGLE_AI_API_KEY;
     const hasOpenAI = !!process.env.OPENAI_API_KEY;
     const hasAWS = !!(process.env.AWS_REGION || process.env.AWS_ACCESS_KEY_ID);
     const hasGoogle = !!(process.env.GOOGLE_APPLICATION_CREDENTIALS || process.env.GOOGLE_SERVICE_ACCOUNT_KEY || process.env.GOOGLE_AUTH_CLIENT_EMAIL);
     const hasAnthropic = !!process.env.ANTHROPIC_API_KEY;
     const hasAzure = !!(process.env.AZURE_OPENAI_API_KEY && process.env.AZURE_OPENAI_ENDPOINT);
-    if (!hasOpenAI && !hasAWS && !hasGoogle && !hasAnthropic && !hasAzure) {
+    if (!hasGoogleAI && !hasOpenAI && !hasAWS && !hasGoogle && !hasAnthropic && !hasAzure) {
         console.error(chalk.red('⚠️ No AI provider credentials found'));
         console.error(chalk.yellow('💡 Set one of:'));
+        console.error(chalk.yellow('  • GOOGLE_AI_API_KEY=AIza-...'));
         console.error(chalk.yellow('  • OPENAI_API_KEY=sk-...'));
         console.error(chalk.yellow('  • AWS_REGION=us-east-1 (+ AWS credentials)'));
         console.error(chalk.yellow('  • GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json'));
@@ -147,11 +152,18 @@ const cli = yargs(args)
     .epilogue('For more info: https://github.com/juspay/neurolink')
     .showHelpOnFail(true, 'Specify --help for available options')
     .middleware((argv) => {
-    //
+    // Control SDK logging based on debug flag
+    if (argv.debug) {
+        process.env.NEUROLINK_DEBUG = 'true';
+    }
+    else if (typeof argv.debug !== 'undefined') {
+        // Only set to false if debug flag was explicitly provided
+        process.env.NEUROLINK_DEBUG = 'false';
+    }
+    // Keep existing quiet middleware
     if (process.env.NEUROLINK_QUIET === 'true' && typeof argv.quiet === 'undefined') {
         argv.quiet = true;
     }
-    // NEUROLINK_DEBUG will be handled by option defaults
 })
     .fail((msg, err, yargsInstance) => {
     const exitProcess = () => {
@@ -259,13 +271,19 @@ const cli = yargs(args)
         };
         process.stdout.write(JSON.stringify(jsonOutput, null, 2) + '\n');
     }
-    else {
+    else if (argv.debug) {
+        // Debug mode: Show AI response + full metadata
         if (result.content)
             console.log('\n' + result.content + '\n');
         console.log(JSON.stringify({ provider: result.provider, usage: result.usage, responseTime: result.responseTime }, null, 2));
         if (result.usage)
             console.log(chalk.blue(`ℹ️ ${result.usage.totalTokens} tokens used`));
     }
+    else {
+        // Default mode: Clean AI response only
+        if (result.content)
+            console.log(result.content);
+    }
     // Explicitly exit to prevent hanging, especially with Google AI Studio
     process.exit(0);
 }
@@ -290,9 +308,16 @@ const cli = yargs(args)
     .positional('prompt', { type: 'string', description: 'Text prompt for streaming', demandOption: true })
     .option('provider', { choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai'], default: 'auto', description: 'AI provider to use' })
     .option('temperature', { type: 'number', default: 0.7, description: 'Creativity level' })
+    .option('debug', { type: 'boolean', default: false, description: 'Enable debug mode with interleaved logging' })
     .example('$0 stream "Tell me a story"', 'Stream a story in real-time'), async (argv) => {
-
-
+    // Default mode: Simple streaming message
+    // Debug mode: More detailed information
+    if (!argv.quiet && !argv.debug) {
+        console.log(chalk.blue('🌊 Streaming...'));
+    }
+    else if (!argv.quiet && argv.debug) {
+        console.log(chalk.blue(`🌊 Streaming from ${argv.provider} provider with debug logging...\n`));
+    }
     try {
         const stream = await sdk.generateTextStream({
             prompt: argv.prompt,
@@ -301,9 +326,11 @@ const cli = yargs(args)
         });
         for await (const chunk of stream) {
             process.stdout.write(chunk.content);
+            // In debug mode, interleaved logging would appear here
+            // (SDK logs are controlled by NEUROLINK_DEBUG set in middleware)
         }
         if (!argv.quiet)
-            process.stdout.write('\n'); // Ensure newline after stream
+            process.stdout.write('\n'); // Ensure newline after stream
     }
     catch (error) {
         handleError(error, 'Text streaming');
@@ -320,6 +347,7 @@ const cli = yargs(args)
     .option('temperature', { type: 'number', description: 'Global temperature for batch jobs' })
     .option('max-tokens', { type: 'number', description: 'Global max tokens for batch jobs' })
     .option('system', { type: 'string', description: 'Global system prompt for batch jobs' })
+    .option('debug', { type: 'boolean', default: false, description: 'Enable debug mode with detailed per-item logging' })
     .example('$0 batch prompts.txt --output results.json', 'Process and save to file'), async (argv) => {
     const spinner = argv.quiet ? null : ora().start();
     try {
@@ -552,18 +580,34 @@ const cli = yargs(args)
 // which will be caught by the main .fail() handler.
 )
     // Get Best Provider Command
-    .command('get-best-provider', 'Show the best available AI provider', (yargsInstance) => yargsInstance
-
+    .command('get-best-provider', 'Show the best available AI provider', (yargsInstance) => yargsInstance
+    .usage('Usage: $0 get-best-provider [options]')
+    .option('debug', { type: 'boolean', default: false, description: 'Enable debug mode with selection reasoning' })
+    .example('$0 get-best-provider', 'Get best provider')
+    .example('$0 get-best-provider --debug', 'Show selection logic'), async (argv) => {
+    const spinner = argv.quiet ? null : ora('🎯 Finding best provider...').start();
     try {
         const provider = await sdk.getBestProvider();
-        spinner
-
-
-
-
+        if (spinner) {
+            if (argv.debug) {
+                spinner.succeed(chalk.green(`✅ Best provider selected: ${provider}`));
+            }
+            else {
+                spinner.succeed(chalk.green('✅ Provider found'));
+            }
+        }
+        if (argv.debug) {
+            // Debug mode: Show selection reasoning and metadata
+            console.log(`\nBest available provider: ${provider}`);
+            console.log(`Selection based on: availability, performance, and configuration`);
+        }
+        else {
+            // Default mode: Clean provider name only
+            console.log(provider);
+        }
     }
     catch (error) {
-        if (spinner.isSpinning)
+        if (spinner && spinner.isSpinning)
             spinner.fail();
         handleError(error, 'Provider selection');
     }
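The CLI changes above drive the SDK through calls that also appear in this diff: `new NeuroLink()`, `getBestProvider()`, and `generateTextStream({ prompt, ... })`. A rough sketch of the equivalent programmatic use follows; the root import path and the extra option names are assumptions based on the new `dist/lib` entry points and the CLI flags:

```typescript
import { NeuroLink } from '@juspay/neurolink'; // assumption: root export mirrors dist/lib/index.js

async function main(): Promise<void> {
  const sdk = new NeuroLink();

  // Mirrors the `get-best-provider` command: resolve the best configured provider.
  const provider = await sdk.getBestProvider();
  console.log(`Best provider: ${provider}`);

  // Mirrors the `stream` command: write chunks as they arrive.
  const stream = await sdk.generateTextStream({
    prompt: 'Tell me a story',
    provider, // assumption: accepts the same provider names as the CLI --provider flag
  });
  for await (const chunk of stream) {
    process.stdout.write(chunk.content);
  }
  process.stdout.write('\n');
}

main().catch((error) => {
  console.error(error);
  process.exit(1);
});
```

With `NEUROLINK_DEBUG=true` in the environment, the same run would also emit the `logger.debug` traces introduced throughout this release.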
package/dist/core/factory.js
CHANGED
@@ -1,5 +1,6 @@
 import { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider, GoogleAIStudio } from '../providers/index.js';
 import { getBestProvider } from '../utils/providerUtils.js';
+import { logger } from '../utils/logger.js';
 const componentIdentifier = 'aiProviderFactory';
 /**
  * Factory for creating AI provider instances with centralized configuration
@@ -13,7 +14,7 @@ export class AIProviderFactory {
      */
     static createProvider(providerName, modelName) {
         const functionTag = 'AIProviderFactory.createProvider';
-
+        logger.debug(`[${functionTag}] Provider creation started`, {
             providerName,
             modelName: modelName || 'default'
         });
@@ -49,7 +50,7 @@ export class AIProviderFactory {
                 default:
                     throw new Error(`Unknown provider: ${providerName}. Supported providers: vertex, bedrock, openai, anthropic, azure, google-ai`);
             }
-
+            logger.debug(`[${functionTag}] Provider creation succeeded`, {
                 providerName,
                 modelName: modelName || 'default',
                 providerType: provider.constructor.name
@@ -58,7 +59,7 @@ export class AIProviderFactory {
         }
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
-
+            logger.debug(`[${functionTag}] Provider creation failed`, {
                 providerName,
                 modelName: modelName || 'default',
                 error: errorMessage
@@ -74,13 +75,13 @@ export class AIProviderFactory {
      */
     static createProviderWithModel(provider, model) {
         const functionTag = 'AIProviderFactory.createProviderWithModel';
-
+        logger.debug(`[${functionTag}] Provider model creation started`, {
             provider,
             model
         });
         try {
             const providerInstance = this.createProvider(provider, model);
-
+            logger.debug(`[${functionTag}] Provider model creation succeeded`, {
                 provider,
                 model,
                 providerType: providerInstance.constructor.name
@@ -89,7 +90,7 @@ export class AIProviderFactory {
         }
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
-
+            logger.debug(`[${functionTag}] Provider model creation failed`, {
                 provider,
                 model,
                 error: errorMessage
@@ -107,7 +108,7 @@ export class AIProviderFactory {
         const functionTag = 'AIProviderFactory.createBestProvider';
         try {
             const bestProvider = getBestProvider(requestedProvider);
-
+            logger.debug(`[${functionTag}] Best provider selected`, {
                 requestedProvider: requestedProvider || 'auto',
                 selectedProvider: bestProvider,
                 modelName: modelName || 'default'
@@ -116,7 +117,7 @@ export class AIProviderFactory {
         }
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
-
+            logger.debug(`[${functionTag}] Best provider selection failed`, {
                 requestedProvider: requestedProvider || 'auto',
                 error: errorMessage
             });
@@ -132,7 +133,7 @@ export class AIProviderFactory {
      */
     static createProviderWithFallback(primaryProvider, fallbackProvider, modelName) {
         const functionTag = 'AIProviderFactory.createProviderWithFallback';
-
+        logger.debug(`[${functionTag}] Fallback provider setup started`, {
             primaryProvider,
             fallbackProvider,
             modelName: modelName || 'default'
@@ -140,7 +141,7 @@ export class AIProviderFactory {
         try {
             const primary = this.createProvider(primaryProvider, modelName);
             const fallback = this.createProvider(fallbackProvider, modelName);
-
+            logger.debug(`[${functionTag}] Fallback provider setup succeeded`, {
                 primaryProvider,
                 fallbackProvider,
                 modelName: modelName || 'default'
@@ -149,7 +150,7 @@ export class AIProviderFactory {
         }
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
-
+            logger.debug(`[${functionTag}] Fallback provider setup failed`, {
                 primaryProvider,
                 fallbackProvider,
                 error: errorMessage
package/dist/lib/core/factory.d.ts
ADDED
@@ -0,0 +1,40 @@
+import type { AIProvider, AIProviderName, SupportedModelName } from './types.js';
+declare const componentIdentifier = "aiProviderFactory";
+/**
+ * Factory for creating AI provider instances with centralized configuration
+ */
+export declare class AIProviderFactory {
+    /**
+     * Create a provider instance for the specified provider type
+     * @param providerName - Name of the provider ('vertex', 'bedrock', 'openai')
+     * @param modelName - Optional model name override
+     * @returns AIProvider instance
+     */
+    static createProvider(providerName: string, modelName?: string | null): AIProvider;
+    /**
+     * Create a provider instance with specific provider enum and model
+     * @param provider - Provider enum value
+     * @param model - Specific model enum value
+     * @returns AIProvider instance
+     */
+    static createProviderWithModel(provider: AIProviderName, model: SupportedModelName): AIProvider;
+    /**
+     * Create the best available provider automatically
+     * @param requestedProvider - Optional preferred provider
+     * @param modelName - Optional model name override
+     * @returns AIProvider instance
+     */
+    static createBestProvider(requestedProvider?: string, modelName?: string | null): AIProvider;
+    /**
+     * Create primary and fallback provider instances
+     * @param primaryProvider - Primary provider name
+     * @param fallbackProvider - Fallback provider name
+     * @param modelName - Optional model name override
+     * @returns Object with primary and fallback providers
+     */
+    static createProviderWithFallback(primaryProvider: string, fallbackProvider: string, modelName?: string | null): {
+        primary: AIProvider;
+        fallback: AIProvider;
+    };
+}
+export { componentIdentifier };
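The new `factory.d.ts` above pins down the factory's public surface. A short usage sketch against those declared signatures (the import path is assumed from the compiled module layout; provider and model strings are placeholders):

```typescript
import { AIProviderFactory } from '@juspay/neurolink/dist/lib/core/factory.js'; // assumed path

// Explicit provider with an optional model override, per createProvider(providerName, modelName?).
const openai = AIProviderFactory.createProvider('openai', 'gpt-4o');

// Let the factory choose among the configured providers, per createBestProvider().
const best = AIProviderFactory.createBestProvider();

// Primary/fallback pair, per createProviderWithFallback(primaryProvider, fallbackProvider, modelName?).
const { primary, fallback } = AIProviderFactory.createProviderWithFallback('openai', 'bedrock');
```

Each of these calls now routes its start/success/failure traces through `logger.debug`, so they stay silent unless `NEUROLINK_DEBUG=true` is set.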