@juspay/neurolink 2.1.0 → 3.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +36 -9
- package/README.md +17 -39
- package/dist/cli/index.js +40 -18
- package/dist/lib/mcp/plugins/filesystem-mcp.d.ts +1 -1
- package/dist/lib/neurolink.d.ts +2 -0
- package/dist/lib/neurolink.js +5 -3
- package/dist/lib/providers/agent-enhanced-provider.js +61 -53
- package/dist/lib/providers/amazonBedrock.js +11 -7
- package/dist/lib/providers/anthropic.js +13 -11
- package/dist/lib/providers/azureOpenAI.js +10 -10
- package/dist/lib/providers/googleAIStudio.js +14 -7
- package/dist/lib/providers/googleVertexAI.js +14 -7
- package/dist/lib/providers/huggingFace.js +11 -7
- package/dist/lib/providers/mistralAI.js +11 -7
- package/dist/lib/providers/ollama.js +12 -4
- package/dist/lib/providers/openAI.js +11 -7
- package/dist/lib/providers/timeout-wrapper.d.ts +2 -2
- package/dist/lib/providers/timeout-wrapper.js +3 -3
- package/dist/lib/proxy/proxy-fetch.d.ts +18 -0
- package/dist/lib/proxy/proxy-fetch.js +64 -0
- package/dist/lib/utils/timeout.d.ts +4 -4
- package/dist/lib/utils/timeout.js +42 -34
- package/dist/mcp/plugins/filesystem-mcp.d.ts +1 -1
- package/dist/mcp/plugins/filesystem-mcp.js +1 -1
- package/dist/neurolink.d.ts +2 -0
- package/dist/neurolink.js +5 -3
- package/dist/providers/agent-enhanced-provider.js +61 -53
- package/dist/providers/amazonBedrock.js +11 -7
- package/dist/providers/anthropic.js +13 -11
- package/dist/providers/azureOpenAI.js +10 -10
- package/dist/providers/googleAIStudio.js +14 -7
- package/dist/providers/googleVertexAI.js +14 -7
- package/dist/providers/huggingFace.js +11 -7
- package/dist/providers/mistralAI.js +11 -7
- package/dist/providers/ollama.js +12 -4
- package/dist/providers/openAI.js +11 -7
- package/dist/providers/timeout-wrapper.d.ts +2 -2
- package/dist/providers/timeout-wrapper.js +3 -3
- package/dist/proxy/proxy-fetch.d.ts +18 -0
- package/dist/proxy/proxy-fetch.js +64 -0
- package/dist/utils/timeout.d.ts +4 -4
- package/dist/utils/timeout.js +42 -34
- package/package.json +2 -1
package/CHANGELOG.md
CHANGED

@@ -1,35 +1,62 @@
-# [2.1.0](https://github.com/juspay/neurolink/compare/v2.0.0...v2.1.0) (2025-06-29)
+## [3.0.1](https://github.com/juspay/neurolink/compare/v3.0.0...v3.0.1) (2025-07-01)
+
+
+### Bug Fixes
+
+* **cli:** honor --model parameter in CLI commands ([467ea85](https://github.com/juspay/neurolink/commit/467ea8548688a9db6046c98dbfd268ecd297605c))
+
+# [3.0.0](https://github.com/juspay/neurolink/compare/v2.1.0...v3.0.0) (2025-07-01)


 ### Features

-* **timeout:** add comprehensive timeout support for all AI providers ([8610f4a](https://github.com/juspay/neurolink/commit/8610f4ade418345b0395ab72af6e675f6eec6f93))
+* **proxy:** add comprehensive enterprise proxy support across all providers ([9668e67](https://github.com/juspay/neurolink/commit/9668e67dfaa27831ba85d45fdf5b7739de902b28))

-# [2.0.0](https://github.com/juspay/neurolink/compare/v1.11.3...v2.0.0) (2025-06-28)

+### BREAKING CHANGES
+
+* **proxy:** None - fully backward compatible
+
+Files modified:
+- docs/ENTERPRISE-PROXY-SETUP.md (NEW) - Comprehensive enterprise proxy guide
+- docs/PROVIDER-CONFIGURATION.md - Added proxy configuration section
+- docs/CLI-GUIDE.md - Added proxy environment variables documentation
+- docs/ENVIRONMENT-VARIABLES.md - Added proxy configuration examples
+- docs/TROUBLESHOOTING.md - Added proxy troubleshooting procedures
+- .env.example - Added proxy environment variables
+- memory-bank/ - Updated with proxy implementation milestone
+- .clinerules - Added proxy success patterns
+- CHANGELOG.md - Added v2.2.0 proxy support entry
+- package.json - Updated description with enterprise features
+- README.md - Removed outdated content
+
+# [2.1.0](https://github.com/juspay/neurolink/compare/v2.0.0...v2.1.0) (2025-06-29)

 ### Features

-* **cli:** add command variations and stream agent support ([5fc4c26](https://github.com/juspay/neurolink/commit/5fc4c26b23bd189be52272521bdd2ca40dd55837))
+- **timeout:** add comprehensive timeout support for all AI providers ([8610f4a](https://github.com/juspay/neurolink/commit/8610f4ade418345b0395ab72af6e675f6eec6f93))
+
+# [2.0.0](https://github.com/juspay/neurolink/compare/v1.11.3...v2.0.0) (2025-06-28)
+
+### Features

+- **cli:** add command variations and stream agent support ([5fc4c26](https://github.com/juspay/neurolink/commit/5fc4c26b23bd189be52272521bdd2ca40dd55837))

 ### BREAKING CHANGES

-* **cli:** 'generate-text' command is deprecated and will be removed in v2.0
+- **cli:** 'generate-text' command is deprecated and will be removed in v2.0

 ## [1.11.3](https://github.com/juspay/neurolink/compare/v1.11.2...v1.11.3) (2025-06-22)

-
 ### Bug Fixes

-* resolve MCP external tools returning raw JSON instead of human-readable responses ([921a12b](https://github.com/juspay/neurolink/commit/921a12b5b31ca96bbfe3f1db05001ddb84470e14))
+- resolve MCP external tools returning raw JSON instead of human-readable responses ([921a12b](https://github.com/juspay/neurolink/commit/921a12b5b31ca96bbfe3f1db05001ddb84470e14))

 ## [1.11.2](https://github.com/juspay/neurolink/compare/v1.11.1...v1.11.2) (2025-06-22)

-
 ### Bug Fixes

-* **ci:** refactor auto-converted Node.js scripts ([4088888](https://github.com/juspay/neurolink/commit/408888863f8223e64269423412f5c79a35ddfe36))
+- **ci:** refactor auto-converted Node.js scripts ([4088888](https://github.com/juspay/neurolink/commit/408888863f8223e64269423412f5c79a35ddfe36))

 ## [1.11.1](https://github.com/juspay/neurolink/compare/v1.11.0...v1.11.1) (2025-06-21)

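The new proxy module itself (dist/lib/proxy/proxy-fetch.js, +64 lines) is listed in the file summary above but its body is not shown in this diff. As a rough, hypothetical sketch of what a proxy-aware fetch wrapper of this kind usually looks like — assuming the conventional HTTPS_PROXY / HTTP_PROXY environment variables and undici's ProxyAgent, neither of which is confirmed by the diff — it might be shaped like this:

```typescript
// Hypothetical sketch only - not the actual contents of proxy-fetch.js.
// Assumes the standard HTTPS_PROXY / HTTP_PROXY environment variables and
// undici's ProxyAgent, which Node's built-in fetch accepts via `dispatcher`.
import { ProxyAgent } from "undici";

export function createProxyFetch(): typeof fetch {
  const proxyUrl = process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
  if (!proxyUrl) {
    return fetch; // No proxy configured: fall back to plain fetch
  }
  const dispatcher = new ProxyAgent(proxyUrl);
  return (input, init) =>
    // `dispatcher` is an undici extension to RequestInit, hence the cast
    fetch(input, { ...init, dispatcher } as RequestInit);
}
```

Providers would then pass such a fetch into their HTTP clients so requests from inside a corporate network traverse the proxy; the variable names and wiring NeuroLink actually uses are documented in the new docs/ENTERPRISE-PROXY-SETUP.md rather than in this diff.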
package/README.md
CHANGED

@@ -11,28 +11,6 @@

 **NeuroLink** unifies OpenAI, Bedrock, Vertex AI, Google AI Studio, Anthropic, Azure OpenAI, Hugging Face, Ollama, and Mistral AI with intelligent fallback and streaming support. Available as both a **programmatic SDK** and **professional CLI tool**. Extracted from production use at Juspay.

-## 🔥 **Latest Breakthrough: Full MCP Tool Integration Operational** (June 21, 2025)
-
-**MAJOR SUCCESS**: All blocking TypeScript compilation errors resolved + Complete CLI MCP integration achieved!
-
-✅ **Function Calling Ready**: AI can now execute real filesystem operations, data analysis, and system commands
-✅ **Production Validated**: 23,230+ token MCP context loading confirmed via comprehensive CLI testing
-✅ **Zero Build Errors**: Clean TypeScript compilation after resolving all 13 blocking errors
-✅ **CLI Tool Integration**: Both `generate`/`gen` and `agent-generate` commands use full MCP capabilities
-✅ **Backward Compatible**: Tools enabled by default with opt-out flag for traditional usage
-
-```bash
-# NEW: AI can now access your filesystem and execute tools (use preferred commands)
-npx @juspay/neurolink generate "List files in this directory" --provider google-ai
-
-# Alternative shorter command
-npx @juspay/neurolink gen "List files in this directory" --provider google-ai
-
-# ⚠️ DEPRECATED: generate-text will be removed in v2.0 (use 'generate' or 'gen' instead)
-# This command shows a deprecation warning and is kept for backward compatibility only
-npx @juspay/neurolink generate-text "List files in this directory" --provider google-ai
-```
-
 ## 🚀 Quick Start

 ### Install & Run (2 minutes)

@@ -61,7 +39,7 @@ import { createBestAIProvider } from "@juspay/neurolink";
 const provider = createBestAIProvider();
 const result = await provider.generateText({
   prompt: "Write a haiku about programming",
-  timeout:
+  timeout: "30s", // Optional: Set custom timeout (default: 30s)
 });

 console.log(result.text);

@@ -101,14 +79,14 @@ npx @juspay/neurolink status

 ## 🛠️ MCP Integration Status (v1.11.1) ✅ **PRODUCTION READY**

-| Component | Status | Description
-| ------------------- | ------------------ |
-| Built-in Tools | ✅ **Working** | Time tool, utilities - fully functional
-| External Discovery | ✅ **Working** | 58+ MCP servers auto-discovered from all AI tools
-| Tool Execution | ✅ **Working** | Real-time AI tool calling with built-in tools
+| Component | Status | Description |
+| ------------------- | ------------------ | ---------------------------------------------------------- |
+| Built-in Tools | ✅ **Working** | Time tool, utilities - fully functional |
+| External Discovery | ✅ **Working** | 58+ MCP servers auto-discovered from all AI tools |
+| Tool Execution | ✅ **Working** | Real-time AI tool calling with built-in tools |
 | **External Tools** | ✅ **SOLVED** | **Two-step tool calling fixed - human-readable responses** |
-| **CLI Integration** | ✅ **READY** | **Production-ready AI assistant with external tools**
-| External Activation | 🚧 **Development** | Discovery complete, activation protocol in progress
+| **CLI Integration** | ✅ **READY** | **Production-ready AI assistant with external tools** |
+| External Activation | 🚧 **Development** | Discovery complete, activation protocol in progress |

 ### ✅ Quick MCP Test (v1.7.1)


@@ -213,16 +191,16 @@ npx @juspay/neurolink batch prompts.txt --timeout 45s --output results.json
 export const POST: RequestHandler = async ({ request }) => {
   const { message } = await request.json();
   const provider = createBestAIProvider();
-
+
   try {
-    const result = await provider.streamText({
+    const result = await provider.streamText({
       prompt: message,
-      timeout:
+      timeout: "2m", // 2 minutes for streaming
     });
     return new Response(result.toReadableStream());
   } catch (error) {
-    if (error.name ===
-      return new Response(
+    if (error.name === "TimeoutError") {
+      return new Response("Request timed out", { status: 408 });
     }
     throw error;
   }

@@ -232,12 +210,12 @@ export const POST: RequestHandler = async ({ request }) => {
 export async function POST(request: NextRequest) {
   const { prompt } = await request.json();
   const provider = createBestAIProvider();
-
-  const result = await provider.generateText({
+
+  const result = await provider.generateText({
     prompt,
-    timeout: process.env.AI_TIMEOUT ||
+    timeout: process.env.AI_TIMEOUT || "30s", // Configurable timeout
   });
-
+
   return NextResponse.json({ text: result.text });
 }
 ```
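Both the SDK and CLI accept timeouts either as a duration string ("30s", "2m", "1h") or a bare millisecond count ("5000"), per the README snippets above and the CLI option descriptions below. The updated dist/lib/utils/timeout.js is not shown in this diff; a minimal, hypothetical parser for that format (not NeuroLink's actual implementation) would look roughly like:

```typescript
// Hypothetical helper - not NeuroLink's actual utils/timeout implementation.
// Converts "30s", "2m", "1h", "500ms", or a bare number like "5000" to milliseconds.
export function parseTimeout(value: string | number): number {
  if (typeof value === "number") {
    return value; // Already milliseconds
  }
  const match = /^(\d+(?:\.\d+)?)(ms|s|m|h)?$/.exec(value.trim());
  if (!match) {
    throw new Error(`Invalid timeout value: ${value}`);
  }
  const amount = Number(match[1]);
  const unit = match[2] ?? "ms"; // A bare number is treated as milliseconds
  const factors: Record<string, number> = { ms: 1, s: 1_000, m: 60_000, h: 3_600_000 };
  return amount * factors[unit];
}

// parseTimeout("30s") === 30_000; parseTimeout("2m") === 120_000; parseTimeout("5000") === 5000
```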
package/dist/cli/index.js
CHANGED

@@ -256,6 +256,10 @@ const cli = yargs(args)
     type: "string",
     default: "30s",
     description: "Timeout for the request (e.g., 30s, 2m, 1h, 5000)",
+})
+    .option("model", {
+    type: "string",
+    description: "Specific model to use (e.g. gemini-2.5-pro, gemini-2.5-flash)",
 })
     .option("disable-tools", {
     type: "boolean",

@@ -268,7 +272,7 @@ const cli = yargs(args)
     .example('$0 generate-text "Hello world" --disable-tools', "Use without tool integration"), async (argv) => {
     // Check if generate-text was used specifically (for deprecation warning)
     const usedCommand = argv._[0];
-    if (usedCommand ===
+    if (usedCommand === "generate-text" && !argv.quiet) {
         console.warn(chalk.yellow('⚠️ Warning: "generate-text" is deprecated. Use "generate" or "gen" instead for multimodal support.'));
     }
     let originalConsole = {};

@@ -296,6 +300,7 @@ const cli = yargs(args)
     provider: argv.provider === "auto"
         ? undefined
         : argv.provider,
+    model: argv.model,
     temperature: argv.temperature,
     maxTokens: argv.maxTokens,
     systemPrompt: argv.system,

@@ -318,7 +323,7 @@ const cli = yargs(args)
     })();
     const agentProvider = new AgentEnhancedProvider({
         provider: supportedProvider,
-        model:
+        model: argv.model, // Use specified model or default
         toolCategory: "all", // Enable all tool categories
     });
     generatePromise = agentProvider.generateText(argv.prompt);

@@ -331,20 +336,26 @@ const cli = yargs(args)
     spinner.succeed(chalk.green("✅ Text generated successfully!"));
 }
 // Handle both AgentEnhancedProvider (AI SDK) and standard NeuroLink SDK responses
-const responseText = result
-
-
-
-
-
+const responseText = result
+    ? result.text || result.content || ""
+    : "";
+const responseUsage = result
+    ? result.usage || {
+        promptTokens: 0,
+        completionTokens: 0,
+        totalTokens: 0,
+    }
+    : { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
 if (argv.format === "json") {
     const jsonOutput = {
         content: responseText,
-        provider: result
+        provider: result
+            ? result.provider || argv.provider
+            : argv.provider,
         usage: responseUsage,
-        responseTime: result ?
-        toolCalls: result ?
-        toolResults: result ?
+        responseTime: result ? result.responseTime || 0 : 0,
+        toolCalls: result ? result.toolCalls || [] : [],
+        toolResults: result ? result.toolResults || [] : [],
     };
     process.stdout.write(JSON.stringify(jsonOutput, null, 2) + "\n");
 }

@@ -354,7 +365,9 @@ const cli = yargs(args)
     console.log("\n" + responseText + "\n");
 }
 // Show tool calls if any
-if (result &&
+if (result &&
+    result.toolCalls &&
+    result.toolCalls.length > 0) {
     console.log(chalk.blue("🔧 Tools Called:"));
     for (const toolCall of result.toolCalls) {
         console.log(`- ${toolCall.toolName}`);

@@ -363,7 +376,9 @@ const cli = yargs(args)
     console.log();
 }
 // Show tool results if any
-if (result &&
+if (result &&
+    result.toolResults &&
+    result.toolResults.length > 0) {
     console.log(chalk.blue("📋 Tool Results:"));
     for (const toolResult of result.toolResults) {
         console.log(`- ${toolResult.toolCallId}`);

@@ -372,9 +387,11 @@ const cli = yargs(args)
     console.log();
 }
 console.log(JSON.stringify({
-    provider: result
+    provider: result
+        ? result.provider || argv.provider
+        : argv.provider,
     usage: responseUsage,
-    responseTime: result ?
+    responseTime: result ? result.responseTime || 0 : 0,
 }, null, 2));
 if (responseUsage.totalTokens) {
     console.log(chalk.blue(`ℹ️ ${responseUsage.totalTokens} tokens used`));

@@ -438,6 +455,10 @@ const cli = yargs(args)
     type: "string",
     default: "2m",
     description: "Timeout for streaming (e.g., 30s, 2m, 1h)",
+})
+    .option("model", {
+    type: "string",
+    description: "Specific model to use (e.g., gemini-1.5-pro-latest, gemini-2.0-flash-exp)",
 })
     .option("debug", {
     type: "boolean",

@@ -469,6 +490,7 @@ const cli = yargs(args)
     provider: argv.provider === "auto"
         ? undefined
         : argv.provider,
+    model: argv.model,
     temperature: argv.temperature,
     timeout: argv.timeout,
 });

@@ -489,7 +511,7 @@ const cli = yargs(args)
     })();
     const agentProvider = new AgentEnhancedProvider({
         provider: supportedProvider,
-        model:
+        model: argv.model, // Use specified model or default
         toolCategory: "all", // Enable all tool categories
     });
     // Note: AgentEnhancedProvider doesn't support streaming with tools yet

@@ -501,7 +523,7 @@ const cli = yargs(args)
     const DELAY_MS = 50;
     for (let i = 0; i < text.length; i += CHUNK_SIZE) {
         process.stdout.write(text.slice(i, i + CHUNK_SIZE));
-        await new Promise(resolve => setTimeout(resolve, DELAY_MS)); // Small delay
+        await new Promise((resolve) => setTimeout(resolve, DELAY_MS)); // Small delay
     }
     if (!argv.quiet) {
         process.stdout.write("\n");
package/dist/lib/mcp/plugins/filesystem-mcp.d.ts
CHANGED

@@ -3,7 +3,7 @@
  * Implements the new MCP contract with security sandbox
  * Based on research document recommendations
  */
-import { MCP, MCPMetadata, ExecutionContext } from "../contracts/mcp-contract.js";
+import { MCP, type MCPMetadata, type ExecutionContext } from "../contracts/mcp-contract.js";
 /**
  * FileSystem MCP Configuration
  */
package/dist/lib/neurolink.d.ts
CHANGED

@@ -8,6 +8,7 @@ import type { AIProviderName } from "./core/types.js";
 export interface TextGenerationOptions {
     prompt: string;
     provider?: "openai" | "bedrock" | "vertex" | "anthropic" | "azure" | "google-ai" | "huggingface" | "ollama" | "mistral" | "auto";
+    model?: string;
     temperature?: number;
     maxTokens?: number;
     systemPrompt?: string;

@@ -18,6 +19,7 @@ export interface TextGenerationOptions {
 export interface StreamTextOptions {
     prompt: string;
     provider?: "openai" | "bedrock" | "vertex" | "anthropic" | "azure" | "google-ai" | "huggingface" | "ollama" | "mistral" | "auto";
+    model?: string;
     temperature?: number;
     maxTokens?: number;
     systemPrompt?: string;
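With the new `model?: string` field on both option interfaces, callers can pin a specific model in addition to choosing a provider. A minimal usage sketch based on these declarations — it assumes the SDK exposes a NeuroLink class with a generateText method taking TextGenerationOptions, and the model name is only an example taken from the CLI help text above:

```typescript
import { NeuroLink } from "@juspay/neurolink";

// Sketch based on the TextGenerationOptions declaration above; the exact
// entry point and export names are assumptions, not confirmed by this diff.
const neurolink = new NeuroLink();

const result = await neurolink.generateText({
  prompt: "Write a haiku about programming",
  provider: "google-ai",     // optional - otherwise the best provider is auto-selected
  model: "gemini-2.5-flash", // new in 3.x: passed through to the chosen provider
  temperature: 0.7,
  maxTokens: 256,
});

console.log(result.text);
```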
package/dist/lib/neurolink.js
CHANGED

@@ -127,7 +127,7 @@ export class NeuroLink {
         // Create tool-aware system prompt
         const enhancedSystemPrompt = this.createToolAwareSystemPrompt(options.systemPrompt, availableTools);
         // Create provider with MCP enabled using best provider function
-        const provider = await AIProviderFactory.createBestProvider(providerName,
+        const provider = await AIProviderFactory.createBestProvider(providerName, options.model, true);
         // Generate text with automatic tool detection
         const result = await provider.generateText({
             prompt: options.prompt,

@@ -217,9 +217,10 @@ export class NeuroLink {
         logger.debug(`[${functionTag}] Attempting provider`, {
             provider: providerName,
         });
-        const provider = await AIProviderFactory.createProvider(providerName);
+        const provider = await AIProviderFactory.createProvider(providerName, options.model);
         const result = await provider.generateText({
             prompt: options.prompt,
+            model: options.model,
             temperature: options.temperature,
             maxTokens: options.maxTokens,
             systemPrompt: options.systemPrompt,

@@ -343,9 +344,10 @@ Note: Tool integration is currently in development. Please provide helpful respo
         logger.debug(`[${functionTag}] Attempting provider`, {
             provider: providerName,
         });
-        const provider = await AIProviderFactory.createProvider(providerName);
+        const provider = await AIProviderFactory.createProvider(providerName, options.model);
         const result = await provider.streamText({
             prompt: options.prompt,
+            model: options.model,
             temperature: options.temperature,
             maxTokens: options.maxTokens,
             systemPrompt: options.systemPrompt,
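The same model pass-through applies to streaming: createProvider now receives options.model and the provider's streamText call carries it as well. Combining that with the timeout pattern from the README section above gives the following sketch — the model name is an illustrative assumption, and the streamText result shape mirrors the README's SvelteKit example rather than anything confirmed by this diff:

```typescript
import { createBestAIProvider } from "@juspay/neurolink";

// Sketch combining the README's streaming example with the new model option.
export async function streamSummary(message: string): Promise<Response> {
  const provider = createBestAIProvider();
  try {
    const result = await provider.streamText({
      prompt: message,
      model: "gemini-2.5-pro", // new optional field, threaded through to the provider
      timeout: "2m",           // matches the CLI's streaming default shown above
    });
    // The README's SvelteKit example returns the stream the same way
    return new Response(result.toReadableStream());
  } catch (error) {
    if ((error as Error).name === "TimeoutError") {
      return new Response("Request timed out", { status: 408 });
    }
    throw error;
  }
}
```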