@juspay/neurolink 9.51.4 → 9.52.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/browser/neurolink.min.js +227 -225
- package/dist/cli/loop/optionsSchema.d.ts +1 -1
- package/dist/core/factory.d.ts +2 -2
- package/dist/core/factory.js +4 -4
- package/dist/factories/providerFactory.d.ts +4 -4
- package/dist/factories/providerFactory.js +20 -7
- package/dist/factories/providerRegistry.d.ts +5 -0
- package/dist/factories/providerRegistry.js +45 -26
- package/dist/lib/core/factory.d.ts +2 -2
- package/dist/lib/core/factory.js +4 -4
- package/dist/lib/factories/providerFactory.d.ts +4 -4
- package/dist/lib/factories/providerFactory.js +20 -7
- package/dist/lib/factories/providerRegistry.d.ts +5 -0
- package/dist/lib/factories/providerRegistry.js +45 -26
- package/dist/lib/neurolink.d.ts +21 -0
- package/dist/lib/neurolink.js +69 -6
- package/dist/lib/providers/amazonBedrock.d.ts +6 -1
- package/dist/lib/providers/amazonBedrock.js +14 -2
- package/dist/lib/providers/amazonSagemaker.d.ts +7 -1
- package/dist/lib/providers/amazonSagemaker.js +21 -3
- package/dist/lib/providers/anthropic.d.ts +4 -1
- package/dist/lib/providers/anthropic.js +18 -5
- package/dist/lib/providers/azureOpenai.d.ts +2 -1
- package/dist/lib/providers/azureOpenai.js +10 -5
- package/dist/lib/providers/googleAiStudio.d.ts +4 -1
- package/dist/lib/providers/googleAiStudio.js +6 -7
- package/dist/lib/providers/googleVertex.d.ts +3 -1
- package/dist/lib/providers/googleVertex.js +96 -17
- package/dist/lib/providers/huggingFace.d.ts +2 -1
- package/dist/lib/providers/huggingFace.js +4 -4
- package/dist/lib/providers/litellm.d.ts +5 -1
- package/dist/lib/providers/litellm.js +14 -9
- package/dist/lib/providers/mistral.d.ts +2 -1
- package/dist/lib/providers/mistral.js +2 -2
- package/dist/lib/providers/ollama.d.ts +3 -1
- package/dist/lib/providers/ollama.js +2 -2
- package/dist/lib/providers/openAI.d.ts +5 -1
- package/dist/lib/providers/openAI.js +15 -5
- package/dist/lib/providers/openRouter.d.ts +5 -1
- package/dist/lib/providers/openRouter.js +17 -5
- package/dist/lib/providers/openaiCompatible.d.ts +4 -1
- package/dist/lib/providers/openaiCompatible.js +15 -3
- package/dist/lib/types/configTypes.d.ts +7 -0
- package/dist/lib/types/generateTypes.d.ts +13 -0
- package/dist/lib/types/providers.d.ts +75 -0
- package/dist/lib/types/streamTypes.d.ts +7 -1
- package/dist/neurolink.d.ts +21 -0
- package/dist/neurolink.js +69 -6
- package/dist/providers/amazonBedrock.d.ts +6 -1
- package/dist/providers/amazonBedrock.js +14 -2
- package/dist/providers/amazonSagemaker.d.ts +7 -1
- package/dist/providers/amazonSagemaker.js +21 -3
- package/dist/providers/anthropic.d.ts +4 -1
- package/dist/providers/anthropic.js +18 -5
- package/dist/providers/azureOpenai.d.ts +2 -1
- package/dist/providers/azureOpenai.js +10 -5
- package/dist/providers/googleAiStudio.d.ts +4 -1
- package/dist/providers/googleAiStudio.js +6 -7
- package/dist/providers/googleVertex.d.ts +3 -1
- package/dist/providers/googleVertex.js +96 -17
- package/dist/providers/huggingFace.d.ts +2 -1
- package/dist/providers/huggingFace.js +4 -4
- package/dist/providers/litellm.d.ts +5 -1
- package/dist/providers/litellm.js +14 -9
- package/dist/providers/mistral.d.ts +2 -1
- package/dist/providers/mistral.js +2 -2
- package/dist/providers/ollama.d.ts +3 -1
- package/dist/providers/ollama.js +2 -2
- package/dist/providers/openAI.d.ts +5 -1
- package/dist/providers/openAI.js +15 -5
- package/dist/providers/openRouter.d.ts +5 -1
- package/dist/providers/openRouter.js +17 -5
- package/dist/providers/openaiCompatible.d.ts +4 -1
- package/dist/providers/openaiCompatible.js +15 -3
- package/dist/types/configTypes.d.ts +7 -0
- package/dist/types/generateTypes.d.ts +13 -0
- package/dist/types/providers.d.ts +75 -0
- package/dist/types/streamTypes.d.ts +7 -1
- package/package.json +3 -2
```diff
@@ -14,21 +14,21 @@ const getHuggingFaceApiKey = () => {
 const getDefaultHuggingFaceModel = () => {
     return getProviderModel("HUGGINGFACE_MODEL", "microsoft/DialoGPT-medium");
 };
-// Note:
+// Note: hasNeurolinkCredentials["huggingFace"] now directly imported from consolidated utility
 /**
  * HuggingFace Provider - BaseProvider Implementation
  * Using AI SDK with HuggingFace's OpenAI-compatible endpoint
  */
 export class HuggingFaceProvider extends BaseProvider {
     model;
-    constructor(modelName) {
+    constructor(modelName, _sdk, credentials) {
         super(modelName, "huggingface");
         // Get API key and validate
-        const apiKey = getHuggingFaceApiKey();
+        const apiKey = credentials?.apiKey ?? getHuggingFaceApiKey();
         // Create HuggingFace provider using unified router endpoint (2025) with proxy support
         const huggingface = createOpenAI({
             apiKey: apiKey,
-            baseURL: "https://router.huggingface.co/v1",
+            baseURL: credentials?.baseURL ?? "https://router.huggingface.co/v1",
             fetch: createProxyFetch(),
         });
         // Initialize model
```
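The pattern above repeats across every provider touched in this release: an optional `credentials` argument is consulted first, and the existing environment-variable getters stay in place as the fallback. A hedged usage sketch for the HuggingFace case, assuming the class is importable from the package root (the model id and key are placeholders, and the factory normally constructs providers for you):

```ts
import { HuggingFaceProvider } from "@juspay/neurolink";

// Explicit credentials win; omit them and the provider falls back to the
// environment-based getters (getHuggingFaceApiKey, the default router URL).
const provider = new HuggingFaceProvider(
  "meta-llama/Llama-3.1-8B-Instruct", // placeholder model id
  undefined, // _sdk: not needed for this sketch
  { apiKey: "hf_placeholder", baseURL: "https://router.huggingface.co/v1" },
);
```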
```diff
@@ -9,10 +9,14 @@ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
  */
 export declare class LiteLLMProvider extends BaseProvider {
     private model;
+    private credentials?;
     private static modelsCache;
     private static modelsCacheTime;
     private static readonly MODELS_CACHE_DURATION;
-    constructor(modelName?: string, sdk?: unknown);
+    constructor(modelName?: string, sdk?: unknown, _region?: string, credentials?: {
+        apiKey?: string;
+        baseURL?: string;
+    });
     protected getProviderName(): AIProviderName;
     protected getDefaultModel(): string;
     /**
```
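The widened declaration keeps the first two parameters as they were, inserts an unused `_region` placeholder third, and adds `credentials` last, so existing call sites compile unchanged. A sketch of both call styles, again assuming the class is exported from the package root and using placeholder values:

```ts
import { LiteLLMProvider } from "@juspay/neurolink";

// Existing call sites keep working and keep reading the LITELLM_* env config.
const fromEnv = new LiteLLMProvider("gpt-4o");

// New call sites pass undefined for _region and supply credentials directly.
const viaProxy = new LiteLLMProvider("gpt-4o", undefined, undefined, {
  apiKey: "sk-litellm-placeholder",
  baseURL: "http://localhost:4000", // an illustrative LiteLLM proxy endpoint
});
```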
```diff
@@ -42,12 +42,15 @@ const getDefaultLiteLLMModel = () => {
  */
 export class LiteLLMProvider extends BaseProvider {
     model;
+    credentials;
     // Cache for available models to avoid repeated API calls
     static modelsCache = [];
     static modelsCacheTime = 0;
     static MODELS_CACHE_DURATION = 10 * 60 * 1000; // 10 minutes
-    constructor(modelName, sdk) {
+    constructor(modelName, sdk, _region, credentials) {
         super(modelName, "litellm", sdk);
+        // Store per-request credentials for use in embed/embedMany/fetchModelsFromAPI
+        this.credentials = credentials;
         // Initialize LiteLLM using OpenAI SDK with explicit configuration
         const config = getLiteLLMConfig();
         // Create OpenAI SDK instance configured for LiteLLM proxy
@@ -56,8 +59,8 @@ export class LiteLLMProvider extends BaseProvider {
         // with a custom baseURL and apiKey. This ensures all requests are routed through the LiteLLM
         // proxy, allowing access to multiple models and custom authentication.
         const customOpenAI = createOpenAI({
-            baseURL: config.baseURL,
-            apiKey: config.apiKey,
+            baseURL: credentials?.baseURL ?? config.baseURL,
+            apiKey: credentials?.apiKey ?? config.apiKey,
             fetch: createProxyFetch(),
         });
         this.model = customOpenAI.chat(this.modelName || getDefaultLiteLLMModel());
@@ -374,8 +377,8 @@ export class LiteLLMProvider extends BaseProvider {
             process.env.LITELLM_EMBEDDING_MODEL ||
             "gemini-embedding-001";
         const customOpenAI = createOpenAI({
-            baseURL: config.baseURL,
-            apiKey: config.apiKey,
+            baseURL: this.credentials?.baseURL ?? config.baseURL,
+            apiKey: this.credentials?.apiKey ?? config.apiKey,
             fetch: createProxyFetch(),
         });
         const embeddingModel = customOpenAI.textEmbeddingModel(embeddingModelName);
@@ -394,8 +397,8 @@ export class LiteLLMProvider extends BaseProvider {
             process.env.LITELLM_EMBEDDING_MODEL ||
             "gemini-embedding-001";
         const customOpenAI = createOpenAI({
-            baseURL: config.baseURL,
-            apiKey: config.apiKey,
+            baseURL: this.credentials?.baseURL ?? config.baseURL,
+            apiKey: this.credentials?.apiKey ?? config.apiKey,
             fetch: createProxyFetch(),
         });
         const embeddingModel = customOpenAI.textEmbeddingModel(embeddingModelName);
@@ -458,7 +461,9 @@ export class LiteLLMProvider extends BaseProvider {
     async fetchModelsFromAPI() {
         const functionTag = "LiteLLMProvider.fetchModelsFromAPI";
         const config = getLiteLLMConfig();
-        const modelsUrl = `${config.baseURL}/v1/models`;
+        const resolvedBaseURL = this.credentials?.baseURL ?? config.baseURL;
+        const resolvedApiKey = this.credentials?.apiKey ?? config.apiKey;
+        const modelsUrl = `${resolvedBaseURL}/v1/models`;
         const controller = new AbortController();
         const timeoutId = setTimeout(() => controller.abort(), 5000); // 5 second timeout
         try {
@@ -467,7 +472,7 @@ export class LiteLLMProvider extends BaseProvider {
         const response = await proxyFetch(modelsUrl, {
             method: "GET",
             headers: {
-                Authorization: `Bearer ${config.apiKey}`,
+                Authorization: `Bearer ${resolvedApiKey}`,
                 "Content-Type": "application/json",
             },
             signal: controller.signal,
```
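The stored `this.credentials` also feed model discovery: `fetchModelsFromAPI` resolves the base URL and API key once, then calls the proxy's `/v1/models` route with a bearer token and a five-second abort timeout. A standalone sketch of that request shape, with placeholder values and the global `fetch` standing in for the proxy-aware fetch used internally:

```ts
async function listLiteLLMModels(baseURL: string, apiKey: string): Promise<unknown> {
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), 5000); // 5 second timeout
  try {
    const response = await fetch(`${baseURL}/v1/models`, {
      method: "GET",
      headers: {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json",
      },
      signal: controller.signal,
    });
    return await response.json();
  } finally {
    clearTimeout(timeoutId);
  }
}

// e.g. listLiteLLMModels("http://localhost:4000", "sk-litellm-placeholder")
```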
```diff
@@ -1,6 +1,7 @@
 import { type LanguageModel } from "ai";
 import type { AIProviderName } from "../constants/enums.js";
 import { BaseProvider } from "../core/baseProvider.js";
+import type { NeurolinkCredentials } from "../types/providers.js";
 import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
 import type { ValidationSchema } from "../types/typeAliases.js";
 /**
@@ -9,7 +10,7 @@ import type { ValidationSchema } from "../types/typeAliases.js";
  */
 export declare class MistralProvider extends BaseProvider {
     private model;
-    constructor(modelName?: string, sdk?: unknown);
+    constructor(modelName?: string, sdk?: unknown, _region?: string, credentials?: NeurolinkCredentials["mistral"]);
     protected executeStream(options: StreamOptions, _analysisSchema?: ValidationSchema): Promise<StreamResult>;
     getProviderName(): AIProviderName;
     getDefaultModel(): string;
```
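Unlike the inline `{ apiKey?; baseURL? }` literals used elsewhere, the Mistral declaration reuses the shared type through an indexed access. A small type-level sketch of what that parameter accepts, assuming `NeurolinkCredentials` is re-exported from the package root:

```ts
import type { NeurolinkCredentials } from "@juspay/neurolink";

// Because `mistral` is an optional property, the indexed access includes undefined.
type MistralCredentials = NeurolinkCredentials["mistral"]; // { apiKey?: string } | undefined

const creds: MistralCredentials = { apiKey: "placeholder-mistral-key" };
```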
```diff
@@ -23,14 +23,14 @@ const getDefaultMistralModel = () => {
  */
 export class MistralProvider extends BaseProvider {
     model;
-    constructor(modelName, sdk) {
+    constructor(modelName, sdk, _region, credentials) {
         // Type guard for NeuroLink parameter validation
         const validatedNeurolink = sdk && typeof sdk === "object" && "getInMemoryServers" in sdk
             ? sdk
             : undefined;
         super(modelName, "mistral", validatedNeurolink);
         // Initialize Mistral model with API key validation and proxy support
-        const apiKey = getMistralApiKey();
+        const apiKey = credentials?.apiKey ?? getMistralApiKey();
         const mistral = createMistral({
             apiKey: apiKey,
             fetch: createProxyFetch(),
```
```diff
@@ -18,7 +18,9 @@ export declare class OllamaProvider extends BaseProvider {
     private ollamaModel;
     private baseUrl;
     private timeout;
-    constructor(modelName?: string);
+    constructor(modelName?: string, credentials?: {
+        baseURL?: string;
+    });
     protected getProviderName(): AIProviderName;
     protected getDefaultModel(): string;
     /**
```
```diff
@@ -523,9 +523,9 @@ export class OllamaProvider extends BaseProvider {
     ollamaModel;
     baseUrl;
     timeout;
-    constructor(modelName) {
+    constructor(modelName, credentials) {
         super(modelName, "ollama");
-        this.baseUrl = getOllamaBaseUrl();
+        this.baseUrl = credentials?.baseURL ?? getOllamaBaseUrl();
         this.timeout = getOllamaTimeout();
         // Initialize Ollama model
         this.ollamaModel = new OllamaLanguageModel(this.modelName || getDefaultOllamaModel(), this.baseUrl, this.timeout);
```
```diff
@@ -10,7 +10,11 @@ import type { ValidationSchema } from "../types/typeAliases.js";
  */
 export declare class OpenAIProvider extends BaseProvider {
     private model;
-    constructor(modelName?: string, neurolink?: NeuroLink);
+    private credentials?;
+    constructor(modelName?: string, neurolink?: NeuroLink, _region?: string, credentials?: {
+        apiKey?: string;
+        baseURL?: string;
+    });
     /**
      * Check if this provider supports tool/function calling
      */
```
```diff
@@ -36,11 +36,14 @@ const streamTracer = trace.getTracer("neurolink.provider.openai");
  */
 export class OpenAIProvider extends BaseProvider {
     model;
-    constructor(modelName, neurolink) {
+    credentials;
+    constructor(modelName, neurolink, _region, credentials) {
         super(modelName || getOpenAIModel(), AIProviderName.OPENAI, neurolink);
+        this.credentials = credentials;
         // Initialize OpenAI provider with proxy support
         const openai = createOpenAI({
-            apiKey: getOpenAIApiKey(),
+            apiKey: credentials?.apiKey ?? getOpenAIApiKey(),
+            ...(credentials?.baseURL ? { baseURL: credentials.baseURL } : {}),
             fetch: createProxyFetch(),
         });
         // Initialize model
@@ -554,9 +557,12 @@ export class OpenAIProvider extends BaseProvider {
         });
         try {
             // Create embedding model using the AI SDK
-            // Create the OpenAI provider
+            // Create the OpenAI provider, preferring per-instance credentials over env vars
             const openai = createOpenAI({
-                apiKey: getOpenAIApiKey(),
+                apiKey: this.credentials?.apiKey ?? getOpenAIApiKey(),
+                ...(this.credentials?.baseURL
+                    ? { baseURL: this.credentials.baseURL }
+                    : {}),
                 fetch: createProxyFetch(),
             });
             // Get the text embedding model
@@ -596,8 +602,12 @@ export class OpenAIProvider extends BaseProvider {
             count: texts.length,
         });
         try {
+            // Prefer per-instance credentials over env vars
             const openai = createOpenAI({
-                apiKey: getOpenAIApiKey(),
+                apiKey: this.credentials?.apiKey ?? getOpenAIApiKey(),
+                ...(this.credentials?.baseURL
+                    ? { baseURL: this.credentials.baseURL }
+                    : {}),
                 fetch: createProxyFetch(),
             });
             const embeddingModel = openai.textEmbeddingModel(embeddingModelName);
```
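Note the conditional spread around `baseURL`: spreading `{}` when no override exists keeps the property off the options object entirely (presumably so the SDK's own default endpoint applies), whereas writing `baseURL: credentials?.baseURL` would define the key with the value `undefined`. A minimal illustration of that difference:

```ts
const override: string | undefined = undefined;

const conditionalSpread = { ...(override ? { baseURL: override } : {}) };
const plainAssignment = { baseURL: override };

console.log("baseURL" in conditionalSpread); // false: key omitted entirely
console.log("baseURL" in plainAssignment);   // true: key present, value undefined
```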
```diff
@@ -10,12 +10,16 @@ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
 export declare class OpenRouterProvider extends BaseProvider {
     private model;
     private openRouterClient;
+    private config;
     private static modelsCache;
     private static modelsCacheTime;
     private static readonly MODELS_CACHE_DURATION;
     private static toolCapableModels;
     private static capabilitiesCached;
-    constructor(modelName?: string, sdk?: unknown);
+    constructor(modelName?: string, sdk?: unknown, _region?: string, credentials?: {
+        apiKey?: string;
+        baseURL?: string;
+    });
     protected getProviderName(): AIProviderName;
     protected getDefaultModel(): string;
     /**
```
```diff
@@ -47,6 +47,7 @@ const getDefaultOpenRouterModel = () => {
 export class OpenRouterProvider extends BaseProvider {
     model;
     openRouterClient;
+    config;
     // Cache for available models to avoid repeated API calls
     static modelsCache = [];
     static modelsCacheTime = 0;
@@ -54,10 +55,20 @@ export class OpenRouterProvider extends BaseProvider {
     // Cache for model capabilities (which models support tools)
     static toolCapableModels = new Set();
     static capabilitiesCached = false;
-    constructor(modelName, sdk) {
+    constructor(modelName, sdk, _region, credentials) {
         super(modelName, AIProviderName.OPENROUTER, sdk);
-        //
-        const config = getOpenRouterConfig();
+        // Build config: prefer credentials over env vars to avoid throwing when env vars are absent
+        if (credentials?.apiKey) {
+            this.config = {
+                apiKey: credentials.apiKey,
+                referer: process.env.OPENROUTER_REFERER,
+                appName: process.env.OPENROUTER_APP_NAME,
+            };
+        }
+        else {
+            this.config = getOpenRouterConfig(); // throws if OPENROUTER_API_KEY missing
+        }
+        const config = this.config;
         // Build headers for attribution on openrouter.ai/activity dashboard
         const headers = {};
         if (config.referer) {
@@ -69,6 +80,7 @@ export class OpenRouterProvider extends BaseProvider {
         // Create OpenRouter client with optional attribution headers
         this.openRouterClient = createOpenRouter({
             apiKey: config.apiKey,
+            ...(credentials?.baseURL ? { baseURL: credentials.baseURL } : {}),
             ...(Object.keys(headers).length > 0 && { headers }),
         });
         // Initialize model with OpenRouter client
@@ -437,7 +449,7 @@ export class OpenRouterProvider extends BaseProvider {
      */
     async fetchModelsFromAPI() {
         const functionTag = "OpenRouterProvider.fetchModelsFromAPI";
-        const config = getOpenRouterConfig();
+        const config = this.config;
         const modelsUrl = "https://openrouter.ai/api/v1/models";
         const controller = new AbortController();
         const timeoutId = setTimeout(() => controller.abort(), MODELS_DISCOVERY_TIMEOUT_MS);
@@ -499,7 +511,7 @@ export class OpenRouterProvider extends BaseProvider {
             return; // Already cached
         }
         try {
-            const config = getOpenRouterConfig();
+            const config = this.config;
             const modelsUrl = "https://openrouter.ai/api/v1/models";
             const controller = new AbortController();
             const timeoutId = setTimeout(() => controller.abort(), MODELS_DISCOVERY_TIMEOUT_MS);
```
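With the branch above, supplying an OpenRouter key through credentials bypasses `getOpenRouterConfig()` entirely, so construction no longer requires `OPENROUTER_API_KEY` in the environment; attribution still picks up `OPENROUTER_REFERER` and `OPENROUTER_APP_NAME` when they are set. A hedged sketch with placeholder values, assuming direct construction for illustration:

```ts
import { OpenRouterProvider } from "@juspay/neurolink";

// No OPENROUTER_API_KEY needs to be present in the environment on this path.
const provider = new OpenRouterProvider(
  "openrouter/auto",        // placeholder model id
  undefined,                // sdk
  undefined,                // _region (unused)
  { apiKey: "sk-or-placeholder" },
);
```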
```diff
@@ -12,7 +12,10 @@ export declare class OpenAICompatibleProvider extends BaseProvider {
     private config;
     private discoveredModel?;
     private customOpenAI;
-    constructor(modelName?: string, sdk?: unknown);
+    constructor(modelName?: string, sdk?: unknown, _region?: string, credentials?: {
+        apiKey?: string;
+        baseURL?: string;
+    });
     protected getProviderName(): AIProviderName;
     protected getDefaultModel(): string;
     /**
```
45
45
|
config;
|
|
46
46
|
discoveredModel;
|
|
47
47
|
customOpenAI;
|
|
48
|
-
constructor(modelName, sdk) {
|
|
48
|
+
constructor(modelName, sdk, _region, credentials) {
|
|
49
49
|
super(modelName, "openai-compatible", sdk);
|
|
50
|
-
//
|
|
51
|
-
|
|
50
|
+
// Build config: prefer credentials over env vars to avoid throwing when env vars are absent
|
|
51
|
+
if (credentials?.apiKey && credentials?.baseURL) {
|
|
52
|
+
this.config = {
|
|
53
|
+
apiKey: credentials.apiKey,
|
|
54
|
+
baseURL: credentials.baseURL,
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
else {
|
|
58
|
+
const envConfig = getOpenAICompatibleConfig(); // throws if env vars missing
|
|
59
|
+
this.config = {
|
|
60
|
+
apiKey: credentials?.apiKey ?? envConfig.apiKey,
|
|
61
|
+
baseURL: credentials?.baseURL ?? envConfig.baseURL,
|
|
62
|
+
};
|
|
63
|
+
}
|
|
52
64
|
// Create OpenAI SDK instance configured for custom endpoint
|
|
53
65
|
// This allows us to use OpenAI-compatible API by simply changing the baseURL
|
|
54
66
|
this.customOpenAI = createOpenAI({
|
|
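One nuance in the branch above: the env-free path is taken only when credentials carry both `apiKey` and `baseURL`. With just one of the two, `getOpenAICompatibleConfig()` still runs (and can still throw when its env vars are missing), and the single supplied field then overrides the matching env value. A small sketch of that resolution order, where `loadEnvConfig` stands in for the package's env-based loader:

```ts
type CompatCreds = { apiKey?: string; baseURL?: string };
type CompatConfig = { apiKey: string; baseURL: string };

// Sketch of the resolution shown in the diff above, not the package's own code.
function resolveCompatConfig(
  credentials: CompatCreds | undefined,
  loadEnvConfig: () => CompatConfig,
): CompatConfig {
  if (credentials?.apiKey && credentials?.baseURL) {
    // Fully specified: environment variables are never consulted.
    return { apiKey: credentials.apiKey, baseURL: credentials.baseURL };
  }
  const envConfig = loadEnvConfig(); // may throw if env vars are missing
  return {
    apiKey: credentials?.apiKey ?? envConfig.apiKey,
    baseURL: credentials?.baseURL ?? envConfig.baseURL,
  };
}
```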
```diff
@@ -11,6 +11,7 @@ import type { RoutingStrategy } from "../mcp/routing/index.js";
 import type { CacheStrategy } from "../mcp/caching/index.js";
 import type { ToolMiddleware } from "../mcp/toolIntegration.js";
 import type { MastraAuthProvider, AuthProviderType, AuthProviderConfig, Auth0Config, ClerkConfig, FirebaseConfig, SupabaseConfig, WorkOSConfig, BetterAuthConfig, JWTConfig, OAuth2Config, CognitoConfig, KeycloakConfig, AuthenticatedContext } from "./authTypes.js";
+import type { NeurolinkCredentials } from "./providers.js";
 /**
  * Main NeuroLink configuration type
  */
@@ -39,6 +40,12 @@ export type NeurolinkConstructorConfig = {
     auth?: NeuroLinkAuthConfig;
     /** TaskManager configuration (scheduled and self-running tasks) */
     tasks?: TaskManagerConfig;
+    /**
+     * Per-provider credential overrides.
+     * When set here, applies as the default for all generate()/stream() calls
+     * from this NeuroLink instance. Per-call credentials override these.
+     */
+    credentials?: NeurolinkCredentials;
 };
 /**
  * Configuration for MCP enhancement modules wired into generate()/stream() paths.
```
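This makes the constructor the natural home for instance-wide overrides. A hedged example, assuming `NeuroLink` is the package's main export (all key values are placeholders):

```ts
import { NeuroLink } from "@juspay/neurolink";

const neurolink = new NeuroLink({
  credentials: {
    openai: { apiKey: "sk-proj-placeholder" },
    ollama: { baseURL: "http://127.0.0.1:11434" },
  },
});
// Providers not listed here (e.g. anthropic) keep reading their environment variables.
```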
```diff
@@ -11,6 +11,7 @@ import type { DirectorModeOptions, DirectorSegment, VideoGenerationResult, Video
 import type { PPTGenerationResult, PPTOutputOptions } from "./pptTypes.js";
 import type { TTSOptions, TTSResult } from "./ttsTypes.js";
 import type { StandardRecord, ValidationSchema, ZodUnknownSchema } from "./typeAliases.js";
+import type { NeurolinkCredentials } from "./providers.js";
 /**
  * Generate function options type - Primary method for content generation
  * Supports multimodal content while maintaining backward compatibility
@@ -424,6 +425,12 @@ export type GenerateOptions = {
     auth?: {
         token: string;
     };
+    /**
+     * Per-provider credential overrides for this request.
+     * Overrides instance-level credentials set in `new NeuroLink({ credentials })`.
+     * Unset providers fall through to instance credentials, then environment variables.
+     */
+    credentials?: NeurolinkCredentials;
     /**
      * Per-call memory control.
      *
@@ -942,6 +949,12 @@ export type TextGenerationOptions = {
         /** Thinking level (Gemini 3: minimal|low|medium|high). Ignored for Anthropic. */
         thinkingLevel?: "minimal" | "low" | "medium" | "high";
     };
+    /**
+     * Per-provider credential overrides for this request.
+     * Overrides instance-level credentials set in `new NeuroLink({ credentials })`.
+     * Unset providers fall through to instance credentials, then environment variables.
+     */
+    credentials?: NeurolinkCredentials;
     /**
      * Optional request identifier for observability and log correlation.
      * When provided, this ID is forwarded to spans, logs, and telemetry so
```
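Per-call overrides then layer on top. A sketch of a single request that swaps the OpenAI key for just this call, reusing the `neurolink` instance from the earlier example and assuming the instance method is named `generate()` as the comments in this diff imply (prompt and model options are elided because they are unchanged by this release):

```ts
const result = await neurolink.generate({
  // ...prompt, model, and provider options as before...
  credentials: {
    openai: { apiKey: "sk-tenant-specific-placeholder" },
  },
});
```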
```diff
@@ -71,6 +71,81 @@ export type AWSCredentialConfig = {
     /** Optional service endpoint override (e.g., VPC/Gov endpoints) */
     endpoint?: string;
 };
+/**
+ * Per-provider credential overrides for generate() / stream() calls.
+ *
+ * When set on `NeurolinkConstructorConfig.credentials`, applies as the default
+ * for all calls from that NeuroLink instance. When set on
+ * `GenerateOptions.credentials` or `StreamOptions.credentials`, overrides the
+ * instance default for that single call.
+ *
+ * Unset providers fall through to environment variables (existing behaviour).
+ */
+export type NeurolinkCredentials = {
+    openai?: {
+        apiKey?: string;
+        baseURL?: string;
+    };
+    anthropic?: {
+        apiKey?: string;
+        oauthToken?: string;
+    };
+    googleAiStudio?: {
+        apiKey?: string;
+    };
+    vertex?: {
+        projectId?: string;
+        location?: string;
+        /** Vertex Express Mode — simplified API-key auth */
+        apiKey?: string;
+        /** Full service-account JSON string */
+        serviceAccountKey?: string;
+        /** Inline service-account fields (alternative to serviceAccountKey) */
+        clientEmail?: string;
+        privateKey?: string;
+    };
+    bedrock?: {
+        accessKeyId?: string;
+        secretAccessKey?: string;
+        sessionToken?: string;
+        region?: string;
+    };
+    sagemaker?: {
+        accessKeyId?: string;
+        secretAccessKey?: string;
+        sessionToken?: string;
+        region?: string;
+        endpoint?: string;
+    };
+    azure?: {
+        apiKey?: string;
+        resourceName?: string;
+        deploymentName?: string;
+        apiVersion?: string;
+    };
+    mistral?: {
+        apiKey?: string;
+    };
+    huggingFace?: {
+        apiKey?: string;
+        baseURL?: string;
+    };
+    openrouter?: {
+        apiKey?: string;
+        baseURL?: string;
+    };
+    litellm?: {
+        apiKey?: string;
+        baseURL?: string;
+    };
+    openaiCompatible?: {
+        apiKey?: string;
+        baseURL?: string;
+    };
+    ollama?: {
+        baseURL?: string;
+    };
+};
 /**
  * AWS Credential Validation Result
  */
```
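Since every field is optional, one `NeurolinkCredentials` value can mix auth styles per provider. An illustrative value with placeholder secrets, assuming the type is re-exported from the package root:

```ts
import type { NeurolinkCredentials } from "@juspay/neurolink";

const credentials: NeurolinkCredentials = {
  vertex: {
    projectId: "my-gcp-project",
    location: "us-central1",
    serviceAccountKey: '{"type":"service_account","...":"..."}', // full JSON string
  },
  bedrock: {
    accessKeyId: "AKIA-PLACEHOLDER",
    secretAccessKey: "placeholder-secret",
    region: "us-east-1",
  },
  anthropic: { apiKey: "sk-ant-placeholder" },
  ollama: { baseURL: "http://127.0.0.1:11434" },
};
```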
```diff
@@ -9,7 +9,7 @@ import type { JsonValue, UnknownRecord } from "./common.js";
 import type { Content, ImageWithAltText } from "./content.js";
 import type { ChatMessage } from "./conversation.js";
 import type { AdditionalMemoryUser } from "./generateTypes.js";
-import type { AIModelProviderConfig } from "./providers.js";
+import type { AIModelProviderConfig, NeurolinkCredentials } from "./providers.js";
 import type { TTSChunk, TTSOptions } from "./ttsTypes.js";
 import type { StandardRecord, ValidationSchema } from "./typeAliases.js";
 /**
@@ -444,6 +444,12 @@ export type StreamOptions = {
     auth?: {
         token: string;
     };
+    /**
+     * Per-provider credential overrides for this request.
+     * Overrides instance-level credentials set in `new NeuroLink({ credentials })`.
+     * Unset providers fall through to instance credentials, then environment variables.
+     */
+    credentials?: NeurolinkCredentials;
     /**
      * Per-call memory control.
      *
```
package/dist/neurolink.d.ts CHANGED
````diff
@@ -73,6 +73,27 @@ export declare class NeuroLink {
     private authProvider?;
     private pendingAuthConfig?;
     private authInitPromise?;
+    private credentials?;
+    /**
+     * Merge instance-level credentials with per-call credentials.
+     *
+     * Semantics: **deep merge at the provider level.** For each provider key
+     * present in both `this.credentials` and `callCredentials`, the per-call
+     * fields are merged ON TOP of the instance-level fields, so fields not
+     * mentioned in the per-call slice are preserved.
+     *
+     * Example:
+     * ```
+     * instance: { openai: { apiKey: "key1", baseURL: "url1" } }
+     * per-call: { openai: { apiKey: "key2" } }
+     * merged: { openai: { apiKey: "key2", baseURL: "url1" } } // baseURL preserved
+     * ```
+     *
+     * Providers present only in one source are carried through unchanged.
+     * Unrelated providers (not overridden in callCredentials) are carried through
+     * from instance credentials unchanged.
+     */
+    private resolveCredentials;
     private hitlManager?;
     private _sessionCostUsd;
     private fileRegistry;
````
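The declaration only exposes the member name, but the documented semantics are precise enough to sketch. A possible shape for such a provider-level deep merge, written as an illustration of the described behaviour rather than the package's private implementation:

```ts
type ProviderSlice = Record<string, unknown>;
type Credentials = Record<string, ProviderSlice | undefined>;

function resolveCredentials(
  instanceCreds: Credentials | undefined,
  callCreds: Credentials | undefined,
): Credentials | undefined {
  if (!instanceCreds) return callCreds;
  if (!callCreds) return instanceCreds;
  const merged: Credentials = { ...instanceCreds }; // carry unrelated providers through
  for (const [provider, slice] of Object.entries(callCreds)) {
    // Per-call fields land on top of instance fields for the same provider.
    merged[provider] = { ...instanceCreds[provider], ...slice };
  }
  return merged;
}

// resolveCredentials(
//   { openai: { apiKey: "key1", baseURL: "url1" } },
//   { openai: { apiKey: "key2" } },
// ) -> { openai: { apiKey: "key2", baseURL: "url1" } }
```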