@juspay/neurolink 9.51.3 → 9.52.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/artifacts/artifactStore.d.ts +56 -0
  3. package/dist/artifacts/artifactStore.js +143 -0
  4. package/dist/browser/neurolink.min.js +311 -298
  5. package/dist/cli/commands/mcp.d.ts +6 -0
  6. package/dist/cli/commands/mcp.js +128 -86
  7. package/dist/cli/loop/optionsSchema.d.ts +1 -1
  8. package/dist/core/factory.d.ts +2 -2
  9. package/dist/core/factory.js +4 -4
  10. package/dist/core/redisConversationMemoryManager.js +20 -0
  11. package/dist/factories/providerFactory.d.ts +4 -4
  12. package/dist/factories/providerFactory.js +20 -7
  13. package/dist/factories/providerRegistry.d.ts +5 -0
  14. package/dist/factories/providerRegistry.js +45 -26
  15. package/dist/lib/artifacts/artifactStore.d.ts +56 -0
  16. package/dist/lib/artifacts/artifactStore.js +144 -0
  17. package/dist/lib/core/factory.d.ts +2 -2
  18. package/dist/lib/core/factory.js +4 -4
  19. package/dist/lib/core/redisConversationMemoryManager.js +20 -0
  20. package/dist/lib/factories/providerFactory.d.ts +4 -4
  21. package/dist/lib/factories/providerFactory.js +20 -7
  22. package/dist/lib/factories/providerRegistry.d.ts +5 -0
  23. package/dist/lib/factories/providerRegistry.js +45 -26
  24. package/dist/lib/mcp/externalServerManager.d.ts +6 -0
  25. package/dist/lib/mcp/externalServerManager.js +9 -0
  26. package/dist/lib/mcp/mcpOutputNormalizer.d.ts +49 -0
  27. package/dist/lib/mcp/mcpOutputNormalizer.js +182 -0
  28. package/dist/lib/mcp/toolDiscoveryService.d.ts +10 -0
  29. package/dist/lib/mcp/toolDiscoveryService.js +32 -1
  30. package/dist/lib/memory/memoryRetrievalTools.d.ts +64 -9
  31. package/dist/lib/memory/memoryRetrievalTools.js +77 -9
  32. package/dist/lib/neurolink.d.ts +23 -0
  33. package/dist/lib/neurolink.js +128 -86
  34. package/dist/lib/providers/amazonBedrock.d.ts +6 -1
  35. package/dist/lib/providers/amazonBedrock.js +14 -2
  36. package/dist/lib/providers/amazonSagemaker.d.ts +7 -1
  37. package/dist/lib/providers/amazonSagemaker.js +21 -3
  38. package/dist/lib/providers/anthropic.d.ts +4 -1
  39. package/dist/lib/providers/anthropic.js +18 -5
  40. package/dist/lib/providers/azureOpenai.d.ts +2 -1
  41. package/dist/lib/providers/azureOpenai.js +10 -5
  42. package/dist/lib/providers/googleAiStudio.d.ts +4 -1
  43. package/dist/lib/providers/googleAiStudio.js +6 -7
  44. package/dist/lib/providers/googleVertex.d.ts +3 -1
  45. package/dist/lib/providers/googleVertex.js +96 -17
  46. package/dist/lib/providers/huggingFace.d.ts +2 -1
  47. package/dist/lib/providers/huggingFace.js +4 -4
  48. package/dist/lib/providers/litellm.d.ts +5 -1
  49. package/dist/lib/providers/litellm.js +14 -9
  50. package/dist/lib/providers/mistral.d.ts +2 -1
  51. package/dist/lib/providers/mistral.js +2 -2
  52. package/dist/lib/providers/ollama.d.ts +3 -1
  53. package/dist/lib/providers/ollama.js +2 -2
  54. package/dist/lib/providers/openAI.d.ts +5 -1
  55. package/dist/lib/providers/openAI.js +15 -5
  56. package/dist/lib/providers/openRouter.d.ts +5 -1
  57. package/dist/lib/providers/openRouter.js +17 -5
  58. package/dist/lib/providers/openaiCompatible.d.ts +4 -1
  59. package/dist/lib/providers/openaiCompatible.js +15 -3
  60. package/dist/lib/session/globalSessionState.js +44 -1
  61. package/dist/lib/types/artifactTypes.d.ts +63 -0
  62. package/dist/lib/types/artifactTypes.js +11 -0
  63. package/dist/lib/types/configTypes.d.ts +39 -0
  64. package/dist/lib/types/conversation.d.ts +7 -0
  65. package/dist/lib/types/generateTypes.d.ts +13 -0
  66. package/dist/lib/types/index.d.ts +2 -0
  67. package/dist/lib/types/mcpOutputTypes.d.ts +40 -0
  68. package/dist/lib/types/mcpOutputTypes.js +9 -0
  69. package/dist/lib/types/providers.d.ts +75 -0
  70. package/dist/lib/types/streamTypes.d.ts +7 -1
  71. package/dist/mcp/externalServerManager.d.ts +6 -0
  72. package/dist/mcp/externalServerManager.js +9 -0
  73. package/dist/mcp/mcpOutputNormalizer.d.ts +49 -0
  74. package/dist/mcp/mcpOutputNormalizer.js +181 -0
  75. package/dist/mcp/toolDiscoveryService.d.ts +10 -0
  76. package/dist/mcp/toolDiscoveryService.js +32 -1
  77. package/dist/memory/memoryRetrievalTools.d.ts +64 -9
  78. package/dist/memory/memoryRetrievalTools.js +77 -9
  79. package/dist/neurolink.d.ts +23 -0
  80. package/dist/neurolink.js +128 -86
  81. package/dist/providers/amazonBedrock.d.ts +6 -1
  82. package/dist/providers/amazonBedrock.js +14 -2
  83. package/dist/providers/amazonSagemaker.d.ts +7 -1
  84. package/dist/providers/amazonSagemaker.js +21 -3
  85. package/dist/providers/anthropic.d.ts +4 -1
  86. package/dist/providers/anthropic.js +18 -5
  87. package/dist/providers/azureOpenai.d.ts +2 -1
  88. package/dist/providers/azureOpenai.js +10 -5
  89. package/dist/providers/googleAiStudio.d.ts +4 -1
  90. package/dist/providers/googleAiStudio.js +6 -7
  91. package/dist/providers/googleVertex.d.ts +3 -1
  92. package/dist/providers/googleVertex.js +96 -17
  93. package/dist/providers/huggingFace.d.ts +2 -1
  94. package/dist/providers/huggingFace.js +4 -4
  95. package/dist/providers/litellm.d.ts +5 -1
  96. package/dist/providers/litellm.js +14 -9
  97. package/dist/providers/mistral.d.ts +2 -1
  98. package/dist/providers/mistral.js +2 -2
  99. package/dist/providers/ollama.d.ts +3 -1
  100. package/dist/providers/ollama.js +2 -2
  101. package/dist/providers/openAI.d.ts +5 -1
  102. package/dist/providers/openAI.js +15 -5
  103. package/dist/providers/openRouter.d.ts +5 -1
  104. package/dist/providers/openRouter.js +17 -5
  105. package/dist/providers/openaiCompatible.d.ts +4 -1
  106. package/dist/providers/openaiCompatible.js +15 -3
  107. package/dist/session/globalSessionState.js +44 -1
  108. package/dist/types/artifactTypes.d.ts +63 -0
  109. package/dist/types/artifactTypes.js +10 -0
  110. package/dist/types/configTypes.d.ts +39 -0
  111. package/dist/types/conversation.d.ts +7 -0
  112. package/dist/types/generateTypes.d.ts +13 -0
  113. package/dist/types/index.d.ts +2 -0
  114. package/dist/types/mcpOutputTypes.d.ts +40 -0
  115. package/dist/types/mcpOutputTypes.js +8 -0
  116. package/dist/types/providers.d.ts +75 -0
  117. package/dist/types/streamTypes.d.ts +7 -1
  118. package/package.json +3 -2
@@ -8,7 +8,12 @@ export declare class AmazonBedrockProvider extends BaseProvider {
8
8
  private bedrockClient;
9
9
  private conversationHistory;
10
10
  private region;
11
- constructor(modelName?: string, neurolink?: NeuroLink, region?: string);
11
+ constructor(modelName?: string, neurolink?: NeuroLink, region?: string, credentials?: {
12
+ accessKeyId?: string;
13
+ secretAccessKey?: string;
14
+ sessionToken?: string;
15
+ region?: string;
16
+ });
12
17
  /**
13
18
  * Perform initial health check to catch credential/connectivity issues early
14
19
  * This prevents the health check failure we saw in production logs
@@ -18,9 +18,10 @@ export class AmazonBedrockProvider extends BaseProvider {
18
18
  bedrockClient;
19
19
  conversationHistory = [];
20
20
  region;
21
- constructor(modelName, neurolink, region) {
21
+ constructor(modelName, neurolink, region, credentials) {
22
22
  super(modelName, "bedrock", neurolink);
23
- this.region = region || process.env.AWS_REGION || "us-east-1";
23
+ this.region =
24
+ credentials?.region || region || process.env.AWS_REGION || "us-east-1";
24
25
  logger.debug("[AmazonBedrockProvider] Starting constructor with extensive logging for debugging");
25
26
  // Log environment variables for debugging
26
27
  logger.debug(`[AmazonBedrockProvider] Environment check: AWS_REGION=${process.env.AWS_REGION || "undefined"}, AWS_ACCESS_KEY_ID=${process.env.AWS_ACCESS_KEY_ID ? "SET" : "undefined"}, AWS_SECRET_ACCESS_KEY=${process.env.AWS_SECRET_ACCESS_KEY ? "SET" : "undefined"}`);
@@ -35,6 +36,17 @@ export class AmazonBedrockProvider extends BaseProvider {
35
36
  // 2. Environment variables
36
37
  // 3. AWS config files
37
38
  // 4. Instance metadata
39
+ ...(credentials?.accessKeyId && credentials?.secretAccessKey
40
+ ? {
41
+ credentials: {
42
+ accessKeyId: credentials.accessKeyId,
43
+ secretAccessKey: credentials.secretAccessKey,
44
+ ...(credentials.sessionToken
45
+ ? { sessionToken: credentials.sessionToken }
46
+ : {}),
47
+ },
48
+ }
49
+ : {}),
38
50
  });
39
51
  logger.debug(`[AmazonBedrockProvider] Successfully created BedrockRuntimeClient with model: ${this.modelName}, region: ${this.region}`);
40
52
  }
@@ -17,7 +17,13 @@ export declare class AmazonSageMakerProvider extends BaseProvider {
17
17
  private sagemakerModel;
18
18
  private sagemakerConfig;
19
19
  private modelConfig;
20
- constructor(modelName?: string, endpointName?: string, region?: string, neurolink?: NeuroLink);
20
+ constructor(modelName?: string, endpointName?: string, region?: string, neurolink?: NeuroLink, credentials?: {
21
+ accessKeyId?: string;
22
+ secretAccessKey?: string;
23
+ sessionToken?: string;
24
+ region?: string;
25
+ endpoint?: string;
26
+ });
21
27
  protected getProviderName(): AIProviderName;
22
28
  protected getDefaultModel(): string;
23
29
  protected getAISDKModel(): LanguageModel;
@@ -17,11 +17,29 @@ export class AmazonSageMakerProvider extends BaseProvider {
17
17
  sagemakerModel;
18
18
  sagemakerConfig;
19
19
  modelConfig;
20
- constructor(modelName, endpointName, region, neurolink) {
20
+ constructor(modelName, endpointName, region, neurolink, credentials) {
21
21
  super(modelName, "sagemaker", neurolink);
22
22
  try {
23
- // Load and validate configuration
24
- this.sagemakerConfig = getSageMakerConfig(region);
23
+ // Load and validate configuration, then overlay per-request credentials
24
+ const baseConfig = getSageMakerConfig(credentials?.region ?? region);
25
+ this.sagemakerConfig = {
26
+ ...baseConfig,
27
+ ...(credentials?.region !== undefined && {
28
+ region: credentials.region,
29
+ }),
30
+ ...(credentials?.accessKeyId !== undefined && {
31
+ accessKeyId: credentials.accessKeyId,
32
+ }),
33
+ ...(credentials?.secretAccessKey !== undefined && {
34
+ secretAccessKey: credentials.secretAccessKey,
35
+ }),
36
+ ...(credentials?.sessionToken !== undefined && {
37
+ sessionToken: credentials.sessionToken,
38
+ }),
39
+ ...(credentials?.endpoint !== undefined && {
40
+ endpoint: credentials.endpoint,
41
+ }),
42
+ };
25
43
  this.modelConfig = getSageMakerModelConfig(endpointName || getDefaultSageMakerEndpoint());
26
44
  // Create the SageMaker LanguageModel implementation.
27
45
  // SageMakerLanguageModel implements SageMakerAsLanguageModel which is
@@ -41,7 +41,10 @@ export declare class AnthropicProvider extends BaseProvider {
41
41
  * @param sdk - Optional NeuroLink SDK instance
42
42
  * @param config - Optional configuration options for auth, subscription tier, and beta features
43
43
  */
44
- constructor(modelName?: string, sdk?: unknown, config?: AnthropicProviderConfig);
44
+ constructor(modelName?: string, sdk?: unknown, config?: AnthropicProviderConfig, credentials?: {
45
+ apiKey?: string;
46
+ oauthToken?: string;
47
+ });
45
48
  /**
46
49
  * Get authentication headers based on current auth method and configuration.
47
50
  *
@@ -219,14 +219,27 @@ export class AnthropicProvider extends BaseProvider {
219
219
  * @param sdk - Optional NeuroLink SDK instance
220
220
  * @param config - Optional configuration options for auth, subscription tier, and beta features
221
221
  */
222
- constructor(modelName, sdk, config) {
223
- // Pre-compute effective model with tier validation before calling super
224
- const oauthToken = config?.oauthToken ?? getOAuthToken();
222
+ constructor(modelName, sdk, config, credentials) {
223
+ // Pre-compute effective model with tier validation before calling super.
224
+ //
225
+ // When per-request credentials supply an apiKey (without oauthToken),
226
+ // force api_key auth — skip OAuth detection entirely so the caller's
227
+ // key is used rather than a stale OAuth token from ~/.neurolink/.
228
+ const forceApiKey = !!(credentials?.apiKey && !credentials?.oauthToken);
229
+ const oauthToken = forceApiKey
230
+ ? null
231
+ : ((credentials?.oauthToken
232
+ ? { accessToken: credentials.oauthToken }
233
+ : null) ??
234
+ config?.oauthToken ??
235
+ getOAuthToken());
225
236
  // Resolve auth method FIRST so that tier detection uses the chosen method.
226
237
  // If ANTHROPIC_AUTH_METHOD=api_key wins over an existing OAuth token, the
227
238
  // tier must reflect api_key mode (full model access) rather than the OAuth
228
239
  // token's subscription level.
229
- const authMethod = config?.authMethod ?? detectAuthMethod(oauthToken);
240
+ const authMethod = forceApiKey
241
+ ? "api_key"
242
+ : (config?.authMethod ?? detectAuthMethod(oauthToken));
230
243
  const subscriptionTier = config?.subscriptionTier ??
231
244
  (authMethod === "oauth" ? detectSubscriptionTier(oauthToken) : "api");
232
245
  const targetModel = modelName || getDefaultAnthropicModel();
@@ -306,7 +319,7 @@ export class AnthropicProvider extends BaseProvider {
306
319
  }
307
320
  else {
308
321
  // Traditional API key authentication
309
- const apiKeyToUse = config?.apiKey ?? getAnthropicApiKey();
322
+ const apiKeyToUse = credentials?.apiKey ?? config?.apiKey ?? getAnthropicApiKey();
310
323
  anthropic = createAnthropic({
311
324
  apiKey: apiKeyToUse,
312
325
  headers,
@@ -2,13 +2,14 @@ import { type LanguageModel } from "ai";
2
2
  import { type AIProviderName } from "../constants/enums.js";
3
3
  import { BaseProvider } from "../core/baseProvider.js";
4
4
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
5
+ import type { NeurolinkCredentials } from "../types/providers.js";
5
6
  export declare class AzureOpenAIProvider extends BaseProvider {
6
7
  private apiKey;
7
8
  private resourceName;
8
9
  private deployment;
9
10
  private apiVersion;
10
11
  private azureProvider;
11
- constructor(modelName?: string, sdk?: unknown);
12
+ constructor(modelName?: string, sdk?: unknown, _region?: string, credentials?: NeurolinkCredentials["azure"]);
12
13
  getProviderName(): AIProviderName;
13
14
  getDefaultModel(): string;
14
15
  /**
@@ -14,22 +14,27 @@ export class AzureOpenAIProvider extends BaseProvider {
14
14
  deployment;
15
15
  apiVersion;
16
16
  azureProvider;
17
- constructor(modelName, sdk) {
17
+ constructor(modelName, sdk, _region, credentials) {
18
18
  super(modelName, "azure", sdk);
19
- this.apiKey = process.env.AZURE_OPENAI_API_KEY || "";
19
+ this.apiKey = credentials?.apiKey || process.env.AZURE_OPENAI_API_KEY || "";
20
20
  const endpoint = process.env.AZURE_OPENAI_ENDPOINT || "";
21
- this.resourceName = endpoint
21
+ const envResourceName = endpoint
22
22
  .replace("https://", "")
23
23
  .replace(/\/+$/, "") // Remove trailing slashes
24
24
  .replace(".openai.azure.com", "")
25
25
  .replace(".cognitiveservices.azure.com", "");
26
+ this.resourceName = credentials?.resourceName || envResourceName;
26
27
  this.deployment =
27
- modelName ||
28
+ credentials?.deploymentName ||
29
+ modelName ||
28
30
  process.env.AZURE_OPENAI_MODEL ||
29
31
  process.env.AZURE_OPENAI_DEPLOYMENT ||
30
32
  process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
31
33
  "gpt-4o";
32
- this.apiVersion = process.env.AZURE_API_VERSION || APIVersions.AZURE_LATEST;
34
+ this.apiVersion =
35
+ credentials?.apiVersion ||
36
+ process.env.AZURE_API_VERSION ||
37
+ APIVersions.AZURE_LATEST;
33
38
  // Configuration validation - now using consolidated utility
34
39
  if (!this.apiKey) {
35
40
  validateApiKey(createAzureAPIKeyConfig());
@@ -36,7 +36,10 @@ import type { ZodUnknownSchema } from "../types/typeAliases.js";
36
36
  * Solution: Simplify schema or use disableTools: true
37
37
  */
38
38
  export declare class GoogleAIStudioProvider extends BaseProvider {
39
- constructor(modelName?: string, sdk?: unknown);
39
+ private credentials?;
40
+ constructor(modelName?: string, sdk?: unknown, credentials?: {
41
+ apiKey?: string;
42
+ });
40
43
  getProviderName(): AIProviderName;
41
44
  getDefaultModel(): string;
42
45
  /**
@@ -65,8 +65,10 @@ async function createGoogleGenAIClient(apiKey) {
65
65
  * Solution: Simplify schema or use disableTools: true
66
66
  */
67
67
  export class GoogleAIStudioProvider extends BaseProvider {
68
- constructor(modelName, sdk) {
68
+ credentials;
69
+ constructor(modelName, sdk, credentials) {
69
70
  super(modelName, "google-ai", sdk);
71
+ this.credentials = credentials;
70
72
  logger.debug("GoogleAIStudioProvider initialized", {
71
73
  model: this.modelName,
72
74
  provider: this.providerName,
@@ -432,11 +434,6 @@ export class GoogleAIStudioProvider extends BaseProvider {
432
434
  }
433
435
  this.validateStreamOptions(options);
434
436
  const startTime = Date.now();
435
- const apiKey = this.getApiKey();
436
- // Ensure environment variable is set for @ai-sdk/google
437
- if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
438
- process.env.GOOGLE_GENERATIVE_AI_API_KEY = apiKey;
439
- }
440
437
  const model = await this.getAISDKModelWithMiddleware(options);
441
438
  const timeout = this.getTimeout(options);
442
439
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
@@ -1214,7 +1211,9 @@ export class GoogleAIStudioProvider extends BaseProvider {
1214
1211
  }
1215
1212
  }
1216
1213
  getApiKey() {
1217
- const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
1214
+ const apiKey = this.credentials?.apiKey ||
1215
+ process.env.GOOGLE_AI_API_KEY ||
1216
+ process.env.GOOGLE_GENERATIVE_AI_API_KEY;
1218
1217
  if (!apiKey) {
1219
1218
  throw new AuthenticationError("GOOGLE_AI_API_KEY or GOOGLE_GENERATIVE_AI_API_KEY environment variable is not set", this.providerName);
1220
1219
  }
@@ -3,6 +3,7 @@ import type { ZodType } from "zod";
3
3
  import { type AIProviderName } from "../constants/enums.js";
4
4
  import { BaseProvider } from "../core/baseProvider.js";
5
5
  import type { EnhancedGenerateResult, TextGenerationOptions } from "../types/generateTypes.js";
6
+ import type { NeurolinkCredentials } from "../types/providers.js";
6
7
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
7
8
  /**
8
9
  * Vertex Model Aliases
@@ -70,13 +71,14 @@ export declare class GoogleVertexProvider extends BaseProvider {
70
71
  private location;
71
72
  private registeredTools;
72
73
  private toolContext;
74
+ private credentials;
73
75
  private static modelConfigCache;
74
76
  private static modelConfigCacheTime;
75
77
  private static readonly CACHE_DURATION;
76
78
  private static readonly MAX_CACHE_SIZE;
77
79
  private static maxTokensCache;
78
80
  private static maxTokensCacheTime;
79
- constructor(modelName?: string, _providerName?: string, sdk?: unknown, region?: string);
81
+ constructor(modelName?: string, _providerName?: string, sdk?: unknown, region?: string, credentials?: NeurolinkCredentials["vertex"]);
80
82
  protected getProviderName(): AIProviderName;
81
83
  protected getDefaultModel(): string;
82
84
  /**
@@ -96,9 +96,9 @@ const hasGoogleCredentials = () => {
96
96
  // Module-level cache for runtime-created credentials file to avoid per-request writes
97
97
  let cachedCredentialsPath = null;
98
98
  // Enhanced Vertex settings creation with authentication fallback and proxy support
99
- const createVertexSettings = async (region) => {
100
- const location = region || getVertexLocation();
101
- const project = getVertexProjectId();
99
+ const createVertexSettings = async (region, credentials) => {
100
+ const location = credentials?.location || region || getVertexLocation();
101
+ const project = credentials?.projectId || getVertexProjectId();
102
102
  const baseSettings = {
103
103
  project,
104
104
  location,
@@ -115,6 +115,35 @@ const createVertexSettings = async (region) => {
115
115
  project,
116
116
  });
117
117
  }
118
+ // ── Per-request credentials (highest priority, never touches module-level cache) ──
119
+ if (credentials) {
120
+ // Express Mode: API key auth (Vertex Express)
121
+ if (credentials.apiKey) {
122
+ return { ...baseSettings, apiKey: credentials.apiKey };
123
+ }
124
+ // Resolve client_email / private_key from inline fields or serviceAccountKey JSON
125
+ const resolvedClientEmail = credentials.clientEmail ||
126
+ (credentials.serviceAccountKey
127
+ ? JSON.parse(credentials.serviceAccountKey)
128
+ .client_email
129
+ : undefined);
130
+ const resolvedPrivateKey = credentials.privateKey ||
131
+ (credentials.serviceAccountKey
132
+ ? JSON.parse(credentials.serviceAccountKey)
133
+ .private_key
134
+ : undefined);
135
+ if (resolvedClientEmail && resolvedPrivateKey) {
136
+ return {
137
+ ...baseSettings,
138
+ googleAuthOptions: {
139
+ credentials: {
140
+ client_email: resolvedClientEmail,
141
+ private_key: resolvedPrivateKey.replace(/\\n/g, "\n"),
142
+ },
143
+ },
144
+ };
145
+ }
146
+ }
118
147
  // 🎯 OPTION 2: Create credentials file from environment variables at runtime
119
148
  // This solves the problem where GOOGLE_APPLICATION_CREDENTIALS exists in ZSHRC locally
120
149
  // but the file doesn't exist on production servers
@@ -290,20 +319,21 @@ const createVertexSettings = async (region) => {
290
319
  return baseSettings;
291
320
  };
292
321
  // Create Anthropic-specific Vertex settings with the same authentication and proxy support
293
- const createVertexAnthropicSettings = async (region) => {
322
+ const createVertexAnthropicSettings = async (region, credentials) => {
294
323
  // The @ai-sdk/google-vertex SDK constructs Anthropic URLs as:
295
324
  // https://{location}-aiplatform.googleapis.com/...
296
325
  // When location is "global", this creates "https://global-aiplatform.googleapis.com"
297
326
  // which is invalid. The correct global endpoint omits the region prefix entirely.
298
327
  // Since the SDK doesn't handle this, redirect "global" to "us-east5" for Anthropic.
299
328
  const anthropicRegion = !region || region === "global" ? "us-east5" : region;
300
- const baseVertexSettings = await createVertexSettings(anthropicRegion);
329
+ const baseVertexSettings = await createVertexSettings(anthropicRegion, credentials);
301
330
  // GoogleVertexAnthropicProviderSettings extends GoogleVertexProviderSettings
302
331
  // so we can use the same settings with proper typing
303
332
  return {
304
333
  project: baseVertexSettings.project,
305
334
  location: baseVertexSettings.location,
306
335
  fetch: baseVertexSettings.fetch,
336
+ ...(baseVertexSettings.apiKey && { apiKey: baseVertexSettings.apiKey }),
307
337
  ...(baseVertexSettings.googleAuthOptions && {
308
338
  googleAuthOptions: baseVertexSettings.googleAuthOptions,
309
339
  }),
@@ -397,6 +427,7 @@ export class GoogleVertexProvider extends BaseProvider {
397
427
  location;
398
428
  registeredTools = new Map();
399
429
  toolContext = {};
430
+ credentials;
400
431
  // Memory-managed cache for model configuration lookups to avoid repeated calls
401
432
  // Uses WeakMap for automatic cleanup and bounded LRU for recently used models
402
433
  static modelConfigCache = new Map();
@@ -406,15 +437,17 @@ export class GoogleVertexProvider extends BaseProvider {
406
437
  // Memory-managed cache for maxTokens handling decisions to optimize streaming performance
407
438
  static maxTokensCache = new Map();
408
439
  static maxTokensCacheTime = 0;
409
- constructor(modelName, _providerName, sdk, region) {
440
+ constructor(modelName, _providerName, sdk, region, credentials) {
410
441
  super(modelName, "vertex", sdk);
442
+ this.credentials = credentials;
411
443
  // Validate Google Cloud credentials - now using consolidated utility
412
- if (!hasGoogleCredentials()) {
444
+ // Skip env-var validation when per-request credentials are provided
445
+ if (!credentials && !hasGoogleCredentials()) {
413
446
  validateApiKey(createGoogleAuthConfig());
414
447
  }
415
448
  // Initialize Google Cloud configuration
416
- this.projectId = getVertexProjectId();
417
- this.location = region || getVertexLocation();
449
+ this.projectId = credentials?.projectId || getVertexProjectId();
450
+ this.location = credentials?.location || region || getVertexLocation();
418
451
  logger.debug("[GoogleVertexProvider] Constructor initialized", {
419
452
  regionParam: region,
420
453
  resolvedLocation: this.location,
@@ -549,7 +582,7 @@ export class GoogleVertexProvider extends BaseProvider {
549
582
  message: "Starting Vertex settings creation with network configuration analysis",
550
583
  });
551
584
  try {
552
- const vertexSettings = await createVertexSettings(this.location);
585
+ const vertexSettings = await createVertexSettings(this.location, this.credentials);
553
586
  const vertexSettingsEndTime = process.hrtime.bigint();
554
587
  const vertexSettingsDurationNs = vertexSettingsEndTime - vertexSettingsStartTime;
555
588
  logger.debug(`[GoogleVertexProvider] ✅ LOG_POINT_V009_VERTEX_SETTINGS_SUCCESS`, {
@@ -1084,8 +1117,11 @@ export class GoogleVertexProvider extends BaseProvider {
1084
1117
  * Create @google/genai client configured for Vertex AI
1085
1118
  */
1086
1119
  async createVertexGenAIClient(regionOverride) {
1087
- const project = getVertexProjectId();
1088
- const location = regionOverride || this.location || getVertexLocation();
1120
+ const project = this.credentials?.projectId || getVertexProjectId();
1121
+ const location = this.credentials?.location ||
1122
+ regionOverride ||
1123
+ this.location ||
1124
+ getVertexLocation();
1089
1125
  const mod = await import("@google/genai");
1090
1126
  const ctor = mod.GoogleGenAI;
1091
1127
  if (!ctor) {
@@ -1099,7 +1135,42 @@ export class GoogleVertexProvider extends BaseProvider {
1099
1135
  });
1100
1136
  }
1101
1137
  const Ctor = ctor;
1102
- // Use vertexai mode with project and location
1138
+ // Per-request credentials: Express Mode (API key)
1139
+ if (this.credentials?.apiKey) {
1140
+ // Cast via unknown because GoogleGenAIClass union doesn't include apiKey+vertexai
1141
+ return new Ctor({
1142
+ vertexai: true,
1143
+ project,
1144
+ location,
1145
+ apiKey: this.credentials.apiKey,
1146
+ });
1147
+ }
1148
+ // Per-request credentials: inline service account
1149
+ if (this.credentials?.clientEmail || this.credentials?.serviceAccountKey) {
1150
+ const clientEmail = this.credentials.clientEmail ||
1151
+ (this.credentials.serviceAccountKey
1152
+ ? JSON.parse(this.credentials.serviceAccountKey).client_email
1153
+ : undefined);
1154
+ const privateKey = this.credentials.privateKey ||
1155
+ (this.credentials.serviceAccountKey
1156
+ ? JSON.parse(this.credentials.serviceAccountKey).private_key
1157
+ : undefined);
1158
+ if (clientEmail && privateKey) {
1159
+ // Cast via unknown because GoogleGenAIClass union doesn't include googleAuthOptions
1160
+ return new Ctor({
1161
+ vertexai: true,
1162
+ project,
1163
+ location,
1164
+ googleAuthOptions: {
1165
+ credentials: {
1166
+ client_email: clientEmail,
1167
+ private_key: privateKey.replace(/\\n/g, "\n"),
1168
+ },
1169
+ },
1170
+ });
1171
+ }
1172
+ }
1173
+ // Fallback: env-var / ADC auth
1103
1174
  return new Ctor({
1104
1175
  vertexai: true,
1105
1176
  project,
@@ -1849,7 +1920,15 @@ export class GoogleVertexProvider extends BaseProvider {
1849
1920
  return null;
1850
1921
  }
1851
1922
  // 2. Authentication Validation
1852
- const authValidation = await this.validateVertexAuthentication();
1923
+ // Per-request credentials bypass env-var auth checks entirely
1924
+ const hasPerRequestAuth = this.credentials &&
1925
+ (this.credentials.apiKey ||
1926
+ this.credentials.clientEmail ||
1927
+ this.credentials.privateKey ||
1928
+ this.credentials.serviceAccountKey);
1929
+ const authValidation = hasPerRequestAuth
1930
+ ? { isValid: true, method: "per_request_credentials", issues: [] }
1931
+ : await this.validateVertexAuthentication();
1853
1932
  if (!authValidation.isValid) {
1854
1933
  logger.error("[GoogleVertexProvider] ❌ Authentication validation failed", {
1855
1934
  validationId,
@@ -1927,7 +2006,7 @@ export class GoogleVertexProvider extends BaseProvider {
1927
2006
  projectId: projectValidation.projectId,
1928
2007
  region: projectValidation.region,
1929
2008
  });
1930
- const vertexAnthropicSettings = await createVertexAnthropicSettings(this.location);
2009
+ const vertexAnthropicSettings = await createVertexAnthropicSettings(this.location, this.credentials);
1931
2010
  // 7. Settings Validation
1932
2011
  if (!vertexAnthropicSettings.project ||
1933
2012
  !vertexAnthropicSettings.location) {
@@ -2844,7 +2923,7 @@ export class GoogleVertexProvider extends BaseProvider {
2844
2923
  });
2845
2924
  try {
2846
2925
  // Create the Vertex provider with current settings
2847
- const vertexSettings = await createVertexSettings(this.location);
2926
+ const vertexSettings = await createVertexSettings(this.location, this.credentials);
2848
2927
  const vertex = createVertex(vertexSettings);
2849
2928
  // Get the text embedding model
2850
2929
  const embeddingModel = vertex.textEmbeddingModel(embeddingModelName);
@@ -2883,7 +2962,7 @@ export class GoogleVertexProvider extends BaseProvider {
2883
2962
  count: texts.length,
2884
2963
  });
2885
2964
  try {
2886
- const vertexSettings = await createVertexSettings(this.location);
2965
+ const vertexSettings = await createVertexSettings(this.location, this.credentials);
2887
2966
  const vertex = createVertex(vertexSettings);
2888
2967
  const embeddingModel = vertex.textEmbeddingModel(embeddingModelName);
2889
2968
  const result = await embedMany({
@@ -2,6 +2,7 @@ import { type LanguageModel, type Schema } from "ai";
2
2
  import type { ZodType } from "zod";
3
3
  import type { AIProviderName } from "../constants/enums.js";
4
4
  import { BaseProvider } from "../core/baseProvider.js";
5
+ import type { NeurolinkCredentials } from "../types/providers.js";
5
6
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
6
7
  /**
7
8
  * HuggingFace Provider - BaseProvider Implementation
@@ -9,7 +10,7 @@ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
9
10
  */
10
11
  export declare class HuggingFaceProvider extends BaseProvider {
11
12
  private model;
12
- constructor(modelName?: string);
13
+ constructor(modelName?: string, _sdk?: unknown, credentials?: NeurolinkCredentials["huggingFace"]);
13
14
  /**
14
15
  * HuggingFace Tool Calling Support (Enhanced 2025)
15
16
  *
@@ -14,21 +14,21 @@ const getHuggingFaceApiKey = () => {
14
14
  const getDefaultHuggingFaceModel = () => {
15
15
  return getProviderModel("HUGGINGFACE_MODEL", "microsoft/DialoGPT-medium");
16
16
  };
17
- // Note: hasHuggingFaceCredentials now directly imported from consolidated utility
17
+ // Note: hasNeurolinkCredentials["huggingFace"] now directly imported from consolidated utility
18
18
  /**
19
19
  * HuggingFace Provider - BaseProvider Implementation
20
20
  * Using AI SDK with HuggingFace's OpenAI-compatible endpoint
21
21
  */
22
22
  export class HuggingFaceProvider extends BaseProvider {
23
23
  model;
24
- constructor(modelName) {
24
+ constructor(modelName, _sdk, credentials) {
25
25
  super(modelName, "huggingface");
26
26
  // Get API key and validate
27
- const apiKey = getHuggingFaceApiKey();
27
+ const apiKey = credentials?.apiKey ?? getHuggingFaceApiKey();
28
28
  // Create HuggingFace provider using unified router endpoint (2025) with proxy support
29
29
  const huggingface = createOpenAI({
30
30
  apiKey: apiKey,
31
- baseURL: "https://router.huggingface.co/v1",
31
+ baseURL: credentials?.baseURL ?? "https://router.huggingface.co/v1",
32
32
  fetch: createProxyFetch(),
33
33
  });
34
34
  // Initialize model
@@ -9,10 +9,14 @@ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
9
9
  */
10
10
  export declare class LiteLLMProvider extends BaseProvider {
11
11
  private model;
12
+ private credentials?;
12
13
  private static modelsCache;
13
14
  private static modelsCacheTime;
14
15
  private static readonly MODELS_CACHE_DURATION;
15
- constructor(modelName?: string, sdk?: unknown);
16
+ constructor(modelName?: string, sdk?: unknown, _region?: string, credentials?: {
17
+ apiKey?: string;
18
+ baseURL?: string;
19
+ });
16
20
  protected getProviderName(): AIProviderName;
17
21
  protected getDefaultModel(): string;
18
22
  /**
@@ -42,12 +42,15 @@ const getDefaultLiteLLMModel = () => {
42
42
  */
43
43
  export class LiteLLMProvider extends BaseProvider {
44
44
  model;
45
+ credentials;
45
46
  // Cache for available models to avoid repeated API calls
46
47
  static modelsCache = [];
47
48
  static modelsCacheTime = 0;
48
49
  static MODELS_CACHE_DURATION = 10 * 60 * 1000; // 10 minutes
49
- constructor(modelName, sdk) {
50
+ constructor(modelName, sdk, _region, credentials) {
50
51
  super(modelName, "litellm", sdk);
52
+ // Store per-request credentials for use in embed/embedMany/fetchModelsFromAPI
53
+ this.credentials = credentials;
51
54
  // Initialize LiteLLM using OpenAI SDK with explicit configuration
52
55
  const config = getLiteLLMConfig();
53
56
  // Create OpenAI SDK instance configured for LiteLLM proxy
@@ -56,8 +59,8 @@ export class LiteLLMProvider extends BaseProvider {
56
59
  // with a custom baseURL and apiKey. This ensures all requests are routed through the LiteLLM
57
60
  // proxy, allowing access to multiple models and custom authentication.
58
61
  const customOpenAI = createOpenAI({
59
- baseURL: config.baseURL,
60
- apiKey: config.apiKey,
62
+ baseURL: credentials?.baseURL ?? config.baseURL,
63
+ apiKey: credentials?.apiKey ?? config.apiKey,
61
64
  fetch: createProxyFetch(),
62
65
  });
63
66
  this.model = customOpenAI.chat(this.modelName || getDefaultLiteLLMModel());
@@ -374,8 +377,8 @@ export class LiteLLMProvider extends BaseProvider {
374
377
  process.env.LITELLM_EMBEDDING_MODEL ||
375
378
  "gemini-embedding-001";
376
379
  const customOpenAI = createOpenAI({
377
- baseURL: config.baseURL,
378
- apiKey: config.apiKey,
380
+ baseURL: this.credentials?.baseURL ?? config.baseURL,
381
+ apiKey: this.credentials?.apiKey ?? config.apiKey,
379
382
  fetch: createProxyFetch(),
380
383
  });
381
384
  const embeddingModel = customOpenAI.textEmbeddingModel(embeddingModelName);
@@ -394,8 +397,8 @@ export class LiteLLMProvider extends BaseProvider {
394
397
  process.env.LITELLM_EMBEDDING_MODEL ||
395
398
  "gemini-embedding-001";
396
399
  const customOpenAI = createOpenAI({
397
- baseURL: config.baseURL,
398
- apiKey: config.apiKey,
400
+ baseURL: this.credentials?.baseURL ?? config.baseURL,
401
+ apiKey: this.credentials?.apiKey ?? config.apiKey,
399
402
  fetch: createProxyFetch(),
400
403
  });
401
404
  const embeddingModel = customOpenAI.textEmbeddingModel(embeddingModelName);
@@ -458,7 +461,9 @@ export class LiteLLMProvider extends BaseProvider {
458
461
  async fetchModelsFromAPI() {
459
462
  const functionTag = "LiteLLMProvider.fetchModelsFromAPI";
460
463
  const config = getLiteLLMConfig();
461
- const modelsUrl = `${config.baseURL}/v1/models`;
464
+ const resolvedBaseURL = this.credentials?.baseURL ?? config.baseURL;
465
+ const resolvedApiKey = this.credentials?.apiKey ?? config.apiKey;
466
+ const modelsUrl = `${resolvedBaseURL}/v1/models`;
462
467
  const controller = new AbortController();
463
468
  const timeoutId = setTimeout(() => controller.abort(), 5000); // 5 second timeout
464
469
  try {
@@ -467,7 +472,7 @@ export class LiteLLMProvider extends BaseProvider {
467
472
  const response = await proxyFetch(modelsUrl, {
468
473
  method: "GET",
469
474
  headers: {
470
- Authorization: `Bearer ${config.apiKey}`,
475
+ Authorization: `Bearer ${resolvedApiKey}`,
471
476
  "Content-Type": "application/json",
472
477
  },
473
478
  signal: controller.signal,
@@ -1,6 +1,7 @@
1
1
  import { type LanguageModel } from "ai";
2
2
  import type { AIProviderName } from "../constants/enums.js";
3
3
  import { BaseProvider } from "../core/baseProvider.js";
4
+ import type { NeurolinkCredentials } from "../types/providers.js";
4
5
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
5
6
  import type { ValidationSchema } from "../types/typeAliases.js";
6
7
  /**
@@ -9,7 +10,7 @@ import type { ValidationSchema } from "../types/typeAliases.js";
9
10
  */
10
11
  export declare class MistralProvider extends BaseProvider {
11
12
  private model;
12
- constructor(modelName?: string, sdk?: unknown);
13
+ constructor(modelName?: string, sdk?: unknown, _region?: string, credentials?: NeurolinkCredentials["mistral"]);
13
14
  protected executeStream(options: StreamOptions, _analysisSchema?: ValidationSchema): Promise<StreamResult>;
14
15
  getProviderName(): AIProviderName;
15
16
  getDefaultModel(): string;
@@ -23,14 +23,14 @@ const getDefaultMistralModel = () => {
23
23
  */
24
24
  export class MistralProvider extends BaseProvider {
25
25
  model;
26
- constructor(modelName, sdk) {
26
+ constructor(modelName, sdk, _region, credentials) {
27
27
  // Type guard for NeuroLink parameter validation
28
28
  const validatedNeurolink = sdk && typeof sdk === "object" && "getInMemoryServers" in sdk
29
29
  ? sdk
30
30
  : undefined;
31
31
  super(modelName, "mistral", validatedNeurolink);
32
32
  // Initialize Mistral model with API key validation and proxy support
33
- const apiKey = getMistralApiKey();
33
+ const apiKey = credentials?.apiKey ?? getMistralApiKey();
34
34
  const mistral = createMistral({
35
35
  apiKey: apiKey,
36
36
  fetch: createProxyFetch(),