@juspay/neurolink 9.58.0 → 9.59.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -968,6 +968,40 @@ export declare class NeuroLink {
968
968
  * @see {@link NeuroLink.executeTool} for events related to tool execution
969
969
  */
970
970
  getEventEmitter(): TypedEventEmitter<NeuroLinkEvents>;
971
+ /**
972
+ * Curator P1-1: synchronous credential health check for a single provider.
973
+ *
974
+ * Drives a tiny real call against the provider (1-token completion or
975
+ * `/models` listing depending on provider) to confirm the configured
976
+ * credentials are valid. Useful at startup so a service can refuse to
977
+ * boot if its primary provider's credentials are broken instead of
978
+ * discovering the problem on first user request.
979
+ *
980
+ * @example
981
+ * ```ts
982
+ * const health = await neurolink.checkCredentials({ provider: "litellm" });
983
+ * if (health.status !== "ok") {
984
+ * throw new Error(`provider not ready: ${health.detail}`);
985
+ * }
986
+ * ```
987
+ *
988
+ * @param input - the provider to check
989
+ * @returns `{ provider, status, detail }`. Possible status values:
990
+ * - `"ok"` — credentials valid and provider reachable
991
+ * - `"missing"` — required env / credentials not configured
992
+ * - `"expired"` — credentials present but rejected (401/403)
993
+ * - `"denied"` — credentials valid but team not whitelisted for any model
994
+ * - `"network"` — provider unreachable (timeout, ECONNREFUSED, DNS)
995
+ * - `"unknown"` — other error; consult `detail`
996
+ */
997
+ checkCredentials(input: {
998
+ provider: string;
999
+ model?: string;
1000
+ }): Promise<{
1001
+ provider: string;
1002
+ status: "ok" | "missing" | "expired" | "denied" | "network" | "unknown";
1003
+ detail: string;
1004
+ }>;
971
1005
  /**
972
1006
  * Emit tool start event with execution tracking
973
1007
  * @param toolName - Name of the tool being executed
@@ -52,7 +52,7 @@ import { resolveDynamicArgument } from "./dynamic/dynamicResolver.js";
52
52
  import { initializeHippocampus } from "./memory/hippocampusInitializer.js";
53
53
  import { createMemoryRetrievalTools } from "./memory/memoryRetrievalTools.js";
54
54
  import { getMetricsAggregator, MetricsAggregator, } from "./observability/metricsAggregator.js";
55
- import { SpanStatus, SpanType, CircuitBreakerOpenError, ConversationMemoryError, AuthenticationError, AuthorizationError, InvalidModelError, } from "./types/index.js";
55
+ import { SpanStatus, SpanType, CircuitBreakerOpenError, ConversationMemoryError, AuthenticationError, AuthorizationError, InvalidModelError, ModelAccessDeniedError, } from "./types/index.js";
56
56
  import { SpanSerializer } from "./observability/utils/spanSerializer.js";
57
57
  import { flushOpenTelemetry, getLangfuseHealthStatus, initializeOpenTelemetry, isOpenTelemetryInitialized, runWithCurrentLangfuseContext, setLangfuseContext, shutdownOpenTelemetry, } from "./services/server/ai/observability/instrumentation.js";
58
58
  import { TaskManager } from "./tasks/taskManager.js";
@@ -187,6 +187,13 @@ function isNonRetryableProviderError(error) {
187
187
  if (error instanceof AuthorizationError) {
188
188
  return true;
189
189
  }
190
+ // Curator P1-1: model-access-denied is permanent for the (provider, model)
191
+ // pair until the team whitelist changes. Retrying with the same config
192
+ // would just waste a second roundtrip. Caller / fallback-orchestrator
193
+ // should pick a different model.
194
+ if (error instanceof ModelAccessDeniedError) {
195
+ return true;
196
+ }
190
197
  // Check for HTTP status codes on error objects (e.g., from Vercel AI SDK)
191
198
  if (error && typeof error === "object") {
192
199
  const err = error;
@@ -6087,6 +6094,87 @@ Current user's request: ${currentInput}`;
6087
6094
  getEventEmitter() {
6088
6095
  return this.emitter;
6089
6096
  }
6097
+ /**
6098
+ * Curator P1-1: synchronous credential health check for a single provider.
6099
+ *
6100
+ * Drives a tiny real call against the provider (1-token completion or
6101
+ * `/models` listing depending on provider) to confirm the configured
6102
+ * credentials are valid. Useful at startup so a service can refuse to
6103
+ * boot if its primary provider's credentials are broken instead of
6104
+ * discovering the problem on first user request.
6105
+ *
6106
+ * @example
6107
+ * ```ts
6108
+ * const health = await neurolink.checkCredentials({ provider: "litellm" });
6109
+ * if (health.status !== "ok") {
6110
+ * throw new Error(`provider not ready: ${health.detail}`);
6111
+ * }
6112
+ * ```
6113
+ *
6114
+ * @param input - the provider to check
6115
+ * @returns `{ provider, status, detail }`. Possible status values:
6116
+ * - `"ok"` — credentials valid and provider reachable
6117
+ * - `"missing"` — required env / credentials not configured
6118
+ * - `"expired"` — credentials present but rejected (401/403)
6119
+ * - `"denied"` — credentials valid but team not whitelisted for any model
6120
+ * - `"network"` — provider unreachable (timeout, ECONNREFUSED, DNS)
6121
+ * - `"unknown"` — other error; consult `detail`
6122
+ */
6123
+ async checkCredentials(input) {
6124
+ const { provider, model } = input;
6125
+ const probeText = "ping";
6126
+ try {
6127
+ // 1-token probe is cheap, exercises auth + routing without much cost.
6128
+ await this.generate({
6129
+ provider: provider,
6130
+ ...(model && { model }),
6131
+ input: { text: probeText },
6132
+ maxTokens: 16,
6133
+ disableTools: true,
6134
+ });
6135
+ return { provider, status: "ok", detail: "credentials valid" };
6136
+ }
6137
+ catch (err) {
6138
+ const msg = err instanceof Error ? err.message : String(err);
6139
+ const lower = msg.toLowerCase();
6140
+ if (err instanceof ModelAccessDeniedError) {
6141
+ return {
6142
+ provider,
6143
+ status: "denied",
6144
+ detail: msg,
6145
+ };
6146
+ }
6147
+ if (lower.includes("authentication") ||
6148
+ lower.includes("401") ||
6149
+ lower.includes("invalid api key") ||
6150
+ lower.includes("incorrect api key") ||
6151
+ lower.includes("api_key_invalid") ||
6152
+ lower.includes("token has expired") ||
6153
+ lower.includes("expired credentials")) {
6154
+ return { provider, status: "expired", detail: msg };
6155
+ }
6156
+ if (lower.includes("not configured") ||
6157
+ lower.includes("missing api") ||
6158
+ lower.includes("api key is required") ||
6159
+ lower.includes("no api key") ||
6160
+ lower.includes("application default credentials") ||
6161
+ lower.includes("google_application_credentials") ||
6162
+ lower.includes("project_id") ||
6163
+ lower.includes("default credentials") ||
6164
+ lower.includes("service account")) {
6165
+ return { provider, status: "missing", detail: msg };
6166
+ }
6167
+ if (lower.includes("econnrefused") ||
6168
+ lower.includes("enotfound") ||
6169
+ lower.includes("could not resolve") ||
6170
+ lower.includes("timeout") ||
6171
+ lower.includes("network") ||
6172
+ lower.includes("cannot connect")) {
6173
+ return { provider, status: "network", detail: msg };
6174
+ }
6175
+ return { provider, status: "unknown", detail: msg };
6176
+ }
6177
+ }
6090
6178
  // ========================================
6091
6179
  // ENHANCED: Tool Event Emission API
6092
6180
  // ========================================
@@ -5,7 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
5
5
  import { DEFAULT_MAX_STEPS } from "../core/constants.js";
6
6
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
7
7
  import { createProxyFetch } from "../proxy/proxyFetch.js";
8
- import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, RateLimitError, } from "../types/index.js";
8
+ import { AuthenticationError, InvalidModelError, ModelAccessDeniedError, NetworkError, ProviderError, RateLimitError, isModelAccessDeniedMessage, parseAllowedModels, } from "../types/index.js";
9
9
  import { isAbortError } from "../utils/errorHandling.js";
10
10
  import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
11
11
  import { logger } from "../utils/logger.js";
@@ -100,6 +100,17 @@ export class LiteLLMProvider extends BaseProvider {
100
100
  return new NetworkError("LiteLLM proxy server not available. Please start the LiteLLM proxy server at " +
101
101
  `${process.env.LITELLM_BASE_URL || "http://localhost:4000"}`, this.providerName);
102
102
  }
103
+ // Curator P1-1: detect "team not allowed to access model" responses
104
+ // and surface as ModelAccessDeniedError with the allowed_models array
105
+ // parsed from the body. Must run before the generic "API key" check
106
+ // because LiteLLM phrases this as a 403 distinct from auth.
107
+ if (isModelAccessDeniedMessage(errorRecord.message)) {
108
+ return new ModelAccessDeniedError(errorRecord.message, {
109
+ provider: this.providerName,
110
+ requestedModel: this.modelName,
111
+ allowedModels: parseAllowedModels(errorRecord.message),
112
+ });
113
+ }
103
114
  if (errorRecord.message.includes("API_KEY_INVALID") ||
104
115
  errorRecord.message.includes("Invalid API key")) {
105
116
  return new AuthenticationError("Invalid LiteLLM configuration. Please check your LITELLM_API_KEY environment variable.", this.providerName);
@@ -235,10 +235,27 @@ export class OpenAIProvider extends BaseProvider {
235
235
  const errorType = errorObj?.type && typeof errorObj.type === "string"
236
236
  ? errorObj.type
237
237
  : undefined;
238
+ const statusCode = typeof errorObj?.status === "number"
239
+ ? errorObj.status
240
+ : typeof errorObj?.statusCode === "number"
241
+ ? errorObj.statusCode
242
+ : undefined;
243
+ // Curator P1-1 / Reviewer Finding #4: only the explicit auth markers
244
+ // map to AuthenticationError. Earlier we treated every
245
+ // `invalid_request_error` as an auth failure — that's OpenAI's catch-all
246
+ // for any bad request (unsupported parameter, malformed JSON, etc.) and
247
+ // mislabelled them as "invalid API key". Use credential-specific
248
+ // signals only.
238
249
  if (message.includes("API_KEY_INVALID") ||
239
250
  message.includes("Invalid API key") ||
240
- errorType === "invalid_api_key") {
241
- return new AuthenticationError("Invalid OpenAI API key. Please check your OPENAI_API_KEY environment variable.", this.providerName);
251
+ message.includes("Incorrect API key") ||
252
+ message.includes("invalid_api_key") ||
253
+ errorType === "invalid_api_key" ||
254
+ statusCode === 401) {
255
+ return new AuthenticationError(message.includes("Incorrect API key") ||
256
+ message.includes("Invalid API key")
257
+ ? message
258
+ : "Invalid OpenAI API key. Please check your OPENAI_API_KEY environment variable.", this.providerName);
242
259
  }
243
260
  if (message.includes("rate limit") || errorType === "rate_limit_error") {
244
261
  return new RateLimitError("OpenAI rate limit exceeded. Please try again later.", this.providerName);
@@ -104,3 +104,45 @@ export declare class ModelAccessError extends BaseError {
104
104
  readonly requiredTier: string;
105
105
  constructor(model: string, tier: string, requiredTier: string);
106
106
  }
107
+ /**
108
+ * Curator P1-1: thrown when a provider rejects a request because the
109
+ * caller's team / API key is not whitelisted for the requested model.
110
+ *
111
+ * LiteLLM's `team not allowed to access model. This team can only access
112
+ * models=['glm-latest', 'kimi-latest', ...]` is the canonical example —
113
+ * the list is parsed off the error body so callers / fallback orchestrators
114
+ * can choose a whitelisted alternative without scraping strings.
115
+ */
116
+ export declare class ModelAccessDeniedError extends ProviderError {
117
+ readonly requestedModel: string | undefined;
118
+ readonly allowedModels: string[] | undefined;
119
+ readonly code: "MODEL_ACCESS_DENIED";
120
+ constructor(message: string, options?: {
121
+ provider?: string;
122
+ requestedModel?: string;
123
+ allowedModels?: string[];
124
+ });
125
+ }
126
+ /**
127
+ * Parse the `allowed_models` array out of a provider error message body.
128
+ * Currently targets the LiteLLM team-whitelist response shape:
129
+ *
130
+ * "team not allowed to access model. This team can only access
131
+ * models=['glm-latest', 'kimi-latest', 'open-large']"
132
+ *
133
+ * Implementation note: deliberately uses `indexOf`/`slice` instead of a
134
+ * single `/models\s*=\s*\[([^\]]*)\]/` regex. CodeQL flagged the latter
135
+ * as `js/polynomial-redos` because the `[^\]]*` greedy quantifier on
136
+ * library-supplied input can be exploited by a crafted long string. The
137
+ * indexOf/slice path is O(n) with no backtracking and we additionally
138
+ * cap the input length.
139
+ *
140
+ * Returns undefined when no list is found.
141
+ */
142
+ export declare function parseAllowedModels(message: string): string[] | undefined;
143
+ /**
144
+ * Returns true when `message` looks like a model-access-denied response
145
+ * (LiteLLM "team not allowed", generic "not allowed to access model",
146
+ * or "team can only access models=[...]").
147
+ */
148
+ export declare function isModelAccessDeniedMessage(message: string): boolean;
@@ -165,4 +165,98 @@ export class ModelAccessError extends BaseError {
165
165
  this.requiredTier = requiredTier;
166
166
  }
167
167
  }
168
/**
 * Curator P1-1: thrown when a provider rejects a request because the
 * caller's team / API key is not whitelisted for the requested model.
 *
 * The canonical producer is LiteLLM's `team not allowed to access model.
 * This team can only access models=['glm-latest', 'kimi-latest', ...]`
 * response — the whitelist is parsed off the error body so callers and
 * fallback orchestrators can choose an allowed alternative without
 * scraping strings themselves.
 */
export class ModelAccessDeniedError extends ProviderError {
    requestedModel;
    allowedModels;
    code = "MODEL_ACCESS_DENIED";
    constructor(message, { provider, requestedModel, allowedModels } = {}) {
        super(message, provider);
        this.name = "ModelAccessDeniedError";
        this.requestedModel = requestedModel;
        this.allowedModels = allowedModels;
    }
}
188
/** Maximum body length we'll attempt to parse. Real provider error
 * bodies are well under 10 KB; longer inputs are either truncated
 * log output or a deliberate ReDoS attempt. */
const MAX_ALLOWED_MODELS_INPUT = 10_000;
/**
 * Parse the `allowed_models` array out of a provider error message body.
 * Currently targets the LiteLLM team-whitelist response shape:
 *
 *   "team not allowed to access model. This team can only access
 *    models=['glm-latest', 'kimi-latest', 'open-large']"
 *
 * Implementation note: deliberately uses `indexOf`/`slice` instead of a
 * single `/models\s*=\s*\[([^\]]*)\]/` regex. CodeQL flagged the latter
 * as `js/polynomial-redos` because the `[^\]]*` greedy quantifier on
 * library-supplied input can be exploited by a crafted long string. The
 * indexOf/slice path is O(n) with no backtracking and we additionally
 * cap the input length.
 *
 * @param message - the raw provider error body
 * @returns the parsed model names, or undefined when no non-empty list
 *   is found.
 */
export function parseAllowedModels(message) {
    if (typeof message !== "string" || message.length === 0) {
        return undefined;
    }
    if (message.length > MAX_ALLOWED_MODELS_INPUT) {
        return undefined;
    }
    // Locate `models` keyword case-insensitively, then walk forward to
    // confirm `=` and `[` markers — no regex backtracking.
    const lower = message.toLowerCase();
    let idx = lower.indexOf("models", 0);
    while (idx !== -1) {
        let cursor = idx + "models".length;
        // Skip whitespace before `=`
        while (cursor < message.length && /\s/.test(message[cursor])) {
            cursor++;
        }
        if (message[cursor] !== "=") {
            idx = lower.indexOf("models", idx + 1);
            continue;
        }
        cursor++;
        // Skip whitespace before `[`
        while (cursor < message.length && /\s/.test(message[cursor])) {
            cursor++;
        }
        if (message[cursor] !== "[") {
            idx = lower.indexOf("models", idx + 1);
            continue;
        }
        const open = cursor;
        const close = message.indexOf("]", open + 1);
        if (close === -1) {
            // No `]` anywhere in the remainder, so no later occurrence
            // could form a complete list either — give up.
            return undefined;
        }
        const inside = message.slice(open + 1, close);
        const items = inside
            .split(",")
            .map((s) => s.trim().replace(/^['"]|['"]$/g, ""))
            .filter((s) => s.length > 0);
        if (items.length > 0) {
            return items;
        }
        // Fix: an empty list at this occurrence (e.g. "models=[]")
        // previously aborted the whole scan with undefined. Keep
        // scanning — a later occurrence may carry the actual whitelist.
        idx = lower.indexOf("models", idx + 1);
    }
    return undefined;
}
251
/**
 * Returns true when `message` looks like a model-access-denied response
 * (LiteLLM "team not allowed", generic "not allowed to access model",
 * or "team can only access models=[...]").
 *
 * @param message - the raw provider error body
 */
export function isModelAccessDeniedMessage(message) {
    const normalized = message.toLowerCase();
    // LiteLLM team-whitelist phrasing: both words present anywhere.
    if (normalized.includes("team") && normalized.includes("not allowed")) {
        return true;
    }
    // Alternate LiteLLM phrasing that omits "not allowed".
    if (normalized.includes("team can only access")) {
        return true;
    }
    // Generic provider phrasing, with or without "this".
    return /not\s+allowed\s+to\s+access\s+(this\s+)?model/i.test(message);
}
168
262
  //# sourceMappingURL=errors.js.map
@@ -968,6 +968,40 @@ export declare class NeuroLink {
968
968
  * @see {@link NeuroLink.executeTool} for events related to tool execution
969
969
  */
970
970
  getEventEmitter(): TypedEventEmitter<NeuroLinkEvents>;
971
+ /**
972
+ * Curator P1-1: synchronous credential health check for a single provider.
973
+ *
974
+ * Drives a tiny real call against the provider (1-token completion or
975
+ * `/models` listing depending on provider) to confirm the configured
976
+ * credentials are valid. Useful at startup so a service can refuse to
977
+ * boot if its primary provider's credentials are broken instead of
978
+ * discovering the problem on first user request.
979
+ *
980
+ * @example
981
+ * ```ts
982
+ * const health = await neurolink.checkCredentials({ provider: "litellm" });
983
+ * if (health.status !== "ok") {
984
+ * throw new Error(`provider not ready: ${health.detail}`);
985
+ * }
986
+ * ```
987
+ *
988
+ * @param input - the provider to check
989
+ * @returns `{ provider, status, detail }`. Possible status values:
990
+ * - `"ok"` — credentials valid and provider reachable
991
+ * - `"missing"` — required env / credentials not configured
992
+ * - `"expired"` — credentials present but rejected (401/403)
993
+ * - `"denied"` — credentials valid but team not whitelisted for any model
994
+ * - `"network"` — provider unreachable (timeout, ECONNREFUSED, DNS)
995
+ * - `"unknown"` — other error; consult `detail`
996
+ */
997
+ checkCredentials(input: {
998
+ provider: string;
999
+ model?: string;
1000
+ }): Promise<{
1001
+ provider: string;
1002
+ status: "ok" | "missing" | "expired" | "denied" | "network" | "unknown";
1003
+ detail: string;
1004
+ }>;
971
1005
  /**
972
1006
  * Emit tool start event with execution tracking
973
1007
  * @param toolName - Name of the tool being executed
package/dist/neurolink.js CHANGED
@@ -52,7 +52,7 @@ import { resolveDynamicArgument } from "./dynamic/dynamicResolver.js";
52
52
  import { initializeHippocampus } from "./memory/hippocampusInitializer.js";
53
53
  import { createMemoryRetrievalTools } from "./memory/memoryRetrievalTools.js";
54
54
  import { getMetricsAggregator, MetricsAggregator, } from "./observability/metricsAggregator.js";
55
- import { SpanStatus, SpanType, CircuitBreakerOpenError, ConversationMemoryError, AuthenticationError, AuthorizationError, InvalidModelError, } from "./types/index.js";
55
+ import { SpanStatus, SpanType, CircuitBreakerOpenError, ConversationMemoryError, AuthenticationError, AuthorizationError, InvalidModelError, ModelAccessDeniedError, } from "./types/index.js";
56
56
  import { SpanSerializer } from "./observability/utils/spanSerializer.js";
57
57
  import { flushOpenTelemetry, getLangfuseHealthStatus, initializeOpenTelemetry, isOpenTelemetryInitialized, runWithCurrentLangfuseContext, setLangfuseContext, shutdownOpenTelemetry, } from "./services/server/ai/observability/instrumentation.js";
58
58
  import { TaskManager } from "./tasks/taskManager.js";
@@ -187,6 +187,13 @@ function isNonRetryableProviderError(error) {
187
187
  if (error instanceof AuthorizationError) {
188
188
  return true;
189
189
  }
190
+ // Curator P1-1: model-access-denied is permanent for the (provider, model)
191
+ // pair until the team whitelist changes. Retrying with the same config
192
+ // would just waste a second roundtrip. Caller / fallback-orchestrator
193
+ // should pick a different model.
194
+ if (error instanceof ModelAccessDeniedError) {
195
+ return true;
196
+ }
190
197
  // Check for HTTP status codes on error objects (e.g., from Vercel AI SDK)
191
198
  if (error && typeof error === "object") {
192
199
  const err = error;
@@ -6087,6 +6094,87 @@ Current user's request: ${currentInput}`;
6087
6094
  getEventEmitter() {
6088
6095
  return this.emitter;
6089
6096
  }
6097
+ /**
6098
+ * Curator P1-1: synchronous credential health check for a single provider.
6099
+ *
6100
+ * Drives a tiny real call against the provider (1-token completion or
6101
+ * `/models` listing depending on provider) to confirm the configured
6102
+ * credentials are valid. Useful at startup so a service can refuse to
6103
+ * boot if its primary provider's credentials are broken instead of
6104
+ * discovering the problem on first user request.
6105
+ *
6106
+ * @example
6107
+ * ```ts
6108
+ * const health = await neurolink.checkCredentials({ provider: "litellm" });
6109
+ * if (health.status !== "ok") {
6110
+ * throw new Error(`provider not ready: ${health.detail}`);
6111
+ * }
6112
+ * ```
6113
+ *
6114
+ * @param input - the provider to check
6115
+ * @returns `{ provider, status, detail }`. Possible status values:
6116
+ * - `"ok"` — credentials valid and provider reachable
6117
+ * - `"missing"` — required env / credentials not configured
6118
+ * - `"expired"` — credentials present but rejected (401/403)
6119
+ * - `"denied"` — credentials valid but team not whitelisted for any model
6120
+ * - `"network"` — provider unreachable (timeout, ECONNREFUSED, DNS)
6121
+ * - `"unknown"` — other error; consult `detail`
6122
+ */
6123
+ async checkCredentials(input) {
6124
+ const { provider, model } = input;
6125
+ const probeText = "ping";
6126
+ try {
6127
+ // 1-token probe is cheap, exercises auth + routing without much cost.
6128
+ await this.generate({
6129
+ provider: provider,
6130
+ ...(model && { model }),
6131
+ input: { text: probeText },
6132
+ maxTokens: 16,
6133
+ disableTools: true,
6134
+ });
6135
+ return { provider, status: "ok", detail: "credentials valid" };
6136
+ }
6137
+ catch (err) {
6138
+ const msg = err instanceof Error ? err.message : String(err);
6139
+ const lower = msg.toLowerCase();
6140
+ if (err instanceof ModelAccessDeniedError) {
6141
+ return {
6142
+ provider,
6143
+ status: "denied",
6144
+ detail: msg,
6145
+ };
6146
+ }
6147
+ if (lower.includes("authentication") ||
6148
+ lower.includes("401") ||
6149
+ lower.includes("invalid api key") ||
6150
+ lower.includes("incorrect api key") ||
6151
+ lower.includes("api_key_invalid") ||
6152
+ lower.includes("token has expired") ||
6153
+ lower.includes("expired credentials")) {
6154
+ return { provider, status: "expired", detail: msg };
6155
+ }
6156
+ if (lower.includes("not configured") ||
6157
+ lower.includes("missing api") ||
6158
+ lower.includes("api key is required") ||
6159
+ lower.includes("no api key") ||
6160
+ lower.includes("application default credentials") ||
6161
+ lower.includes("google_application_credentials") ||
6162
+ lower.includes("project_id") ||
6163
+ lower.includes("default credentials") ||
6164
+ lower.includes("service account")) {
6165
+ return { provider, status: "missing", detail: msg };
6166
+ }
6167
+ if (lower.includes("econnrefused") ||
6168
+ lower.includes("enotfound") ||
6169
+ lower.includes("could not resolve") ||
6170
+ lower.includes("timeout") ||
6171
+ lower.includes("network") ||
6172
+ lower.includes("cannot connect")) {
6173
+ return { provider, status: "network", detail: msg };
6174
+ }
6175
+ return { provider, status: "unknown", detail: msg };
6176
+ }
6177
+ }
6090
6178
  // ========================================
6091
6179
  // ENHANCED: Tool Event Emission API
6092
6180
  // ========================================
@@ -5,7 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
5
5
  import { DEFAULT_MAX_STEPS } from "../core/constants.js";
6
6
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
7
7
  import { createProxyFetch } from "../proxy/proxyFetch.js";
8
- import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, RateLimitError, } from "../types/index.js";
8
+ import { AuthenticationError, InvalidModelError, ModelAccessDeniedError, NetworkError, ProviderError, RateLimitError, isModelAccessDeniedMessage, parseAllowedModels, } from "../types/index.js";
9
9
  import { isAbortError } from "../utils/errorHandling.js";
10
10
  import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
11
11
  import { logger } from "../utils/logger.js";
@@ -100,6 +100,17 @@ export class LiteLLMProvider extends BaseProvider {
100
100
  return new NetworkError("LiteLLM proxy server not available. Please start the LiteLLM proxy server at " +
101
101
  `${process.env.LITELLM_BASE_URL || "http://localhost:4000"}`, this.providerName);
102
102
  }
103
+ // Curator P1-1: detect "team not allowed to access model" responses
104
+ // and surface as ModelAccessDeniedError with the allowed_models array
105
+ // parsed from the body. Must run before the generic "API key" check
106
+ // because LiteLLM phrases this as a 403 distinct from auth.
107
+ if (isModelAccessDeniedMessage(errorRecord.message)) {
108
+ return new ModelAccessDeniedError(errorRecord.message, {
109
+ provider: this.providerName,
110
+ requestedModel: this.modelName,
111
+ allowedModels: parseAllowedModels(errorRecord.message),
112
+ });
113
+ }
103
114
  if (errorRecord.message.includes("API_KEY_INVALID") ||
104
115
  errorRecord.message.includes("Invalid API key")) {
105
116
  return new AuthenticationError("Invalid LiteLLM configuration. Please check your LITELLM_API_KEY environment variable.", this.providerName);
@@ -235,10 +235,27 @@ export class OpenAIProvider extends BaseProvider {
235
235
  const errorType = errorObj?.type && typeof errorObj.type === "string"
236
236
  ? errorObj.type
237
237
  : undefined;
238
+ const statusCode = typeof errorObj?.status === "number"
239
+ ? errorObj.status
240
+ : typeof errorObj?.statusCode === "number"
241
+ ? errorObj.statusCode
242
+ : undefined;
243
+ // Curator P1-1 / Reviewer Finding #4: only the explicit auth markers
244
+ // map to AuthenticationError. Earlier we treated every
245
+ // `invalid_request_error` as an auth failure — that's OpenAI's catch-all
246
+ // for any bad request (unsupported parameter, malformed JSON, etc.) and
247
+ // mislabelled them as "invalid API key". Use credential-specific
248
+ // signals only.
238
249
  if (message.includes("API_KEY_INVALID") ||
239
250
  message.includes("Invalid API key") ||
240
- errorType === "invalid_api_key") {
241
- return new AuthenticationError("Invalid OpenAI API key. Please check your OPENAI_API_KEY environment variable.", this.providerName);
251
+ message.includes("Incorrect API key") ||
252
+ message.includes("invalid_api_key") ||
253
+ errorType === "invalid_api_key" ||
254
+ statusCode === 401) {
255
+ return new AuthenticationError(message.includes("Incorrect API key") ||
256
+ message.includes("Invalid API key")
257
+ ? message
258
+ : "Invalid OpenAI API key. Please check your OPENAI_API_KEY environment variable.", this.providerName);
242
259
  }
243
260
  if (message.includes("rate limit") || errorType === "rate_limit_error") {
244
261
  return new RateLimitError("OpenAI rate limit exceeded. Please try again later.", this.providerName);
@@ -104,3 +104,45 @@ export declare class ModelAccessError extends BaseError {
104
104
  readonly requiredTier: string;
105
105
  constructor(model: string, tier: string, requiredTier: string);
106
106
  }
107
+ /**
108
+ * Curator P1-1: thrown when a provider rejects a request because the
109
+ * caller's team / API key is not whitelisted for the requested model.
110
+ *
111
+ * LiteLLM's `team not allowed to access model. This team can only access
112
+ * models=['glm-latest', 'kimi-latest', ...]` is the canonical example —
113
+ * the list is parsed off the error body so callers / fallback orchestrators
114
+ * can choose a whitelisted alternative without scraping strings.
115
+ */
116
+ export declare class ModelAccessDeniedError extends ProviderError {
117
+ readonly requestedModel: string | undefined;
118
+ readonly allowedModels: string[] | undefined;
119
+ readonly code: "MODEL_ACCESS_DENIED";
120
+ constructor(message: string, options?: {
121
+ provider?: string;
122
+ requestedModel?: string;
123
+ allowedModels?: string[];
124
+ });
125
+ }
126
+ /**
127
+ * Parse the `allowed_models` array out of a provider error message body.
128
+ * Currently targets the LiteLLM team-whitelist response shape:
129
+ *
130
+ * "team not allowed to access model. This team can only access
131
+ * models=['glm-latest', 'kimi-latest', 'open-large']"
132
+ *
133
+ * Implementation note: deliberately uses `indexOf`/`slice` instead of a
134
+ * single `/models\s*=\s*\[([^\]]*)\]/` regex. CodeQL flagged the latter
135
+ * as `js/polynomial-redos` because the `[^\]]*` greedy quantifier on
136
+ * library-supplied input can be exploited by a crafted long string. The
137
+ * indexOf/slice path is O(n) with no backtracking and we additionally
138
+ * cap the input length.
139
+ *
140
+ * Returns undefined when no list is found.
141
+ */
142
+ export declare function parseAllowedModels(message: string): string[] | undefined;
143
+ /**
144
+ * Returns true when `message` looks like a model-access-denied response
145
+ * (LiteLLM "team not allowed", generic "not allowed to access model",
146
+ * or "team can only access models=[...]").
147
+ */
148
+ export declare function isModelAccessDeniedMessage(message: string): boolean;