@juspay/neurolink 9.59.2 → 9.59.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/browser/neurolink.min.js +355 -355
  3. package/dist/cli/commands/proxy.js +10 -5
  4. package/dist/core/baseProvider.d.ts +10 -3
  5. package/dist/core/baseProvider.js +8 -3
  6. package/dist/core/modules/StreamHandler.d.ts +22 -3
  7. package/dist/core/modules/StreamHandler.js +42 -20
  8. package/dist/lib/core/baseProvider.d.ts +10 -3
  9. package/dist/lib/core/baseProvider.js +8 -3
  10. package/dist/lib/core/modules/StreamHandler.d.ts +22 -3
  11. package/dist/lib/core/modules/StreamHandler.js +42 -20
  12. package/dist/lib/neurolink.js +57 -3
  13. package/dist/lib/providers/anthropic.js +13 -1
  14. package/dist/lib/providers/anthropicBaseProvider.js +30 -2
  15. package/dist/lib/providers/azureOpenai.js +12 -1
  16. package/dist/lib/providers/googleAiStudio.js +12 -1
  17. package/dist/lib/providers/googleVertex.js +11 -1
  18. package/dist/lib/providers/huggingFace.js +29 -2
  19. package/dist/lib/providers/litellm.js +44 -4
  20. package/dist/lib/providers/mistral.js +12 -1
  21. package/dist/lib/providers/openAI.js +34 -3
  22. package/dist/lib/providers/openRouter.js +33 -2
  23. package/dist/lib/providers/openaiCompatible.js +34 -2
  24. package/dist/lib/services/server/ai/observability/instrumentation.js +7 -2
  25. package/dist/lib/types/index.d.ts +1 -0
  26. package/dist/lib/types/index.js +2 -0
  27. package/dist/lib/types/noOutputSentinel.d.ts +26 -0
  28. package/dist/lib/types/noOutputSentinel.js +2 -0
  29. package/dist/lib/types/stream.d.ts +2 -1
  30. package/dist/lib/utils/noOutputSentinel.d.ts +80 -0
  31. package/dist/lib/utils/noOutputSentinel.js +193 -0
  32. package/dist/neurolink.js +57 -3
  33. package/dist/providers/anthropic.js +13 -1
  34. package/dist/providers/anthropicBaseProvider.js +30 -2
  35. package/dist/providers/azureOpenai.js +12 -1
  36. package/dist/providers/googleAiStudio.js +12 -1
  37. package/dist/providers/googleVertex.js +11 -1
  38. package/dist/providers/huggingFace.js +29 -2
  39. package/dist/providers/litellm.js +44 -4
  40. package/dist/providers/mistral.js +12 -1
  41. package/dist/providers/openAI.js +34 -3
  42. package/dist/providers/openRouter.js +33 -2
  43. package/dist/providers/openaiCompatible.js +34 -2
  44. package/dist/services/server/ai/observability/instrumentation.js +7 -2
  45. package/dist/types/index.d.ts +1 -0
  46. package/dist/types/index.js +2 -0
  47. package/dist/types/noOutputSentinel.d.ts +26 -0
  48. package/dist/types/noOutputSentinel.js +1 -0
  49. package/dist/types/stream.d.ts +2 -1
  50. package/dist/utils/noOutputSentinel.d.ts +80 -0
  51. package/dist/utils/noOutputSentinel.js +192 -0
  52. package/package.json +1 -1
package/dist/cli/commands/proxy.js

@@ -1443,27 +1443,32 @@ export const proxyGuardCommand = {
         return;
     }
     logger.always(`[guard] update available: ${runningVersion} → ${result.latestVersion}`);
-    // 2. Wait for quiet traffic
-    const maxQuietWaitMs = 60 * 60 * 1000; // 1 hour max wait
+    // 2. Best-effort quiet wait — try for a brief window, but proceed
+    // regardless. The install (pnpm add -g) is non-disruptive; only the
+    // restart causes a ~1-3s blip. Blocking updates for hours/days because
+    // traffic never goes silent is worse than a brief interruption.
+    const maxQuietWaitMs = 5 * 60 * 1000; // 5 minutes max, then proceed
     const quietPollMs = 10_000; // check every 10s
     const quietStart = Date.now();
     while (Date.now() - quietStart < maxQuietWaitMs) {
-        // Bail out if parent proxy died during the wait
         if (getProcessStatus(parentPid) === "not_running") {
             logger.always(`[guard] parent process died during quiet-wait, aborting update`);
             return;
         }
         const quietStatus = checkTrafficQuiet(QUIET_THRESHOLD_MS);
         if (quietStatus.isQuiet) {
+            logger.always(`[guard] traffic quiet, proceeding with update`);
             break;
         }
         logger.debug(`[guard] traffic active (last activity ${Math.round(quietStatus.silenceDurationMs / 1000)}s ago), waiting...`);
         await new Promise((r) => setTimeout(r, quietPollMs));
     }
+    // Proceed with install regardless — don't block updates indefinitely.
+    // The install itself (pnpm add -g) doesn't affect the running process.
+    // Only the restart afterwards causes a brief interruption.
     const finalQuiet = checkTrafficQuiet(QUIET_THRESHOLD_MS);
     if (!finalQuiet.isQuiet) {
-        logger.always(`[guard] traffic didn't quiet down within 1 hour, skipping update cycle`);
-        return;
+        logger.always(`[guard] traffic still active after ${Math.round(maxQuietWaitMs / 1000)}s wait, proceeding with update anyway (restart will briefly interrupt in-flight requests)`);
     }
     // 3. Install update (validate version string before passing to shell)
     if (!/^\d+\.\d+\.\d+$/.test(result.latestVersion)) {
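The net behavioral change: in 9.59.2 a guard that never saw quiet traffic skipped the update cycle entirely, while 9.59.4 waits at most five minutes and then updates anyway. A compact TypeScript sketch of the new policy; waitForQuietBestEffort is a hypothetical standalone function, and the declare stubs stand in for the guard's own helpers used in the hunk above:

declare function getProcessStatus(pid: number): "running" | "not_running";
declare function checkTrafficQuiet(thresholdMs: number): { isQuiet: boolean };
declare const QUIET_THRESHOLD_MS: number;

async function waitForQuietBestEffort(parentPid: number): Promise<"aborted" | "quiet" | "timed-out"> {
    const maxQuietWaitMs = 5 * 60 * 1000; // 9.59.2 used 60 * 60 * 1000 and skipped the update on timeout
    const quietPollMs = 10_000;
    const start = Date.now();
    while (Date.now() - start < maxQuietWaitMs) {
        if (getProcessStatus(parentPid) === "not_running") {
            return "aborted"; // parent proxy died; caller aborts the update
        }
        if (checkTrafficQuiet(QUIET_THRESHOLD_MS).isQuiet) {
            return "quiet"; // safe window found
        }
        await new Promise((r) => setTimeout(r, quietPollMs));
    }
    // 9.59.4 behavior: proceed anyway. The pnpm install is non-disruptive;
    // only the subsequent restart causes a brief interruption.
    return "timed-out";
}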
package/dist/core/baseProvider.d.ts

@@ -279,13 +279,20 @@ export declare abstract class BaseProvider implements AIProvider {
      */
     protected validateStreamOptions(options: StreamOptions): void;
     /**
-     * Create text stream transformation - delegated to StreamHandler
+     * Create text stream transformation - delegated to StreamHandler.
+     * Reviewer follow-up: forwards the optional `getUnderlyingError`
+     * callback so providers can capture upstream errors via
+     * `streamText`'s `onError` and have them flow into the
+     * NoOutputGeneratedError sentinel's `providerError` /
+     * `modelResponseRaw`.
      */
     protected createTextStream(result: {
         textStream: AsyncIterable<string>;
-    }): AsyncGenerator<{
+        finishReason?: Promise<unknown> | unknown;
+        totalUsage?: Promise<unknown> | unknown;
+    }, getUnderlyingError?: () => unknown): AsyncGenerator<{
         content: string;
-    }>;
+    } | import("../types/index.js").StreamNoOutputSentinel>;
     /**
      * Create standardized stream result - delegated to StreamHandler
      */
package/dist/core/baseProvider.js

@@ -1019,10 +1019,15 @@ export class BaseProvider {
         this.streamHandler.validateStreamOptions(options);
     }
     /**
-     * Create text stream transformation - delegated to StreamHandler
+     * Create text stream transformation - delegated to StreamHandler.
+     * Reviewer follow-up: forwards the optional `getUnderlyingError`
+     * callback so providers can capture upstream errors via
+     * `streamText`'s `onError` and have them flow into the
+     * NoOutputGeneratedError sentinel's `providerError` /
+     * `modelResponseRaw`.
      */
-    createTextStream(result) {
-        return this.streamHandler.createTextStream(result);
+    createTextStream(result, getUnderlyingError) {
+        return this.streamHandler.createTextStream(result, getUnderlyingError);
     }
     /**
      * Create standardized stream result - delegated to StreamHandler
package/dist/core/modules/StreamHandler.d.ts

@@ -12,7 +12,7 @@
  *
  * @module core/modules/StreamHandler
  */
-import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName } from "../../types/index.js";
+import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName, StreamNoOutputSentinel } from "../../types/index.js";
 /**
  * StreamHandler class - Handles streaming operations for AI providers
  */
@@ -30,9 +30,28 @@ export declare class StreamHandler {
      */
     createTextStream(result: {
         textStream: AsyncIterable<string>;
-    }): AsyncGenerator<{
+        /**
+         * Optional metadata getters from the AI SDK's StreamTextResult. These
+         * reject with NoOutputGeneratedError when no output is produced, which
+         * is exactly the path Curator's P3-6 fix needs to enrich. We attempt
+         * to await them in the catch block; whichever resolve are included in
+         * the sentinel chunk metadata.
+         */
+        finishReason?: Promise<unknown> | unknown;
+        totalUsage?: Promise<unknown> | unknown;
+    },
+    /**
+     * Reviewer follow-up: optional getter for the provider's captured
+     * upstream error (typically wired from `streamText`'s `onError`
+     * callback). When set, the sentinel's `providerError` /
+     * `modelResponseRaw` reflect the real upstream cause instead of the
+     * AI SDK's generic "No output generated" message. Callers that don't
+     * capture upstream errors can omit this — the sentinel still
+     * populates with the AI SDK error.
+     */
+    getUnderlyingError?: () => unknown): AsyncGenerator<{
         content: string;
-    }>;
+    } | StreamNoOutputSentinel>;
     /**
      * Create standardized stream result - consolidates result structure
      */
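With the widened return type, stream consumers can now receive a sentinel chunk instead of only { content } objects. A minimal consumer sketch in TypeScript; the structural type below is an assumption based on the S4 sentinel shape visible in this diff (content: "" plus metadata.noOutput), not the package's actual declaration in types/noOutputSentinel.d.ts:

type StreamNoOutputSentinel = {
    content: "";
    metadata: { noOutput: true; errorType: string; [extra: string]: unknown };
};

type TextChunk = { content: string };

function isNoOutputSentinel(chunk: TextChunk | StreamNoOutputSentinel): chunk is StreamNoOutputSentinel {
    return "metadata" in chunk && chunk.metadata.noOutput === true;
}

async function collectText(stream: AsyncIterable<TextChunk | StreamNoOutputSentinel>): Promise<string> {
    let text = "";
    for await (const chunk of stream) {
        if (isNoOutputSentinel(chunk)) {
            // 9.59.4 enriches the metadata (per this diff: providerError,
            // modelResponseRaw, finishReason, token usage). Inspect it
            // rather than treating the stream as a successful empty reply.
            continue;
        }
        text += chunk.content;
    }
    return text;
}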
package/dist/core/modules/StreamHandler.js

@@ -16,6 +16,7 @@ import { trace, context as otelContext, SpanStatusCode, } from "@opentelemetry/a
 import { tracers, ATTR, withSpan } from "../../telemetry/index.js";
 import { logger } from "../../utils/logger.js";
 import { validateStreamOptions as validateStreamOpts, ValidationError, createValidationSummary, } from "../../utils/parameterValidation.js";
+import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../../utils/noOutputSentinel.js";
 import { STEP_LIMITS } from "../constants.js";
 import { createAnalytics } from "../analytics.js";
 import { nanoid } from "nanoid";
@@ -83,7 +84,17 @@ export class StreamHandler {
      * Create text stream transformation - consolidates identical logic from 7/10 providers
      * Tracks TTFC (Time To First Chunk), chunk count, and total bytes streamed.
      */
-    createTextStream(result) {
+    createTextStream(result,
+    /**
+     * Reviewer follow-up: optional getter for the provider's captured
+     * upstream error (typically wired from `streamText`'s `onError`
+     * callback). When set, the sentinel's `providerError` /
+     * `modelResponseRaw` reflect the real upstream cause instead of the
+     * AI SDK's generic "No output generated" message. Callers that don't
+     * capture upstream errors can omit this — the sentinel still
+     * populates with the AI SDK error.
+     */
+    getUnderlyingError) {
         const providerName = this.providerName;
         return (async function* () {
             let chunkCount = 0;
@@ -113,32 +124,43 @@ export class StreamHandler {
             // rather than crashing the process with an unhandled rejection.
             if (NoOutputGeneratedError.isInstance(error)) {
                 logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError), returning empty stream`);
-                // Curator P2-5: stamp the active OTel span so ContextEnricher.onEnd()
-                // surfaces a WARNING-level Langfuse observation instead of defaulting
-                // to DEFAULT with no status message.
-                try {
-                    const activeSpan = trace.getSpan(otelContext.active());
-                    if (activeSpan) {
-                        activeSpan.setAttribute("neurolink.no_output", true);
-                    }
-                }
-                catch {
-                    // Tracing not initialized — ignore.
-                }
+                // Curator P3-6: build the enriched sentinel using the shared
+                // helper so every provider yields the same shape. Pass the
+                // captured upstream error (if any) so providerError /
+                // modelResponseRaw carry the real cause.
+                const sentinel = await buildNoOutputSentinel(error, result, getUnderlyingError?.());
+                // Curator P2-5 + P3-6: stamp the active OTel span so
+                // ContextEnricher.onEnd() surfaces a WARNING-level Langfuse
+                // observation with finishReason + token usage. Centralized in
+                // stampNoOutputSpan so every wired site stamps consistently.
+                stampNoOutputSpan(sentinel);
                 // S4 fix: yield a sentinel chunk so Pipeline B can detect the empty stream
                 // and set the span to WARNING status instead of OK
-                yield {
-                    content: "",
-                    metadata: {
-                        noOutput: true,
-                        errorType: "NoOutputGeneratedError",
-                    },
-                };
+                yield sentinel;
+                // Reviewer follow-up: must return here. Falling through to the
+                // post-stream detection block below would yield a SECOND sentinel
+                // chunk (verified with synthetic NoOutputGeneratedError stream:
+                // count=2 sentinels). The catch block's yield is sufficient.
+                return;
             }
             else {
                 throw error;
             }
         }
+        // Curator P3-6 (round-2 fix): the production trigger sets
+        // NoOutputGeneratedError on `result.finishReason` rejection — NOT
+        // thrown from textStream iteration. Surface that path here so the
+        // sentinel actually fires for real-world no-output streams. The
+        // catch above remains as a defensive path for failure modes that
+        // do throw from textStream.
+        if (chunkCount === 0) {
+            const detected = await detectPostStreamNoOutput(result, getUnderlyingError?.());
+            if (detected) {
+                logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection`);
+                stampNoOutputSpan(detected.sentinel);
+                yield detected.sentinel;
+            }
+        }
         // Record completion metrics on the active span
         const activeSpan = trace.getSpan(otelContext.active());
         if (activeSpan) {
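The helper internals live in dist/utils/noOutputSentinel.js, which this excerpt doesn't show. A plausible reading of the post-stream probe, given the round-2 comment above; it assumes the AI SDK's exported NoOutputGeneratedError and treats buildNoOutputSentinel as an opaque stub:

import { NoOutputGeneratedError } from "ai";

declare function buildNoOutputSentinel(
    error: unknown,
    result: { finishReason?: Promise<unknown> | unknown },
    underlyingError?: unknown,
): Promise<{ content: ""; metadata: Record<string, unknown> }>;

async function detectPostStreamNoOutputSketch(
    result: { finishReason?: Promise<unknown> | unknown },
    underlyingError?: unknown,
) {
    try {
        // For a stream that produced output this resolves normally
        // ("stop", "length", ...).
        await result.finishReason;
        return null;
    }
    catch (error) {
        if (NoOutputGeneratedError.isInstance(error)) {
            return { sentinel: await buildNoOutputSentinel(error, result, underlyingError) };
        }
        throw error; // unrelated rejection: propagate
    }
}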
package/dist/lib/core/baseProvider.d.ts, package/dist/lib/core/baseProvider.js, package/dist/lib/core/modules/StreamHandler.d.ts, package/dist/lib/core/modules/StreamHandler.js

The hunks for these four files (entries 8-11 in the file list) repeat the package/dist/core hunks above verbatim: the package ships the same compiled sources under both dist/ and dist/lib/.
package/dist/lib/neurolink.js

@@ -5218,9 +5218,36 @@ Current user's request: ${currentInput}`;
             // single `generation:end` event with cost data. Cost listeners
             // subscribe here; previously the stream path never fired it.
             let resolvedUsage;
+            // Reviewer follow-up: track *non-sentinel output chunks* (text,
+            // audio, image — anything the SDK considers real output) so the
+            // fallback gate fires only when the stream produced nothing
+            // useful. Counting only text content here would have spuriously
+            // triggered fallback for valid audio-only (Google Live) and
+            // image-only streams. The sentinel is the only thing we exclude
+            // — that path can mask real provider failures (DNS, auth,
+            // retry-exhaustion) that AI SDK rejects with
+            // NoOutputGeneratedError, and we want fallback to fire there.
+            let realOutputChunks = 0;
             try {
                 for await (const chunk of mcpStream) {
                     chunkCount++;
+                    const isNoOutputSentinel = chunk !== null &&
+                        typeof chunk === "object" &&
+                        "metadata" in chunk &&
+                        chunk.metadata
+                            ?.noOutput === true;
+                    const hasTextContent = chunk &&
+                        "content" in chunk &&
+                        typeof chunk.content === "string" &&
+                        chunk.content.length > 0;
+                    const hasMediaPayload = chunk !== null &&
+                        typeof chunk === "object" &&
+                        "type" in chunk &&
+                        (chunk.type === "audio" ||
+                            chunk.type === "image");
+                    if (!isNoOutputSentinel && (hasTextContent || hasMediaPayload)) {
+                        realOutputChunks++;
+                    }
                     if (chunk &&
                         "content" in chunk &&
                         typeof chunk.content === "string") {
@@ -5232,13 +5259,17 @@ Current user's request: ${currentInput}`;
                         metadata: {
                             chunkIndex: chunkCount,
                             totalLength: accumulatedContent.length,
+                            ...(isNoOutputSentinel && { noOutput: true }),
                         },
                         timestamp: Date.now(),
                     });
                 }
                 yield chunk;
             }
-            if (chunkCount === 0 &&
+            // Reviewer follow-up: fire fallback when no *non-sentinel*
+            // output was produced — sentinel-only and truly empty streams
+            // both qualify, but media-only streams (audio/image) do not.
+            if (realOutputChunks === 0 &&
                 !metadata.fallbackAttempted &&
                 !enhancedOptions.disableInternalFallback &&
                 streamState.toolCalls.length === 0 &&
@@ -5735,9 +5766,32 @@ Current user's request: ${currentInput}`;
                 streamState.finishReason =
                     fallbackResult.finishReason ?? streamState.finishReason;
             }
+            // Reviewer follow-up: count *real* output chunks for the fallback
+            // success gate, mirroring the primary stream wrapper. A fallback
+            // that yields only the NoOutputSentinel must not be treated as
+            // success — that's the same masked-failure scenario as the primary.
             let fallbackChunkCount = 0;
+            let fallbackRealOutputChunks = 0;
             for await (const fallbackChunk of fallbackResult.stream) {
                 fallbackChunkCount++;
+                const isFallbackNoOutputSentinel = fallbackChunk !== null &&
+                    typeof fallbackChunk === "object" &&
+                    "metadata" in fallbackChunk &&
+                    fallbackChunk.metadata
+                        ?.noOutput === true;
+                const fallbackHasTextContent = fallbackChunk &&
+                    "content" in fallbackChunk &&
+                    typeof fallbackChunk.content === "string" &&
+                    fallbackChunk.content.length > 0;
+                const fallbackHasMediaPayload = fallbackChunk !== null &&
+                    typeof fallbackChunk === "object" &&
+                    "type" in fallbackChunk &&
+                    (fallbackChunk.type === "audio" ||
+                        fallbackChunk.type === "image");
+                if (!isFallbackNoOutputSentinel &&
+                    (fallbackHasTextContent || fallbackHasMediaPayload)) {
+                    fallbackRealOutputChunks++;
+                }
                 if (fallbackChunk &&
                     "content" in fallbackChunk &&
                     typeof fallbackChunk.content === "string") {
@@ -5746,10 +5800,10 @@ Current user's request: ${currentInput}`;
                 }
                 yield fallbackChunk;
             }
-            if (fallbackChunkCount === 0 &&
+            if (fallbackRealOutputChunks === 0 &&
                 fallbackToolCalls.length === 0 &&
                 fallbackToolResults.length === 0) {
-                throw new Error(`Fallback provider ${fallbackRoute.provider} also returned 0 chunks`);
+                throw new Error(`Fallback provider ${fallbackRoute.provider} also returned 0 real output chunks (chunkCount=${fallbackChunkCount}, sentinel-only or empty)`);
             }
             // Fallback succeeded - likely guardrails blocked primary
             metadata.fallbackProvider = fallbackRoute.provider;
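The same chunk classification appears twice above (primary wrapper and fallback loop). Distilled into one predicate for clarity; this is an editorial sketch, not a helper the package exports:

type AnyChunk = { [key: string]: unknown } | null | undefined;

// "Real output" = non-empty text, or an audio/image chunk. The NoOutput
// sentinel never counts, so sentinel-only streams still trigger fallback.
function isRealOutputChunk(chunk: AnyChunk): boolean {
    if (!chunk || typeof chunk !== "object") {
        return false;
    }
    const isSentinel =
        (chunk.metadata as { noOutput?: boolean } | undefined)?.noOutput === true;
    const hasText = typeof chunk.content === "string" && chunk.content.length > 0;
    const hasMedia = chunk.type === "audio" || chunk.type === "image";
    return !isSentinel && (hasText || hasMedia);
}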
package/dist/lib/providers/anthropic.js

@@ -790,6 +790,10 @@ export class AnthropicProvider extends BaseProvider {
                 "gen_ai.request.model": getModelId(model, this.modelName || "unknown"),
             },
         });
+        // Reviewer follow-up: capture upstream provider errors via onError
+        // so the post-stream NoOutput sentinel carries the real cause in
+        // providerError / modelResponseRaw.
+        let capturedProviderError;
         let result;
         try {
             result = streamText({
@@ -802,6 +806,14 @@ export class AnthropicProvider extends BaseProvider {
                 stopWhen: stepCountIs(options.maxSteps || DEFAULT_MAX_STEPS),
                 toolChoice: resolveToolChoice(options, tools, shouldUseTools),
                 abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
+                onError: (event) => {
+                    capturedProviderError = event.error;
+                    logger.error("Anthropic: Stream error", {
+                        error: event.error instanceof Error
+                            ? event.error.message
+                            : String(event.error),
+                    });
+                },
                 experimental_repairToolCall: this.getToolCallRepairFn(options),
                 experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
                 onStepFinish: ({ toolCalls, toolResults }) => {
@@ -868,7 +880,7 @@ export class AnthropicProvider extends BaseProvider {
             streamSpan.end();
         });
         timeoutController?.cleanup();
-        const transformedStream = this.createTextStream(result);
+        const transformedStream = this.createTextStream(result, () => capturedProviderError);
         // ✅ Note: Vercel AI SDK's streamText() method limitations with tools
         // The streamText() function doesn't provide the same tool result access as generateText()
         // Full tool support is now available with real streaming
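Every provider touched in this release follows the same capture-and-forward pattern shown above for Anthropic. A generic sketch of that wiring; streamText and its onError callback are the Vercel AI SDK API, and the remaining names are illustrative:

import { streamText, type LanguageModel } from "ai";

function streamWithErrorCapture(model: LanguageModel, prompt: string) {
    let capturedProviderError: unknown;
    const result = streamText({
        model,
        prompt,
        onError: (event) => {
            // Hold onto the upstream cause (DNS, auth, rate limit, ...) so the
            // sentinel can report it instead of the SDK's generic
            // "No output generated" message.
            capturedProviderError = event.error;
        },
    });
    // The getter is evaluated lazily, inside createTextStream, only after
    // the stream has finished without producing output.
    return { result, getUnderlyingError: () => capturedProviderError };
}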
package/dist/lib/providers/anthropicBaseProvider.js

@@ -5,6 +5,7 @@ import { AnthropicModels } from "../constants/enums.js";
 import { BaseProvider } from "../core/baseProvider.js";
 import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/index.js";
 import { logger } from "../utils/logger.js";
+import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
 import { calculateCost } from "../utils/pricing.js";
 import { createAnthropicBaseConfig, validateApiKey, } from "../utils/providerConfig.js";
 import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
@@ -81,6 +82,10 @@ export class AnthropicProviderV2 extends BaseProvider {
                 "gen_ai.request.model": getModelId(model, this.modelName || "unknown"),
             },
         });
+        // Reviewer follow-up: capture upstream provider errors via onError
+        // so the post-stream NoOutput detect can propagate the real cause
+        // into the sentinel's providerError / modelResponseRaw.
+        let capturedProviderError;
         let result;
         try {
             result = streamText({
@@ -95,6 +100,14 @@ export class AnthropicProviderV2 extends BaseProvider {
                 abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
                 experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
                 experimental_repairToolCall: this.getToolCallRepairFn(options),
+                onError: (event) => {
+                    capturedProviderError = event.error;
+                    logger.error("AnthropicBaseProvider: Stream error", {
+                        error: event.error instanceof Error
+                            ? event.error.message
+                            : String(event.error),
+                    });
+                },
                 onStepFinish: ({ toolCalls, toolResults }) => {
                     this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
                         logger.warn("[AnthropicBaseProvider] Failed to store tool executions", {
@@ -153,19 +166,34 @@ export class AnthropicProviderV2 extends BaseProvider {
         timeoutController?.cleanup();
         // Transform string stream to content object stream (match Google AI pattern)
         const transformedStream = async function* () {
+            let chunkCount = 0;
             try {
                 for await (const chunk of result.textStream) {
+                    chunkCount++;
                     yield { content: chunk };
                 }
             }
             catch (streamError) {
-                // AI SDK v6 throws NoOutputGeneratedError when the stream produced no output.
                 if (NoOutputGeneratedError.isInstance(streamError)) {
-                    logger.warn("AnthropicBaseProvider: Stream produced no output (NoOutputGeneratedError)");
+                    logger.warn("AnthropicBaseProvider: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+                    const sentinel = await buildNoOutputSentinel(streamError, result, capturedProviderError);
+                    stampNoOutputSpan(sentinel);
+                    yield sentinel;
                     return;
                 }
                 throw streamError;
             }
+            // Curator P3-6 (round-2 fix): production trigger sets the error
+            // on result.finishReason rejection, not on textStream iteration.
+            // Surface that path here so the sentinel actually fires.
+            if (chunkCount === 0) {
+                const detected = await detectPostStreamNoOutput(result, capturedProviderError);
+                if (detected) {
+                    logger.warn("AnthropicBaseProvider: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+                    stampNoOutputSpan(detected.sentinel);
+                    yield detected.sentinel;
+                }
+            }
         };
         return {
             stream: transformedStream(),
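The "count=2 sentinels" comment in the StreamHandler hunk suggests the fix was checked against a synthetic no-output stream. A hypothetical harness for that check (not a test that ships with the package): a fake result whose textStream throws NoOutputGeneratedError and whose finishReason rejects the same way exercises both detection paths, and without the early return after the catch-path yield the consumer would see two sentinel chunks instead of one.

async function countSentinels(
    stream: AsyncIterable<{ content?: string; metadata?: { noOutput?: boolean } }>,
): Promise<number> {
    let sentinels = 0;
    for await (const chunk of stream) {
        if (chunk.metadata?.noOutput === true) {
            sentinels += 1;
        }
    }
    return sentinels; // expected 1 with the early return, 2 without it
}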
package/dist/lib/providers/azureOpenai.js

@@ -111,6 +111,9 @@ export class AzureOpenAIProvider extends BaseProvider {
         // Using protected helper from BaseProvider to eliminate code duplication
         const messages = await this.buildMessagesForStream(options);
         const model = await this.getAISDKModelWithMiddleware(options);
+        // Reviewer follow-up: capture upstream provider errors via onError
+        // so the post-stream NoOutput sentinel carries the real cause.
+        let capturedProviderError;
         const stream = await streamText({
             model,
             messages: messages,
@@ -126,6 +129,14 @@ export class AzureOpenAIProvider extends BaseProvider {
             abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
             experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
             experimental_repairToolCall: this.getToolCallRepairFn(options),
+            onError: (event) => {
+                capturedProviderError = event.error;
+                logger.error("AzureOpenAI: Stream error", {
+                    error: event.error instanceof Error
+                        ? event.error.message
+                        : String(event.error),
+                });
+            },
             onStepFinish: (event) => {
                 emitToolEndFromStepFinish(this.neurolink?.getEventEmitter(), event.toolResults);
                 this.handleToolExecutionStorage([...event.toolCalls], [...event.toolResults], options, new Date()).catch((error) => {
@@ -138,7 +149,7 @@ export class AzureOpenAIProvider extends BaseProvider {
         });
         timeoutController?.cleanup();
         // Transform string stream to content object stream using BaseProvider method
-        const transformedStream = this.createTextStream(stream);
+        const transformedStream = this.createTextStream(stream, () => capturedProviderError);
         return {
             stream: transformedStream,
             provider: "azure",