@juspay/neurolink 9.59.1 → 9.59.3

This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
Files changed (51)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/browser/neurolink.min.js +355 -355
  3. package/dist/core/baseProvider.d.ts +10 -3
  4. package/dist/core/baseProvider.js +8 -3
  5. package/dist/core/modules/StreamHandler.d.ts +22 -3
  6. package/dist/core/modules/StreamHandler.js +42 -20
  7. package/dist/lib/core/baseProvider.d.ts +10 -3
  8. package/dist/lib/core/baseProvider.js +8 -3
  9. package/dist/lib/core/modules/StreamHandler.d.ts +22 -3
  10. package/dist/lib/core/modules/StreamHandler.js +42 -20
  11. package/dist/lib/neurolink.js +361 -39
  12. package/dist/lib/providers/anthropic.js +13 -1
  13. package/dist/lib/providers/anthropicBaseProvider.js +30 -2
  14. package/dist/lib/providers/azureOpenai.js +12 -1
  15. package/dist/lib/providers/googleAiStudio.js +12 -1
  16. package/dist/lib/providers/googleVertex.js +11 -1
  17. package/dist/lib/providers/huggingFace.js +29 -2
  18. package/dist/lib/providers/litellm.js +44 -4
  19. package/dist/lib/providers/mistral.js +12 -1
  20. package/dist/lib/providers/openAI.js +34 -3
  21. package/dist/lib/providers/openRouter.js +33 -2
  22. package/dist/lib/providers/openaiCompatible.js +34 -2
  23. package/dist/lib/services/server/ai/observability/instrumentation.js +7 -2
  24. package/dist/lib/types/index.d.ts +1 -0
  25. package/dist/lib/types/index.js +2 -0
  26. package/dist/lib/types/noOutputSentinel.d.ts +26 -0
  27. package/dist/lib/types/noOutputSentinel.js +2 -0
  28. package/dist/lib/types/stream.d.ts +2 -1
  29. package/dist/lib/utils/noOutputSentinel.d.ts +80 -0
  30. package/dist/lib/utils/noOutputSentinel.js +193 -0
  31. package/dist/neurolink.js +361 -39
  32. package/dist/providers/anthropic.js +13 -1
  33. package/dist/providers/anthropicBaseProvider.js +30 -2
  34. package/dist/providers/azureOpenai.js +12 -1
  35. package/dist/providers/googleAiStudio.js +12 -1
  36. package/dist/providers/googleVertex.js +11 -1
  37. package/dist/providers/huggingFace.js +29 -2
  38. package/dist/providers/litellm.js +44 -4
  39. package/dist/providers/mistral.js +12 -1
  40. package/dist/providers/openAI.js +34 -3
  41. package/dist/providers/openRouter.js +33 -2
  42. package/dist/providers/openaiCompatible.js +34 -2
  43. package/dist/services/server/ai/observability/instrumentation.js +7 -2
  44. package/dist/types/index.d.ts +1 -0
  45. package/dist/types/index.js +2 -0
  46. package/dist/types/noOutputSentinel.d.ts +26 -0
  47. package/dist/types/noOutputSentinel.js +1 -0
  48. package/dist/types/stream.d.ts +2 -1
  49. package/dist/utils/noOutputSentinel.d.ts +80 -0
  50. package/dist/utils/noOutputSentinel.js +192 -0
  51. package/package.json +1 -1
@@ -111,6 +111,9 @@ export class AzureOpenAIProvider extends BaseProvider {
  // Using protected helper from BaseProvider to eliminate code duplication
  const messages = await this.buildMessagesForStream(options);
  const model = await this.getAISDKModelWithMiddleware(options);
+ // Reviewer follow-up: capture upstream provider errors via onError
+ // so the post-stream NoOutput sentinel carries the real cause.
+ let capturedProviderError;
  const stream = await streamText({
  model,
  messages: messages,
@@ -126,6 +129,14 @@ export class AzureOpenAIProvider extends BaseProvider {
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
  experimental_repairToolCall: this.getToolCallRepairFn(options),
+ onError: (event) => {
+ capturedProviderError = event.error;
+ logger.error("AzureOpenAI: Stream error", {
+ error: event.error instanceof Error
+ ? event.error.message
+ : String(event.error),
+ });
+ },
  onStepFinish: (event) => {
  emitToolEndFromStepFinish(this.neurolink?.getEventEmitter(), event.toolResults);
  this.handleToolExecutionStorage([...event.toolCalls], [...event.toolResults], options, new Date()).catch((error) => {
@@ -138,7 +149,7 @@ export class AzureOpenAIProvider extends BaseProvider {
  });
  timeoutController?.cleanup();
  // Transform string stream to content object stream using BaseProvider method
- const transformedStream = this.createTextStream(stream);
+ const transformedStream = this.createTextStream(stream, () => capturedProviderError);
  return {
  stream: transformedStream,
  provider: "azure",
@@ -466,6 +466,9 @@ export class GoogleAIStudioProvider extends BaseProvider {
  const messages = await this.buildMessagesForStream(options);
  const collectedToolCalls = [];
  const collectedToolResults = [];
+ // Reviewer follow-up: capture upstream provider errors via onError
+ // so the post-stream NoOutput sentinel carries the real cause.
+ let capturedProviderError;
  const result = await streamText({
  model,
  messages: messages,
@@ -477,6 +480,14 @@ export class GoogleAIStudioProvider extends BaseProvider {
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
  experimental_repairToolCall: this.getToolCallRepairFn(options),
+ onError: (event) => {
+ capturedProviderError = event.error;
+ logger.error("GoogleAiStudio: Stream error", {
+ error: event.error instanceof Error
+ ? event.error.message
+ : String(event.error),
+ });
+ },
  // Gemini 3: use thinkingLevel via providerOptions
  // Gemini 2.5: use thinkingBudget via providerOptions
  ...(options.thinkingConfig?.enabled && {
@@ -540,7 +551,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
  })
  .finally(() => timeoutController?.cleanup());
  // Transform string stream to content object stream using BaseProvider method
- const transformedStream = this.createTextStream(result);
+ const transformedStream = this.createTextStream(result, () => capturedProviderError);
  // Create analytics promise that resolves after stream completion
  const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, toAnalyticsStreamResult(result), Date.now() - startTime, {
  requestId: `google-ai-stream-${Date.now()}`,
@@ -877,10 +877,16 @@ export class GoogleVertexProvider extends BaseProvider {
  }
  async executeAISDKStream(options, analysisSchema, modelName) {
  const functionTag = "GoogleVertexProvider.executeStream";
+ // Reviewer follow-up: include `capturedProviderError` in the
+ // tracking object so the streamText `onError` callback (in
+ // buildAISDKStreamOptions) can write to it; the post-stream
+ // NoOutput sentinel reads it via the `getUnderlyingError` getter
+ // passed to createTextStream.
  const tracking = {
  chunkCount: 0,
  collectedToolCalls: [],
  collectedToolResults: [],
+ capturedProviderError: undefined,
  };
  const timeoutController = createTimeoutController(this.getTimeout(options), this.providerName, "stream");
  try {
@@ -909,7 +915,7 @@ export class GoogleVertexProvider extends BaseProvider {
  timeoutController,
  });
  return {
- stream: this.createTextStream(result),
+ stream: this.createTextStream(result, () => tracking.capturedProviderError),
  provider: this.providerName,
  model: this.modelName,
  ...(shouldUseTools && {
@@ -1011,6 +1017,10 @@ export class GoogleVertexProvider extends BaseProvider {
  const errorMessage = event.error instanceof Error
  ? event.error.message
  : String(event.error);
+ // Reviewer follow-up: capture the upstream error so the
+ // post-stream NoOutput sentinel can surface it via
+ // providerError / modelResponseRaw.
+ tracking.capturedProviderError = event.error;
  logger.error(`${functionTag}: Stream error`, {
  provider: this.providerName,
  modelName: this.modelName,
@@ -5,6 +5,7 @@ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
  import { logger } from "../utils/logger.js";
+ import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
  import { createHuggingFaceConfig, getProviderModel, validateApiKey, } from "../utils/providerConfig.js";
  import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
  import { resolveToolChoice } from "../utils/toolChoice.js";
@@ -128,6 +129,10 @@ export class HuggingFaceProvider extends BaseProvider {
  ? { ...options, systemPrompt: streamOptions.system }
  : options;
  const messages = await this.buildMessagesForStream(messagesOptions);
+ // Reviewer follow-up: capture upstream provider errors via onError
+ // so the post-stream NoOutput detect can propagate the real cause
+ // into the sentinel's providerError / modelResponseRaw.
+ let capturedProviderError;
  const result = await streamText({
  model: this.model,
  messages: messages,
@@ -141,6 +146,14 @@ export class HuggingFaceProvider extends BaseProvider {
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
  experimental_repairToolCall: this.getToolCallRepairFn(options),
+ onError: (event) => {
+ capturedProviderError = event.error;
+ logger.error("HuggingFace: Stream error", {
+ error: event.error instanceof Error
+ ? event.error.message
+ : String(event.error),
+ });
+ },
  onStepFinish: ({ toolCalls, toolResults }) => {
  emitToolEndFromStepFinish(this.neurolink?.getEventEmitter(), toolResults);
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
@@ -154,19 +167,33 @@ export class HuggingFaceProvider extends BaseProvider {
  timeoutController?.cleanup();
  // Transform stream to match StreamResult interface with enhanced tool call parsing
  const transformedStream = async function* () {
+ let chunkCount = 0;
  try {
  for await (const chunk of result.textStream) {
+ chunkCount++;
  yield { content: chunk };
  }
  }
  catch (streamError) {
- // AI SDK v6 throws NoOutputGeneratedError when the stream produced no output.
  if (NoOutputGeneratedError.isInstance(streamError)) {
- logger.warn("HuggingFace: Stream produced no output (NoOutputGeneratedError)");
+ logger.warn("HuggingFace: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+ const sentinel = await buildNoOutputSentinel(streamError, result, capturedProviderError);
+ stampNoOutputSpan(sentinel);
+ yield sentinel;
  return;
  }
  throw streamError;
  }
+ // Curator P3-6 (round-2 fix): production trigger comes through
+ // the result.finishReason rejection, not textStream throws.
+ if (chunkCount === 0) {
+ const detected = await detectPostStreamNoOutput(result, capturedProviderError);
+ if (detected) {
+ logger.warn("HuggingFace: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+ stampNoOutputSpan(detected.sentinel);
+ yield detected.sentinel;
+ }
+ }
  };
  return {
  stream: transformedStream(),
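
The `detectPostStreamNoOutput` helper this generator calls lives in `utils/noOutputSentinel.js` (+193 lines), which this diff does not expand. From its call sites (awaited with `(result, capturedProviderError)`, returning `{ sentinel }` or a falsy value) and the comments about the `result.finishReason` rejection, it plausibly reduces to the following sketch; the body is an assumption, not the shipped implementation.

```typescript
import { NoOutputGeneratedError } from "ai";
import { buildNoOutputSentinel } from "../utils/noOutputSentinel.js";
import type { StreamNoOutputSentinel, StreamNoOutputSentinelResultLike } from "../types/noOutputSentinel.js";

// Hypothetical reconstruction: in the production no-output path the AI SDK
// does not throw from textStream; it rejects result.finishReason with
// NoOutputGeneratedError. Await it and convert that rejection into the
// enriched sentinel.
export async function detectPostStreamNoOutput(
  result: StreamNoOutputSentinelResultLike,
  capturedProviderError?: unknown,
): Promise<{ sentinel: StreamNoOutputSentinel } | undefined> {
  try {
    await result.finishReason;
    // Resolved normally: the stream produced zero text without an AI SDK
    // no-output error, so there is nothing to surface.
    return undefined;
  } catch (error) {
    if (NoOutputGeneratedError.isInstance(error)) {
      return { sentinel: await buildNoOutputSentinel(error, result, capturedProviderError) };
    }
    // Other rejections were already logged by the provider's onError;
    // leaving them unreported here is an assumption of this sketch.
    return undefined;
  }
}
```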
@@ -9,6 +9,7 @@ import { AuthenticationError, InvalidModelError, ModelAccessDeniedError, Network
  import { isAbortError } from "../utils/errorHandling.js";
  import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
  import { logger } from "../utils/logger.js";
+ import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
  import { calculateCost } from "../utils/pricing.js";
  import { getProviderModel } from "../utils/providerConfig.js";
  import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
@@ -140,6 +141,11 @@ export class LiteLLMProvider extends BaseProvider {
  this.validateStreamOptions(options);
  const startTime = Date.now();
  let chunkCount = 0; // Track chunk count for debugging
+ // Reviewer follow-up: capture upstream provider errors via onError so
+ // the post-stream NoOutput detect can propagate the *real* cause
+ // (content_filter, provider crash, etc.) into the sentinel's
+ // providerError / modelResponseRaw instead of "No output generated".
+ let capturedProviderError;
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
@@ -185,6 +191,10 @@ export class LiteLLMProvider extends BaseProvider {
  onError: (event) => {
  const error = event.error;
  const errorMessage = error instanceof Error ? error.message : String(error);
+ // Reviewer follow-up: propagate the captured error to the
+ // post-stream NoOutput sentinel so telemetry sees the real
+ // provider cause instead of "No output generated".
+ capturedProviderError = error;
  logger.error(`LiteLLM: Stream error`, {
  provider: this.providerName,
  modelName: this.modelName,
@@ -312,7 +322,7 @@ export class LiteLLMProvider extends BaseProvider {
  streamSpan.end();
  });
  timeoutController?.cleanup();
- const transformedStream = this.createLiteLLMTransformedStream(result);
+ const transformedStream = this.createLiteLLMTransformedStream(result, () => capturedProviderError);
  // Create analytics promise that resolves after stream completion
  const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
  requestId: options.requestId ??
@@ -339,7 +349,13 @@ export class LiteLLMProvider extends BaseProvider {
  throw this.handleProviderError(error);
  }
  }
- async *createLiteLLMTransformedStream(result) {
+ async *createLiteLLMTransformedStream(result, getCapturedProviderError) {
+ // Reviewer follow-up: gate the post-stream NoOutput detect on
+ // *content yielded*, not raw chunk count. AI SDK fullStream emits
+ // control events ({ type: "start" }, "step-start", etc.) before any
+ // text-delta — those incremented chunkCount and made the post-stream
+ // detect dead even when zero text was produced.
+ let contentYielded = 0;
  try {
  const streamToUse = result.fullStream || result.textStream;
  for await (const chunk of streamToUse) {
@@ -355,6 +371,7 @@ export class LiteLLMProvider extends BaseProvider {
  if ("textDelta" in chunk) {
  const textDelta = chunk.textDelta;
  if (textDelta) {
+ contentYielded++;
  yield { content: textDelta };
  }
  }
@@ -368,17 +385,40 @@ export class LiteLLMProvider extends BaseProvider {
  }
  }
  else if (typeof chunk === "string") {
+ contentYielded++;
  yield { content: chunk };
  }
  }
  }
  catch (streamError) {
  if (NoOutputGeneratedError.isInstance(streamError)) {
- logger.warn("LiteLLM: Stream produced no output (NoOutputGeneratedError) — propagating to fallback chain");
- throw streamError;
+ logger.warn("LiteLLM: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+ // Yield the enriched sentinel so downstream telemetry has
+ // finishReason / usage / providerError. Match the other
+ // providers' pattern: yield + return (no throw). NeuroLink's
+ // iteration fallback at neurolink.ts only fires for
+ // looksLikeModelAccessDenied errors, so a NoOutput throw here
+ // would NOT trigger any fallback — and it would mask the
+ // already-yielded sentinel from consumers expecting a clean
+ // stream. The sentinel itself signals the no-output condition.
+ const sentinel = await buildNoOutputSentinel(streamError, result, getCapturedProviderError?.());
+ stampNoOutputSpan(sentinel);
+ yield sentinel;
+ return;
  }
  throw streamError;
  }
+ // Curator P3-6 (round-2 fix): production trigger sets the error on
+ // result.finishReason rejection (NOT thrown from textStream).
+ // Surface that path here, matching the catch above (yield + return).
+ if (contentYielded === 0) {
+ const detected = await detectPostStreamNoOutput(result, getCapturedProviderError?.());
+ if (detected) {
+ logger.warn("LiteLLM: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+ stampNoOutputSpan(detected.sentinel);
+ yield detected.sentinel;
+ }
+ }
  }
  /**
  * Generate an embedding for a single text input
@@ -58,6 +58,9 @@ export class MistralProvider extends BaseProvider {
  // Using protected helper from BaseProvider to eliminate code duplication
  const messages = await this.buildMessagesForStream(options);
  const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
+ // Reviewer follow-up: capture upstream provider errors via onError
+ // so the post-stream NoOutput sentinel carries the real cause.
+ let capturedProviderError;
  const result = await streamText({
  model,
  messages: messages,
@@ -69,6 +72,14 @@ export class MistralProvider extends BaseProvider {
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
  experimental_repairToolCall: this.getToolCallRepairFn(options),
+ onError: (event) => {
+ capturedProviderError = event.error;
+ logger.error("Mistral: Stream error", {
+ error: event.error instanceof Error
+ ? event.error.message
+ : String(event.error),
+ });
+ },
  onStepFinish: ({ toolCalls, toolResults }) => {
  emitToolEndFromStepFinish(this.neurolink?.getEventEmitter(), toolResults);
  this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
@@ -81,7 +92,7 @@ export class MistralProvider extends BaseProvider {
  });
  timeoutController?.cleanup();
  // Transform string stream to content object stream using BaseProvider method
- const transformedStream = this.createTextStream(result);
+ const transformedStream = this.createTextStream(result, () => capturedProviderError);
  // Create analytics promise that resolves after stream completion
  const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, toAnalyticsStreamResult(result), Date.now() - startTime, {
  requestId: `mistral-stream-${Date.now()}`,
@@ -8,6 +8,7 @@ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, RateLimitError, } from "../types/index.js";
  import { logger } from "../utils/logger.js";
+ import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
  import { calculateCost } from "../utils/pricing.js";
  import { createOpenAIConfig, getProviderModel, validateApiKey, } from "../utils/providerConfig.js";
  import { isZodSchema } from "../utils/schemaConversion.js";
@@ -347,6 +348,10 @@ export class OpenAIProvider extends BaseProvider {
  "gen_ai.request.model": getModelId(model) || this.modelName || "unknown",
  },
  });
+ // Reviewer follow-up: capture upstream provider errors via onError
+ // so the post-stream NoOutput detect can propagate the *real* cause
+ // into the sentinel's providerError / modelResponseRaw.
+ let capturedProviderError;
  let result;
  try {
  result = streamText({
@@ -361,6 +366,14 @@ export class OpenAIProvider extends BaseProvider {
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
  experimental_repairToolCall: this.getToolCallRepairFn(options),
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
+ onError: (event) => {
+ capturedProviderError = event.error;
+ logger.error("OpenAI: Stream error", {
+ error: event.error instanceof Error
+ ? event.error.message
+ : String(event.error),
+ });
+ },
  onStepFinish: ({ toolCalls, toolResults }) => {
  logger.info("Tool execution completed", {
  toolResults,
@@ -424,7 +437,7 @@ export class OpenAIProvider extends BaseProvider {
  hasToolResults: !!result.toolResults,
  resultType: typeof result,
  });
- const transformedStream = this.createOpenAITransformedStream(result, shouldUseTools, tools);
+ const transformedStream = this.createOpenAITransformedStream(result, shouldUseTools, tools, () => capturedProviderError);
  // Create analytics promise that resolves after stream completion
  const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
  requestId: `openai-stream-${Date.now()}`,
@@ -446,7 +459,7 @@ export class OpenAIProvider extends BaseProvider {
  throw this.handleProviderError(error);
  }
  }
- async *createOpenAITransformedStream(result, shouldUseTools, tools) {
+ async *createOpenAITransformedStream(result, shouldUseTools, tools, getCapturedProviderError) {
  try {
  logger.debug(`OpenAI: Starting stream transformation`, {
  hasTextStream: !!result.textStream,
@@ -503,11 +516,29 @@ export class OpenAIProvider extends BaseProvider {
  });
  if (contentYielded === 0) {
  logger.warn(`OpenAI: No content was yielded from stream despite processing ${chunkCount} chunks`);
+ // Curator P3-6 (round-2 fix): when no content was yielded, the
+ // production trigger sets NoOutputGeneratedError on
+ // result.finishReason rejection (NOT on the textStream itself).
+ // Surface that rejection here so the enriched sentinel actually
+ // fires for real-world no-output streams.
+ const detected = await detectPostStreamNoOutput(result, getCapturedProviderError?.());
+ if (detected) {
+ logger.warn("OpenAI: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+ stampNoOutputSpan(detected.sentinel);
+ yield detected.sentinel;
+ }
  }
  }
  catch (streamError) {
  if (NoOutputGeneratedError.isInstance(streamError)) {
- logger.warn("OpenAI: Stream produced no output (NoOutputGeneratedError)");
+ logger.warn("OpenAI: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+ // Defensive: AI SDK *can* throw this from textStream in some
+ // failure modes (catastrophic transform errors). Keep this path
+ // for completeness; the production trigger goes through the
+ // post-loop detect above.
+ const sentinel = await buildNoOutputSentinel(streamError, result, getCapturedProviderError?.());
+ stampNoOutputSpan(sentinel);
+ yield sentinel;
  return;
  }
  logger.error(`OpenAI: Stream transformation error:`, streamError);
@@ -8,6 +8,7 @@ import { createProxyFetch } from "../proxy/proxyFetch.js";
  import { isAbortError } from "../utils/errorHandling.js";
  import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
  import { logger } from "../utils/logger.js";
+ import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
  import { getProviderModel } from "../utils/providerConfig.js";
  import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
  import { resolveToolChoice } from "../utils/toolChoice.js";
@@ -218,6 +219,12 @@ export class OpenRouterProvider extends BaseProvider {
  this.validateStreamOptions(options);
  const startTime = Date.now();
  let chunkCount = 0; // Track chunk count for debugging
+ // Reviewer follow-up: capture upstream provider errors via onError so
+ // the post-stream NoOutput detect can propagate the *real* cause
+ // (e.g. content_filter, provider crash) into the sentinel's
+ // providerError / modelResponseRaw instead of the AI SDK's generic
+ // "No output generated" message.
+ let capturedProviderError;
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
@@ -257,6 +264,10 @@ export class OpenRouterProvider extends BaseProvider {
  onError: (event) => {
  const error = event.error;
  const errorMessage = error instanceof Error ? error.message : String(error);
+ // Reviewer follow-up: propagate the captured error to the
+ // post-stream NoOutput sentinel so telemetry sees the real
+ // provider cause instead of "No output generated".
+ capturedProviderError = error;
  logger.error(`OpenRouter: Stream error`, {
  provider: this.providerName,
  modelName: this.modelName,
@@ -315,6 +326,12 @@ export class OpenRouterProvider extends BaseProvider {
  .finally(() => timeoutController?.cleanup());
  // Transform stream to content object stream using fullStream (handles both text and tool calls)
  const transformedStream = (async function* () {
+ // Reviewer follow-up: gate the post-stream NoOutput detect on
+ // *content yielded*, not raw chunk count. AI SDK fullStream emits
+ // control events ({ type: "start" }, "step-start", etc.) before
+ // any text-delta — those incremented `chunkCount` and made the
+ // post-stream check dead even when zero text was produced.
+ let contentYielded = 0;
  try {
  // Try fullStream first (handles both text and tool calls), fallback to textStream
  const streamToUse = result.fullStream || result.textStream;
@@ -335,6 +352,7 @@ export class OpenRouterProvider extends BaseProvider {
  // Text delta from fullStream
  const textDelta = chunk.textDelta;
  if (textDelta) {
+ contentYielded++;
  yield { content: textDelta };
  }
  }
@@ -352,18 +370,31 @@ export class OpenRouterProvider extends BaseProvider {
  }
  else if (typeof chunk === "string") {
  // Direct string chunk from textStream fallback
+ contentYielded++;
  yield { content: chunk };
  }
  }
  }
  catch (streamError) {
- // AI SDK v6 throws NoOutputGeneratedError when the stream produced no output.
  if (NoOutputGeneratedError.isInstance(streamError)) {
- logger.warn("OpenRouter: Stream produced no output (NoOutputGeneratedError)");
+ logger.warn("OpenRouter: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+ const sentinel = await buildNoOutputSentinel(streamError, result, capturedProviderError);
+ stampNoOutputSpan(sentinel);
+ yield sentinel;
  return;
  }
  throw streamError;
  }
+ // Curator P3-6 (round-2 fix): production trigger comes through
+ // result.finishReason rejection, not textStream throws.
+ if (contentYielded === 0) {
+ const detected = await detectPostStreamNoOutput(result, capturedProviderError);
+ if (detected) {
+ logger.warn("OpenRouter: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+ stampNoOutputSpan(detected.sentinel);
+ yield detected.sentinel;
+ }
+ }
  })();
  // Create analytics promise that resolves after stream completion
  const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
@@ -6,6 +6,7 @@ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
  import { logger } from "../utils/logger.js";
+ import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
  import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
  import { resolveToolChoice } from "../utils/toolChoice.js";
  import { toAnalyticsStreamResult } from "./providerTypeUtils.js";
@@ -182,6 +183,10 @@ export class OpenAICompatibleProvider extends BaseProvider {
  // Using protected helper from BaseProvider to eliminate code duplication
  const messages = await this.buildMessagesForStream(options);
  const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
+ // Reviewer follow-up: capture upstream provider errors via onError
+ // so the post-stream NoOutput detect can propagate the real cause
+ // into the sentinel's providerError / modelResponseRaw.
+ let capturedProviderError;
  const result = streamText({
  model,
  messages: messages,
@@ -197,6 +202,14 @@ export class OpenAICompatibleProvider extends BaseProvider {
  abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
  experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
  experimental_repairToolCall: this.getToolCallRepairFn(options),
+ onError: (event) => {
+ capturedProviderError = event.error;
+ logger.error("OpenAI-compatible: Stream error", {
+ error: event.error instanceof Error
+ ? event.error.message
+ : String(event.error),
+ });
+ },
  onStepFinish: (event) => {
  emitToolEndFromStepFinish(this.neurolink?.getEventEmitter(), event.toolResults);
  this.handleToolExecutionStorage([...event.toolCalls], [...event.toolResults], options, new Date()).catch((error) => {
@@ -210,19 +223,38 @@ export class OpenAICompatibleProvider extends BaseProvider {
  timeoutController?.cleanup();
  // Transform stream to match StreamResult interface
  const transformedStream = async function* () {
+ let chunkCount = 0;
  try {
  for await (const chunk of result.textStream) {
+ chunkCount++;
  yield { content: chunk };
  }
  }
  catch (streamError) {
- // AI SDK v6 throws NoOutputGeneratedError when the stream produced no output.
+ // AI SDK v6 *can* throw NoOutputGeneratedError from textStream
+ // iteration in some failure modes (e.g. catastrophic transform
+ // errors); keep this catch as a defensive path.
  if (NoOutputGeneratedError.isInstance(streamError)) {
- logger.warn("OpenAI-compatible: Stream produced no output (NoOutputGeneratedError)");
+ logger.warn("OpenAI-compatible: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+ const sentinel = await buildNoOutputSentinel(streamError, result, capturedProviderError);
+ stampNoOutputSpan(sentinel);
+ yield sentinel;
  return;
  }
  throw streamError;
  }
+ // Curator P3-6 (round-2 fix): the production trigger doesn't
+ // throw from textStream — AI SDK rejects `result.finishReason`
+ // instead. Surface that rejection here so the enriched sentinel
+ // actually fires for real-world no-output streams.
+ if (chunkCount === 0) {
+ const detected = await detectPostStreamNoOutput(result, capturedProviderError);
+ if (detected) {
+ logger.warn("OpenAI-compatible: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+ stampNoOutputSpan(detected.sentinel);
+ yield detected.sentinel;
+ }
+ }
  };
  // Create analytics promise that resolves after stream completion
  const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, toAnalyticsStreamResult(result), Date.now() - startTime, {
@@ -181,8 +181,13 @@ function applyNonErrorLangfuseLevel(attrs) {
  }
  if (attrs["neurolink.no_output"] === true) {
  attrs["langfuse.level"] = "WARNING";
- attrs["langfuse.status_message"] =
- "Stream produced no output (NoOutputGeneratedError)";
+ // Preserve any enriched status message StreamHandler already set
+ // (carries finishReason / token counts via buildNoOutputStatusMessage).
+ // Only fall back to the generic message when none was set upstream.
+ if (typeof attrs["langfuse.status_message"] !== "string") {
+ attrs["langfuse.status_message"] =
+ "Stream produced no output (NoOutputGeneratedError)";
+ }
  return;
  }
  if (reasonStr === "aborted") {
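
`stampNoOutputSpan` is the third helper the providers import, and it is likewise not expanded in this diff. Given that `applyNonErrorLangfuseLevel` above keys off the `neurolink.no_output` and `langfuse.status_message` span attributes, a plausible sketch follows; the OpenTelemetry wiring and the message format are assumptions (per the comment above, the shipped enriched message comes from a `buildNoOutputStatusMessage` in StreamHandler).

```typescript
import { trace } from "@opentelemetry/api";
import type { StreamNoOutputSentinel } from "../types/noOutputSentinel.js";

// Hypothetical sketch: stamp the active span so applyNonErrorLangfuseLevel
// finds neurolink.no_output === true plus an enriched status message that
// it now preserves instead of overwriting with the generic fallback.
export function stampNoOutputSpan(sentinel: StreamNoOutputSentinel): void {
  const span = trace.getActiveSpan();
  if (!span) {
    return; // no active span to annotate
  }
  span.setAttribute("neurolink.no_output", true);
  span.setAttribute(
    "langfuse.status_message",
    // Assumed format; the shipped message is built by buildNoOutputStatusMessage.
    `Stream produced no output (finishReason: ${String(sentinel.metadata.finishReason)}, providerError: ${sentinel.metadata.providerError})`,
  );
}
```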
@@ -58,3 +58,4 @@ export * from "./imageGen.js";
  export * from "./elicitation.js";
  export * from "./dynamic.js";
  export * from "./streamDedup.js";
+ export * from "./noOutputSentinel.js";
@@ -62,3 +62,5 @@ export * from "./elicitation.js";
  export * from "./dynamic.js";
  // Curator P2-4 dedup: per-stream AsyncLocalStorage context
  export * from "./streamDedup.js";
+ // Curator P3-6: NoOutputGeneratedError sentinel chunk shape
+ export * from "./noOutputSentinel.js";
@@ -0,0 +1,26 @@
+ /**
+ * Curator P3-6: shape of the sentinel chunk yielded by every provider's
+ * stream-transformation generator when AI SDK throws
+ * `NoOutputGeneratedError`. Built by `buildNoOutputSentinel` in
+ * `src/lib/utils/noOutputSentinel.ts`.
+ */
+ export type StreamNoOutputSentinel = {
+ content: "";
+ metadata: {
+ noOutput: true;
+ errorType: "NoOutputGeneratedError";
+ finishReason: unknown;
+ usage: unknown;
+ providerError: string;
+ modelResponseRaw: string | undefined;
+ };
+ };
+ /**
+ * Subset of AI SDK's `StreamTextResult` that the sentinel builder reads.
+ * Both fields are Promises in production but typed loosely so callers
+ * can pass either the Promise or a resolved value.
+ */
+ export type StreamNoOutputSentinelResultLike = {
+ finishReason?: Promise<unknown> | unknown;
+ totalUsage?: Promise<unknown> | unknown;
+ };
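
Against these declarations, `buildNoOutputSentinel` plausibly reduces to the sketch below. The one detail the doc comment pins down is the defensive settling of `finishReason` and `totalUsage`: in the production path those promises reject with the very `NoOutputGeneratedError` being handled, so awaiting them bare would rethrow. The `modelResponseRaw` sourcing and the rest of the body are assumptions, not the shipped code.

```typescript
import type { StreamNoOutputSentinel, StreamNoOutputSentinelResultLike } from "./noOutputSentinel.js";

// Settle a maybe-promise without letting a rejection escape: in the
// no-output path these promises reject with the error we are handling.
const settle = (value: Promise<unknown> | unknown): Promise<unknown> =>
  Promise.resolve(value).catch(() => undefined);

// Hypothetical sketch of buildNoOutputSentinel; the shipped version lives
// in utils/noOutputSentinel.js (+193 lines, not expanded in this diff).
export async function buildNoOutputSentinel(
  error: Error,
  result: StreamNoOutputSentinelResultLike,
  capturedProviderError?: unknown,
): Promise<StreamNoOutputSentinel> {
  return {
    content: "", // empty so consumers that only concatenate text are unaffected
    metadata: {
      noOutput: true,
      errorType: "NoOutputGeneratedError",
      finishReason: await settle(result.finishReason),
      usage: await settle(result.totalUsage),
      // Prefer the real upstream cause captured via onError over the
      // AI SDK's generic "No output generated" message.
      providerError:
        capturedProviderError instanceof Error
          ? capturedProviderError.message
          : capturedProviderError !== undefined
            ? String(capturedProviderError)
            : error.message,
      // Assumed: raw provider response body when the error carries one.
      modelResponseRaw: (error as { responseBody?: string }).responseBody,
    },
  };
}
```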
@@ -0,0 +1 @@
+ export {};
@@ -8,6 +8,7 @@ import type { TokenUsage } from "./analytics.js";
  import type { JsonValue, UnknownRecord } from "./common.js";
  import type { Content, ImageWithAltText } from "./content.js";
  import type { ChatMessage } from "./conversation.js";
+ import type { StreamNoOutputSentinel } from "./noOutputSentinel.js";
  import type { AdditionalMemoryUser } from "./generate.js";
  import type { AIModelProviderConfig, NeurolinkCredentials } from "./providers.js";
  import type { TTSChunk, TTSOptions } from "./tts.js";
@@ -487,7 +488,7 @@ export type StreamOptions = {
  export type StreamResult = {
  stream: AsyncIterable<{
  content: string;
- } | {
+ } | StreamNoOutputSentinel | {
  type: "audio";
  audio: AudioChunk;
  } | {
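
For consumers, the widened `StreamResult` union means a stream chunk may now be the sentinel rather than plain text. A minimal handling sketch, assuming the package root re-exports these types from `types/index`:

```typescript
import type { StreamNoOutputSentinel, StreamResult } from "@juspay/neurolink";

// Narrow a stream chunk to the no-output sentinel via its metadata marker.
function isNoOutputSentinel(chunk: unknown): chunk is StreamNoOutputSentinel {
  return (
    typeof chunk === "object" &&
    chunk !== null &&
    "metadata" in chunk &&
    (chunk as { metadata?: { noOutput?: unknown } }).metadata?.noOutput === true
  );
}

async function printStream(result: StreamResult): Promise<void> {
  for await (const chunk of result.stream) {
    if (isNoOutputSentinel(chunk)) {
      // content is "" so display logic can skip it; the metadata carries
      // the enriched cause for logging or telemetry.
      console.warn("stream produced no output:", chunk.metadata.providerError);
      continue;
    }
    if ("content" in chunk && typeof chunk.content === "string") {
      process.stdout.write(chunk.content);
    }
  }
}
```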