@juspay/neurolink 9.59.2 → 9.59.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/browser/neurolink.min.js +355 -355
- package/dist/cli/commands/proxy.js +10 -5
- package/dist/core/baseProvider.d.ts +10 -3
- package/dist/core/baseProvider.js +8 -3
- package/dist/core/modules/StreamHandler.d.ts +22 -3
- package/dist/core/modules/StreamHandler.js +42 -20
- package/dist/lib/core/baseProvider.d.ts +10 -3
- package/dist/lib/core/baseProvider.js +8 -3
- package/dist/lib/core/modules/StreamHandler.d.ts +22 -3
- package/dist/lib/core/modules/StreamHandler.js +42 -20
- package/dist/lib/neurolink.js +57 -3
- package/dist/lib/providers/anthropic.js +13 -1
- package/dist/lib/providers/anthropicBaseProvider.js +30 -2
- package/dist/lib/providers/azureOpenai.js +12 -1
- package/dist/lib/providers/googleAiStudio.js +12 -1
- package/dist/lib/providers/googleVertex.js +11 -1
- package/dist/lib/providers/huggingFace.js +29 -2
- package/dist/lib/providers/litellm.js +44 -4
- package/dist/lib/providers/mistral.js +12 -1
- package/dist/lib/providers/openAI.js +34 -3
- package/dist/lib/providers/openRouter.js +33 -2
- package/dist/lib/providers/openaiCompatible.js +34 -2
- package/dist/lib/services/server/ai/observability/instrumentation.js +7 -2
- package/dist/lib/types/index.d.ts +1 -0
- package/dist/lib/types/index.js +2 -0
- package/dist/lib/types/noOutputSentinel.d.ts +26 -0
- package/dist/lib/types/noOutputSentinel.js +2 -0
- package/dist/lib/types/stream.d.ts +2 -1
- package/dist/lib/utils/noOutputSentinel.d.ts +80 -0
- package/dist/lib/utils/noOutputSentinel.js +193 -0
- package/dist/neurolink.js +57 -3
- package/dist/providers/anthropic.js +13 -1
- package/dist/providers/anthropicBaseProvider.js +30 -2
- package/dist/providers/azureOpenai.js +12 -1
- package/dist/providers/googleAiStudio.js +12 -1
- package/dist/providers/googleVertex.js +11 -1
- package/dist/providers/huggingFace.js +29 -2
- package/dist/providers/litellm.js +44 -4
- package/dist/providers/mistral.js +12 -1
- package/dist/providers/openAI.js +34 -3
- package/dist/providers/openRouter.js +33 -2
- package/dist/providers/openaiCompatible.js +34 -2
- package/dist/services/server/ai/observability/instrumentation.js +7 -2
- package/dist/types/index.d.ts +1 -0
- package/dist/types/index.js +2 -0
- package/dist/types/noOutputSentinel.d.ts +26 -0
- package/dist/types/noOutputSentinel.js +1 -0
- package/dist/types/stream.d.ts +2 -1
- package/dist/utils/noOutputSentinel.d.ts +80 -0
- package/dist/utils/noOutputSentinel.js +192 -0
- package/package.json +1 -1
@@ -9,6 +9,7 @@ import { AuthenticationError, InvalidModelError, ModelAccessDeniedError, Network
 import { isAbortError } from "../utils/errorHandling.js";
 import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
 import { logger } from "../utils/logger.js";
+import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
 import { calculateCost } from "../utils/pricing.js";
 import { getProviderModel } from "../utils/providerConfig.js";
 import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
@@ -140,6 +141,11 @@ export class LiteLLMProvider extends BaseProvider {
         this.validateStreamOptions(options);
         const startTime = Date.now();
         let chunkCount = 0; // Track chunk count for debugging
+        // Reviewer follow-up: capture upstream provider errors via onError so
+        // the post-stream NoOutput detect can propagate the *real* cause
+        // (content_filter, provider crash, etc.) into the sentinel's
+        // providerError / modelResponseRaw instead of "No output generated".
+        let capturedProviderError;
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
@@ -185,6 +191,10 @@ export class LiteLLMProvider extends BaseProvider {
                 onError: (event) => {
                     const error = event.error;
                     const errorMessage = error instanceof Error ? error.message : String(error);
+                    // Reviewer follow-up: propagate the captured error to the
+                    // post-stream NoOutput sentinel so telemetry sees the real
+                    // provider cause instead of "No output generated".
+                    capturedProviderError = error;
                     logger.error(`LiteLLM: Stream error`, {
                         provider: this.providerName,
                         modelName: this.modelName,
@@ -312,7 +322,7 @@ export class LiteLLMProvider extends BaseProvider {
             streamSpan.end();
         });
         timeoutController?.cleanup();
-        const transformedStream = this.createLiteLLMTransformedStream(result);
+        const transformedStream = this.createLiteLLMTransformedStream(result, () => capturedProviderError);
         // Create analytics promise that resolves after stream completion
         const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
             requestId: options.requestId ??
@@ -339,7 +349,13 @@ export class LiteLLMProvider extends BaseProvider {
             throw this.handleProviderError(error);
         }
     }
-    async *createLiteLLMTransformedStream(result) {
+    async *createLiteLLMTransformedStream(result, getCapturedProviderError) {
+        // Reviewer follow-up: gate the post-stream NoOutput detect on
+        // *content yielded*, not raw chunk count. AI SDK fullStream emits
+        // control events ({ type: "start" }, "step-start", etc.) before any
+        // text-delta — those incremented chunkCount and made the post-stream
+        // detect dead even when zero text was produced.
+        let contentYielded = 0;
         try {
             const streamToUse = result.fullStream || result.textStream;
             for await (const chunk of streamToUse) {
@@ -355,6 +371,7 @@ export class LiteLLMProvider extends BaseProvider {
                 if ("textDelta" in chunk) {
                     const textDelta = chunk.textDelta;
                     if (textDelta) {
+                        contentYielded++;
                         yield { content: textDelta };
                     }
                 }
@@ -368,17 +385,40 @@ export class LiteLLMProvider extends BaseProvider {
                     }
                 }
                 else if (typeof chunk === "string") {
+                    contentYielded++;
                     yield { content: chunk };
                 }
             }
         }
         catch (streamError) {
             if (NoOutputGeneratedError.isInstance(streamError)) {
-                logger.warn("LiteLLM: Stream produced no output (NoOutputGeneratedError) —
-
+                logger.warn("LiteLLM: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+                // Yield the enriched sentinel so downstream telemetry has
+                // finishReason / usage / providerError. Match the other
+                // providers' pattern: yield + return (no throw). NeuroLink's
+                // iteration fallback at neurolink.ts only fires for
+                // looksLikeModelAccessDenied errors, so a NoOutput throw here
+                // would NOT trigger any fallback — and it would mask the
+                // already-yielded sentinel from consumers expecting a clean
+                // stream. The sentinel itself signals the no-output condition.
+                const sentinel = await buildNoOutputSentinel(streamError, result, getCapturedProviderError?.());
+                stampNoOutputSpan(sentinel);
+                yield sentinel;
+                return;
             }
             throw streamError;
         }
+        // Curator P3-6 (round-2 fix): production trigger sets the error on
+        // result.finishReason rejection (NOT thrown from textStream).
+        // Surface that path here, matching the catch above (yield + return).
+        if (contentYielded === 0) {
+            const detected = await detectPostStreamNoOutput(result, getCapturedProviderError?.());
+            if (detected) {
+                logger.warn("LiteLLM: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+                stampNoOutputSpan(detected.sentinel);
+                yield detected.sentinel;
+            }
+        }
     }
     /**
      * Generate an embedding for a single text input
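A note on the `() => capturedProviderError` argument threaded through the LiteLLM hunks above: the transform generator is created before streamText's `onError` callback has had a chance to fire, so the provider hands over a getter rather than the (still undefined) value, and the generator reads the latest assignment only at detect time. A minimal, self-contained TypeScript sketch of that late-read pattern; the names below are illustrative, not the package's:

    // Illustrative only: why a thunk is passed instead of the value itself.
    let capturedProviderError: unknown; // assigned later, e.g. by streamText's onError

    async function* transformedStream(getError: () => unknown) {
        // ...suppose the upstream stream ends without yielding any text...
        const cause = getError(); // reads the latest assignment, not the value at creation time
        yield { content: "", cause };
    }

    async function demo() {
        const stream = transformedStream(() => capturedProviderError);
        capturedProviderError = new Error("content_filter"); // onError fires before iteration completes
        for await (const chunk of stream) {
            console.log(chunk.cause); // Error: content_filter
        }
    }

    void demo();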
@@ -58,6 +58,9 @@ export class MistralProvider extends BaseProvider {
         // Using protected helper from BaseProvider to eliminate code duplication
         const messages = await this.buildMessagesForStream(options);
         const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
+        // Reviewer follow-up: capture upstream provider errors via onError
+        // so the post-stream NoOutput sentinel carries the real cause.
+        let capturedProviderError;
         const result = await streamText({
             model,
             messages: messages,
@@ -69,6 +72,14 @@ export class MistralProvider extends BaseProvider {
             abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
             experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
             experimental_repairToolCall: this.getToolCallRepairFn(options),
+            onError: (event) => {
+                capturedProviderError = event.error;
+                logger.error("Mistral: Stream error", {
+                    error: event.error instanceof Error
+                        ? event.error.message
+                        : String(event.error),
+                });
+            },
             onStepFinish: ({ toolCalls, toolResults }) => {
                 emitToolEndFromStepFinish(this.neurolink?.getEventEmitter(), toolResults);
                 this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
@@ -81,7 +92,7 @@ export class MistralProvider extends BaseProvider {
         });
         timeoutController?.cleanup();
         // Transform string stream to content object stream using BaseProvider method
-        const transformedStream = this.createTextStream(result);
+        const transformedStream = this.createTextStream(result, () => capturedProviderError);
         // Create analytics promise that resolves after stream completion
         const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, toAnalyticsStreamResult(result), Date.now() - startTime, {
             requestId: `mistral-stream-${Date.now()}`,
package/dist/providers/openAI.js CHANGED

@@ -8,6 +8,7 @@ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, RateLimitError, } from "../types/index.js";
 import { logger } from "../utils/logger.js";
+import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
 import { calculateCost } from "../utils/pricing.js";
 import { createOpenAIConfig, getProviderModel, validateApiKey, } from "../utils/providerConfig.js";
 import { isZodSchema } from "../utils/schemaConversion.js";
@@ -347,6 +348,10 @@ export class OpenAIProvider extends BaseProvider {
                 "gen_ai.request.model": getModelId(model) || this.modelName || "unknown",
             },
         });
+        // Reviewer follow-up: capture upstream provider errors via onError
+        // so the post-stream NoOutput detect can propagate the *real* cause
+        // into the sentinel's providerError / modelResponseRaw.
+        let capturedProviderError;
         let result;
         try {
             result = streamText({
@@ -361,6 +366,14 @@ export class OpenAIProvider extends BaseProvider {
                 abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
                 experimental_repairToolCall: this.getToolCallRepairFn(options),
                 experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
+                onError: (event) => {
+                    capturedProviderError = event.error;
+                    logger.error("OpenAI: Stream error", {
+                        error: event.error instanceof Error
+                            ? event.error.message
+                            : String(event.error),
+                    });
+                },
                 onStepFinish: ({ toolCalls, toolResults }) => {
                     logger.info("Tool execution completed", {
                         toolResults,
@@ -424,7 +437,7 @@ export class OpenAIProvider extends BaseProvider {
             hasToolResults: !!result.toolResults,
             resultType: typeof result,
         });
-        const transformedStream = this.createOpenAITransformedStream(result, shouldUseTools, tools);
+        const transformedStream = this.createOpenAITransformedStream(result, shouldUseTools, tools, () => capturedProviderError);
         // Create analytics promise that resolves after stream completion
         const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
             requestId: `openai-stream-${Date.now()}`,
@@ -446,7 +459,7 @@ export class OpenAIProvider extends BaseProvider {
             throw this.handleProviderError(error);
         }
     }
-    async *createOpenAITransformedStream(result, shouldUseTools, tools) {
+    async *createOpenAITransformedStream(result, shouldUseTools, tools, getCapturedProviderError) {
         try {
             logger.debug(`OpenAI: Starting stream transformation`, {
                 hasTextStream: !!result.textStream,
@@ -503,11 +516,29 @@ export class OpenAIProvider extends BaseProvider {
             });
             if (contentYielded === 0) {
                 logger.warn(`OpenAI: No content was yielded from stream despite processing ${chunkCount} chunks`);
+                // Curator P3-6 (round-2 fix): when no content was yielded, the
+                // production trigger sets NoOutputGeneratedError on
+                // result.finishReason rejection (NOT on the textStream itself).
+                // Surface that rejection here so the enriched sentinel actually
+                // fires for real-world no-output streams.
+                const detected = await detectPostStreamNoOutput(result, getCapturedProviderError?.());
+                if (detected) {
+                    logger.warn("OpenAI: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+                    stampNoOutputSpan(detected.sentinel);
+                    yield detected.sentinel;
+                }
             }
         }
         catch (streamError) {
             if (NoOutputGeneratedError.isInstance(streamError)) {
-                logger.warn("OpenAI: Stream produced no output (NoOutputGeneratedError)");
+                logger.warn("OpenAI: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+                // Defensive: AI SDK *can* throw this from textStream in some
+                // failure modes (catastrophic transform errors). Keep this path
+                // for completeness; the production trigger goes through the
+                // post-loop detect above.
+                const sentinel = await buildNoOutputSentinel(streamError, result, getCapturedProviderError?.());
+                stampNoOutputSpan(sentinel);
+                yield sentinel;
                 return;
             }
             logger.error(`OpenAI: Stream transformation error:`, streamError);
@@ -8,6 +8,7 @@ import { createProxyFetch } from "../proxy/proxyFetch.js";
 import { isAbortError } from "../utils/errorHandling.js";
 import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
 import { logger } from "../utils/logger.js";
+import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
 import { getProviderModel } from "../utils/providerConfig.js";
 import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
 import { resolveToolChoice } from "../utils/toolChoice.js";
@@ -218,6 +219,12 @@ export class OpenRouterProvider extends BaseProvider {
         this.validateStreamOptions(options);
         const startTime = Date.now();
         let chunkCount = 0; // Track chunk count for debugging
+        // Reviewer follow-up: capture upstream provider errors via onError so
+        // the post-stream NoOutput detect can propagate the *real* cause
+        // (e.g. content_filter, provider crash) into the sentinel's
+        // providerError / modelResponseRaw instead of the AI SDK's generic
+        // "No output generated" message.
+        let capturedProviderError;
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
@@ -257,6 +264,10 @@ export class OpenRouterProvider extends BaseProvider {
                 onError: (event) => {
                     const error = event.error;
                     const errorMessage = error instanceof Error ? error.message : String(error);
+                    // Reviewer follow-up: propagate the captured error to the
+                    // post-stream NoOutput sentinel so telemetry sees the real
+                    // provider cause instead of "No output generated".
+                    capturedProviderError = error;
                     logger.error(`OpenRouter: Stream error`, {
                         provider: this.providerName,
                         modelName: this.modelName,
@@ -315,6 +326,12 @@ export class OpenRouterProvider extends BaseProvider {
             .finally(() => timeoutController?.cleanup());
         // Transform stream to content object stream using fullStream (handles both text and tool calls)
         const transformedStream = (async function* () {
+            // Reviewer follow-up: gate the post-stream NoOutput detect on
+            // *content yielded*, not raw chunk count. AI SDK fullStream emits
+            // control events ({ type: "start" }, "step-start", etc.) before
+            // any text-delta — those incremented `chunkCount` and made the
+            // post-stream check dead even when zero text was produced.
+            let contentYielded = 0;
             try {
                 // Try fullStream first (handles both text and tool calls), fallback to textStream
                 const streamToUse = result.fullStream || result.textStream;
@@ -335,6 +352,7 @@ export class OpenRouterProvider extends BaseProvider {
                         // Text delta from fullStream
                         const textDelta = chunk.textDelta;
                         if (textDelta) {
+                            contentYielded++;
                             yield { content: textDelta };
                         }
                     }
@@ -352,18 +370,31 @@ export class OpenRouterProvider extends BaseProvider {
                     }
                     else if (typeof chunk === "string") {
                         // Direct string chunk from textStream fallback
+                        contentYielded++;
                         yield { content: chunk };
                     }
                 }
             }
             catch (streamError) {
-                // AI SDK v6 throws NoOutputGeneratedError when the stream produced no output.
                 if (NoOutputGeneratedError.isInstance(streamError)) {
-                    logger.warn("OpenRouter: Stream produced no output (NoOutputGeneratedError)");
+                    logger.warn("OpenRouter: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+                    const sentinel = await buildNoOutputSentinel(streamError, result, capturedProviderError);
+                    stampNoOutputSpan(sentinel);
+                    yield sentinel;
                     return;
                 }
                 throw streamError;
             }
+            // Curator P3-6 (round-2 fix): production trigger comes through
+            // result.finishReason rejection, not textStream throws.
+            if (contentYielded === 0) {
+                const detected = await detectPostStreamNoOutput(result, capturedProviderError);
+                if (detected) {
+                    logger.warn("OpenRouter: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+                    stampNoOutputSpan(detected.sentinel);
+                    yield detected.sentinel;
+                }
+            }
         })();
         // Create analytics promise that resolves after stream completion
         const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
@@ -6,6 +6,7 @@ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 import { emitToolEndFromStepFinish } from "../utils/toolEndEmitter.js";
 import { logger } from "../utils/logger.js";
+import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../utils/noOutputSentinel.js";
 import { composeAbortSignals, createTimeoutController, TimeoutError, } from "../utils/timeout.js";
 import { resolveToolChoice } from "../utils/toolChoice.js";
 import { toAnalyticsStreamResult } from "./providerTypeUtils.js";
@@ -182,6 +183,10 @@ export class OpenAICompatibleProvider extends BaseProvider {
         // Using protected helper from BaseProvider to eliminate code duplication
         const messages = await this.buildMessagesForStream(options);
         const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
+        // Reviewer follow-up: capture upstream provider errors via onError
+        // so the post-stream NoOutput detect can propagate the real cause
+        // into the sentinel's providerError / modelResponseRaw.
+        let capturedProviderError;
         const result = streamText({
             model,
             messages: messages,
@@ -197,6 +202,14 @@ export class OpenAICompatibleProvider extends BaseProvider {
             abortSignal: composeAbortSignals(options.abortSignal, timeoutController?.controller.signal),
             experimental_telemetry: this.telemetryHandler.getTelemetryConfig(options),
             experimental_repairToolCall: this.getToolCallRepairFn(options),
+            onError: (event) => {
+                capturedProviderError = event.error;
+                logger.error("OpenAI-compatible: Stream error", {
+                    error: event.error instanceof Error
+                        ? event.error.message
+                        : String(event.error),
+                });
+            },
             onStepFinish: (event) => {
                 emitToolEndFromStepFinish(this.neurolink?.getEventEmitter(), event.toolResults);
                 this.handleToolExecutionStorage([...event.toolCalls], [...event.toolResults], options, new Date()).catch((error) => {
@@ -210,19 +223,38 @@ export class OpenAICompatibleProvider extends BaseProvider {
         timeoutController?.cleanup();
         // Transform stream to match StreamResult interface
         const transformedStream = async function* () {
+            let chunkCount = 0;
             try {
                 for await (const chunk of result.textStream) {
+                    chunkCount++;
                     yield { content: chunk };
                 }
             }
             catch (streamError) {
-                // AI SDK v6
+                // AI SDK v6 *can* throw NoOutputGeneratedError from textStream
+                // iteration in some failure modes (e.g. catastrophic transform
+                // errors); keep this catch as a defensive path.
                 if (NoOutputGeneratedError.isInstance(streamError)) {
-                    logger.warn("OpenAI-compatible: Stream produced no output (NoOutputGeneratedError)");
+                    logger.warn("OpenAI-compatible: Stream produced no output (NoOutputGeneratedError) — caught from textStream");
+                    const sentinel = await buildNoOutputSentinel(streamError, result, capturedProviderError);
+                    stampNoOutputSpan(sentinel);
+                    yield sentinel;
                     return;
                 }
                 throw streamError;
             }
+            // Curator P3-6 (round-2 fix): the production trigger doesn't
+            // throw from textStream — AI SDK rejects `result.finishReason`
+            // instead. Surface that rejection here so the enriched sentinel
+            // actually fires for real-world no-output streams.
+            if (chunkCount === 0) {
+                const detected = await detectPostStreamNoOutput(result, capturedProviderError);
+                if (detected) {
+                    logger.warn("OpenAI-compatible: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection");
+                    stampNoOutputSpan(detected.sentinel);
+                    yield detected.sentinel;
+                }
+            }
         };
         // Create analytics promise that resolves after stream completion
         const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, toAnalyticsStreamResult(result), Date.now() - startTime, {
@@ -181,8 +181,13 @@ function applyNonErrorLangfuseLevel(attrs) {
     }
     if (attrs["neurolink.no_output"] === true) {
         attrs["langfuse.level"] = "WARNING";
-
-
+        // Preserve any enriched status message StreamHandler already set
+        // (carries finishReason / token counts via buildNoOutputStatusMessage).
+        // Only fall back to the generic message when none was set upstream.
+        if (typeof attrs["langfuse.status_message"] !== "string") {
+            attrs["langfuse.status_message"] =
+                "Stream produced no output (NoOutputGeneratedError)";
+        }
         return;
     }
     if (reasonStr === "aborted") {
package/dist/types/index.d.ts CHANGED
package/dist/types/index.js CHANGED

@@ -62,3 +62,5 @@ export * from "./elicitation.js";
 export * from "./dynamic.js";
 // Curator P2-4 dedup: per-stream AsyncLocalStorage context
 export * from "./streamDedup.js";
+// Curator P3-6: NoOutputGeneratedError sentinel chunk shape
+export * from "./noOutputSentinel.js";
@@ -0,0 +1,26 @@
+/**
+ * Curator P3-6: shape of the sentinel chunk yielded by every provider's
+ * stream-transformation generator when AI SDK throws
+ * `NoOutputGeneratedError`. Built by `buildNoOutputSentinel` in
+ * `src/lib/utils/noOutputSentinel.ts`.
+ */
+export type StreamNoOutputSentinel = {
+    content: "";
+    metadata: {
+        noOutput: true;
+        errorType: "NoOutputGeneratedError";
+        finishReason: unknown;
+        usage: unknown;
+        providerError: string;
+        modelResponseRaw: string | undefined;
+    };
+};
+/**
+ * Subset of AI SDK's `StreamTextResult` that the sentinel builder reads.
+ * Both fields are Promises in production but typed loosely so callers
+ * can pass either the Promise or a resolved value.
+ */
+export type StreamNoOutputSentinelResultLike = {
+    finishReason?: Promise<unknown> | unknown;
+    totalUsage?: Promise<unknown> | unknown;
+};
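For code consuming the published package, the sentinel shape above can be narrowed with an ordinary type guard. A small illustrative sketch; `isNoOutputSentinel` is not exported by the package, and it assumes `StreamNoOutputSentinel` is reachable through the re-export added to the types index above:

    import type { StreamNoOutputSentinel } from "@juspay/neurolink"; // assumes the root re-export

    // Hypothetical helper, not part of the package.
    export function isNoOutputSentinel(chunk: unknown): chunk is StreamNoOutputSentinel {
        return (
            typeof chunk === "object" &&
            chunk !== null &&
            "metadata" in chunk &&
            (chunk as { metadata?: { noOutput?: boolean } }).metadata?.noOutput === true
        );
    }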
@@ -0,0 +1 @@
+export {};
package/dist/types/stream.d.ts CHANGED

@@ -8,6 +8,7 @@ import type { TokenUsage } from "./analytics.js";
 import type { JsonValue, UnknownRecord } from "./common.js";
 import type { Content, ImageWithAltText } from "./content.js";
 import type { ChatMessage } from "./conversation.js";
+import type { StreamNoOutputSentinel } from "./noOutputSentinel.js";
 import type { AdditionalMemoryUser } from "./generate.js";
 import type { AIModelProviderConfig, NeurolinkCredentials } from "./providers.js";
 import type { TTSChunk, TTSOptions } from "./tts.js";
@@ -487,7 +488,7 @@ export type StreamOptions = {
 export type StreamResult = {
     stream: AsyncIterable<{
         content: string;
-    } | {
+    } | StreamNoOutputSentinel | {
         type: "audio";
         audio: AudioChunk;
     } | {
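With `StreamNoOutputSentinel` added to the `StreamResult.stream` union above, consumers can no longer assume every chunk carries text. A hedged consumer sketch follows; it handles only the two union members relevant here (the real union also includes audio and other chunk shapes), and it assumes the sentinel type is re-exported from the package root:

    import type { StreamNoOutputSentinel } from "@juspay/neurolink"; // assumed re-export

    async function collectText(
        stream: AsyncIterable<{ content: string } | StreamNoOutputSentinel>,
    ): Promise<string> {
        let text = "";
        for await (const chunk of stream) {
            if ("metadata" in chunk && chunk.metadata.noOutput) {
                // Sentinel chunk: the model produced no text; surface the enriched context.
                console.warn("No output generated:", chunk.metadata.finishReason, chunk.metadata.providerError);
                continue;
            }
            text += chunk.content;
        }
        return text;
    }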
@@ -0,0 +1,80 @@
+/**
+ * Curator P3-6: shared builder for the `NoOutputGeneratedError` sentinel
+ * chunk. Each provider's stream-transformation generator catches the AI
+ * SDK's `NoOutputGeneratedError` and yields this sentinel so downstream
+ * telemetry has finish reason + token usage + provider error context
+ * instead of just `{ noOutput: true, errorType: "..." }`.
+ *
+ * The AI SDK rejects `result.finishReason` / `result.totalUsage` in this
+ * branch today (see `ai/src/generate-text/stream-text.ts` ~L1078); we
+ * still attempt to await them so a future SDK version surfacing partial
+ * values populates the sentinel automatically. When they reject we keep
+ * conservative defaults (`finishReason: "error"`, zero usage).
+ */
+import type { StreamNoOutputSentinel, StreamNoOutputSentinelResultLike } from "../types/index.js";
+export declare function buildNoOutputSentinel(error: unknown, result?: StreamNoOutputSentinelResultLike,
+/**
+ * Reviewer follow-up: AI SDK v6 wraps the AI SDK's
+ * `NoOutputGeneratedError` without preserving the underlying provider
+ * error in `error.cause`, and rejects `result.finishReason` /
+ * `result.totalUsage` with the wrapped error too. To differentiate
+ * content-filter / stop-sequence / provider-crash, providers can
+ * capture the upstream error (e.g. via streamText's `onError`
+ * callback) and pass it here. When provided, it takes precedence
+ * over the AI SDK error for `providerError` and `modelResponseRaw`.
+ */
+underlyingError?: unknown): Promise<StreamNoOutputSentinel>;
+/**
+ * Curator P3-6 (round-2): the AI SDK v6 path that sets
+ * `NoOutputGeneratedError` does NOT throw it from `result.textStream`
+ * iteration — it sets the error as a *promise rejection* on
+ * `result.finishReason` / `result.totalUsage` / `result.steps` (see
+ * `ai/src/generate-text/stream-text.ts` ~L1078). Providers that only
+ * catch errors thrown from `for await (chunk of result.textStream)` will
+ * miss the production trigger entirely: the stream completes silently
+ * with 0 chunks and the rejection bubbles as an unhandled rejection.
+ *
+ * This helper surfaces the rejection by awaiting `result.finishReason`
+ * after the stream completes. Providers must call this AFTER iterating
+ * the textStream when 0 chunks were yielded — the returned sentinel
+ * (if non-null) carries the enriched metadata Curator's report needed.
+ */
+export declare function detectPostStreamNoOutput(result: StreamNoOutputSentinelResultLike,
+/**
+ * Optional provider-captured underlying error (e.g. from streamText's
+ * `onError` callback). When provided, the resulting sentinel will carry
+ * the real provider error in `providerError` / `modelResponseRaw`
+ * instead of the AI SDK's generic "No output generated" message.
+ */
+underlyingError?: unknown): Promise<{
+    sentinel: StreamNoOutputSentinel;
+    error: Error;
+} | null>;
+/**
+ * Reviewer follow-up: every provider's post-stream NoOutput detect must
+ * stamp the active OTel span so Pipeline B (`ContextEnricher.onEnd()` →
+ * `applyNonErrorLangfuseLevel`) surfaces a WARNING-level Langfuse
+ * observation with the enriched status message. Without this, only
+ * `StreamHandler`-based providers produced the rich telemetry; the
+ * provider-specific paths (openAI, openaiCompatible, litellm,
+ * huggingFace, openRouter, anthropicBaseProvider) yielded the sentinel
+ * to direct stream consumers but Pipeline B saw nothing.
+ *
+ * Stamps three attributes:
+ * - `neurolink.no_output = true` (Pipeline B trigger)
+ * - `langfuse.status_message` (enriched, with finishReason + tokens)
+ * - `neurolink.no_output.finish_reason` (raw finish reason)
+ *
+ * Safe to call when tracing isn't initialized — silently no-ops.
+ */
+export declare function stampNoOutputSpan(sentinel: StreamNoOutputSentinel): void;
+/**
+ * Build the OTel `langfuse.status_message` summary string for a no-output
+ * stream. Used by `StreamHandler.createTextStream` and any future provider
+ * that wants to stamp the active span with the same enriched message.
+ *
+ * Reviewer follow-up: AI SDK v4 used `promptTokens` / `completionTokens`,
+ * v6 uses `inputTokens` / `outputTokens`. Read both shapes so the message
+ * is correct whichever version surfaced partial usage data.
+ */
+export declare function buildNoOutputStatusMessage(finishReason: unknown, usage: unknown): string;