@juspay/neurolink 9.59.1 → 9.59.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/browser/neurolink.min.js +355 -355
- package/dist/core/baseProvider.d.ts +10 -3
- package/dist/core/baseProvider.js +8 -3
- package/dist/core/modules/StreamHandler.d.ts +22 -3
- package/dist/core/modules/StreamHandler.js +42 -20
- package/dist/lib/core/baseProvider.d.ts +10 -3
- package/dist/lib/core/baseProvider.js +8 -3
- package/dist/lib/core/modules/StreamHandler.d.ts +22 -3
- package/dist/lib/core/modules/StreamHandler.js +42 -20
- package/dist/lib/neurolink.js +361 -39
- package/dist/lib/providers/anthropic.js +13 -1
- package/dist/lib/providers/anthropicBaseProvider.js +30 -2
- package/dist/lib/providers/azureOpenai.js +12 -1
- package/dist/lib/providers/googleAiStudio.js +12 -1
- package/dist/lib/providers/googleVertex.js +11 -1
- package/dist/lib/providers/huggingFace.js +29 -2
- package/dist/lib/providers/litellm.js +44 -4
- package/dist/lib/providers/mistral.js +12 -1
- package/dist/lib/providers/openAI.js +34 -3
- package/dist/lib/providers/openRouter.js +33 -2
- package/dist/lib/providers/openaiCompatible.js +34 -2
- package/dist/lib/services/server/ai/observability/instrumentation.js +7 -2
- package/dist/lib/types/index.d.ts +1 -0
- package/dist/lib/types/index.js +2 -0
- package/dist/lib/types/noOutputSentinel.d.ts +26 -0
- package/dist/lib/types/noOutputSentinel.js +2 -0
- package/dist/lib/types/stream.d.ts +2 -1
- package/dist/lib/utils/noOutputSentinel.d.ts +80 -0
- package/dist/lib/utils/noOutputSentinel.js +193 -0
- package/dist/neurolink.js +361 -39
- package/dist/providers/anthropic.js +13 -1
- package/dist/providers/anthropicBaseProvider.js +30 -2
- package/dist/providers/azureOpenai.js +12 -1
- package/dist/providers/googleAiStudio.js +12 -1
- package/dist/providers/googleVertex.js +11 -1
- package/dist/providers/huggingFace.js +29 -2
- package/dist/providers/litellm.js +44 -4
- package/dist/providers/mistral.js +12 -1
- package/dist/providers/openAI.js +34 -3
- package/dist/providers/openRouter.js +33 -2
- package/dist/providers/openaiCompatible.js +34 -2
- package/dist/services/server/ai/observability/instrumentation.js +7 -2
- package/dist/types/index.d.ts +1 -0
- package/dist/types/index.js +2 -0
- package/dist/types/noOutputSentinel.d.ts +26 -0
- package/dist/types/noOutputSentinel.js +1 -0
- package/dist/types/stream.d.ts +2 -1
- package/dist/utils/noOutputSentinel.d.ts +80 -0
- package/dist/utils/noOutputSentinel.js +192 -0
- package/package.json +1 -1
|
@@ -279,13 +279,20 @@ export declare abstract class BaseProvider implements AIProvider {
|
|
|
279
279
|
*/
|
|
280
280
|
protected validateStreamOptions(options: StreamOptions): void;
|
|
281
281
|
/**
|
|
282
|
-
* Create text stream transformation - delegated to StreamHandler
|
|
282
|
+
* Create text stream transformation - delegated to StreamHandler.
|
|
283
|
+
* Reviewer follow-up: forwards the optional `getUnderlyingError`
|
|
284
|
+
* callback so providers can capture upstream errors via
|
|
285
|
+
* `streamText`'s `onError` and have them flow into the
|
|
286
|
+
* NoOutputGeneratedError sentinel's `providerError` /
|
|
287
|
+
* `modelResponseRaw`.
|
|
283
288
|
*/
|
|
284
289
|
protected createTextStream(result: {
|
|
285
290
|
textStream: AsyncIterable<string>;
|
|
286
|
-
|
|
291
|
+
finishReason?: Promise<unknown> | unknown;
|
|
292
|
+
totalUsage?: Promise<unknown> | unknown;
|
|
293
|
+
}, getUnderlyingError?: () => unknown): AsyncGenerator<{
|
|
287
294
|
content: string;
|
|
288
|
-
}>;
|
|
295
|
+
} | import("../types/index.js").StreamNoOutputSentinel>;
|
|
289
296
|
/**
|
|
290
297
|
* Create standardized stream result - delegated to StreamHandler
|
|
291
298
|
*/
|
|
@@ -1019,10 +1019,15 @@ export class BaseProvider {
|
|
|
1019
1019
|
this.streamHandler.validateStreamOptions(options);
|
|
1020
1020
|
}
|
|
1021
1021
|
/**
|
|
1022
|
-
* Create text stream transformation - delegated to StreamHandler
|
|
1022
|
+
* Create text stream transformation - delegated to StreamHandler.
|
|
1023
|
+
* Reviewer follow-up: forwards the optional `getUnderlyingError`
|
|
1024
|
+
* callback so providers can capture upstream errors via
|
|
1025
|
+
* `streamText`'s `onError` and have them flow into the
|
|
1026
|
+
* NoOutputGeneratedError sentinel's `providerError` /
|
|
1027
|
+
* `modelResponseRaw`.
|
|
1023
1028
|
*/
|
|
1024
|
-
createTextStream(result) {
|
|
1025
|
-
return this.streamHandler.createTextStream(result);
|
|
1029
|
+
createTextStream(result, getUnderlyingError) {
|
|
1030
|
+
return this.streamHandler.createTextStream(result, getUnderlyingError);
|
|
1026
1031
|
}
|
|
1027
1032
|
/**
|
|
1028
1033
|
* Create standardized stream result - delegated to StreamHandler
|
|
@@ -12,7 +12,7 @@
|
|
|
12
12
|
*
|
|
13
13
|
* @module core/modules/StreamHandler
|
|
14
14
|
*/
|
|
15
|
-
import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName } from "../../types/index.js";
|
|
15
|
+
import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName, StreamNoOutputSentinel } from "../../types/index.js";
|
|
16
16
|
/**
|
|
17
17
|
* StreamHandler class - Handles streaming operations for AI providers
|
|
18
18
|
*/
|
|
@@ -30,9 +30,28 @@ export declare class StreamHandler {
|
|
|
30
30
|
*/
|
|
31
31
|
createTextStream(result: {
|
|
32
32
|
textStream: AsyncIterable<string>;
|
|
33
|
-
|
|
33
|
+
/**
|
|
34
|
+
* Optional metadata getters from the AI SDK's StreamTextResult. These
|
|
35
|
+
* reject with NoOutputGeneratedError when no output is produced, which
|
|
36
|
+
* is exactly the path Curator's P3-6 fix needs to enrich. We attempt
|
|
37
|
+
* to await them in the catch block; whichever ones resolve are included in
|
|
38
|
+
* the sentinel chunk metadata.
|
|
39
|
+
*/
|
|
40
|
+
finishReason?: Promise<unknown> | unknown;
|
|
41
|
+
totalUsage?: Promise<unknown> | unknown;
|
|
42
|
+
},
|
|
43
|
+
/**
|
|
44
|
+
* Reviewer follow-up: optional getter for the provider's captured
|
|
45
|
+
* upstream error (typically wired from `streamText`'s `onError`
|
|
46
|
+
* callback). When set, the sentinel's `providerError` /
|
|
47
|
+
* `modelResponseRaw` reflect the real upstream cause instead of the
|
|
48
|
+
* AI SDK's generic "No output generated" message. Callers that don't
|
|
49
|
+
* capture upstream errors can omit this — the sentinel still
|
|
50
|
+
* populates with the AI SDK error.
|
|
51
|
+
*/
|
|
52
|
+
getUnderlyingError?: () => unknown): AsyncGenerator<{
|
|
34
53
|
content: string;
|
|
35
|
-
}>;
|
|
54
|
+
} | StreamNoOutputSentinel>;
|
|
36
55
|
/**
|
|
37
56
|
* Create standardized stream result - consolidates result structure
|
|
38
57
|
*/
|
|
@@ -16,6 +16,7 @@ import { trace, context as otelContext, SpanStatusCode, } from "@opentelemetry/a
|
|
|
16
16
|
import { tracers, ATTR, withSpan } from "../../telemetry/index.js";
|
|
17
17
|
import { logger } from "../../utils/logger.js";
|
|
18
18
|
import { validateStreamOptions as validateStreamOpts, ValidationError, createValidationSummary, } from "../../utils/parameterValidation.js";
|
|
19
|
+
import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../../utils/noOutputSentinel.js";
|
|
19
20
|
import { STEP_LIMITS } from "../constants.js";
|
|
20
21
|
import { createAnalytics } from "../analytics.js";
|
|
21
22
|
import { nanoid } from "nanoid";
|
|
@@ -83,7 +84,17 @@ export class StreamHandler {
|
|
|
83
84
|
* Create text stream transformation - consolidates identical logic from 7/10 providers
|
|
84
85
|
* Tracks TTFC (Time To First Chunk), chunk count, and total bytes streamed.
|
|
85
86
|
*/
|
|
86
|
-
createTextStream(result
|
|
87
|
+
createTextStream(result,
|
|
88
|
+
/**
|
|
89
|
+
* Reviewer follow-up: optional getter for the provider's captured
|
|
90
|
+
* upstream error (typically wired from `streamText`'s `onError`
|
|
91
|
+
* callback). When set, the sentinel's `providerError` /
|
|
92
|
+
* `modelResponseRaw` reflect the real upstream cause instead of the
|
|
93
|
+
* AI SDK's generic "No output generated" message. Callers that don't
|
|
94
|
+
* capture upstream errors can omit this — the sentinel still
|
|
95
|
+
* populates with the AI SDK error.
|
|
96
|
+
*/
|
|
97
|
+
getUnderlyingError) {
|
|
87
98
|
const providerName = this.providerName;
|
|
88
99
|
return (async function* () {
|
|
89
100
|
let chunkCount = 0;
|
|
@@ -113,32 +124,43 @@ export class StreamHandler {
|
|
|
113
124
|
// rather than crashing the process with an unhandled rejection.
|
|
114
125
|
if (NoOutputGeneratedError.isInstance(error)) {
|
|
115
126
|
logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError), returning empty stream`);
|
|
116
|
-
// Curator
|
|
117
|
-
//
|
|
118
|
-
//
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
// Tracing not initialized — ignore.
|
|
127
|
-
}
|
|
127
|
+
// Curator P3-6: build the enriched sentinel using the shared
|
|
128
|
+
// helper so every provider yields the same shape. Pass the
|
|
129
|
+
// captured upstream error (if any) so providerError /
|
|
130
|
+
// modelResponseRaw carry the real cause.
|
|
131
|
+
const sentinel = await buildNoOutputSentinel(error, result, getUnderlyingError?.());
|
|
132
|
+
// Curator P2-5 + P3-6: stamp the active OTel span so
|
|
133
|
+
// ContextEnricher.onEnd() surfaces a WARNING-level Langfuse
|
|
134
|
+
// observation with finishReason + token usage. Centralized in
|
|
135
|
+
// stampNoOutputSpan so every wired site stamps consistently.
|
|
136
|
+
stampNoOutputSpan(sentinel);
|
|
128
137
|
// S4 fix: yield a sentinel chunk so Pipeline B can detect the empty stream
|
|
129
138
|
// and set the span to WARNING status instead of OK
|
|
130
|
-
yield
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
};
|
|
139
|
+
yield sentinel;
|
|
140
|
+
// Reviewer follow-up: must return here. Falling through to the
|
|
141
|
+
// post-stream detection block below would yield a SECOND sentinel
|
|
142
|
+
// chunk (verified with synthetic NoOutputGeneratedError stream:
|
|
143
|
+
// count=2 sentinels). The catch block's yield is sufficient.
|
|
144
|
+
return;
|
|
137
145
|
}
|
|
138
146
|
else {
|
|
139
147
|
throw error;
|
|
140
148
|
}
|
|
141
149
|
}
|
|
150
|
+
// Curator P3-6 (round-2 fix): the production trigger surfaces
|
|
151
|
+
// NoOutputGeneratedError on `result.finishReason` rejection — NOT
|
|
152
|
+
// thrown from textStream iteration. Surface that path here so the
|
|
153
|
+
// sentinel actually fires for real-world no-output streams. The
|
|
154
|
+
// catch above remains as a defensive path for failure modes that
|
|
155
|
+
// do throw from textStream.
|
|
156
|
+
if (chunkCount === 0) {
|
|
157
|
+
const detected = await detectPostStreamNoOutput(result, getUnderlyingError?.());
|
|
158
|
+
if (detected) {
|
|
159
|
+
logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection`);
|
|
160
|
+
stampNoOutputSpan(detected.sentinel);
|
|
161
|
+
yield detected.sentinel;
|
|
162
|
+
}
|
|
163
|
+
}
|
|
142
164
|
// Record completion metrics on the active span
|
|
143
165
|
const activeSpan = trace.getSpan(otelContext.active());
|
|
144
166
|
if (activeSpan) {
|
|
@@ -279,13 +279,20 @@ export declare abstract class BaseProvider implements AIProvider {
|
|
|
279
279
|
*/
|
|
280
280
|
protected validateStreamOptions(options: StreamOptions): void;
|
|
281
281
|
/**
|
|
282
|
-
* Create text stream transformation - delegated to StreamHandler
|
|
282
|
+
* Create text stream transformation - delegated to StreamHandler.
|
|
283
|
+
* Reviewer follow-up: forwards the optional `getUnderlyingError`
|
|
284
|
+
* callback so providers can capture upstream errors via
|
|
285
|
+
* `streamText`'s `onError` and have them flow into the
|
|
286
|
+
* NoOutputGeneratedError sentinel's `providerError` /
|
|
287
|
+
* `modelResponseRaw`.
|
|
283
288
|
*/
|
|
284
289
|
protected createTextStream(result: {
|
|
285
290
|
textStream: AsyncIterable<string>;
|
|
286
|
-
|
|
291
|
+
finishReason?: Promise<unknown> | unknown;
|
|
292
|
+
totalUsage?: Promise<unknown> | unknown;
|
|
293
|
+
}, getUnderlyingError?: () => unknown): AsyncGenerator<{
|
|
287
294
|
content: string;
|
|
288
|
-
}>;
|
|
295
|
+
} | import("../types/index.js").StreamNoOutputSentinel>;
|
|
289
296
|
/**
|
|
290
297
|
* Create standardized stream result - delegated to StreamHandler
|
|
291
298
|
*/
|
|
@@ -1019,10 +1019,15 @@ export class BaseProvider {
|
|
|
1019
1019
|
this.streamHandler.validateStreamOptions(options);
|
|
1020
1020
|
}
|
|
1021
1021
|
/**
|
|
1022
|
-
* Create text stream transformation - delegated to StreamHandler
|
|
1022
|
+
* Create text stream transformation - delegated to StreamHandler.
|
|
1023
|
+
* Reviewer follow-up: forwards the optional `getUnderlyingError`
|
|
1024
|
+
* callback so providers can capture upstream errors via
|
|
1025
|
+
* `streamText`'s `onError` and have them flow into the
|
|
1026
|
+
* NoOutputGeneratedError sentinel's `providerError` /
|
|
1027
|
+
* `modelResponseRaw`.
|
|
1023
1028
|
*/
|
|
1024
|
-
createTextStream(result) {
|
|
1025
|
-
return this.streamHandler.createTextStream(result);
|
|
1029
|
+
createTextStream(result, getUnderlyingError) {
|
|
1030
|
+
return this.streamHandler.createTextStream(result, getUnderlyingError);
|
|
1026
1031
|
}
|
|
1027
1032
|
/**
|
|
1028
1033
|
* Create standardized stream result - delegated to StreamHandler
|
|
@@ -12,7 +12,7 @@
|
|
|
12
12
|
*
|
|
13
13
|
* @module core/modules/StreamHandler
|
|
14
14
|
*/
|
|
15
|
-
import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName } from "../../types/index.js";
|
|
15
|
+
import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName, StreamNoOutputSentinel } from "../../types/index.js";
|
|
16
16
|
/**
|
|
17
17
|
* StreamHandler class - Handles streaming operations for AI providers
|
|
18
18
|
*/
|
|
@@ -30,9 +30,28 @@ export declare class StreamHandler {
|
|
|
30
30
|
*/
|
|
31
31
|
createTextStream(result: {
|
|
32
32
|
textStream: AsyncIterable<string>;
|
|
33
|
-
|
|
33
|
+
/**
|
|
34
|
+
* Optional metadata getters from the AI SDK's StreamTextResult. These
|
|
35
|
+
* reject with NoOutputGeneratedError when no output is produced, which
|
|
36
|
+
* is exactly the path Curator's P3-6 fix needs to enrich. We attempt
|
|
37
|
+
* to await them in the catch block; whichever ones resolve are included in
|
|
38
|
+
* the sentinel chunk metadata.
|
|
39
|
+
*/
|
|
40
|
+
finishReason?: Promise<unknown> | unknown;
|
|
41
|
+
totalUsage?: Promise<unknown> | unknown;
|
|
42
|
+
},
|
|
43
|
+
/**
|
|
44
|
+
* Reviewer follow-up: optional getter for the provider's captured
|
|
45
|
+
* upstream error (typically wired from `streamText`'s `onError`
|
|
46
|
+
* callback). When set, the sentinel's `providerError` /
|
|
47
|
+
* `modelResponseRaw` reflect the real upstream cause instead of the
|
|
48
|
+
* AI SDK's generic "No output generated" message. Callers that don't
|
|
49
|
+
* capture upstream errors can omit this — the sentinel still
|
|
50
|
+
* populates with the AI SDK error.
|
|
51
|
+
*/
|
|
52
|
+
getUnderlyingError?: () => unknown): AsyncGenerator<{
|
|
34
53
|
content: string;
|
|
35
|
-
}>;
|
|
54
|
+
} | StreamNoOutputSentinel>;
|
|
36
55
|
/**
|
|
37
56
|
* Create standardized stream result - consolidates result structure
|
|
38
57
|
*/
|
|
@@ -16,6 +16,7 @@ import { trace, context as otelContext, SpanStatusCode, } from "@opentelemetry/a
|
|
|
16
16
|
import { tracers, ATTR, withSpan } from "../../telemetry/index.js";
|
|
17
17
|
import { logger } from "../../utils/logger.js";
|
|
18
18
|
import { validateStreamOptions as validateStreamOpts, ValidationError, createValidationSummary, } from "../../utils/parameterValidation.js";
|
|
19
|
+
import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../../utils/noOutputSentinel.js";
|
|
19
20
|
import { STEP_LIMITS } from "../constants.js";
|
|
20
21
|
import { createAnalytics } from "../analytics.js";
|
|
21
22
|
import { nanoid } from "nanoid";
|
|
@@ -83,7 +84,17 @@ export class StreamHandler {
|
|
|
83
84
|
* Create text stream transformation - consolidates identical logic from 7/10 providers
|
|
84
85
|
* Tracks TTFC (Time To First Chunk), chunk count, and total bytes streamed.
|
|
85
86
|
*/
|
|
86
|
-
createTextStream(result
|
|
87
|
+
createTextStream(result,
|
|
88
|
+
/**
|
|
89
|
+
* Reviewer follow-up: optional getter for the provider's captured
|
|
90
|
+
* upstream error (typically wired from `streamText`'s `onError`
|
|
91
|
+
* callback). When set, the sentinel's `providerError` /
|
|
92
|
+
* `modelResponseRaw` reflect the real upstream cause instead of the
|
|
93
|
+
* AI SDK's generic "No output generated" message. Callers that don't
|
|
94
|
+
* capture upstream errors can omit this — the sentinel still
|
|
95
|
+
* populates with the AI SDK error.
|
|
96
|
+
*/
|
|
97
|
+
getUnderlyingError) {
|
|
87
98
|
const providerName = this.providerName;
|
|
88
99
|
return (async function* () {
|
|
89
100
|
let chunkCount = 0;
|
|
@@ -113,32 +124,43 @@ export class StreamHandler {
|
|
|
113
124
|
// rather than crashing the process with an unhandled rejection.
|
|
114
125
|
if (NoOutputGeneratedError.isInstance(error)) {
|
|
115
126
|
logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError), returning empty stream`);
|
|
116
|
-
// Curator
|
|
117
|
-
//
|
|
118
|
-
//
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
// Tracing not initialized — ignore.
|
|
127
|
-
}
|
|
127
|
+
// Curator P3-6: build the enriched sentinel using the shared
|
|
128
|
+
// helper so every provider yields the same shape. Pass the
|
|
129
|
+
// captured upstream error (if any) so providerError /
|
|
130
|
+
// modelResponseRaw carry the real cause.
|
|
131
|
+
const sentinel = await buildNoOutputSentinel(error, result, getUnderlyingError?.());
|
|
132
|
+
// Curator P2-5 + P3-6: stamp the active OTel span so
|
|
133
|
+
// ContextEnricher.onEnd() surfaces a WARNING-level Langfuse
|
|
134
|
+
// observation with finishReason + token usage. Centralized in
|
|
135
|
+
// stampNoOutputSpan so every wired site stamps consistently.
|
|
136
|
+
stampNoOutputSpan(sentinel);
|
|
128
137
|
// S4 fix: yield a sentinel chunk so Pipeline B can detect the empty stream
|
|
129
138
|
// and set the span to WARNING status instead of OK
|
|
130
|
-
yield
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
};
|
|
139
|
+
yield sentinel;
|
|
140
|
+
// Reviewer follow-up: must return here. Falling through to the
|
|
141
|
+
// post-stream detection block below would yield a SECOND sentinel
|
|
142
|
+
// chunk (verified with synthetic NoOutputGeneratedError stream:
|
|
143
|
+
// count=2 sentinels). The catch block's yield is sufficient.
|
|
144
|
+
return;
|
|
137
145
|
}
|
|
138
146
|
else {
|
|
139
147
|
throw error;
|
|
140
148
|
}
|
|
141
149
|
}
|
|
150
|
+
// Curator P3-6 (round-2 fix): the production trigger surfaces
|
|
151
|
+
// NoOutputGeneratedError on `result.finishReason` rejection — NOT
|
|
152
|
+
// thrown from textStream iteration. Surface that path here so the
|
|
153
|
+
// sentinel actually fires for real-world no-output streams. The
|
|
154
|
+
// catch above remains as a defensive path for failure modes that
|
|
155
|
+
// do throw from textStream.
|
|
156
|
+
if (chunkCount === 0) {
|
|
157
|
+
const detected = await detectPostStreamNoOutput(result, getUnderlyingError?.());
|
|
158
|
+
if (detected) {
|
|
159
|
+
logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection`);
|
|
160
|
+
stampNoOutputSpan(detected.sentinel);
|
|
161
|
+
yield detected.sentinel;
|
|
162
|
+
}
|
|
163
|
+
}
|
|
142
164
|
// Record completion metrics on the active span
|
|
143
165
|
const activeSpan = trace.getSpan(otelContext.active());
|
|
144
166
|
if (activeSpan) {
|