@juspay/neurolink 9.59.1 → 9.59.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/browser/neurolink.min.js +355 -355
  3. package/dist/core/baseProvider.d.ts +10 -3
  4. package/dist/core/baseProvider.js +8 -3
  5. package/dist/core/modules/StreamHandler.d.ts +22 -3
  6. package/dist/core/modules/StreamHandler.js +42 -20
  7. package/dist/lib/core/baseProvider.d.ts +10 -3
  8. package/dist/lib/core/baseProvider.js +8 -3
  9. package/dist/lib/core/modules/StreamHandler.d.ts +22 -3
  10. package/dist/lib/core/modules/StreamHandler.js +42 -20
  11. package/dist/lib/neurolink.js +361 -39
  12. package/dist/lib/providers/anthropic.js +13 -1
  13. package/dist/lib/providers/anthropicBaseProvider.js +30 -2
  14. package/dist/lib/providers/azureOpenai.js +12 -1
  15. package/dist/lib/providers/googleAiStudio.js +12 -1
  16. package/dist/lib/providers/googleVertex.js +11 -1
  17. package/dist/lib/providers/huggingFace.js +29 -2
  18. package/dist/lib/providers/litellm.js +44 -4
  19. package/dist/lib/providers/mistral.js +12 -1
  20. package/dist/lib/providers/openAI.js +34 -3
  21. package/dist/lib/providers/openRouter.js +33 -2
  22. package/dist/lib/providers/openaiCompatible.js +34 -2
  23. package/dist/lib/services/server/ai/observability/instrumentation.js +7 -2
  24. package/dist/lib/types/index.d.ts +1 -0
  25. package/dist/lib/types/index.js +2 -0
  26. package/dist/lib/types/noOutputSentinel.d.ts +26 -0
  27. package/dist/lib/types/noOutputSentinel.js +2 -0
  28. package/dist/lib/types/stream.d.ts +2 -1
  29. package/dist/lib/utils/noOutputSentinel.d.ts +80 -0
  30. package/dist/lib/utils/noOutputSentinel.js +193 -0
  31. package/dist/neurolink.js +361 -39
  32. package/dist/providers/anthropic.js +13 -1
  33. package/dist/providers/anthropicBaseProvider.js +30 -2
  34. package/dist/providers/azureOpenai.js +12 -1
  35. package/dist/providers/googleAiStudio.js +12 -1
  36. package/dist/providers/googleVertex.js +11 -1
  37. package/dist/providers/huggingFace.js +29 -2
  38. package/dist/providers/litellm.js +44 -4
  39. package/dist/providers/mistral.js +12 -1
  40. package/dist/providers/openAI.js +34 -3
  41. package/dist/providers/openRouter.js +33 -2
  42. package/dist/providers/openaiCompatible.js +34 -2
  43. package/dist/services/server/ai/observability/instrumentation.js +7 -2
  44. package/dist/types/index.d.ts +1 -0
  45. package/dist/types/index.js +2 -0
  46. package/dist/types/noOutputSentinel.d.ts +26 -0
  47. package/dist/types/noOutputSentinel.js +1 -0
  48. package/dist/types/stream.d.ts +2 -1
  49. package/dist/utils/noOutputSentinel.d.ts +80 -0
  50. package/dist/utils/noOutputSentinel.js +192 -0
  51. package/package.json +1 -1
@@ -279,13 +279,20 @@ export declare abstract class BaseProvider implements AIProvider {
279
279
  */
280
280
  protected validateStreamOptions(options: StreamOptions): void;
281
281
  /**
282
- * Create text stream transformation - delegated to StreamHandler
282
+ * Create text stream transformation - delegated to StreamHandler.
283
+ * Reviewer follow-up: forwards the optional `getUnderlyingError`
284
+ * callback so providers can capture upstream errors via
285
+ * `streamText`'s `onError` and have them flow into the
286
+ * NoOutputGeneratedError sentinel's `providerError` /
287
+ * `modelResponseRaw`.
283
288
  */
284
289
  protected createTextStream(result: {
285
290
  textStream: AsyncIterable<string>;
286
- }): AsyncGenerator<{
291
+ finishReason?: Promise<unknown> | unknown;
292
+ totalUsage?: Promise<unknown> | unknown;
293
+ }, getUnderlyingError?: () => unknown): AsyncGenerator<{
287
294
  content: string;
288
- }>;
295
+ } | import("../types/index.js").StreamNoOutputSentinel>;
289
296
  /**
290
297
  * Create standardized stream result - delegated to StreamHandler
291
298
  */
@@ -1019,10 +1019,15 @@ export class BaseProvider {
1019
1019
  this.streamHandler.validateStreamOptions(options);
1020
1020
  }
1021
1021
  /**
1022
- * Create text stream transformation - delegated to StreamHandler
1022
+ * Create text stream transformation - delegated to StreamHandler.
1023
+ * Reviewer follow-up: forwards the optional `getUnderlyingError`
1024
+ * callback so providers can capture upstream errors via
1025
+ * `streamText`'s `onError` and have them flow into the
1026
+ * NoOutputGeneratedError sentinel's `providerError` /
1027
+ * `modelResponseRaw`.
1023
1028
  */
1024
- createTextStream(result) {
1025
- return this.streamHandler.createTextStream(result);
1029
+ createTextStream(result, getUnderlyingError) {
1030
+ return this.streamHandler.createTextStream(result, getUnderlyingError);
1026
1031
  }
1027
1032
  /**
1028
1033
  * Create standardized stream result - delegated to StreamHandler
@@ -12,7 +12,7 @@
12
12
  *
13
13
  * @module core/modules/StreamHandler
14
14
  */
15
- import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName } from "../../types/index.js";
15
+ import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName, StreamNoOutputSentinel } from "../../types/index.js";
16
16
  /**
17
17
  * StreamHandler class - Handles streaming operations for AI providers
18
18
  */
@@ -30,9 +30,28 @@ export declare class StreamHandler {
30
30
  */
31
31
  createTextStream(result: {
32
32
  textStream: AsyncIterable<string>;
33
- }): AsyncGenerator<{
33
+ /**
34
+ * Optional metadata getters from the AI SDK's StreamTextResult. These
35
+ * reject with NoOutputGeneratedError when no output is produced, which
36
+ * is exactly the path Curator's P3-6 fix needs to enrich. We attempt
37
+ * to await them in the catch block; whichever of them resolves gets included in
38
+ * the sentinel chunk metadata.
39
+ */
40
+ finishReason?: Promise<unknown> | unknown;
41
+ totalUsage?: Promise<unknown> | unknown;
42
+ },
43
+ /**
44
+ * Reviewer follow-up: optional getter for the provider's captured
45
+ * upstream error (typically wired from `streamText`'s `onError`
46
+ * callback). When set, the sentinel's `providerError` /
47
+ * `modelResponseRaw` reflect the real upstream cause instead of the
48
+ * AI SDK's generic "No output generated" message. Callers that don't
49
+ * capture upstream errors can omit this — the sentinel still
50
+ * populates with the AI SDK error.
51
+ */
52
+ getUnderlyingError?: () => unknown): AsyncGenerator<{
34
53
  content: string;
35
- }>;
54
+ } | StreamNoOutputSentinel>;
36
55
  /**
37
56
  * Create standardized stream result - consolidates result structure
38
57
  */
@@ -16,6 +16,7 @@ import { trace, context as otelContext, SpanStatusCode, } from "@opentelemetry/a
16
16
  import { tracers, ATTR, withSpan } from "../../telemetry/index.js";
17
17
  import { logger } from "../../utils/logger.js";
18
18
  import { validateStreamOptions as validateStreamOpts, ValidationError, createValidationSummary, } from "../../utils/parameterValidation.js";
19
+ import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../../utils/noOutputSentinel.js";
19
20
  import { STEP_LIMITS } from "../constants.js";
20
21
  import { createAnalytics } from "../analytics.js";
21
22
  import { nanoid } from "nanoid";
@@ -83,7 +84,17 @@ export class StreamHandler {
83
84
  * Create text stream transformation - consolidates identical logic from 7/10 providers
84
85
  * Tracks TTFC (Time To First Chunk), chunk count, and total bytes streamed.
85
86
  */
86
- createTextStream(result) {
87
+ createTextStream(result,
88
+ /**
89
+ * Reviewer follow-up: optional getter for the provider's captured
90
+ * upstream error (typically wired from `streamText`'s `onError`
91
+ * callback). When set, the sentinel's `providerError` /
92
+ * `modelResponseRaw` reflect the real upstream cause instead of the
93
+ * AI SDK's generic "No output generated" message. Callers that don't
94
+ * capture upstream errors can omit this — the sentinel still
95
+ * populates with the AI SDK error.
96
+ */
97
+ getUnderlyingError) {
87
98
  const providerName = this.providerName;
88
99
  return (async function* () {
89
100
  let chunkCount = 0;
@@ -113,32 +124,43 @@ export class StreamHandler {
113
124
  // rather than crashing the process with an unhandled rejection.
114
125
  if (NoOutputGeneratedError.isInstance(error)) {
115
126
  logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError), returning empty stream`);
116
- // Curator P2-5: stamp the active OTel span so ContextEnricher.onEnd()
117
- // surfaces a WARNING-level Langfuse observation instead of defaulting
118
- // to DEFAULT with no status message.
119
- try {
120
- const activeSpan = trace.getSpan(otelContext.active());
121
- if (activeSpan) {
122
- activeSpan.setAttribute("neurolink.no_output", true);
123
- }
124
- }
125
- catch {
126
- // Tracing not initialized — ignore.
127
- }
127
+ // Curator P3-6: build the enriched sentinel using the shared
128
+ // helper so every provider yields the same shape. Pass the
129
+ // captured upstream error (if any) so providerError /
130
+ // modelResponseRaw carry the real cause.
131
+ const sentinel = await buildNoOutputSentinel(error, result, getUnderlyingError?.());
132
+ // Curator P2-5 + P3-6: stamp the active OTel span so
133
+ // ContextEnricher.onEnd() surfaces a WARNING-level Langfuse
134
+ // observation with finishReason + token usage. Centralized in
135
+ // stampNoOutputSpan so every wired site stamps consistently.
136
+ stampNoOutputSpan(sentinel);
128
137
  // S4 fix: yield a sentinel chunk so Pipeline B can detect the empty stream
129
138
  // and set the span to WARNING status instead of OK
130
- yield {
131
- content: "",
132
- metadata: {
133
- noOutput: true,
134
- errorType: "NoOutputGeneratedError",
135
- },
136
- };
139
+ yield sentinel;
140
+ // Reviewer follow-up: must return here. Falling through to the
141
+ // post-stream detection block below would yield a SECOND sentinel
142
+ // chunk (verified with synthetic NoOutputGeneratedError stream:
143
+ // count=2 sentinels). The catch block's yield is sufficient.
144
+ return;
137
145
  }
138
146
  else {
139
147
  throw error;
140
148
  }
141
149
  }
150
+ // Curator P3-6 (round-2 fix): the production trigger sets
151
+ // NoOutputGeneratedError on `result.finishReason` rejection — NOT
152
+ // thrown from textStream iteration. Surface that path here so the
153
+ // sentinel actually fires for real-world no-output streams. The
154
+ // catch above remains as a defensive path for failure modes that
155
+ // do throw from textStream.
156
+ if (chunkCount === 0) {
157
+ const detected = await detectPostStreamNoOutput(result, getUnderlyingError?.());
158
+ if (detected) {
159
+ logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection`);
160
+ stampNoOutputSpan(detected.sentinel);
161
+ yield detected.sentinel;
162
+ }
163
+ }
142
164
  // Record completion metrics on the active span
143
165
  const activeSpan = trace.getSpan(otelContext.active());
144
166
  if (activeSpan) {
@@ -279,13 +279,20 @@ export declare abstract class BaseProvider implements AIProvider {
279
279
  */
280
280
  protected validateStreamOptions(options: StreamOptions): void;
281
281
  /**
282
- * Create text stream transformation - delegated to StreamHandler
282
+ * Create text stream transformation - delegated to StreamHandler.
283
+ * Reviewer follow-up: forwards the optional `getUnderlyingError`
284
+ * callback so providers can capture upstream errors via
285
+ * `streamText`'s `onError` and have them flow into the
286
+ * NoOutputGeneratedError sentinel's `providerError` /
287
+ * `modelResponseRaw`.
283
288
  */
284
289
  protected createTextStream(result: {
285
290
  textStream: AsyncIterable<string>;
286
- }): AsyncGenerator<{
291
+ finishReason?: Promise<unknown> | unknown;
292
+ totalUsage?: Promise<unknown> | unknown;
293
+ }, getUnderlyingError?: () => unknown): AsyncGenerator<{
287
294
  content: string;
288
- }>;
295
+ } | import("../types/index.js").StreamNoOutputSentinel>;
289
296
  /**
290
297
  * Create standardized stream result - delegated to StreamHandler
291
298
  */
@@ -1019,10 +1019,15 @@ export class BaseProvider {
1019
1019
  this.streamHandler.validateStreamOptions(options);
1020
1020
  }
1021
1021
  /**
1022
- * Create text stream transformation - delegated to StreamHandler
1022
+ * Create text stream transformation - delegated to StreamHandler.
1023
+ * Reviewer follow-up: forwards the optional `getUnderlyingError`
1024
+ * callback so providers can capture upstream errors via
1025
+ * `streamText`'s `onError` and have them flow into the
1026
+ * NoOutputGeneratedError sentinel's `providerError` /
1027
+ * `modelResponseRaw`.
1023
1028
  */
1024
- createTextStream(result) {
1025
- return this.streamHandler.createTextStream(result);
1029
+ createTextStream(result, getUnderlyingError) {
1030
+ return this.streamHandler.createTextStream(result, getUnderlyingError);
1026
1031
  }
1027
1032
  /**
1028
1033
  * Create standardized stream result - delegated to StreamHandler
@@ -12,7 +12,7 @@
12
12
  *
13
13
  * @module core/modules/StreamHandler
14
14
  */
15
- import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName } from "../../types/index.js";
15
+ import type { StreamOptions, StreamResult, UnknownRecord, AIProviderName, StreamNoOutputSentinel } from "../../types/index.js";
16
16
  /**
17
17
  * StreamHandler class - Handles streaming operations for AI providers
18
18
  */
@@ -30,9 +30,28 @@ export declare class StreamHandler {
30
30
  */
31
31
  createTextStream(result: {
32
32
  textStream: AsyncIterable<string>;
33
- }): AsyncGenerator<{
33
+ /**
34
+ * Optional metadata getters from the AI SDK's StreamTextResult. These
35
+ * reject with NoOutputGeneratedError when no output is produced, which
36
+ * is exactly the path Curator's P3-6 fix needs to enrich. We attempt
37
+ * to await them in the catch block; whichever of them resolves gets included in
38
+ * the sentinel chunk metadata.
39
+ */
40
+ finishReason?: Promise<unknown> | unknown;
41
+ totalUsage?: Promise<unknown> | unknown;
42
+ },
43
+ /**
44
+ * Reviewer follow-up: optional getter for the provider's captured
45
+ * upstream error (typically wired from `streamText`'s `onError`
46
+ * callback). When set, the sentinel's `providerError` /
47
+ * `modelResponseRaw` reflect the real upstream cause instead of the
48
+ * AI SDK's generic "No output generated" message. Callers that don't
49
+ * capture upstream errors can omit this — the sentinel still
50
+ * populates with the AI SDK error.
51
+ */
52
+ getUnderlyingError?: () => unknown): AsyncGenerator<{
34
53
  content: string;
35
- }>;
54
+ } | StreamNoOutputSentinel>;
36
55
  /**
37
56
  * Create standardized stream result - consolidates result structure
38
57
  */
@@ -16,6 +16,7 @@ import { trace, context as otelContext, SpanStatusCode, } from "@opentelemetry/a
16
16
  import { tracers, ATTR, withSpan } from "../../telemetry/index.js";
17
17
  import { logger } from "../../utils/logger.js";
18
18
  import { validateStreamOptions as validateStreamOpts, ValidationError, createValidationSummary, } from "../../utils/parameterValidation.js";
19
+ import { buildNoOutputSentinel, detectPostStreamNoOutput, stampNoOutputSpan, } from "../../utils/noOutputSentinel.js";
19
20
  import { STEP_LIMITS } from "../constants.js";
20
21
  import { createAnalytics } from "../analytics.js";
21
22
  import { nanoid } from "nanoid";
@@ -83,7 +84,17 @@ export class StreamHandler {
83
84
  * Create text stream transformation - consolidates identical logic from 7/10 providers
84
85
  * Tracks TTFC (Time To First Chunk), chunk count, and total bytes streamed.
85
86
  */
86
- createTextStream(result) {
87
+ createTextStream(result,
88
+ /**
89
+ * Reviewer follow-up: optional getter for the provider's captured
90
+ * upstream error (typically wired from `streamText`'s `onError`
91
+ * callback). When set, the sentinel's `providerError` /
92
+ * `modelResponseRaw` reflect the real upstream cause instead of the
93
+ * AI SDK's generic "No output generated" message. Callers that don't
94
+ * capture upstream errors can omit this — the sentinel still
95
+ * populates with the AI SDK error.
96
+ */
97
+ getUnderlyingError) {
87
98
  const providerName = this.providerName;
88
99
  return (async function* () {
89
100
  let chunkCount = 0;
@@ -113,32 +124,43 @@ export class StreamHandler {
113
124
  // rather than crashing the process with an unhandled rejection.
114
125
  if (NoOutputGeneratedError.isInstance(error)) {
115
126
  logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError), returning empty stream`);
116
- // Curator P2-5: stamp the active OTel span so ContextEnricher.onEnd()
117
- // surfaces a WARNING-level Langfuse observation instead of defaulting
118
- // to DEFAULT with no status message.
119
- try {
120
- const activeSpan = trace.getSpan(otelContext.active());
121
- if (activeSpan) {
122
- activeSpan.setAttribute("neurolink.no_output", true);
123
- }
124
- }
125
- catch {
126
- // Tracing not initialized — ignore.
127
- }
127
+ // Curator P3-6: build the enriched sentinel using the shared
128
+ // helper so every provider yields the same shape. Pass the
129
+ // captured upstream error (if any) so providerError /
130
+ // modelResponseRaw carry the real cause.
131
+ const sentinel = await buildNoOutputSentinel(error, result, getUnderlyingError?.());
132
+ // Curator P2-5 + P3-6: stamp the active OTel span so
133
+ // ContextEnricher.onEnd() surfaces a WARNING-level Langfuse
134
+ // observation with finishReason + token usage. Centralized in
135
+ // stampNoOutputSpan so every wired site stamps consistently.
136
+ stampNoOutputSpan(sentinel);
128
137
  // S4 fix: yield a sentinel chunk so Pipeline B can detect the empty stream
129
138
  // and set the span to WARNING status instead of OK
130
- yield {
131
- content: "",
132
- metadata: {
133
- noOutput: true,
134
- errorType: "NoOutputGeneratedError",
135
- },
136
- };
139
+ yield sentinel;
140
+ // Reviewer follow-up: must return here. Falling through to the
141
+ // post-stream detection block below would yield a SECOND sentinel
142
+ // chunk (verified with synthetic NoOutputGeneratedError stream:
143
+ // count=2 sentinels). The catch block's yield is sufficient.
144
+ return;
137
145
  }
138
146
  else {
139
147
  throw error;
140
148
  }
141
149
  }
150
+ // Curator P3-6 (round-2 fix): the production trigger sets
151
+ // NoOutputGeneratedError on `result.finishReason` rejection — NOT
152
+ // thrown from textStream iteration. Surface that path here so the
153
+ // sentinel actually fires for real-world no-output streams. The
154
+ // catch above remains as a defensive path for failure modes that
155
+ // do throw from textStream.
156
+ if (chunkCount === 0) {
157
+ const detected = await detectPostStreamNoOutput(result, getUnderlyingError?.());
158
+ if (detected) {
159
+ logger.warn(`${providerName}: Stream produced no output (NoOutputGeneratedError) — caught from finishReason rejection`);
160
+ stampNoOutputSpan(detected.sentinel);
161
+ yield detected.sentinel;
162
+ }
163
+ }
142
164
  // Record completion metrics on the active span
143
165
  const activeSpan = trace.getSpan(otelContext.active());
144
166
  if (activeSpan) {