@juspay/neurolink 9.59.2 → 9.59.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/browser/neurolink.min.js +355 -355
- package/dist/cli/commands/proxy.js +10 -5
- package/dist/core/baseProvider.d.ts +10 -3
- package/dist/core/baseProvider.js +8 -3
- package/dist/core/modules/StreamHandler.d.ts +22 -3
- package/dist/core/modules/StreamHandler.js +42 -20
- package/dist/lib/core/baseProvider.d.ts +10 -3
- package/dist/lib/core/baseProvider.js +8 -3
- package/dist/lib/core/modules/StreamHandler.d.ts +22 -3
- package/dist/lib/core/modules/StreamHandler.js +42 -20
- package/dist/lib/neurolink.js +57 -3
- package/dist/lib/providers/anthropic.js +13 -1
- package/dist/lib/providers/anthropicBaseProvider.js +30 -2
- package/dist/lib/providers/azureOpenai.js +12 -1
- package/dist/lib/providers/googleAiStudio.js +12 -1
- package/dist/lib/providers/googleVertex.js +11 -1
- package/dist/lib/providers/huggingFace.js +29 -2
- package/dist/lib/providers/litellm.js +44 -4
- package/dist/lib/providers/mistral.js +12 -1
- package/dist/lib/providers/openAI.js +34 -3
- package/dist/lib/providers/openRouter.js +33 -2
- package/dist/lib/providers/openaiCompatible.js +34 -2
- package/dist/lib/services/server/ai/observability/instrumentation.js +7 -2
- package/dist/lib/types/index.d.ts +1 -0
- package/dist/lib/types/index.js +2 -0
- package/dist/lib/types/noOutputSentinel.d.ts +26 -0
- package/dist/lib/types/noOutputSentinel.js +2 -0
- package/dist/lib/types/stream.d.ts +2 -1
- package/dist/lib/utils/noOutputSentinel.d.ts +80 -0
- package/dist/lib/utils/noOutputSentinel.js +193 -0
- package/dist/neurolink.js +57 -3
- package/dist/providers/anthropic.js +13 -1
- package/dist/providers/anthropicBaseProvider.js +30 -2
- package/dist/providers/azureOpenai.js +12 -1
- package/dist/providers/googleAiStudio.js +12 -1
- package/dist/providers/googleVertex.js +11 -1
- package/dist/providers/huggingFace.js +29 -2
- package/dist/providers/litellm.js +44 -4
- package/dist/providers/mistral.js +12 -1
- package/dist/providers/openAI.js +34 -3
- package/dist/providers/openRouter.js +33 -2
- package/dist/providers/openaiCompatible.js +34 -2
- package/dist/services/server/ai/observability/instrumentation.js +7 -2
- package/dist/types/index.d.ts +1 -0
- package/dist/types/index.js +2 -0
- package/dist/types/noOutputSentinel.d.ts +26 -0
- package/dist/types/noOutputSentinel.js +1 -0
- package/dist/types/stream.d.ts +2 -1
- package/dist/utils/noOutputSentinel.d.ts +80 -0
- package/dist/utils/noOutputSentinel.js +192 -0
- package/package.json +1 -1
package/dist/utils/noOutputSentinel.js
ADDED
```diff
@@ -0,0 +1,192 @@
+/**
+ * Curator P3-6: shared builder for the `NoOutputGeneratedError` sentinel
+ * chunk. Each provider's stream-transformation generator catches the AI
+ * SDK's `NoOutputGeneratedError` and yields this sentinel so downstream
+ * telemetry has finish reason + token usage + provider error context
+ * instead of just `{ noOutput: true, errorType: "..." }`.
+ *
+ * The AI SDK rejects `result.finishReason` / `result.totalUsage` in this
+ * branch today (see `ai/src/generate-text/stream-text.ts` ~L1078); we
+ * still attempt to await them so a future SDK version surfacing partial
+ * values populates the sentinel automatically. When they reject we keep
+ * conservative defaults (`finishReason: "error"`, zero usage).
+ */
+import { NoOutputGeneratedError } from "ai";
+import { trace, context as otelContext } from "@opentelemetry/api";
+export async function buildNoOutputSentinel(error, result,
+/**
+ * Reviewer follow-up: AI SDK v6 wraps the AI SDK's
+ * `NoOutputGeneratedError` without preserving the underlying provider
+ * error in `error.cause`, and rejects `result.finishReason` /
+ * `result.totalUsage` with the wrapped error too. To differentiate
+ * content-filter / stop-sequence / provider-crash, providers can
+ * capture the upstream error (e.g. via streamText's `onError`
+ * callback) and pass it here. When provided, it takes precedence
+ * over the AI SDK error for `providerError` and `modelResponseRaw`.
+ */
+underlyingError) {
+    let finishReason = "error";
+    // Reviewer follow-up: include both AI SDK v4 (promptTokens /
+    // completionTokens) and v6 (inputTokens / outputTokens) keys in the
+    // default usage so downstream consumers reading either shape see
+    // correct zeros instead of `undefined`. Also keep `totalTokens` for
+    // back-compat.
+    let usage = {
+        promptTokens: 0,
+        completionTokens: 0,
+        inputTokens: 0,
+        outputTokens: 0,
+        totalTokens: 0,
+    };
+    if (result) {
+        try {
+            if (result.finishReason !== undefined) {
+                finishReason = await Promise.resolve(result.finishReason);
+            }
+        }
+        catch {
+            // Expected: AI SDK rejects with the same NoOutputGeneratedError.
+        }
+        try {
+            if (result.totalUsage !== undefined) {
+                usage = await Promise.resolve(result.totalUsage);
+            }
+        }
+        catch {
+            // Expected: AI SDK rejects with the same NoOutputGeneratedError.
+        }
+    }
+    // Prefer the provider-captured underlying error for `providerError` /
+    // `modelResponseRaw` since the AI SDK NoOutputGeneratedError doesn't
+    // carry the actual upstream cause. Fall back to the AI SDK error.
+    const messageSource = underlyingError instanceof Error
+        ? underlyingError
+        : underlyingError !== undefined
+            ? new Error(String(underlyingError))
+            : error instanceof Error
+                ? error
+                : new Error(String(error));
+    const providerError = messageSource.message;
+    const causeFromSource = messageSource.cause;
+    // Reviewer follow-up: guard the `error.cause` access so it doesn't
+    // throw a TypeError when `error` is null/undefined (only valid object
+    // values can be indexed safely).
+    const causeFromError = error !== null && typeof error === "object"
+        ? error.cause
+        : undefined;
+    const cause = causeFromSource !== undefined ? causeFromSource : causeFromError;
+    // Reviewer follow-up: always populate `modelResponseRaw` so downstream
+    // telemetry consumers can rely on the field being a string. When neither
+    // an `underlyingError` nor a `cause` is available, fall back to error
+    // name + message so we still carry *something* about what the provider
+    // returned.
+    const modelResponseRaw = cause !== undefined
+        ? String(cause).slice(0, 500)
+        : `${messageSource.name}: ${messageSource.message}`.slice(0, 500);
+    return {
+        content: "",
+        metadata: {
+            noOutput: true,
+            errorType: "NoOutputGeneratedError",
+            finishReason,
+            usage,
+            providerError,
+            modelResponseRaw,
+        },
+    };
+}
```
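To make the intended call site concrete, here is a minimal sketch of a provider-side transform generator using this builder. The generator name, import path, and chunk shape are assumptions for illustration, not neurolink's actual provider code:

```js
// Hypothetical provider-side transform (names and import path are
// illustrative). It wraps the AI SDK textStream and converts a thrown
// NoOutputGeneratedError into the enriched sentinel chunk instead of
// surfacing a bare error to stream consumers.
import { NoOutputGeneratedError } from "ai";
import { buildNoOutputSentinel } from "./noOutputSentinel.js";

export async function* yieldWithSentinel(result, getCapturedError) {
  try {
    for await (const text of result.textStream) {
      yield { content: text };
    }
  } catch (err) {
    if (NoOutputGeneratedError.isInstance(err)) {
      // Pass the upstream error captured via streamText's onError
      // callback (if any) so the sentinel carries the real cause.
      yield await buildNoOutputSentinel(err, result, getCapturedError?.());
      return;
    }
    throw err; // anything else keeps the provider's existing handling
  }
}
```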
```diff
+/**
+ * Curator P3-6 (round-2): the AI SDK v6 path that sets
+ * `NoOutputGeneratedError` does NOT throw it from `result.textStream`
+ * iteration — it sets the error as a *promise rejection* on
+ * `result.finishReason` / `result.totalUsage` / `result.steps` (see
+ * `ai/src/generate-text/stream-text.ts` ~L1078). Providers that only
+ * catch errors thrown from `for await (chunk of result.textStream)` will
+ * miss the production trigger entirely: the stream completes silently
+ * with 0 chunks and the rejection bubbles as an unhandled rejection.
+ *
+ * This helper surfaces the rejection by awaiting `result.finishReason`
+ * after the stream completes. Providers must call this AFTER iterating
+ * the textStream when 0 chunks were yielded — the returned sentinel
+ * (if non-null) carries the enriched metadata Curator's report needed.
+ */
+export async function detectPostStreamNoOutput(result,
+/**
+ * Optional provider-captured underlying error (e.g. from streamText's
+ * `onError` callback). When provided, the resulting sentinel will carry
+ * the real provider error in `providerError` / `modelResponseRaw`
+ * instead of the AI SDK's generic "No output generated" message.
+ */
+underlyingError) {
+    if (result.finishReason === undefined) {
+        return null;
+    }
+    try {
+        await Promise.resolve(result.finishReason);
+        // No rejection — the stream completed normally with a valid finish
+        // reason; this is the empty-but-not-erroring case (e.g. AI SDK
+        // recorded a step with no text), not the no-output failure.
+        return null;
+    }
+    catch (err) {
+        if (NoOutputGeneratedError.isInstance(err)) {
+            return {
+                sentinel: await buildNoOutputSentinel(err, result, underlyingError),
+                error: err,
+            };
+        }
+        // Other rejection types (network errors, parse errors) are not the
+        // bug-confirmed scenario — let the caller's existing error handling
+        // surface them.
+        return null;
+    }
+}
```
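A sketch of the post-stream calling pattern that comment prescribes, assuming a hypothetical provider wrapper. The names are made up; `onError` is the streamText error callback the sentinel comments themselves reference:

```js
import { streamText } from "ai";
import { detectPostStreamNoOutput, stampNoOutputSpan } from "./noOutputSentinel.js";

export async function* streamWithNoOutputDetection(model, prompt) {
  let capturedError;
  const result = streamText({
    model,
    prompt,
    // Capture the real upstream provider error; the AI SDK's wrapped
    // NoOutputGeneratedError doesn't preserve it in `cause`.
    onError: ({ error }) => {
      capturedError = error;
    },
  });
  let chunks = 0;
  for await (const text of result.textStream) {
    chunks += 1;
    yield { content: text };
  }
  // The failure never throws from the loop above; it only rejects
  // result.finishReason, so the check must run AFTER the stream ends.
  if (chunks === 0) {
    const detected = await detectPostStreamNoOutput(result, capturedError);
    if (detected) {
      stampNoOutputSpan(detected.sentinel); // OTel stamping, defined below
      yield detected.sentinel;
    }
  }
}
```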
```diff
+/**
+ * Reviewer follow-up: every provider's post-stream NoOutput detect must
+ * stamp the active OTel span so Pipeline B (`ContextEnricher.onEnd()` →
+ * `applyNonErrorLangfuseLevel`) surfaces a WARNING-level Langfuse
+ * observation with the enriched status message. Without this, only
+ * `StreamHandler`-based providers produced the rich telemetry; the
+ * provider-specific paths (openAI, openaiCompatible, litellm,
+ * huggingFace, openRouter, anthropicBaseProvider) yielded the sentinel
+ * to direct stream consumers but Pipeline B saw nothing.
+ *
+ * Stamps three attributes:
+ * - `neurolink.no_output = true` (Pipeline B trigger)
+ * - `langfuse.status_message` (enriched, with finishReason + tokens)
+ * - `neurolink.no_output.finish_reason` (raw finish reason)
+ *
+ * Safe to call when tracing isn't initialized — silently no-ops.
+ */
+export function stampNoOutputSpan(sentinel) {
+    try {
+        const activeSpan = trace.getSpan(otelContext.active());
+        if (!activeSpan) {
+            return;
+        }
+        activeSpan.setAttribute("neurolink.no_output", true);
+        activeSpan.setAttribute("langfuse.status_message", buildNoOutputStatusMessage(sentinel.metadata.finishReason, sentinel.metadata.usage));
+        activeSpan.setAttribute("neurolink.no_output.finish_reason", String(sentinel.metadata.finishReason));
+    }
+    catch {
+        // Tracing not initialized — ignore.
+    }
+}
```
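Stamping only takes effect when the call runs inside an active OTel span. A minimal sketch, with a made-up tracer name and a placeholder sentinel shaped like `buildNoOutputSentinel`'s return value:

```js
import { trace } from "@opentelemetry/api";
import { stampNoOutputSpan } from "./noOutputSentinel.js";

// Placeholder sentinel (illustrative values only).
const sentinel = {
  content: "",
  metadata: {
    noOutput: true,
    errorType: "NoOutputGeneratedError",
    finishReason: "error",
    usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
    providerError: "example upstream failure",
    modelResponseRaw: "Error: example upstream failure",
  },
};

const tracer = trace.getTracer("neurolink-example"); // hypothetical tracer
tracer.startActiveSpan("provider.stream", (span) => {
  // Inside this callback the span is active, so the helper attaches
  // neurolink.no_output, langfuse.status_message, and
  // neurolink.no_output.finish_reason to it.
  stampNoOutputSpan(sentinel);
  span.end();
});
```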
```diff
+/**
+ * Build the OTel `langfuse.status_message` summary string for a no-output
+ * stream. Used by `StreamHandler.createTextStream` and any future provider
+ * that wants to stamp the active span with the same enriched message.
+ *
+ * Reviewer follow-up: AI SDK v4 used `promptTokens` / `completionTokens`,
+ * v6 uses `inputTokens` / `outputTokens`. Read both shapes so the message
+ * is correct whichever version surfaced partial usage data.
+ */
+export function buildNoOutputStatusMessage(finishReason, usage) {
+    const u = usage;
+    const inputTokens = u?.inputTokens ?? u?.promptTokens ?? 0;
+    const outputTokens = u?.outputTokens ?? u?.completionTokens ?? 0;
+    return (`Stream produced no output (NoOutputGeneratedError): ` +
+        `finishReason=${String(finishReason)}, ` +
+        `inputTokens=${inputTokens}, ` +
+        `outputTokens=${outputTokens}`);
+}
```
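A worked example of the resulting message, with made-up token counts, showing that both usage shapes produce the same string:

```js
import { buildNoOutputStatusMessage } from "./noOutputSentinel.js";

buildNoOutputStatusMessage("content-filter", { inputTokens: 12, outputTokens: 0 });
// => "Stream produced no output (NoOutputGeneratedError): finishReason=content-filter, inputTokens=12, outputTokens=0"

buildNoOutputStatusMessage("content-filter", { promptTokens: 12, completionTokens: 0 });
// => the same string, read through the AI SDK v4 usage keys
```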
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "9.59.2",
+  "version": "9.59.4",
   "packageManager": "pnpm@10.15.1",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 13 providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
```