@xsai-ext/telemetry 0.4.4 → 0.5.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +296 -267
- package/package.json +3 -2
package/dist/index.js
CHANGED
@@ -1,8 +1,9 @@
-import { embed as embed$1, clean, embedMany as embedMany$1, trampoline, chat, responseJSON,
+import { embed as embed$1, clean, embedMany as embedMany$1, trampoline, resolveStepOptions, chat, responseJSON, InvalidResponseError, stepCountAtLeast, executeTool, shouldStop, determineStepType, DelayedPromise, objCamelToSnake } from 'xsai';
 export * from 'xsai';
 import { trace, SpanStatusCode } from '@opentelemetry/api';
+import { createControlledStream, errorControllers, closeControllers, EventSourceParserStream, JsonMessageTransformStream } from '@xsai/shared-stream';
 
-var version = "0.4.4";
+var version = "0.5.0-beta.2";
 var pkg = {
   version: version};
 
@@ -131,127 +132,123 @@ const wrapTool = (tool, tracer) => ({
 
 const generateText = async (options) => {
   const tracer = getTracer();
-  const rawGenerateText = async (options2) =>
-    const steps = options2.steps ? structuredClone(options2.steps) : [];
-    const toolCalls = [];
-    const toolResults = [];
-    const { finish_reason: finishReason, message } = choices[0];
-    const msgToolCalls = message?.tool_calls ?? [];
-    const stepType = determineStepType({
-      finishReason,
-      maxSteps: options2.maxSteps ?? 1,
-      stepsLength: steps.length,
-      toolCallsLength: msgToolCalls.length
+  const rawGenerateText = async (options2) => {
+    const messages = options2.steps == null ? structuredClone(options2.messages) : options2.messages;
+    const steps = options2.steps ?? [];
+    const stepOptions = await resolveStepOptions({
+      messages,
+      model: options2.model,
+      prepareStep: options2.prepareStep,
+      stepNumber: steps.length,
+      steps,
+      toolChoice: options2.toolChoice
     });
+    return recordSpan(chatSpan({
+      ...options2,
+      messages: stepOptions.messages,
+      model: stepOptions.model,
+      toolChoice: stepOptions.toolChoice
+    }, tracer), async (span) => chat({
+      ...options2,
+      maxSteps: void 0,
+      messages: stepOptions.messages,
+      model: stepOptions.model,
+      steps: void 0,
+      stopWhen: void 0,
+      stream: false,
+      toolChoice: stepOptions.toolChoice
+    }).then(responseJSON).then(async (res) => {
+      const { choices, usage } = res;
+      if (!choices?.length) {
+        const responseBody = JSON.stringify(res);
+        throw new InvalidResponseError(`No choices returned, response body: ${responseBody}`, {
+          reason: "no_choices",
+          responseBody
         });
-        toolCalls.push(completionToolCall);
-        toolResults.push(completionToolResult);
-        messages.push(message2);
       }
-        finishReason,
+      const toolCalls = [];
+      const toolResults = [];
+      const { finish_reason: finishReason, message } = choices[0];
+      const msgToolCalls = message?.tool_calls ?? [];
+      const stopWhen = options2.stopWhen ?? stepCountAtLeast(1);
+      messages.push(message);
+      span.setAttribute("gen_ai.output.messages", JSON.stringify([message]));
+      if (msgToolCalls.length > 0) {
+        const results = await Promise.all(
+          msgToolCalls.map(async (toolCall) => executeTool({
+            abortSignal: options2.abortSignal,
+            messages,
+            toolCall,
+            tools: options2.tools
+          }))
+        );
+        for (const { completionToolCall, completionToolResult, message: message2 } of results) {
+          toolCalls.push(completionToolCall);
+          toolResults.push(completionToolResult);
+          messages.push(message2);
+        }
+      }
+      const stopStep = {
+        finishReason,
+        text: Array.isArray(message.content) ? message.content.filter((m) => m.type === "text").map((m) => m.text).join("\n") : message.content,
+        toolCalls,
+        toolResults,
+        usage
       };
-      return async () => rawGenerateText({
-        ...options2,
+      const stop = shouldStop(stopWhen, {
         messages,
+        step: stopStep,
+        steps: [...steps, stopStep]
       });
+      const willContinue = toolCalls.length > 0 && !stop;
+      const step = {
+        ...stopStep,
+        stepType: determineStepType({
+          finishReason,
+          stepsLength: steps.length,
+          toolCallsLength: toolCalls.length,
+          willContinue
+        })
+      };
+      steps.push(step);
+      span.setAttributes({
+        "gen_ai.response.finish_reasons": [step.finishReason],
+        "gen_ai.usage.input_tokens": step.usage.prompt_tokens,
+        "gen_ai.usage.output_tokens": step.usage.completion_tokens
+      });
+      if (options2.onStepFinish)
+        await options2.onStepFinish(step);
+      if (!willContinue) {
+        return {
+          finishReason: step.finishReason,
+          messages,
+          reasoningText: message.reasoning ?? message.reasoning_content,
+          steps,
+          text: step.text,
+          toolCalls: step.toolCalls,
+          toolResults: step.toolResults,
+          usage: step.usage
+        };
+      } else {
+        return async () => rawGenerateText({
+          ...options2,
+          messages,
+          steps
+        });
+      }
+    }));
+  };
   return trampoline(async () => rawGenerateText({
     ...options,
     tools: options.tools?.map((tool) => wrapTool(tool, tracer))
   }));
 };
 
-const parseChunk = (text) => {
-  if (!text || !text.startsWith("data:"))
-    return [void 0, false];
-  const content = text.slice("data:".length);
-  const data = content.startsWith(" ") ? content.slice(1) : content;
-  if (data === "[DONE]") {
-    return [void 0, true];
-  }
-  if (data.startsWith("{") && data.includes('"error":')) {
-    throw new Error(`Error from server: ${data}`);
-  }
-  const chunk = JSON.parse(data);
-  return [chunk, false];
-};
-const transformChunk = () => {
-  const decoder = new TextDecoder();
-  let buffer = "";
-  return new TransformStream({
-    transform: async (chunk, controller) => {
-      const text = decoder.decode(chunk, { stream: true });
-      buffer += text;
-      const lines = buffer.split("\n");
-      buffer = lines.pop() ?? "";
-      for (const line of lines) {
-        try {
-          const [chunk2, isEnd] = parseChunk(line);
-          if (isEnd)
-            break;
-          if (chunk2) {
-            controller.enqueue(chunk2);
-          }
-        } catch (error) {
-          controller.error(error);
-        }
-      }
-    }
-  });
-};
-
 const streamText = (options) => {
   const tracer = getTracer();
   const steps = [];
   const messages = structuredClone(options.messages);
-  const
+  const stopWhen = options.stopWhen ?? stepCountAtLeast(1);
   let usage;
   let totalUsage;
   let reasoningField;
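The rewritten generateText above swaps the fixed maxSteps counter for xsai's step-control options: resolveStepOptions lets a prepareStep callback adjust messages, model and toolChoice before each request, and shouldStop with a stopWhen predicate (defaulting to stepCountAtLeast(1)) decides whether another tool-calling round runs. A rough usage sketch follows, assuming generateText is still exported from @xsai-ext/telemetry as in 0.4.x and that the option names match what rawGenerateText reads; the credentials, model name, and the prepareStep callback shape are illustrative, not taken from this diff.

import { stepCountAtLeast } from 'xsai'
import { generateText } from '@xsai-ext/telemetry' // assumed export, as in 0.4.x

const { text, steps, usage } = await generateText({
  apiKey: 'sk-placeholder',                       // placeholder credentials
  baseURL: 'https://api.openai.com/v1/',
  messages: [{ content: 'Hi!', role: 'user' }],
  model: 'gpt-4o-mini',
  // stopWhen replaces maxSteps: with stepCountAtLeast(3) the loop is assumed
  // to stop once three steps have run (extra steps only happen on tool calls).
  stopWhen: stepCountAtLeast(3),
  // prepareStep can adjust the next request; its exact contract is not shown
  // in this diff, so an empty override is returned here.
  prepareStep: () => ({}),
  onStepFinish: (step) => console.log(step.stepType, step.finishReason),
})

console.log(text, steps.length, usage)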
@@ -259,14 +256,11 @@ const streamText = (options) => {
   const resultMessages = new DelayedPromise();
   const resultUsage = new DelayedPromise();
   const resultTotalUsage = new DelayedPromise();
-  const eventStream = new ReadableStream({ start: (controller) => eventCtrl = controller });
-  const textStream = new ReadableStream({ start: (controller) => textCtrl = controller });
-  const reasoningTextStream = new ReadableStream({ start: (controller) => reasoningTextCtrl = controller });
+  const [eventStream, eventCtrl] = createControlledStream();
+  const [textStream, textCtrl] = createControlledStream();
+  const [reasoningTextStream, reasoningTextCtrl] = createControlledStream();
   const pushEvent = (stepEvent) => {
-    eventCtrl?.enqueue(stepEvent);
+    eventCtrl.current?.enqueue(stepEvent);
     void options.onEvent?.(stepEvent);
   };
   const pushStep = (step) => {
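The three hand-rolled ReadableStream instances that captured their controllers into outer variables are replaced by createControlledStream from the new @xsai/shared-stream dependency. Judging from the call sites (eventCtrl.current?.enqueue(...), errorControllers(...), closeControllers(...)), it returns a [stream, controllerRef] pair whose controller is reached through .current. Below is a minimal re-implementation of that pattern for illustration only; the real helper ships in @xsai/shared-stream and may differ.

// Illustrative sketch of the controlled-stream pattern, not the library code.
const createControlledStreamSketch = () => {
  const controllerRef = { current: undefined }
  const stream = new ReadableStream({
    start: (controller) => {
      controllerRef.current = controller
    }
  })
  // Callers reach the controller through .current, mirroring
  // eventCtrl.current?.enqueue(...) in the new streamText code.
  return [stream, controllerRef]
}

const [textStream, textCtrl] = createControlledStreamSketch()
textCtrl.current?.enqueue('hello')
textCtrl.current?.close()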
@@ -274,176 +268,211 @@ const streamText = (options) => {
     void options.onStepFinish?.(step);
   };
   const tools = options.tools != null && options.tools.length > 0 ? options.tools.map((tool) => wrapTool(tool, tracer)) : void 0;
-  const doStream = async () =>
-    const
-      ...options,
-      maxSteps: void 0,
+  const doStream = async () => {
+    const stepOptions = await resolveStepOptions({
       messages,
+      model: options.model,
+      prepareStep: options.prepareStep,
+      stepNumber: steps.length,
+      steps,
+      toolChoice: options.toolChoice
     });
-    }
+    return recordSpan(chatSpan({
+      ...options,
+      messages: stepOptions.messages,
+      model: stepOptions.model,
+      toolChoice: stepOptions.toolChoice
+    }, tracer), async (span) => {
+      const { body: stream } = await chat({
+        ...options,
+        maxSteps: void 0,
+        messages: stepOptions.messages,
+        model: stepOptions.model,
+        stopWhen: void 0,
+        stream: true,
+        streamOptions: options.streamOptions != null ? objCamelToSnake(options.streamOptions) : void 0,
+        toolChoice: stepOptions.toolChoice,
+        tools
+      });
+      const pushUsage = (u) => {
+        usage = u;
+        totalUsage = totalUsage ? {
+          completion_tokens: totalUsage.completion_tokens + u.completion_tokens,
+          prompt_tokens: totalUsage.prompt_tokens + u.prompt_tokens,
+          total_tokens: totalUsage.total_tokens + u.total_tokens
+        } : u;
+      };
+      let text = "";
+      let reasoningText;
+      const pushText = (content) => {
+        textCtrl.current?.enqueue(content);
+        text += content;
+      };
+      const pushReasoningText = (reasoningContent) => {
+        if (reasoningText == null)
+          reasoningText = "";
+        reasoningTextCtrl.current?.enqueue(reasoningContent);
+        reasoningText += reasoningContent;
+      };
+      const tool_calls = [];
+      const toolCalls = [];
+      const toolResults = [];
+      let finishReason = "other";
+      await stream.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).pipeThrough(new JsonMessageTransformStream()).pipeTo(new WritableStream({
+        abort: (reason) => {
+          errorControllers(reason, eventCtrl, textCtrl, reasoningTextCtrl);
+        },
+        close: () => {
+        },
+        // eslint-disable-next-line sonarjs/cognitive-complexity
+        write: (chunk) => {
+          if (chunk.usage)
+            pushUsage(chunk.usage);
+          if (chunk.choices == null || chunk.choices.length === 0)
+            return;
+          const choice = chunk.choices[0];
+          if (choice.delta.reasoning != null) {
+            if (reasoningField !== "reasoning")
+              reasoningField = "reasoning";
+            pushEvent({ text: choice.delta.reasoning, type: "reasoning-delta" });
+            pushReasoningText(choice.delta.reasoning);
+          } else if (choice.delta.reasoning_content != null) {
+            if (reasoningField !== "reasoning_content")
+              reasoningField = "reasoning_content";
+            pushEvent({ text: choice.delta.reasoning_content, type: "reasoning-delta" });
+            pushReasoningText(choice.delta.reasoning_content);
           }
-          if (
+          if (choice.finish_reason != null)
+            finishReason = choice.finish_reason;
+          if (choice.delta.tool_calls?.length === 0 || choice.delta.tool_calls == null) {
+            if (choice.delta.content != null) {
+              pushEvent({ text: choice.delta.content, type: "text-delta" });
+              pushText(choice.delta.content);
+            } else if (choice.delta.refusal != null) {
+              pushEvent({ error: choice.delta.refusal, type: "error" });
+            } else if (choice.finish_reason != null) {
+              pushEvent({ finishReason: choice.finish_reason, type: "finish", usage });
+            }
+          } else {
+            for (const toolCall of choice.delta.tool_calls) {
+              const { index } = toolCall;
+              if (!tool_calls.at(index)) {
+                tool_calls[index] = {
+                  ...toolCall,
+                  function: {
+                    ...toolCall.function,
+                    arguments: toolCall.function.arguments ?? ""
+                  }
+                };
+                pushEvent({
+                  toolCallId: toolCall.id,
+                  toolName: toolCall.function.name,
+                  type: "tool-call-streaming-start"
+                });
+              } else {
+                tool_calls[index].function.arguments += toolCall.function.arguments;
+                pushEvent({
+                  argsTextDelta: toolCall.function.arguments,
+                  toolCallId: toolCall.id,
+                  toolName: toolCall.function.name ?? tool_calls[index].function.name,
+                  type: "tool-call-delta"
+                });
+              }
             }
           }
         }
-      }
+      }));
+      const message = {
+        ...reasoningField != null ? { [reasoningField]: reasoningText } : {},
+        content: text,
+        role: "assistant",
+        tool_calls: tool_calls.length > 0 ? tool_calls : void 0
+      };
+      messages.push(message);
+      span.setAttribute("gen_ai.output.messages", JSON.stringify([message]));
+      if (tool_calls.length !== 0) {
+        const validToolCalls = tool_calls.filter((tc) => tc != null);
+        const results = await Promise.all(
+          validToolCalls.map(async (toolCall) => executeTool({
+            abortSignal: options.abortSignal,
+            messages,
+            toolCall,
+            tools
+          }))
+        );
+        for (const { completionToolCall, completionToolResult, message: message2 } of results) {
+          toolCalls.push(completionToolCall);
+          toolResults.push(completionToolResult);
+          messages.push(message2);
+          pushEvent({ ...completionToolCall, type: "tool-call" });
+          pushEvent({ ...completionToolResult, type: "tool-result" });
+        }
+      } else {
+        pushEvent({
+          finishReason,
+          type: "finish",
+          usage
         });
-        toolCalls.push(completionToolCall);
-        toolResults.push(completionToolResult);
-        messages.push(message2);
-        pushEvent({ ...completionToolCall, type: "tool-call" });
-        pushEvent({ ...completionToolResult, type: "tool-result" });
       }
-      pushEvent({
+      const stopStep = {
         finishReason,
+        text,
+        toolCalls,
+        toolResults,
         usage
+      };
+      const stop = shouldStop(stopWhen, {
+        messages,
+        step: stopStep,
+        steps: [...steps, stopStep]
       });
+      const willContinue = toolCalls.length > 0 && !stop;
+      const step = {
+        ...stopStep,
+        stepType: determineStepType({
+          finishReason,
+          stepsLength: steps.length,
+          toolCallsLength: toolCalls.length,
+          willContinue
+        })
+      };
+      pushStep(step);
+      span.setAttributes({
+        "gen_ai.response.finish_reasons": [step.finishReason],
+        ...step.usage && {
+          "gen_ai.usage.input_tokens": step.usage.prompt_tokens,
+          "gen_ai.usage.output_tokens": step.usage.completion_tokens
+        }
+      });
+      if (willContinue)
+        return async () => doStream();
     });
-    return async () => doStream();
-  });
+  };
   void (async () => {
+    let finalError;
     try {
       await trampoline(async () => doStream());
-      eventCtrl?.close();
-      textCtrl?.close();
-      reasoningTextCtrl?.close();
     } catch (err) {
+      finalError = err;
+    }
+    try {
+      await options.onFinish?.(steps.at(-1));
+    } catch (err) {
+      finalError ??= err;
+    }
+    if (finalError != null) {
+      errorControllers(finalError, eventCtrl, textCtrl, reasoningTextCtrl);
+      resultSteps.reject(finalError);
+      resultMessages.reject(finalError);
+      resultUsage.reject(finalError);
+      resultTotalUsage.reject(finalError);
+      return;
     }
+    closeControllers(eventCtrl, textCtrl, reasoningTextCtrl);
+    resultSteps.resolve(steps);
+    resultMessages.resolve(messages);
+    resultUsage.resolve(usage);
+    resultTotalUsage.resolve(totalUsage);
   })();
   return {
     fullStream: eventStream,
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@xsai-ext/telemetry",
   "type": "module",
-  "version": "0.4.4",
+  "version": "0.5.0-beta.2",
   "description": "extra-small AI SDK.",
   "author": "Moeru AI",
   "license": "MIT",
@@ -30,7 +30,8 @@
   ],
   "dependencies": {
     "@opentelemetry/api": "^1.9.0",
-    "xsai": "~0.
+    "@xsai/shared-stream": "~0.5.0-beta.2",
+    "xsai": "~0.5.0-beta.2"
   },
   "devDependencies": {
     "@langfuse/otel": "^4.5.1",