@xsai-ext/telemetry 0.4.0-beta.8 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +3 -4
- package/dist/index.js +168 -290
- package/package.json +7 -8
package/dist/index.d.ts
CHANGED

@@ -1,10 +1,9 @@
-import {
+import { Attributes } from '@opentelemetry/api';
 import { WithUnknown, GenerateTextOptions, GenerateTextResult, StreamTextOptions, StreamTextResult } from 'xsai';
 export * from 'xsai';

-type TelemetryMetadata = Record<string, AttributeValue>;
 interface TelemetryOptions {
-
+    attributes?: Attributes;
 }
 type WithTelemetry<T> = T & {
     telemetry?: TelemetryOptions;
@@ -23,4 +22,4 @@ declare const generateText: (options: WithUnknown<WithTelemetry<GenerateTextOpti
 declare const streamText: (options: WithUnknown<WithTelemetry<StreamTextOptions>>) => StreamTextResult;

 export { generateText, streamText };
-export type {
+export type { TelemetryOptions, WithTelemetry };
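The typings change is the whole public API shift of this release: the TelemetryMetadata record is gone, and TelemetryOptions now carries plain OpenTelemetry Attributes that get spread onto the chat span. A minimal usage sketch against the new typings; the model name, base URL, API key, and custom attribute key below are placeholders, not part of the package:

    import { generateText } from '@xsai-ext/telemetry'

    // `telemetry.attributes` accepts any OpenTelemetry Attributes and is merged
    // into the span via `...options.telemetry?.attributes` (see index.js below).
    const { text } = await generateText({
      apiKey: 'sk-...',                       // placeholder credential
      baseURL: 'https://api.openai.com/v1/',  // feeds the `server.address` attribute
      messages: [{ content: 'Hello!', role: 'user' }],
      model: 'gpt-4o-mini',                   // placeholder model
      telemetry: { attributes: { 'session.id': 'demo' } },
    })
    console.log(text)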
package/dist/index.js
CHANGED

@@ -1,91 +1,37 @@
-import {
+import { trampoline, chat, responseJSON, determineStepType, executeTool, DelayedPromise, objCamelToSnake } from 'xsai';
 export * from 'xsai';
 import { trace, SpanStatusCode } from '@opentelemetry/api';

-const
-)
-"
-"
-"gen_ai.
+const chatSpan = (options, tracer) => ({
+  attributes: {
+    "gen_ai.input.messages": JSON.stringify(options.messages),
+    "gen_ai.operation.name": "chat",
+    "gen_ai.provider.name": "openai",
+    "gen_ai.request.choice.count": 1,
+    "gen_ai.request.frequency_penalty": options.frequencyPenalty,
+    "gen_ai.request.model": options.model,
+    "gen_ai.request.presence_penalty": options.presencePenalty,
+    "gen_ai.request.seed": options.seed,
+    "gen_ai.request.stop_sequences": options.stop == null ? void 0 : Array.isArray(options.stop) ? options.stop : [options.stop],
+    "gen_ai.request.temperature": options.temperature,
+    "gen_ai.request.top_k": options.topK,
+    "gen_ai.request.top_p": options.topP,
+    "gen_ai.response.id": crypto.randomUUID(),
+    "gen_ai.response.model": options.model,
+    "gen_ai.tool.definitions": JSON.stringify(options.tools?.map((tool) => ({ function: tool.function, type: tool.type }))),
+    "server.address": new URL(options.baseURL).host,
+    ...options.telemetry?.attributes
+    // TODO: gen_ai.output.type
+  },
+  name: `chat ${options.model}`,
+  tracer
 });

-    throw new Error(`No choices returned, response body: ${JSON.stringify(res)}`);
-  const messages = [];
-  const toolCalls = [];
-  const { finish_reason: finishReason, message } = choices[0];
-  const msgToolCalls = message?.tool_calls ?? [];
-  const stepType = determineStepType({
-    finishReason,
-    maxSteps: options.maxSteps ?? 1,
-    stepsLength: options.steps?.length ?? 0,
-    toolCallsLength: msgToolCalls.length
-  });
-  messages.push(clean({
-    ...message,
-    reasoning_content: void 0
-  }));
-  if (finishReason !== "stop" || stepType !== "done") {
-    for (const toolCall of msgToolCalls) {
-      toolCalls.push({
-        args: toolCall.function.arguments,
-        toolCallId: toolCall.id,
-        toolCallType: toolCall.type,
-        toolName: toolCall.function.name
-      });
-    }
-  }
-  return [
-    {
-      finishReason,
-      stepType,
-      text: message.content,
-      toolCalls,
-      usage
-    },
-    {
-      messages,
-      msgToolCalls,
-      reasoningText: message.reasoning_content
-    }
-  ];
-};
-const extractGenerateTextStepPost = async (options, msgToolCalls) => {
-  const inputMessages = structuredClone(options.messages);
-  const outputMessages = [];
-  const toolResults = [];
-  for (const toolCall of msgToolCalls) {
-    const { completionToolResult, message } = await executeTool({
-      abortSignal: options.abortSignal,
-      messages: inputMessages,
-      toolCall,
-      tools: options.tools
-    });
-    toolResults.push(completionToolResult);
-    outputMessages.push(message);
-  }
-  return [
-    toolResults,
-    outputMessages
-  ];
-};
+var version = "0.4.0";
+var pkg = {
+  version: version};

-const getTracer = () => trace.getTracer("@xsai-ext/telemetry");
+const getTracer = () => trace.getTracer("@xsai-ext/telemetry", pkg.version);

 const recordErrorOnSpan = (span, error) => {
   if (error instanceof Error) {
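This hunk replaces the old ai.* attribute plumbing with a single chatSpan factory that emits OpenTelemetry GenAI semantic-convention attributes (gen_ai.*), and the tracer is now requested with the package version attached. Those spans only become visible once a global tracer provider is registered; a minimal sketch using the OTel SDK packages from this package's devDependencies (the console exporter is chosen here purely for illustration):

    import { ConsoleSpanExporter, SimpleSpanProcessor } from '@opentelemetry/sdk-trace-base'
    import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node'

    // With a registered provider, trace.getTracer('@xsai-ext/telemetry', pkg.version)
    // resolves to a real tracer; without one it silently yields a no-op tracer.
    const provider = new NodeTracerProvider({
      spanProcessors: [new SimpleSpanProcessor(new ConsoleSpanExporter())],
    })
    provider.register()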
@@ -122,44 +68,28 @@ const recordSpan = async ({
     throw error;
   }
 });
-const recordSpanSync = ({
-  attributes,
-  endWhenDone = true,
-  name,
-  tracer
-}, fn) => tracer.startActiveSpan(name, { attributes }, (span) => {
-  try {
-    const result = fn(span);
-    if (endWhenDone)
-      span.end();
-    return result;
-  } catch (error) {
-    try {
-      recordErrorOnSpan(span, error);
-    } finally {
-      span.end();
-    }
-    throw error;
-  }
-});
-
-const stringifyTool = ({ function: func, type }) => JSON.stringify({ description: func.description, inputSchema: func.parameters, name: func.name, type });

 const wrapTool = (tool, tracer) => ({
   execute: async (input, options) => recordSpan({
     attributes: {
-      "
-      "
-      "
-      "
-      "
+      "gen_ai.operation.name": "execute_tool",
+      "gen_ai.tool.call.arguments": JSON.stringify(input),
+      "gen_ai.tool.call.description": tool.function.description,
+      "gen_ai.tool.call.id": options.toolCallId,
+      "gen_ai.tool.call.name": tool.function.name
     },
-    name:
+    name: `execute_tool ${tool.function.name}`,
     tracer
   }, async (span) => {
+    try {
+      const result = await tool.execute(input, options);
+      span.setAttribute("gen_ai.tool.call.result", JSON.stringify(result));
+      return result;
+    } catch (err) {
+      if (err instanceof Error)
+        span.setAttribute("error.type", err.message);
+      throw err;
+    }
   }),
   function: tool.function,
   type: tool.type
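wrapTool intercepts a completions-style tool definition so that every execute call becomes an execute_tool span carrying the call arguments, id, and result (plus error.type on failure). The tool shape below is inferred from the fields the wrapper reads (tool.function.name, tool.function.description, tool.execute, tool.type); the weather example itself is made up:

    const weatherTool = {
      function: {
        description: 'Look up current weather for a city',  // becomes gen_ai.tool.call.description
        name: 'get_weather',                                 // becomes gen_ai.tool.call.name
        parameters: { properties: { city: { type: 'string' } }, required: ['city'], type: 'object' },
      },
      // Input and result are JSON.stringify-ed onto the span by the wrapper.
      execute: async ({ city }: { city: string }) => `Sunny in ${city}`,
      type: 'function' as const,
    }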
@@ -167,65 +97,63 @@ const wrapTool = (tool, tracer) => ({

 const generateText = async (options) => {
   const tracer = getTracer();
-  const rawGenerateText = async (options2) => {
+  const rawGenerateText = async (options2) => recordSpan(chatSpan(options2, tracer), async (span) => chat({
+    ...options2,
+    maxSteps: void 0,
+    steps: void 0,
+    stream: false
+  }).then(responseJSON).then(async (res) => {
+    const { choices, usage } = res;
+    if (!choices?.length)
+      throw new Error(`No choices returned, response body: ${JSON.stringify(res)}`);
     const messages = structuredClone(options2.messages);
     const steps = options2.steps ? structuredClone(options2.steps) : [];
-    const
-      "ai.prompt.messages": JSON.stringify(messages),
-      "ai.response.model": options2.model,
-      "gen_ai.request.model": options2.model,
-      "gen_ai.response.id": crypto.randomUUID(),
-      "gen_ai.response.model": options2.model,
-      "gen_ai.system": "xsai"
-    },
-    name: "ai.generateText.doGenerate",
-    tracer
-  }, async (span) => {
-    const res = await chat({
-      ...options2,
-      maxSteps: void 0,
-      steps: void 0,
-      stream: false,
-      telemetry: void 0
-    }).then(responseJSON);
-    const [step2, { messages: msgs, msgToolCalls: msgToolCalls2, reasoningText: reasoningText2 }] = await extractGenerateTextStep({
-      ...options2,
-      messages,
-      steps
-    }, res);
-    span.setAttributes({
-      ...step2.text != null && step2.toolCalls.length === 0 ? { "ai.response.text": step2.text } : {},
-      ...step2.toolCalls.length > 0 ? { "ai.response.toolCalls": JSON.stringify(step2.toolCalls) } : {},
-      "ai.response.finishReason": step2.finishReason,
-      "ai.usage.completionTokens": step2.usage.completion_tokens,
-      "ai.usage.promptTokens": step2.usage.prompt_tokens,
-      "gen_ai.response.finish_reasons": [step2.finishReason],
-      "gen_ai.usage.input_tokens": step2.usage.prompt_tokens,
-      "gen_ai.usage.output_tokens": step2.usage.completion_tokens
-    });
-    return [step2, { messages: msgs, msgToolCalls: msgToolCalls2, reasoningText: reasoningText2 }];
+    const toolCalls = [];
+    const toolResults = [];
+    const { finish_reason: finishReason, message } = choices[0];
+    const msgToolCalls = message?.tool_calls ?? [];
+    const stepType = determineStepType({
+      finishReason,
+      maxSteps: options2.maxSteps ?? 1,
+      stepsLength: steps.length,
+      toolCallsLength: msgToolCalls.length
     });
+    messages.push(message);
+    span.setAttribute("gen_ai.output.messages", JSON.stringify([message]));
+    if (finishReason !== "stop" && stepType !== "done") {
+      for (const toolCall of msgToolCalls) {
+        const { completionToolCall, completionToolResult, message: message2 } = await executeTool({
+          abortSignal: options2.abortSignal,
+          messages,
+          toolCall,
+          tools: options2.tools
+        });
+        toolCalls.push(completionToolCall);
+        toolResults.push(completionToolResult);
+        messages.push(message2);
+      }
+    }
+    const step = {
+      finishReason,
+      stepType,
+      text: Array.isArray(message.content) ? message.content.filter((m) => m.type === "text").map((m) => m.text).join("\n") : message.content,
+      toolCalls,
+      toolResults,
+      usage
+    };
     steps.push(step);
+    span.setAttributes({
+      "gen_ai.response.finish_reasons": [step.finishReason],
+      "gen_ai.usage.input_tokens": step.usage.prompt_tokens,
+      "gen_ai.usage.output_tokens": step.usage.completion_tokens
+    });
     if (options2.onStepFinish)
       await options2.onStepFinish(step);
     if (step.finishReason === "stop" || step.stepType === "done") {
       return {
         finishReason: step.finishReason,
         messages,
-        reasoningText,
+        reasoningText: message.reasoning_content,
         steps,
         text: step.text,
         toolCalls: step.toolCalls,
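generateText now records one chat span per request and drives the step loop itself: after a non-terminal finish it executes the returned tool calls, appends their messages, and (as the following hunk shows) re-enters via trampoline. A hedged sketch of driving that loop, reusing the hypothetical weatherTool from above; credentials and model are placeholders:

    const result = await generateText({
      apiKey: 'sk-...',                       // placeholder
      baseURL: 'https://api.openai.com/v1/',  // placeholder
      maxSteps: 3,                            // allow up to two tool round-trips
      messages: [{ content: 'Weather in Tokyo?', role: 'user' }],
      model: 'gpt-4o-mini',                   // placeholder
      onStepFinish: (step) => console.log(step.stepType, step.finishReason),
      tools: [weatherTool],
    })
    console.log(result.text, result.steps.length)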
@@ -239,33 +167,13 @@ const generateText = async (options) => {
       steps
     });
     }
-  };
-  return
-    "ai.prompt": JSON.stringify({ messages: options.messages })
-  },
-  name: "ai.generateText",
-  tracer
-  }, async (span) => {
-    const result = await trampoline(async () => rawGenerateText({
-      ...options,
-      tools: options.tools?.map((tool) => wrapTool(tool, tracer))
-    }));
-    span.setAttributes({
-      ...result.toolCalls.length > 0 ? { "ai.response.toolCalls": JSON.stringify(result.toolCalls) } : {},
-      ...result.text != null ? { "ai.response.text": result.text } : {},
-      "ai.response.finishReason": result.finishReason,
-      "ai.usage.completionTokens": result.usage.completion_tokens,
-      "ai.usage.promptTokens": result.usage.prompt_tokens
-    });
-    return result;
-  });
+  }));
+  return trampoline(async () => rawGenerateText({
+    ...options,
+    tools: options.tools?.map((tool) => wrapTool(tool, tracer))
+  }));
 };

-const now = () => globalThis?.performance?.now() ?? Date.now();
-
 const parseChunk = (text) => {
   if (!text || !text.startsWith("data:"))
     return [void 0, false];
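The outer ai.generateText wrapper span is gone; generateText simply returns trampoline(...). trampoline is imported from xsai and, judging by its use here (rawGenerateText and doStream return async () => ... continuations when another step is pending), it behaves roughly like the sketch below. This is an assumption about xsai internals, not the actual implementation:

    // Presumed behavior: keep awaiting while the callee hands back another
    // zero-argument continuation, turning step recursion into a flat loop
    // that does not grow the call stack.
    type Thunk<T> = () => Promise<T | Thunk<T>>
    const trampolineSketch = async <T>(fn: Thunk<T>): Promise<T> => {
      let result = await fn()
      while (typeof result === 'function')
        result = await (result as Thunk<T>)()
      return result as T
    }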
@@ -318,8 +226,10 @@ const streamText = (options) => {
   const resultTotalUsage = new DelayedPromise();
   let eventCtrl;
   let textCtrl;
+  let reasoningTextCtrl;
   const eventStream = new ReadableStream({ start: (controller) => eventCtrl = controller });
   const textStream = new ReadableStream({ start: (controller) => textCtrl = controller });
+  const reasoningTextStream = new ReadableStream({ start: (controller) => reasoningTextCtrl = controller });
   const pushEvent = (stepEvent) => {
     eventCtrl?.enqueue(stepEvent);
     void options.onEvent?.(stepEvent);
@@ -329,26 +239,7 @@ const streamText = (options) => {
     void options.onStepFinish?.(step);
   };
   const tools = options.tools != null && options.tools.length > 0 ? options.tools.map((tool) => wrapTool(tool, tracer)) : void 0;
-  const doStream = async () => recordSpan({
-    attributes: {
-      ...idAttributes(),
-      ...commonAttributes("ai.streamText.doStream", options.model),
-      ...metadataAttributes(options.telemetry?.metadata),
-      ...tools != null && tools.length > 0 && {
-        "ai.prompt.toolChoice": JSON.stringify(options.toolChoice ?? { type: "auto" }),
-        "ai.prompt.tools": tools.map(stringifyTool)
-      },
-      "ai.prompt.messages": JSON.stringify(options.messages),
-      "ai.response.model": options.model,
-      "gen_ai.request.model": options.model,
-      "gen_ai.response.id": crypto.randomUUID(),
-      "gen_ai.response.model": options.model,
-      "gen_ai.system": "xsai"
-    },
-    name: "ai.streamText.doStream",
-    tracer
-  }, async (span) => {
-    const startMs = now();
+  const doStream = async () => recordSpan(chatSpan({ ...options, messages }, tracer), async (span) => {
     const { body: stream } = await chat({
       ...options,
       maxSteps: void 0,
@@ -366,15 +257,21 @@ const streamText = (options) => {
       } : { ...u };
     };
     let text = "";
+    let reasoningText;
     const pushText = (content) => {
       textCtrl?.enqueue(content);
       text += content;
     };
+    const pushReasoningText = (reasoningContent) => {
+      if (reasoningText == null)
+        reasoningText = "";
+      reasoningTextCtrl?.enqueue(reasoningContent);
+      reasoningText += reasoningContent;
+    };
     const tool_calls = [];
     const toolCalls = [];
     const toolResults = [];
     let finishReason = "other";
-    let firstChunk = true;
     await stream.pipeThrough(transformChunk()).pipeTo(new WritableStream({
       abort: (reason) => {
         eventCtrl?.error(reason);
@@ -384,23 +281,15 @@ const streamText = (options) => {
       },
       // eslint-disable-next-line sonarjs/cognitive-complexity
       write: (chunk) => {
-        if (firstChunk) {
-          const msToFirstChunk = now() - startMs;
-          span.addEvent("ai.stream.firstChunk", {
-            "ai.response.msToFirstChunk": msToFirstChunk
-          });
-          span.setAttributes({
-            "ai.response.msToFirstChunk": msToFirstChunk
-          });
-          firstChunk = false;
-        }
         if (chunk.usage)
           pushUsage(chunk.usage);
         if (chunk.choices == null || chunk.choices.length === 0)
           return;
         const choice = chunk.choices[0];
-        if (choice.delta.reasoning_content != null)
+        if (choice.delta.reasoning_content != null) {
           pushEvent({ text: choice.delta.reasoning_content, type: "reasoning-delta" });
+          pushReasoningText(choice.delta.reasoning_content);
+        }
         if (choice.finish_reason != null)
           finishReason = choice.finish_reason;
         if (choice.delta.tool_calls?.length === 0 || choice.delta.tool_calls == null) {
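Reasoning deltas are now surfaced twice: as reasoning-delta events on the event stream and as raw text on the new reasoningTextStream. A sketch of consuming the event side (Node's ReadableStream is async-iterable; the event field names are taken from the pushEvent calls above, the options are elided):

    const { fullStream } = streamText({ /* ...same options as generateText... */ })
    for await (const event of fullStream) {
      if (event.type === 'reasoning-delta')
        process.stdout.write(event.text)
    }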
@@ -416,22 +305,44 @@ const streamText = (options) => {
           for (const toolCall of choice.delta.tool_calls) {
             const { index } = toolCall;
             if (!tool_calls.at(index)) {
-              tool_calls[index] =
+              tool_calls[index] = {
+                ...toolCall,
+                function: {
+                  ...toolCall.function,
+                  arguments: toolCall.function.arguments ?? ""
+                }
+              };
+              pushEvent({
+                toolCallId: toolCall.id,
+                toolName: toolCall.function.name,
+                type: "tool-call-streaming-start"
+              });
             } else {
               tool_calls[index].function.arguments += toolCall.function.arguments;
-              pushEvent({
+              pushEvent({
+                argsTextDelta: toolCall.function.arguments,
+                toolCallId: toolCall.id,
+                toolName: toolCall.function.name ?? tool_calls[index].function.name,
+                type: "tool-call-delta"
+              });
             }
           }
         }
       }
     }));
-
+    const message = {
+      content: text,
+      reasoning_content: reasoningText,
+      role: "assistant",
+      tool_calls: tool_calls.length > 0 ? tool_calls : void 0
+    };
+    messages.push(message);
+    span.setAttribute("gen_ai.output.messages", JSON.stringify([message]));
     if (tool_calls.length !== 0) {
       for (const toolCall of tool_calls) {
         if (toolCall == null)
           continue;
-        const { completionToolCall, completionToolResult, message } = await executeTool({
+        const { completionToolCall, completionToolResult, message: message2 } = await executeTool({
          abortSignal: options.abortSignal,
          messages,
          toolCall,
@@ -439,7 +350,7 @@ const streamText = (options) => {
         });
         toolCalls.push(completionToolCall);
         toolResults.push(completionToolResult);
-        messages.push(
+        messages.push(message2);
         pushEvent({ ...completionToolCall, type: "tool-call" });
         pushEvent({ ...completionToolResult, type: "tool-result" });
       }
@@ -459,19 +370,9 @@ const streamText = (options) => {
       usage
     };
     pushStep(step);
-    const msToFinish = now() - startMs;
-    span.addEvent("ai.stream.finish");
     span.setAttributes({
-      "ai.response.msToFinish": msToFinish,
-      ...step.toolCalls.length > 0 && { "ai.response.toolCalls": JSON.stringify(step.toolCalls) },
-      "ai.response.finishReason": step.finishReason,
-      "ai.response.text": step.text != null ? step.text : "",
       "gen_ai.response.finish_reasons": [step.finishReason],
       ...step.usage && {
-        "ai.response.avgOutputTokensPerSecond": 1e3 * (step.usage.completion_tokens ?? 0) / msToFinish,
-        "ai.usage.inputTokens": step.usage.prompt_tokens,
-        "ai.usage.outputTokens": step.usage.completion_tokens,
-        "ai.usage.totalTokens": step.usage.total_tokens,
        "gen_ai.usage.input_tokens": step.usage.prompt_tokens,
        "gen_ai.usage.output_tokens": step.usage.completion_tokens
      }
@@ -479,61 +380,38 @@ const streamText = (options) => {
     if (toolCalls.length !== 0 && steps.length < maxSteps)
       return async () => doStream();
   });
-      "ai.response.text": finishStep.text != null ? finishStep.text : ""
-      });
-    }
-    if (totalUsage) {
-      rootSpan.setAttributes({
-        "ai.usage.inputTokens": totalUsage.prompt_tokens,
-        "ai.usage.outputTokens": totalUsage.completion_tokens,
-        "ai.usage.totalTokens": totalUsage.total_tokens
-      });
-    }
-    void options.onFinish?.(finishStep);
-    rootSpan.end();
-    }
-  })();
-  return {
-    fullStream: eventStream,
-    messages: resultMessages.promise,
-    steps: resultSteps.promise,
-    textStream,
-    totalUsage: resultTotalUsage.promise,
-    usage: resultUsage.promise
-  };
-  });
+  void (async () => {
+    try {
+      await trampoline(async () => doStream());
+      eventCtrl?.close();
+      textCtrl?.close();
+      reasoningTextCtrl?.close();
+    } catch (err) {
+      eventCtrl?.error(err);
+      textCtrl?.error(err);
+      reasoningTextCtrl?.error(err);
+      resultSteps.reject(err);
+      resultMessages.reject(err);
+      resultUsage.reject(err);
+      resultTotalUsage.reject(err);
+    } finally {
+      resultSteps.resolve(steps);
+      resultMessages.resolve(messages);
+      resultUsage.resolve(usage);
+      resultTotalUsage.resolve(totalUsage);
+      const finishStep = steps.at(-1);
+      void options.onFinish?.(finishStep);
+    }
+  })();
+  return {
+    fullStream: eventStream,
+    messages: resultMessages.promise,
+    reasoningTextStream,
+    steps: resultSteps.promise,
+    textStream,
+    totalUsage: resultTotalUsage.promise,
+    usage: resultUsage.promise
+  };
 };

 export { generateText, streamText };
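streamText now exposes reasoningTextStream alongside textStream, and stream teardown resolves the DelayedPromise-backed results even on error. A consumption sketch (credentials and model are placeholders; both streams are read concurrently so neither blocks the other):

    const { reasoningTextStream, textStream, usage } = streamText({
      apiKey: 'sk-...',                       // placeholder
      baseURL: 'https://api.openai.com/v1/',  // placeholder
      messages: [{ content: 'Think step by step: 17 * 23?', role: 'user' }],
      model: 'gpt-4o-mini',                   // placeholder
    })
    await Promise.all([
      (async () => { for await (const t of reasoningTextStream) process.stdout.write(t) })(),
      (async () => { for await (const t of textStream) process.stdout.write(t) })(),
    ])
    console.log('\ntokens:', await usage)  // resolved once the stream finishes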
package/package.json
CHANGED

@@ -1,7 +1,7 @@
 {
   "name": "@xsai-ext/telemetry",
   "type": "module",
-  "version": "0.4.0-beta.8",
+  "version": "0.4.0",
   "description": "extra-small AI SDK.",
   "author": "Moeru AI",
   "license": "MIT",
@@ -30,18 +30,17 @@
   ],
   "dependencies": {
     "@opentelemetry/api": "^1.9.0",
-    "xsai": "~0.4.0
+    "xsai": "~0.4.0"
   },
   "devDependencies": {
-    "@
-    "@opentelemetry/sdk-trace-
-    "
-    "
-    "zod": "^4.1.5"
+    "@langfuse/otel": "^4.5.1",
+    "@opentelemetry/sdk-trace-base": "^2.2.0",
+    "@opentelemetry/sdk-trace-node": "^2.2.0",
+    "zod": "^4.2.1"
   },
   "scripts": {
     "build": "pkgroll",
-    "test": "vitest run
+    "test": "vitest run"
   },
   "main": "./dist/index.js",
   "types": "./dist/index.d.ts"