@xsai-ext/telemetry 0.4.3 → 0.5.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +312 -250
  2. package/package.json +3 -2
package/dist/index.js CHANGED
@@ -1,8 +1,9 @@
- import { embed as embed$1, clean, embedMany as embedMany$1, trampoline, chat, responseJSON, determineStepType, executeTool, DelayedPromise, objCamelToSnake } from 'xsai';
+ import { embed as embed$1, clean, embedMany as embedMany$1, trampoline, resolveStepOptions, chat, responseJSON, InvalidResponseError, stepCountAtLeast, executeTool, shouldStop, determineStepType, RemoteAPIError, JSONParseError, DelayedPromise, objCamelToSnake } from 'xsai';
  export * from 'xsai';
  import { trace, SpanStatusCode } from '@opentelemetry/api';
+ import { EventSourceParserStream } from 'eventsource-parser/stream';

- var version = "0.4.3";
+ var version = "0.5.0-beta.1";
  var pkg = {
  version: version};

@@ -131,118 +132,139 @@ const wrapTool = (tool, tracer) => ({

  const generateText = async (options) => {
  const tracer = getTracer();
- const rawGenerateText = async (options2) => recordSpan(chatSpan(options2, tracer), async (span) => chat({
- ...options2,
- maxSteps: void 0,
- steps: void 0,
- stream: false
- }).then(responseJSON).then(async (res) => {
- const { choices, usage } = res;
- if (!choices?.length)
- throw new Error(`No choices returned, response body: ${JSON.stringify(res)}`);
- const messages = structuredClone(options2.messages);
- const steps = options2.steps ? structuredClone(options2.steps) : [];
- const toolCalls = [];
- const toolResults = [];
- const { finish_reason: finishReason, message } = choices[0];
- const msgToolCalls = message?.tool_calls ?? [];
- const stepType = determineStepType({
- finishReason,
- maxSteps: options2.maxSteps ?? 1,
- stepsLength: steps.length,
- toolCallsLength: msgToolCalls.length
+ const rawGenerateText = async (options2) => {
+ const messages = options2.steps == null ? structuredClone(options2.messages) : options2.messages;
+ const steps = options2.steps ?? [];
+ const stepOptions = await resolveStepOptions({
+ messages,
+ model: options2.model,
+ prepareStep: options2.prepareStep,
+ stepNumber: steps.length,
+ steps,
+ toolChoice: options2.toolChoice
  });
- messages.push(message);
- span.setAttribute("gen_ai.output.messages", JSON.stringify([message]));
- if (finishReason !== "stop" && stepType !== "done") {
- for (const toolCall of msgToolCalls) {
- const { completionToolCall, completionToolResult, message: message2 } = await executeTool({
- abortSignal: options2.abortSignal,
- messages,
- toolCall,
- tools: options2.tools
+ return recordSpan(chatSpan({
+ ...options2,
+ messages: stepOptions.messages,
+ model: stepOptions.model,
+ toolChoice: stepOptions.toolChoice
+ }, tracer), async (span) => chat({
+ ...options2,
+ maxSteps: void 0,
+ messages: stepOptions.messages,
+ model: stepOptions.model,
+ steps: void 0,
+ stopWhen: void 0,
+ stream: false,
+ toolChoice: stepOptions.toolChoice
+ }).then(responseJSON).then(async (res) => {
+ const { choices, usage } = res;
+ if (!choices?.length) {
+ const responseBody = JSON.stringify(res);
+ throw new InvalidResponseError(`No choices returned, response body: ${responseBody}`, {
+ reason: "no_choices",
+ responseBody
  });
- toolCalls.push(completionToolCall);
- toolResults.push(completionToolResult);
- messages.push(message2);
  }
- }
- const step = {
- finishReason,
- stepType,
- text: Array.isArray(message.content) ? message.content.filter((m) => m.type === "text").map((m) => m.text).join("\n") : message.content,
- toolCalls,
- toolResults,
- usage
- };
- steps.push(step);
- span.setAttributes({
- "gen_ai.response.finish_reasons": [step.finishReason],
- "gen_ai.usage.input_tokens": step.usage.prompt_tokens,
- "gen_ai.usage.output_tokens": step.usage.completion_tokens
- });
- if (options2.onStepFinish)
- await options2.onStepFinish(step);
- if (step.finishReason === "stop" || step.stepType === "done") {
- return {
- finishReason: step.finishReason,
- messages,
- reasoningText: message.reasoning_content,
- steps,
- text: step.text,
- toolCalls: step.toolCalls,
- toolResults: step.toolResults,
- usage: step.usage
+ const toolCalls = [];
+ const toolResults = [];
+ const { finish_reason: finishReason, message } = choices[0];
+ const msgToolCalls = message?.tool_calls ?? [];
+ const stopWhen = options2.stopWhen ?? stepCountAtLeast(1);
+ messages.push(message);
+ span.setAttribute("gen_ai.output.messages", JSON.stringify([message]));
+ if (msgToolCalls.length > 0) {
+ const results = await Promise.all(
+ msgToolCalls.map(async (toolCall) => executeTool({
+ abortSignal: options2.abortSignal,
+ messages,
+ toolCall,
+ tools: options2.tools
+ }))
+ );
+ for (const { completionToolCall, completionToolResult, message: message2 } of results) {
+ toolCalls.push(completionToolCall);
+ toolResults.push(completionToolResult);
+ messages.push(message2);
+ }
+ }
+ const stopStep = {
+ finishReason,
+ text: Array.isArray(message.content) ? message.content.filter((m) => m.type === "text").map((m) => m.text).join("\n") : message.content,
+ toolCalls,
+ toolResults,
+ usage
  };
- } else {
- return async () => rawGenerateText({
- ...options2,
+ const stop = shouldStop(stopWhen, {
  messages,
- steps
+ step: stopStep,
+ steps: [...steps, stopStep]
  });
- }
- }));
+ const willContinue = toolCalls.length > 0 && !stop;
+ const step = {
+ ...stopStep,
+ stepType: determineStepType({
+ finishReason,
+ stepsLength: steps.length,
+ toolCallsLength: toolCalls.length,
+ willContinue
+ })
+ };
+ steps.push(step);
+ span.setAttributes({
+ "gen_ai.response.finish_reasons": [step.finishReason],
+ "gen_ai.usage.input_tokens": step.usage.prompt_tokens,
+ "gen_ai.usage.output_tokens": step.usage.completion_tokens
+ });
+ if (options2.onStepFinish)
+ await options2.onStepFinish(step);
+ if (!willContinue) {
+ return {
+ finishReason: step.finishReason,
+ messages,
+ reasoningText: message.reasoning ?? message.reasoning_content,
+ steps,
+ text: step.text,
+ toolCalls: step.toolCalls,
+ toolResults: step.toolResults,
+ usage: step.usage
+ };
+ } else {
+ return async () => rawGenerateText({
+ ...options2,
+ messages,
+ steps
+ });
+ }
+ }));
+ };
  return trampoline(async () => rawGenerateText({
  ...options,
  tools: options.tools?.map((tool) => wrapTool(tool, tracer))
  }));
  };

- const parseChunk = (text) => {
- if (!text || !text.startsWith("data:"))
- return [void 0, false];
- const content = text.slice("data:".length);
- const data = content.startsWith(" ") ? content.slice(1) : content;
- if (data === "[DONE]") {
- return [void 0, true];
- }
+ const parseChunk = (data) => {
  if (data.startsWith("{") && data.includes('"error":')) {
- throw new Error(`Error from server: ${data}`);
+ throw new RemoteAPIError(`Error from server: ${data}`, {
+ responseBody: data
+ });
+ }
+ try {
+ return JSON.parse(data);
+ } catch (cause) {
+ throw new JSONParseError(`Failed to parse stream chunk JSON: ${data}`, {
+ cause,
+ text: data
+ });
  }
- const chunk = JSON.parse(data);
- return [chunk, false];
  };
  const transformChunk = () => {
- const decoder = new TextDecoder();
- let buffer = "";
  return new TransformStream({
  transform: async (chunk, controller) => {
- const text = decoder.decode(chunk, { stream: true });
- buffer += text;
- const lines = buffer.split("\n");
- buffer = lines.pop() ?? "";
- for (const line of lines) {
- try {
- const [chunk2, isEnd] = parseChunk(line);
- if (isEnd)
- break;
- if (chunk2) {
- controller.enqueue(chunk2);
- }
- } catch (error) {
- controller.error(error);
- }
- }
+ if (!chunk.data || chunk.data === "[DONE]")
+ return;
+ controller.enqueue(parseChunk(chunk.data));
  }
  });
  };
@@ -251,7 +273,7 @@ const streamText = (options) => {
  const tracer = getTracer();
  const steps = [];
  const messages = structuredClone(options.messages);
- const maxSteps = options.maxSteps ?? 1;
+ const stopWhen = options.stopWhen ?? stepCountAtLeast(1);
  let usage;
  let totalUsage;
  let reasoningField;
@@ -274,176 +296,216 @@ const streamText = (options) => {
  void options.onStepFinish?.(step);
  };
  const tools = options.tools != null && options.tools.length > 0 ? options.tools.map((tool) => wrapTool(tool, tracer)) : void 0;
- const doStream = async () => recordSpan(chatSpan({ ...options, messages }, tracer), async (span) => {
- const { body: stream } = await chat({
- ...options,
- maxSteps: void 0,
+ const doStream = async () => {
+ const stepOptions = await resolveStepOptions({
  messages,
- stream: true,
- streamOptions: options.streamOptions != null ? objCamelToSnake(options.streamOptions) : void 0,
- tools
+ model: options.model,
+ prepareStep: options.prepareStep,
+ stepNumber: steps.length,
+ steps,
+ toolChoice: options.toolChoice
  });
- const pushUsage = (u) => {
- usage = u;
- totalUsage = totalUsage ? {
- completion_tokens: totalUsage.completion_tokens + u.completion_tokens,
- prompt_tokens: totalUsage.prompt_tokens + u.prompt_tokens,
- total_tokens: totalUsage.total_tokens + u.total_tokens
- } : u;
- };
- let text = "";
- let reasoningText;
- const pushText = (content) => {
- textCtrl?.enqueue(content);
- text += content;
- };
- const pushReasoningText = (reasoningContent) => {
- if (reasoningText == null)
- reasoningText = "";
- reasoningTextCtrl?.enqueue(reasoningContent);
- reasoningText += reasoningContent;
- };
- const tool_calls = [];
- const toolCalls = [];
- const toolResults = [];
- let finishReason = "other";
- await stream.pipeThrough(transformChunk()).pipeTo(new WritableStream({
- abort: (reason) => {
- eventCtrl?.error(reason);
- textCtrl?.error(reason);
- },
- close: () => {
- },
- // eslint-disable-next-line sonarjs/cognitive-complexity
- write: (chunk) => {
- if (chunk.usage)
- pushUsage(chunk.usage);
- if (chunk.choices == null || chunk.choices.length === 0)
- return;
- const choice = chunk.choices[0];
- if (choice.delta.reasoning != null) {
- if (reasoningField !== "reasoning")
- reasoningField = "reasoning";
- pushEvent({ text: choice.delta.reasoning, type: "reasoning-delta" });
- pushReasoningText(choice.delta.reasoning);
- } else if (choice.delta.reasoning_content != null) {
- if (reasoningField !== "reasoning_content")
- reasoningField = "reasoning_content";
- pushEvent({ text: choice.delta.reasoning_content, type: "reasoning-delta" });
- pushReasoningText(choice.delta.reasoning_content);
- }
- if (choice.finish_reason != null)
- finishReason = choice.finish_reason;
- if (choice.delta.tool_calls?.length === 0 || choice.delta.tool_calls == null) {
- if (choice.delta.content != null) {
- pushEvent({ text: choice.delta.content, type: "text-delta" });
- pushText(choice.delta.content);
- } else if (choice.delta.refusal != null) {
- pushEvent({ error: choice.delta.refusal, type: "error" });
- } else if (choice.finish_reason != null) {
- pushEvent({ finishReason: choice.finish_reason, type: "finish", usage });
+ return recordSpan(chatSpan({
+ ...options,
+ messages: stepOptions.messages,
+ model: stepOptions.model,
+ toolChoice: stepOptions.toolChoice
+ }, tracer), async (span) => {
+ const { body: stream } = await chat({
+ ...options,
+ maxSteps: void 0,
+ messages: stepOptions.messages,
+ model: stepOptions.model,
+ stopWhen: void 0,
+ stream: true,
+ streamOptions: options.streamOptions != null ? objCamelToSnake(options.streamOptions) : void 0,
+ toolChoice: stepOptions.toolChoice,
+ tools
+ });
+ const pushUsage = (u) => {
+ usage = u;
+ totalUsage = totalUsage ? {
+ completion_tokens: totalUsage.completion_tokens + u.completion_tokens,
+ prompt_tokens: totalUsage.prompt_tokens + u.prompt_tokens,
+ total_tokens: totalUsage.total_tokens + u.total_tokens
+ } : u;
+ };
+ let text = "";
+ let reasoningText;
+ const pushText = (content) => {
+ textCtrl?.enqueue(content);
+ text += content;
+ };
+ const pushReasoningText = (reasoningContent) => {
+ if (reasoningText == null)
+ reasoningText = "";
+ reasoningTextCtrl?.enqueue(reasoningContent);
+ reasoningText += reasoningContent;
+ };
+ const tool_calls = [];
+ const toolCalls = [];
+ const toolResults = [];
+ let finishReason = "other";
+ await stream.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).pipeThrough(transformChunk()).pipeTo(new WritableStream({
+ abort: (reason) => {
+ eventCtrl?.error(reason);
+ textCtrl?.error(reason);
+ },
+ close: () => {
+ },
+ // eslint-disable-next-line sonarjs/cognitive-complexity
+ write: (chunk) => {
+ if (chunk.usage)
+ pushUsage(chunk.usage);
+ if (chunk.choices == null || chunk.choices.length === 0)
+ return;
+ const choice = chunk.choices[0];
+ if (choice.delta.reasoning != null) {
+ if (reasoningField !== "reasoning")
+ reasoningField = "reasoning";
+ pushEvent({ text: choice.delta.reasoning, type: "reasoning-delta" });
+ pushReasoningText(choice.delta.reasoning);
+ } else if (choice.delta.reasoning_content != null) {
+ if (reasoningField !== "reasoning_content")
+ reasoningField = "reasoning_content";
+ pushEvent({ text: choice.delta.reasoning_content, type: "reasoning-delta" });
+ pushReasoningText(choice.delta.reasoning_content);
  }
- } else {
- for (const toolCall of choice.delta.tool_calls) {
- const { index } = toolCall;
- if (!tool_calls.at(index)) {
- tool_calls[index] = {
- ...toolCall,
- function: {
- ...toolCall.function,
- arguments: toolCall.function.arguments ?? ""
- }
- };
- pushEvent({
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- type: "tool-call-streaming-start"
- });
- } else {
- tool_calls[index].function.arguments += toolCall.function.arguments;
- pushEvent({
- argsTextDelta: toolCall.function.arguments,
- toolCallId: toolCall.id,
- toolName: toolCall.function.name ?? tool_calls[index].function.name,
- type: "tool-call-delta"
- });
+ if (choice.finish_reason != null)
+ finishReason = choice.finish_reason;
+ if (choice.delta.tool_calls?.length === 0 || choice.delta.tool_calls == null) {
+ if (choice.delta.content != null) {
+ pushEvent({ text: choice.delta.content, type: "text-delta" });
+ pushText(choice.delta.content);
+ } else if (choice.delta.refusal != null) {
+ pushEvent({ error: choice.delta.refusal, type: "error" });
+ } else if (choice.finish_reason != null) {
+ pushEvent({ finishReason: choice.finish_reason, type: "finish", usage });
+ }
+ } else {
+ for (const toolCall of choice.delta.tool_calls) {
+ const { index } = toolCall;
+ if (!tool_calls.at(index)) {
+ tool_calls[index] = {
+ ...toolCall,
+ function: {
+ ...toolCall.function,
+ arguments: toolCall.function.arguments ?? ""
+ }
+ };
+ pushEvent({
+ toolCallId: toolCall.id,
+ toolName: toolCall.function.name,
+ type: "tool-call-streaming-start"
+ });
+ } else {
+ tool_calls[index].function.arguments += toolCall.function.arguments;
+ pushEvent({
+ argsTextDelta: toolCall.function.arguments,
+ toolCallId: toolCall.id,
+ toolName: toolCall.function.name ?? tool_calls[index].function.name,
+ type: "tool-call-delta"
+ });
+ }
  }
  }
  }
- }
- }));
- const message = {
- ...reasoningField != null ? { [reasoningField]: reasoningText } : {},
- content: text,
- role: "assistant",
- tool_calls: tool_calls.length > 0 ? tool_calls : void 0
- };
- messages.push(message);
- span.setAttribute("gen_ai.output.messages", JSON.stringify([message]));
- if (tool_calls.length !== 0) {
- for (const toolCall of tool_calls) {
- if (toolCall == null)
- continue;
- const { completionToolCall, completionToolResult, message: message2 } = await executeTool({
- abortSignal: options.abortSignal,
- messages,
- toolCall,
- tools
+ }));
+ const message = {
+ ...reasoningField != null ? { [reasoningField]: reasoningText } : {},
+ content: text,
+ role: "assistant",
+ tool_calls: tool_calls.length > 0 ? tool_calls : void 0
+ };
+ messages.push(message);
+ span.setAttribute("gen_ai.output.messages", JSON.stringify([message]));
+ if (tool_calls.length !== 0) {
+ const validToolCalls = tool_calls.filter((tc) => tc != null);
+ const results = await Promise.all(
+ validToolCalls.map(async (toolCall) => executeTool({
+ abortSignal: options.abortSignal,
+ messages,
+ toolCall,
+ tools
+ }))
+ );
+ for (const { completionToolCall, completionToolResult, message: message2 } of results) {
+ toolCalls.push(completionToolCall);
+ toolResults.push(completionToolResult);
+ messages.push(message2);
+ pushEvent({ ...completionToolCall, type: "tool-call" });
+ pushEvent({ ...completionToolResult, type: "tool-result" });
+ }
+ } else {
+ pushEvent({
+ finishReason,
+ type: "finish",
+ usage
  });
- toolCalls.push(completionToolCall);
- toolResults.push(completionToolResult);
- messages.push(message2);
- pushEvent({ ...completionToolCall, type: "tool-call" });
- pushEvent({ ...completionToolResult, type: "tool-result" });
  }
- } else {
- pushEvent({
+ const stopStep = {
  finishReason,
- type: "finish",
+ text,
+ toolCalls,
+ toolResults,
  usage
+ };
+ const stop = shouldStop(stopWhen, {
+ messages,
+ step: stopStep,
+ steps: [...steps, stopStep]
  });
- }
- const step = {
- finishReason,
- stepType: determineStepType({ finishReason, maxSteps, stepsLength: steps.length, toolCallsLength: toolCalls.length }),
- text,
- toolCalls,
- toolResults,
- usage
- };
- pushStep(step);
- span.setAttributes({
- "gen_ai.response.finish_reasons": [step.finishReason],
- ...step.usage && {
- "gen_ai.usage.input_tokens": step.usage.prompt_tokens,
- "gen_ai.usage.output_tokens": step.usage.completion_tokens
- }
+ const willContinue = toolCalls.length > 0 && !stop;
+ const step = {
+ ...stopStep,
+ stepType: determineStepType({
+ finishReason,
+ stepsLength: steps.length,
+ toolCallsLength: toolCalls.length,
+ willContinue
+ })
+ };
+ pushStep(step);
+ span.setAttributes({
+ "gen_ai.response.finish_reasons": [step.finishReason],
+ ...step.usage && {
+ "gen_ai.usage.input_tokens": step.usage.prompt_tokens,
+ "gen_ai.usage.output_tokens": step.usage.completion_tokens
+ }
+ });
+ if (willContinue)
+ return async () => doStream();
  });
- if (toolCalls.length !== 0 && steps.length < maxSteps)
- return async () => doStream();
- });
+ };
  void (async () => {
+ let finalError;
  try {
  await trampoline(async () => doStream());
- eventCtrl?.close();
- textCtrl?.close();
- reasoningTextCtrl?.close();
  } catch (err) {
- eventCtrl?.error(err);
- textCtrl?.error(err);
- reasoningTextCtrl?.error(err);
- resultSteps.reject(err);
- resultMessages.reject(err);
- resultUsage.reject(err);
- resultTotalUsage.reject(err);
- } finally {
- resultSteps.resolve(steps);
- resultMessages.resolve(messages);
- resultUsage.resolve(usage);
- resultTotalUsage.resolve(totalUsage);
- const finishStep = steps.at(-1);
- void options.onFinish?.(finishStep);
+ finalError = err;
+ }
+ try {
+ await options.onFinish?.(steps.at(-1));
+ } catch (err) {
+ finalError ??= err;
+ }
+ if (finalError != null) {
+ eventCtrl?.error(finalError);
+ textCtrl?.error(finalError);
+ reasoningTextCtrl?.error(finalError);
+ resultSteps.reject(finalError);
+ resultMessages.reject(finalError);
+ resultUsage.reject(finalError);
+ resultTotalUsage.reject(finalError);
+ return;
  }
+ eventCtrl?.close();
+ textCtrl?.close();
+ reasoningTextCtrl?.close();
+ resultSteps.resolve(steps);
+ resultMessages.resolve(messages);
+ resultUsage.resolve(usage);
+ resultTotalUsage.resolve(totalUsage);
  })();
  return {
  fullStream: eventStream,
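
Note on the index.js changes above: step control moves from the numeric maxSteps option to the predicate-based stopWhen (defaulting to stepCountAtLeast(1)) resolved together with prepareStep via resolveStepOptions, tool calls within a step now run in parallel through Promise.all, and failures surface as typed errors (InvalidResponseError, RemoteAPIError, JSONParseError) instead of plain Error. The sketch below is an illustrative consumer of the instrumented generateText under that new surface, not code from the package; the model id, messages, and omitted connection options are placeholders.

// Illustrative sketch only: assumes generateText is exported by @xsai-ext/telemetry
// (as in 0.4.x) and that provider/connection options are supplied elsewhere.
import { stepCountAtLeast } from 'xsai';
import { generateText } from '@xsai-ext/telemetry';

const { text, steps, usage } = await generateText({
  messages: [{ content: 'Hello!', role: 'user' }],
  model: 'placeholder-model',
  // replaces maxSteps: stop once at least one step has completed,
  // mirroring the default the diff applies when stopWhen is not set
  stopWhen: stepCountAtLeast(1),
  onStepFinish: (step) => console.log(step.finishReason, step.usage),
});
console.log(text, steps.length, usage);
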
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@xsai-ext/telemetry",
  "type": "module",
- "version": "0.4.3",
+ "version": "0.5.0-beta.1",
  "description": "extra-small AI SDK.",
  "author": "Moeru AI",
  "license": "MIT",
@@ -30,7 +30,8 @@
  ],
  "dependencies": {
  "@opentelemetry/api": "^1.9.0",
- "xsai": "~0.4.3"
+ "eventsource-parser": "^3.0.6",
+ "xsai": "~0.5.0-beta.1"
  },
  "devDependencies": {
  "@langfuse/otel": "^4.5.1",