@lobehub/chat 1.68.8 → 1.68.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/usage/providers/ppio.mdx +5 -5
  4. package/docs/usage/providers/ppio.zh-CN.mdx +7 -7
  5. package/locales/ar/chat.json +5 -1
  6. package/locales/ar/models.json +6 -9
  7. package/locales/bg-BG/chat.json +5 -1
  8. package/locales/bg-BG/models.json +6 -9
  9. package/locales/de-DE/chat.json +5 -1
  10. package/locales/de-DE/models.json +6 -9
  11. package/locales/en-US/chat.json +5 -1
  12. package/locales/en-US/models.json +6 -9
  13. package/locales/es-ES/chat.json +5 -1
  14. package/locales/es-ES/models.json +6 -9
  15. package/locales/fa-IR/chat.json +5 -1
  16. package/locales/fa-IR/models.json +6 -9
  17. package/locales/fr-FR/chat.json +5 -1
  18. package/locales/fr-FR/models.json +6 -9
  19. package/locales/it-IT/chat.json +5 -1
  20. package/locales/it-IT/models.json +6 -9
  21. package/locales/ja-JP/chat.json +5 -1
  22. package/locales/ja-JP/models.json +6 -9
  23. package/locales/ko-KR/chat.json +5 -1
  24. package/locales/ko-KR/models.json +6 -9
  25. package/locales/nl-NL/chat.json +5 -1
  26. package/locales/nl-NL/models.json +6 -9
  27. package/locales/pl-PL/chat.json +5 -1
  28. package/locales/pl-PL/models.json +6 -9
  29. package/locales/pt-BR/chat.json +5 -1
  30. package/locales/pt-BR/models.json +6 -9
  31. package/locales/ru-RU/chat.json +5 -1
  32. package/locales/ru-RU/models.json +6 -9
  33. package/locales/tr-TR/chat.json +5 -1
  34. package/locales/tr-TR/models.json +6 -9
  35. package/locales/vi-VN/chat.json +5 -1
  36. package/locales/vi-VN/models.json +6 -9
  37. package/locales/zh-CN/chat.json +5 -1
  38. package/locales/zh-CN/models.json +6 -9
  39. package/locales/zh-TW/chat.json +5 -1
  40. package/locales/zh-TW/models.json +6 -9
  41. package/package.json +3 -1
  42. package/src/config/aiModels/perplexity.ts +36 -20
  43. package/src/config/modelProviders/ppio.ts +1 -1
  44. package/src/database/client/migrations.json +8 -3
  45. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +27 -9
  46. package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +77 -35
  47. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +253 -0
  48. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +65 -46
  49. package/src/libs/agent-runtime/baichuan/index.test.ts +58 -1
  50. package/src/libs/agent-runtime/groq/index.test.ts +36 -284
  51. package/src/libs/agent-runtime/mistral/index.test.ts +39 -300
  52. package/src/libs/agent-runtime/perplexity/index.test.ts +12 -10
  53. package/src/libs/agent-runtime/providerTestUtils.ts +58 -0
  54. package/src/libs/agent-runtime/togetherai/index.test.ts +7 -295
  55. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +3 -0
  56. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +5 -2
  57. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +89 -5
  58. package/src/libs/agent-runtime/utils/streams/anthropic.ts +25 -8
  59. package/src/libs/agent-runtime/utils/streams/openai.test.ts +188 -84
  60. package/src/libs/agent-runtime/utils/streams/openai.ts +8 -17
  61. package/src/libs/agent-runtime/utils/usageConverter.test.ts +249 -0
  62. package/src/libs/agent-runtime/utils/usageConverter.ts +50 -0
  63. package/src/libs/agent-runtime/zeroone/index.test.ts +7 -294
  64. package/src/libs/langchain/loaders/epub/__tests__/__snapshots__/index.test.ts.snap +238 -0
  65. package/src/libs/langchain/loaders/epub/__tests__/demo.epub +0 -0
  66. package/src/libs/langchain/loaders/epub/__tests__/index.test.ts +24 -0
  67. package/src/libs/langchain/loaders/epub/index.ts +21 -0
  68. package/src/libs/langchain/loaders/index.ts +9 -0
  69. package/src/libs/langchain/types.ts +2 -1
  70. package/src/locales/default/chat.ts +4 -0
  71. package/src/server/utils/tempFileManager.ts +70 -0
  72. package/src/types/message/base.ts +14 -4
  73. package/src/utils/filter.test.ts +0 -122
  74. package/src/utils/filter.ts +0 -29
@@ -348,94 +348,198 @@ describe('OpenAIStream', () => {
  ]);
  });

- it('should streaming token usage', async () => {
- const data = [
- {
- id: 'chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
- object: 'chat.completion.chunk',
- created: 1741056525,
- model: 'gpt-4o-mini-2024-07-18',
- choices: [{ index: 0, delta: { role: 'assistant', content: '' } }],
- service_tier: 'default',
- system_fingerprint: 'fp_06737a9306',
- },
- {
- id: 'chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
- object: 'chat.completion.chunk',
- created: 1741056525,
- model: 'gpt-4o-mini-2024-07-18',
- choices: [{ index: 0, delta: { content: '你好!' } }],
- service_tier: 'default',
- system_fingerprint: 'fp_06737a9306',
- },
- {
- id: 'chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
- object: 'chat.completion.chunk',
- created: 1741056525,
- model: 'gpt-4o-mini-2024-07-18',
- choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
- service_tier: 'default',
- system_fingerprint: 'fp_06737a9306',
- },
- {
- id: 'chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
- object: 'chat.completion.chunk',
- created: 1741056525,
- model: 'gpt-4o-mini-2024-07-18',
- choices: [],
- service_tier: 'default',
- system_fingerprint: 'fp_06737a9306',
- usage: {
- prompt_tokens: 1646,
- completion_tokens: 11,
- total_tokens: 1657,
- prompt_tokens_details: { audio_tokens: 0, cached_tokens: 0 },
- completion_tokens_details: {
- accepted_prediction_tokens: 0,
- audio_tokens: 0,
- reasoning_tokens: 0,
- rejected_prediction_tokens: 0,
+ describe('token usage', () => {
+ it('should streaming token usage', async () => {
+ const data = [
+ {
+ id: 'chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
+ object: 'chat.completion.chunk',
+ created: 1741056525,
+ model: 'gpt-4o-mini-2024-07-18',
+ choices: [{ index: 0, delta: { role: 'assistant', content: '' } }],
+ service_tier: 'default',
+ system_fingerprint: 'fp_06737a9306',
+ },
+ {
+ id: 'chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
+ object: 'chat.completion.chunk',
+ created: 1741056525,
+ model: 'gpt-4o-mini-2024-07-18',
+ choices: [{ index: 0, delta: { content: '你好!' } }],
+ service_tier: 'default',
+ system_fingerprint: 'fp_06737a9306',
+ },
+ {
+ id: 'chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
+ object: 'chat.completion.chunk',
+ created: 1741056525,
+ model: 'gpt-4o-mini-2024-07-18',
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ service_tier: 'default',
+ system_fingerprint: 'fp_06737a9306',
+ },
+ {
+ id: 'chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
+ object: 'chat.completion.chunk',
+ created: 1741056525,
+ model: 'gpt-4o-mini-2024-07-18',
+ choices: [],
+ service_tier: 'default',
+ system_fingerprint: 'fp_06737a9306',
+ usage: {
+ prompt_tokens: 1646,
+ completion_tokens: 11,
+ total_tokens: 1657,
+ prompt_tokens_details: { audio_tokens: 0, cached_tokens: 0 },
+ completion_tokens_details: {
+ accepted_prediction_tokens: 0,
+ audio_tokens: 0,
+ reasoning_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
  },
  },
- },
- ];
+ ];

- const mockOpenAIStream = new ReadableStream({
- start(controller) {
- data.forEach((chunk) => {
- controller.enqueue(chunk);
- });
+ const mockOpenAIStream = new ReadableStream({
+ start(controller) {
+ data.forEach((chunk) => {
+ controller.enqueue(chunk);
+ });

- controller.close();
- },
+ controller.close();
+ },
+ });
+
+ const protocolStream = OpenAIStream(mockOpenAIStream);
+
+ const decoder = new TextDecoder();
+ const chunks = [];
+
+ // @ts-ignore
+ for await (const chunk of protocolStream) {
+ chunks.push(decoder.decode(chunk, { stream: true }));
+ }
+
+ expect(chunks).toEqual(
+ [
+ 'id: chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
+ 'event: text',
+ `data: ""\n`,
+ 'id: chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
+ 'event: text',
+ `data: "你好!"\n`,
+ 'id: chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
+ 'event: stop',
+ `data: "stop"\n`,
+ 'id: chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
+ 'event: usage',
+ `data: {"inputCacheMissTokens":1646,"inputTextTokens":1646,"outputTextTokens":11,"totalInputTokens":1646,"totalOutputTokens":11,"totalTokens":1657}\n`,
+ ].map((i) => `${i}\n`),
+ );
  });

- const protocolStream = OpenAIStream(mockOpenAIStream);
+ it('should streaming litellm token usage', async () => {
+ const data = [
+ {
+ id: 'chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ created: 1741188058,
+ model: 'gpt-4o-mini',
+ object: 'chat.completion.chunk',
+ system_fingerprint: 'fp_06737a9306',
+ choices: [{ index: 0, delta: { content: ' #' } }],
+ stream_options: { include_usage: true },
+ },
+ {
+ id: 'chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ created: 1741188068,
+ model: 'gpt-4o-mini',
+ object: 'chat.completion.chunk',
+ system_fingerprint: 'fp_06737a9306',
+ choices: [{ index: 0, delta: { content: '.' } }],
+ stream_options: { include_usage: true },
+ },
+ {
+ id: 'chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ created: 1741188068,
+ model: 'gpt-4o-mini',
+ object: 'chat.completion.chunk',
+ system_fingerprint: 'fp_06737a9306',
+ choices: [{ finish_reason: 'stop', index: 0, delta: {} }],
+ stream_options: { include_usage: true },
+ },
+ {
+ id: 'chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ created: 1741188068,
+ model: 'gpt-4o-mini',
+ object: 'chat.completion.chunk',
+ system_fingerprint: 'fp_06737a9306',
+ choices: [{ index: 0, delta: {} }],
+ stream_options: { include_usage: true },
+ },
+ {
+ id: 'chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ created: 1741188068,
+ model: 'gpt-4o-mini',
+ object: 'chat.completion.chunk',
+ system_fingerprint: 'fp_06737a9306',
+ choices: [{ index: 0, delta: {} }],
+ stream_options: { include_usage: true },
+ usage: {
+ completion_tokens: 1720,
+ prompt_tokens: 1797,
+ total_tokens: 3517,
+ completion_tokens_details: {
+ accepted_prediction_tokens: 0,
+ audio_tokens: 0,
+ reasoning_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ prompt_tokens_details: { audio_tokens: 0, cached_tokens: 0 },
+ },
+ },
+ ];

- const decoder = new TextDecoder();
- const chunks = [];
+ const mockOpenAIStream = new ReadableStream({
+ start(controller) {
+ data.forEach((chunk) => {
+ controller.enqueue(chunk);
+ });

- // @ts-ignore
- for await (const chunk of protocolStream) {
- chunks.push(decoder.decode(chunk, { stream: true }));
- }
+ controller.close();
+ },
+ });

- expect(chunks).toEqual(
- [
- 'id: chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
- 'event: text',
- `data: ""\n`,
- 'id: chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
- 'event: text',
- `data: "你好!"\n`,
- 'id: chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
- 'event: stop',
- `data: "stop"\n`,
- 'id: chatcmpl-B7CcnaeK3jqWBMOhxg7SSKFwlk7dC',
- 'event: usage',
- `data: {"acceptedPredictionTokens":0,"cachedTokens":0,"inputAudioTokens":0,"inputTokens":1646,"outputAudioTokens":0,"outputTokens":11,"reasoningTokens":0,"rejectedPredictionTokens":0,"totalTokens":1657}\n`,
- ].map((i) => `${i}\n`),
- );
+ const protocolStream = OpenAIStream(mockOpenAIStream);
+
+ const decoder = new TextDecoder();
+ const chunks = [];
+
+ // @ts-ignore
+ for await (const chunk of protocolStream) {
+ chunks.push(decoder.decode(chunk, { stream: true }));
+ }
+
+ expect(chunks).toEqual(
+ [
+ 'id: chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ 'event: text',
+ `data: " #"\n`,
+ 'id: chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ 'event: text',
+ `data: "."\n`,
+ 'id: chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ 'event: stop',
+ `data: "stop"\n`,
+ 'id: chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ 'event: data',
+ `data: {"delta":{},"id":"chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5","index":0}\n`,
+ 'id: chatcmpl-c1f6a6a6-fcf8-463a-96bf-cf634d3e98a5',
+ 'event: usage',
+ `data: {"inputCacheMissTokens":1797,"inputTextTokens":1797,"outputTextTokens":1720,"totalInputTokens":1797,"totalOutputTokens":1720,"totalTokens":3517}\n`,
+ ].map((i) => `${i}\n`),
+ );
+ });
  });

  describe('Tools Calling', () => {
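For orientation, the expected usage payload in these tests now splits totals by direction instead of the old flat inputTokens / outputTokens / cachedTokens / reasoningTokens keys. A rough sketch of the new shape, inferred only from the expectations above (the real declaration is ModelTokensUsage in package/src/types/message/base.ts, which this release extends but which is not shown in this hunk):

```ts
// Hypothetical sketch inferred from the test expectations above; the actual
// ModelTokensUsage declaration may name or group these fields differently.
interface UsageEventPayloadSketch {
  totalInputTokens?: number; // was: inputTokens
  totalOutputTokens?: number; // was: outputTokens
  inputTextTokens?: number; // prompt_tokens
  inputCachedTokens?: number; // was: cachedTokens
  inputCacheMissTokens?: number; // input tokens not served from a prompt cache
  outputTextTokens?: number; // completion tokens minus reasoning/audio tokens
  outputReasoningTokens?: number; // was: reasoningTokens
  totalTokens?: number;
}
```

Zero-valued detail fields (the audio, reasoning, and prediction counters in the chunks above) are dropped before the usage event is emitted, which is why the expected JSON strings contain only non-zero keys.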
@@ -840,7 +944,7 @@ describe('OpenAIStream', () => {
  `data: "帮助。"\n`,
  'id: 1',
  'event: usage',
- `data: {"cachedTokens":0,"inputCacheMissTokens":6,"inputTokens":6,"outputTokens":104,"reasoningTokens":70,"totalTokens":110}\n`,
+ `data: {"inputCacheMissTokens":6,"inputTextTokens":6,"outputReasoningTokens":70,"outputTextTokens":34,"totalInputTokens":6,"totalOutputTokens":104,"totalTokens":110}\n`,
  ].map((i) => `${i}\n`),
  );
  });
@@ -1059,7 +1163,7 @@ describe('OpenAIStream', () => {
  `data: "帮助。"\n`,
  'id: 1',
  'event: usage',
- `data: {"cachedTokens":0,"inputCacheMissTokens":6,"inputTokens":6,"outputTokens":104,"reasoningTokens":70,"totalTokens":110}\n`,
+ `data: {"inputCacheMissTokens":6,"inputTextTokens":6,"outputReasoningTokens":70,"outputTextTokens":34,"totalInputTokens":6,"totalOutputTokens":104,"totalTokens":110}\n`,
  ].map((i) => `${i}\n`),
  );
  });
@@ -1260,7 +1364,7 @@ describe('OpenAIStream', () => {
  `data: "帮助。"\n`,
  'id: 1',
  'event: usage',
- `data: {"cachedTokens":0,"inputCacheMissTokens":6,"inputTokens":6,"outputTokens":104,"reasoningTokens":70,"totalTokens":110}\n`,
+ `data: {"inputCacheMissTokens":6,"inputTextTokens":6,"outputReasoningTokens":70,"outputTextTokens":34,"totalInputTokens":6,"totalOutputTokens":104,"totalTokens":110}\n`,
  ].map((i) => `${i}\n`),
  );
  });
@@ -1461,7 +1565,7 @@ describe('OpenAIStream', () => {
  `data: "帮助。"\n`,
  'id: 1',
  'event: usage',
- `data: {"cachedTokens":0,"inputCacheMissTokens":6,"inputTokens":6,"outputTokens":104,"reasoningTokens":70,"totalTokens":110}\n`,
+ `data: {"inputCacheMissTokens":6,"inputTextTokens":6,"outputReasoningTokens":70,"outputTextTokens":34,"totalInputTokens":6,"totalOutputTokens":104,"totalTokens":110}\n`,
  ].map((i) => `${i}\n`),
  );
  });
@@ -1662,7 +1766,7 @@ describe('OpenAIStream', () => {
  `data: "帮助。"\n`,
  'id: 1',
  'event: usage',
- `data: {"cachedTokens":0,"inputCacheMissTokens":6,"inputTokens":6,"outputTokens":104,"reasoningTokens":70,"totalTokens":110}\n`,
+ `data: {"inputCacheMissTokens":6,"inputTextTokens":6,"outputReasoningTokens":70,"outputTextTokens":34,"totalInputTokens":6,"totalOutputTokens":104,"totalTokens":110}\n`,
  ].map((i) => `${i}\n`),
  );
  });
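The five one-line expectation updates above apply the same key rename to five existing reasoning-model tests. As a quick sanity check on those numbers, assuming the arithmetic used by the new usageConverter shown later in this diff:

```ts
// Reasoning-model usage from the updated expectations above.
const totalOutputTokens = 104; // completion_tokens
const outputReasoningTokens = 70; // completion_tokens_details.reasoning_tokens
const outputTextTokens = totalOutputTokens - outputReasoningTokens; // 104 - 70 = 34

const totalInputTokens = 6; // prompt_tokens, no citation tokens
const inputCacheMissTokens = 6; // no cache hits reported, so miss equals total input
const totalTokens = totalInputTokens + totalOutputTokens; // 6 + 104 = 110
```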
@@ -1,10 +1,11 @@
  import OpenAI from 'openai';
  import type { Stream } from 'openai/streaming';

- import { ChatMessageError, CitationItem, ModelTokensUsage } from '@/types/message';
+ import { ChatMessageError, CitationItem } from '@/types/message';

  import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../error';
  import { ChatStreamCallbacks } from '../../types';
+ import { convertUsage } from '../usageConverter';
  import {
  FIRST_CHUNK_ERROR_KEY,
  StreamContext,
@@ -18,22 +19,6 @@ import {
  generateToolCallId,
  } from './protocol';

- const convertUsage = (usage: OpenAI.Completions.CompletionUsage): ModelTokensUsage => {
- return {
- acceptedPredictionTokens: usage.completion_tokens_details?.accepted_prediction_tokens,
- cachedTokens:
- (usage as any).prompt_cache_hit_tokens || usage.prompt_tokens_details?.cached_tokens,
- inputAudioTokens: usage.prompt_tokens_details?.audio_tokens,
- inputCacheMissTokens: (usage as any).prompt_cache_miss_tokens,
- inputTokens: usage.prompt_tokens,
- outputAudioTokens: usage.completion_tokens_details?.audio_tokens,
- outputTokens: usage.completion_tokens,
- reasoningTokens: usage.completion_tokens_details?.reasoning_tokens,
- rejectedPredictionTokens: usage.completion_tokens_details?.rejected_prediction_tokens,
- totalTokens: usage.total_tokens,
- };
- };
-
  export const transformOpenAIStream = (
  chunk: OpenAI.ChatCompletionChunk,
  streamContext: StreamContext,
@@ -193,6 +178,12 @@ export const transformOpenAIStream = (
  return { data: item.delta, id: chunk.id, type: 'data' };
  }

+ // litellm responses can have an empty delta while still carrying usage
+ if (chunk.usage) {
+ const usage = chunk.usage;
+ return { data: convertUsage(usage), id: chunk.id, type: 'usage' };
+ }
+
  // in all other cases, return the delta and index
  return {
  data: { delta: item.delta, id: chunk.id, index: item.index },
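With this branch, a chunk whose delta is empty but that still carries a usage object (the litellm case exercised in the new test earlier in this diff) is emitted as a usage event instead of a generic data event. As a rough sketch of what that means for the final litellm chunk, using the values from that test:

```ts
// Usage from the final litellm chunk in the 'should streaming litellm token usage'
// test above (its all-zero completion_tokens_details are omitted here for brevity).
const usage = {
  prompt_tokens: 1797,
  completion_tokens: 1720,
  total_tokens: 3517,
  prompt_tokens_details: { audio_tokens: 0, cached_tokens: 0 },
};

// Per that test, convertUsage(usage) yields (zero/undefined fields dropped):
// {
//   inputCacheMissTokens: 1797, // no cache hits reported, so miss equals total input
//   inputTextTokens: 1797,
//   outputTextTokens: 1720,
//   totalInputTokens: 1797,
//   totalOutputTokens: 1720,
//   totalTokens: 3517,
// }
```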
@@ -0,0 +1,249 @@
+ import OpenAI from 'openai';
+ import { describe, expect, it } from 'vitest';
+
+ import { convertUsage } from './usageConverter';
+
+ describe('convertUsage', () => {
+ it('should convert basic OpenAI usage data correctly', () => {
+ // Arrange
+ const openaiUsage: OpenAI.Completions.CompletionUsage = {
+ prompt_tokens: 100,
+ completion_tokens: 50,
+ total_tokens: 150,
+ };
+
+ // Act
+ const result = convertUsage(openaiUsage);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 100,
+ totalInputTokens: 100,
+ totalOutputTokens: 50,
+ outputTextTokens: 50,
+ totalTokens: 150,
+ });
+ });
+
+ it('should handle PPLX citation tokens correctly', () => {
+ // Arrange
+ const pplxUsage = {
+ prompt_tokens: 80,
+ citation_tokens: 20,
+ completion_tokens: 50,
+ total_tokens: 150,
+ } as OpenAI.Completions.CompletionUsage;
+
+ // Act
+ const result = convertUsage(pplxUsage);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 80,
+ inputCitationTokens: 20,
+ totalInputTokens: 100,
+ totalOutputTokens: 50,
+ outputTextTokens: 50,
+ totalTokens: 170, // 150 + 20 (citation tokens)
+ });
+ });
+
+ it('should handle cached tokens correctly', () => {
+ // Arrange
+ const usageWithCache = {
+ prompt_tokens: 100,
+ prompt_cache_hit_tokens: 30,
+ prompt_cache_miss_tokens: 70,
+ completion_tokens: 50,
+ total_tokens: 150,
+ } as OpenAI.Completions.CompletionUsage;
+
+ // Act
+ const result = convertUsage(usageWithCache);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 100,
+ inputCachedTokens: 30,
+ inputCacheMissTokens: 70,
+ totalInputTokens: 100,
+ totalOutputTokens: 50,
+ outputTextTokens: 50,
+ totalTokens: 150,
+ });
+ });
+
+ it('should handle cached tokens using prompt_tokens_details', () => {
+ // Arrange
+ const usageWithTokenDetails = {
+ prompt_tokens: 100,
+ prompt_tokens_details: {
+ cached_tokens: 30,
+ },
+ completion_tokens: 50,
+ total_tokens: 150,
+ } as OpenAI.Completions.CompletionUsage;
+
+ // Act
+ const result = convertUsage(usageWithTokenDetails);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 100,
+ inputCachedTokens: 30,
+ inputCacheMissTokens: 70, // 100 - 30
+ totalInputTokens: 100,
+ totalOutputTokens: 50,
+ outputTextTokens: 50,
+ totalTokens: 150,
+ });
+ });
+
+ it('should handle audio tokens in input correctly', () => {
+ // Arrange
+ const usageWithAudioInput = {
+ prompt_tokens: 100,
+ prompt_tokens_details: {
+ audio_tokens: 20,
+ },
+ completion_tokens: 50,
+ total_tokens: 150,
+ } as OpenAI.Completions.CompletionUsage;
+
+ // Act
+ const result = convertUsage(usageWithAudioInput);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 100,
+ inputAudioTokens: 20,
+ totalInputTokens: 100,
+ totalOutputTokens: 50,
+ outputTextTokens: 50,
+ totalTokens: 150,
+ });
+ });
+
+ it('should handle detailed output tokens correctly', () => {
+ // Arrange
+ const usageWithOutputDetails = {
+ prompt_tokens: 100,
+ completion_tokens: 100,
+ completion_tokens_details: {
+ reasoning_tokens: 30,
+ audio_tokens: 20,
+ },
+ total_tokens: 200,
+ } as OpenAI.Completions.CompletionUsage;
+
+ // Act
+ const result = convertUsage(usageWithOutputDetails);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 100,
+ totalInputTokens: 100,
+ totalOutputTokens: 100,
+ outputReasoningTokens: 30,
+ outputAudioTokens: 20,
+ outputTextTokens: 50, // 100 - 30 - 20
+ totalTokens: 200,
+ });
+ });
+
+ it('should handle prediction tokens correctly', () => {
+ // Arrange
+ const usageWithPredictions = {
+ prompt_tokens: 100,
+ completion_tokens: 80,
+ completion_tokens_details: {
+ accepted_prediction_tokens: 30,
+ rejected_prediction_tokens: 10,
+ },
+ total_tokens: 180,
+ } as OpenAI.Completions.CompletionUsage;
+
+ // Act
+ const result = convertUsage(usageWithPredictions);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 100,
+ totalInputTokens: 100,
+ totalOutputTokens: 80,
+ outputTextTokens: 80,
+ acceptedPredictionTokens: 30,
+ rejectedPredictionTokens: 10,
+ totalTokens: 180,
+ });
+ });
+
+ it('should handle complex usage with all fields correctly', () => {
+ // Arrange
+ const complexUsage = {
+ prompt_tokens: 150,
+ prompt_tokens_details: {
+ audio_tokens: 50,
+ cached_tokens: 40,
+ },
+ citation_tokens: 30,
+ completion_tokens: 120,
+ completion_tokens_details: {
+ reasoning_tokens: 40,
+ audio_tokens: 30,
+ accepted_prediction_tokens: 20,
+ rejected_prediction_tokens: 5,
+ },
+ total_tokens: 300,
+ } as OpenAI.Completions.CompletionUsage;
+
+ // Act
+ const result = convertUsage(complexUsage);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 150,
+ inputAudioTokens: 50,
+ inputCachedTokens: 40,
+ inputCacheMissTokens: 140, // 180 - 40 (totalInputTokens - cachedTokens)
+ inputCitationTokens: 30,
+ totalInputTokens: 180, // 150 + 30
+ outputTextTokens: 50, // 120 - 40 - 30
+ outputReasoningTokens: 40,
+ outputAudioTokens: 30,
+ totalOutputTokens: 120,
+ acceptedPredictionTokens: 20,
+ rejectedPredictionTokens: 5,
+ totalTokens: 330, // 300 + 30 (citation_tokens)
+ });
+ });
+
+ it('should omit zero or undefined values in the final output', () => {
+ // Arrange
+ const usageWithZeros = {
+ prompt_tokens: 100,
+ completion_tokens: 50,
+ total_tokens: 150,
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: undefined,
+ },
+ } as OpenAI.Completions.CompletionUsage;
+
+ // Act
+ const result = convertUsage(usageWithZeros);
+
+ // Assert
+ expect(result).toEqual({
+ inputTextTokens: 100,
+ totalInputTokens: 100,
+ totalOutputTokens: 50,
+ outputTextTokens: 50,
+ totalTokens: 150,
+ });
+
+ // These should not be present in the result
+ expect(result).not.toHaveProperty('outputReasoningTokens');
+ expect(result).not.toHaveProperty('outputAudioTokens');
+ });
+ });
@@ -0,0 +1,50 @@
+ import OpenAI from 'openai';
+
+ import { ModelTokensUsage } from '@/types/message';
+
+ export const convertUsage = (usage: OpenAI.Completions.CompletionUsage): ModelTokensUsage => {
+ // currently only pplx (Perplexity) reports citation_tokens
+ const inputTextTokens = usage.prompt_tokens || 0;
+ const inputCitationTokens = (usage as any).citation_tokens || 0;
+ const totalInputTokens = inputCitationTokens + inputTextTokens;
+
+ const cachedTokens =
+ (usage as any).prompt_cache_hit_tokens || usage.prompt_tokens_details?.cached_tokens;
+
+ const inputCacheMissTokens =
+ (usage as any).prompt_cache_miss_tokens || totalInputTokens - cachedTokens;
+
+ const totalOutputTokens = usage.completion_tokens;
+ const outputReasoning = usage.completion_tokens_details?.reasoning_tokens || 0;
+ const outputAudioTokens = usage.completion_tokens_details?.audio_tokens || 0;
+ const outputTextTokens = totalOutputTokens - outputReasoning - outputAudioTokens;
+
+ const totalTokens = inputCitationTokens + usage.total_tokens;
+
+ const data = {
+ acceptedPredictionTokens: usage.completion_tokens_details?.accepted_prediction_tokens,
+ inputAudioTokens: usage.prompt_tokens_details?.audio_tokens,
+ inputCacheMissTokens: inputCacheMissTokens,
+ inputCachedTokens: cachedTokens,
+ inputCitationTokens: inputCitationTokens,
+ inputTextTokens: inputTextTokens,
+ outputAudioTokens: outputAudioTokens,
+ outputReasoningTokens: outputReasoning,
+ outputTextTokens: outputTextTokens,
+ rejectedPredictionTokens: usage.completion_tokens_details?.rejected_prediction_tokens,
+ totalInputTokens,
+ totalOutputTokens: totalOutputTokens,
+ totalTokens,
+ } satisfies ModelTokensUsage;
+
+ const finalData = {};
+
+ Object.entries(data).forEach(([key, value]) => {
+ if (!!value) {
+ // @ts-ignore
+ finalData[key] = value;
+ }
+ });
+
+ return finalData;
+ };
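Taken together, convertUsage normalizes an OpenAI-compatible usage block into the provider-agnostic shape consumed by the UsageDetail components listed above, and the final Object.entries pass drops every zero or undefined field. A minimal usage sketch, mirroring the PPLX case from the test file (citation_tokens is non-standard, hence the cast):

```ts
// As in usageConverter.test.ts, imported relative to the same directory.
import { convertUsage } from './usageConverter';

// Perplexity-style usage that reports citation tokens alongside prompt tokens.
const usage = {
  prompt_tokens: 80,
  citation_tokens: 20,
  completion_tokens: 50,
  total_tokens: 150,
} as any;

console.log(convertUsage(usage));
// => { inputCitationTokens: 20, inputTextTokens: 80, outputTextTokens: 50,
//      totalInputTokens: 100, totalOutputTokens: 50, totalTokens: 170 }
```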