@lobehub/chat 1.68.9 → 1.68.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/docs/usage/providers/ppio.mdx +5 -5
  4. package/docs/usage/providers/ppio.zh-CN.mdx +7 -7
  5. package/locales/ar/chat.json +5 -1
  6. package/locales/ar/models.json +6 -9
  7. package/locales/bg-BG/chat.json +5 -1
  8. package/locales/bg-BG/models.json +6 -9
  9. package/locales/de-DE/chat.json +5 -1
  10. package/locales/de-DE/models.json +6 -9
  11. package/locales/en-US/chat.json +5 -1
  12. package/locales/en-US/models.json +6 -9
  13. package/locales/es-ES/chat.json +5 -1
  14. package/locales/es-ES/models.json +6 -9
  15. package/locales/fa-IR/chat.json +5 -1
  16. package/locales/fa-IR/models.json +6 -9
  17. package/locales/fr-FR/chat.json +5 -1
  18. package/locales/fr-FR/models.json +6 -9
  19. package/locales/it-IT/chat.json +5 -1
  20. package/locales/it-IT/models.json +6 -9
  21. package/locales/ja-JP/chat.json +5 -1
  22. package/locales/ja-JP/models.json +6 -9
  23. package/locales/ko-KR/chat.json +5 -1
  24. package/locales/ko-KR/models.json +6 -9
  25. package/locales/nl-NL/chat.json +5 -1
  26. package/locales/nl-NL/models.json +6 -9
  27. package/locales/pl-PL/chat.json +5 -1
  28. package/locales/pl-PL/models.json +6 -9
  29. package/locales/pt-BR/chat.json +5 -1
  30. package/locales/pt-BR/models.json +6 -9
  31. package/locales/ru-RU/chat.json +5 -1
  32. package/locales/ru-RU/models.json +6 -9
  33. package/locales/tr-TR/chat.json +5 -1
  34. package/locales/tr-TR/models.json +6 -9
  35. package/locales/vi-VN/chat.json +5 -1
  36. package/locales/vi-VN/models.json +6 -9
  37. package/locales/zh-CN/chat.json +5 -1
  38. package/locales/zh-CN/models.json +6 -9
  39. package/locales/zh-TW/chat.json +5 -1
  40. package/locales/zh-TW/models.json +6 -9
  41. package/package.json +1 -1
  42. package/src/config/aiModels/perplexity.ts +36 -20
  43. package/src/config/modelProviders/ppio.ts +1 -1
  44. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +27 -9
  45. package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +77 -35
  46. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +253 -0
  47. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +65 -46
  48. package/src/libs/agent-runtime/baichuan/index.test.ts +58 -1
  49. package/src/libs/agent-runtime/groq/index.test.ts +36 -284
  50. package/src/libs/agent-runtime/mistral/index.test.ts +39 -300
  51. package/src/libs/agent-runtime/perplexity/index.test.ts +12 -10
  52. package/src/libs/agent-runtime/providerTestUtils.ts +58 -0
  53. package/src/libs/agent-runtime/togetherai/index.test.ts +7 -295
  54. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +3 -0
  55. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +5 -2
  56. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +89 -5
  57. package/src/libs/agent-runtime/utils/streams/anthropic.ts +25 -8
  58. package/src/libs/agent-runtime/utils/streams/openai.test.ts +188 -84
  59. package/src/libs/agent-runtime/utils/streams/openai.ts +8 -17
  60. package/src/libs/agent-runtime/utils/usageConverter.test.ts +249 -0
  61. package/src/libs/agent-runtime/utils/usageConverter.ts +50 -0
  62. package/src/libs/agent-runtime/zeroone/index.test.ts +7 -294
  63. package/src/locales/default/chat.ts +4 -0
  64. package/src/types/message/base.ts +14 -4
  65. package/src/utils/filter.test.ts +0 -122
  66. package/src/utils/filter.ts +0 -29
@@ -1,6 +1,25 @@
1
1
  import { AIChatModelCard } from '@/types/aiModel';
2
2
 
3
3
  const perplexityChatModels: AIChatModelCard[] = [
4
+ {
5
+ abilities: {
6
+ reasoning: true,
7
+ search: true,
8
+ },
9
+ contextWindowTokens: 127_072,
10
+ description:
11
+ 'Deep Research 进行全面的专家级研究,并将其综合成可访问、可操作的报告。',
12
+ displayName: 'Sonar Deep Research',
13
+ enabled: true,
14
+ id: 'sonar-deep-research',
15
+ maxOutput: 8192,
16
+ pricing: { input: 2, output: 8 },
17
+ releasedAt: '2025-02-14',
18
+ settings: {
19
+ searchImpl: 'internal',
20
+ },
21
+ type: 'chat',
22
+ },
4
23
  {
5
24
  abilities: {
6
25
  reasoning: true,
@@ -12,6 +31,8 @@ const perplexityChatModels: AIChatModelCard[] = [
12
31
  enabled: true,
13
32
  id: 'sonar-reasoning-pro',
14
33
  maxOutput: 8192,
34
+ pricing: { input: 2, output: 8 },
35
+ releasedAt: '2025-01-21',
15
36
  settings: {
16
37
  searchImpl: 'internal',
17
38
  },
@@ -28,6 +49,8 @@ const perplexityChatModels: AIChatModelCard[] = [
28
49
  enabled: true,
29
50
  id: 'sonar-reasoning',
30
51
  maxOutput: 8192,
52
+ pricing: { input: 1, output: 5 },
53
+ releasedAt: '2025-01-21',
31
54
  settings: {
32
55
  searchImpl: 'internal',
33
56
  },
@@ -42,6 +65,8 @@ const perplexityChatModels: AIChatModelCard[] = [
42
65
  displayName: 'Sonar Pro',
43
66
  enabled: true,
44
67
  id: 'sonar-pro',
68
+ pricing: { input: 3, output: 15 },
69
+ releasedAt: '2025-01-21',
45
70
  settings: {
46
71
  searchImpl: 'internal',
47
72
  },
@@ -56,34 +81,25 @@ const perplexityChatModels: AIChatModelCard[] = [
56
81
  displayName: 'Sonar',
57
82
  enabled: true,
58
83
  id: 'sonar',
84
+ pricing: { input: 1, output: 1 },
85
+ releasedAt: '2025-01-21',
59
86
  settings: {
60
87
  searchImpl: 'internal',
61
88
  },
62
- type: 'chat',
63
- },
64
- // The following will be deprecated on 02-22
65
- {
66
- contextWindowTokens: 127_072,
67
- description:
68
- 'Llama 3.1 Sonar Small Online 模型,具备8B参数,支持约127,000个标记的上下文长度,专为在线聊天设计,能高效处理各种文本交互。',
69
- displayName: 'Llama 3.1 Sonar Small Online',
70
- id: 'llama-3.1-sonar-small-128k-online',
71
- type: 'chat',
72
- },
73
- {
74
- contextWindowTokens: 127_072,
75
- description:
76
- 'Llama 3.1 Sonar Large Online 模型,具备70B参数,支持约127,000个标记的上下文长度,适用于高容量和多样化聊天任务。',
77
- displayName: 'Llama 3.1 Sonar Large Online',
78
- id: 'llama-3.1-sonar-large-128k-online',
89
+
79
90
  type: 'chat',
80
91
  },
81
92
  {
93
+ abilities: {
94
+ reasoning: true,
95
+ },
82
96
  contextWindowTokens: 127_072,
83
97
  description:
84
- 'Llama 3.1 Sonar Huge Online 模型,具备405B参数,支持约127,000个标记的上下文长度,设计用于复杂的在线聊天应用。',
85
- displayName: 'Llama 3.1 Sonar Huge Online',
86
- id: 'llama-3.1-sonar-huge-128k-online',
98
+ 'R1-1776 DeepSeek R1 模型的一个版本,经过后训练,可提供未经审查、无偏见的事实信息。',
99
+ displayName: 'R1 1776',
100
+ id: 'r1-1776',
101
+ pricing: { input: 2, output: 8 },
102
+ releasedAt: '2025-02-18',
87
103
  type: 'chat',
88
104
  },
89
105
  ];
@@ -243,7 +243,7 @@ const PPIO: ModelProviderCard = {
243
243
  sdkType: 'openai',
244
244
  showModelFetcher: true,
245
245
  },
246
- url: 'https://ppinfra.com/user/register?invited_by=RQIMOC',
246
+ url: 'https://ppinfra.com/user/register?invited_by=RQIMOC&utm_source=github_lobechat',
247
247
  };
248
248
 
249
249
  export default PPIO;
@@ -2,7 +2,7 @@ import { ModelIcon } from '@lobehub/icons';
2
2
  import { Icon, Tooltip } from '@lobehub/ui';
3
3
  import { Segmented } from 'antd';
4
4
  import { createStyles } from 'antd-style';
5
- import { ArrowDownToDot, ArrowUpFromDot, CircleFadingArrowUp } from 'lucide-react';
5
+ import { ArrowDownToDot, ArrowUpFromDot, BookUp2Icon, CircleFadingArrowUp } from 'lucide-react';
6
6
  import { memo } from 'react';
7
7
  import { useTranslation } from 'react-i18next';
8
8
  import { Flexbox } from 'react-layout-kit';
@@ -45,12 +45,16 @@ const ModelCard = memo<ModelCardProps>(({ pricing, id, provider, displayName })
45
45
  pricing?.cachedInput,
46
46
  pricing?.currency as ModelPriceCurrency,
47
47
  );
48
+ const writeCacheInputPrice = formatPriceByCurrency(
49
+ pricing?.writeCacheInput,
50
+ pricing?.currency as ModelPriceCurrency,
51
+ );
48
52
  const outputPrice = formatPriceByCurrency(
49
53
  pricing?.output,
50
54
  pricing?.currency as ModelPriceCurrency,
51
55
  );
52
56
  return (
53
- <>
57
+ <Flexbox gap={8}>
54
58
  <Flexbox
55
59
  align={'center'}
56
60
  className={styles.container}
@@ -91,26 +95,38 @@ const ModelCard = memo<ModelCardProps>(({ pricing, id, provider, displayName })
91
95
  </Flexbox>
92
96
  )}
93
97
  </Flexbox>
94
- {isShowCredit && (
98
+ {isShowCredit ? (
95
99
  <Flexbox horizontal justify={'space-between'}>
96
100
  <div />
97
101
  <Flexbox align={'center'} className={styles.pricing} gap={8} horizontal>
98
102
  {t('messages.modelCard.creditPricing')}:
103
+ {pricing?.cachedInput && (
104
+ <Tooltip
105
+ title={t('messages.modelCard.pricing.inputCachedTokens', {
106
+ amount: cachedInputPrice,
107
+ })}
108
+ >
109
+ <Flexbox gap={2} horizontal>
110
+ <Icon icon={CircleFadingArrowUp} />
111
+ {cachedInputPrice}
112
+ </Flexbox>
113
+ </Tooltip>
114
+ )}
99
115
  <Tooltip title={t('messages.modelCard.pricing.inputTokens', { amount: inputPrice })}>
100
116
  <Flexbox gap={2} horizontal>
101
117
  <Icon icon={ArrowUpFromDot} />
102
118
  {inputPrice}
103
119
  </Flexbox>
104
120
  </Tooltip>
105
- {pricing?.cachedInput && (
121
+ {pricing?.writeCacheInput && (
106
122
  <Tooltip
107
- title={t('messages.modelCard.pricing.inputCachedTokens', {
108
- amount: cachedInputPrice,
123
+ title={t('messages.modelCard.pricing.writeCacheInputTokens', {
124
+ amount: writeCacheInputPrice,
109
125
  })}
110
126
  >
111
127
  <Flexbox gap={2} horizontal>
112
- <Icon icon={CircleFadingArrowUp} />
113
- {cachedInputPrice}
128
+ <Icon icon={BookUp2Icon} />
129
+ {writeCacheInputPrice}
114
130
  </Flexbox>
115
131
  </Tooltip>
116
132
  )}
@@ -122,8 +138,10 @@ const ModelCard = memo<ModelCardProps>(({ pricing, id, provider, displayName })
122
138
  </Tooltip>
123
139
  </Flexbox>
124
140
  </Flexbox>
141
+ ) : (
142
+ <div style={{ height: 18 }} />
125
143
  )}
126
- </>
144
+ </Flexbox>
127
145
  );
128
146
  });
129
147
 
@@ -37,6 +37,12 @@ const TokenDetail = memo<TokenDetailProps>(({ usage, model, provider }) => {
37
37
  title: t('messages.tokenDetails.inputAudio'),
38
38
  value: isShowCredit ? detailTokens.inputAudio.credit : detailTokens.inputAudio.token,
39
39
  },
40
+ !!detailTokens.inputCitation && {
41
+ color: theme.orange,
42
+ id: 'inputText',
43
+ title: t('messages.tokenDetails.inputCitation'),
44
+ value: isShowCredit ? detailTokens.inputCitation.credit : detailTokens.inputCitation.token,
45
+ },
40
46
  !!detailTokens.inputText && {
41
47
  color: theme.green,
42
48
  id: 'inputText',
@@ -46,11 +52,13 @@ const TokenDetail = memo<TokenDetailProps>(({ usage, model, provider }) => {
46
52
  ].filter(Boolean) as TokenProgressItem[];
47
53
 
48
54
  const outputDetails = [
49
- !!detailTokens.reasoning && {
55
+ !!detailTokens.outputReasoning && {
50
56
  color: theme.pink,
51
57
  id: 'reasoning',
52
58
  title: t('messages.tokenDetails.reasoning'),
53
- value: isShowCredit ? detailTokens.reasoning.credit : detailTokens.reasoning.token,
59
+ value: isShowCredit
60
+ ? detailTokens.outputReasoning.credit
61
+ : detailTokens.outputReasoning.token,
54
62
  },
55
63
  !!detailTokens.outputAudio && {
56
64
  color: theme.cyan9,
@@ -67,18 +75,26 @@ const TokenDetail = memo<TokenDetailProps>(({ usage, model, provider }) => {
67
75
  ].filter(Boolean) as TokenProgressItem[];
68
76
 
69
77
  const totalDetail = [
70
- !!detailTokens.uncachedInput && {
78
+ !!detailTokens.inputCacheMiss && {
71
79
  color: theme.colorFill,
72
80
 
73
81
  id: 'uncachedInput',
74
82
  title: t('messages.tokenDetails.inputUncached'),
75
- value: isShowCredit ? detailTokens.uncachedInput.credit : detailTokens.uncachedInput.token,
83
+ value: isShowCredit ? detailTokens.inputCacheMiss.credit : detailTokens.inputCacheMiss.token,
76
84
  },
77
- !!detailTokens.cachedInput && {
85
+ !!detailTokens.inputCached && {
78
86
  color: theme.orange,
79
- id: 'cachedInput',
87
+ id: 'inputCached',
80
88
  title: t('messages.tokenDetails.inputCached'),
81
- value: isShowCredit ? detailTokens.cachedInput.credit : detailTokens.cachedInput.token,
89
+ value: isShowCredit ? detailTokens.inputCached.credit : detailTokens.inputCached.token,
90
+ },
91
+ !!detailTokens.inputCachedWrite && {
92
+ color: theme.yellow,
93
+ id: 'cachedWriteInput',
94
+ title: t('messages.tokenDetails.inputWriteCached'),
95
+ value: isShowCredit
96
+ ? detailTokens.inputCachedWrite.credit
97
+ : detailTokens.inputCachedWrite.token,
82
98
  },
83
99
  !!detailTokens.totalOutput && {
84
100
  color: theme.colorSuccess,
@@ -91,43 +107,69 @@ const TokenDetail = memo<TokenDetailProps>(({ usage, model, provider }) => {
91
107
  const displayTotal =
92
108
  isShowCredit && !!detailTokens.totalTokens
93
109
  ? formatNumber(detailTokens.totalTokens.credit)
94
- : formatNumber(usage.totalTokens);
110
+ : formatNumber(detailTokens.totalTokens!.token);
95
111
 
112
+ const averagePricing = formatNumber(
113
+ detailTokens.totalTokens!.credit / detailTokens.totalTokens!.token,
114
+ 2,
115
+ );
96
116
  return (
97
117
  <Popover
98
118
  arrow={false}
99
119
  content={
100
- <Flexbox gap={20} style={{ minWidth: 200 }}>
120
+ <Flexbox gap={8} style={{ minWidth: 200 }}>
101
121
  {modelCard && <ModelCard {...modelCard} provider={provider} />}
102
- {inputDetails.length > 1 && (
103
- <>
104
- <Flexbox align={'center'} gap={4} horizontal justify={'space-between'} width={'100%'}>
105
- <div style={{ color: theme.colorTextDescription }}>
106
- {t('messages.tokenDetails.inputTitle')}
107
- </div>
122
+
123
+ <Flexbox gap={20}>
124
+ {inputDetails.length > 1 && (
125
+ <Flexbox gap={4}>
126
+ <Flexbox
127
+ align={'center'}
128
+ gap={4}
129
+ horizontal
130
+ justify={'space-between'}
131
+ width={'100%'}
132
+ >
133
+ <div style={{ color: theme.colorTextDescription, fontSize: 12 }}>
134
+ {t('messages.tokenDetails.inputTitle')}
135
+ </div>
136
+ </Flexbox>
137
+ <TokenProgress data={inputDetails} showIcon />
108
138
  </Flexbox>
109
- <TokenProgress data={inputDetails} showIcon />
110
- </>
111
- )}
112
- {outputDetails.length > 1 && (
113
- <>
114
- <Flexbox align={'center'} gap={4} horizontal justify={'space-between'} width={'100%'}>
115
- <div style={{ color: theme.colorTextDescription }}>
116
- {t('messages.tokenDetails.outputTitle')}
139
+ )}
140
+ {outputDetails.length > 1 && (
141
+ <>
142
+ <Flexbox
143
+ align={'center'}
144
+ gap={4}
145
+ horizontal
146
+ justify={'space-between'}
147
+ width={'100%'}
148
+ >
149
+ <div style={{ color: theme.colorTextDescription }}>
150
+ {t('messages.tokenDetails.outputTitle')}
151
+ </div>
152
+ </Flexbox>
153
+ <TokenProgress data={outputDetails} showIcon />
154
+ </>
155
+ )}
156
+ <Flexbox>
157
+ <TokenProgress data={totalDetail} showIcon />
158
+ <Divider style={{ marginBlock: 8 }} />
159
+ <Flexbox align={'center'} gap={4} horizontal justify={'space-between'}>
160
+ <div style={{ color: theme.colorTextSecondary }}>
161
+ {t('messages.tokenDetails.total')}
117
162
  </div>
163
+ <div style={{ fontWeight: 500 }}>{displayTotal}</div>
118
164
  </Flexbox>
119
- <TokenProgress data={outputDetails} showIcon />
120
- </>
121
- )}
122
-
123
- <Flexbox>
124
- <TokenProgress data={totalDetail} showIcon />
125
- <Divider style={{ marginBlock: 8 }} />
126
- <Flexbox align={'center'} gap={4} horizontal justify={'space-between'}>
127
- <div style={{ color: theme.colorTextSecondary }}>
128
- {t('messages.tokenDetails.total')}
129
- </div>
130
- <div style={{ fontWeight: 500 }}>{displayTotal}</div>
165
+ {isShowCredit && (
166
+ <Flexbox align={'center'} gap={4} horizontal justify={'space-between'}>
167
+ <div style={{ color: theme.colorTextSecondary }}>
168
+ {t('messages.tokenDetails.average')}
169
+ </div>
170
+ <div style={{ fontWeight: 500 }}>{averagePricing}</div>
171
+ </Flexbox>
172
+ )}
131
173
  </Flexbox>
132
174
  </Flexbox>
133
175
  </Flexbox>
@@ -0,0 +1,253 @@
1
+ import { describe, expect, it } from 'vitest';
2
+
3
+ import { LobeDefaultAiModelListItem } from '@/types/aiModel';
4
+ import { ModelTokensUsage } from '@/types/message';
5
+
6
+ import { getDetailsToken } from './tokens';
7
+
8
+ describe('getDetailsToken', () => {
9
+ // 基本测试数据
10
+ const mockModelCard: LobeDefaultAiModelListItem = {
11
+ pricing: {
12
+ input: 0.01,
13
+ output: 0.02,
14
+ cachedInput: 0.005,
15
+ audioInput: 0.03,
16
+ audioOutput: 0.04,
17
+ },
18
+ } as LobeDefaultAiModelListItem;
19
+
20
+ it('should return empty object when usage is empty', () => {
21
+ const usage: ModelTokensUsage = {};
22
+ const result = getDetailsToken(usage);
23
+
24
+ expect(result).toEqual({
25
+ cachedInput: undefined,
26
+ inputAudio: undefined,
27
+ inputCitation: undefined,
28
+ inputText: undefined,
29
+ outputAudio: undefined,
30
+ outputText: undefined,
31
+ reasoning: undefined,
32
+ totalOutput: undefined,
33
+ totalTokens: undefined,
34
+ uncachedInput: undefined,
35
+ });
36
+ });
37
+
38
+ it('should handle inputTextTokens correctly', () => {
39
+ const usage: ModelTokensUsage = {
40
+ inputTextTokens: 100,
41
+ };
42
+
43
+ const result = getDetailsToken(usage, mockModelCard);
44
+
45
+ expect(result.inputText).toEqual({
46
+ credit: 1, // 100 * 0.01 = 1
47
+ token: 100,
48
+ });
49
+ });
50
+
51
+ it('should handle legacy inputTokens property', () => {
52
+ const usage = {
53
+ inputTokens: 100,
54
+ } as any;
55
+
56
+ const result = getDetailsToken(usage, mockModelCard);
57
+
58
+ expect(result.inputText).toEqual({
59
+ credit: 1, // 100 * 0.01 = 1
60
+ token: 100,
61
+ });
62
+ });
63
+
64
+ it('should handle cachedTokens correctly', () => {
65
+ const usage = {
66
+ totalInputTokens: 200,
67
+ cachedTokens: 50,
68
+ } as ModelTokensUsage;
69
+
70
+ const result = getDetailsToken(usage, mockModelCard);
71
+
72
+ expect(result.inputCached).toEqual({
73
+ credit: 0, // 50 * 0.005 = 0.25, rounded to 0
74
+ token: 50,
75
+ });
76
+
77
+ expect(result.inputCacheMiss).toEqual({
78
+ credit: 2, // (200 - 50) * 0.01 = 1.5, rounded to 2
79
+ token: 150,
80
+ });
81
+ });
82
+
83
+ it('should handle outputTokens correctly', () => {
84
+ const usage = { outputTokens: 150 } as ModelTokensUsage;
85
+
86
+ const result = getDetailsToken(usage, mockModelCard);
87
+
88
+ expect(result.outputText).toEqual({
89
+ credit: 3, // 150 * 0.02 = 3
90
+ token: 150,
91
+ });
92
+
93
+ expect(result.totalOutput).toEqual({
94
+ credit: 3,
95
+ token: 150,
96
+ });
97
+ });
98
+
99
+ it('should handle reasoningTokens correctly', () => {
100
+ const usage = {
101
+ outputTokens: 200,
102
+ reasoningTokens: 50,
103
+ } as ModelTokensUsage;
104
+
105
+ const result = getDetailsToken(usage, mockModelCard);
106
+
107
+ expect(result.outputReasoning).toEqual({
108
+ credit: 1, // 50 * 0.02 = 1
109
+ token: 50,
110
+ });
111
+
112
+ expect(result.outputText).toEqual({
113
+ credit: 3, // (200 - 50) * 0.02 = 3
114
+ token: 150,
115
+ });
116
+ });
117
+
118
+ it('should handle audio tokens correctly', () => {
119
+ const usage = {
120
+ inputAudioTokens: 100,
121
+ outputAudioTokens: 50,
122
+ outputTokens: 150,
123
+ } as ModelTokensUsage;
124
+
125
+ const result = getDetailsToken(usage, mockModelCard);
126
+
127
+ expect(result.inputAudio).toEqual({
128
+ credit: 3, // 100 * 0.03 = 3
129
+ token: 100,
130
+ });
131
+
132
+ expect(result.outputAudio).toEqual({
133
+ credit: 2, // 50 * 0.04 = 2
134
+ id: 'outputAudio',
135
+ token: 50,
136
+ });
137
+
138
+ expect(result.outputText).toEqual({
139
+ credit: 2, // (150 - 50) * 0.02 = 2
140
+ token: 100,
141
+ });
142
+ });
143
+
144
+ it('should handle inputCitationTokens correctly', () => {
145
+ const usage: ModelTokensUsage = {
146
+ inputCitationTokens: 75,
147
+ };
148
+
149
+ const result = getDetailsToken(usage, mockModelCard);
150
+
151
+ expect(result.inputCitation).toEqual({
152
+ credit: 1, // 75 * 0.01 = 0.75, rounded to 1
153
+ token: 75,
154
+ });
155
+ });
156
+
157
+ it('should handle totalTokens correctly', () => {
158
+ const usage = {
159
+ totalTokens: 500,
160
+ totalInputTokens: 200,
161
+ inputCachedTokens: 50,
162
+ outputTokens: 300,
163
+ } as ModelTokensUsage;
164
+
165
+ const result = getDetailsToken(usage, mockModelCard);
166
+
167
+ // uncachedInput: (200 - 50) * 0.01 = 1.5 -> 2
168
+ // cachedInput: 50 * 0.005 = 0.25 -> 0
169
+ // totalOutput: 300 * 0.02 = 6
170
+ // totalCredit = 2 + 0 + 6 = 8
171
+
172
+ expect(result.totalTokens).toEqual({
173
+ credit: 8,
174
+ token: 500,
175
+ });
176
+ });
177
+
178
+ it('should handle missing pricing information', () => {
179
+ const usage = { inputTextTokens: 100, outputTokens: 200 } as ModelTokensUsage;
180
+
181
+ const result = getDetailsToken(usage);
182
+
183
+ expect(result.inputText).toEqual({
184
+ credit: '-',
185
+ token: 100,
186
+ });
187
+
188
+ expect(result.outputText).toEqual({
189
+ credit: '-',
190
+ token: 200,
191
+ });
192
+ });
193
+
194
+ it('should handle complex scenario with all token types', () => {
195
+ const usage: ModelTokensUsage = {
196
+ totalTokens: 1000,
197
+ totalInputTokens: 400,
198
+ inputTextTokens: 300,
199
+ inputAudioTokens: 50,
200
+ inputCitationTokens: 50,
201
+ inputCachedTokens: 100,
202
+ totalOutputTokens: 600,
203
+ outputAudioTokens: 100,
204
+ outputReasoningTokens: 200,
205
+ };
206
+
207
+ const result = getDetailsToken(usage, mockModelCard);
208
+
209
+ expect(result).toMatchObject({
210
+ inputCached: {
211
+ credit: 1, // 100 * 0.005 = 0.5, rounded to 1
212
+ token: 100,
213
+ },
214
+ inputCacheMiss: {
215
+ credit: 3, // (400 - 100) * 0.01 = 3
216
+ token: 300,
217
+ },
218
+ inputText: {
219
+ credit: 3, // 300 * 0.01 = 3
220
+ token: 300,
221
+ },
222
+ inputAudio: {
223
+ credit: 2, // 50 * 0.03 = 1.5, rounded to 2
224
+ token: 50,
225
+ },
226
+ inputCitation: {
227
+ credit: 1, // 50 * 0.01 = 0.5, rounded to 1
228
+ token: 50,
229
+ },
230
+ outputAudio: {
231
+ credit: 4, // 100 * 0.04 = 4
232
+ id: 'outputAudio',
233
+ token: 100,
234
+ },
235
+ outputReasoning: {
236
+ credit: 4, // 200 * 0.02 = 4
237
+ token: 200,
238
+ },
239
+ outputText: {
240
+ credit: 6, // (600 - 200 - 100) * 0.02 = 6
241
+ token: 300,
242
+ },
243
+ totalOutput: {
244
+ credit: 12, // 600 * 0.02 = 12
245
+ token: 600,
246
+ },
247
+ totalTokens: {
248
+ credit: 16, // 3 + 1 + 12 = 16
249
+ token: 1000,
250
+ },
251
+ });
252
+ });
253
+ });