@lobehub/chat 1.133.1 → 1.133.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (130)
  1. package/.cursor/rules/project-introduce.mdc +19 -25
  2. package/.cursor/rules/project-structure.mdc +102 -221
  3. package/.cursor/rules/{rules-attach.mdc → rules-index.mdc} +2 -11
  4. package/.cursor/rules/typescript.mdc +3 -53
  5. package/.vscode/settings.json +2 -1
  6. package/AGENTS.md +33 -54
  7. package/CHANGELOG.md +58 -0
  8. package/CLAUDE.md +1 -26
  9. package/changelog/v1.json +21 -0
  10. package/locales/ar/chat.json +5 -0
  11. package/locales/ar/image.json +7 -0
  12. package/locales/ar/models.json +2 -2
  13. package/locales/bg-BG/chat.json +5 -0
  14. package/locales/bg-BG/image.json +7 -0
  15. package/locales/de-DE/chat.json +5 -0
  16. package/locales/de-DE/image.json +7 -0
  17. package/locales/en-US/chat.json +5 -0
  18. package/locales/en-US/image.json +7 -0
  19. package/locales/es-ES/chat.json +5 -0
  20. package/locales/es-ES/image.json +7 -0
  21. package/locales/es-ES/tool.json +1 -1
  22. package/locales/fa-IR/chat.json +5 -0
  23. package/locales/fa-IR/image.json +7 -0
  24. package/locales/fa-IR/models.json +2 -2
  25. package/locales/fr-FR/chat.json +5 -0
  26. package/locales/fr-FR/image.json +7 -0
  27. package/locales/fr-FR/models.json +2 -2
  28. package/locales/it-IT/chat.json +5 -0
  29. package/locales/it-IT/image.json +7 -0
  30. package/locales/ja-JP/chat.json +5 -0
  31. package/locales/ja-JP/image.json +7 -0
  32. package/locales/ko-KR/chat.json +5 -0
  33. package/locales/ko-KR/image.json +7 -0
  34. package/locales/nl-NL/chat.json +5 -0
  35. package/locales/nl-NL/image.json +7 -0
  36. package/locales/pl-PL/chat.json +5 -0
  37. package/locales/pl-PL/image.json +7 -0
  38. package/locales/pt-BR/chat.json +5 -0
  39. package/locales/pt-BR/image.json +7 -0
  40. package/locales/ru-RU/chat.json +5 -0
  41. package/locales/ru-RU/image.json +7 -0
  42. package/locales/ru-RU/tool.json +1 -1
  43. package/locales/tr-TR/chat.json +5 -0
  44. package/locales/tr-TR/image.json +7 -0
  45. package/locales/tr-TR/models.json +2 -2
  46. package/locales/vi-VN/chat.json +5 -0
  47. package/locales/vi-VN/image.json +7 -0
  48. package/locales/zh-CN/chat.json +5 -0
  49. package/locales/zh-CN/image.json +7 -0
  50. package/locales/zh-TW/chat.json +5 -0
  51. package/locales/zh-TW/image.json +7 -0
  52. package/package.json +4 -5
  53. package/packages/const/package.json +4 -0
  54. package/packages/const/src/currency.ts +2 -0
  55. package/packages/const/src/index.ts +1 -0
  56. package/packages/model-bank/package.json +2 -1
  57. package/packages/model-bank/src/aiModels/google.ts +6 -0
  58. package/packages/model-bank/src/aiModels/openai.ts +6 -22
  59. package/packages/model-bank/src/standard-parameters/index.ts +56 -46
  60. package/packages/model-runtime/package.json +1 -0
  61. package/packages/model-runtime/src/core/RouterRuntime/createRuntime.ts +4 -2
  62. package/packages/model-runtime/src/core/openaiCompatibleFactory/createImage.ts +12 -2
  63. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts +16 -5
  64. package/packages/model-runtime/src/core/streams/anthropic.ts +25 -36
  65. package/packages/model-runtime/src/core/streams/google/google-ai.test.ts +1 -1
  66. package/packages/model-runtime/src/core/streams/google/index.ts +18 -42
  67. package/packages/model-runtime/src/core/streams/openai/openai.test.ts +7 -10
  68. package/packages/model-runtime/src/core/streams/openai/openai.ts +14 -11
  69. package/packages/model-runtime/src/core/streams/openai/responsesStream.ts +11 -5
  70. package/packages/model-runtime/src/core/streams/protocol.ts +25 -6
  71. package/packages/model-runtime/src/core/streams/qwen.ts +2 -2
  72. package/packages/model-runtime/src/core/streams/spark.ts +3 -3
  73. package/packages/model-runtime/src/core/streams/vertex-ai.test.ts +2 -2
  74. package/packages/model-runtime/src/core/streams/vertex-ai.ts +14 -23
  75. package/packages/model-runtime/src/core/usageConverters/anthropic.test.ts +99 -0
  76. package/packages/model-runtime/src/core/usageConverters/anthropic.ts +73 -0
  77. package/packages/model-runtime/src/core/usageConverters/google-ai.test.ts +88 -0
  78. package/packages/model-runtime/src/core/usageConverters/google-ai.ts +55 -0
  79. package/packages/model-runtime/src/core/usageConverters/index.ts +4 -0
  80. package/packages/model-runtime/src/core/usageConverters/openai.test.ts +429 -0
  81. package/packages/model-runtime/src/core/usageConverters/openai.ts +152 -0
  82. package/packages/model-runtime/src/core/usageConverters/utils/computeChatCost.test.ts +455 -0
  83. package/packages/model-runtime/src/core/usageConverters/utils/computeChatCost.ts +293 -0
  84. package/packages/model-runtime/src/core/usageConverters/utils/computeImageCost.test.ts +47 -0
  85. package/packages/model-runtime/src/core/usageConverters/utils/computeImageCost.ts +121 -0
  86. package/packages/model-runtime/src/core/usageConverters/utils/index.ts +11 -0
  87. package/packages/model-runtime/src/core/usageConverters/utils/withUsageCost.ts +19 -0
  88. package/packages/model-runtime/src/index.ts +2 -0
  89. package/packages/model-runtime/src/providers/anthropic/index.ts +48 -1
  90. package/packages/model-runtime/src/providers/google/createImage.ts +11 -2
  91. package/packages/model-runtime/src/providers/google/index.ts +8 -1
  92. package/packages/model-runtime/src/providers/openai/__snapshots__/index.test.ts.snap +7 -0
  93. package/packages/model-runtime/src/providers/zhipu/index.ts +3 -1
  94. package/packages/model-runtime/src/types/chat.ts +5 -3
  95. package/packages/model-runtime/src/types/image.ts +20 -9
  96. package/packages/model-runtime/src/utils/getModelPricing.ts +36 -0
  97. package/packages/obervability-otel/package.json +2 -2
  98. package/packages/ssrf-safe-fetch/index.test.ts +343 -0
  99. package/packages/ssrf-safe-fetch/index.ts +37 -0
  100. package/packages/ssrf-safe-fetch/package.json +17 -0
  101. package/packages/ssrf-safe-fetch/vitest.config.mts +10 -0
  102. package/packages/types/src/message/base.ts +43 -17
  103. package/packages/utils/src/client/apiKeyManager.test.ts +70 -0
  104. package/packages/utils/src/client/apiKeyManager.ts +41 -0
  105. package/packages/utils/src/client/index.ts +2 -0
  106. package/packages/utils/src/fetch/fetchSSE.ts +4 -4
  107. package/packages/utils/src/index.ts +1 -0
  108. package/packages/utils/src/toolManifest.ts +2 -1
  109. package/src/app/(backend)/webapi/proxy/route.ts +2 -13
  110. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/default.tsx +2 -0
  111. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatMinimap/index.tsx +335 -0
  112. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/TopicPanel.tsx +4 -0
  113. package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/components/QualitySelect.tsx +23 -0
  114. package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/index.tsx +9 -0
  115. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +13 -13
  116. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +1 -1
  117. package/src/features/Conversation/components/ChatItem/index.tsx +56 -2
  118. package/src/features/Conversation/components/VirtualizedList/VirtuosoContext.ts +88 -0
  119. package/src/features/Conversation/components/VirtualizedList/index.tsx +15 -1
  120. package/src/locales/default/chat.ts +5 -0
  121. package/src/locales/default/image.ts +7 -0
  122. package/src/server/modules/EdgeConfig/index.ts +1 -1
  123. package/src/server/routers/async/image.ts +9 -1
  124. package/src/services/_auth.ts +12 -12
  125. package/src/services/chat/contextEngineering.ts +2 -3
  126. package/.cursor/rules/backend-architecture.mdc +0 -176
  127. package/.cursor/rules/code-review.mdc +0 -58
  128. package/.cursor/rules/cursor-ux.mdc +0 -32
  129. package/.cursor/rules/define-database-model.mdc +0 -8
  130. package/.cursor/rules/system-role.mdc +0 -31
@@ -0,0 +1,88 @@
1
+ import { GenerateContentResponseUsageMetadata, MediaModality } from '@google/genai';
2
+ import type { Pricing } from 'model-bank';
3
+ import { describe, expect, it } from 'vitest';
4
+
5
+ import { convertGoogleAIUsage } from './google-ai';
6
+
7
+ describe('convertGoogleAIUsage', () => {
8
+ it('should convert usage details with text and image breakdown', () => {
9
+ const usage: GenerateContentResponseUsageMetadata = {
10
+ cachedContentTokenCount: 6,
11
+ candidatesTokenCount: 40,
12
+ candidatesTokensDetails: [
13
+ { modality: MediaModality.TEXT, tokenCount: 30 },
14
+ { modality: MediaModality.IMAGE, tokenCount: 10 },
15
+ ],
16
+ promptTokenCount: 70,
17
+ promptTokensDetails: [
18
+ { modality: MediaModality.TEXT, tokenCount: 60 },
19
+ { modality: MediaModality.IMAGE, tokenCount: 5 },
20
+ ],
21
+ thoughtsTokenCount: 12,
22
+ totalTokenCount: 122,
23
+ };
24
+
25
+ const result = convertGoogleAIUsage(usage);
26
+
27
+ expect(result).toEqual({
28
+ inputAudioTokens: undefined,
29
+ inputCacheMissTokens: 64,
30
+ inputCachedTokens: 6,
31
+ inputImageTokens: 5,
32
+ inputTextTokens: 60,
33
+ outputImageTokens: 10,
34
+ outputReasoningTokens: 12,
35
+ outputTextTokens: 30,
36
+ totalInputTokens: 70,
37
+ totalOutputTokens: 52,
38
+ totalTokens: 122,
39
+ });
40
+ });
41
+
42
+ it('should fall back to total tokens when text modality missing', () => {
43
+ const usage: GenerateContentResponseUsageMetadata = {
44
+ cachedContentTokenCount: undefined,
45
+ candidatesTokenCount: 55,
46
+ candidatesTokensDetails: [{ modality: MediaModality.IMAGE, tokenCount: 15 }],
47
+ promptTokenCount: 40,
48
+ promptTokensDetails: [{ modality: MediaModality.IMAGE, tokenCount: 3 }],
49
+ thoughtsTokenCount: 5,
50
+ totalTokenCount: 100,
51
+ };
52
+
53
+ const result = convertGoogleAIUsage(usage);
54
+
55
+ expect(result).toEqual({
56
+ inputAudioTokens: undefined,
57
+ inputCacheMissTokens: undefined,
58
+ inputCachedTokens: undefined,
59
+ inputImageTokens: 3,
60
+ inputTextTokens: undefined,
61
+ outputImageTokens: 15,
62
+ outputReasoningTokens: 5,
63
+ outputTextTokens: 40,
64
+ totalInputTokens: 40,
65
+ totalOutputTokens: 60,
66
+ totalTokens: 100,
67
+ });
68
+ });
69
+
70
+ it('should attach cost when pricing provided', () => {
71
+ const usage: GenerateContentResponseUsageMetadata = {
72
+ candidatesTokenCount: 100_000,
73
+ promptTokenCount: 200_000,
74
+ totalTokenCount: 300_000,
75
+ };
76
+
77
+ const pricing: Pricing = {
78
+ units: [
79
+ { name: 'textInput', rate: 1, strategy: 'fixed', unit: 'millionTokens' },
80
+ { name: 'textOutput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
81
+ ],
82
+ };
83
+
84
+ const result = convertGoogleAIUsage(usage, pricing);
85
+
86
+ expect(result.cost).toBeCloseTo(0.4, 10);
87
+ });
88
+ });
@@ -0,0 +1,55 @@
1
+ import {
2
+ GenerateContentResponseUsageMetadata,
3
+ MediaModality,
4
+ ModalityTokenCount,
5
+ } from '@google/genai';
6
+ import type { Pricing } from 'model-bank';
7
+
8
+ import { ModelUsage } from '@/types/message';
9
+
10
+ import { withUsageCost } from './utils/withUsageCost';
11
+
12
+ const getTokenCount = (details: ModalityTokenCount[] | undefined, modality: MediaModality) => {
13
+ return details?.find((detail) => detail?.modality === modality)?.tokenCount;
14
+ };
15
+
16
+ export const convertGoogleAIUsage = (
17
+ usage: GenerateContentResponseUsageMetadata,
18
+ pricing?: Pricing,
19
+ ): ModelUsage => {
20
+ const inputCacheMissTokens =
21
+ usage.promptTokenCount && usage.cachedContentTokenCount
22
+ ? usage.promptTokenCount - usage.cachedContentTokenCount
23
+ : undefined;
24
+
25
+ const reasoningTokens = usage.thoughtsTokenCount;
26
+ const candidatesDetails = usage.candidatesTokensDetails;
27
+ const totalCandidatesTokens =
28
+ usage.candidatesTokenCount ??
29
+ candidatesDetails?.reduce((sum, detail) => sum + (detail?.tokenCount ?? 0), 0) ??
30
+ 0;
31
+
32
+ const outputImageTokens = getTokenCount(candidatesDetails, MediaModality.IMAGE) ?? 0;
33
+ const textTokensFromDetails = getTokenCount(candidatesDetails, MediaModality.TEXT);
34
+ const outputTextTokens =
35
+ typeof textTokensFromDetails === 'number' && textTokensFromDetails > 0
36
+ ? textTokensFromDetails
37
+ : Math.max(0, totalCandidatesTokens - outputImageTokens);
38
+ const totalOutputTokens = totalCandidatesTokens + (reasoningTokens ?? 0);
39
+
40
+ const normalizedUsage = {
41
+ inputAudioTokens: getTokenCount(usage.promptTokensDetails, MediaModality.AUDIO),
42
+ inputCacheMissTokens,
43
+ inputCachedTokens: usage.cachedContentTokenCount,
44
+ inputImageTokens: getTokenCount(usage.promptTokensDetails, MediaModality.IMAGE),
45
+ inputTextTokens: getTokenCount(usage.promptTokensDetails, MediaModality.TEXT),
46
+ outputImageTokens,
47
+ outputReasoningTokens: reasoningTokens,
48
+ outputTextTokens,
49
+ totalInputTokens: usage.promptTokenCount,
50
+ totalOutputTokens,
51
+ totalTokens: usage.totalTokenCount,
52
+ } satisfies ModelUsage;
53
+
54
+ return withUsageCost(normalizedUsage, pricing);
55
+ };
@@ -0,0 +1,4 @@
1
+ export { convertAnthropicUsage } from './anthropic';
2
+ export { convertGoogleAIUsage } from './google-ai';
3
+ export { convertOpenAIResponseUsage, convertOpenAIUsage } from './openai';
4
+ export { computeImageCost } from './utils/computeImageCost';
@@ -0,0 +1,429 @@
1
+ import type { Pricing } from 'model-bank';
2
+ import OpenAI from 'openai';
3
+ import { describe, expect, it } from 'vitest';
4
+
5
+ import { convertOpenAIImageUsage, convertOpenAIResponseUsage, convertOpenAIUsage } from './openai';
6
+
7
+ describe('convertUsage', () => {
8
+ it('should convert basic OpenAI usage data correctly', () => {
9
+ // Arrange
10
+ const openaiUsage: OpenAI.Completions.CompletionUsage = {
11
+ prompt_tokens: 100,
12
+ completion_tokens: 50,
13
+ total_tokens: 150,
14
+ };
15
+
16
+ // Act
17
+ const result = convertOpenAIUsage(openaiUsage);
18
+
19
+ // Assert
20
+ expect(result).toEqual({
21
+ inputTextTokens: 100,
22
+ totalInputTokens: 100,
23
+ totalOutputTokens: 50,
24
+ outputTextTokens: 50,
25
+ totalTokens: 150,
26
+ });
27
+ });
28
+
29
+ it('should handle PPLX citation tokens correctly', () => {
30
+ // Arrange
31
+ const pplxUsage = {
32
+ prompt_tokens: 80,
33
+ citation_tokens: 20,
34
+ completion_tokens: 50,
35
+ total_tokens: 150,
36
+ } as OpenAI.Completions.CompletionUsage;
37
+
38
+ // Act
39
+ const result = convertOpenAIUsage(pplxUsage);
40
+
41
+ // Assert
42
+ expect(result).toEqual({
43
+ inputTextTokens: 80,
44
+ inputCitationTokens: 20,
45
+ totalInputTokens: 100,
46
+ totalOutputTokens: 50,
47
+ outputTextTokens: 50,
48
+ totalTokens: 170, // 150 + 20 (citation tokens)
49
+ });
50
+ });
51
+
52
+ it('should handle cached tokens correctly', () => {
53
+ // Arrange
54
+ const usageWithCache = {
55
+ prompt_tokens: 100,
56
+ prompt_cache_hit_tokens: 30,
57
+ prompt_cache_miss_tokens: 70,
58
+ completion_tokens: 50,
59
+ total_tokens: 150,
60
+ } as OpenAI.Completions.CompletionUsage;
61
+
62
+ // Act
63
+ const result = convertOpenAIUsage(usageWithCache);
64
+
65
+ // Assert
66
+ expect(result).toEqual({
67
+ inputTextTokens: 100,
68
+ inputCachedTokens: 30,
69
+ inputCacheMissTokens: 70,
70
+ totalInputTokens: 100,
71
+ totalOutputTokens: 50,
72
+ outputTextTokens: 50,
73
+ totalTokens: 150,
74
+ });
75
+ });
76
+
77
+ it('should handle cached tokens using prompt_tokens_details', () => {
78
+ // Arrange
79
+ const usageWithTokenDetails = {
80
+ prompt_tokens: 100,
81
+ prompt_tokens_details: {
82
+ cached_tokens: 30,
83
+ },
84
+ completion_tokens: 50,
85
+ total_tokens: 150,
86
+ } as OpenAI.Completions.CompletionUsage;
87
+
88
+ // Act
89
+ const result = convertOpenAIUsage(usageWithTokenDetails);
90
+
91
+ // Assert
92
+ expect(result).toEqual({
93
+ inputTextTokens: 100,
94
+ inputCachedTokens: 30,
95
+ inputCacheMissTokens: 70, // 100 - 30
96
+ totalInputTokens: 100,
97
+ totalOutputTokens: 50,
98
+ outputTextTokens: 50,
99
+ totalTokens: 150,
100
+ });
101
+ });
102
+
103
+ it('should handle audio tokens in input correctly', () => {
104
+ // Arrange
105
+ const usageWithAudioInput = {
106
+ prompt_tokens: 100,
107
+ prompt_tokens_details: {
108
+ audio_tokens: 20,
109
+ },
110
+ completion_tokens: 50,
111
+ total_tokens: 150,
112
+ } as OpenAI.Completions.CompletionUsage;
113
+
114
+ // Act
115
+ const result = convertOpenAIUsage(usageWithAudioInput);
116
+
117
+ // Assert
118
+ expect(result).toEqual({
119
+ inputTextTokens: 100,
120
+ inputAudioTokens: 20,
121
+ totalInputTokens: 100,
122
+ totalOutputTokens: 50,
123
+ outputTextTokens: 50,
124
+ totalTokens: 150,
125
+ });
126
+ });
127
+
128
+ it('should handle detailed output tokens correctly', () => {
129
+ // Arrange
130
+ const usageWithOutputDetails = {
131
+ prompt_tokens: 100,
132
+ completion_tokens: 100,
133
+ completion_tokens_details: {
134
+ reasoning_tokens: 30,
135
+ audio_tokens: 20,
136
+ },
137
+ total_tokens: 200,
138
+ } as OpenAI.Completions.CompletionUsage;
139
+
140
+ // Act
141
+ const result = convertOpenAIUsage(usageWithOutputDetails);
142
+
143
+ // Assert
144
+ expect(result).toEqual({
145
+ inputTextTokens: 100,
146
+ totalInputTokens: 100,
147
+ totalOutputTokens: 100,
148
+ outputReasoningTokens: 30,
149
+ outputAudioTokens: 20,
150
+ outputTextTokens: 50, // 100 - 30 - 20
151
+ totalTokens: 200,
152
+ });
153
+ });
154
+
155
+ it('should handle prediction tokens correctly', () => {
156
+ // Arrange
157
+ const usageWithPredictions = {
158
+ prompt_tokens: 100,
159
+ completion_tokens: 80,
160
+ completion_tokens_details: {
161
+ accepted_prediction_tokens: 30,
162
+ rejected_prediction_tokens: 10,
163
+ },
164
+ total_tokens: 180,
165
+ } as OpenAI.Completions.CompletionUsage;
166
+
167
+ // Act
168
+ const result = convertOpenAIUsage(usageWithPredictions);
169
+
170
+ // Assert
171
+ expect(result).toEqual({
172
+ inputTextTokens: 100,
173
+ totalInputTokens: 100,
174
+ totalOutputTokens: 80,
175
+ outputTextTokens: 80,
176
+ acceptedPredictionTokens: 30,
177
+ rejectedPredictionTokens: 10,
178
+ totalTokens: 180,
179
+ });
180
+ });
181
+
182
+ it('should handle complex usage with all fields correctly', () => {
183
+ // Arrange
184
+ const complexUsage = {
185
+ prompt_tokens: 150,
186
+ prompt_tokens_details: {
187
+ audio_tokens: 50,
188
+ cached_tokens: 40,
189
+ },
190
+ citation_tokens: 30,
191
+ completion_tokens: 120,
192
+ completion_tokens_details: {
193
+ reasoning_tokens: 40,
194
+ audio_tokens: 30,
195
+ accepted_prediction_tokens: 20,
196
+ rejected_prediction_tokens: 5,
197
+ },
198
+ total_tokens: 300,
199
+ } as OpenAI.Completions.CompletionUsage;
200
+
201
+ // Act
202
+ const result = convertOpenAIUsage(complexUsage);
203
+
204
+ // Assert
205
+ expect(result).toEqual({
206
+ inputTextTokens: 150,
207
+ inputAudioTokens: 50,
208
+ inputCachedTokens: 40,
209
+ inputCacheMissTokens: 140, // 180 - 40 (totalInputTokens - cachedTokens)
210
+ inputCitationTokens: 30,
211
+ totalInputTokens: 180, // 150 + 30
212
+ outputTextTokens: 50, // 120 - 40 - 30
213
+ outputReasoningTokens: 40,
214
+ outputAudioTokens: 30,
215
+ totalOutputTokens: 120,
216
+ acceptedPredictionTokens: 20,
217
+ rejectedPredictionTokens: 5,
218
+ totalTokens: 330, // 300 + 30 (citation_tokens)
219
+ });
220
+ });
221
+
222
+ it('should omit zero or undefined values in the final output', () => {
223
+ // Arrange
224
+ const usageWithZeros = {
225
+ prompt_tokens: 100,
226
+ completion_tokens: 50,
227
+ total_tokens: 150,
228
+ completion_tokens_details: {
229
+ reasoning_tokens: 0,
230
+ audio_tokens: undefined,
231
+ },
232
+ } as OpenAI.Completions.CompletionUsage;
233
+
234
+ // Act
235
+ const result = convertOpenAIUsage(usageWithZeros);
236
+
237
+ // Assert
238
+ expect(result).toEqual({
239
+ inputTextTokens: 100,
240
+ totalInputTokens: 100,
241
+ totalOutputTokens: 50,
242
+ outputTextTokens: 50,
243
+ totalTokens: 150,
244
+ });
245
+
246
+ // These should not be present in the result
247
+ expect(result).not.toHaveProperty('outputReasoningTokens');
248
+ expect(result).not.toHaveProperty('outputAudioTokens');
249
+ });
250
+
251
+ it('should handle XAI provider correctly where completion_tokens does not include reasoning_tokens', () => {
252
+ // Arrange
253
+ const xaiUsage: OpenAI.Completions.CompletionUsage = {
254
+ prompt_tokens: 6103,
255
+ completion_tokens: 66, // 这个不包含 reasoning_tokens
256
+ total_tokens: 6550,
257
+ prompt_tokens_details: {
258
+ audio_tokens: 0,
259
+ cached_tokens: 0,
260
+ },
261
+ completion_tokens_details: {
262
+ accepted_prediction_tokens: 0,
263
+ audio_tokens: 0,
264
+ reasoning_tokens: 381, // 这是额外的 reasoning tokens
265
+ rejected_prediction_tokens: 0,
266
+ },
267
+ };
268
+
269
+ // Act
270
+ const xaiResult = convertOpenAIUsage(xaiUsage, {
271
+ provider: 'xai',
272
+ });
273
+
274
+ // Assert
275
+ expect(xaiResult).toMatchObject({
276
+ totalInputTokens: 6103,
277
+ totalOutputTokens: 447, // 66 + 381,xai的reasoning_tokens和completion_tokens价格一样
278
+ outputTextTokens: 66, // 不减去 reasoning_tokens
279
+ outputReasoningTokens: 381,
280
+ totalTokens: 6550,
281
+ });
282
+
283
+ // 测试其他 provider(默认行为)
284
+ const defaultResult = convertOpenAIUsage(xaiUsage);
285
+
286
+ // 默认行为: outputTextTokens 应该是 completion_tokens - reasoning_tokens - audio_tokens = 66 - 381 - 0 = -315
287
+ expect(defaultResult.outputTextTokens).toBe(-315);
288
+ expect(defaultResult).toMatchObject({
289
+ totalInputTokens: 6103,
290
+ totalOutputTokens: 66,
291
+ outputTextTokens: -315, // 负数确实会出现在结果中
292
+ outputReasoningTokens: 381,
293
+ totalTokens: 6550,
294
+ });
295
+ });
296
+
297
+ it('should handle output image tokens correctly', () => {
298
+ // Arrange
299
+ const usageWithImage = {
300
+ prompt_tokens: 100,
301
+ completion_tokens: 200,
302
+ completion_tokens_details: {
303
+ image_tokens: 60,
304
+ reasoning_tokens: 30,
305
+ },
306
+ total_tokens: 300,
307
+ } as OpenAI.Completions.CompletionUsage;
308
+
309
+ // Act
310
+ const result = convertOpenAIUsage(usageWithImage);
311
+
312
+ // Assert
313
+ expect(result).toEqual({
314
+ inputTextTokens: 100,
315
+ totalInputTokens: 100,
316
+ totalOutputTokens: 200,
317
+ outputImageTokens: 60,
318
+ outputReasoningTokens: 30,
319
+ outputTextTokens: 110, // 200 - 60 - 30
320
+ totalTokens: 300,
321
+ });
322
+ });
323
+
324
+ it('should handle response output image tokens correctly for ResponseUsage', () => {
325
+ // Arrange
326
+ const responseUsage = {
327
+ input_tokens: 100,
328
+ input_tokens_details: {
329
+ cached_tokens: 0,
330
+ },
331
+ output_tokens: 200,
332
+ output_tokens_details: {
333
+ image_tokens: 60,
334
+ reasoning_tokens: 30,
335
+ },
336
+ total_tokens: 300,
337
+ } as OpenAI.Responses.ResponseUsage;
338
+
339
+ // Act
340
+ const result = convertOpenAIResponseUsage(responseUsage);
341
+
342
+ // Assert
343
+ expect(result).toEqual({
344
+ inputTextTokens: 100,
345
+ inputCacheMissTokens: 100, // 100 - 0
346
+ totalInputTokens: 100,
347
+ totalOutputTokens: 200,
348
+ outputImageTokens: 60,
349
+ outputReasoningTokens: 30,
350
+ outputTextTokens: 170, // 200 - 30
351
+ totalTokens: 300,
352
+ });
353
+ });
354
+
355
+ it('should enrich completion usage with pricing cost when pricing is provided', () => {
356
+ const pricing: Pricing = {
357
+ units: [
358
+ { name: 'textInput', rate: 1, strategy: 'fixed', unit: 'millionTokens' },
359
+ { name: 'textOutput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
360
+ ],
361
+ };
362
+
363
+ const usage: OpenAI.Completions.CompletionUsage = {
364
+ completion_tokens: 500_000,
365
+ prompt_tokens: 1_000_000,
366
+ total_tokens: 1_500_000,
367
+ };
368
+
369
+ const result = convertOpenAIUsage(usage, { pricing });
370
+
371
+ expect(result.cost).toBeCloseTo(2, 10);
372
+ });
373
+
374
+ it('should enrich response usage with pricing cost when pricing is provided', () => {
375
+ const pricing: Pricing = {
376
+ units: [
377
+ { name: 'textInput', rate: 1, strategy: 'fixed', unit: 'millionTokens' },
378
+ { name: 'textOutput', rate: 1, strategy: 'fixed', unit: 'millionTokens' },
379
+ ],
380
+ };
381
+
382
+ const responseUsage = {
383
+ input_tokens: 1_000_000,
384
+ output_tokens: 1_000_000,
385
+ total_tokens: 2_000_000,
386
+ } as OpenAI.Responses.ResponseUsage;
387
+
388
+ const result = convertOpenAIResponseUsage(responseUsage, { pricing });
389
+
390
+ expect(result.cost).toBeCloseTo(2, 10);
391
+ });
392
+ });
393
+
394
+ describe('convertOpenAIImageUsage', () => {
395
+ it('should convert gpt-image-1 usage data correctly', () => {
396
+ // Arrange - Based on actual gpt-image-1 logs
397
+ const gptImage1Usage: OpenAI.Images.ImagesResponse.Usage = {
398
+ input_tokens: 14,
399
+ input_tokens_details: {
400
+ text_tokens: 14,
401
+ image_tokens: 0,
402
+ },
403
+ output_tokens: 4160,
404
+ total_tokens: 4174,
405
+ };
406
+
407
+ const pricing: Pricing = {
408
+ units: [
409
+ { name: 'textInput', rate: 5, strategy: 'fixed', unit: 'millionTokens' },
410
+ { name: 'imageInput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
411
+ { name: 'imageOutput', rate: 40, strategy: 'fixed', unit: 'millionTokens' },
412
+ ],
413
+ };
414
+
415
+ // Act
416
+ const result = convertOpenAIImageUsage(gptImage1Usage, pricing);
417
+
418
+ // Assert
419
+ expect(result).toEqual({
420
+ inputTextTokens: 14,
421
+ inputImageTokens: 0,
422
+ outputImageTokens: 4160,
423
+ totalInputTokens: 14,
424
+ totalOutputTokens: 4160,
425
+ totalTokens: 4174,
426
+ cost: 0.16647, // Based on pricing: 14 * 5/1M + 0 * 10/1M + 4160 * 40/1M = 0.00007 + 0 + 0.1664 = 0.16647
427
+ });
428
+ });
429
+ });