@lobehub/chat 1.111.1 → 1.111.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. package/.cursor/rules/code-review.mdc +2 -19
  2. package/.cursor/rules/cursor-ux.mdc +0 -72
  3. package/.cursor/rules/project-introduce.mdc +5 -5
  4. package/.cursor/rules/react-component.mdc +92 -73
  5. package/.cursor/rules/rules-attach.mdc +28 -61
  6. package/.cursor/rules/system-role.mdc +8 -20
  7. package/.cursor/rules/typescript.mdc +55 -14
  8. package/CHANGELOG.md +25 -0
  9. package/changelog/v1.json +5 -0
  10. package/package.json +1 -1
  11. package/packages/types/src/aiModel.ts +67 -46
  12. package/packages/types/src/llm.ts +3 -3
  13. package/src/app/[variants]/(main)/discover/(detail)/model/[...slugs]/features/Details/Overview/ProviderList/index.tsx +23 -12
  14. package/src/app/[variants]/(main)/discover/(detail)/provider/[...slugs]/features/Details/Overview/ModelList/index.tsx +23 -10
  15. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelItem.tsx +21 -12
  16. package/src/config/aiModels/ai21.ts +8 -4
  17. package/src/config/aiModels/ai360.ts +28 -14
  18. package/src/config/aiModels/aihubmix.ts +174 -86
  19. package/src/config/aiModels/anthropic.ts +97 -38
  20. package/src/config/aiModels/azure.ts +54 -32
  21. package/src/config/aiModels/azureai.ts +63 -37
  22. package/src/config/aiModels/baichuan.ts +24 -12
  23. package/src/config/aiModels/bedrock.ts +60 -30
  24. package/src/config/aiModels/cohere.ts +60 -30
  25. package/src/config/aiModels/deepseek.ts +10 -6
  26. package/src/config/aiModels/fireworksai.ts +88 -44
  27. package/src/config/aiModels/giteeai.ts +1 -1
  28. package/src/config/aiModels/github.ts +44 -26
  29. package/src/config/aiModels/google.ts +119 -68
  30. package/src/config/aiModels/groq.ts +48 -24
  31. package/src/config/aiModels/higress.ts +617 -310
  32. package/src/config/aiModels/hunyuan.ts +105 -54
  33. package/src/config/aiModels/infiniai.ts +104 -52
  34. package/src/config/aiModels/internlm.ts +16 -8
  35. package/src/config/aiModels/jina.ts +4 -2
  36. package/src/config/aiModels/minimax.ts +11 -10
  37. package/src/config/aiModels/mistral.ts +40 -20
  38. package/src/config/aiModels/moonshot.ts +42 -22
  39. package/src/config/aiModels/novita.ts +196 -98
  40. package/src/config/aiModels/openai.ts +270 -137
  41. package/src/config/aiModels/openrouter.ts +205 -100
  42. package/src/config/aiModels/perplexity.ts +36 -6
  43. package/src/config/aiModels/ppio.ts +76 -38
  44. package/src/config/aiModels/qwen.ts +257 -133
  45. package/src/config/aiModels/sambanova.ts +56 -28
  46. package/src/config/aiModels/sensenova.ts +100 -50
  47. package/src/config/aiModels/siliconcloud.ts +224 -112
  48. package/src/config/aiModels/stepfun.ts +44 -22
  49. package/src/config/aiModels/taichu.ts +8 -4
  50. package/src/config/aiModels/tencentcloud.ts +12 -6
  51. package/src/config/aiModels/upstage.ts +8 -4
  52. package/src/config/aiModels/v0.ts +15 -12
  53. package/src/config/aiModels/vertexai.ts +49 -27
  54. package/src/config/aiModels/volcengine.ts +110 -51
  55. package/src/config/aiModels/wenxin.ts +179 -73
  56. package/src/config/aiModels/xai.ts +33 -19
  57. package/src/config/aiModels/zeroone.ts +48 -24
  58. package/src/config/aiModels/zhipu.ts +118 -69
  59. package/src/config/modelProviders/ai21.ts +0 -8
  60. package/src/config/modelProviders/ai360.ts +0 -20
  61. package/src/config/modelProviders/anthropic.ts +0 -56
  62. package/src/config/modelProviders/baichuan.ts +0 -30
  63. package/src/config/modelProviders/bedrock.ts +0 -74
  64. package/src/config/modelProviders/deepseek.ts +0 -13
  65. package/src/config/modelProviders/fireworksai.ts +0 -88
  66. package/src/config/modelProviders/google.ts +0 -59
  67. package/src/config/modelProviders/groq.ts +0 -48
  68. package/src/config/modelProviders/higress.ts +0 -727
  69. package/src/config/modelProviders/hunyuan.ts +0 -45
  70. package/src/config/modelProviders/infiniai.ts +0 -60
  71. package/src/config/modelProviders/internlm.ts +0 -8
  72. package/src/config/modelProviders/mistral.ts +0 -48
  73. package/src/config/modelProviders/modelscope.ts +2 -1
  74. package/src/config/modelProviders/openai.ts +5 -100
  75. package/src/config/modelProviders/openrouter.ts +0 -77
  76. package/src/config/modelProviders/ppio.ts +0 -95
  77. package/src/config/modelProviders/qwen.ts +0 -165
  78. package/src/config/modelProviders/sensenova.ts +0 -45
  79. package/src/config/modelProviders/siliconcloud.ts +0 -266
  80. package/src/config/modelProviders/stepfun.ts +0 -60
  81. package/src/config/modelProviders/taichu.ts +0 -10
  82. package/src/config/modelProviders/wenxin.ts +0 -90
  83. package/src/config/modelProviders/xai.ts +0 -16
  84. package/src/config/modelProviders/zeroone.ts +0 -60
  85. package/src/config/modelProviders/zhipu.ts +0 -80
  86. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +4 -3
  87. package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts +25 -15
  88. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +7 -5
  89. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +6 -5
  90. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts +54 -8
  91. package/src/server/routers/lambda/agent.ts +2 -2
  92. package/src/server/routers/lambda/config/__snapshots__/index.test.ts.snap +0 -28
  93. package/src/server/services/discover/index.ts +7 -6
  94. package/src/server/services/user/index.ts +1 -2
  95. package/src/utils/__snapshots__/parseModels.test.ts.snap +28 -4
  96. package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap +0 -8
  97. package/src/utils/parseModels.test.ts +60 -9
  98. package/src/utils/pricing.test.ts +183 -0
  99. package/src/utils/pricing.ts +90 -0
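Key change in this release: model pricing metadata moves from flat per-model fields (input, output, cachedInput, audioInput, ...) to a unit-based pricing.units array, with lookup helpers added in the new package/src/utils/pricing.ts. The following is a rough sketch of the new shape, inferred only from the unit entries visible in the hunks below; the exact declarations in package/packages/types/src/aiModel.ts are not included in this excerpt and may differ.

// Sketch of the unit-based pricing shape, inferred from this diff (not the
// authoritative type definitions in packages/types/src/aiModel.ts).
type PricingUnitName =
  | 'textInput'
  | 'textOutput'
  | 'textInput_cacheRead'
  | 'audioInput'
  | 'audioOutput';

interface PricingUnit {
  name: PricingUnitName;
  rate: number;          // price per `unit`, in `currency`
  strategy: 'fixed';     // only 'fixed' appears in this diff
  unit: 'millionTokens'; // only 'millionTokens' appears in this diff
}

interface Pricing {
  currency?: string;     // the shipped types use ModelPriceCurrency from '@/types/llm'
  units: PricingUnit[];
}

// Example: the old flat form { cachedInput: 1.25, input: 2.5, output: 10 }
// becomes the unit list seen in the test fixtures below.
const gpt4oPricing: Pricing = {
  units: [
    { name: 'textInput_cacheRead', rate: 1.25, strategy: 'fixed', unit: 'millionTokens' },
    { name: 'textInput', rate: 2.5, strategy: 'fixed', unit: 'millionTokens' },
    { name: 'textOutput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
  ],
};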
package/src/config/modelProviders/zhipu.ts
@@ -10,11 +10,6 @@ const ZhiPu: ModelProviderCard = {
  description: 'GLM-Zero-Preview具备强大的复杂推理能力,在逻辑推理、数学、编程等领域表现优异。',
  displayName: 'GLM-Zero-Preview',
  id: 'glm-zero-preview',
- pricing: {
- currency: 'CNY',
- input: 10,
- output: 10,
- },
  },
  {
  contextWindowTokens: 128_000,
@@ -23,11 +18,6 @@ const ZhiPu: ModelProviderCard = {
  enabled: true,
  functionCall: true,
  id: 'glm-4-flash',
- pricing: {
- currency: 'CNY',
- input: 0,
- output: 0,
- },
  },
  {
  contextWindowTokens: 128_000,
@@ -36,11 +26,6 @@ const ZhiPu: ModelProviderCard = {
  enabled: true,
  functionCall: true,
  id: 'glm-4-flashx',
- pricing: {
- currency: 'CNY',
- input: 0.1,
- output: 0.1,
- },
  },
  {
  contextWindowTokens: 1_024_000,
@@ -48,11 +33,6 @@ const ZhiPu: ModelProviderCard = {
  displayName: 'GLM-4-Long',
  functionCall: true,
  id: 'glm-4-long',
- pricing: {
- currency: 'CNY',
- input: 1,
- output: 1,
- },
  },
  {
  contextWindowTokens: 128_000,
@@ -61,11 +41,6 @@ const ZhiPu: ModelProviderCard = {
  enabled: true,
  functionCall: true,
  id: 'glm-4-air',
- pricing: {
- currency: 'CNY',
- input: 1,
- output: 1,
- },
  },
  {
  contextWindowTokens: 8192,
@@ -74,11 +49,6 @@ const ZhiPu: ModelProviderCard = {
  enabled: true,
  functionCall: true,
  id: 'glm-4-airx',
- pricing: {
- currency: 'CNY',
- input: 10,
- output: 10,
- },
  },
  {
  contextWindowTokens: 128_000,
@@ -87,11 +57,6 @@ const ZhiPu: ModelProviderCard = {
  displayName: 'GLM-4-AllTools',
  functionCall: true,
  id: 'glm-4-alltools',
- pricing: {
- currency: 'CNY',
- input: 100,
- output: 100,
- },
  },
  {
  contextWindowTokens: 128_000,
@@ -101,11 +66,6 @@ const ZhiPu: ModelProviderCard = {
  enabled: true,
  functionCall: true,
  id: 'glm-4-plus',
- pricing: {
- currency: 'CNY',
- input: 50,
- output: 50,
- },
  },
  {
  contextWindowTokens: 128_000,
@@ -113,11 +73,6 @@ const ZhiPu: ModelProviderCard = {
  displayName: 'GLM-4-0520',
  functionCall: true,
  id: 'glm-4-0520',
- pricing: {
- currency: 'CNY',
- input: 100,
- output: 100,
- },
  },
  {
  contextWindowTokens: 128_000,
@@ -125,11 +80,6 @@ const ZhiPu: ModelProviderCard = {
  displayName: 'GLM-4',
  functionCall: true,
  id: 'glm-4',
- pricing: {
- currency: 'CNY',
- input: 100,
- output: 100,
- },
  },
  {
  contextWindowTokens: 8192,
@@ -138,11 +88,6 @@ const ZhiPu: ModelProviderCard = {
  displayName: 'GLM-4V-Flash',
  enabled: true,
  id: 'glm-4v-flash',
- pricing: {
- currency: 'CNY',
- input: 0,
- output: 0,
- },
  releasedAt: '2024-12-09',
  vision: true,
  },
@@ -152,11 +97,6 @@ const ZhiPu: ModelProviderCard = {
  displayName: 'GLM-4V-Plus',
  enabled: true,
  id: 'glm-4v-plus',
- pricing: {
- currency: 'CNY',
- input: 10,
- output: 10,
- },
  vision: true,
  },
  {
@@ -164,11 +104,6 @@ const ZhiPu: ModelProviderCard = {
  description: 'GLM-4V 提供强大的图像理解与推理能力,支持多种视觉任务。',
  displayName: 'GLM-4V',
  id: 'glm-4v',
- pricing: {
- currency: 'CNY',
- input: 50,
- output: 50,
- },
  vision: true,
  },
  {
@@ -177,33 +112,18 @@ const ZhiPu: ModelProviderCard = {
  'CodeGeeX-4 是强大的AI编程助手,支持多种编程语言的智能问答与代码补全,提升开发效率。',
  displayName: 'CodeGeeX-4',
  id: 'codegeex-4',
- pricing: {
- currency: 'CNY',
- input: 0.1,
- output: 0.1,
- },
  },
  {
  contextWindowTokens: 4096,
  description: 'CharGLM-3 专为角色扮演与情感陪伴设计,支持超长多轮记忆与个性化对话,应用广泛。',
  displayName: 'CharGLM-3',
  id: 'charglm-3',
- pricing: {
- currency: 'CNY',
- input: 15,
- output: 15,
- },
  },
  {
  contextWindowTokens: 8192,
  description: 'Emohaa 是心理模型,具备专业咨询能力,帮助用户理解情感问题。',
  displayName: 'Emohaa',
  id: 'emohaa',
- pricing: {
- currency: 'CNY',
- input: 15,
- output: 15,
- },
  },
  ],
  checkModel: 'glm-4-flash-250414',
package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx
@@ -10,6 +10,7 @@ import { getPrice } from '@/features/Conversation/Extras/Usage/UsageDetail/prici
  import { useGlobalStore } from '@/store/global';
  import { systemStatusSelectors } from '@/store/global/selectors';
  import { LobeDefaultAiModelListItem } from '@/types/aiModel';
+ import { getCachedTextInputUnitRate, getWriteCacheInputUnitRate } from '@/utils/pricing';

  export const useStyles = createStyles(({ css, token }) => {
  return {
@@ -38,7 +39,7 @@ const ModelCard = memo<ModelCardProps>(({ pricing, id, provider, displayName })
  const isShowCredit = useGlobalStore(systemStatusSelectors.isShowCredit) && !!pricing;
  const updateSystemStatus = useGlobalStore((s) => s.updateSystemStatus);

- const formatPrice = getPrice(pricing || {});
+ const formatPrice = getPrice(pricing || { units: [] });

  return (
  <Flexbox gap={8}>
@@ -87,7 +88,7 @@ const ModelCard = memo<ModelCardProps>(({ pricing, id, provider, displayName })
  <div />
  <Flexbox align={'center'} className={styles.pricing} gap={8} horizontal>
  {t('messages.modelCard.creditPricing')}:
- {pricing?.cachedInput && (
+ {getCachedTextInputUnitRate(pricing) && (
  <Tooltip
  title={t('messages.modelCard.pricing.inputCachedTokens', {
  amount: formatPrice.cachedInput,
@@ -99,7 +100,7 @@ const ModelCard = memo<ModelCardProps>(({ pricing, id, provider, displayName })
  </Flexbox>
  </Tooltip>
  )}
- {pricing?.writeCacheInput && (
+ {getWriteCacheInputUnitRate(pricing) && (
  <Tooltip
  title={t('messages.modelCard.pricing.writeCacheInputTokens', {
  amount: formatPrice.writeCacheInput,
package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts
@@ -1,21 +1,31 @@
- import { ChatModelPricing } from '@/types/aiModel';
+ import { Pricing } from '@/types/aiModel';
  import { ModelPriceCurrency } from '@/types/llm';
  import { formatPriceByCurrency } from '@/utils/format';
+ import {
+ getCachedTextInputUnitRate,
+ getTextInputUnitRate,
+ getTextOutputUnitRate,
+ getWriteCacheInputUnitRate,
+ } from '@/utils/pricing';

- export const getPrice = (pricing: ChatModelPricing) => {
- const inputPrice = formatPriceByCurrency(pricing?.input, pricing?.currency as ModelPriceCurrency);
- const cachedInputPrice = formatPriceByCurrency(
- pricing?.cachedInput,
- pricing?.currency as ModelPriceCurrency,
- );
- const writeCacheInputPrice = formatPriceByCurrency(
- pricing?.writeCacheInput,
- pricing?.currency as ModelPriceCurrency,
- );
- const outputPrice = formatPriceByCurrency(
- pricing?.output,
- pricing?.currency as ModelPriceCurrency,
- );
+ export const getPrice = (pricing: Pricing) => {
+ const inputRate = getTextInputUnitRate(pricing);
+ const outputRate = getTextOutputUnitRate(pricing);
+ const cachedInputRate = getCachedTextInputUnitRate(pricing);
+ const writeCacheInputRate = getWriteCacheInputUnitRate(pricing);
+
+ const inputPrice = inputRate
+ ? formatPriceByCurrency(inputRate, pricing?.currency as ModelPriceCurrency)
+ : '0';
+ const cachedInputPrice = cachedInputRate
+ ? formatPriceByCurrency(cachedInputRate, pricing?.currency as ModelPriceCurrency)
+ : '0';
+ const writeCacheInputPrice = writeCacheInputRate
+ ? formatPriceByCurrency(writeCacheInputRate, pricing?.currency as ModelPriceCurrency)
+ : '0';
+ const outputPrice = outputRate
+ ? formatPriceByCurrency(outputRate, pricing?.currency as ModelPriceCurrency)
+ : '0';

  return {
  cachedInput: Number(cachedInputPrice),
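The @/utils/pricing helpers imported above come from the new package/src/utils/pricing.ts (file 99 in the list, +90 lines), whose body is not included in this excerpt. Judging from the call sites in this diff, each helper resolves the rate of one named unit from pricing.units. A minimal sketch, assuming the unit-based Pricing shape outlined after the file list; the shipped implementation may handle more strategies and edge cases:

// Hypothetical sketch of src/utils/pricing.ts, inferred from how the helpers are
// called in this diff; the exported names match the imports above, but the real
// implementation is not shown here.
import { Pricing, PricingUnitName } from '@/types/aiModel'; // assumed exports

const getUnitRate = (pricing: Pricing | undefined, name: PricingUnitName): number | undefined =>
  pricing?.units?.find((unit) => unit.name === name)?.rate;

export const getTextInputUnitRate = (pricing?: Pricing) => getUnitRate(pricing, 'textInput');
export const getTextOutputUnitRate = (pricing?: Pricing) => getUnitRate(pricing, 'textOutput');
export const getCachedTextInputUnitRate = (pricing?: Pricing) =>
  getUnitRate(pricing, 'textInput_cacheRead');
export const getAudioInputUnitRate = (pricing?: Pricing) => getUnitRate(pricing, 'audioInput');
export const getAudioOutputUnitRate = (pricing?: Pricing) => getUnitRate(pricing, 'audioOutput');
// getWriteCacheInputUnitRate would follow the same pattern; the exact unit name
// for cache writes does not appear in this excerpt, so it is not guessed here.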
package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts
@@ -9,11 +9,13 @@ describe('getDetailsToken', () => {
  // 基本测试数据
  const mockModelCard: LobeDefaultAiModelListItem = {
  pricing: {
- input: 0.01,
- output: 0.02,
- cachedInput: 0.005,
- audioInput: 0.03,
- audioOutput: 0.04,
+ units: [
+ { name: 'textInput', unit: 'millionTokens', strategy: 'fixed', rate: 0.01 },
+ { name: 'textOutput', unit: 'millionTokens', strategy: 'fixed', rate: 0.02 },
+ { name: 'textInput_cacheRead', unit: 'millionTokens', strategy: 'fixed', rate: 0.005 },
+ { name: 'audioInput', unit: 'millionTokens', strategy: 'fixed', rate: 0.03 },
+ { name: 'audioOutput', unit: 'millionTokens', strategy: 'fixed', rate: 0.04 },
+ ],
  },
  } as LobeDefaultAiModelListItem;

package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts
@@ -1,5 +1,6 @@
- import { ChatModelPricing, LobeDefaultAiModelListItem } from '@/types/aiModel';
+ import { LobeDefaultAiModelListItem } from '@/types/aiModel';
  import { ModelTokensUsage } from '@/types/message';
+ import { getAudioInputUnitRate, getAudioOutputUnitRate } from '@/utils/pricing';

  import { getPrice } from './pricing';

@@ -32,7 +33,7 @@ export const getDetailsToken = (
  : totalInputTokens - (inputCacheTokens || 0);

  // Pricing
- const formatPrice = getPrice(modelCard?.pricing as ChatModelPricing);
+ const formatPrice = getPrice(modelCard?.pricing || { units: [] });

  const inputCacheMissCredit = (
  !!inputCacheMissTokens ? calcCredit(inputCacheMissTokens, formatPrice.input) : 0
@@ -50,7 +51,7 @@
  !!totalOutputTokens ? calcCredit(totalOutputTokens, formatPrice.output) : 0
  ) as number;
  const totalInputCredit = (
- !!totalInputTokens ? calcCredit(totalInputTokens, formatPrice.output) : 0
+ !!totalInputTokens ? calcCredit(totalInputTokens, formatPrice.input) : 0
  ) as number;

  const totalCredit =
@@ -59,7 +60,7 @@
  return {
  inputAudio: !!usage.inputAudioTokens
  ? {
- credit: calcCredit(usage.inputAudioTokens, modelCard?.pricing?.audioInput),
+ credit: calcCredit(usage.inputAudioTokens, getAudioInputUnitRate(modelCard?.pricing)),
  token: usage.inputAudioTokens,
  }
  : undefined,
@@ -87,7 +88,7 @@

  outputAudio: !!usage.outputAudioTokens
  ? {
- credit: calcCredit(usage.outputAudioTokens, modelCard?.pricing?.audioOutput),
+ credit: calcCredit(usage.outputAudioTokens, getAudioOutputUnitRate(modelCard?.pricing)),
  id: 'outputAudio',
  token: usage.outputAudioTokens,
  }
package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts
@@ -1352,9 +1352,26 @@ describe('LobeOpenAICompatibleFactory', () => {
  id: 'gpt-4o',
  maxOutput: 4096,
  pricing: {
- cachedInput: 1.25,
- input: 2.5,
- output: 10,
+ units: [
+ {
+ name: 'textInput_cacheRead',
+ rate: 1.25,
+ strategy: 'fixed',
+ unit: 'millionTokens',
+ },
+ {
+ name: 'textInput',
+ rate: 2.5,
+ strategy: 'fixed',
+ unit: 'millionTokens',
+ },
+ {
+ name: 'textOutput',
+ rate: 10,
+ strategy: 'fixed',
+ unit: 'millionTokens',
+ },
+ ],
  },
  providerId: 'azure',
  releasedAt: '2024-05-13',
@@ -1374,8 +1391,20 @@ describe('LobeOpenAICompatibleFactory', () => {
  id: 'claude-3-haiku-20240307',
  maxOutput: 4096,
  pricing: {
- input: 0.25,
- output: 1.25,
+ units: [
+ {
+ name: 'textInput',
+ rate: 0.25,
+ strategy: 'fixed',
+ unit: 'millionTokens',
+ },
+ {
+ name: 'textOutput',
+ rate: 1.25,
+ strategy: 'fixed',
+ unit: 'millionTokens',
+ },
+ ],
  },
  providerId: 'anthropic',
  releasedAt: '2024-03-07',
@@ -1400,9 +1429,26 @@ describe('LobeOpenAICompatibleFactory', () => {
  id: 'gpt-4o-mini',
  maxOutput: 4096,
  pricing: {
- cachedInput: 0.075,
- input: 0.15,
- output: 0.6,
+ units: [
+ {
+ name: 'textInput_cacheRead',
+ rate: 0.075,
+ strategy: 'fixed',
+ unit: 'millionTokens',
+ },
+ {
+ name: 'textInput',
+ rate: 0.15,
+ strategy: 'fixed',
+ unit: 'millionTokens',
+ },
+ {
+ name: 'textOutput',
+ rate: 0.6,
+ strategy: 'fixed',
+ unit: 'millionTokens',
+ },
+ ],
  },
  providerId: 'azure',
  releasedAt: '2023-10-26',
package/src/server/routers/lambda/agent.ts
@@ -93,8 +93,8 @@ export const agentRouter = router({
  const user = await UserModel.findById(ctx.serverDB, ctx.userId);
  if (!user) return DEFAULT_AGENT_CONFIG;

- await ctx.agentService.createInbox();
- pino.info('create inbox session');
+ const res = await ctx.agentService.createInbox();
+ pino.info({ res }, 'create inbox session');
  }
  }

package/src/server/routers/lambda/config/__snapshots__/index.test.ts.snap
@@ -26,10 +26,6 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
  "enabled": true,
  "functionCall": true,
  "id": "gpt-4-0125-preview",
- "pricing": {
- "input": 10,
- "output": 30,
- },
  },
  ],
  }
@@ -44,10 +40,6 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
  "enabled": true,
  "functionCall": true,
  "id": "gpt-3.5-turbo-1106",
- "pricing": {
- "input": 1,
- "output": 2,
- },
  },
  {
  "contextWindowTokens": 16385,
@@ -56,10 +48,6 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
  "enabled": true,
  "functionCall": true,
  "id": "gpt-3.5-turbo",
- "pricing": {
- "input": 0.5,
- "output": 1.5,
- },
  },
  {
  "contextWindowTokens": 8192,
@@ -68,10 +56,6 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
  "enabled": true,
  "functionCall": true,
  "id": "gpt-4",
- "pricing": {
- "input": 30,
- "output": 60,
- },
  },
  {
  "contextWindowTokens": 32768,
@@ -80,10 +64,6 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
  "enabled": true,
  "functionCall": true,
  "id": "gpt-4-32k",
- "pricing": {
- "input": 60,
- "output": 120,
- },
  },
  {
  "contextWindowTokens": 128000,
@@ -92,10 +72,6 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
  "enabled": true,
  "functionCall": true,
  "id": "gpt-4-1106-preview",
- "pricing": {
- "input": 10,
- "output": 30,
- },
  },
  {
  "displayName": "gpt-4-vision",
@@ -113,10 +89,6 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
  "enabled": true,
  "functionCall": true,
  "id": "gpt-4-1106-preview",
- "pricing": {
- "input": 10,
- "output": 30,
- },
  }
  `;

package/src/server/services/discover/index.ts
@@ -43,6 +43,7 @@ import {
  ProviderQueryParams,
  ProviderSorts,
  } from '@/types/discover';
+ import { getAudioInputUnitRate, getTextInputUnitRate, getTextOutputUnitRate } from '@/utils/pricing';

  const log = debug('lobe-server:discover');

@@ -1142,13 +1143,13 @@
  list = list.sort((a, b) => {
  if (order === 'asc') {
  return (
- (a.pricing?.input || a.pricing?.audioInput || 0) -
- (b.pricing?.input || b.pricing?.audioInput || 0)
+ (getTextInputUnitRate(a.pricing) || getAudioInputUnitRate(a.pricing) || 0) -
+ (getTextInputUnitRate(b.pricing) || getAudioInputUnitRate(b.pricing) || 0)
  );
  } else {
  return (
- (b.pricing?.input || b.pricing?.audioInput || 0) -
- (a.pricing?.input || a.pricing?.audioInput || 0)
+ (getTextInputUnitRate(b.pricing) || getAudioInputUnitRate(b.pricing) || 0) -
+ (getTextInputUnitRate(a.pricing) || getAudioInputUnitRate(a.pricing) || 0)
  );
  }
  });
@@ -1157,9 +1158,9 @@
  case ModelSorts.OutputPrice: {
  list = list.sort((a, b) => {
  if (order === 'asc') {
- return (a.pricing?.output || 0) - (b.pricing?.output || 0);
+ return (getTextOutputUnitRate(a.pricing) || 0) - (getTextOutputUnitRate(b.pricing) || 0);
  } else {
- return (b.pricing?.output || 0) - (a.pricing?.output || 0);
+ return (getTextOutputUnitRate(b.pricing) || 0) - (getTextOutputUnitRate(a.pricing) || 0);
  }
  });
  break;
package/src/server/services/user/index.ts
@@ -125,8 +125,7 @@
  }
  return Buffer.from(file);
  } catch (error) {
- // @ts-expect-error 这里很奇怪,升级了 pino 就报错,我怀疑是 pino 的问题
- pino.error('Failed to get user avatar:', error);
+ pino.error({ error }, 'Failed to get user avatar');
  }
  };
  }
package/src/utils/__snapshots__/parseModels.test.ts.snap
@@ -112,8 +112,20 @@ exports[`transformToChatModelCards > should have file with builtin models like g
  "enabled": true,
  "id": "gpt-4-0125-preview",
  "pricing": {
- "input": 10,
- "output": 30,
+ "units": [
+ {
+ "name": "textInput",
+ "rate": 10,
+ "strategy": "fixed",
+ "unit": "millionTokens",
+ },
+ {
+ "name": "textOutput",
+ "rate": 30,
+ "strategy": "fixed",
+ "unit": "millionTokens",
+ },
+ ],
  },
  "providerId": "openai",
  "releasedAt": "2024-01-25",
@@ -132,8 +144,20 @@ exports[`transformToChatModelCards > should have file with builtin models like g
  "enabled": true,
  "id": "gpt-4-turbo-2024-04-09",
  "pricing": {
- "input": 10,
- "output": 30,
+ "units": [
+ {
+ "name": "textInput",
+ "rate": 10,
+ "strategy": "fixed",
+ "unit": "millionTokens",
+ },
+ {
+ "name": "textOutput",
+ "rate": 30,
+ "strategy": "fixed",
+ "unit": "millionTokens",
+ },
+ ],
  },
  "providerId": "openai",
  "releasedAt": "2024-04-09",
package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap
@@ -89,10 +89,6 @@ exports[`transformToChatModelCards > should have file with builtin models like g
  "files": true,
  "functionCall": true,
  "id": "gpt-4-0125-preview",
- "pricing": {
- "input": 10,
- "output": 30,
- },
  },
  {
  "contextWindowTokens": 128000,
@@ -102,10 +98,6 @@ exports[`transformToChatModelCards > should have file with builtin models like g
  "files": true,
  "functionCall": true,
  "id": "gpt-4-turbo-2024-04-09",
- "pricing": {
- "input": 10,
- "output": 30,
- },
  "vision": true,
  },
  ]