@lobehub/chat 1.111.1 → 1.111.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/.cursor/rules/code-review.mdc +2 -19
  2. package/.cursor/rules/cursor-ux.mdc +0 -72
  3. package/.cursor/rules/project-introduce.mdc +5 -5
  4. package/.cursor/rules/react-component.mdc +92 -73
  5. package/.cursor/rules/rules-attach.mdc +28 -61
  6. package/.cursor/rules/system-role.mdc +8 -20
  7. package/.cursor/rules/typescript.mdc +55 -14
  8. package/CHANGELOG.md +52 -0
  9. package/changelog/v1.json +14 -0
  10. package/locales/ar/models.json +21 -3
  11. package/locales/bg-BG/models.json +21 -3
  12. package/locales/de-DE/models.json +21 -3
  13. package/locales/en-US/models.json +21 -3
  14. package/locales/es-ES/models.json +21 -3
  15. package/locales/fa-IR/models.json +21 -3
  16. package/locales/fr-FR/models.json +21 -3
  17. package/locales/it-IT/models.json +21 -3
  18. package/locales/ja-JP/models.json +21 -3
  19. package/locales/ko-KR/models.json +21 -3
  20. package/locales/nl-NL/models.json +21 -3
  21. package/locales/pl-PL/models.json +21 -3
  22. package/locales/pt-BR/models.json +21 -3
  23. package/locales/ru-RU/models.json +21 -3
  24. package/locales/tr-TR/models.json +21 -3
  25. package/locales/vi-VN/models.json +21 -3
  26. package/locales/zh-CN/models.json +21 -3
  27. package/locales/zh-TW/models.json +21 -3
  28. package/package.json +1 -1
  29. package/packages/types/src/aiModel.ts +67 -46
  30. package/packages/types/src/hotkey.ts +2 -0
  31. package/packages/types/src/llm.ts +3 -3
  32. package/src/app/[variants]/(main)/_layout/Desktop/SideBar/PinList/index.tsx +3 -3
  33. package/src/app/[variants]/(main)/_layout/Desktop/SideBar/TopActions.test.tsx +1 -0
  34. package/src/app/[variants]/(main)/_layout/Desktop/SideBar/TopActions.tsx +11 -2
  35. package/src/app/[variants]/(main)/_layout/Desktop/SideBar/index.tsx +2 -2
  36. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx +2 -2
  37. package/src/app/[variants]/(main)/chat/_layout/Desktop/SessionPanel.tsx +2 -2
  38. package/src/app/[variants]/(main)/discover/(detail)/model/[...slugs]/features/Details/Overview/ProviderList/index.tsx +23 -12
  39. package/src/app/[variants]/(main)/discover/(detail)/provider/[...slugs]/features/Details/Overview/ModelList/index.tsx +23 -10
  40. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelItem.tsx +21 -12
  41. package/src/components/Thinking/index.tsx +53 -13
  42. package/src/config/aiModels/ai21.ts +8 -4
  43. package/src/config/aiModels/ai360.ts +28 -14
  44. package/src/config/aiModels/aihubmix.ts +174 -86
  45. package/src/config/aiModels/anthropic.ts +97 -38
  46. package/src/config/aiModels/azure.ts +54 -32
  47. package/src/config/aiModels/azureai.ts +63 -37
  48. package/src/config/aiModels/baichuan.ts +24 -12
  49. package/src/config/aiModels/bedrock.ts +60 -30
  50. package/src/config/aiModels/cohere.ts +60 -30
  51. package/src/config/aiModels/deepseek.ts +10 -6
  52. package/src/config/aiModels/fireworksai.ts +88 -44
  53. package/src/config/aiModels/giteeai.ts +1 -1
  54. package/src/config/aiModels/github.ts +44 -26
  55. package/src/config/aiModels/google.ts +119 -68
  56. package/src/config/aiModels/groq.ts +48 -24
  57. package/src/config/aiModels/higress.ts +617 -310
  58. package/src/config/aiModels/hunyuan.ts +105 -54
  59. package/src/config/aiModels/infiniai.ts +104 -52
  60. package/src/config/aiModels/internlm.ts +16 -8
  61. package/src/config/aiModels/jina.ts +4 -2
  62. package/src/config/aiModels/minimax.ts +11 -10
  63. package/src/config/aiModels/mistral.ts +40 -20
  64. package/src/config/aiModels/moonshot.ts +42 -22
  65. package/src/config/aiModels/novita.ts +196 -98
  66. package/src/config/aiModels/openai.ts +270 -137
  67. package/src/config/aiModels/openrouter.ts +205 -100
  68. package/src/config/aiModels/perplexity.ts +36 -6
  69. package/src/config/aiModels/ppio.ts +76 -38
  70. package/src/config/aiModels/qwen.ts +257 -133
  71. package/src/config/aiModels/sambanova.ts +56 -28
  72. package/src/config/aiModels/sensenova.ts +100 -50
  73. package/src/config/aiModels/siliconcloud.ts +224 -112
  74. package/src/config/aiModels/stepfun.ts +44 -22
  75. package/src/config/aiModels/taichu.ts +8 -4
  76. package/src/config/aiModels/tencentcloud.ts +12 -6
  77. package/src/config/aiModels/upstage.ts +8 -4
  78. package/src/config/aiModels/v0.ts +15 -12
  79. package/src/config/aiModels/vertexai.ts +49 -27
  80. package/src/config/aiModels/volcengine.ts +110 -51
  81. package/src/config/aiModels/wenxin.ts +179 -73
  82. package/src/config/aiModels/xai.ts +33 -19
  83. package/src/config/aiModels/zeroone.ts +48 -24
  84. package/src/config/aiModels/zhipu.ts +118 -69
  85. package/src/config/modelProviders/ai21.ts +0 -8
  86. package/src/config/modelProviders/ai360.ts +0 -20
  87. package/src/config/modelProviders/anthropic.ts +0 -56
  88. package/src/config/modelProviders/baichuan.ts +0 -30
  89. package/src/config/modelProviders/bedrock.ts +0 -74
  90. package/src/config/modelProviders/deepseek.ts +0 -13
  91. package/src/config/modelProviders/fireworksai.ts +0 -88
  92. package/src/config/modelProviders/google.ts +0 -59
  93. package/src/config/modelProviders/groq.ts +0 -48
  94. package/src/config/modelProviders/higress.ts +0 -727
  95. package/src/config/modelProviders/hunyuan.ts +0 -45
  96. package/src/config/modelProviders/infiniai.ts +0 -60
  97. package/src/config/modelProviders/internlm.ts +0 -8
  98. package/src/config/modelProviders/mistral.ts +0 -48
  99. package/src/config/modelProviders/modelscope.ts +2 -1
  100. package/src/config/modelProviders/openai.ts +5 -100
  101. package/src/config/modelProviders/openrouter.ts +0 -77
  102. package/src/config/modelProviders/ppio.ts +0 -95
  103. package/src/config/modelProviders/qwen.ts +0 -165
  104. package/src/config/modelProviders/sensenova.ts +0 -45
  105. package/src/config/modelProviders/siliconcloud.ts +0 -266
  106. package/src/config/modelProviders/stepfun.ts +0 -60
  107. package/src/config/modelProviders/taichu.ts +0 -10
  108. package/src/config/modelProviders/wenxin.ts +0 -90
  109. package/src/config/modelProviders/xai.ts +0 -16
  110. package/src/config/modelProviders/zeroone.ts +0 -60
  111. package/src/config/modelProviders/zhipu.ts +0 -80
  112. package/src/const/hotkeys.ts +6 -0
  113. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +4 -3
  114. package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts +25 -15
  115. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +7 -5
  116. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +6 -5
  117. package/src/hooks/useHotkeys/chatScope.ts +2 -2
  118. package/src/hooks/useHotkeys/globalScope.ts +16 -4
  119. package/src/hooks/usePinnedAgentState.ts +21 -0
  120. package/src/hooks/useSwitchSession.ts +1 -1
  121. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts +54 -8
  122. package/src/locales/default/hotkey.ts +4 -0
  123. package/src/server/routers/lambda/agent.ts +2 -2
  124. package/src/server/routers/lambda/config/__snapshots__/index.test.ts.snap +0 -28
  125. package/src/server/services/discover/index.ts +7 -6
  126. package/src/server/services/user/index.ts +1 -2
  127. package/src/utils/__snapshots__/parseModels.test.ts.snap +28 -4
  128. package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap +0 -8
  129. package/src/utils/parseModels.test.ts +60 -9
  130. package/src/utils/pricing.test.ts +183 -0
  131. package/src/utils/pricing.ts +90 -0
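The hunks that follow strip the inline pricing blocks out of the legacy modelProviders configs; judging from the file list above (packages/types/src/aiModel.ts, the src/config/aiModels/* entries, and the new src/utils/pricing.ts), per-model pricing appears to move into the aiModels layer in this release. Below is a minimal TypeScript sketch of the shape being removed, inferred only from the deleted lines in these hunks; the interface name is illustrative, and the per-million-token units are an assumption rather than something stated in this diff.

// Hypothetical name for the inline pricing shape seen in the removed lines.
interface LegacyModelCardPricing {
  currency?: 'CNY';          // omitted in these hunks for USD-priced providers
  input?: number;            // prompt tokens (assumed: price per 1M tokens)
  output?: number;           // completion tokens (assumed: price per 1M tokens)
  cachedInput?: number;      // cache-read prompt tokens
  writeCacheInput?: number;  // cache-write prompt tokens
}

// Example of an entry dropped from anthropic.ts below:
// pricing: { cachedInput: 0.3, input: 3, output: 15, writeCacheInput: 3.75 }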
package/src/config/modelProviders/anthropic.ts

@@ -12,12 +12,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-opus-4-20250514',
       maxOutput: 32_000,
-      pricing: {
-        cachedInput: 7.5,
-        input: 30,
-        output: 150,
-        writeCacheInput: 37.5,
-      },
       releasedAt: '2025-05-14',
       vision: true,
     },
@@ -30,12 +24,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-sonnet-4-20250514',
       maxOutput: 64_000,
-      pricing: {
-        cachedInput: 1.5,
-        input: 6,
-        output: 30,
-        writeCacheInput: 7.5,
-      },
       releasedAt: '2025-05-14',
       vision: true,
     },
@@ -48,12 +36,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-3-7-sonnet-20250219',
       maxOutput: 64_000,
-      pricing: {
-        cachedInput: 0.1,
-        input: 1,
-        output: 5,
-        writeCacheInput: 1.25,
-      },
       releasedAt: '2025-02-24',
     },
     {
@@ -65,12 +47,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-3-5-haiku-20241022',
       maxOutput: 8192,
-      pricing: {
-        cachedInput: 0.1,
-        input: 1,
-        output: 5,
-        writeCacheInput: 1.25,
-      },
       releasedAt: '2024-11-05',
     },
     {
@@ -82,12 +58,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-3-5-sonnet-20241022',
       maxOutput: 8192,
-      pricing: {
-        cachedInput: 0.3,
-        input: 3,
-        output: 15,
-        writeCacheInput: 3.75,
-      },
       releasedAt: '2024-10-22',
       vision: true,
     },
@@ -99,12 +69,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-3-5-sonnet-20240620',
       maxOutput: 8192,
-      pricing: {
-        cachedInput: 0.3,
-        input: 3,
-        output: 15,
-        writeCacheInput: 3.75,
-      },
       releasedAt: '2024-06-20',
       vision: true,
     },
@@ -116,10 +80,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-3-haiku-20240307',
       maxOutput: 4096,
-      pricing: {
-        input: 0.25,
-        output: 1.25,
-      },
       releasedAt: '2024-03-07',
       vision: true,
     },
@@ -131,10 +91,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-3-sonnet-20240229',
       maxOutput: 4096,
-      pricing: {
-        input: 3,
-        output: 15,
-      },
       releasedAt: '2024-02-29',
       vision: true,
     },
@@ -147,10 +103,6 @@ const Anthropic: ModelProviderCard = {
       functionCall: true,
       id: 'claude-3-opus-20240229',
       maxOutput: 4096,
-      pricing: {
-        input: 15,
-        output: 75,
-      },
       releasedAt: '2024-02-29',
       vision: true,
     },
@@ -161,10 +113,6 @@ const Anthropic: ModelProviderCard = {
       displayName: 'Claude 2.1',
       id: 'claude-2.1',
       maxOutput: 4096,
-      pricing: {
-        input: 8,
-        output: 24,
-      },
       releasedAt: '2023-11-21',
     },
     {
@@ -174,10 +122,6 @@ const Anthropic: ModelProviderCard = {
       displayName: 'Claude 2.0',
       id: 'claude-2.0',
       maxOutput: 4096,
-      pricing: {
-        input: 8,
-        output: 24,
-      },
       releasedAt: '2023-07-11',
     },
   ],
package/src/config/modelProviders/baichuan.ts

@@ -12,11 +12,6 @@ const Baichuan: ModelProviderCard = {
       functionCall: true,
       id: 'Baichuan4',
       maxOutput: 4096,
-      pricing: {
-        currency: 'CNY',
-        input: 100,
-        output: 100,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -27,11 +22,6 @@ const Baichuan: ModelProviderCard = {
       functionCall: true,
       id: 'Baichuan4-Turbo',
       maxOutput: 4096,
-      pricing: {
-        currency: 'CNY',
-        input: 15,
-        output: 15,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -42,11 +32,6 @@ const Baichuan: ModelProviderCard = {
       functionCall: true,
       id: 'Baichuan4-Air',
       maxOutput: 4096,
-      pricing: {
-        currency: 'CNY',
-        input: 0.98,
-        output: 0.98,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -56,11 +41,6 @@ const Baichuan: ModelProviderCard = {
       functionCall: true,
       id: 'Baichuan3-Turbo',
       maxOutput: 8192,
-      pricing: {
-        currency: 'CNY',
-        input: 12,
-        output: 12,
-      },
     },
     {
       contextWindowTokens: 128_000,
@@ -69,11 +49,6 @@ const Baichuan: ModelProviderCard = {
       displayName: 'Baichuan 3 Turbo 128k',
       id: 'Baichuan3-Turbo-128k',
       maxOutput: 4096,
-      pricing: {
-        currency: 'CNY',
-        input: 24,
-        output: 24,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -82,11 +57,6 @@ const Baichuan: ModelProviderCard = {
       displayName: 'Baichuan 2 Turbo',
       id: 'Baichuan2-Turbo',
       maxOutput: 8192,
-      pricing: {
-        currency: 'CNY',
-        input: 8,
-        output: 8,
-      },
     },
   ],
   checkModel: 'Baichuan3-Turbo',
package/src/config/modelProviders/bedrock.ts

@@ -35,12 +35,6 @@ const Bedrock: ModelProviderCard = {
       functionCall: true,
       id: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
       maxOutput: 8192,
-      pricing: {
-        cachedInput: 0.1,
-        input: 1,
-        output: 5,
-        writeCacheInput: 1.25,
-      },
       releasedAt: '2025-02-24',
     },
     {
@@ -52,12 +46,6 @@ const Bedrock: ModelProviderCard = {
       functionCall: true,
       id: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
       maxOutput: 64_000,
-      pricing: {
-        cachedInput: 0.1,
-        input: 1,
-        output: 5,
-        writeCacheInput: 1.25,
-      },
       releasedAt: '2025-02-24',
     },
     {
@@ -69,12 +57,6 @@ const Bedrock: ModelProviderCard = {
       functionCall: true,
       id: 'anthropic.claude-3-5-haiku-20241022-v1:0',
       maxOutput: 8192,
-      pricing: {
-        cachedInput: 0.1,
-        input: 1,
-        output: 5,
-        writeCacheInput: 1.25,
-      },
       releasedAt: '2024-11-05',
     },
     {
@@ -85,10 +67,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
-      pricing: {
-        input: 3,
-        output: 15,
-      },
       vision: true,
     },
     {
@@ -99,10 +77,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
-      pricing: {
-        input: 3,
-        output: 15,
-      },
       vision: true,
     },
     {
@@ -113,10 +87,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
-      pricing: {
-        input: 3,
-        output: 15,
-      },
       vision: true,
     },
     {
@@ -127,10 +97,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'anthropic.claude-3-haiku-20240307-v1:0',
-      pricing: {
-        input: 0.25,
-        output: 1.25,
-      },
       vision: true,
     },
     {
@@ -141,10 +107,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'anthropic.claude-3-sonnet-20240229-v1:0',
-      pricing: {
-        input: 3,
-        output: 15,
-      },
       vision: true,
     },
     {
@@ -155,10 +117,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'anthropic.claude-3-opus-20240229-v1:0',
-      pricing: {
-        input: 15,
-        output: 75,
-      },
       vision: true,
     },
     {
@@ -167,10 +125,6 @@ const Bedrock: ModelProviderCard = {
         'Claude 2 的更新版,具有双倍的上下文窗口,以及在长文档和 RAG 上下文中的可靠性、幻觉率和基于证据的准确性的改进。',
       displayName: 'Claude 2.1',
       id: 'anthropic.claude-v2:1',
-      pricing: {
-        input: 8,
-        output: 24,
-      },
     },
     {
       contextWindowTokens: 100_000,
@@ -178,10 +132,6 @@ const Bedrock: ModelProviderCard = {
         'Anthropic 在从复杂对话和创意内容生成到详细指令跟随的广泛任务中都表现出高度能力的模型。',
       displayName: 'Claude 2.0',
       id: 'anthropic.claude-v2',
-      pricing: {
-        input: 8,
-        output: 24,
-      },
     },
     {
       contextWindowTokens: 100_000,
@@ -189,10 +139,6 @@ const Bedrock: ModelProviderCard = {
         '一款快速、经济且仍然非常有能力的模型,可以处理包括日常对话、文本分析、总结和文档问答在内的一系列任务。',
       displayName: 'Claude Instant',
       id: 'anthropic.claude-instant-v1',
-      pricing: {
-        input: 0.8,
-        output: 2.4,
-      },
     },
     {
       contextWindowTokens: 128_000,
@@ -202,10 +148,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'meta.llama3-1-8b-instruct-v1:0',
-      pricing: {
-        input: 0.22,
-        output: 0.22,
-      },
     },
     {
       contextWindowTokens: 128_000,
@@ -215,10 +157,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'meta.llama3-1-70b-instruct-v1:0',
-      pricing: {
-        input: 0.99,
-        output: 0.99,
-      },
     },
     {
       contextWindowTokens: 128_000,
@@ -228,10 +166,6 @@ const Bedrock: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'meta.llama3-1-405b-instruct-v1:0',
-      pricing: {
-        input: 5.32,
-        output: 16,
-      },
     },
     {
       contextWindowTokens: 8000,
@@ -239,10 +173,6 @@ const Bedrock: ModelProviderCard = {
         'Meta Llama 3 是一款面向开发者、研究人员和企业的开放大型语言模型 (LLM),旨在帮助他们构建、实验并负责任地扩展他们的生成 AI 想法。作为全球社区创新的基础系统的一部分,它非常适合计算能力和资源有限、边缘设备和更快的训练时间。',
       displayName: 'Llama 3 8B Instruct',
       id: 'meta.llama3-8b-instruct-v1:0',
-      pricing: {
-        input: 0.3,
-        output: 0.6,
-      },
     },
     {
       contextWindowTokens: 8000,
@@ -250,10 +180,6 @@ const Bedrock: ModelProviderCard = {
         'Meta Llama 3 是一款面向开发者、研究人员和企业的开放大型语言模型 (LLM),旨在帮助他们构建、实验并负责任地扩展他们的生成 AI 想法。作为全球社区创新的基础系统的一部分,它非常适合内容创建、对话 AI、语言理解、研发和企业应用。',
       displayName: 'Llama 3 70B Instruct',
       id: 'meta.llama3-70b-instruct-v1:0',
-      pricing: {
-        input: 2.65,
-        output: 3.5,
-      },
     },
     /*
     // TODO: Not support for now
package/src/config/modelProviders/deepseek.ts

@@ -11,13 +11,6 @@ const DeepSeek: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'deepseek-chat',
-      pricing: {
-        // 2025.2.9 之后涨价
-        cachedInput: 0.1,
-        currency: 'CNY',
-        input: 1,
-        output: 2,
-      },
       releasedAt: '2024-12-26',
     },
     {
@@ -27,12 +20,6 @@ const DeepSeek: ModelProviderCard = {
       displayName: 'DeepSeek R1',
       enabled: true,
       id: 'deepseek-reasoner',
-      pricing: {
-        cachedInput: 1,
-        currency: 'CNY',
-        input: 4,
-        output: 16,
-      },
       releasedAt: '2025-01-20',
     },
   ],
package/src/config/modelProviders/fireworksai.ts

@@ -11,10 +11,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Llama 3.3 70B Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/llama-v3p3-70b-instruct',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
     },
     {
       contextWindowTokens: 131_072,
@@ -23,10 +19,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Llama 3.2 3B Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/llama-v3p2-3b-instruct',
-      pricing: {
-        input: 0.1,
-        output: 0.1,
-      },
     },
     {
       contextWindowTokens: 131_072,
@@ -35,10 +27,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Llama 3.2 11B Vision Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/llama-v3p2-11b-vision-instruct',
-      pricing: {
-        input: 0.2,
-        output: 0.2,
-      },
       vision: true,
     },
     {
@@ -48,10 +36,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Llama 3.2 90B Vision Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/llama-v3p2-90b-vision-instruct',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
       vision: true,
     },
     {
@@ -60,10 +44,6 @@ const FireworksAI: ModelProviderCard = {
         'Meta Llama 3.1 系列是多语言大语言模型(LLM)集合,包含 8B、70B 和 405B 三种参数规模的预训练和指令微调生成模型。Llama 3.1 指令微调文本模型(8B、70B、405B)专为多语言对话应用优化,并在常见的行业基准测试中优于许多现有的开源和闭源聊天模型。',
       displayName: 'Llama 3.1 8B Instruct',
       id: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
-      pricing: {
-        input: 0.2,
-        output: 0.2,
-      },
     },
     {
       contextWindowTokens: 131_072,
@@ -72,10 +52,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Llama 3.1 70B Instruct',
       functionCall: true,
       id: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
     },
     {
       contextWindowTokens: 131_072,
@@ -84,10 +60,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Llama 3.1 405B Instruct',
       functionCall: true,
       id: 'accounts/fireworks/models/llama-v3p1-405b-instruct',
-      pricing: {
-        input: 3,
-        output: 3,
-      },
     },
     {
       contextWindowTokens: 8192,
@@ -95,10 +67,6 @@ const FireworksAI: ModelProviderCard = {
         'Meta 开发并发布了 Meta Llama 3 系列大语言模型(LLM),这是一个包含 8B 和 70B 参数规模的预训练和指令微调生成文本模型的集合。Llama 3 指令微调模型专为对话应用场景优化,并在常见的行业基准测试中优于许多现有的开源聊天模型。',
       displayName: 'Llama 3 8B Instruct',
       id: 'accounts/fireworks/models/llama-v3-8b-instruct',
-      pricing: {
-        input: 0.2,
-        output: 0.2,
-      },
     },
     {
       contextWindowTokens: 8192,
@@ -106,10 +74,6 @@ const FireworksAI: ModelProviderCard = {
         'Meta 开发并发布了 Meta Llama 3 系列大语言模型(LLM),该系列包含 8B 和 70B 参数规模的预训练和指令微调生成文本模型。Llama 3 指令微调模型专为对话应用场景优化,并在常见的行业基准测试中优于许多现有的开源聊天模型。',
       displayName: 'Llama 3 70B Instruct',
       id: 'accounts/fireworks/models/llama-v3-70b-instruct',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
     },
     {
       contextWindowTokens: 8192,
@@ -117,10 +81,6 @@ const FireworksAI: ModelProviderCard = {
         'Meta Llama 3 指令微调模型专为对话应用场景优化,并在常见的行业基准测试中优于许多现有的开源聊天模型。Llama 3 8B Instruct(HF 版本)是 Llama 3 8B Instruct 的原始 FP16 版本,其结果应与官方 Hugging Face 实现一致。',
       displayName: 'Llama 3 8B Instruct (HF version)',
       id: 'accounts/fireworks/models/llama-v3-8b-instruct-hf',
-      pricing: {
-        input: 0.2,
-        output: 0.2,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -128,10 +88,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Mistral Small 3 Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/mistral-small-24b-instruct-2501',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -139,10 +95,6 @@ const FireworksAI: ModelProviderCard = {
         'Mixtral MoE 8x7B Instruct 是 Mixtral MoE 8x7B 的指令微调版本,已启用聊天完成功能 API。',
       displayName: 'Mixtral MoE 8x7B Instruct',
       id: 'accounts/fireworks/models/mixtral-8x7b-instruct',
-      pricing: {
-        input: 0.5,
-        output: 0.5,
-      },
     },
     {
       contextWindowTokens: 65_536,
@@ -151,10 +103,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Mixtral MoE 8x22B Instruct',
       functionCall: true,
       id: 'accounts/fireworks/models/mixtral-8x22b-instruct',
-      pricing: {
-        input: 1.2,
-        output: 1.2,
-      },
     },
     {
       contextWindowTokens: 32_064,
@@ -163,10 +111,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Phi 3.5 Vision Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/phi-3-vision-128k-instruct',
-      pricing: {
-        input: 0.2,
-        output: 0.2,
-      },
       vision: true,
     },
     {
@@ -175,10 +119,6 @@ const FireworksAI: ModelProviderCard = {
         'MythoMix 的改进版,可能是其更为完善的变体,是 MythoLogic-L2 和 Huginn 的合并,采用了高度实验性的张量类型合并技术。由于其独特的性质,该模型在讲故事和角色扮演方面表现出色。',
       displayName: 'MythoMax L2 13b',
       id: 'accounts/fireworks/models/mythomax-l2-13b',
-      pricing: {
-        input: 0.2,
-        output: 0.2,
-      },
     },
     {
       contextWindowTokens: 131_072,
@@ -187,10 +127,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Deepseek V3',
       enabled: true,
       id: 'accounts/fireworks/models/deepseek-v3',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
     },
     {
       contextWindowTokens: 163_840,
@@ -199,10 +135,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Deepseek R1',
       enabled: true,
       id: 'accounts/fireworks/models/deepseek-r1',
-      pricing: {
-        input: 8,
-        output: 8,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -211,10 +143,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Qwen Qwq 32b Preview',
       enabled: true,
       id: 'accounts/fireworks/models/qwen-qwq-32b-preview',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -223,10 +151,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Qwen2.5 72B Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/qwen2p5-72b-instruct',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -234,10 +158,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Qwen2 VL 72B Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/qwen2-vl-72b-instruct',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
       vision: true,
     },
     {
@@ -247,10 +167,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Qwen2.5-Coder-32B-Instruct',
       enabled: true,
       id: 'accounts/fireworks/models/qwen2p5-coder-32b-instruct',
-      pricing: {
-        input: 0.9,
-        output: 0.9,
-      },
     },
     {
       contextWindowTokens: 32_768,
@@ -259,10 +175,6 @@ const FireworksAI: ModelProviderCard = {
       displayName: 'Yi-Large',
       enabled: true,
       id: 'accounts/yi-01-ai/models/yi-large',
-      pricing: {
-        input: 3,
-        output: 3,
-      },
     },
   ],
   checkModel: 'accounts/fireworks/models/llama-v3p2-3b-instruct',