@roo-code/types 1.22.0 → 1.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2107 -6
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +2353 -539
- package/dist/index.d.ts +2353 -539
- package/dist/index.js +2053 -5
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
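Most of the growth comes from new provider model-metadata maps bundled into dist/index.js (for example `anthropicModels`, `bedrockModels`, `geminiModels` and their matching default model IDs). As a rough, hypothetical sketch of how such a map could be consumed — assuming these values are re-exported from the package root, which this diff alone does not confirm — a lookup might look like:

// Hypothetical usage sketch; assumes anthropicDefaultModelId and anthropicModels
// (defined in the bundled output shown below) are exported from "@roo-code/types".
import { anthropicDefaultModelId, anthropicModels } from "@roo-code/types"

// Each entry records a model's token limits and per-million-token pricing.
const defaultModel = anthropicModels[anthropicDefaultModelId]

console.log(defaultModel.contextWindow) // 200000 (written as 2e5 in the bundle)
console.log(defaultModel.inputPrice)    // 3, i.e. $3 per million input tokens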
package/dist/index.js
CHANGED
|
@@ -1,3 +1,1987 @@
|
|
|
1
|
+
// src/providers/anthropic.ts
|
|
2
|
+
var anthropicDefaultModelId = "claude-sonnet-4-20250514";
|
|
3
|
+
var anthropicModels = {
|
|
4
|
+
"claude-sonnet-4-20250514": {
|
|
5
|
+
maxTokens: 64e3,
|
|
6
|
+
// Overridden to 8k if `enableReasoningEffort` is false.
|
|
7
|
+
contextWindow: 2e5,
|
|
8
|
+
supportsImages: true,
|
|
9
|
+
supportsComputerUse: true,
|
|
10
|
+
supportsPromptCache: true,
|
|
11
|
+
inputPrice: 3,
|
|
12
|
+
// $3 per million input tokens
|
|
13
|
+
outputPrice: 15,
|
|
14
|
+
// $15 per million output tokens
|
|
15
|
+
cacheWritesPrice: 3.75,
|
|
16
|
+
// $3.75 per million tokens
|
|
17
|
+
cacheReadsPrice: 0.3,
|
|
18
|
+
// $0.30 per million tokens
|
|
19
|
+
supportsReasoningBudget: true
|
|
20
|
+
},
|
|
21
|
+
"claude-opus-4-20250514": {
|
|
22
|
+
maxTokens: 32e3,
|
|
23
|
+
// Overridden to 8k if `enableReasoningEffort` is false.
|
|
24
|
+
contextWindow: 2e5,
|
|
25
|
+
supportsImages: true,
|
|
26
|
+
supportsComputerUse: true,
|
|
27
|
+
supportsPromptCache: true,
|
|
28
|
+
inputPrice: 15,
|
|
29
|
+
// $15 per million input tokens
|
|
30
|
+
outputPrice: 75,
|
|
31
|
+
// $75 per million output tokens
|
|
32
|
+
cacheWritesPrice: 18.75,
|
|
33
|
+
// $18.75 per million tokens
|
|
34
|
+
cacheReadsPrice: 1.5,
|
|
35
|
+
// $1.50 per million tokens
|
|
36
|
+
supportsReasoningBudget: true
|
|
37
|
+
},
|
|
38
|
+
"claude-3-7-sonnet-20250219:thinking": {
|
|
39
|
+
maxTokens: 128e3,
|
|
40
|
+
// Unlocked by passing `beta` flag to the model. Otherwise, it's 64k.
|
|
41
|
+
contextWindow: 2e5,
|
|
42
|
+
supportsImages: true,
|
|
43
|
+
supportsComputerUse: true,
|
|
44
|
+
supportsPromptCache: true,
|
|
45
|
+
inputPrice: 3,
|
|
46
|
+
// $3 per million input tokens
|
|
47
|
+
outputPrice: 15,
|
|
48
|
+
// $15 per million output tokens
|
|
49
|
+
cacheWritesPrice: 3.75,
|
|
50
|
+
// $3.75 per million tokens
|
|
51
|
+
cacheReadsPrice: 0.3,
|
|
52
|
+
// $0.30 per million tokens
|
|
53
|
+
supportsReasoningBudget: true,
|
|
54
|
+
requiredReasoningBudget: true
|
|
55
|
+
},
|
|
56
|
+
"claude-3-7-sonnet-20250219": {
|
|
57
|
+
maxTokens: 8192,
|
|
58
|
+
// Since we already have a `:thinking` virtual model we aren't setting `supportsReasoningBudget: true` here.
|
|
59
|
+
contextWindow: 2e5,
|
|
60
|
+
supportsImages: true,
|
|
61
|
+
supportsComputerUse: true,
|
|
62
|
+
supportsPromptCache: true,
|
|
63
|
+
inputPrice: 3,
|
|
64
|
+
// $3 per million input tokens
|
|
65
|
+
outputPrice: 15,
|
|
66
|
+
// $15 per million output tokens
|
|
67
|
+
cacheWritesPrice: 3.75,
|
|
68
|
+
// $3.75 per million tokens
|
|
69
|
+
cacheReadsPrice: 0.3
|
|
70
|
+
// $0.30 per million tokens
|
|
71
|
+
},
|
|
72
|
+
"claude-3-5-sonnet-20241022": {
|
|
73
|
+
maxTokens: 8192,
|
|
74
|
+
contextWindow: 2e5,
|
|
75
|
+
supportsImages: true,
|
|
76
|
+
supportsComputerUse: true,
|
|
77
|
+
supportsPromptCache: true,
|
|
78
|
+
inputPrice: 3,
|
|
79
|
+
// $3 per million input tokens
|
|
80
|
+
outputPrice: 15,
|
|
81
|
+
// $15 per million output tokens
|
|
82
|
+
cacheWritesPrice: 3.75,
|
|
83
|
+
// $3.75 per million tokens
|
|
84
|
+
cacheReadsPrice: 0.3
|
|
85
|
+
// $0.30 per million tokens
|
|
86
|
+
},
|
|
87
|
+
"claude-3-5-haiku-20241022": {
|
|
88
|
+
maxTokens: 8192,
|
|
89
|
+
contextWindow: 2e5,
|
|
90
|
+
supportsImages: false,
|
|
91
|
+
supportsPromptCache: true,
|
|
92
|
+
inputPrice: 1,
|
|
93
|
+
outputPrice: 5,
|
|
94
|
+
cacheWritesPrice: 1.25,
|
|
95
|
+
cacheReadsPrice: 0.1
|
|
96
|
+
},
|
|
97
|
+
"claude-3-opus-20240229": {
|
|
98
|
+
maxTokens: 4096,
|
|
99
|
+
contextWindow: 2e5,
|
|
100
|
+
supportsImages: true,
|
|
101
|
+
supportsPromptCache: true,
|
|
102
|
+
inputPrice: 15,
|
|
103
|
+
outputPrice: 75,
|
|
104
|
+
cacheWritesPrice: 18.75,
|
|
105
|
+
cacheReadsPrice: 1.5
|
|
106
|
+
},
|
|
107
|
+
"claude-3-haiku-20240307": {
|
|
108
|
+
maxTokens: 4096,
|
|
109
|
+
contextWindow: 2e5,
|
|
110
|
+
supportsImages: true,
|
|
111
|
+
supportsPromptCache: true,
|
|
112
|
+
inputPrice: 0.25,
|
|
113
|
+
outputPrice: 1.25,
|
|
114
|
+
cacheWritesPrice: 0.3,
|
|
115
|
+
cacheReadsPrice: 0.03
|
|
116
|
+
}
|
|
117
|
+
};
|
|
118
|
+
var ANTHROPIC_DEFAULT_MAX_TOKENS = 8192;
|
|
119
|
+
|
|
120
|
+
// src/providers/bedrock.ts
|
|
121
|
+
var bedrockDefaultModelId = "anthropic.claude-sonnet-4-20250514-v1:0";
|
|
122
|
+
var bedrockDefaultPromptRouterModelId = "anthropic.claude-3-sonnet-20240229-v1:0";
|
|
123
|
+
var bedrockModels = {
|
|
124
|
+
"amazon.nova-pro-v1:0": {
|
|
125
|
+
maxTokens: 5e3,
|
|
126
|
+
contextWindow: 3e5,
|
|
127
|
+
supportsImages: true,
|
|
128
|
+
supportsComputerUse: false,
|
|
129
|
+
supportsPromptCache: true,
|
|
130
|
+
inputPrice: 0.8,
|
|
131
|
+
outputPrice: 3.2,
|
|
132
|
+
cacheWritesPrice: 0.8,
|
|
133
|
+
// per million tokens
|
|
134
|
+
cacheReadsPrice: 0.2,
|
|
135
|
+
// per million tokens
|
|
136
|
+
minTokensPerCachePoint: 1,
|
|
137
|
+
maxCachePoints: 1,
|
|
138
|
+
cachableFields: ["system"]
|
|
139
|
+
},
|
|
140
|
+
"amazon.nova-pro-latency-optimized-v1:0": {
|
|
141
|
+
maxTokens: 5e3,
|
|
142
|
+
contextWindow: 3e5,
|
|
143
|
+
supportsImages: true,
|
|
144
|
+
supportsComputerUse: false,
|
|
145
|
+
supportsPromptCache: false,
|
|
146
|
+
inputPrice: 1,
|
|
147
|
+
outputPrice: 4,
|
|
148
|
+
cacheWritesPrice: 1,
|
|
149
|
+
// per million tokens
|
|
150
|
+
cacheReadsPrice: 0.25,
|
|
151
|
+
// per million tokens
|
|
152
|
+
description: "Amazon Nova Pro with latency optimized inference"
|
|
153
|
+
},
|
|
154
|
+
"amazon.nova-lite-v1:0": {
|
|
155
|
+
maxTokens: 5e3,
|
|
156
|
+
contextWindow: 3e5,
|
|
157
|
+
supportsImages: true,
|
|
158
|
+
supportsComputerUse: false,
|
|
159
|
+
supportsPromptCache: true,
|
|
160
|
+
inputPrice: 0.06,
|
|
161
|
+
outputPrice: 0.24,
|
|
162
|
+
cacheWritesPrice: 0.06,
|
|
163
|
+
// per million tokens
|
|
164
|
+
cacheReadsPrice: 0.015,
|
|
165
|
+
// per million tokens
|
|
166
|
+
minTokensPerCachePoint: 1,
|
|
167
|
+
maxCachePoints: 1,
|
|
168
|
+
cachableFields: ["system"]
|
|
169
|
+
},
|
|
170
|
+
"amazon.nova-micro-v1:0": {
|
|
171
|
+
maxTokens: 5e3,
|
|
172
|
+
contextWindow: 128e3,
|
|
173
|
+
supportsImages: false,
|
|
174
|
+
supportsComputerUse: false,
|
|
175
|
+
supportsPromptCache: true,
|
|
176
|
+
inputPrice: 0.035,
|
|
177
|
+
outputPrice: 0.14,
|
|
178
|
+
cacheWritesPrice: 0.035,
|
|
179
|
+
// per million tokens
|
|
180
|
+
cacheReadsPrice: 875e-5,
|
|
181
|
+
// per million tokens
|
|
182
|
+
minTokensPerCachePoint: 1,
|
|
183
|
+
maxCachePoints: 1,
|
|
184
|
+
cachableFields: ["system"]
|
|
185
|
+
},
|
|
186
|
+
"anthropic.claude-sonnet-4-20250514-v1:0": {
|
|
187
|
+
maxTokens: 8192,
|
|
188
|
+
contextWindow: 2e5,
|
|
189
|
+
supportsImages: true,
|
|
190
|
+
supportsComputerUse: true,
|
|
191
|
+
supportsPromptCache: true,
|
|
192
|
+
inputPrice: 3,
|
|
193
|
+
outputPrice: 15,
|
|
194
|
+
cacheWritesPrice: 3.75,
|
|
195
|
+
cacheReadsPrice: 0.3,
|
|
196
|
+
minTokensPerCachePoint: 1024,
|
|
197
|
+
maxCachePoints: 4,
|
|
198
|
+
cachableFields: ["system", "messages", "tools"]
|
|
199
|
+
},
|
|
200
|
+
"anthropic.claude-opus-4-20250514-v1:0": {
|
|
201
|
+
maxTokens: 8192,
|
|
202
|
+
contextWindow: 2e5,
|
|
203
|
+
supportsImages: true,
|
|
204
|
+
supportsComputerUse: true,
|
|
205
|
+
supportsPromptCache: true,
|
|
206
|
+
inputPrice: 15,
|
|
207
|
+
outputPrice: 75,
|
|
208
|
+
cacheWritesPrice: 18.75,
|
|
209
|
+
cacheReadsPrice: 1.5,
|
|
210
|
+
minTokensPerCachePoint: 1024,
|
|
211
|
+
maxCachePoints: 4,
|
|
212
|
+
cachableFields: ["system", "messages", "tools"]
|
|
213
|
+
},
|
|
214
|
+
"anthropic.claude-3-7-sonnet-20250219-v1:0": {
|
|
215
|
+
maxTokens: 8192,
|
|
216
|
+
contextWindow: 2e5,
|
|
217
|
+
supportsImages: true,
|
|
218
|
+
supportsComputerUse: true,
|
|
219
|
+
supportsPromptCache: true,
|
|
220
|
+
inputPrice: 3,
|
|
221
|
+
outputPrice: 15,
|
|
222
|
+
cacheWritesPrice: 3.75,
|
|
223
|
+
cacheReadsPrice: 0.3,
|
|
224
|
+
minTokensPerCachePoint: 1024,
|
|
225
|
+
maxCachePoints: 4,
|
|
226
|
+
cachableFields: ["system", "messages", "tools"]
|
|
227
|
+
},
|
|
228
|
+
"anthropic.claude-3-5-sonnet-20241022-v2:0": {
|
|
229
|
+
maxTokens: 8192,
|
|
230
|
+
contextWindow: 2e5,
|
|
231
|
+
supportsImages: true,
|
|
232
|
+
supportsComputerUse: true,
|
|
233
|
+
supportsPromptCache: true,
|
|
234
|
+
inputPrice: 3,
|
|
235
|
+
outputPrice: 15,
|
|
236
|
+
cacheWritesPrice: 3.75,
|
|
237
|
+
cacheReadsPrice: 0.3,
|
|
238
|
+
minTokensPerCachePoint: 1024,
|
|
239
|
+
maxCachePoints: 4,
|
|
240
|
+
cachableFields: ["system", "messages", "tools"]
|
|
241
|
+
},
|
|
242
|
+
"anthropic.claude-3-5-haiku-20241022-v1:0": {
|
|
243
|
+
maxTokens: 8192,
|
|
244
|
+
contextWindow: 2e5,
|
|
245
|
+
supportsImages: false,
|
|
246
|
+
supportsPromptCache: true,
|
|
247
|
+
inputPrice: 0.8,
|
|
248
|
+
outputPrice: 4,
|
|
249
|
+
cacheWritesPrice: 1,
|
|
250
|
+
cacheReadsPrice: 0.08,
|
|
251
|
+
minTokensPerCachePoint: 2048,
|
|
252
|
+
maxCachePoints: 4,
|
|
253
|
+
cachableFields: ["system", "messages", "tools"]
|
|
254
|
+
},
|
|
255
|
+
"anthropic.claude-3-5-sonnet-20240620-v1:0": {
|
|
256
|
+
maxTokens: 8192,
|
|
257
|
+
contextWindow: 2e5,
|
|
258
|
+
supportsImages: true,
|
|
259
|
+
supportsPromptCache: false,
|
|
260
|
+
inputPrice: 3,
|
|
261
|
+
outputPrice: 15
|
|
262
|
+
},
|
|
263
|
+
"anthropic.claude-3-opus-20240229-v1:0": {
|
|
264
|
+
maxTokens: 4096,
|
|
265
|
+
contextWindow: 2e5,
|
|
266
|
+
supportsImages: true,
|
|
267
|
+
supportsPromptCache: false,
|
|
268
|
+
inputPrice: 15,
|
|
269
|
+
outputPrice: 75
|
|
270
|
+
},
|
|
271
|
+
"anthropic.claude-3-sonnet-20240229-v1:0": {
|
|
272
|
+
maxTokens: 4096,
|
|
273
|
+
contextWindow: 2e5,
|
|
274
|
+
supportsImages: true,
|
|
275
|
+
supportsPromptCache: false,
|
|
276
|
+
inputPrice: 3,
|
|
277
|
+
outputPrice: 15
|
|
278
|
+
},
|
|
279
|
+
"anthropic.claude-3-haiku-20240307-v1:0": {
|
|
280
|
+
maxTokens: 4096,
|
|
281
|
+
contextWindow: 2e5,
|
|
282
|
+
supportsImages: true,
|
|
283
|
+
supportsPromptCache: false,
|
|
284
|
+
inputPrice: 0.25,
|
|
285
|
+
outputPrice: 1.25
|
|
286
|
+
},
|
|
287
|
+
"anthropic.claude-2-1-v1:0": {
|
|
288
|
+
maxTokens: 4096,
|
|
289
|
+
contextWindow: 1e5,
|
|
290
|
+
supportsImages: false,
|
|
291
|
+
supportsPromptCache: false,
|
|
292
|
+
inputPrice: 8,
|
|
293
|
+
outputPrice: 24,
|
|
294
|
+
description: "Claude 2.1"
|
|
295
|
+
},
|
|
296
|
+
"anthropic.claude-2-0-v1:0": {
|
|
297
|
+
maxTokens: 4096,
|
|
298
|
+
contextWindow: 1e5,
|
|
299
|
+
supportsImages: false,
|
|
300
|
+
supportsPromptCache: false,
|
|
301
|
+
inputPrice: 8,
|
|
302
|
+
outputPrice: 24,
|
|
303
|
+
description: "Claude 2.0"
|
|
304
|
+
},
|
|
305
|
+
"anthropic.claude-instant-v1:0": {
|
|
306
|
+
maxTokens: 4096,
|
|
307
|
+
contextWindow: 1e5,
|
|
308
|
+
supportsImages: false,
|
|
309
|
+
supportsPromptCache: false,
|
|
310
|
+
inputPrice: 0.8,
|
|
311
|
+
outputPrice: 2.4,
|
|
312
|
+
description: "Claude Instant"
|
|
313
|
+
},
|
|
314
|
+
"deepseek.r1-v1:0": {
|
|
315
|
+
maxTokens: 32768,
|
|
316
|
+
contextWindow: 128e3,
|
|
317
|
+
supportsImages: false,
|
|
318
|
+
supportsPromptCache: false,
|
|
319
|
+
inputPrice: 1.35,
|
|
320
|
+
outputPrice: 5.4
|
|
321
|
+
},
|
|
322
|
+
"meta.llama3-3-70b-instruct-v1:0": {
|
|
323
|
+
maxTokens: 8192,
|
|
324
|
+
contextWindow: 128e3,
|
|
325
|
+
supportsImages: false,
|
|
326
|
+
supportsComputerUse: false,
|
|
327
|
+
supportsPromptCache: false,
|
|
328
|
+
inputPrice: 0.72,
|
|
329
|
+
outputPrice: 0.72,
|
|
330
|
+
description: "Llama 3.3 Instruct (70B)"
|
|
331
|
+
},
|
|
332
|
+
"meta.llama3-2-90b-instruct-v1:0": {
|
|
333
|
+
maxTokens: 8192,
|
|
334
|
+
contextWindow: 128e3,
|
|
335
|
+
supportsImages: true,
|
|
336
|
+
supportsComputerUse: false,
|
|
337
|
+
supportsPromptCache: false,
|
|
338
|
+
inputPrice: 0.72,
|
|
339
|
+
outputPrice: 0.72,
|
|
340
|
+
description: "Llama 3.2 Instruct (90B)"
|
|
341
|
+
},
|
|
342
|
+
"meta.llama3-2-11b-instruct-v1:0": {
|
|
343
|
+
maxTokens: 8192,
|
|
344
|
+
contextWindow: 128e3,
|
|
345
|
+
supportsImages: true,
|
|
346
|
+
supportsComputerUse: false,
|
|
347
|
+
supportsPromptCache: false,
|
|
348
|
+
inputPrice: 0.16,
|
|
349
|
+
outputPrice: 0.16,
|
|
350
|
+
description: "Llama 3.2 Instruct (11B)"
|
|
351
|
+
},
|
|
352
|
+
"meta.llama3-2-3b-instruct-v1:0": {
|
|
353
|
+
maxTokens: 8192,
|
|
354
|
+
contextWindow: 128e3,
|
|
355
|
+
supportsImages: false,
|
|
356
|
+
supportsComputerUse: false,
|
|
357
|
+
supportsPromptCache: false,
|
|
358
|
+
inputPrice: 0.15,
|
|
359
|
+
outputPrice: 0.15,
|
|
360
|
+
description: "Llama 3.2 Instruct (3B)"
|
|
361
|
+
},
|
|
362
|
+
"meta.llama3-2-1b-instruct-v1:0": {
|
|
363
|
+
maxTokens: 8192,
|
|
364
|
+
contextWindow: 128e3,
|
|
365
|
+
supportsImages: false,
|
|
366
|
+
supportsComputerUse: false,
|
|
367
|
+
supportsPromptCache: false,
|
|
368
|
+
inputPrice: 0.1,
|
|
369
|
+
outputPrice: 0.1,
|
|
370
|
+
description: "Llama 3.2 Instruct (1B)"
|
|
371
|
+
},
|
|
372
|
+
"meta.llama3-1-405b-instruct-v1:0": {
|
|
373
|
+
maxTokens: 8192,
|
|
374
|
+
contextWindow: 128e3,
|
|
375
|
+
supportsImages: false,
|
|
376
|
+
supportsComputerUse: false,
|
|
377
|
+
supportsPromptCache: false,
|
|
378
|
+
inputPrice: 2.4,
|
|
379
|
+
outputPrice: 2.4,
|
|
380
|
+
description: "Llama 3.1 Instruct (405B)"
|
|
381
|
+
},
|
|
382
|
+
"meta.llama3-1-70b-instruct-v1:0": {
|
|
383
|
+
maxTokens: 8192,
|
|
384
|
+
contextWindow: 128e3,
|
|
385
|
+
supportsImages: false,
|
|
386
|
+
supportsComputerUse: false,
|
|
387
|
+
supportsPromptCache: false,
|
|
388
|
+
inputPrice: 0.72,
|
|
389
|
+
outputPrice: 0.72,
|
|
390
|
+
description: "Llama 3.1 Instruct (70B)"
|
|
391
|
+
},
|
|
392
|
+
"meta.llama3-1-70b-instruct-latency-optimized-v1:0": {
|
|
393
|
+
maxTokens: 8192,
|
|
394
|
+
contextWindow: 128e3,
|
|
395
|
+
supportsImages: false,
|
|
396
|
+
supportsComputerUse: false,
|
|
397
|
+
supportsPromptCache: false,
|
|
398
|
+
inputPrice: 0.9,
|
|
399
|
+
outputPrice: 0.9,
|
|
400
|
+
description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)"
|
|
401
|
+
},
|
|
402
|
+
"meta.llama3-1-8b-instruct-v1:0": {
|
|
403
|
+
maxTokens: 8192,
|
|
404
|
+
contextWindow: 8e3,
|
|
405
|
+
supportsImages: false,
|
|
406
|
+
supportsComputerUse: false,
|
|
407
|
+
supportsPromptCache: false,
|
|
408
|
+
inputPrice: 0.22,
|
|
409
|
+
outputPrice: 0.22,
|
|
410
|
+
description: "Llama 3.1 Instruct (8B)"
|
|
411
|
+
},
|
|
412
|
+
"meta.llama3-70b-instruct-v1:0": {
|
|
413
|
+
maxTokens: 2048,
|
|
414
|
+
contextWindow: 8e3,
|
|
415
|
+
supportsImages: false,
|
|
416
|
+
supportsComputerUse: false,
|
|
417
|
+
supportsPromptCache: false,
|
|
418
|
+
inputPrice: 2.65,
|
|
419
|
+
outputPrice: 3.5
|
|
420
|
+
},
|
|
421
|
+
"meta.llama3-8b-instruct-v1:0": {
|
|
422
|
+
maxTokens: 2048,
|
|
423
|
+
contextWindow: 4e3,
|
|
424
|
+
supportsImages: false,
|
|
425
|
+
supportsComputerUse: false,
|
|
426
|
+
supportsPromptCache: false,
|
|
427
|
+
inputPrice: 0.3,
|
|
428
|
+
outputPrice: 0.6
|
|
429
|
+
},
|
|
430
|
+
"amazon.titan-text-lite-v1:0": {
|
|
431
|
+
maxTokens: 4096,
|
|
432
|
+
contextWindow: 8e3,
|
|
433
|
+
supportsImages: false,
|
|
434
|
+
supportsComputerUse: false,
|
|
435
|
+
supportsPromptCache: false,
|
|
436
|
+
inputPrice: 0.15,
|
|
437
|
+
outputPrice: 0.2,
|
|
438
|
+
description: "Amazon Titan Text Lite"
|
|
439
|
+
},
|
|
440
|
+
"amazon.titan-text-express-v1:0": {
|
|
441
|
+
maxTokens: 4096,
|
|
442
|
+
contextWindow: 8e3,
|
|
443
|
+
supportsImages: false,
|
|
444
|
+
supportsComputerUse: false,
|
|
445
|
+
supportsPromptCache: false,
|
|
446
|
+
inputPrice: 0.2,
|
|
447
|
+
outputPrice: 0.6,
|
|
448
|
+
description: "Amazon Titan Text Express"
|
|
449
|
+
},
|
|
450
|
+
"amazon.titan-text-embeddings-v1:0": {
|
|
451
|
+
maxTokens: 8192,
|
|
452
|
+
contextWindow: 8e3,
|
|
453
|
+
supportsImages: false,
|
|
454
|
+
supportsComputerUse: false,
|
|
455
|
+
supportsPromptCache: false,
|
|
456
|
+
inputPrice: 0.1,
|
|
457
|
+
description: "Amazon Titan Text Embeddings"
|
|
458
|
+
},
|
|
459
|
+
"amazon.titan-text-embeddings-v2:0": {
|
|
460
|
+
maxTokens: 8192,
|
|
461
|
+
contextWindow: 8e3,
|
|
462
|
+
supportsImages: false,
|
|
463
|
+
supportsComputerUse: false,
|
|
464
|
+
supportsPromptCache: false,
|
|
465
|
+
inputPrice: 0.02,
|
|
466
|
+
description: "Amazon Titan Text Embeddings V2"
|
|
467
|
+
}
|
|
468
|
+
};
|
|
469
|
+
var BEDROCK_DEFAULT_TEMPERATURE = 0.3;
|
|
470
|
+
var BEDROCK_MAX_TOKENS = 4096;
|
|
471
|
+
var BEDROCK_REGION_INFO = {
|
|
472
|
+
/*
|
|
473
|
+
* This JSON generated by AWS's AI assistant - Amazon Q on March 29, 2025
|
|
474
|
+
*
|
|
475
|
+
* - Africa (Cape Town) region does not appear to support Amazon Bedrock at this time.
|
|
476
|
+
* - Some Asia Pacific regions, such as Asia Pacific (Hong Kong) and Asia Pacific (Jakarta), are not listed among the supported regions for Bedrock services.
|
|
477
|
+
* - Middle East regions, including Middle East (Bahrain) and Middle East (UAE), are not mentioned in the list of supported regions for Bedrock. [3]
|
|
478
|
+
* - China regions (Beijing and Ningxia) are not listed as supported for Amazon Bedrock.
|
|
479
|
+
* - Some newer or specialized AWS regions may not have Bedrock support yet.
|
|
480
|
+
*/
|
|
481
|
+
"us.": { regionId: "us-east-1", description: "US East (N. Virginia)", pattern: "us-", multiRegion: true },
|
|
482
|
+
"use.": { regionId: "us-east-1", description: "US East (N. Virginia)" },
|
|
483
|
+
"use1.": { regionId: "us-east-1", description: "US East (N. Virginia)" },
|
|
484
|
+
"use2.": { regionId: "us-east-2", description: "US East (Ohio)" },
|
|
485
|
+
"usw.": { regionId: "us-west-2", description: "US West (Oregon)" },
|
|
486
|
+
"usw2.": { regionId: "us-west-2", description: "US West (Oregon)" },
|
|
487
|
+
"ug.": {
|
|
488
|
+
regionId: "us-gov-west-1",
|
|
489
|
+
description: "AWS GovCloud (US-West)",
|
|
490
|
+
pattern: "us-gov-",
|
|
491
|
+
multiRegion: true
|
|
492
|
+
},
|
|
493
|
+
"uge1.": { regionId: "us-gov-east-1", description: "AWS GovCloud (US-East)" },
|
|
494
|
+
"ugw1.": { regionId: "us-gov-west-1", description: "AWS GovCloud (US-West)" },
|
|
495
|
+
"eu.": { regionId: "eu-west-1", description: "Europe (Ireland)", pattern: "eu-", multiRegion: true },
|
|
496
|
+
"euw1.": { regionId: "eu-west-1", description: "Europe (Ireland)" },
|
|
497
|
+
"euw2.": { regionId: "eu-west-2", description: "Europe (London)" },
|
|
498
|
+
"euw3.": { regionId: "eu-west-3", description: "Europe (Paris)" },
|
|
499
|
+
"euc1.": { regionId: "eu-central-1", description: "Europe (Frankfurt)" },
|
|
500
|
+
"euc2.": { regionId: "eu-central-2", description: "Europe (Zurich)" },
|
|
501
|
+
"eun1.": { regionId: "eu-north-1", description: "Europe (Stockholm)" },
|
|
502
|
+
"eus1.": { regionId: "eu-south-1", description: "Europe (Milan)" },
|
|
503
|
+
"eus2.": { regionId: "eu-south-2", description: "Europe (Spain)" },
|
|
504
|
+
"ap.": {
|
|
505
|
+
regionId: "ap-southeast-1",
|
|
506
|
+
description: "Asia Pacific (Singapore)",
|
|
507
|
+
pattern: "ap-",
|
|
508
|
+
multiRegion: true
|
|
509
|
+
},
|
|
510
|
+
"ape1.": { regionId: "ap-east-1", description: "Asia Pacific (Hong Kong)" },
|
|
511
|
+
"apne1.": { regionId: "ap-northeast-1", description: "Asia Pacific (Tokyo)" },
|
|
512
|
+
"apne2.": { regionId: "ap-northeast-2", description: "Asia Pacific (Seoul)" },
|
|
513
|
+
"apne3.": { regionId: "ap-northeast-3", description: "Asia Pacific (Osaka)" },
|
|
514
|
+
"aps1.": { regionId: "ap-south-1", description: "Asia Pacific (Mumbai)" },
|
|
515
|
+
"aps2.": { regionId: "ap-south-2", description: "Asia Pacific (Hyderabad)" },
|
|
516
|
+
"apse1.": { regionId: "ap-southeast-1", description: "Asia Pacific (Singapore)" },
|
|
517
|
+
"apse2.": { regionId: "ap-southeast-2", description: "Asia Pacific (Sydney)" },
|
|
518
|
+
"ca.": { regionId: "ca-central-1", description: "Canada (Central)", pattern: "ca-", multiRegion: true },
|
|
519
|
+
"cac1.": { regionId: "ca-central-1", description: "Canada (Central)" },
|
|
520
|
+
"sa.": { regionId: "sa-east-1", description: "South America (S\xE3o Paulo)", pattern: "sa-", multiRegion: true },
|
|
521
|
+
"sae1.": { regionId: "sa-east-1", description: "South America (S\xE3o Paulo)" },
|
|
522
|
+
// These are not official - they weren't generated by Amazon Q nor were
|
|
523
|
+
// found in the AWS documentation but another Roo contributor found apac.
|
|
524
|
+
// Was needed so I've added the pattern of the other geo zones.
|
|
525
|
+
"apac.": { regionId: "ap-southeast-1", description: "Default APAC region", pattern: "ap-", multiRegion: true },
|
|
526
|
+
"emea.": { regionId: "eu-west-1", description: "Default EMEA region", pattern: "eu-", multiRegion: true },
|
|
527
|
+
"amer.": { regionId: "us-east-1", description: "Default Americas region", pattern: "us-", multiRegion: true }
|
|
528
|
+
};
|
|
529
|
+
var BEDROCK_REGIONS = Object.values(BEDROCK_REGION_INFO).map((info) => ({ value: info.regionId, label: info.regionId })).filter((region, index, self) => index === self.findIndex((r) => r.value === region.value)).sort((a, b) => a.value.localeCompare(b.value));
|
|
530
|
+
|
|
531
|
+
// src/providers/chutes.ts
|
|
532
|
+
var chutesDefaultModelId = "deepseek-ai/DeepSeek-R1-0528";
|
|
533
|
+
var chutesModels = {
|
|
534
|
+
"deepseek-ai/DeepSeek-R1-0528": {
|
|
535
|
+
maxTokens: 32768,
|
|
536
|
+
contextWindow: 163840,
|
|
537
|
+
supportsImages: false,
|
|
538
|
+
supportsPromptCache: false,
|
|
539
|
+
inputPrice: 0,
|
|
540
|
+
outputPrice: 0,
|
|
541
|
+
description: "DeepSeek R1 0528 model."
|
|
542
|
+
},
|
|
543
|
+
"deepseek-ai/DeepSeek-R1": {
|
|
544
|
+
maxTokens: 32768,
|
|
545
|
+
contextWindow: 163840,
|
|
546
|
+
supportsImages: false,
|
|
547
|
+
supportsPromptCache: false,
|
|
548
|
+
inputPrice: 0,
|
|
549
|
+
outputPrice: 0,
|
|
550
|
+
description: "DeepSeek R1 model."
|
|
551
|
+
},
|
|
552
|
+
"deepseek-ai/DeepSeek-V3": {
|
|
553
|
+
maxTokens: 32768,
|
|
554
|
+
contextWindow: 163840,
|
|
555
|
+
supportsImages: false,
|
|
556
|
+
supportsPromptCache: false,
|
|
557
|
+
inputPrice: 0,
|
|
558
|
+
outputPrice: 0,
|
|
559
|
+
description: "DeepSeek V3 model."
|
|
560
|
+
},
|
|
561
|
+
"unsloth/Llama-3.3-70B-Instruct": {
|
|
562
|
+
maxTokens: 32768,
|
|
563
|
+
// From Groq
|
|
564
|
+
contextWindow: 131072,
|
|
565
|
+
// From Groq
|
|
566
|
+
supportsImages: false,
|
|
567
|
+
supportsPromptCache: false,
|
|
568
|
+
inputPrice: 0,
|
|
569
|
+
outputPrice: 0,
|
|
570
|
+
description: "Unsloth Llama 3.3 70B Instruct model."
|
|
571
|
+
},
|
|
572
|
+
"chutesai/Llama-4-Scout-17B-16E-Instruct": {
|
|
573
|
+
maxTokens: 32768,
|
|
574
|
+
contextWindow: 512e3,
|
|
575
|
+
supportsImages: false,
|
|
576
|
+
supportsPromptCache: false,
|
|
577
|
+
inputPrice: 0,
|
|
578
|
+
outputPrice: 0,
|
|
579
|
+
description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context."
|
|
580
|
+
},
|
|
581
|
+
"unsloth/Mistral-Nemo-Instruct-2407": {
|
|
582
|
+
maxTokens: 32768,
|
|
583
|
+
contextWindow: 128e3,
|
|
584
|
+
supportsImages: false,
|
|
585
|
+
supportsPromptCache: false,
|
|
586
|
+
inputPrice: 0,
|
|
587
|
+
outputPrice: 0,
|
|
588
|
+
description: "Unsloth Mistral Nemo Instruct model."
|
|
589
|
+
},
|
|
590
|
+
"unsloth/gemma-3-12b-it": {
|
|
591
|
+
maxTokens: 32768,
|
|
592
|
+
contextWindow: 131072,
|
|
593
|
+
supportsImages: false,
|
|
594
|
+
supportsPromptCache: false,
|
|
595
|
+
inputPrice: 0,
|
|
596
|
+
outputPrice: 0,
|
|
597
|
+
description: "Unsloth Gemma 3 12B IT model."
|
|
598
|
+
},
|
|
599
|
+
"NousResearch/DeepHermes-3-Llama-3-8B-Preview": {
|
|
600
|
+
maxTokens: 32768,
|
|
601
|
+
contextWindow: 131072,
|
|
602
|
+
supportsImages: false,
|
|
603
|
+
supportsPromptCache: false,
|
|
604
|
+
inputPrice: 0,
|
|
605
|
+
outputPrice: 0,
|
|
606
|
+
description: "Nous DeepHermes 3 Llama 3 8B Preview model."
|
|
607
|
+
},
|
|
608
|
+
"unsloth/gemma-3-4b-it": {
|
|
609
|
+
maxTokens: 32768,
|
|
610
|
+
contextWindow: 131072,
|
|
611
|
+
supportsImages: false,
|
|
612
|
+
supportsPromptCache: false,
|
|
613
|
+
inputPrice: 0,
|
|
614
|
+
outputPrice: 0,
|
|
615
|
+
description: "Unsloth Gemma 3 4B IT model."
|
|
616
|
+
},
|
|
617
|
+
"nvidia/Llama-3_3-Nemotron-Super-49B-v1": {
|
|
618
|
+
maxTokens: 32768,
|
|
619
|
+
contextWindow: 131072,
|
|
620
|
+
supportsImages: false,
|
|
621
|
+
supportsPromptCache: false,
|
|
622
|
+
inputPrice: 0,
|
|
623
|
+
outputPrice: 0,
|
|
624
|
+
description: "Nvidia Llama 3.3 Nemotron Super 49B model."
|
|
625
|
+
},
|
|
626
|
+
"nvidia/Llama-3_1-Nemotron-Ultra-253B-v1": {
|
|
627
|
+
maxTokens: 32768,
|
|
628
|
+
contextWindow: 131072,
|
|
629
|
+
supportsImages: false,
|
|
630
|
+
supportsPromptCache: false,
|
|
631
|
+
inputPrice: 0,
|
|
632
|
+
outputPrice: 0,
|
|
633
|
+
description: "Nvidia Llama 3.1 Nemotron Ultra 253B model."
|
|
634
|
+
},
|
|
635
|
+
"chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8": {
|
|
636
|
+
maxTokens: 32768,
|
|
637
|
+
contextWindow: 256e3,
|
|
638
|
+
supportsImages: false,
|
|
639
|
+
supportsPromptCache: false,
|
|
640
|
+
inputPrice: 0,
|
|
641
|
+
outputPrice: 0,
|
|
642
|
+
description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model."
|
|
643
|
+
},
|
|
644
|
+
"deepseek-ai/DeepSeek-V3-Base": {
|
|
645
|
+
maxTokens: 32768,
|
|
646
|
+
contextWindow: 163840,
|
|
647
|
+
supportsImages: false,
|
|
648
|
+
supportsPromptCache: false,
|
|
649
|
+
inputPrice: 0,
|
|
650
|
+
outputPrice: 0,
|
|
651
|
+
description: "DeepSeek V3 Base model."
|
|
652
|
+
},
|
|
653
|
+
"deepseek-ai/DeepSeek-R1-Zero": {
|
|
654
|
+
maxTokens: 32768,
|
|
655
|
+
contextWindow: 163840,
|
|
656
|
+
supportsImages: false,
|
|
657
|
+
supportsPromptCache: false,
|
|
658
|
+
inputPrice: 0,
|
|
659
|
+
outputPrice: 0,
|
|
660
|
+
description: "DeepSeek R1 Zero model."
|
|
661
|
+
},
|
|
662
|
+
"deepseek-ai/DeepSeek-V3-0324": {
|
|
663
|
+
maxTokens: 32768,
|
|
664
|
+
contextWindow: 163840,
|
|
665
|
+
supportsImages: false,
|
|
666
|
+
supportsPromptCache: false,
|
|
667
|
+
inputPrice: 0,
|
|
668
|
+
outputPrice: 0,
|
|
669
|
+
description: "DeepSeek V3 (0324) model."
|
|
670
|
+
},
|
|
671
|
+
"Qwen/Qwen3-235B-A22B": {
|
|
672
|
+
maxTokens: 32768,
|
|
673
|
+
contextWindow: 40960,
|
|
674
|
+
supportsImages: false,
|
|
675
|
+
supportsPromptCache: false,
|
|
676
|
+
inputPrice: 0,
|
|
677
|
+
outputPrice: 0,
|
|
678
|
+
description: "Qwen3 235B A22B model."
|
|
679
|
+
},
|
|
680
|
+
"Qwen/Qwen3-32B": {
|
|
681
|
+
maxTokens: 32768,
|
|
682
|
+
contextWindow: 40960,
|
|
683
|
+
supportsImages: false,
|
|
684
|
+
supportsPromptCache: false,
|
|
685
|
+
inputPrice: 0,
|
|
686
|
+
outputPrice: 0,
|
|
687
|
+
description: "Qwen3 32B model."
|
|
688
|
+
},
|
|
689
|
+
"Qwen/Qwen3-30B-A3B": {
|
|
690
|
+
maxTokens: 32768,
|
|
691
|
+
contextWindow: 40960,
|
|
692
|
+
supportsImages: false,
|
|
693
|
+
supportsPromptCache: false,
|
|
694
|
+
inputPrice: 0,
|
|
695
|
+
outputPrice: 0,
|
|
696
|
+
description: "Qwen3 30B A3B model."
|
|
697
|
+
},
|
|
698
|
+
"Qwen/Qwen3-14B": {
|
|
699
|
+
maxTokens: 32768,
|
|
700
|
+
contextWindow: 40960,
|
|
701
|
+
supportsImages: false,
|
|
702
|
+
supportsPromptCache: false,
|
|
703
|
+
inputPrice: 0,
|
|
704
|
+
outputPrice: 0,
|
|
705
|
+
description: "Qwen3 14B model."
|
|
706
|
+
},
|
|
707
|
+
"Qwen/Qwen3-8B": {
|
|
708
|
+
maxTokens: 32768,
|
|
709
|
+
contextWindow: 40960,
|
|
710
|
+
supportsImages: false,
|
|
711
|
+
supportsPromptCache: false,
|
|
712
|
+
inputPrice: 0,
|
|
713
|
+
outputPrice: 0,
|
|
714
|
+
description: "Qwen3 8B model."
|
|
715
|
+
},
|
|
716
|
+
"microsoft/MAI-DS-R1-FP8": {
|
|
717
|
+
maxTokens: 32768,
|
|
718
|
+
contextWindow: 163840,
|
|
719
|
+
supportsImages: false,
|
|
720
|
+
supportsPromptCache: false,
|
|
721
|
+
inputPrice: 0,
|
|
722
|
+
outputPrice: 0,
|
|
723
|
+
description: "Microsoft MAI-DS-R1 FP8 model."
|
|
724
|
+
},
|
|
725
|
+
"tngtech/DeepSeek-R1T-Chimera": {
|
|
726
|
+
maxTokens: 32768,
|
|
727
|
+
contextWindow: 163840,
|
|
728
|
+
supportsImages: false,
|
|
729
|
+
supportsPromptCache: false,
|
|
730
|
+
inputPrice: 0,
|
|
731
|
+
outputPrice: 0,
|
|
732
|
+
description: "TNGTech DeepSeek R1T Chimera model."
|
|
733
|
+
}
|
|
734
|
+
};
|
|
735
|
+
|
|
736
|
+
// src/providers/deepseek.ts
|
|
737
|
+
var deepSeekDefaultModelId = "deepseek-chat";
|
|
738
|
+
var deepSeekModels = {
|
|
739
|
+
"deepseek-chat": {
|
|
740
|
+
maxTokens: 8192,
|
|
741
|
+
contextWindow: 64e3,
|
|
742
|
+
supportsImages: false,
|
|
743
|
+
supportsPromptCache: true,
|
|
744
|
+
inputPrice: 0.27,
|
|
745
|
+
// $0.27 per million tokens (cache miss)
|
|
746
|
+
outputPrice: 1.1,
|
|
747
|
+
// $1.10 per million tokens
|
|
748
|
+
cacheWritesPrice: 0.27,
|
|
749
|
+
// $0.27 per million tokens (cache miss)
|
|
750
|
+
cacheReadsPrice: 0.07,
|
|
751
|
+
// $0.07 per million tokens (cache hit).
|
|
752
|
+
description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`
|
|
753
|
+
},
|
|
754
|
+
"deepseek-reasoner": {
|
|
755
|
+
maxTokens: 8192,
|
|
756
|
+
contextWindow: 64e3,
|
|
757
|
+
supportsImages: false,
|
|
758
|
+
supportsPromptCache: true,
|
|
759
|
+
inputPrice: 0.55,
|
|
760
|
+
// $0.55 per million tokens (cache miss)
|
|
761
|
+
outputPrice: 2.19,
|
|
762
|
+
// $2.19 per million tokens
|
|
763
|
+
cacheWritesPrice: 0.55,
|
|
764
|
+
// $0.55 per million tokens (cache miss)
|
|
765
|
+
cacheReadsPrice: 0.14,
|
|
766
|
+
// $0.14 per million tokens (cache hit)
|
|
767
|
+
description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 32K tokens.`
|
|
768
|
+
}
|
|
769
|
+
};
|
|
770
|
+
var DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6;
|
|
771
|
+
|
|
772
|
+
// src/providers/gemini.ts
|
|
773
|
+
var geminiDefaultModelId = "gemini-2.0-flash-001";
|
|
774
|
+
var geminiModels = {
|
|
775
|
+
"gemini-2.5-flash-preview-04-17:thinking": {
|
|
776
|
+
maxTokens: 65535,
|
|
777
|
+
contextWindow: 1048576,
|
|
778
|
+
supportsImages: true,
|
|
779
|
+
supportsPromptCache: false,
|
|
780
|
+
inputPrice: 0.15,
|
|
781
|
+
outputPrice: 3.5,
|
|
782
|
+
maxThinkingTokens: 24576,
|
|
783
|
+
supportsReasoningBudget: true,
|
|
784
|
+
requiredReasoningBudget: true
|
|
785
|
+
},
|
|
786
|
+
"gemini-2.5-flash-preview-04-17": {
|
|
787
|
+
maxTokens: 65535,
|
|
788
|
+
contextWindow: 1048576,
|
|
789
|
+
supportsImages: true,
|
|
790
|
+
supportsPromptCache: false,
|
|
791
|
+
inputPrice: 0.15,
|
|
792
|
+
outputPrice: 0.6
|
|
793
|
+
},
|
|
794
|
+
"gemini-2.5-flash-preview-05-20:thinking": {
|
|
795
|
+
maxTokens: 65535,
|
|
796
|
+
contextWindow: 1048576,
|
|
797
|
+
supportsImages: true,
|
|
798
|
+
supportsPromptCache: true,
|
|
799
|
+
inputPrice: 0.15,
|
|
800
|
+
outputPrice: 3.5,
|
|
801
|
+
cacheReadsPrice: 0.0375,
|
|
802
|
+
cacheWritesPrice: 1,
|
|
803
|
+
maxThinkingTokens: 24576,
|
|
804
|
+
supportsReasoningBudget: true,
|
|
805
|
+
requiredReasoningBudget: true
|
|
806
|
+
},
|
|
807
|
+
"gemini-2.5-flash-preview-05-20": {
|
|
808
|
+
maxTokens: 65535,
|
|
809
|
+
contextWindow: 1048576,
|
|
810
|
+
supportsImages: true,
|
|
811
|
+
supportsPromptCache: true,
|
|
812
|
+
inputPrice: 0.15,
|
|
813
|
+
outputPrice: 0.6,
|
|
814
|
+
cacheReadsPrice: 0.0375,
|
|
815
|
+
cacheWritesPrice: 1
|
|
816
|
+
},
|
|
817
|
+
"gemini-2.5-pro-exp-03-25": {
|
|
818
|
+
maxTokens: 65535,
|
|
819
|
+
contextWindow: 1048576,
|
|
820
|
+
supportsImages: true,
|
|
821
|
+
supportsPromptCache: false,
|
|
822
|
+
inputPrice: 0,
|
|
823
|
+
outputPrice: 0
|
|
824
|
+
},
|
|
825
|
+
"gemini-2.5-pro-preview-03-25": {
|
|
826
|
+
maxTokens: 65535,
|
|
827
|
+
contextWindow: 1048576,
|
|
828
|
+
supportsImages: true,
|
|
829
|
+
supportsPromptCache: true,
|
|
830
|
+
inputPrice: 2.5,
|
|
831
|
+
// This is the pricing for prompts above 200k tokens.
|
|
832
|
+
outputPrice: 15,
|
|
833
|
+
cacheReadsPrice: 0.625,
|
|
834
|
+
cacheWritesPrice: 4.5,
|
|
835
|
+
tiers: [
|
|
836
|
+
{
|
|
837
|
+
contextWindow: 2e5,
|
|
838
|
+
inputPrice: 1.25,
|
|
839
|
+
outputPrice: 10,
|
|
840
|
+
cacheReadsPrice: 0.31
|
|
841
|
+
},
|
|
842
|
+
{
|
|
843
|
+
contextWindow: Infinity,
|
|
844
|
+
inputPrice: 2.5,
|
|
845
|
+
outputPrice: 15,
|
|
846
|
+
cacheReadsPrice: 0.625
|
|
847
|
+
}
|
|
848
|
+
]
|
|
849
|
+
},
|
|
850
|
+
"gemini-2.5-pro-preview-05-06": {
|
|
851
|
+
maxTokens: 65535,
|
|
852
|
+
contextWindow: 1048576,
|
|
853
|
+
supportsImages: true,
|
|
854
|
+
supportsPromptCache: true,
|
|
855
|
+
inputPrice: 2.5,
|
|
856
|
+
// This is the pricing for prompts above 200k tokens.
|
|
857
|
+
outputPrice: 15,
|
|
858
|
+
cacheReadsPrice: 0.625,
|
|
859
|
+
cacheWritesPrice: 4.5,
|
|
860
|
+
tiers: [
|
|
861
|
+
{
|
|
862
|
+
contextWindow: 2e5,
|
|
863
|
+
inputPrice: 1.25,
|
|
864
|
+
outputPrice: 10,
|
|
865
|
+
cacheReadsPrice: 0.31
|
|
866
|
+
},
|
|
867
|
+
{
|
|
868
|
+
contextWindow: Infinity,
|
|
869
|
+
inputPrice: 2.5,
|
|
870
|
+
outputPrice: 15,
|
|
871
|
+
cacheReadsPrice: 0.625
|
|
872
|
+
}
|
|
873
|
+
]
|
|
874
|
+
},
|
|
875
|
+
"gemini-2.0-flash-001": {
|
|
876
|
+
maxTokens: 8192,
|
|
877
|
+
contextWindow: 1048576,
|
|
878
|
+
supportsImages: true,
|
|
879
|
+
supportsPromptCache: true,
|
|
880
|
+
inputPrice: 0.1,
|
|
881
|
+
outputPrice: 0.4,
|
|
882
|
+
cacheReadsPrice: 0.025,
|
|
883
|
+
cacheWritesPrice: 1
|
|
884
|
+
},
|
|
885
|
+
"gemini-2.0-flash-lite-preview-02-05": {
|
|
886
|
+
maxTokens: 8192,
|
|
887
|
+
contextWindow: 1048576,
|
|
888
|
+
supportsImages: true,
|
|
889
|
+
supportsPromptCache: false,
|
|
890
|
+
inputPrice: 0,
|
|
891
|
+
outputPrice: 0
|
|
892
|
+
},
|
|
893
|
+
"gemini-2.0-pro-exp-02-05": {
|
|
894
|
+
maxTokens: 8192,
|
|
895
|
+
contextWindow: 2097152,
|
|
896
|
+
supportsImages: true,
|
|
897
|
+
supportsPromptCache: false,
|
|
898
|
+
inputPrice: 0,
|
|
899
|
+
outputPrice: 0
|
|
900
|
+
},
|
|
901
|
+
"gemini-2.0-flash-thinking-exp-01-21": {
|
|
902
|
+
maxTokens: 65536,
|
|
903
|
+
contextWindow: 1048576,
|
|
904
|
+
supportsImages: true,
|
|
905
|
+
supportsPromptCache: false,
|
|
906
|
+
inputPrice: 0,
|
|
907
|
+
outputPrice: 0
|
|
908
|
+
},
|
|
909
|
+
"gemini-2.0-flash-thinking-exp-1219": {
|
|
910
|
+
maxTokens: 8192,
|
|
911
|
+
contextWindow: 32767,
|
|
912
|
+
supportsImages: true,
|
|
913
|
+
supportsPromptCache: false,
|
|
914
|
+
inputPrice: 0,
|
|
915
|
+
outputPrice: 0
|
|
916
|
+
},
|
|
917
|
+
"gemini-2.0-flash-exp": {
|
|
918
|
+
maxTokens: 8192,
|
|
919
|
+
contextWindow: 1048576,
|
|
920
|
+
supportsImages: true,
|
|
921
|
+
supportsPromptCache: false,
|
|
922
|
+
inputPrice: 0,
|
|
923
|
+
outputPrice: 0
|
|
924
|
+
},
|
|
925
|
+
"gemini-1.5-flash-002": {
|
|
926
|
+
maxTokens: 8192,
|
|
927
|
+
contextWindow: 1048576,
|
|
928
|
+
supportsImages: true,
|
|
929
|
+
supportsPromptCache: true,
|
|
930
|
+
inputPrice: 0.15,
|
|
931
|
+
// This is the pricing for prompts above 128k tokens.
|
|
932
|
+
outputPrice: 0.6,
|
|
933
|
+
cacheReadsPrice: 0.0375,
|
|
934
|
+
cacheWritesPrice: 1,
|
|
935
|
+
tiers: [
|
|
936
|
+
{
|
|
937
|
+
contextWindow: 128e3,
|
|
938
|
+
inputPrice: 0.075,
|
|
939
|
+
outputPrice: 0.3,
|
|
940
|
+
cacheReadsPrice: 0.01875
|
|
941
|
+
},
|
|
942
|
+
{
|
|
943
|
+
contextWindow: Infinity,
|
|
944
|
+
inputPrice: 0.15,
|
|
945
|
+
outputPrice: 0.6,
|
|
946
|
+
cacheReadsPrice: 0.0375
|
|
947
|
+
}
|
|
948
|
+
]
|
|
949
|
+
},
|
|
950
|
+
"gemini-1.5-flash-exp-0827": {
|
|
951
|
+
maxTokens: 8192,
|
|
952
|
+
contextWindow: 1048576,
|
|
953
|
+
supportsImages: true,
|
|
954
|
+
supportsPromptCache: false,
|
|
955
|
+
inputPrice: 0,
|
|
956
|
+
outputPrice: 0
|
|
957
|
+
},
|
|
958
|
+
"gemini-1.5-flash-8b-exp-0827": {
|
|
959
|
+
maxTokens: 8192,
|
|
960
|
+
contextWindow: 1048576,
|
|
961
|
+
supportsImages: true,
|
|
962
|
+
supportsPromptCache: false,
|
|
963
|
+
inputPrice: 0,
|
|
964
|
+
outputPrice: 0
|
|
965
|
+
},
|
|
966
|
+
"gemini-1.5-pro-002": {
|
|
967
|
+
maxTokens: 8192,
|
|
968
|
+
contextWindow: 2097152,
|
|
969
|
+
supportsImages: true,
|
|
970
|
+
supportsPromptCache: false,
|
|
971
|
+
inputPrice: 0,
|
|
972
|
+
outputPrice: 0
|
|
973
|
+
},
|
|
974
|
+
"gemini-1.5-pro-exp-0827": {
|
|
975
|
+
maxTokens: 8192,
|
|
976
|
+
contextWindow: 2097152,
|
|
977
|
+
supportsImages: true,
|
|
978
|
+
supportsPromptCache: false,
|
|
979
|
+
inputPrice: 0,
|
|
980
|
+
outputPrice: 0
|
|
981
|
+
},
|
|
982
|
+
"gemini-exp-1206": {
|
|
983
|
+
maxTokens: 8192,
|
|
984
|
+
contextWindow: 2097152,
|
|
985
|
+
supportsImages: true,
|
|
986
|
+
supportsPromptCache: false,
|
|
987
|
+
inputPrice: 0,
|
|
988
|
+
outputPrice: 0
|
|
989
|
+
}
|
|
990
|
+
};
|
|
991
|
+
|
|
992
|
+
// src/providers/glama.ts
|
|
993
|
+
var glamaDefaultModelId = "anthropic/claude-3-7-sonnet";
|
|
994
|
+
var glamaDefaultModelInfo = {
|
|
995
|
+
maxTokens: 8192,
|
|
996
|
+
contextWindow: 2e5,
|
|
997
|
+
supportsImages: true,
|
|
998
|
+
supportsComputerUse: true,
|
|
999
|
+
supportsPromptCache: true,
|
|
1000
|
+
inputPrice: 3,
|
|
1001
|
+
outputPrice: 15,
|
|
1002
|
+
cacheWritesPrice: 3.75,
|
|
1003
|
+
cacheReadsPrice: 0.3,
|
|
1004
|
+
description: "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)"
|
|
1005
|
+
};
|
|
1006
|
+
var GLAMA_DEFAULT_TEMPERATURE = 0;
|
|
1007
|
+
|
|
1008
|
+
// src/providers/groq.ts
|
|
1009
|
+
var groqDefaultModelId = "llama-3.3-70b-versatile";
|
|
1010
|
+
var groqModels = {
|
|
1011
|
+
// Models based on API response: https://api.groq.com/openai/v1/models
|
|
1012
|
+
"llama-3.1-8b-instant": {
|
|
1013
|
+
maxTokens: 131072,
|
|
1014
|
+
contextWindow: 131072,
|
|
1015
|
+
supportsImages: false,
|
|
1016
|
+
supportsPromptCache: false,
|
|
1017
|
+
inputPrice: 0,
|
|
1018
|
+
outputPrice: 0,
|
|
1019
|
+
description: "Meta Llama 3.1 8B Instant model, 128K context."
|
|
1020
|
+
},
|
|
1021
|
+
"llama-3.3-70b-versatile": {
|
|
1022
|
+
maxTokens: 32768,
|
|
1023
|
+
contextWindow: 131072,
|
|
1024
|
+
supportsImages: false,
|
|
1025
|
+
supportsPromptCache: false,
|
|
1026
|
+
inputPrice: 0,
|
|
1027
|
+
outputPrice: 0,
|
|
1028
|
+
description: "Meta Llama 3.3 70B Versatile model, 128K context."
|
|
1029
|
+
},
|
|
1030
|
+
"meta-llama/llama-4-scout-17b-16e-instruct": {
|
|
1031
|
+
maxTokens: 8192,
|
|
1032
|
+
contextWindow: 131072,
|
|
1033
|
+
supportsImages: false,
|
|
1034
|
+
supportsPromptCache: false,
|
|
1035
|
+
inputPrice: 0,
|
|
1036
|
+
outputPrice: 0,
|
|
1037
|
+
description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
|
|
1038
|
+
},
|
|
1039
|
+
"meta-llama/llama-4-maverick-17b-128e-instruct": {
|
|
1040
|
+
maxTokens: 8192,
|
|
1041
|
+
contextWindow: 131072,
|
|
1042
|
+
supportsImages: false,
|
|
1043
|
+
supportsPromptCache: false,
|
|
1044
|
+
inputPrice: 0,
|
|
1045
|
+
outputPrice: 0,
|
|
1046
|
+
description: "Meta Llama 4 Maverick 17B Instruct model, 128K context."
|
|
1047
|
+
},
|
|
1048
|
+
"mistral-saba-24b": {
|
|
1049
|
+
maxTokens: 32768,
|
|
1050
|
+
contextWindow: 32768,
|
|
1051
|
+
supportsImages: false,
|
|
1052
|
+
supportsPromptCache: false,
|
|
1053
|
+
inputPrice: 0,
|
|
1054
|
+
outputPrice: 0,
|
|
1055
|
+
description: "Mistral Saba 24B model, 32K context."
|
|
1056
|
+
},
|
|
1057
|
+
"qwen-qwq-32b": {
|
|
1058
|
+
maxTokens: 131072,
|
|
1059
|
+
contextWindow: 131072,
|
|
1060
|
+
supportsImages: false,
|
|
1061
|
+
supportsPromptCache: false,
|
|
1062
|
+
inputPrice: 0,
|
|
1063
|
+
outputPrice: 0,
|
|
1064
|
+
description: "Alibaba Qwen QwQ 32B model, 128K context."
|
|
1065
|
+
},
|
|
1066
|
+
"deepseek-r1-distill-llama-70b": {
|
|
1067
|
+
maxTokens: 131072,
|
|
1068
|
+
contextWindow: 131072,
|
|
1069
|
+
supportsImages: false,
|
|
1070
|
+
supportsPromptCache: false,
|
|
1071
|
+
inputPrice: 0,
|
|
1072
|
+
outputPrice: 0,
|
|
1073
|
+
description: "DeepSeek R1 Distill Llama 70B model, 128K context."
|
|
1074
|
+
}
|
|
1075
|
+
};
|
|
1076
|
+
|
|
1077
|
+
// src/providers/lite-llm.ts
|
|
1078
|
+
var litellmDefaultModelId = "claude-3-7-sonnet-20250219";
|
|
1079
|
+
var litellmDefaultModelInfo = {
|
|
1080
|
+
maxTokens: 8192,
|
|
1081
|
+
contextWindow: 2e5,
|
|
1082
|
+
supportsImages: true,
|
|
1083
|
+
supportsComputerUse: true,
|
|
1084
|
+
supportsPromptCache: true,
|
|
1085
|
+
inputPrice: 3,
|
|
1086
|
+
outputPrice: 15,
|
|
1087
|
+
cacheWritesPrice: 3.75,
|
|
1088
|
+
cacheReadsPrice: 0.3
|
|
1089
|
+
};
|
|
1090
|
+
var LITELLM_COMPUTER_USE_MODELS = /* @__PURE__ */ new Set([
|
|
1091
|
+
"claude-3-5-sonnet-latest",
|
|
1092
|
+
"claude-opus-4-20250514",
|
|
1093
|
+
"claude-sonnet-4-20250514",
|
|
1094
|
+
"claude-3-7-sonnet-latest",
|
|
1095
|
+
"claude-3-7-sonnet-20250219",
|
|
1096
|
+
"claude-3-5-sonnet-20241022",
|
|
1097
|
+
"vertex_ai/claude-3-5-sonnet",
|
|
1098
|
+
"vertex_ai/claude-3-5-sonnet-v2",
|
|
1099
|
+
"vertex_ai/claude-3-5-sonnet-v2@20241022",
|
|
1100
|
+
"vertex_ai/claude-3-7-sonnet@20250219",
|
|
1101
|
+
"vertex_ai/claude-opus-4@20250514",
|
|
1102
|
+
"vertex_ai/claude-sonnet-4@20250514",
|
|
1103
|
+
"openrouter/anthropic/claude-3.5-sonnet",
|
|
1104
|
+
"openrouter/anthropic/claude-3.5-sonnet:beta",
|
|
1105
|
+
"openrouter/anthropic/claude-3.7-sonnet",
|
|
1106
|
+
"openrouter/anthropic/claude-3.7-sonnet:beta",
|
|
1107
|
+
"anthropic.claude-opus-4-20250514-v1:0",
|
|
1108
|
+
"anthropic.claude-sonnet-4-20250514-v1:0",
|
|
1109
|
+
"anthropic.claude-3-7-sonnet-20250219-v1:0",
|
|
1110
|
+
"anthropic.claude-3-5-sonnet-20241022-v2:0",
|
|
1111
|
+
"us.anthropic.claude-3-5-sonnet-20241022-v2:0",
|
|
1112
|
+
"us.anthropic.claude-3-7-sonnet-20250219-v1:0",
|
|
1113
|
+
"us.anthropic.claude-opus-4-20250514-v1:0",
|
|
1114
|
+
"us.anthropic.claude-sonnet-4-20250514-v1:0",
|
|
1115
|
+
"eu.anthropic.claude-3-5-sonnet-20241022-v2:0",
|
|
1116
|
+
"eu.anthropic.claude-3-7-sonnet-20250219-v1:0",
|
|
1117
|
+
"eu.anthropic.claude-opus-4-20250514-v1:0",
|
|
1118
|
+
"eu.anthropic.claude-sonnet-4-20250514-v1:0",
|
|
1119
|
+
"snowflake/claude-3-5-sonnet"
|
|
1120
|
+
]);
|
|
1121
|
+
|
|
1122
|
+
// src/providers/lm-studio.ts
|
|
1123
|
+
var LMSTUDIO_DEFAULT_TEMPERATURE = 0;
|
|
1124
|
+
|
|
1125
|
+
// src/providers/mistral.ts
|
|
1126
|
+
var mistralDefaultModelId = "codestral-latest";
|
|
1127
|
+
var mistralModels = {
|
|
1128
|
+
"codestral-latest": {
|
|
1129
|
+
maxTokens: 256e3,
|
|
1130
|
+
contextWindow: 256e3,
|
|
1131
|
+
supportsImages: false,
|
|
1132
|
+
supportsPromptCache: false,
|
|
1133
|
+
inputPrice: 0.3,
|
|
1134
|
+
outputPrice: 0.9
|
|
1135
|
+
},
|
|
1136
|
+
"mistral-large-latest": {
|
|
1137
|
+
maxTokens: 131e3,
|
|
1138
|
+
contextWindow: 131e3,
|
|
1139
|
+
supportsImages: false,
|
|
1140
|
+
supportsPromptCache: false,
|
|
1141
|
+
inputPrice: 2,
|
|
1142
|
+
outputPrice: 6
|
|
1143
|
+
},
|
|
1144
|
+
"ministral-8b-latest": {
|
|
1145
|
+
maxTokens: 131e3,
|
|
1146
|
+
contextWindow: 131e3,
|
|
1147
|
+
supportsImages: false,
|
|
1148
|
+
supportsPromptCache: false,
|
|
1149
|
+
inputPrice: 0.1,
|
|
1150
|
+
outputPrice: 0.1
|
|
1151
|
+
},
|
|
1152
|
+
"ministral-3b-latest": {
|
|
1153
|
+
maxTokens: 131e3,
|
|
1154
|
+
contextWindow: 131e3,
|
|
1155
|
+
supportsImages: false,
|
|
1156
|
+
supportsPromptCache: false,
|
|
1157
|
+
inputPrice: 0.04,
|
|
1158
|
+
outputPrice: 0.04
|
|
1159
|
+
},
|
|
1160
|
+
"mistral-small-latest": {
|
|
1161
|
+
maxTokens: 32e3,
|
|
1162
|
+
contextWindow: 32e3,
|
|
1163
|
+
supportsImages: false,
|
|
1164
|
+
supportsPromptCache: false,
|
|
1165
|
+
inputPrice: 0.2,
|
|
1166
|
+
outputPrice: 0.6
|
|
1167
|
+
},
|
|
1168
|
+
"pixtral-large-latest": {
|
|
1169
|
+
maxTokens: 131e3,
|
|
1170
|
+
contextWindow: 131e3,
|
|
1171
|
+
supportsImages: true,
|
|
1172
|
+
supportsPromptCache: false,
|
|
1173
|
+
inputPrice: 2,
|
|
1174
|
+
outputPrice: 6
|
|
1175
|
+
}
|
|
1176
|
+
};
|
|
1177
|
+
var MISTRAL_DEFAULT_TEMPERATURE = 0;
|
|
1178
|
+
|
|
1179
|
+
// src/providers/openai.ts
|
|
1180
|
+
var openAiNativeDefaultModelId = "gpt-4.1";
|
|
1181
|
+
var openAiNativeModels = {
|
|
1182
|
+
"gpt-4.1": {
|
|
1183
|
+
maxTokens: 32768,
|
|
1184
|
+
contextWindow: 1047576,
|
|
1185
|
+
supportsImages: true,
|
|
1186
|
+
supportsPromptCache: true,
|
|
1187
|
+
inputPrice: 2,
|
|
1188
|
+
outputPrice: 8,
|
|
1189
|
+
cacheReadsPrice: 0.5
|
|
1190
|
+
},
|
|
1191
|
+
"gpt-4.1-mini": {
|
|
1192
|
+
maxTokens: 32768,
|
|
1193
|
+
contextWindow: 1047576,
|
|
1194
|
+
supportsImages: true,
|
|
1195
|
+
supportsPromptCache: true,
|
|
1196
|
+
inputPrice: 0.4,
|
|
1197
|
+
outputPrice: 1.6,
|
|
1198
|
+
cacheReadsPrice: 0.1
|
|
1199
|
+
},
|
|
1200
|
+
"gpt-4.1-nano": {
|
|
1201
|
+
maxTokens: 32768,
|
|
1202
|
+
contextWindow: 1047576,
|
|
1203
|
+
supportsImages: true,
|
|
1204
|
+
supportsPromptCache: true,
|
|
1205
|
+
inputPrice: 0.1,
|
|
1206
|
+
outputPrice: 0.4,
|
|
1207
|
+
cacheReadsPrice: 0.025
|
|
1208
|
+
},
|
|
1209
|
+
o3: {
|
|
1210
|
+
maxTokens: 1e5,
|
|
1211
|
+
contextWindow: 2e5,
|
|
1212
|
+
supportsImages: true,
|
|
1213
|
+
supportsPromptCache: true,
|
|
1214
|
+
inputPrice: 10,
|
|
1215
|
+
outputPrice: 40,
|
|
1216
|
+
cacheReadsPrice: 2.5,
|
|
1217
|
+
supportsReasoningEffort: true,
|
|
1218
|
+
reasoningEffort: "medium"
|
|
1219
|
+
},
|
|
1220
|
+
"o3-high": {
|
|
1221
|
+
maxTokens: 1e5,
|
|
1222
|
+
contextWindow: 2e5,
|
|
1223
|
+
supportsImages: true,
|
|
1224
|
+
supportsPromptCache: true,
|
|
1225
|
+
inputPrice: 10,
|
|
1226
|
+
outputPrice: 40,
|
|
1227
|
+
cacheReadsPrice: 2.5,
|
|
1228
|
+
reasoningEffort: "high"
|
|
1229
|
+
},
|
|
1230
|
+
"o3-low": {
|
|
1231
|
+
maxTokens: 1e5,
|
|
1232
|
+
contextWindow: 2e5,
|
|
1233
|
+
supportsImages: true,
|
|
1234
|
+
supportsPromptCache: true,
|
|
1235
|
+
inputPrice: 10,
|
|
1236
|
+
outputPrice: 40,
|
|
1237
|
+
cacheReadsPrice: 2.5,
|
|
1238
|
+
reasoningEffort: "low"
|
|
1239
|
+
},
|
|
1240
|
+
"o4-mini": {
|
|
1241
|
+
maxTokens: 1e5,
|
|
1242
|
+
contextWindow: 2e5,
|
|
1243
|
+
supportsImages: true,
|
|
1244
|
+
supportsPromptCache: true,
|
|
1245
|
+
inputPrice: 1.1,
|
|
1246
|
+
outputPrice: 4.4,
|
|
1247
|
+
cacheReadsPrice: 0.275,
|
|
1248
|
+
supportsReasoningEffort: true,
|
|
1249
|
+
reasoningEffort: "medium"
|
|
1250
|
+
},
|
|
1251
|
+
"o4-mini-high": {
|
|
1252
|
+
maxTokens: 1e5,
|
|
1253
|
+
contextWindow: 2e5,
|
|
1254
|
+
supportsImages: true,
|
|
1255
|
+
supportsPromptCache: true,
|
|
1256
|
+
inputPrice: 1.1,
|
|
1257
|
+
outputPrice: 4.4,
|
|
1258
|
+
cacheReadsPrice: 0.275,
|
|
1259
|
+
reasoningEffort: "high"
|
|
1260
|
+
},
|
|
1261
|
+
"o4-mini-low": {
|
|
1262
|
+
maxTokens: 1e5,
|
|
1263
|
+
contextWindow: 2e5,
|
|
1264
|
+
supportsImages: true,
|
|
1265
|
+
supportsPromptCache: true,
|
|
1266
|
+
inputPrice: 1.1,
|
|
1267
|
+
outputPrice: 4.4,
|
|
1268
|
+
cacheReadsPrice: 0.275,
|
|
1269
|
+
reasoningEffort: "low"
|
|
1270
|
+
},
|
|
1271
|
+
"o3-mini": {
|
|
1272
|
+
maxTokens: 1e5,
|
|
1273
|
+
contextWindow: 2e5,
|
|
1274
|
+
supportsImages: false,
|
|
1275
|
+
supportsPromptCache: true,
|
|
1276
|
+
inputPrice: 1.1,
|
|
1277
|
+
outputPrice: 4.4,
|
|
1278
|
+
cacheReadsPrice: 0.55,
|
|
1279
|
+
supportsReasoningEffort: true,
|
|
1280
|
+
reasoningEffort: "medium"
|
|
1281
|
+
},
|
|
1282
|
+
"o3-mini-high": {
|
|
1283
|
+
maxTokens: 1e5,
|
|
1284
|
+
contextWindow: 2e5,
|
|
1285
|
+
supportsImages: false,
|
|
1286
|
+
supportsPromptCache: true,
|
|
1287
|
+
inputPrice: 1.1,
|
|
1288
|
+
outputPrice: 4.4,
|
|
1289
|
+
cacheReadsPrice: 0.55,
|
|
1290
|
+
reasoningEffort: "high"
|
|
1291
|
+
},
|
|
1292
|
+
"o3-mini-low": {
|
|
1293
|
+
maxTokens: 1e5,
|
|
1294
|
+
contextWindow: 2e5,
|
|
1295
|
+
supportsImages: false,
|
|
1296
|
+
supportsPromptCache: true,
|
|
1297
|
+
inputPrice: 1.1,
|
|
1298
|
+
outputPrice: 4.4,
|
|
1299
|
+
cacheReadsPrice: 0.55,
|
|
1300
|
+
reasoningEffort: "low"
|
|
1301
|
+
},
|
|
1302
|
+
o1: {
|
|
1303
|
+
maxTokens: 1e5,
|
|
1304
|
+
contextWindow: 2e5,
|
|
1305
|
+
supportsImages: true,
|
|
1306
|
+
supportsPromptCache: true,
|
|
1307
|
+
inputPrice: 15,
|
|
1308
|
+
outputPrice: 60,
|
|
1309
|
+
cacheReadsPrice: 7.5
|
|
1310
|
+
},
|
|
1311
|
+
"o1-preview": {
|
|
1312
|
+
maxTokens: 32768,
|
|
1313
|
+
contextWindow: 128e3,
|
|
1314
|
+
supportsImages: true,
|
|
1315
|
+
supportsPromptCache: true,
|
|
1316
|
+
inputPrice: 15,
|
|
1317
|
+
outputPrice: 60,
|
|
1318
|
+
cacheReadsPrice: 7.5
|
|
1319
|
+
},
|
|
1320
|
+
"o1-mini": {
|
|
1321
|
+
maxTokens: 65536,
|
|
1322
|
+
contextWindow: 128e3,
|
|
1323
|
+
supportsImages: true,
|
|
1324
|
+
supportsPromptCache: true,
|
|
1325
|
+
inputPrice: 1.1,
|
|
1326
|
+
outputPrice: 4.4,
|
|
1327
|
+
cacheReadsPrice: 0.55
|
|
1328
|
+
},
|
|
1329
|
+
"gpt-4.5-preview": {
|
|
1330
|
+
maxTokens: 16384,
|
|
1331
|
+
contextWindow: 128e3,
|
|
1332
|
+
supportsImages: true,
|
|
1333
|
+
supportsPromptCache: true,
|
|
1334
|
+
inputPrice: 75,
|
|
1335
|
+
outputPrice: 150,
|
|
1336
|
+
cacheReadsPrice: 37.5
|
|
1337
|
+
},
|
|
1338
|
+
"gpt-4o": {
|
|
1339
|
+
maxTokens: 16384,
|
|
1340
|
+
contextWindow: 128e3,
|
|
1341
|
+
supportsImages: true,
|
|
1342
|
+
supportsPromptCache: true,
|
|
1343
|
+
inputPrice: 2.5,
|
|
1344
|
+
outputPrice: 10,
|
|
1345
|
+
cacheReadsPrice: 1.25
|
|
1346
|
+
},
|
|
1347
|
+
"gpt-4o-mini": {
|
|
1348
|
+
maxTokens: 16384,
|
|
1349
|
+
contextWindow: 128e3,
|
|
1350
|
+
supportsImages: true,
|
|
1351
|
+
supportsPromptCache: true,
|
|
1352
|
+
inputPrice: 0.15,
|
|
1353
|
+
outputPrice: 0.6,
|
|
1354
|
+
cacheReadsPrice: 0.075
|
|
1355
|
+
}
|
|
1356
|
+
};
|
|
1357
|
+
var openAiModelInfoSaneDefaults = {
|
|
1358
|
+
maxTokens: -1,
|
|
1359
|
+
contextWindow: 128e3,
|
|
1360
|
+
supportsImages: true,
|
|
1361
|
+
supportsPromptCache: false,
|
|
1362
|
+
inputPrice: 0,
|
|
1363
|
+
outputPrice: 0
|
|
1364
|
+
};
|
|
1365
|
+
var azureOpenAiDefaultApiVersion = "2024-08-01-preview";
|
|
1366
|
+
var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
|
|
1367
|
+
var OPENAI_AZURE_AI_INFERENCE_PATH = "/models/chat/completions";
|
|
1368
|
+
|
|
1369
|
+
// src/providers/openrouter.ts
|
|
1370
|
+
var openRouterDefaultModelId = "anthropic/claude-sonnet-4";
|
|
1371
|
+
var openRouterDefaultModelInfo = {
|
|
1372
|
+
maxTokens: 8192,
|
|
1373
|
+
contextWindow: 2e5,
|
|
1374
|
+
supportsImages: true,
|
|
1375
|
+
supportsComputerUse: true,
|
|
1376
|
+
supportsPromptCache: true,
|
|
1377
|
+
inputPrice: 3,
|
|
1378
|
+
outputPrice: 15,
|
|
1379
|
+
cacheWritesPrice: 3.75,
|
|
1380
|
+
cacheReadsPrice: 0.3,
|
|
1381
|
+
description: "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)"
|
|
1382
|
+
};
|
|
1383
|
+
var OPENROUTER_DEFAULT_PROVIDER_NAME = "[default]";
|
|
1384
|
+
var OPEN_ROUTER_PROMPT_CACHING_MODELS = /* @__PURE__ */ new Set([
|
|
1385
|
+
"anthropic/claude-3-haiku",
|
|
1386
|
+
"anthropic/claude-3-haiku:beta",
|
|
1387
|
+
"anthropic/claude-3-opus",
|
|
1388
|
+
"anthropic/claude-3-opus:beta",
|
|
1389
|
+
"anthropic/claude-3-sonnet",
|
|
1390
|
+
"anthropic/claude-3-sonnet:beta",
|
|
1391
|
+
"anthropic/claude-3.5-haiku",
|
|
1392
|
+
"anthropic/claude-3.5-haiku-20241022",
|
|
1393
|
+
"anthropic/claude-3.5-haiku-20241022:beta",
|
|
1394
|
+
"anthropic/claude-3.5-haiku:beta",
|
|
1395
|
+
"anthropic/claude-3.5-sonnet",
|
|
1396
|
+
"anthropic/claude-3.5-sonnet-20240620",
|
|
1397
|
+
"anthropic/claude-3.5-sonnet-20240620:beta",
|
|
1398
|
+
"anthropic/claude-3.5-sonnet:beta",
|
|
1399
|
+
"anthropic/claude-3.7-sonnet",
|
|
1400
|
+
"anthropic/claude-3.7-sonnet:beta",
|
|
1401
|
+
"anthropic/claude-3.7-sonnet:thinking",
|
|
1402
|
+
"anthropic/claude-sonnet-4",
|
|
1403
|
+
"anthropic/claude-opus-4",
|
|
1404
|
+
"google/gemini-2.5-pro-preview",
|
|
1405
|
+
"google/gemini-2.5-flash-preview",
|
|
1406
|
+
"google/gemini-2.5-flash-preview:thinking",
|
|
1407
|
+
"google/gemini-2.5-flash-preview-05-20",
|
|
1408
|
+
"google/gemini-2.5-flash-preview-05-20:thinking",
|
|
1409
|
+
"google/gemini-2.0-flash-001",
|
|
1410
|
+
"google/gemini-flash-1.5",
|
|
1411
|
+
"google/gemini-flash-1.5-8b"
|
|
1412
|
+
]);
|
|
1413
|
+
var OPEN_ROUTER_COMPUTER_USE_MODELS = /* @__PURE__ */ new Set([
|
|
1414
|
+
"anthropic/claude-3.5-sonnet",
|
|
1415
|
+
"anthropic/claude-3.5-sonnet:beta",
|
|
1416
|
+
"anthropic/claude-3.7-sonnet",
|
|
1417
|
+
"anthropic/claude-3.7-sonnet:beta",
|
|
1418
|
+
"anthropic/claude-3.7-sonnet:thinking",
|
|
1419
|
+
"anthropic/claude-sonnet-4",
|
|
1420
|
+
"anthropic/claude-opus-4"
|
|
1421
|
+
]);
|
|
1422
|
+
var OPEN_ROUTER_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
|
|
1423
|
+
"anthropic/claude-3.7-sonnet:beta",
|
|
1424
|
+
"anthropic/claude-3.7-sonnet:thinking",
|
|
1425
|
+
"anthropic/claude-opus-4",
|
|
1426
|
+
"anthropic/claude-sonnet-4",
|
|
1427
|
+
"google/gemini-2.5-flash-preview-05-20",
|
|
1428
|
+
"google/gemini-2.5-flash-preview-05-20:thinking"
|
|
1429
|
+
]);
|
|
1430
|
+
var OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS = /* @__PURE__ */ new Set([
|
|
1431
|
+
"anthropic/claude-3.7-sonnet:thinking",
|
|
1432
|
+
"google/gemini-2.5-flash-preview-05-20:thinking"
|
|
1433
|
+
]);
+
+ // src/providers/requesty.ts
+ var requestyDefaultModelId = "coding/claude-4-sonnet";
+ var requestyDefaultModelInfo = {
+   maxTokens: 8192,
+   contextWindow: 2e5,
+   supportsImages: true,
+   supportsComputerUse: true,
+   supportsPromptCache: true,
+   inputPrice: 3,
+   outputPrice: 15,
+   cacheWritesPrice: 3.75,
+   cacheReadsPrice: 0.3,
+   description: "The best coding model, optimized by Requesty, and automatically routed to the fastest provider. Claude 4 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities."
+ };
+
+ // src/providers/unbound.ts
+ var unboundDefaultModelId = "anthropic/claude-3-7-sonnet-20250219";
+ var unboundDefaultModelInfo = {
+   maxTokens: 8192,
+   contextWindow: 2e5,
+   supportsImages: true,
+   supportsPromptCache: true,
+   inputPrice: 3,
+   outputPrice: 15,
+   cacheWritesPrice: 3.75,
+   cacheReadsPrice: 0.3
+ };
+
+ // src/providers/vertex.ts
+ var vertexDefaultModelId = "claude-sonnet-4@20250514";
+ var vertexModels = {
+   "gemini-2.5-flash-preview-05-20:thinking": {
+     maxTokens: 65535,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 0.15,
+     outputPrice: 3.5,
+     maxThinkingTokens: 24576,
+     supportsReasoningBudget: true,
+     requiredReasoningBudget: true
+   },
+   "gemini-2.5-flash-preview-05-20": {
+     maxTokens: 65535,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 0.15,
+     outputPrice: 0.6
+   },
+   "gemini-2.5-flash-preview-04-17:thinking": {
+     maxTokens: 65535,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0.15,
+     outputPrice: 3.5,
+     maxThinkingTokens: 24576,
+     supportsReasoningBudget: true,
+     requiredReasoningBudget: true
+   },
+   "gemini-2.5-flash-preview-04-17": {
+     maxTokens: 65535,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0.15,
+     outputPrice: 0.6
+   },
+   "gemini-2.5-pro-preview-03-25": {
+     maxTokens: 65535,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 2.5,
+     outputPrice: 15
+   },
+   "gemini-2.5-pro-preview-05-06": {
+     maxTokens: 65535,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 2.5,
+     outputPrice: 15
+   },
+   "gemini-2.5-pro-exp-03-25": {
+     maxTokens: 65535,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0
+   },
+   "gemini-2.0-pro-exp-02-05": {
+     maxTokens: 8192,
+     contextWindow: 2097152,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0
+   },
+   "gemini-2.0-flash-001": {
+     maxTokens: 8192,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 0.15,
+     outputPrice: 0.6
+   },
+   "gemini-2.0-flash-lite-001": {
+     maxTokens: 8192,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0.075,
+     outputPrice: 0.3
+   },
+   "gemini-2.0-flash-thinking-exp-01-21": {
+     maxTokens: 8192,
+     contextWindow: 32768,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0
+   },
+   "gemini-1.5-flash-002": {
+     maxTokens: 8192,
+     contextWindow: 1048576,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 0.075,
+     outputPrice: 0.3
+   },
+   "gemini-1.5-pro-002": {
+     maxTokens: 8192,
+     contextWindow: 2097152,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 1.25,
+     outputPrice: 5
+   },
+   "claude-sonnet-4@20250514": {
+     maxTokens: 8192,
+     contextWindow: 2e5,
+     supportsImages: true,
+     supportsComputerUse: true,
+     supportsPromptCache: true,
+     inputPrice: 3,
+     outputPrice: 15,
+     cacheWritesPrice: 3.75,
+     cacheReadsPrice: 0.3,
+     supportsReasoningBudget: true
+   },
+   "claude-opus-4@20250514": {
+     maxTokens: 8192,
+     contextWindow: 2e5,
+     supportsImages: true,
+     supportsComputerUse: true,
+     supportsPromptCache: true,
+     inputPrice: 15,
+     outputPrice: 75,
+     cacheWritesPrice: 18.75,
+     cacheReadsPrice: 1.5
+   },
+   "claude-3-7-sonnet@20250219:thinking": {
+     maxTokens: 64e3,
+     contextWindow: 2e5,
+     supportsImages: true,
+     supportsComputerUse: true,
+     supportsPromptCache: true,
+     inputPrice: 3,
+     outputPrice: 15,
+     cacheWritesPrice: 3.75,
+     cacheReadsPrice: 0.3,
+     supportsReasoningBudget: true,
+     requiredReasoningBudget: true
+   },
+   "claude-3-7-sonnet@20250219": {
+     maxTokens: 8192,
+     contextWindow: 2e5,
+     supportsImages: true,
+     supportsComputerUse: true,
+     supportsPromptCache: true,
+     inputPrice: 3,
+     outputPrice: 15,
+     cacheWritesPrice: 3.75,
+     cacheReadsPrice: 0.3
+   },
+   "claude-3-5-sonnet-v2@20241022": {
+     maxTokens: 8192,
+     contextWindow: 2e5,
+     supportsImages: true,
+     supportsComputerUse: true,
+     supportsPromptCache: true,
+     inputPrice: 3,
+     outputPrice: 15,
+     cacheWritesPrice: 3.75,
+     cacheReadsPrice: 0.3
+   },
+   "claude-3-5-sonnet@20240620": {
+     maxTokens: 8192,
+     contextWindow: 2e5,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 3,
+     outputPrice: 15,
+     cacheWritesPrice: 3.75,
+     cacheReadsPrice: 0.3
+   },
+   "claude-3-5-haiku@20241022": {
+     maxTokens: 8192,
+     contextWindow: 2e5,
+     supportsImages: false,
+     supportsPromptCache: true,
+     inputPrice: 1,
+     outputPrice: 5,
+     cacheWritesPrice: 1.25,
+     cacheReadsPrice: 0.1
+   },
+   "claude-3-opus@20240229": {
+     maxTokens: 4096,
+     contextWindow: 2e5,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 15,
+     outputPrice: 75,
+     cacheWritesPrice: 18.75,
+     cacheReadsPrice: 1.5
+   },
+   "claude-3-haiku@20240307": {
+     maxTokens: 4096,
+     contextWindow: 2e5,
+     supportsImages: true,
+     supportsPromptCache: true,
+     inputPrice: 0.25,
+     outputPrice: 1.25,
+     cacheWritesPrice: 0.3,
+     cacheReadsPrice: 0.03
+   }
+ };
+ var VERTEX_REGIONS = [
+   { value: "us-east5", label: "us-east5" },
+   { value: "us-central1", label: "us-central1" },
+   { value: "europe-west1", label: "europe-west1" },
+   { value: "europe-west4", label: "europe-west4" },
+   { value: "asia-southeast1", label: "asia-southeast1" }
+ ];
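Note: as elsewhere in this file, the price fields appear to be quoted in USD per one million tokens. A hedged sketch of estimating the cost of a single request from one of the Vertex entries above; the helper name is illustrative, the per-million-token assumption is stated in the comment, and the import specifier is assumed:

import { vertexModels } from "@roo-code/types"

// Illustrative only: estimate USD cost for one request, assuming inputPrice
// and outputPrice are quoted per one million tokens.
function estimateVertexCost(
  modelId: keyof typeof vertexModels,
  inputTokens: number,
  outputTokens: number,
): number {
  const info = vertexModels[modelId]
  const inputCost = (info.inputPrice ?? 0) * (inputTokens / 1_000_000)
  const outputCost = (info.outputPrice ?? 0) * (outputTokens / 1_000_000)
  return inputCost + outputCost
}

// estimateVertexCost("claude-sonnet-4@20250514", 10_000, 2_000)
// => 0.01 * 3 + 0.002 * 15 = 0.06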
+
+ // src/providers/vscode-llm.ts
+ var vscodeLlmDefaultModelId = "claude-3.5-sonnet";
+ var vscodeLlmModels = {
+   "gpt-3.5-turbo": {
+     contextWindow: 12114,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "gpt-3.5-turbo",
+     version: "gpt-3.5-turbo-0613",
+     name: "GPT 3.5 Turbo",
+     supportsToolCalling: true,
+     maxInputTokens: 12114
+   },
+   "gpt-4o-mini": {
+     contextWindow: 12115,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "gpt-4o-mini",
+     version: "gpt-4o-mini-2024-07-18",
+     name: "GPT-4o mini",
+     supportsToolCalling: true,
+     maxInputTokens: 12115
+   },
+   "gpt-4": {
+     contextWindow: 28501,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "gpt-4",
+     version: "gpt-4-0613",
+     name: "GPT 4",
+     supportsToolCalling: true,
+     maxInputTokens: 28501
+   },
+   "gpt-4-0125-preview": {
+     contextWindow: 63826,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "gpt-4-turbo",
+     version: "gpt-4-0125-preview",
+     name: "GPT 4 Turbo",
+     supportsToolCalling: true,
+     maxInputTokens: 63826
+   },
+   "gpt-4o": {
+     contextWindow: 63827,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "gpt-4o",
+     version: "gpt-4o-2024-11-20",
+     name: "GPT-4o",
+     supportsToolCalling: true,
+     maxInputTokens: 63827
+   },
+   o1: {
+     contextWindow: 19827,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "o1-ga",
+     version: "o1-2024-12-17",
+     name: "o1 (Preview)",
+     supportsToolCalling: true,
+     maxInputTokens: 19827
+   },
+   "o3-mini": {
+     contextWindow: 63827,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "o3-mini",
+     version: "o3-mini-2025-01-31",
+     name: "o3-mini",
+     supportsToolCalling: true,
+     maxInputTokens: 63827
+   },
+   "claude-3.5-sonnet": {
+     contextWindow: 81638,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "claude-3.5-sonnet",
+     version: "claude-3.5-sonnet",
+     name: "Claude 3.5 Sonnet",
+     supportsToolCalling: true,
+     maxInputTokens: 81638
+   },
+   "gemini-2.0-flash-001": {
+     contextWindow: 127827,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "gemini-2.0-flash",
+     version: "gemini-2.0-flash-001",
+     name: "Gemini 2.0 Flash",
+     supportsToolCalling: false,
+     maxInputTokens: 127827
+   },
+   "gemini-2.5-pro": {
+     contextWindow: 63830,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "gemini-2.5-pro",
+     version: "gemini-2.5-pro-preview-03-25",
+     name: "Gemini 2.5 Pro (Preview)",
+     supportsToolCalling: true,
+     maxInputTokens: 63830
+   },
+   "o4-mini": {
+     contextWindow: 111446,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "o4-mini",
+     version: "o4-mini-2025-04-16",
+     name: "o4-mini (Preview)",
+     supportsToolCalling: true,
+     maxInputTokens: 111446
+   },
+   "gpt-4.1": {
+     contextWindow: 111446,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 0,
+     outputPrice: 0,
+     family: "gpt-4.1",
+     version: "gpt-4.1-2025-04-14",
+     name: "GPT-4.1 (Preview)",
+     supportsToolCalling: true,
+     maxInputTokens: 111446
+   }
+ };
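Note: unlike the other provider maps, the VS Code LM entries above carry maxInputTokens rather than maxTokens, so a prompt budget would be checked against that field. A small illustrative sketch (not part of the package; import specifier assumed) of resolving the default entry:

import { vscodeLlmDefaultModelId, vscodeLlmModels } from "@roo-code/types"

// The cast only keeps the sketch independent of how narrowly the default ID
// happens to be typed in the published declaration files.
const defaultVsCodeLlm = vscodeLlmModels[vscodeLlmDefaultModelId as keyof typeof vscodeLlmModels]
console.log(defaultVsCodeLlm.name, defaultVsCodeLlm.maxInputTokens) // "Claude 3.5 Sonnet" 81638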
+
+ // src/providers/xai.ts
+ var xaiDefaultModelId = "grok-3";
+ var xaiModels = {
+   "grok-3-beta": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 3,
+     outputPrice: 15,
+     description: "xAI's Grok-3 beta model with 131K context window"
+   },
+   "grok-3-fast-beta": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 5,
+     outputPrice: 25,
+     description: "xAI's Grok-3 fast beta model with 131K context window"
+   },
+   "grok-3-mini-beta": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0.3,
+     outputPrice: 0.5,
+     description: "xAI's Grok-3 mini beta model with 131K context window",
+     supportsReasoningEffort: true
+   },
+   "grok-3-mini-fast-beta": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0.6,
+     outputPrice: 4,
+     description: "xAI's Grok-3 mini fast beta model with 131K context window",
+     supportsReasoningEffort: true
+   },
+   "grok-3": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 3,
+     outputPrice: 15,
+     description: "xAI's Grok-3 model with 131K context window"
+   },
+   "grok-3-fast": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 5,
+     outputPrice: 25,
+     description: "xAI's Grok-3 fast model with 131K context window"
+   },
+   "grok-3-mini": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0.3,
+     outputPrice: 0.5,
+     description: "xAI's Grok-3 mini model with 131K context window",
+     supportsReasoningEffort: true
+   },
+   "grok-3-mini-fast": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 0.6,
+     outputPrice: 4,
+     description: "xAI's Grok-3 mini fast model with 131K context window",
+     supportsReasoningEffort: true
+   },
+   "grok-2-latest": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 2,
+     outputPrice: 10,
+     description: "xAI's Grok-2 model - latest version with 131K context window"
+   },
+   "grok-2": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 2,
+     outputPrice: 10,
+     description: "xAI's Grok-2 model with 131K context window"
+   },
+   "grok-2-1212": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 2,
+     outputPrice: 10,
+     description: "xAI's Grok-2 model (version 1212) with 131K context window"
+   },
+   "grok-2-vision-latest": {
+     maxTokens: 8192,
+     contextWindow: 32768,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 2,
+     outputPrice: 10,
+     description: "xAI's Grok-2 Vision model - latest version with image support and 32K context window"
+   },
+   "grok-2-vision": {
+     maxTokens: 8192,
+     contextWindow: 32768,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 2,
+     outputPrice: 10,
+     description: "xAI's Grok-2 Vision model with image support and 32K context window"
+   },
+   "grok-2-vision-1212": {
+     maxTokens: 8192,
+     contextWindow: 32768,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 2,
+     outputPrice: 10,
+     description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window"
+   },
+   "grok-vision-beta": {
+     maxTokens: 8192,
+     contextWindow: 8192,
+     supportsImages: true,
+     supportsPromptCache: false,
+     inputPrice: 5,
+     outputPrice: 15,
+     description: "xAI's Grok Vision Beta model with image support and 8K context window"
+   },
+   "grok-beta": {
+     maxTokens: 8192,
+     contextWindow: 131072,
+     supportsImages: false,
+     supportsPromptCache: false,
+     inputPrice: 5,
+     outputPrice: 15,
+     description: "xAI's Grok Beta model (legacy) with 131K context window"
+   }
+ };
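Note: several of the Grok entries above declare supportsReasoningEffort: true. A short illustrative sketch (assumed import path, not part of the package) of filtering for those IDs:

import { xaiModels } from "@roo-code/types"

// List the xAI model IDs whose metadata declares reasoning-effort support.
const reasoningEffortModelIds = Object.entries(xaiModels)
  .filter(([, info]) => "supportsReasoningEffort" in info && info.supportsReasoningEffort === true)
  .map(([id]) => id)

// Expected from the table above: grok-3-mini-beta, grok-3-mini-fast-beta,
// grok-3-mini, grok-3-mini-fast.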
+
  // src/codebase-index.ts
  import { z } from "zod";
  var codebaseIndexConfigSchema = z.object({
@@ -41,16 +2025,19 @@ var organizationSettingsSchema = z2.object({
  maxReadFileLine: z2.number().optional(),
  fuzzyMatchThreshold: z2.number().optional()
  }).optional(),
+ cloudSettings: z2.object({
+ recordTaskMessages: z2.boolean().optional()
+ }).optional(),
  allowList: organizationAllowListSchema
  });

  // src/experiment.ts
  import { z as z3 } from "zod";
- var experimentIds = ["
+ var experimentIds = ["powerSteering", "concurrentFileReads"];
  var experimentIdsSchema = z3.enum(experimentIds);
  var experimentsSchema = z3.object({
-
-
+ powerSteering: z3.boolean(),
+ concurrentFileReads: z3.boolean()
  });

  // src/global-settings.ts
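Note: the hunk above narrows the experiment list to two flags, powerSteering and concurrentFileReads, and declares both as required booleans in experimentsSchema. A hedged sketch of validating an experiments object with that schema using zod's standard parse/safeParse API (import specifier assumed):

import { experimentsSchema } from "@roo-code/types"

// Both flags are declared without .optional(), so both keys must be present.
const experiments = experimentsSchema.parse({
  powerSteering: false,
  concurrentFileReads: true,
})

// Omitting a flag makes validation fail rather than default it.
const result = experimentsSchema.safeParse({ powerSteering: true })
console.log(result.success) // false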
@@ -464,6 +2451,7 @@ var clineSays = [
  "rooignore_error",
  "diff_error",
  "condense_context",
+ "condense_context_error",
  "codebase_search_result"
  ];
  var clineSaySchema = z7.enum(clineSays);
@@ -528,6 +2516,7 @@ var TelemetryEventName = /* @__PURE__ */ ((TelemetryEventName2) => {
  return TelemetryEventName2;
  })(TelemetryEventName || {});
  var appPropertiesSchema = z8.object({
+ appName: z8.string(),
  appVersion: z8.string(),
  vscodeVersion: z8.string(),
  platform: z8.string(),
@@ -565,7 +2554,9 @@ var rooCodeTelemetryEventSchema = z8.discriminatedUnion("type", [
  "Schema Validation Error" /* SCHEMA_VALIDATION_ERROR */,
  "Diff Application Error" /* DIFF_APPLICATION_ERROR */,
  "Shell Integration Error" /* SHELL_INTEGRATION_ERROR */,
- "Consecutive Mistake Error" /* CONSECUTIVE_MISTAKE_ERROR
+ "Consecutive Mistake Error" /* CONSECUTIVE_MISTAKE_ERROR */,
+ "Context Condensed" /* CONTEXT_CONDENSED */,
+ "Sliding Window Truncation" /* SLIDING_WINDOW_TRUNCATION */
  ]),
  properties: telemetryPropertiesSchema
  }),
@@ -762,7 +2753,9 @@ var globalSettingsSchema = z12.object({
  alwaysAllowExecute: z12.boolean().optional(),
  allowedCommands: z12.array(z12.string()).optional(),
  allowedMaxRequests: z12.number().nullish(),
+ autoCondenseContext: z12.boolean().optional(),
  autoCondenseContextPercent: z12.number().optional(),
+ maxConcurrentFileReads: z12.number().optional(),
  browserToolEnabled: z12.boolean().optional(),
  browserViewportSize: z12.string().optional(),
  screenshotQuality: z12.number().optional(),
@@ -830,7 +2823,9 @@ var GLOBAL_SETTINGS_KEYS = keysOf()([
  "alwaysAllowExecute",
  "allowedCommands",
  "allowedMaxRequests",
+ "autoCondenseContext",
  "autoCondenseContextPercent",
+ "maxConcurrentFileReads",
  "browserToolEnabled",
  "browserViewportSize",
  "screenshotQuality",
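Note: these two hunks add the optional settings autoCondenseContext and maxConcurrentFileReads to both globalSettingsSchema and GLOBAL_SETTINGS_KEYS. A minimal sketch of validating just those fields with zod's .pick(), which avoids assuming anything about the rest of the settings object (illustrative values; import specifier assumed):

import { globalSettingsSchema } from "@roo-code/types"

// Validate only the two newly added fields; .pick() is standard zod for
// ZodObject schemas and keeps this sketch independent of the other keys.
const condenseSettingsSchema = globalSettingsSchema.pick({
  autoCondenseContext: true,
  maxConcurrentFileReads: true,
})

const parsed = condenseSettingsSchema.parse({
  autoCondenseContext: true,
  maxConcurrentFileReads: 5,
})
// parsed: { autoCondenseContext?: boolean; maxConcurrentFileReads?: number }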
@@ -1073,18 +3068,44 @@ var commandExecutionStatusSchema = z14.discriminatedUnion("status", [
  })
  ]);
  export {
+ ANTHROPIC_DEFAULT_MAX_TOKENS,
+ BEDROCK_DEFAULT_TEMPERATURE,
+ BEDROCK_MAX_TOKENS,
+ BEDROCK_REGIONS,
+ BEDROCK_REGION_INFO,
+ DEEP_SEEK_DEFAULT_TEMPERATURE,
+ GLAMA_DEFAULT_TEMPERATURE,
  GLOBAL_SETTINGS_KEYS,
  GLOBAL_STATE_KEYS,
  IpcMessageType,
  IpcOrigin,
+ LITELLM_COMPUTER_USE_MODELS,
+ LMSTUDIO_DEFAULT_TEMPERATURE,
+ MISTRAL_DEFAULT_TEMPERATURE,
+ OPENAI_AZURE_AI_INFERENCE_PATH,
+ OPENAI_NATIVE_DEFAULT_TEMPERATURE,
+ OPENROUTER_DEFAULT_PROVIDER_NAME,
+ OPEN_ROUTER_COMPUTER_USE_MODELS,
+ OPEN_ROUTER_PROMPT_CACHING_MODELS,
+ OPEN_ROUTER_REASONING_BUDGET_MODELS,
+ OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS,
  ORGANIZATION_ALLOW_ALL,
  PROVIDER_SETTINGS_KEYS,
  RooCodeEventName,
  SECRET_STATE_KEYS,
  TaskCommandName,
  TelemetryEventName,
+ VERTEX_REGIONS,
  ackSchema,
+ anthropicDefaultModelId,
+ anthropicModels,
  appPropertiesSchema,
+ azureOpenAiDefaultApiVersion,
+ bedrockDefaultModelId,
+ bedrockDefaultPromptRouterModelId,
+ bedrockModels,
+ chutesDefaultModelId,
+ chutesModels,
  clineAskSchema,
  clineAsks,
  clineMessageSchema,
@@ -1100,10 +3121,18 @@ export {
  customModePromptsSchema,
  customModesSettingsSchema,
  customSupportPromptsSchema,
+ deepSeekDefaultModelId,
+ deepSeekModels,
  experimentIds,
  experimentIdsSchema,
  experimentsSchema,
+ geminiDefaultModelId,
+ geminiModels,
+ glamaDefaultModelId,
+ glamaDefaultModelInfo,
  globalSettingsSchema,
+ groqDefaultModelId,
+ groqModels,
  groupEntrySchema,
  groupOptionsSchema,
  historyItemSchema,
@@ -1115,10 +3144,19 @@ export {
  keysOf,
  languages,
  languagesSchema,
+ litellmDefaultModelId,
+ litellmDefaultModelInfo,
+ mistralDefaultModelId,
+ mistralModels,
  modeConfigSchema,
  modelInfoSchema,
  modelParameters,
  modelParametersSchema,
+ openAiModelInfoSaneDefaults,
+ openAiNativeDefaultModelId,
+ openAiNativeModels,
+ openRouterDefaultModelId,
+ openRouterDefaultModelInfo,
  organizationAllowListSchema,
  organizationSettingsSchema,
  promptComponentSchema,
@@ -1129,6 +3167,8 @@ export {
  providerSettingsSchemaDiscriminated,
  reasoningEfforts,
  reasoningEffortsSchema,
+ requestyDefaultModelId,
+ requestyDefaultModelInfo,
  rooCodeEventsSchema,
  rooCodeSettingsSchema,
  rooCodeTelemetryEventSchema,
@@ -1145,6 +3185,14 @@ export {
  toolNames,
  toolNamesSchema,
  toolProgressStatusSchema,
- toolUsageSchema
+ toolUsageSchema,
+ unboundDefaultModelId,
+ unboundDefaultModelInfo,
+ vertexDefaultModelId,
+ vertexModels,
+ vscodeLlmDefaultModelId,
+ vscodeLlmModels,
+ xaiDefaultModelId,
+ xaiModels
  };
  //# sourceMappingURL=index.js.map