@roo-code/types 1.91.0 → 1.93.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +460 -146
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +486 -318
- package/dist/index.d.ts +486 -318
- package/dist/index.js +455 -143
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
@@ -30,7 +30,8 @@ __export(index_exports, {
 BEDROCK_GLOBAL_INFERENCE_MODEL_IDS: () => BEDROCK_GLOBAL_INFERENCE_MODEL_IDS,
 BEDROCK_MAX_TOKENS: () => BEDROCK_MAX_TOKENS,
 BEDROCK_REGIONS: () => BEDROCK_REGIONS,
-
+BEDROCK_SERVICE_TIER_MODEL_IDS: () => BEDROCK_SERVICE_TIER_MODEL_IDS,
+BEDROCK_SERVICE_TIER_PRICING: () => BEDROCK_SERVICE_TIER_PRICING,
 CODEBASE_INDEX_DEFAULTS: () => CODEBASE_INDEX_DEFAULTS,
 CONSENT_COOKIE_NAME: () => CONSENT_COOKIE_NAME,
 CONTEXT_MANAGEMENT_EVENTS: () => CONTEXT_MANAGEMENT_EVENTS,
@@ -120,6 +121,7 @@ __export(index_exports, {
 chutesModels: () => chutesModels,
 claudeCodeDefaultModelId: () => claudeCodeDefaultModelId,
 claudeCodeModels: () => claudeCodeModels,
+claudeCodeReasoningConfig: () => claudeCodeReasoningConfig,
 clineAskSchema: () => clineAskSchema,
 clineAsks: () => clineAsks,
 clineMessageSchema: () => clineMessageSchema,
@@ -134,7 +136,6 @@ __export(index_exports, {
 commandIds: () => commandIds,
 contextCondenseSchema: () => contextCondenseSchema,
 contextTruncationSchema: () => contextTruncationSchema,
-convertModelNameForVertex: () => convertModelNameForVertex,
 customModePromptsSchema: () => customModePromptsSchema,
 customModesSettingsSchema: () => customModesSettingsSchema,
 customProviders: () => customProviders,
@@ -156,6 +157,7 @@ __export(index_exports, {
 extensionBridgeEventSchema: () => extensionBridgeEventSchema,
 extensionInstanceSchema: () => extensionInstanceSchema,
 extractApiProviderErrorProperties: () => extractApiProviderErrorProperties,
+extractMessageFromJsonPayload: () => extractMessageFromJsonPayload,
 fauxProviders: () => fauxProviders,
 featherlessDefaultModelId: () => featherlessDefaultModelId,
 featherlessModels: () => featherlessModels,
@@ -165,7 +167,6 @@ __export(index_exports, {
 geminiDefaultModelId: () => geminiDefaultModelId,
 geminiModels: () => geminiModels,
 getApiProtocol: () => getApiProtocol,
-getClaudeCodeModelId: () => getClaudeCodeModelId,
 getEffectiveProtocol: () => getEffectiveProtocol,
 getErrorMessage: () => getErrorMessage,
 getErrorStatusCode: () => getErrorStatusCode,
@@ -237,6 +238,7 @@ __export(index_exports, {
 moonshotDefaultModelId: () => moonshotDefaultModelId,
 moonshotModels: () => moonshotModels,
 nonBlockingAsks: () => nonBlockingAsks,
+normalizeClaudeCodeModelId: () => normalizeClaudeCodeModelId,
 ollamaDefaultModelId: () => ollamaDefaultModelId,
 ollamaDefaultModelInfo: () => ollamaDefaultModelInfo,
 openAiModelInfoSaneDefaults: () => openAiModelInfoSaneDefaults,
@@ -896,6 +898,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens (≤200K context)
 outputPrice: 15,
@@ -929,6 +932,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens (≤200K context)
 outputPrice: 15,
@@ -961,6 +965,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 5,
 // $5 per million input tokens
 outputPrice: 25,
@@ -978,6 +983,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 15,
 // $15 per million input tokens
 outputPrice: 75,
@@ -995,6 +1001,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 15,
 // $15 per million input tokens
 outputPrice: 75,
@@ -1012,6 +1019,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1030,6 +1038,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1045,6 +1054,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 // $3 per million input tokens
 outputPrice: 15,
@@ -1060,6 +1070,7 @@ var anthropicModels = {
 supportsImages: false,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1071,6 +1082,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1082,6 +1094,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 1.25,
 cacheWritesPrice: 0.3,
@@ -1093,6 +1106,7 @@ var anthropicModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1245,6 +1259,7 @@ var bedrockModels = {
 supportsPromptCache: true,
 supportsReasoningBudget: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1299,6 +1314,22 @@ var bedrockModels = {
 maxCachePoints: 1,
 cachableFields: ["system"]
 },
+"amazon.nova-2-lite-v1:0": {
+maxTokens: 65535,
+contextWindow: 1e6,
+supportsImages: true,
+supportsPromptCache: true,
+supportsNativeTools: true,
+inputPrice: 0.33,
+outputPrice: 2.75,
+cacheWritesPrice: 0,
+cacheReadsPrice: 0.0825,
+// 75% less than input price
+minTokensPerCachePoint: 1,
+maxCachePoints: 1,
+cachableFields: ["system"],
+description: "Amazon Nova 2 Lite - Comparable to Claude Haiku 4.5"
+},
 "amazon.nova-micro-v1:0": {
 maxTokens: 5e3,
 contextWindow: 128e3,
@@ -1322,6 +1353,7 @@ var bedrockModels = {
 supportsPromptCache: true,
 supportsReasoningBudget: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1337,6 +1369,7 @@ var bedrockModels = {
 supportsPromptCache: true,
 supportsReasoningBudget: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1352,6 +1385,7 @@ var bedrockModels = {
 supportsPromptCache: true,
 supportsReasoningBudget: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 5,
 outputPrice: 25,
 cacheWritesPrice: 6.25,
@@ -1367,6 +1401,7 @@ var bedrockModels = {
 supportsPromptCache: true,
 supportsReasoningBudget: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75,
 cacheWritesPrice: 18.75,
@@ -1382,6 +1417,7 @@ var bedrockModels = {
 supportsPromptCache: true,
 supportsReasoningBudget: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1396,6 +1432,7 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15,
 cacheWritesPrice: 3.75,
@@ -1410,6 +1447,7 @@ var bedrockModels = {
 supportsImages: false,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.8,
 outputPrice: 4,
 cacheWritesPrice: 1,
@@ -1425,6 +1463,7 @@ var bedrockModels = {
 supportsPromptCache: true,
 supportsReasoningBudget: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 5,
 cacheWritesPrice: 1.25,
@@ -1441,6 +1480,7 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15
 },
@@ -1450,6 +1490,7 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 15,
 outputPrice: 75
 },
@@ -1459,6 +1500,7 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 15
 },
@@ -1468,6 +1510,7 @@ var bedrockModels = {
 supportsImages: true,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 1.25
 },
@@ -1477,6 +1520,7 @@ var bedrockModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 8,
 outputPrice: 24,
 description: "Claude 2.1"
@@ -1487,6 +1531,7 @@ var bedrockModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 8,
 outputPrice: 24,
 description: "Claude 2.0"
@@ -1497,6 +1542,7 @@ var bedrockModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.8,
 outputPrice: 2.4,
 description: "Claude Instant"
@@ -1779,6 +1825,29 @@ var BEDROCK_GLOBAL_INFERENCE_MODEL_IDS = [
 "anthropic.claude-haiku-4-5-20251001-v1:0",
 "anthropic.claude-opus-4-5-20251101-v1:0"
 ];
+var BEDROCK_SERVICE_TIER_MODEL_IDS = [
+// Amazon Nova models
+"amazon.nova-lite-v1:0",
+"amazon.nova-2-lite-v1:0",
+"amazon.nova-pro-v1:0",
+"amazon.nova-pro-latency-optimized-v1:0",
+// DeepSeek models
+"deepseek.r1-v1:0",
+// Qwen models
+"qwen.qwen3-next-80b-a3b",
+"qwen.qwen3-coder-480b-a35b-v1:0",
+// OpenAI GPT-OSS models
+"openai.gpt-oss-20b-1:0",
+"openai.gpt-oss-120b-1:0"
+];
+var BEDROCK_SERVICE_TIER_PRICING = {
+STANDARD: 1,
+// Base price
+FLEX: 0.5,
+// 50% discount from standard
+PRIORITY: 1.75
+// 75% premium over standard
+};
 
 // src/providers/cerebras.ts
 var cerebrasDefaultModelId = "gpt-oss-120b";
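The BEDROCK_SERVICE_TIER_PRICING values added above are multipliers applied to a model's base per-million-token prices, and BEDROCK_SERVICE_TIER_MODEL_IDS lists the Bedrock models that accept a service tier. A minimal TypeScript sketch of how a consumer of the package might combine the two exports; the applyServiceTier helper and its parameter names are illustrative assumptions, not part of the package:

import { BEDROCK_SERVICE_TIER_MODEL_IDS, BEDROCK_SERVICE_TIER_PRICING } from "@roo-code/types";

// Scale a base price (USD per million tokens) by the chosen service-tier multiplier.
// This helper is a sketch; it is not exported by @roo-code/types.
function applyServiceTier(basePrice: number, tier: keyof typeof BEDROCK_SERVICE_TIER_PRICING): number {
  return basePrice * BEDROCK_SERVICE_TIER_PRICING[tier];
}

// Example: Nova 2 Lite's $0.33/M input price becomes $0.165/M on the FLEX tier.
const tierEligible = BEDROCK_SERVICE_TIER_MODEL_IDS.includes("amazon.nova-2-lite-v1:0"); // true
const flexInputPrice = applyServiceTier(0.33, "FLEX"); // 0.165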
@@ -1790,6 +1859,7 @@ var cerebrasModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Highly intelligent general purpose model with up to 1,000 tokens/s"
@@ -1801,6 +1871,7 @@ var cerebrasModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Intelligent model with ~1400 tokens/s"
@@ -1812,6 +1883,7 @@ var cerebrasModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Powerful model with ~2600 tokens/s"
@@ -1823,6 +1895,7 @@ var cerebrasModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "SOTA coding performance with ~2500 tokens/s"
@@ -1834,6 +1907,7 @@ var cerebrasModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "OpenAI GPT OSS model with ~2800 tokens/s\n\n\u2022 64K context window\n\u2022 Excels at efficient reasoning across science, math, and coding"
@@ -1848,6 +1922,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 0528 model."
@@ -1857,6 +1933,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 model."
@@ -1866,6 +1944,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 model."
@@ -1875,6 +1955,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3.1 model."
@@ -1884,6 +1966,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.23,
 outputPrice: 0.9,
 description: "DeepSeek\u2011V3.1\u2011Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix\u2011ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance."
@@ -1893,6 +1977,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 3,
 description: "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2\xD7 quota per request and not intended for bulk workloads."
@@ -1902,6 +1988,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.25,
 outputPrice: 0.35,
 description: "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long\u2011context training and inference efficiency while maintaining performance comparable to V3.1\u2011Terminus."
@@ -1913,6 +2001,8 @@ var chutesModels = {
 // From Groq
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Llama 3.3 70B Instruct model."
@@ -1922,6 +2012,8 @@ var chutesModels = {
 contextWindow: 512e3,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context."
@@ -1931,6 +2023,8 @@ var chutesModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Mistral Nemo Instruct model."
@@ -1940,6 +2034,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Gemma 3 12B IT model."
@@ -1949,6 +2045,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nous DeepHermes 3 Llama 3 8B Preview model."
@@ -1958,6 +2056,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Unsloth Gemma 3 4B IT model."
@@ -1967,6 +2067,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nvidia Llama 3.3 Nemotron Super 49B model."
@@ -1976,6 +2078,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Nvidia Llama 3.1 Nemotron Ultra 253B model."
@@ -1985,6 +2089,8 @@ var chutesModels = {
 contextWindow: 256e3,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model."
@@ -1994,6 +2100,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 Base model."
@@ -2003,6 +2111,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek R1 Zero model."
@@ -2012,6 +2122,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "DeepSeek V3 (0324) model."
@@ -2021,6 +2133,8 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 235B A22B Instruct 2507 model with 262K context window."
@@ -2030,6 +2144,8 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 235B A22B model."
@@ -2039,6 +2155,8 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 32B model."
@@ -2048,6 +2166,8 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 30B A3B model."
@@ -2057,6 +2177,8 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 14B model."
@@ -2066,6 +2188,8 @@ var chutesModels = {
 contextWindow: 40960,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 8B model."
@@ -2075,6 +2199,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Microsoft MAI-DS-R1 FP8 model."
@@ -2084,6 +2210,8 @@ var chutesModels = {
 contextWindow: 163840,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "TNGTech DeepSeek R1T Chimera model."
@@ -2093,6 +2221,8 @@ var chutesModels = {
 contextWindow: 151329,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.5-Air model with 151,329 token context window and 106B total parameters with 12B activated."
@@ -2102,6 +2232,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture."
@@ -2111,6 +2243,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 1,
 outputPrice: 3,
 description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference."
@@ -2120,6 +2254,8 @@ var chutesModels = {
 contextWindow: 202752,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios."
@@ -2130,6 +2266,8 @@ var chutesModels = {
 contextWindow: 202752,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 1.15,
 outputPrice: 3.25,
 description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference."
@@ -2139,6 +2277,8 @@ var chutesModels = {
 contextWindow: 128e3,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "LongCat Flash Thinking FP8 model with 128K context window, optimized for complex reasoning and coding tasks."
@@ -2148,6 +2288,8 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Qwen3 Coder 480B A35B Instruct FP8 model, optimized for coding tasks."
@@ -2157,6 +2299,8 @@ var chutesModels = {
 contextWindow: 75e3,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.1481,
 outputPrice: 0.5926,
 description: "Moonshot AI Kimi K2 Instruct model with 75k context window."
@@ -2166,6 +2310,8 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.1999,
 outputPrice: 0.8001,
 description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window."
@@ -2175,6 +2321,8 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.077968332,
 outputPrice: 0.31202496,
 description: "Qwen3 235B A22B Thinking 2507 model with 262K context window."
@@ -2184,6 +2332,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Fast, stable instruction-tuned model optimized for complex tasks, RAG, and tool use without thinking traces."
@@ -2193,6 +2343,8 @@ var chutesModels = {
 contextWindow: 131072,
 supportsImages: false,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0,
 outputPrice: 0,
 description: "Reasoning-first model with structured thinking traces for multi-step problems, math proofs, and code synthesis."
@@ -2202,6 +2354,8 @@ var chutesModels = {
 contextWindow: 262144,
 supportsImages: true,
 supportsPromptCache: false,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.16,
 outputPrice: 0.65,
 description: "Qwen3\u2011VL\u2011235B\u2011A22B\u2011Thinking is an open\u2011weight MoE vision\u2011language model (235B total, ~22B activated) optimized for deliberate multi\u2011step reasoning with strong text\u2011image\u2011video understanding and long\u2011context capabilities."
@@ -2210,139 +2364,71 @@ var chutesModels = {
 var chutesDefaultModelInfo = chutesModels[chutesDefaultModelId];
 
 // src/providers/claude-code.ts
-var
-function convertModelNameForVertex(modelName) {
-return modelName.replace(VERTEX_DATE_PATTERN, "@$1");
-}
-var claudeCodeDefaultModelId = "claude-sonnet-4-5";
-var CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS = 16e3;
-function getClaudeCodeModelId(baseModelId, useVertex = false) {
-return useVertex ? convertModelNameForVertex(baseModelId) : baseModelId;
-}
+var DATE_SUFFIX_PATTERN = /-\d{8}$/;
 var claudeCodeModels = {
-"claude-
-
-
-
-// Claude Code does report cache tokens
-supportsReasoningEffort: false,
-supportsReasoningBudget: false,
-requiredReasoningBudget: false,
-// Claude Code manages its own tools and temperature via the CLI
-supportsNativeTools: false,
-supportsTemperature: false
-},
-"claude-sonnet-4-5-20250929[1m]": {
-...anthropicModels["claude-sonnet-4-5"],
-contextWindow: 1e6,
-// 1M token context window (requires [1m] suffix)
-supportsImages: false,
-supportsPromptCache: true,
-// Claude Code does report cache tokens
-supportsReasoningEffort: false,
-supportsReasoningBudget: false,
-requiredReasoningBudget: false,
-// Claude Code manages its own tools and temperature via the CLI
-supportsNativeTools: false,
-supportsTemperature: false
-},
-"claude-sonnet-4-20250514": {
-...anthropicModels["claude-sonnet-4-20250514"],
-supportsImages: false,
-supportsPromptCache: true,
-// Claude Code does report cache tokens
-supportsReasoningEffort: false,
-supportsReasoningBudget: false,
-requiredReasoningBudget: false,
-// Claude Code manages its own tools and temperature via the CLI
-supportsNativeTools: false,
-supportsTemperature: false
-},
-"claude-opus-4-5-20251101": {
-...anthropicModels["claude-opus-4-5-20251101"],
-supportsImages: false,
-supportsPromptCache: true,
-// Claude Code does report cache tokens
-supportsReasoningEffort: false,
-supportsReasoningBudget: false,
-requiredReasoningBudget: false,
-// Claude Code manages its own tools and temperature via the CLI
-supportsNativeTools: false,
-supportsTemperature: false
-},
-"claude-opus-4-1-20250805": {
-...anthropicModels["claude-opus-4-1-20250805"],
-supportsImages: false,
-supportsPromptCache: true,
-// Claude Code does report cache tokens
-supportsReasoningEffort: false,
-supportsReasoningBudget: false,
-requiredReasoningBudget: false,
-// Claude Code manages its own tools and temperature via the CLI
-supportsNativeTools: false,
-supportsTemperature: false
-},
-"claude-opus-4-20250514": {
-...anthropicModels["claude-opus-4-20250514"],
-supportsImages: false,
-supportsPromptCache: true,
-// Claude Code does report cache tokens
-supportsReasoningEffort: false,
-supportsReasoningBudget: false,
-requiredReasoningBudget: false,
-// Claude Code manages its own tools and temperature via the CLI
-supportsNativeTools: false,
-supportsTemperature: false
-},
-"claude-3-7-sonnet-20250219": {
-...anthropicModels["claude-3-7-sonnet-20250219"],
-supportsImages: false,
-supportsPromptCache: true,
-// Claude Code does report cache tokens
-supportsReasoningEffort: false,
-supportsReasoningBudget: false,
-requiredReasoningBudget: false,
-// Claude Code manages its own tools and temperature via the CLI
-supportsNativeTools: false,
-supportsTemperature: false
-},
-"claude-3-5-sonnet-20241022": {
-...anthropicModels["claude-3-5-sonnet-20241022"],
-supportsImages: false,
+"claude-haiku-4-5": {
+maxTokens: 32768,
+contextWindow: 2e5,
+supportsImages: true,
 supportsPromptCache: true,
-
-
-
-
-
-supportsNativeTools: false,
-supportsTemperature: false
+supportsNativeTools: true,
+defaultToolProtocol: "native",
+supportsReasoningEffort: ["disable", "low", "medium", "high"],
+reasoningEffort: "medium",
+description: "Claude Haiku 4.5 - Fast and efficient with thinking"
 },
-"claude-
-
-
+"claude-sonnet-4-5": {
+maxTokens: 32768,
+contextWindow: 2e5,
+supportsImages: true,
 supportsPromptCache: true,
-
-
-
-
-
-supportsNativeTools: false,
-supportsTemperature: false
+supportsNativeTools: true,
+defaultToolProtocol: "native",
+supportsReasoningEffort: ["disable", "low", "medium", "high"],
+reasoningEffort: "medium",
+description: "Claude Sonnet 4.5 - Balanced performance with thinking"
 },
-"claude-
-
-
+"claude-opus-4-5": {
+maxTokens: 32768,
+contextWindow: 2e5,
+supportsImages: true,
 supportsPromptCache: true,
-
-
-
-
-
-supportsNativeTools: false,
-supportsTemperature: false
+supportsNativeTools: true,
+defaultToolProtocol: "native",
+supportsReasoningEffort: ["disable", "low", "medium", "high"],
+reasoningEffort: "medium",
+description: "Claude Opus 4.5 - Most capable with thinking"
 }
 };
+var claudeCodeDefaultModelId = "claude-sonnet-4-5";
+var MODEL_FAMILY_PATTERNS = [
+// Opus models (any version) → claude-opus-4-5
+{ pattern: /opus/i, target: "claude-opus-4-5" },
+// Haiku models (any version) → claude-haiku-4-5
+{ pattern: /haiku/i, target: "claude-haiku-4-5" },
+// Sonnet models (any version) → claude-sonnet-4-5
+{ pattern: /sonnet/i, target: "claude-sonnet-4-5" }
+];
+function normalizeClaudeCodeModelId(modelId) {
+if (Object.hasOwn(claudeCodeModels, modelId)) {
+return modelId;
+}
+const withoutDate = modelId.replace(DATE_SUFFIX_PATTERN, "");
+if (Object.hasOwn(claudeCodeModels, withoutDate)) {
+return withoutDate;
+}
+for (const { pattern, target } of MODEL_FAMILY_PATTERNS) {
+if (pattern.test(modelId)) {
+return target;
+}
+}
+return claudeCodeDefaultModelId;
+}
+var claudeCodeReasoningConfig = {
+low: { budgetTokens: 16e3 },
+medium: { budgetTokens: 32e3 },
+high: { budgetTokens: 64e3 }
+};
 
 // src/providers/deepseek.ts
 var deepSeekDefaultModelId = "deepseek-chat";
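For reference, the new normalizeClaudeCodeModelId above resolves an arbitrary Claude model ID to one of the three claudeCodeModels keys: an exact key is returned as-is, otherwise a trailing -YYYYMMDD date suffix is stripped, otherwise the opus/haiku/sonnet family patterns are tried, and anything else falls back to claudeCodeDefaultModelId. A small TypeScript usage sketch with illustrative input IDs (the sample IDs are assumptions, not taken from the package):

import { normalizeClaudeCodeModelId } from "@roo-code/types";

normalizeClaudeCodeModelId("claude-opus-4-5");            // exact key -> "claude-opus-4-5"
normalizeClaudeCodeModelId("claude-sonnet-4-5-20250929"); // date suffix stripped -> "claude-sonnet-4-5"
normalizeClaudeCodeModelId("claude-3-5-haiku-20241022");  // family pattern match -> "claude-haiku-4-5"
normalizeClaudeCodeModelId("unknown-model");              // fallback -> claudeCodeDefaultModelId ("claude-sonnet-4-5")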
@@ -2373,6 +2459,7 @@ var deepSeekModels = {
 supportsPromptCache: true,
 supportsNativeTools: true,
 defaultToolProtocol: "native",
+preserveReasoning: true,
 inputPrice: 0.28,
 // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
 outputPrice: 0.42,
@@ -2384,7 +2471,7 @@ var deepSeekModels = {
 description: `DeepSeek-V3.2 (Thinking Mode) achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 8K output tokens. Supports JSON output, tool calls, and chat prefix completion (beta).`
 }
 };
-var DEEP_SEEK_DEFAULT_TEMPERATURE = 0;
+var DEEP_SEEK_DEFAULT_TEMPERATURE = 0.3;
 
 // src/providers/doubao.ts
 var doubaoDefaultModelId = "doubao-seed-1-6-250615";
@@ -2395,6 +2482,7 @@ var doubaoModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 1e-4,
 // $0.0001 per million tokens (cache miss)
 outputPrice: 4e-4,
@@ -2411,6 +2499,7 @@ var doubaoModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 2e-4,
 // $0.0002 per million tokens
 outputPrice: 8e-4,
@@ -2427,6 +2516,7 @@ var doubaoModels = {
 supportsImages: true,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 15e-5,
 // $0.00015 per million tokens
 outputPrice: 6e-4,
@@ -2503,6 +2593,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 2.5,
 cacheReadsPrice: 0.15,
@@ -2514,6 +2605,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.6,
 outputPrice: 2.5,
 description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities."
@@ -2524,6 +2616,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.3,
 outputPrice: 1.2,
 description: "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks."
@@ -2534,6 +2627,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.22,
 outputPrice: 0.88,
 description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025."
@@ -2544,6 +2638,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.45,
 outputPrice: 1.8,
 description: "Qwen3's most agentic code model to date."
@@ -2554,6 +2649,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 3,
 outputPrice: 8,
 description: "05/28 updated checkpoint of Deepseek R1. Its overall performance is now approaching that of leading models, such as O3 and Gemini 2.5 Pro. Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks, and this version also offers a reduced hallucination rate, enhanced support for function calling, and better experience for vibe coding. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -2564,6 +2660,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.9,
 outputPrice: 0.9,
 description: "A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us."
@@ -2574,6 +2671,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.56,
 outputPrice: 1.68,
 description: "DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token."
@@ -2584,6 +2682,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.55,
 outputPrice: 2.19,
 description: "Z.ai GLM-4.5 with 355B total parameters and 32B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -2594,6 +2693,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.55,
 outputPrice: 2.19,
 description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities."
@@ -2604,6 +2704,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.55,
 outputPrice: 2.19,
 description: "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows."
@@ -2614,6 +2715,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.07,
 outputPrice: 0.3,
 description: "OpenAI gpt-oss-20b: Compact model for local/edge deployments. Optimized for low-latency and resource-constrained environments with chain-of-thought output, adjustable reasoning, and agentic workflows."
@@ -2624,6 +2726,7 @@ var fireworksModels = {
 supportsImages: false,
 supportsPromptCache: false,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 inputPrice: 0.15,
 outputPrice: 0.6,
 description: "OpenAI gpt-oss-120b: Production-grade, general-purpose model that fits on a single H100 GPU. Features complex reasoning, configurable effort, full chain-of-thought transparency, and supports function calling, tool use, and structured outputs."
@@ -2631,16 +2734,18 @@ var fireworksModels = {
 };
 
 // src/providers/gemini.ts
-var geminiDefaultModelId = "gemini-
+var geminiDefaultModelId = "gemini-3-pro-preview";
 var geminiModels = {
 "gemini-3-pro-preview": {
 maxTokens: 65536,
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
 supportsReasoningEffort: ["low", "high"],
 reasoningEffort: "low",
+includedTools: ["write_file", "edit_file"],
 supportsTemperature: true,
 defaultTemperature: 1,
 inputPrice: 4,
@@ -2658,13 +2763,32 @@ var geminiModels = {
 }
 ]
 },
+"gemini-3-flash-preview": {
+maxTokens: 65536,
+contextWindow: 1048576,
+supportsImages: true,
+supportsNativeTools: true,
+defaultToolProtocol: "native",
+supportsPromptCache: true,
+supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+reasoningEffort: "medium",
+includedTools: ["write_file", "edit_file"],
+supportsTemperature: true,
+defaultTemperature: 1,
+inputPrice: 0.3,
+outputPrice: 2.5,
+cacheReadsPrice: 0.075,
+cacheWritesPrice: 1
+},
 // 2.5 Pro models
 "gemini-2.5-pro": {
 maxTokens: 64e3,
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 2.5,
 // This is the pricing for prompts above 200k tokens.
 outputPrice: 15,
@@ -2693,7 +2817,9 @@ var geminiModels = {
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 2.5,
 // This is the pricing for prompts above 200k tokens.
 outputPrice: 15,
@@ -2721,7 +2847,9 @@ var geminiModels = {
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 2.5,
 // This is the pricing for prompts above 200k tokens.
 outputPrice: 15,
@@ -2747,7 +2875,9 @@ var geminiModels = {
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 2.5,
 // This is the pricing for prompts above 200k tokens.
 outputPrice: 15,
@@ -2776,7 +2906,9 @@ var geminiModels = {
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 0.3,
 outputPrice: 2.5,
 cacheReadsPrice: 0.075,
@@ -2789,7 +2921,9 @@ var geminiModels = {
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 0.3,
 outputPrice: 2.5,
 cacheReadsPrice: 0.075,
@@ -2802,7 +2936,9 @@ var geminiModels = {
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 0.3,
 outputPrice: 2.5,
 cacheReadsPrice: 0.075,
@@ -2816,7 +2952,9 @@ var geminiModels = {
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 0.1,
 outputPrice: 0.4,
 cacheReadsPrice: 0.025,
@@ -2829,7 +2967,9 @@ var geminiModels = {
 contextWindow: 1048576,
 supportsImages: true,
 supportsNativeTools: true,
+defaultToolProtocol: "native",
 supportsPromptCache: true,
+includedTools: ["write_file", "edit_file"],
 inputPrice: 0.1,
 outputPrice: 0.4,
 cacheReadsPrice: 0.025,
@@ -2849,6 +2989,7 @@ var groqModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.05,
   outputPrice: 0.08,
   description: "Meta Llama 3.1 8B Instant model, 128K context."
@@ -2859,6 +3000,7 @@ var groqModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.59,
   outputPrice: 0.79,
   description: "Meta Llama 3.3 70B Versatile model, 128K context."
@@ -2869,6 +3011,7 @@ var groqModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.11,
   outputPrice: 0.34,
   description: "Meta Llama 4 Scout 17B Instruct model, 128K context."
@@ -2906,6 +3049,7 @@ var groqModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.29,
   outputPrice: 0.59,
   description: "Alibaba Qwen 3 32B model, 128K context."
@@ -2936,6 +3080,7 @@ var groqModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.6,
   outputPrice: 2.5,
   cacheReadsPrice: 0.15,
@@ -2947,6 +3092,7 @@ var groqModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.15,
   outputPrice: 0.75,
   description: "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts."
@@ -2957,6 +3103,7 @@ var groqModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.1,
   outputPrice: 0.5,
   description: "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts."
@@ -3020,6 +3167,7 @@ var litellmDefaultModelInfo = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -3050,6 +3198,7 @@ var mistralModels = {
   supportsImages: true,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 2,
   outputPrice: 5
   },
@@ -3059,6 +3208,7 @@ var mistralModels = {
   supportsImages: true,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.4,
   outputPrice: 2
   },
@@ -3068,6 +3218,7 @@ var mistralModels = {
   supportsImages: true,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.4,
   outputPrice: 2
   },
@@ -3077,6 +3228,7 @@ var mistralModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.3,
   outputPrice: 0.9
   },
@@ -3086,6 +3238,7 @@ var mistralModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 2,
   outputPrice: 6
   },
@@ -3095,6 +3248,7 @@ var mistralModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.1,
   outputPrice: 0.1
   },
@@ -3104,6 +3258,7 @@ var mistralModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.04,
   outputPrice: 0.04
   },
@@ -3113,6 +3268,7 @@ var mistralModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.2,
   outputPrice: 0.6
   },
@@ -3122,6 +3278,7 @@ var mistralModels = {
   supportsImages: true,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 2,
   outputPrice: 6
   }
@@ -3137,6 +3294,7 @@ var moonshotModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.6,
   // $0.60 per million tokens (cache miss)
   outputPrice: 2.5,
@@ -3153,6 +3311,7 @@ var moonshotModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.6,
   outputPrice: 2.5,
   cacheReadsPrice: 0.15,
@@ -3164,6 +3323,7 @@ var moonshotModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 2.4,
   // $2.40 per million tokens (cache miss)
   outputPrice: 10,
@@ -3183,6 +3343,7 @@ var moonshotModels = {
   // Text-only (no image/vision support)
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.6,
   // $0.60 per million tokens (cache miss)
   outputPrice: 2.5,
@@ -3222,6 +3383,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3240,6 +3402,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3262,6 +3425,7 @@ var openAiNativeModels = {
   maxTokens: 16384,
   contextWindow: 128e3,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3275,6 +3439,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3297,6 +3462,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3315,6 +3481,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3332,6 +3499,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3353,6 +3521,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3374,6 +3543,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3391,6 +3561,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3409,6 +3580,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3422,6 +3594,7 @@ var openAiNativeModels = {
   maxTokens: 32768,
   contextWindow: 1047576,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3438,6 +3611,7 @@ var openAiNativeModels = {
   maxTokens: 32768,
   contextWindow: 1047576,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3454,6 +3628,7 @@ var openAiNativeModels = {
   maxTokens: 32768,
   contextWindow: 1047576,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3470,6 +3645,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 2,
@@ -3487,6 +3663,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 2,
@@ -3499,6 +3676,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 2,
@@ -3511,6 +3689,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 1.1,
@@ -3528,6 +3707,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 1.1,
@@ -3540,6 +3720,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 1.1,
@@ -3552,6 +3733,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: false,
   supportsPromptCache: true,
   inputPrice: 1.1,
@@ -3565,6 +3747,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: false,
   supportsPromptCache: true,
   inputPrice: 1.1,
@@ -3577,6 +3760,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: false,
   supportsPromptCache: true,
   inputPrice: 1.1,
@@ -3589,6 +3773,7 @@ var openAiNativeModels = {
   maxTokens: 1e5,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 15,
@@ -3600,6 +3785,7 @@ var openAiNativeModels = {
   maxTokens: 32768,
   contextWindow: 128e3,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 15,
@@ -3611,6 +3797,7 @@ var openAiNativeModels = {
   maxTokens: 65536,
   contextWindow: 128e3,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 1.1,
@@ -3622,6 +3809,7 @@ var openAiNativeModels = {
   maxTokens: 16384,
   contextWindow: 128e3,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 2.5,
@@ -3636,6 +3824,7 @@ var openAiNativeModels = {
   maxTokens: 16384,
   contextWindow: 128e3,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: true,
   supportsPromptCache: true,
   inputPrice: 0.15,
@@ -3650,6 +3839,7 @@ var openAiNativeModels = {
   maxTokens: 16384,
   contextWindow: 2e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsImages: false,
   supportsPromptCache: false,
   inputPrice: 1.5,
@@ -3663,6 +3853,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3684,6 +3875,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3705,6 +3897,7 @@ var openAiNativeModels = {
   maxTokens: 128e3,
   contextWindow: 4e5,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   includedTools: ["apply_patch"],
   excludedTools: ["apply_diff", "write_to_file"],
   supportsImages: true,
@@ -3727,7 +3920,8 @@ var openAiModelInfoSaneDefaults = {
   supportsPromptCache: false,
   inputPrice: 0,
   outputPrice: 0,
-  supportsNativeTools: true
+  supportsNativeTools: true,
+  defaultToolProtocol: "native"
 };
 var azureOpenAiDefaultApiVersion = "2024-08-01-preview";
 var OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0;
@@ -3840,6 +4034,8 @@ var requestyDefaultModelInfo = {
   contextWindow: 2e5,
   supportsImages: true,
   supportsPromptCache: true,
+  supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 3.75,
@@ -3896,6 +4092,7 @@ var sambaNovaModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.1,
   outputPrice: 0.2,
   description: "Meta Llama 3.1 8B Instruct model with 16K context window."
@@ -3906,6 +4103,7 @@ var sambaNovaModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.6,
   outputPrice: 1.2,
   description: "Meta Llama 3.3 70B Instruct model with 128K context window."
@@ -3917,6 +4115,7 @@ var sambaNovaModels = {
   supportsPromptCache: false,
   supportsReasoningBudget: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 5,
   outputPrice: 7,
   description: "DeepSeek R1 reasoning model with 32K context window."
@@ -3927,6 +4126,7 @@ var sambaNovaModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 4.5,
   description: "DeepSeek V3 model with 32K context window."
@@ -3937,6 +4137,7 @@ var sambaNovaModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 4.5,
   description: "DeepSeek V3.1 model with 32K context window."
@@ -3956,6 +4157,7 @@ var sambaNovaModels = {
   supportsImages: true,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.63,
   outputPrice: 1.8,
   description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window."
@@ -3975,6 +4177,7 @@ var sambaNovaModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.4,
   outputPrice: 0.8,
   description: "Alibaba Qwen 3 32B model with 8K context window."
@@ -3985,6 +4188,7 @@ var sambaNovaModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.22,
   outputPrice: 0.59,
   description: "OpenAI gpt oss 120b model with 128k context window."
@@ -4013,9 +4217,11 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
   supportsReasoningEffort: ["low", "high"],
   reasoningEffort: "low",
+  includedTools: ["write_file", "edit_file"],
   supportsTemperature: true,
   defaultTemperature: 1,
   inputPrice: 4,
@@ -4033,12 +4239,31 @@ var vertexModels = {
   }
   ]
   },
+  "gemini-3-flash-preview": {
+  maxTokens: 65536,
+  contextWindow: 1048576,
+  supportsImages: true,
+  supportsNativeTools: true,
+  defaultToolProtocol: "native",
+  supportsPromptCache: true,
+  supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+  reasoningEffort: "medium",
+  includedTools: ["write_file", "edit_file"],
+  supportsTemperature: true,
+  defaultTemperature: 1,
+  inputPrice: 0.3,
+  outputPrice: 2.5,
+  cacheReadsPrice: 0.075,
+  cacheWritesPrice: 1
+  },
   "gemini-2.5-flash-preview-05-20:thinking": {
   maxTokens: 65535,
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.15,
   outputPrice: 3.5,
   maxThinkingTokens: 24576,
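The hunk above adds a brand-new "gemini-3-flash-preview" entry to vertexModels. A minimal TypeScript sketch of reading that entry, assuming vertexModels is re-exported from the package root like the other model maps (the import path is an assumption, not something this diff shows):

import { vertexModels } from "@roo-code/types";

// The Vertex entry introduced in 1.93.0, as listed in the hunk above.
const flashPreview = vertexModels["gemini-3-flash-preview"];

console.log(flashPreview.contextWindow);           // 1048576
console.log(flashPreview.defaultToolProtocol);     // "native"
console.log(flashPreview.supportsReasoningEffort); // ["minimal", "low", "medium", "high"]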
@@ -4050,7 +4275,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.15,
   outputPrice: 0.6
   },
@@ -4059,7 +4286,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.3,
   outputPrice: 2.5,
   cacheReadsPrice: 0.075,
@@ -4072,7 +4301,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: false,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.15,
   outputPrice: 3.5,
   maxThinkingTokens: 24576,
@@ -4084,7 +4315,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: false,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.15,
   outputPrice: 0.6
   },
@@ -4093,7 +4326,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 2.5,
   outputPrice: 15
   },
@@ -4102,7 +4337,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 2.5,
   outputPrice: 15
   },
@@ -4111,7 +4348,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 2.5,
   outputPrice: 15,
   maxThinkingTokens: 32768,
@@ -4122,7 +4361,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 2.5,
   outputPrice: 15,
   maxThinkingTokens: 32768,
@@ -4148,7 +4389,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: false,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0,
   outputPrice: 0
   },
@@ -4157,7 +4400,9 @@ var vertexModels = {
   contextWindow: 2097152,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: false,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0,
   outputPrice: 0
   },
@@ -4166,7 +4411,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.15,
   outputPrice: 0.6
   },
@@ -4175,7 +4422,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: false,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.075,
   outputPrice: 0.3
   },
@@ -4184,7 +4433,9 @@ var vertexModels = {
   contextWindow: 32768,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: false,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0,
   outputPrice: 0
   },
@@ -4193,7 +4444,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.075,
   outputPrice: 0.3
   },
@@ -4202,7 +4455,9 @@ var vertexModels = {
   contextWindow: 2097152,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: false,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 1.25,
   outputPrice: 5
   },
@@ -4348,7 +4603,9 @@ var vertexModels = {
   contextWindow: 1048576,
   supportsImages: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   supportsPromptCache: true,
+  includedTools: ["write_file", "edit_file"],
   inputPrice: 0.1,
   outputPrice: 0.4,
   cacheReadsPrice: 0.025,
@@ -4650,6 +4907,7 @@ var xaiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.2,
   outputPrice: 1.5,
   cacheWritesPrice: 0.02,
@@ -4664,6 +4922,7 @@ var xaiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.2,
   outputPrice: 0.5,
   cacheWritesPrice: 0.05,
@@ -4678,6 +4937,7 @@ var xaiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.2,
   outputPrice: 0.5,
   cacheWritesPrice: 0.05,
@@ -4692,6 +4952,7 @@ var xaiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.2,
   outputPrice: 0.5,
   cacheWritesPrice: 0.05,
@@ -4706,6 +4967,7 @@ var xaiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.2,
   outputPrice: 0.5,
   cacheWritesPrice: 0.05,
@@ -4720,6 +4982,7 @@ var xaiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 0.75,
@@ -4734,6 +4997,7 @@ var xaiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.3,
   outputPrice: 0.5,
   cacheWritesPrice: 0.07,
@@ -4750,6 +5014,7 @@ var xaiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 3,
   outputPrice: 15,
   cacheWritesPrice: 0.75,
@@ -4863,6 +5128,7 @@ var internationalZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.6,
   outputPrice: 2.2,
   cacheWritesPrice: 0,
@@ -4875,6 +5141,7 @@ var internationalZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.2,
   outputPrice: 1.1,
   cacheWritesPrice: 0,
@@ -4887,6 +5154,7 @@ var internationalZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 2.2,
   outputPrice: 8.9,
   cacheWritesPrice: 0,
@@ -4899,6 +5167,7 @@ var internationalZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 1.1,
   outputPrice: 4.5,
   cacheWritesPrice: 0,
@@ -4911,6 +5180,7 @@ var internationalZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   cacheWritesPrice: 0,
@@ -4923,6 +5193,7 @@ var internationalZAiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.6,
   outputPrice: 1.8,
   cacheWritesPrice: 0,
@@ -4935,6 +5206,7 @@ var internationalZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.6,
   outputPrice: 2.2,
   cacheWritesPrice: 0,
@@ -4947,6 +5219,7 @@ var internationalZAiModels = {
   supportsImages: false,
   supportsPromptCache: false,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.1,
   outputPrice: 0.1,
   cacheWritesPrice: 0,
@@ -4962,6 +5235,7 @@ var mainlandZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.29,
   outputPrice: 1.14,
   cacheWritesPrice: 0,
@@ -4974,6 +5248,7 @@ var mainlandZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.1,
   outputPrice: 0.6,
   cacheWritesPrice: 0,
@@ -4986,6 +5261,7 @@ var mainlandZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.29,
   outputPrice: 1.14,
   cacheWritesPrice: 0,
@@ -4998,6 +5274,7 @@ var mainlandZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.1,
   outputPrice: 0.6,
   cacheWritesPrice: 0,
@@ -5010,6 +5287,7 @@ var mainlandZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0,
   outputPrice: 0,
   cacheWritesPrice: 0,
@@ -5022,6 +5300,7 @@ var mainlandZAiModels = {
   supportsImages: true,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.29,
   outputPrice: 0.93,
   cacheWritesPrice: 0,
@@ -5034,6 +5313,7 @@ var mainlandZAiModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   inputPrice: 0.29,
   outputPrice: 1.14,
   cacheWritesPrice: 0,
@@ -5087,6 +5367,7 @@ var minimaxModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   preserveReasoning: true,
   inputPrice: 0.3,
   outputPrice: 1.2,
@@ -5100,6 +5381,7 @@ var minimaxModels = {
   supportsImages: false,
   supportsPromptCache: true,
   supportsNativeTools: true,
+  defaultToolProtocol: "native",
   preserveReasoning: true,
   inputPrice: 0.3,
   outputPrice: 1.2,
@@ -5282,10 +5564,7 @@ var anthropicSchema = apiModelIdProviderModelSchema.extend({
   anthropicBeta1MContext: import_zod8.z.boolean().optional()
   // Enable 'context-1m-2025-08-07' beta for 1M context window.
 });
-var claudeCodeSchema = apiModelIdProviderModelSchema.extend({
-  claudeCodePath: import_zod8.z.string().optional(),
-  claudeCodeMaxOutputTokens: import_zod8.z.number().int().min(1).max(2e5).optional()
-});
+var claudeCodeSchema = apiModelIdProviderModelSchema.extend({});
 var openRouterSchema = baseProviderSettingsSchema.extend({
   openRouterApiKey: import_zod8.z.string().optional(),
   openRouterModelId: import_zod8.z.string().optional(),
@@ -5310,8 +5589,10 @@ var bedrockSchema = apiModelIdProviderModelSchema.extend({
   awsModelContextWindow: import_zod8.z.number().optional(),
   awsBedrockEndpointEnabled: import_zod8.z.boolean().optional(),
   awsBedrockEndpoint: import_zod8.z.string().optional(),
-  awsBedrock1MContext: import_zod8.z.boolean().optional()
+  awsBedrock1MContext: import_zod8.z.boolean().optional(),
   // Enable 'context-1m-2025-08-07' beta for 1M context window.
+  awsBedrockServiceTier: import_zod8.z.enum(["STANDARD", "FLEX", "PRIORITY"]).optional()
+  // AWS Bedrock service tier selection
 });
 var vertexSchema = apiModelIdProviderModelSchema.extend({
   vertexKeyFile: import_zod8.z.string().optional(),
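The new awsBedrockServiceTier field accepts exactly the three values in the enum above. A standalone zod sketch of the same validation, for illustration only (this is not the package's own export):

import { z } from "zod";

// Mirrors the field added to bedrockSchema in the hunk above.
const awsBedrockServiceTier = z.enum(["STANDARD", "FLEX", "PRIORITY"]).optional();

awsBedrockServiceTier.parse("PRIORITY"); // ok
awsBedrockServiceTier.parse(undefined);  // ok, the field is optional
awsBedrockServiceTier.parse("BATCH");    // throws ZodError: invalid enum value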
@@ -5958,11 +6239,44 @@ function getErrorStatusCode(error) {
   }
   return void 0;
 }
+function extractMessageFromJsonPayload(message) {
+  const jsonStartIndex = message.indexOf("{");
+  if (jsonStartIndex === -1) {
+    return void 0;
+  }
+  const potentialJson = message.slice(jsonStartIndex);
+  try {
+    const parsed = JSON.parse(potentialJson);
+    if (parsed?.error?.message && typeof parsed.error.message === "string") {
+      return parsed.error.message;
+    }
+    if (parsed?.message && typeof parsed.message === "string") {
+      return parsed.message;
+    }
+  } catch {
+  }
+  return void 0;
+}
 function getErrorMessage(error) {
+  let message;
   if (isOpenAISdkError(error)) {
-
+    message = error.error?.metadata?.raw || error.error?.message || error.message;
+  } else if (error instanceof Error) {
+    message = error.message;
+  } else if (typeof error === "object" && error !== null && "message" in error) {
+    const msgValue = error.message;
+    if (typeof msgValue === "string") {
+      message = msgValue;
+    }
   }
-
+  if (!message) {
+    return void 0;
+  }
+  const extractedMessage = extractMessageFromJsonPayload(message);
+  if (extractedMessage) {
+    return extractedMessage;
+  }
+  return message;
 }
 function shouldReportApiErrorToTelemetry(errorCode, errorMessage) {
   if (errorCode !== void 0 && EXPECTED_API_ERROR_CODES.has(errorCode)) {
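The new error-message path is easier to follow with a small example. This is a minimal sketch of the 1.93.0 behavior shown above; both helpers appear in the package's export list, and the raw provider string is an invented example:

import { extractMessageFromJsonPayload, getErrorMessage } from "@roo-code/types";

// A provider error whose human-readable message is wrapped in a JSON payload.
const raw = '400 Bad Request: {"error":{"message":"Rate limit exceeded"}}';

// Scans for the first "{", parses the remainder, and prefers error.message, then message.
console.log(extractMessageFromJsonPayload(raw)); // "Rate limit exceeded"

// getErrorMessage now applies the same extraction to Error instances and
// message-bearing objects before falling back to the original string.
console.log(getErrorMessage(new Error(raw)));               // "Rate limit exceeded"
console.log(getErrorMessage({ message: "plain failure" })); // "plain failure"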
@@ -6191,7 +6505,6 @@ var globalSettingsSchema = import_zod14.z.object({
   alwaysAllowWriteProtected: import_zod14.z.boolean().optional(),
   writeDelayMs: import_zod14.z.number().min(0).optional(),
   alwaysAllowBrowser: import_zod14.z.boolean().optional(),
-  alwaysApproveResubmit: import_zod14.z.boolean().optional(),
   requestDelaySeconds: import_zod14.z.number().optional(),
   alwaysAllowMcp: import_zod14.z.boolean().optional(),
   alwaysAllowModeSwitch: import_zod14.z.boolean().optional(),
@@ -6199,7 +6512,6 @@ var globalSettingsSchema = import_zod14.z.object({
   alwaysAllowExecute: import_zod14.z.boolean().optional(),
   alwaysAllowFollowupQuestions: import_zod14.z.boolean().optional(),
   followupAutoApproveTimeoutMs: import_zod14.z.number().optional(),
-  alwaysAllowUpdateTodoList: import_zod14.z.boolean().optional(),
   allowedCommands: import_zod14.z.array(import_zod14.z.string()).optional(),
   deniedCommands: import_zod14.z.array(import_zod14.z.string()).optional(),
   commandExecutionTimeout: import_zod14.z.number().optional(),
@@ -6361,14 +6673,12 @@ var EVALS_SETTINGS = {
   alwaysAllowWriteProtected: false,
   writeDelayMs: 1e3,
   alwaysAllowBrowser: true,
-  alwaysApproveResubmit: true,
   requestDelaySeconds: 10,
   alwaysAllowMcp: true,
   alwaysAllowModeSwitch: true,
   alwaysAllowSubtasks: true,
   alwaysAllowExecute: true,
   alwaysAllowFollowupQuestions: true,
-  alwaysAllowUpdateTodoList: true,
   followupAutoApproveTimeoutMs: 0,
   allowedCommands: ["*"],
   commandExecutionTimeout: 20,
@@ -6498,10 +6808,11 @@ var organizationDefaultSettingsSchema = globalSettingsSchema.pick({
   terminalShellIntegrationTimeout: import_zod16.z.number().int().nonnegative().optional()
 })
 );
-var workspaceTaskVisibilitySchema = import_zod16.z.enum(["all", "list-only", "full-lockdown"]);
+var workspaceTaskVisibilitySchema = import_zod16.z.enum(["all", "list-only", "admins-and-creator", "creator", "full-lockdown"]);
 var organizationCloudSettingsSchema = import_zod16.z.object({
   recordTaskMessages: import_zod16.z.boolean().optional(),
   enableTaskSharing: import_zod16.z.boolean().optional(),
+  allowPublicTaskSharing: import_zod16.z.boolean().optional(),
   taskShareExpirationDays: import_zod16.z.number().int().positive().optional(),
   allowMembersViewAllTasks: import_zod16.z.boolean().optional(),
   workspaceTaskVisibility: workspaceTaskVisibilitySchema.optional()
@@ -6541,6 +6852,7 @@ var ORGANIZATION_DEFAULT = {
   cloudSettings: {
   recordTaskMessages: true,
   enableTaskSharing: true,
+  allowPublicTaskSharing: true,
   taskShareExpirationDays: 30,
   allowMembersViewAllTasks: true
   },
@@ -7077,7 +7389,8 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
   BEDROCK_GLOBAL_INFERENCE_MODEL_IDS,
   BEDROCK_MAX_TOKENS,
   BEDROCK_REGIONS,
-
+  BEDROCK_SERVICE_TIER_MODEL_IDS,
+  BEDROCK_SERVICE_TIER_PRICING,
   CODEBASE_INDEX_DEFAULTS,
   CONSENT_COOKIE_NAME,
   CONTEXT_MANAGEMENT_EVENTS,
@@ -7167,6 +7480,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
   chutesModels,
   claudeCodeDefaultModelId,
   claudeCodeModels,
+  claudeCodeReasoningConfig,
   clineAskSchema,
   clineAsks,
   clineMessageSchema,
@@ -7181,7 +7495,6 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
   commandIds,
   contextCondenseSchema,
   contextTruncationSchema,
-  convertModelNameForVertex,
   customModePromptsSchema,
   customModesSettingsSchema,
   customProviders,
@@ -7203,6 +7516,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
   extensionBridgeEventSchema,
   extensionInstanceSchema,
   extractApiProviderErrorProperties,
+  extractMessageFromJsonPayload,
   fauxProviders,
   featherlessDefaultModelId,
   featherlessModels,
@@ -7212,7 +7526,6 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
   geminiDefaultModelId,
   geminiModels,
   getApiProtocol,
-  getClaudeCodeModelId,
   getEffectiveProtocol,
   getErrorMessage,
   getErrorStatusCode,
@@ -7284,6 +7597,7 @@ var commandExecutionStatusSchema = import_zod21.z.discriminatedUnion("status", [
   moonshotDefaultModelId,
   moonshotModels,
   nonBlockingAsks,
+  normalizeClaudeCodeModelId,
   ollamaDefaultModelId,
   ollamaDefaultModelInfo,
   openAiModelInfoSaneDefaults,