@mariozechner/pi-ai 0.49.2 → 0.50.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +32 -22
- package/dist/api-registry.d.ts +20 -0
- package/dist/api-registry.d.ts.map +1 -0
- package/dist/api-registry.js +44 -0
- package/dist/api-registry.js.map +1 -0
- package/dist/cli.d.ts.map +1 -1
- package/dist/cli.js +22 -67
- package/dist/cli.js.map +1 -1
- package/dist/env-api-keys.d.ts +9 -0
- package/dist/env-api-keys.d.ts.map +1 -0
- package/dist/env-api-keys.js +91 -0
- package/dist/env-api-keys.js.map +1 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -1
- package/dist/models.generated.d.ts +649 -126
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +679 -151
- package/dist/models.generated.js.map +1 -1
- package/dist/providers/amazon-bedrock.d.ts +3 -2
- package/dist/providers/amazon-bedrock.d.ts.map +1 -1
- package/dist/providers/amazon-bedrock.js +52 -5
- package/dist/providers/amazon-bedrock.js.map +1 -1
- package/dist/providers/anthropic.d.ts +3 -2
- package/dist/providers/anthropic.d.ts.map +1 -1
- package/dist/providers/anthropic.js +35 -10
- package/dist/providers/anthropic.js.map +1 -1
- package/dist/providers/azure-openai-responses.d.ts +15 -0
- package/dist/providers/azure-openai-responses.d.ts.map +1 -0
- package/dist/providers/azure-openai-responses.js +184 -0
- package/dist/providers/azure-openai-responses.js.map +1 -0
- package/dist/providers/google-gemini-cli.d.ts +3 -2
- package/dist/providers/google-gemini-cli.d.ts.map +1 -1
- package/dist/providers/google-gemini-cli.js +69 -1
- package/dist/providers/google-gemini-cli.js.map +1 -1
- package/dist/providers/google-vertex.d.ts +3 -2
- package/dist/providers/google-vertex.d.ts.map +1 -1
- package/dist/providers/google-vertex.js +85 -5
- package/dist/providers/google-vertex.js.map +1 -1
- package/dist/providers/google.d.ts +3 -2
- package/dist/providers/google.d.ts.map +1 -1
- package/dist/providers/google.js +88 -7
- package/dist/providers/google.js.map +1 -1
- package/dist/providers/openai-codex-responses.d.ts +3 -2
- package/dist/providers/openai-codex-responses.d.ts.map +1 -1
- package/dist/providers/openai-codex-responses.js +71 -311
- package/dist/providers/openai-codex-responses.js.map +1 -1
- package/dist/providers/openai-completions.d.ts +5 -2
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +84 -43
- package/dist/providers/openai-completions.js.map +1 -1
- package/dist/providers/openai-responses-shared.d.ts +17 -0
- package/dist/providers/openai-responses-shared.d.ts.map +1 -0
- package/dist/providers/openai-responses-shared.js +424 -0
- package/dist/providers/openai-responses-shared.js.map +1 -0
- package/dist/providers/openai-responses.d.ts +3 -2
- package/dist/providers/openai-responses.d.ts.map +1 -1
- package/dist/providers/openai-responses.js +31 -402
- package/dist/providers/openai-responses.js.map +1 -1
- package/dist/providers/register-builtins.d.ts +3 -0
- package/dist/providers/register-builtins.d.ts.map +1 -0
- package/dist/providers/register-builtins.js +63 -0
- package/dist/providers/register-builtins.js.map +1 -0
- package/dist/providers/simple-options.d.ts +8 -0
- package/dist/providers/simple-options.d.ts.map +1 -0
- package/dist/providers/simple-options.js +32 -0
- package/dist/providers/simple-options.js.map +1 -0
- package/dist/stream.d.ts +5 -10
- package/dist/stream.d.ts.map +1 -1
- package/dist/stream.js +21 -404
- package/dist/stream.js.map +1 -1
- package/dist/types.d.ts +24 -22
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +0 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/event-stream.d.ts +2 -0
- package/dist/utils/event-stream.d.ts.map +1 -1
- package/dist/utils/event-stream.js +4 -0
- package/dist/utils/event-stream.js.map +1 -1
- package/dist/utils/oauth/anthropic.d.ts +2 -1
- package/dist/utils/oauth/anthropic.d.ts.map +1 -1
- package/dist/utils/oauth/anthropic.js +13 -0
- package/dist/utils/oauth/anthropic.js.map +1 -1
- package/dist/utils/oauth/github-copilot.d.ts +2 -1
- package/dist/utils/oauth/github-copilot.d.ts.map +1 -1
- package/dist/utils/oauth/github-copilot.js +25 -0
- package/dist/utils/oauth/github-copilot.js.map +1 -1
- package/dist/utils/oauth/google-antigravity.d.ts +2 -1
- package/dist/utils/oauth/google-antigravity.d.ts.map +1 -1
- package/dist/utils/oauth/google-antigravity.js +19 -0
- package/dist/utils/oauth/google-antigravity.js.map +1 -1
- package/dist/utils/oauth/google-gemini-cli.d.ts +2 -1
- package/dist/utils/oauth/google-gemini-cli.d.ts.map +1 -1
- package/dist/utils/oauth/google-gemini-cli.js +19 -0
- package/dist/utils/oauth/google-gemini-cli.js.map +1 -1
- package/dist/utils/oauth/index.d.ts +26 -16
- package/dist/utils/oauth/index.d.ts.map +1 -1
- package/dist/utils/oauth/index.js +65 -84
- package/dist/utils/oauth/index.js.map +1 -1
- package/dist/utils/oauth/openai-codex.d.ts +7 -1
- package/dist/utils/oauth/openai-codex.d.ts.map +1 -1
- package/dist/utils/oauth/openai-codex.js +46 -8
- package/dist/utils/oauth/openai-codex.js.map +1 -1
- package/dist/utils/oauth/types.d.ts +28 -6
- package/dist/utils/oauth/types.d.ts.map +1 -1
- package/dist/utils/oauth/types.js.map +1 -1
- package/package.json +3 -1
package/dist/models.generated.js
CHANGED
@@ -1297,6 +1297,586 @@ export const MODELS = {
 			maxTokens: 64000,
 		},
 	},
+	"azure-openai-responses": {
+		"codex-mini-latest": {
+			id: "codex-mini-latest",
+			name: "Codex Mini",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text"],
+			cost: {
+				input: 1.5,
+				output: 6,
+				cacheRead: 0.375,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+		"gpt-4": {
+			id: "gpt-4",
+			name: "GPT-4",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text"],
+			cost: {
+				input: 30,
+				output: 60,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 8192,
+			maxTokens: 8192,
+		},
+		"gpt-4-turbo": {
+			id: "gpt-4-turbo",
+			name: "GPT-4 Turbo",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 10,
+				output: 30,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 4096,
+		},
+		"gpt-4.1": {
+			id: "gpt-4.1",
+			name: "GPT-4.1",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 2,
+				output: 8,
+				cacheRead: 0.5,
+				cacheWrite: 0,
+			},
+			contextWindow: 1047576,
+			maxTokens: 32768,
+		},
+		"gpt-4.1-mini": {
+			id: "gpt-4.1-mini",
+			name: "GPT-4.1 mini",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 0.4,
+				output: 1.6,
+				cacheRead: 0.1,
+				cacheWrite: 0,
+			},
+			contextWindow: 1047576,
+			maxTokens: 32768,
+		},
+		"gpt-4.1-nano": {
+			id: "gpt-4.1-nano",
+			name: "GPT-4.1 nano",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 0.1,
+				output: 0.4,
+				cacheRead: 0.03,
+				cacheWrite: 0,
+			},
+			contextWindow: 1047576,
+			maxTokens: 32768,
+		},
+		"gpt-4o": {
+			id: "gpt-4o",
+			name: "GPT-4o",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 2.5,
+				output: 10,
+				cacheRead: 1.25,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 16384,
+		},
+		"gpt-4o-2024-05-13": {
+			id: "gpt-4o-2024-05-13",
+			name: "GPT-4o (2024-05-13)",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 5,
+				output: 15,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 4096,
+		},
+		"gpt-4o-2024-08-06": {
+			id: "gpt-4o-2024-08-06",
+			name: "GPT-4o (2024-08-06)",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 2.5,
+				output: 10,
+				cacheRead: 1.25,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 16384,
+		},
+		"gpt-4o-2024-11-20": {
+			id: "gpt-4o-2024-11-20",
+			name: "GPT-4o (2024-11-20)",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 2.5,
+				output: 10,
+				cacheRead: 1.25,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 16384,
+		},
+		"gpt-4o-mini": {
+			id: "gpt-4o-mini",
+			name: "GPT-4o mini",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 0.15,
+				output: 0.6,
+				cacheRead: 0.08,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 16384,
+		},
+		"gpt-5": {
+			id: "gpt-5",
+			name: "GPT-5",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 10,
+				cacheRead: 0.125,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5-chat-latest": {
+			id: "gpt-5-chat-latest",
+			name: "GPT-5 Chat Latest",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 10,
+				cacheRead: 0.125,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 16384,
+		},
+		"gpt-5-codex": {
+			id: "gpt-5-codex",
+			name: "GPT-5-Codex",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 10,
+				cacheRead: 0.125,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5-mini": {
+			id: "gpt-5-mini",
+			name: "GPT-5 Mini",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 0.25,
+				output: 2,
+				cacheRead: 0.025,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5-nano": {
+			id: "gpt-5-nano",
+			name: "GPT-5 Nano",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 0.05,
+				output: 0.4,
+				cacheRead: 0.005,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5-pro": {
+			id: "gpt-5-pro",
+			name: "GPT-5 Pro",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 15,
+				output: 120,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 272000,
+		},
+		"gpt-5.1": {
+			id: "gpt-5.1",
+			name: "GPT-5.1",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 10,
+				cacheRead: 0.13,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5.1-chat-latest": {
+			id: "gpt-5.1-chat-latest",
+			name: "GPT-5.1 Chat",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 10,
+				cacheRead: 0.125,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 16384,
+		},
+		"gpt-5.1-codex": {
+			id: "gpt-5.1-codex",
+			name: "GPT-5.1 Codex",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 10,
+				cacheRead: 0.125,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5.1-codex-max": {
+			id: "gpt-5.1-codex-max",
+			name: "GPT-5.1 Codex Max",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 10,
+				cacheRead: 0.125,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5.1-codex-mini": {
+			id: "gpt-5.1-codex-mini",
+			name: "GPT-5.1 Codex mini",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 0.25,
+				output: 2,
+				cacheRead: 0.025,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5.2": {
+			id: "gpt-5.2",
+			name: "GPT-5.2",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.75,
+				output: 14,
+				cacheRead: 0.175,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5.2-chat-latest": {
+			id: "gpt-5.2-chat-latest",
+			name: "GPT-5.2 Chat",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.75,
+				output: 14,
+				cacheRead: 0.175,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 16384,
+		},
+		"gpt-5.2-codex": {
+			id: "gpt-5.2-codex",
+			name: "GPT-5.2 Codex",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.75,
+				output: 14,
+				cacheRead: 0.175,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"gpt-5.2-pro": {
+			id: "gpt-5.2-pro",
+			name: "GPT-5.2 Pro",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 21,
+				output: 168,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		},
+		"o1": {
+			id: "o1",
+			name: "o1",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 15,
+				output: 60,
+				cacheRead: 7.5,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+		"o1-pro": {
+			id: "o1-pro",
+			name: "o1-pro",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 150,
+				output: 600,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+		"o3": {
+			id: "o3",
+			name: "o3",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 2,
+				output: 8,
+				cacheRead: 0.5,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+		"o3-deep-research": {
+			id: "o3-deep-research",
+			name: "o3-deep-research",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 10,
+				output: 40,
+				cacheRead: 2.5,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+		"o3-mini": {
+			id: "o3-mini",
+			name: "o3-mini",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text"],
+			cost: {
+				input: 1.1,
+				output: 4.4,
+				cacheRead: 0.55,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+		"o3-pro": {
+			id: "o3-pro",
+			name: "o3-pro",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 20,
+				output: 80,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+		"o4-mini": {
+			id: "o4-mini",
+			name: "o4-mini",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.1,
+				output: 4.4,
+				cacheRead: 0.28,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+		"o4-mini-deep-research": {
+			id: "o4-mini-deep-research",
+			name: "o4-mini-deep-research",
+			api: "azure-openai-responses",
+			provider: "azure-openai-responses",
+			baseUrl: "",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 2,
+				output: 8,
+				cacheRead: 0.5,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 100000,
+		},
+	},
 	"cerebras": {
 		"gpt-oss-120b": {
 			id: "gpt-oss-120b",
@@ -1500,7 +2080,7 @@ export const MODELS = {
 				cacheRead: 0,
 				cacheWrite: 0,
 			},
-			contextWindow:
+			contextWindow: 64000,
 			maxTokens: 16384,
 		},
 		"gpt-4o": {
@@ -1540,24 +2120,6 @@ export const MODELS = {
 			contextWindow: 128000,
 			maxTokens: 128000,
 		},
-		"gpt-5-codex": {
-			id: "gpt-5-codex",
-			name: "GPT-5-Codex",
-			api: "openai-responses",
-			provider: "github-copilot",
-			baseUrl: "https://api.individual.githubcopilot.com",
-			headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
-			reasoning: true,
-			input: ["text", "image"],
-			cost: {
-				input: 0,
-				output: 0,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 128000,
-			maxTokens: 128000,
-		},
 		"gpt-5-mini": {
 			id: "gpt-5-mini",
 			name: "GPT-5-mini",
@@ -3415,7 +3977,7 @@ export const MODELS = {
 			cost: {
 				input: 1.25,
 				output: 10,
-				cacheRead: 0.
+				cacheRead: 0.125,
 				cacheWrite: 0,
 			},
 			contextWindow: 400000,
@@ -3466,7 +4028,7 @@ export const MODELS = {
 			cost: {
 				input: 0.25,
 				output: 2,
-				cacheRead: 0.
+				cacheRead: 0.025,
 				cacheWrite: 0,
 			},
 			contextWindow: 400000,
@@ -3483,7 +4045,7 @@ export const MODELS = {
 			cost: {
 				input: 0.05,
 				output: 0.4,
-				cacheRead: 0.
+				cacheRead: 0.005,
 				cacheWrite: 0,
 			},
 			contextWindow: 400000,
@@ -3884,40 +4446,6 @@ export const MODELS = {
 		},
 	},
 	"opencode": {
-		"alpha-gd4": {
-			id: "alpha-gd4",
-			name: "Alpha GD4",
-			api: "anthropic-messages",
-			provider: "opencode",
-			baseUrl: "https://opencode.ai/zen",
-			reasoning: true,
-			input: ["text"],
-			cost: {
-				input: 0.5,
-				output: 2,
-				cacheRead: 0.15,
-				cacheWrite: 0,
-			},
-			contextWindow: 262144,
-			maxTokens: 32768,
-		},
-		"alpha-glm-4.7": {
-			id: "alpha-glm-4.7",
-			name: "Alpha GLM-4.7",
-			api: "openai-completions",
-			provider: "opencode",
-			baseUrl: "https://opencode.ai/zen/v1",
-			reasoning: true,
-			input: ["text"],
-			cost: {
-				input: 0.6,
-				output: 2.2,
-				cacheRead: 0.6,
-				cacheWrite: 0,
-			},
-			contextWindow: 204800,
-			maxTokens: 131072,
-		},
 		"big-pickle": {
 			id: "big-pickle",
 			name: "Big Pickle",
@@ -4088,6 +4616,23 @@ export const MODELS = {
 			contextWindow: 204800,
 			maxTokens: 131072,
 		},
+		"glm-4.7": {
+			id: "glm-4.7",
+			name: "GLM-4.7",
+			api: "openai-completions",
+			provider: "opencode",
+			baseUrl: "https://opencode.ai/zen/v1",
+			reasoning: true,
+			input: ["text"],
+			cost: {
+				input: 0.6,
+				output: 2.2,
+				cacheRead: 0.1,
+				cacheWrite: 0,
+			},
+			contextWindow: 204800,
+			maxTokens: 131072,
+		},
 		"glm-4.7-free": {
 			id: "glm-4.7-free",
 			name: "GLM-4.7",
@@ -4526,8 +5071,8 @@ export const MODELS = {
 			cost: {
 				input: 0.7999999999999999,
 				output: 4,
-				cacheRead: 0
-				cacheWrite:
+				cacheRead: 0,
+				cacheWrite: 0,
 			},
 			contextWindow: 200000,
 			maxTokens: 8192,
@@ -4802,7 +5347,7 @@ export const MODELS = {
 				cacheWrite: 0,
 			},
 			contextWindow: 262144,
-			maxTokens:
+			maxTokens: 32768,
 		},
 		"cohere/command-r-08-2024": {
 			id: "cohere/command-r-08-2024",
@@ -5071,7 +5616,7 @@ export const MODELS = {
 				input: 0.09999999999999999,
 				output: 0.39999999999999997,
 				cacheRead: 0.024999999999999998,
-				cacheWrite: 0.
+				cacheWrite: 0.08333333333333334,
 			},
 			contextWindow: 1048576,
 			maxTokens: 8192,
@@ -5139,7 +5684,7 @@ export const MODELS = {
 				input: 0.09999999999999999,
 				output: 0.39999999999999997,
 				cacheRead: 0.01,
-				cacheWrite: 0.
+				cacheWrite: 0.08333333333333334,
 			},
 			contextWindow: 1048576,
 			maxTokens: 65535,
@@ -5156,10 +5701,10 @@ export const MODELS = {
 				input: 0.09999999999999999,
 				output: 0.39999999999999997,
 				cacheRead: 0.01,
-				cacheWrite: 0.
+				cacheWrite: 0.08333333333333334,
 			},
 			contextWindow: 1048576,
-			maxTokens:
+			maxTokens: 65535,
 		},
 		"google/gemini-2.5-flash-preview-09-2025": {
 			id: "google/gemini-2.5-flash-preview-09-2025",
@@ -5172,8 +5717,8 @@ export const MODELS = {
 			cost: {
 				input: 0.3,
 				output: 2.5,
-				cacheRead: 0.
-				cacheWrite: 0.
+				cacheRead: 0.03,
+				cacheWrite: 0.08333333333333334,
 			},
 			contextWindow: 1048576,
 			maxTokens: 65535,
@@ -5206,7 +5751,7 @@ export const MODELS = {
 			cost: {
 				input: 1.25,
 				output: 10,
-				cacheRead: 0.
+				cacheRead: 0.125,
 				cacheWrite: 0.375,
 			},
 			contextWindow: 1048576,
@@ -5223,7 +5768,7 @@ export const MODELS = {
 			cost: {
 				input: 1.25,
 				output: 10,
-				cacheRead: 0.
+				cacheRead: 0.125,
 				cacheWrite: 0.375,
 			},
 			contextWindow: 1048576,
@@ -5241,7 +5786,7 @@ export const MODELS = {
 				input: 0.5,
 				output: 3,
 				cacheRead: 0.049999999999999996,
-				cacheWrite: 0,
+				cacheWrite: 0.08333333333333334,
 			},
 			contextWindow: 1048576,
 			maxTokens: 65535,
@@ -5528,12 +6073,12 @@ export const MODELS = {
 			input: ["text"],
 			cost: {
 				input: 0.27,
-				output: 1.
+				output: 1.1,
 				cacheRead: 0,
 				cacheWrite: 0,
 			},
 			contextWindow: 196608,
-			maxTokens:
+			maxTokens: 196608,
 		},
 		"mistralai/codestral-2508": {
 			id: "mistralai/codestral-2508",
@@ -5822,7 +6367,7 @@ export const MODELS = {
 				cacheWrite: 0,
 			},
 			contextWindow: 131072,
-			maxTokens:
+			maxTokens: 16384,
 		},
 		"mistralai/mistral-saba": {
 			id: "mistralai/mistral-saba",
@@ -7535,7 +8080,7 @@ export const MODELS = {
 			cost: {
 				input: 0.22,
 				output: 1.7999999999999998,
-				cacheRead: 0,
+				cacheRead: 0.022,
 				cacheWrite: 0,
 			},
 			contextWindow: 262144,
@@ -7643,6 +8188,23 @@ export const MODELS = {
 			contextWindow: 262144,
 			maxTokens: 4096,
 		},
+		"qwen/qwen3-vl-235b-a22b-thinking": {
+			id: "qwen/qwen3-vl-235b-a22b-thinking",
+			name: "Qwen: Qwen3 VL 235B A22B Thinking",
+			api: "openai-completions",
+			provider: "openrouter",
+			baseUrl: "https://openrouter.ai/api/v1",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 0.44999999999999996,
+				output: 3.5,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 262144,
+			maxTokens: 262144,
+		},
 		"qwen/qwen3-vl-30b-a3b-instruct": {
 			id: "qwen/qwen3-vl-30b-a3b-instruct",
 			name: "Qwen: Qwen3 VL 30B A3B Instruct",
@@ -7654,7 +8216,7 @@ export const MODELS = {
 			cost: {
 				input: 0.15,
 				output: 0.6,
-				cacheRead: 0,
+				cacheRead: 0.075,
 				cacheWrite: 0,
 			},
 			contextWindow: 262144,
@@ -8034,23 +8596,6 @@ export const MODELS = {
 			contextWindow: 262144,
 			maxTokens: 4096,
 		},
-		"xiaomi/mimo-v2-flash:free": {
-			id: "xiaomi/mimo-v2-flash:free",
-			name: "Xiaomi: MiMo-V2-Flash (free)",
-			api: "openai-completions",
-			provider: "openrouter",
-			baseUrl: "https://openrouter.ai/api/v1",
-			reasoning: true,
-			input: ["text"],
-			cost: {
-				input: 0,
-				output: 0,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 262144,
-			maxTokens: 65536,
-		},
 		"z-ai/glm-4-32b": {
 			id: "z-ai/glm-4-32b",
 			name: "Z.AI: GLM 4 32B ",
@@ -8164,7 +8709,7 @@ export const MODELS = {
 			cost: {
 				input: 0.44,
 				output: 1.76,
-				cacheRead: 0,
+				cacheRead: 0.11,
 				cacheWrite: 0,
 			},
 			contextWindow: 204800,
@@ -8204,6 +8749,23 @@ export const MODELS = {
 			contextWindow: 202752,
 			maxTokens: 65535,
 		},
+		"z-ai/glm-4.7-flash": {
+			id: "z-ai/glm-4.7-flash",
+			name: "Z.AI: GLM 4.7 Flash",
+			api: "openai-completions",
+			provider: "openrouter",
+			baseUrl: "https://openrouter.ai/api/v1",
+			reasoning: true,
+			input: ["text"],
+			cost: {
+				input: 0.07,
+				output: 0.39999999999999997,
+				cacheRead: 0.01,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 131072,
+		},
 	},
 	"vercel-ai-gateway": {
 		"alibaba/qwen-3-14b": {
@@ -8336,7 +8898,7 @@ export const MODELS = {
 			cost: {
 				input: 1,
 				output: 5,
-				cacheRead: 0,
+				cacheRead: 0.19999999999999998,
 				cacheWrite: 0,
 			},
 			contextWindow: 1000000,
@@ -8393,23 +8955,6 @@ export const MODELS = {
 			contextWindow: 200000,
 			maxTokens: 4096,
 		},
-		"anthropic/claude-3-opus": {
-			id: "anthropic/claude-3-opus",
-			name: "Claude 3 Opus",
-			api: "anthropic-messages",
-			provider: "vercel-ai-gateway",
-			baseUrl: "https://ai-gateway.vercel.sh",
-			reasoning: false,
-			input: ["text", "image"],
-			cost: {
-				input: 15,
-				output: 75,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 200000,
-			maxTokens: 8192,
-		},
 		"anthropic/claude-3.5-haiku": {
 			id: "anthropic/claude-3.5-haiku",
 			name: "Claude 3.5 Haiku",
@@ -8560,7 +9105,7 @@ export const MODELS = {
 				cacheRead: 0.3,
 				cacheWrite: 3.75,
 			},
-			contextWindow:
+			contextWindow: 1000000,
 			maxTokens: 64000,
 		},
 		"anthropic/claude-sonnet-4.5": {
@@ -8577,7 +9122,7 @@ export const MODELS = {
 				cacheRead: 0.3,
 				cacheWrite: 3.75,
 			},
-			contextWindow:
+			contextWindow: 1000000,
 			maxTokens: 64000,
 		},
 		"bytedance/seed-1.6": {
@@ -8699,40 +9244,6 @@ export const MODELS = {
 			contextWindow: 128000,
 			maxTokens: 64000,
 		},
-		"google/gemini-2.0-flash": {
-			id: "google/gemini-2.0-flash",
-			name: "Gemini 2.0 Flash",
-			api: "anthropic-messages",
-			provider: "vercel-ai-gateway",
-			baseUrl: "https://ai-gateway.vercel.sh",
-			reasoning: false,
-			input: ["text", "image"],
-			cost: {
-				input: 0.09999999999999999,
-				output: 0.39999999999999997,
-				cacheRead: 0.024999999999999998,
-				cacheWrite: 0,
-			},
-			contextWindow: 1000000,
-			maxTokens: 8192,
-		},
-		"google/gemini-2.0-flash-lite": {
-			id: "google/gemini-2.0-flash-lite",
-			name: "Gemini 2.0 Flash Lite",
-			api: "anthropic-messages",
-			provider: "vercel-ai-gateway",
-			baseUrl: "https://ai-gateway.vercel.sh",
-			reasoning: false,
-			input: ["text", "image"],
-			cost: {
-				input: 0.075,
-				output: 0.3,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 1048576,
-			maxTokens: 8192,
-		},
 		"google/gemini-2.5-flash": {
 			id: "google/gemini-2.5-flash",
 			name: "Gemini 2.5 Flash",
@@ -8740,15 +9251,15 @@ export const MODELS = {
 			provider: "vercel-ai-gateway",
 			baseUrl: "https://ai-gateway.vercel.sh",
 			reasoning: true,
-			input: ["text"
+			input: ["text"],
 			cost: {
 				input: 0.3,
 				output: 2.5,
-				cacheRead: 0
+				cacheRead: 0,
 				cacheWrite: 0,
 			},
 			contextWindow: 1000000,
-			maxTokens:
+			maxTokens: 65536,
 		},
 		"google/gemini-2.5-flash-lite": {
 			id: "google/gemini-2.5-flash-lite",
@@ -8808,11 +9319,11 @@ export const MODELS = {
 			provider: "vercel-ai-gateway",
 			baseUrl: "https://ai-gateway.vercel.sh",
 			reasoning: true,
-			input: ["text"
+			input: ["text"],
 			cost: {
 				input: 1.25,
 				output: 10,
-				cacheRead: 0
+				cacheRead: 0,
 				cacheWrite: 0,
 			},
 			contextWindow: 1048576,
@@ -10223,7 +10734,7 @@ export const MODELS = {
 			cost: {
 				input: 0.19999999999999998,
 				output: 1.1,
-				cacheRead: 0,
+				cacheRead: 0.03,
 				cacheWrite: 0,
 			},
 			contextWindow: 128000,
@@ -10314,6 +10825,23 @@ export const MODELS = {
 			contextWindow: 202752,
 			maxTokens: 120000,
 		},
+		"zai/glm-4.7-flashx": {
+			id: "zai/glm-4.7-flashx",
+			name: "GLM 4.7 FlashX",
+			api: "anthropic-messages",
+			provider: "vercel-ai-gateway",
+			baseUrl: "https://ai-gateway.vercel.sh",
+			reasoning: true,
+			input: ["text"],
+			cost: {
+				input: 0.06,
+				output: 0.39999999999999997,
+				cacheRead: 0.01,
+				cacheWrite: 0,
+			},
+			contextWindow: 200000,
+			maxTokens: 128000,
+		},
 	},
 	"xai": {
 		"grok-2": {
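All of the entries touched above share the same registry shape: MODELS is keyed by provider, then by model id, and each entry carries cost rates plus contextWindow and maxTokens. As a hedged illustration only (assuming the cost fields are USD per million tokens and that MODELS is importable from the package root; the helper below is a sketch, not part of pi-ai's public API), a request cost against one of the new azure-openai-responses entries could be estimated like this:

```ts
// Hypothetical helper -- not part of pi-ai's public API.
// Assumption: cost fields are USD per 1,000,000 tokens.
import { MODELS } from "@mariozechner/pi-ai"; // assumed re-export of models.generated

interface TokenUsage {
	input: number;      // uncached prompt tokens
	output: number;     // completion tokens
	cacheRead: number;  // prompt tokens read from cache
	cacheWrite: number; // prompt tokens written to cache
}

function estimateCostUsd(provider: string, modelId: string, usage: TokenUsage): number {
	const registry = MODELS as Record<string, Record<string, any>>;
	const model = registry[provider]?.[modelId];
	if (!model) throw new Error(`Unknown model: ${provider}/${modelId}`);
	const perMillion = (rate: number, tokens: number) => (rate * tokens) / 1_000_000;
	return (
		perMillion(model.cost.input, usage.input) +
		perMillion(model.cost.output, usage.output) +
		perMillion(model.cost.cacheRead, usage.cacheRead) +
		perMillion(model.cost.cacheWrite, usage.cacheWrite)
	);
}

// Example: 12k uncached input + 50k cached input + 2k output on the new gpt-5.1 entry.
console.log(estimateCostUsd("azure-openai-responses", "gpt-5.1", {
	input: 12_000,
	output: 2_000,
	cacheRead: 50_000,
	cacheWrite: 0,
}));
```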