@mariozechner/pi-ai 0.12.10 → 0.12.11

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
@@ -1077,6 +1077,23 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
+ "gpt-5.1-codex-max": {
+ id: "gpt-5.1-codex-max",
+ name: "GPT-5.1 Codex Max",
+ api: "openai-responses",
+ provider: "openai",
+ baseUrl: "https://api.openai.com/v1",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 1.25,
+ output: 10,
+ cacheRead: 0.125,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
  "o3": {
  id: "o3",
  name: "o3",
@@ -1196,23 +1213,6 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
- "gpt-5.1-codex-max": {
- id: "gpt-5.1-codex-max",
- name: "GPT-5.1 Codex Max",
- api: "openai-responses",
- provider: "openai",
- baseUrl: "https://api.openai.com/v1",
- reasoning: true,
- input: ["text", "image"],
- cost: {
- input: 1.25,
- output: 10,
- cacheRead: 0.125,
- cacheWrite: 0,
- },
- contextWindow: 400000,
- maxTokens: 128000,
- },
  },
  groq: {
  "llama-3.1-8b-instant": {
@@ -2022,6 +2022,57 @@ export const MODELS = {
  contextWindow: 1000000,
  maxTokens: 65535,
  },
+ "mistralai/ministral-14b-2512": {
+ id: "mistralai/ministral-14b-2512",
+ name: "Mistral: Ministral 3 14B 2512",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0.19999999999999998,
+ output: 0.19999999999999998,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 4096,
+ },
+ "mistralai/ministral-8b-2512": {
+ id: "mistralai/ministral-8b-2512",
+ name: "Mistral: Ministral 3 8B 2512",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0.15,
+ output: 0.15,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 4096,
+ },
+ "mistralai/ministral-3b-2512": {
+ id: "mistralai/ministral-3b-2512",
+ name: "Mistral: Ministral 3 3B 2512",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0.09999999999999999,
+ output: 0.09999999999999999,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 131072,
+ maxTokens: 4096,
+ },
  "mistralai/mistral-large-2512": {
  id: "mistralai/mistral-large-2512",
  name: "Mistral: Mistral Large 3 2512",
@@ -3348,39 +3399,39 @@ export const MODELS = {
  contextWindow: 400000,
  maxTokens: 128000,
  },
- "openai/gpt-oss-120b:exacto": {
- id: "openai/gpt-oss-120b:exacto",
- name: "OpenAI: gpt-oss-120b (exacto)",
+ "openai/gpt-oss-120b": {
+ id: "openai/gpt-oss-120b",
+ name: "OpenAI: gpt-oss-120b",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.04,
- output: 0.19999999999999998,
+ input: 0.039,
+ output: 0.19,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 32768,
+ maxTokens: 4096,
  },
- "openai/gpt-oss-120b": {
- id: "openai/gpt-oss-120b",
- name: "OpenAI: gpt-oss-120b",
+ "openai/gpt-oss-120b:exacto": {
+ id: "openai/gpt-oss-120b:exacto",
+ name: "OpenAI: gpt-oss-120b (exacto)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.04,
- output: 0.19999999999999998,
+ input: 0.039,
+ output: 0.19,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 32768,
+ maxTokens: 4096,
  },
  "openai/gpt-oss-20b:free": {
  id: "openai/gpt-oss-20b:free",