@mariozechner/pi-ai 0.23.4 → 0.24.0

@@ -2175,6 +2175,23 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
+ "devstral-2512": {
+ id: "devstral-2512",
+ name: "Devstral 2",
+ api: "openai-completions",
+ provider: "mistral",
+ baseUrl: "https://api.mistral.ai/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 262144,
+ },
  "ministral-3b-latest": {
  id: "ministral-3b-latest",
  name: "Ministral 3B",
@@ -2235,8 +2252,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text", "image"],
  cost: {
- input: 0.1,
- output: 0.3,
+ input: 0,
+ output: 0,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -2580,11 +2597,10 @@ export const MODELS = {
  "oswe-vscode-prime": {
  id: "oswe-vscode-prime",
  name: "Raptor Mini (Preview)",
- api: "openai-completions",
+ api: "openai-responses",
  provider: "github-copilot",
  baseUrl: "https://api.individual.githubcopilot.com",
  headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
- compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
  reasoning: true,
  input: ["text", "image"],
  cost: {
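The "oswe-vscode-prime" change swaps the api field from "openai-completions" to "openai-responses" and drops the compat overrides, which suggests this entry is now served by a Responses-style client path rather than a Chat Completions one. The package's actual dispatch is not part of this diff; the sketch below (reusing the ModelEntry shape above) only illustrates what branching on api could look like, with the function name and endpoint paths chosen purely for the example.

// Illustrative only: how a caller might pick an endpoint from the `api` field.
// The function name and URL suffixes are assumptions, not pi-ai's real routing.
function endpointFor(model: ModelEntry): string {
  switch (model.api) {
    case "openai-completions":
      return `${model.baseUrl}/chat/completions`; // Chat Completions-style API
    case "openai-responses":
      return `${model.baseUrl}/responses`; // Responses-style API
    default:
      throw new Error(`Unknown api kind: ${model.api}`);
  }
}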
@@ -2906,6 +2922,23 @@ export const MODELS = {
  contextWindow: 256000,
  maxTokens: 4096,
  },
+ "nvidia/nemotron-3-nano-30b-a3b": {
+ id: "nvidia/nemotron-3-nano-30b-a3b",
+ name: "NVIDIA: Nemotron 3 Nano 30B A3B",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0.06,
+ output: 0.24,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 4096,
+ },
  "openai/gpt-5.2-chat": {
  id: "openai/gpt-5.2-chat",
  name: "OpenAI: GPT-5.2 Chat",
@@ -3187,13 +3220,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.26,
+ input: 0.24,
  output: 0.38,
- cacheRead: 0,
+ cacheRead: 0.11,
  cacheWrite: 0,
  },
  contextWindow: 163840,
- maxTokens: 65536,
+ maxTokens: 163840,
  },
  "prime-intellect/intellect-3": {
  id: "prime-intellect/intellect-3",
@@ -5448,13 +5481,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.15,
- output: 0.75,
- cacheRead: 0,
+ input: 0.19999999999999998,
+ output: 0.88,
+ cacheRead: 0.106,
  cacheWrite: 0,
  },
- contextWindow: 8192,
- maxTokens: 7168,
+ contextWindow: 163840,
+ maxTokens: 4096,
  },
  "mistralai/mistral-small-3.1-24b-instruct:free": {
  id: "mistralai/mistral-small-3.1-24b-instruct:free",