@mariozechner/pi-ai 0.16.0 → 0.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1988,6 +1988,23 @@ export const MODELS = {
     },
   },
   openrouter: {
+    "mistralai/devstral-2512:free": {
+      id: "mistralai/devstral-2512:free",
+      name: "Mistral: Devstral 2 2512 (free)",
+      api: "openai-completions",
+      provider: "openrouter",
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: false,
+      input: ["text"],
+      cost: {
+        input: 0,
+        output: 0,
+        cacheRead: 0,
+        cacheWrite: 0,
+      },
+      contextWindow: 262144,
+      maxTokens: 4096,
+    },
     "relace/relace-search": {
       id: "relace/relace-search",
       name: "Relace: Relace Search",
@@ -2173,7 +2190,7 @@ export const MODELS = {
         cacheWrite: 0,
       },
       contextWindow: 131072,
-      maxTokens: 4096,
+      maxTokens: 131072,
     },
     "deepseek/deepseek-v3.2": {
       id: "deepseek/deepseek-v3.2",
@@ -2490,13 +2507,13 @@ export const MODELS = {
       reasoning: true,
       input: ["text"],
       cost: {
-        input: 0.255,
+        input: 0.254,
         output: 1.02,
-        cacheRead: 0,
+        cacheRead: 0.127,
         cacheWrite: 0,
       },
-      contextWindow: 204800,
-      maxTokens: 131072,
+      contextWindow: 262144,
+      maxTokens: 4096,
     },
     "deepcogito/cogito-v2-preview-llama-405b": {
       id: "deepcogito/cogito-v2-preview-llama-405b",
@@ -3272,13 +3289,13 @@ export const MODELS = {
      reasoning: true,
       input: ["text"],
       cost: {
-        input: 0.19999999999999998,
-        output: 0.7999999999999999,
+        input: 0.15,
+        output: 0.75,
         cacheRead: 0,
         cacheWrite: 0,
       },
-      contextWindow: 163840,
-      maxTokens: 163840,
+      contextWindow: 8192,
+      maxTokens: 7168,
     },
     "openai/gpt-4o-audio-preview": {
       id: "openai/gpt-4o-audio-preview",
@@ -4513,8 +4530,8 @@ export const MODELS = {
       reasoning: false,
       input: ["text", "image"],
       cost: {
-        input: 0.049999999999999996,
-        output: 0.22,
+        input: 0.04,
+        output: 0.15,
         cacheRead: 0,
         cacheWrite: 0,
       },
@@ -5252,22 +5269,22 @@ export const MODELS = {
       contextWindow: 128000,
       maxTokens: 16384,
     },
-    "meta-llama/llama-3.1-8b-instruct": {
-      id: "meta-llama/llama-3.1-8b-instruct",
-      name: "Meta: Llama 3.1 8B Instruct",
+    "meta-llama/llama-3.1-405b-instruct": {
+      id: "meta-llama/llama-3.1-405b-instruct",
+      name: "Meta: Llama 3.1 405B Instruct",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
       reasoning: false,
       input: ["text"],
       cost: {
-        input: 0.02,
-        output: 0.03,
+        input: 3.5,
+        output: 3.5,
         cacheRead: 0,
         cacheWrite: 0,
       },
-      contextWindow: 131072,
-      maxTokens: 16384,
+      contextWindow: 130815,
+      maxTokens: 4096,
     },
     "meta-llama/llama-3.1-70b-instruct": {
       id: "meta-llama/llama-3.1-70b-instruct",
@@ -5286,22 +5303,22 @@ export const MODELS = {
       contextWindow: 131072,
       maxTokens: 4096,
     },
-    "meta-llama/llama-3.1-405b-instruct": {
-      id: "meta-llama/llama-3.1-405b-instruct",
-      name: "Meta: Llama 3.1 405B Instruct",
+    "meta-llama/llama-3.1-8b-instruct": {
+      id: "meta-llama/llama-3.1-8b-instruct",
+      name: "Meta: Llama 3.1 8B Instruct",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
       reasoning: false,
       input: ["text"],
       cost: {
-        input: 3.5,
-        output: 3.5,
+        input: 0.02,
+        output: 0.03,
         cacheRead: 0,
         cacheWrite: 0,
       },
-      contextWindow: 130815,
-      maxTokens: 4096,
+      contextWindow: 131072,
+      maxTokens: 16384,
     },
     "mistralai/mistral-nemo": {
       id: "mistralai/mistral-nemo",
@@ -5320,9 +5337,9 @@ export const MODELS = {
       contextWindow: 131072,
       maxTokens: 16384,
     },
-    "openai/gpt-4o-mini": {
-      id: "openai/gpt-4o-mini",
-      name: "OpenAI: GPT-4o-mini",
+    "openai/gpt-4o-mini-2024-07-18": {
+      id: "openai/gpt-4o-mini-2024-07-18",
+      name: "OpenAI: GPT-4o-mini (2024-07-18)",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
@@ -5337,9 +5354,9 @@ export const MODELS = {
       contextWindow: 128000,
       maxTokens: 16384,
     },
-    "openai/gpt-4o-mini-2024-07-18": {
-      id: "openai/gpt-4o-mini-2024-07-18",
-      name: "OpenAI: GPT-4o-mini (2024-07-18)",
+    "openai/gpt-4o-mini": {
+      id: "openai/gpt-4o-mini",
+      name: "OpenAI: GPT-4o-mini",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
@@ -5490,34 +5507,34 @@ export const MODELS = {
       contextWindow: 128000,
       maxTokens: 4096,
     },
-    "meta-llama/llama-3-70b-instruct": {
-      id: "meta-llama/llama-3-70b-instruct",
-      name: "Meta: Llama 3 70B Instruct",
+    "meta-llama/llama-3-8b-instruct": {
+      id: "meta-llama/llama-3-8b-instruct",
+      name: "Meta: Llama 3 8B Instruct",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
       reasoning: false,
       input: ["text"],
       cost: {
-        input: 0.3,
-        output: 0.39999999999999997,
+        input: 0.03,
+        output: 0.06,
         cacheRead: 0,
         cacheWrite: 0,
       },
       contextWindow: 8192,
       maxTokens: 16384,
     },
-    "meta-llama/llama-3-8b-instruct": {
-      id: "meta-llama/llama-3-8b-instruct",
-      name: "Meta: Llama 3 8B Instruct",
+    "meta-llama/llama-3-70b-instruct": {
+      id: "meta-llama/llama-3-70b-instruct",
+      name: "Meta: Llama 3 70B Instruct",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
       reasoning: false,
       input: ["text"],
       cost: {
-        input: 0.03,
-        output: 0.06,
+        input: 0.3,
+        output: 0.39999999999999997,
         cacheRead: 0,
         cacheWrite: 0,
       },
@@ -5711,9 +5728,9 @@ export const MODELS = {
       contextWindow: 16385,
       maxTokens: 4096,
     },
-    "openai/gpt-4-0314": {
-      id: "openai/gpt-4-0314",
-      name: "OpenAI: GPT-4 (older v0314)",
+    "openai/gpt-4": {
+      id: "openai/gpt-4",
+      name: "OpenAI: GPT-4",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
@@ -5728,38 +5745,38 @@ export const MODELS = {
       contextWindow: 8191,
       maxTokens: 4096,
     },
-    "openai/gpt-4": {
-      id: "openai/gpt-4",
-      name: "OpenAI: GPT-4",
+    "openai/gpt-3.5-turbo": {
+      id: "openai/gpt-3.5-turbo",
+      name: "OpenAI: GPT-3.5 Turbo",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
       reasoning: false,
       input: ["text"],
       cost: {
-        input: 30,
-        output: 60,
+        input: 0.5,
+        output: 1.5,
        cacheRead: 0,
         cacheWrite: 0,
       },
-      contextWindow: 8191,
+      contextWindow: 16385,
       maxTokens: 4096,
     },
-    "openai/gpt-3.5-turbo": {
-      id: "openai/gpt-3.5-turbo",
-      name: "OpenAI: GPT-3.5 Turbo",
+    "openai/gpt-4-0314": {
+      id: "openai/gpt-4-0314",
+      name: "OpenAI: GPT-4 (older v0314)",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
       reasoning: false,
       input: ["text"],
       cost: {
-        input: 0.5,
-        output: 1.5,
+        input: 30,
+        output: 60,
         cacheRead: 0,
         cacheWrite: 0,
       },
-      contextWindow: 16385,
+      contextWindow: 8191,
       maxTokens: 4096,
     },
     "openrouter/auto": {