@mariozechner/pi-ai 0.7.7 → 0.7.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1895,9 +1895,9 @@ export const MODELS = {
   reasoning: true,
   input: ["text", "image"],
   cost: {
-  input: 1.5,
-  output: 6,
-  cacheRead: 0.375,
+  input: 0.25,
+  output: 2,
+  cacheRead: 0.024999999999999998,
   cacheWrite: 0,
   },
   contextWindow: 400000,
@@ -2286,13 +2286,13 @@ export const MODELS = {
   reasoning: true,
   input: ["text"],
   cost: {
-  input: 0.6,
-  output: 2.2,
+  input: 0.44999999999999996,
+  output: 1.9,
   cacheRead: 0,
   cacheWrite: 0,
   },
-  contextWindow: 204800,
-  maxTokens: 131072,
+  contextWindow: 202752,
+  maxTokens: 4096,
   },
   "anthropic/claude-sonnet-4.5": {
   id: "anthropic/claude-sonnet-4.5",
@@ -4776,34 +4776,34 @@ export const MODELS = {
   contextWindow: 200000,
   maxTokens: 8192,
   },
-  "mistralai/ministral-3b": {
-  id: "mistralai/ministral-3b",
-  name: "Mistral: Ministral 3B",
+  "mistralai/ministral-8b": {
+  id: "mistralai/ministral-8b",
+  name: "Mistral: Ministral 8B",
   api: "openai-completions",
   provider: "openrouter",
   baseUrl: "https://openrouter.ai/api/v1",
   reasoning: false,
   input: ["text"],
   cost: {
-  input: 0.04,
-  output: 0.04,
+  input: 0.09999999999999999,
+  output: 0.09999999999999999,
   cacheRead: 0,
   cacheWrite: 0,
   },
   contextWindow: 131072,
   maxTokens: 4096,
   },
-  "mistralai/ministral-8b": {
-  id: "mistralai/ministral-8b",
-  name: "Mistral: Ministral 8B",
+  "mistralai/ministral-3b": {
+  id: "mistralai/ministral-3b",
+  name: "Mistral: Ministral 3B",
   api: "openai-completions",
   provider: "openrouter",
   baseUrl: "https://openrouter.ai/api/v1",
   reasoning: false,
   input: ["text"],
   cost: {
-  input: 0.09999999999999999,
-  output: 0.09999999999999999,
+  input: 0.04,
+  output: 0.04,
   cacheRead: 0,
   cacheWrite: 0,
   },
@@ -5082,9 +5082,9 @@ export const MODELS = {
   contextWindow: 131072,
   maxTokens: 16384,
   },
-  "openai/gpt-4o-mini": {
-  id: "openai/gpt-4o-mini",
-  name: "OpenAI: GPT-4o-mini",
+  "openai/gpt-4o-mini-2024-07-18": {
+  id: "openai/gpt-4o-mini-2024-07-18",
+  name: "OpenAI: GPT-4o-mini (2024-07-18)",
   api: "openai-completions",
   provider: "openrouter",
   baseUrl: "https://openrouter.ai/api/v1",
@@ -5099,9 +5099,9 @@ export const MODELS = {
   contextWindow: 128000,
   maxTokens: 16384,
   },
-  "openai/gpt-4o-mini-2024-07-18": {
-  id: "openai/gpt-4o-mini-2024-07-18",
-  name: "OpenAI: GPT-4o-mini (2024-07-18)",
+  "openai/gpt-4o-mini": {
+  id: "openai/gpt-4o-mini",
+  name: "OpenAI: GPT-4o-mini",
   api: "openai-completions",
   provider: "openrouter",
   baseUrl: "https://openrouter.ai/api/v1",
@@ -5218,6 +5218,23 @@ export const MODELS = {
   contextWindow: 128000,
   maxTokens: 4096,
   },
+  "openai/gpt-4o-2024-05-13": {
+  id: "openai/gpt-4o-2024-05-13",
+  name: "OpenAI: GPT-4o (2024-05-13)",
+  api: "openai-completions",
+  provider: "openrouter",
+  baseUrl: "https://openrouter.ai/api/v1",
+  reasoning: false,
+  input: ["text", "image"],
+  cost: {
+  input: 5,
+  output: 15,
+  cacheRead: 0,
+  cacheWrite: 0,
+  },
+  contextWindow: 128000,
+  maxTokens: 4096,
+  },
   "openai/gpt-4o": {
   id: "openai/gpt-4o",
   name: "OpenAI: GPT-4o",
@@ -5252,22 +5269,22 @@ export const MODELS = {
   contextWindow: 128000,
   maxTokens: 64000,
   },
-  "openai/gpt-4o-2024-05-13": {
-  id: "openai/gpt-4o-2024-05-13",
-  name: "OpenAI: GPT-4o (2024-05-13)",
+  "meta-llama/llama-3-70b-instruct": {
+  id: "meta-llama/llama-3-70b-instruct",
+  name: "Meta: Llama 3 70B Instruct",
   api: "openai-completions",
   provider: "openrouter",
   baseUrl: "https://openrouter.ai/api/v1",
   reasoning: false,
-  input: ["text", "image"],
+  input: ["text"],
   cost: {
-  input: 5,
-  output: 15,
+  input: 0.3,
+  output: 0.39999999999999997,
   cacheRead: 0,
   cacheWrite: 0,
   },
-  contextWindow: 128000,
-  maxTokens: 4096,
+  contextWindow: 8192,
+  maxTokens: 16384,
   },
   "meta-llama/llama-3-8b-instruct": {
   id: "meta-llama/llama-3-8b-instruct",
@@ -5286,23 +5303,6 @@ export const MODELS = {
   contextWindow: 8192,
   maxTokens: 16384,
   },
-  "meta-llama/llama-3-70b-instruct": {
-  id: "meta-llama/llama-3-70b-instruct",
-  name: "Meta: Llama 3 70B Instruct",
-  api: "openai-completions",
-  provider: "openrouter",
-  baseUrl: "https://openrouter.ai/api/v1",
-  reasoning: false,
-  input: ["text"],
-  cost: {
-  input: 0.3,
-  output: 0.39999999999999997,
-  cacheRead: 0,
-  cacheWrite: 0,
-  },
-  contextWindow: 8192,
-  maxTokens: 16384,
-  },
   "mistralai/mixtral-8x22b-instruct": {
   id: "mistralai/mixtral-8x22b-instruct",
   name: "Mistral: Mixtral 8x22B Instruct",
@@ -5541,38 +5541,38 @@ export const MODELS = {
   contextWindow: 8191,
   maxTokens: 4096,
   },
-  "openai/gpt-3.5-turbo": {
-  id: "openai/gpt-3.5-turbo",
-  name: "OpenAI: GPT-3.5 Turbo",
+  "openai/gpt-4": {
+  id: "openai/gpt-4",
+  name: "OpenAI: GPT-4",
   api: "openai-completions",
   provider: "openrouter",
   baseUrl: "https://openrouter.ai/api/v1",
   reasoning: false,
   input: ["text"],
   cost: {
-  input: 0.5,
-  output: 1.5,
+  input: 30,
+  output: 60,
   cacheRead: 0,
   cacheWrite: 0,
   },
-  contextWindow: 16385,
+  contextWindow: 8191,
   maxTokens: 4096,
   },
-  "openai/gpt-4": {
-  id: "openai/gpt-4",
-  name: "OpenAI: GPT-4",
+  "openai/gpt-3.5-turbo": {
+  id: "openai/gpt-3.5-turbo",
+  name: "OpenAI: GPT-3.5 Turbo",
   api: "openai-completions",
   provider: "openrouter",
   baseUrl: "https://openrouter.ai/api/v1",
   reasoning: false,
   input: ["text"],
   cost: {
-  input: 30,
-  output: 60,
+  input: 0.5,
+  output: 1.5,
   cacheRead: 0,
   cacheWrite: 0,
   },
-  contextWindow: 8191,
+  contextWindow: 16385,
   maxTokens: 4096,
   },
   "openrouter/auto": {