@mariozechner/pi-ai 0.11.3 → 0.11.4

@@ -5031,34 +5031,34 @@ export const MODELS = {
     contextWindow: 32768,
     maxTokens: 4096,
   },
-  "cohere/command-r-08-2024": {
-    id: "cohere/command-r-08-2024",
-    name: "Cohere: Command R (08-2024)",
+  "cohere/command-r-plus-08-2024": {
+    id: "cohere/command-r-plus-08-2024",
+    name: "Cohere: Command R+ (08-2024)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 0.15,
-      output: 0.6,
+      input: 2.5,
+      output: 10,
       cacheRead: 0,
       cacheWrite: 0,
     },
     contextWindow: 128000,
     maxTokens: 4000,
   },
-  "cohere/command-r-plus-08-2024": {
-    id: "cohere/command-r-plus-08-2024",
-    name: "Cohere: Command R+ (08-2024)",
+  "cohere/command-r-08-2024": {
+    id: "cohere/command-r-08-2024",
+    name: "Cohere: Command R (08-2024)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 2.5,
-      output: 10,
+      input: 0.15,
+      output: 0.6,
       cacheRead: 0,
       cacheWrite: 0,
     },
@@ -5184,9 +5184,9 @@ export const MODELS = {
     contextWindow: 131072,
     maxTokens: 16384,
   },
-  "openai/gpt-4o-mini-2024-07-18": {
-    id: "openai/gpt-4o-mini-2024-07-18",
-    name: "OpenAI: GPT-4o-mini (2024-07-18)",
+  "openai/gpt-4o-mini": {
+    id: "openai/gpt-4o-mini",
+    name: "OpenAI: GPT-4o-mini",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
@@ -5201,9 +5201,9 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 16384,
   },
-  "openai/gpt-4o-mini": {
-    id: "openai/gpt-4o-mini",
-    name: "OpenAI: GPT-4o-mini",
+  "openai/gpt-4o-mini-2024-07-18": {
+    id: "openai/gpt-4o-mini-2024-07-18",
+    name: "OpenAI: GPT-4o-mini (2024-07-18)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
@@ -5303,23 +5303,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   },
-  "openai/gpt-4o-2024-05-13": {
-    id: "openai/gpt-4o-2024-05-13",
-    name: "OpenAI: GPT-4o (2024-05-13)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text", "image"],
-    cost: {
-      input: 5,
-      output: 15,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 128000,
-    maxTokens: 4096,
-  },
   "openai/gpt-4o": {
     id: "openai/gpt-4o",
     name: "OpenAI: GPT-4o",
@@ -5354,22 +5337,22 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 64000,
   },
-  "meta-llama/llama-3-70b-instruct": {
-    id: "meta-llama/llama-3-70b-instruct",
-    name: "Meta: Llama 3 70B Instruct",
+  "openai/gpt-4o-2024-05-13": {
+    id: "openai/gpt-4o-2024-05-13",
+    name: "OpenAI: GPT-4o (2024-05-13)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
-    input: ["text"],
+    input: ["text", "image"],
     cost: {
-      input: 0.3,
-      output: 0.39999999999999997,
+      input: 5,
+      output: 15,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow: 8192,
-    maxTokens: 16384,
+    contextWindow: 128000,
+    maxTokens: 4096,
   },
   "meta-llama/llama-3-8b-instruct": {
     id: "meta-llama/llama-3-8b-instruct",
@@ -5388,6 +5371,23 @@ export const MODELS = {
     contextWindow: 8192,
     maxTokens: 16384,
   },
+  "meta-llama/llama-3-70b-instruct": {
+    id: "meta-llama/llama-3-70b-instruct",
+    name: "Meta: Llama 3 70B Instruct",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.3,
+      output: 0.39999999999999997,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 8192,
+    maxTokens: 16384,
+  },
   "mistralai/mixtral-8x22b-instruct": {
     id: "mistralai/mixtral-8x22b-instruct",
     name: "Mistral: Mixtral 8x22B Instruct",
@@ -5473,38 +5473,38 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   },
-  "openai/gpt-3.5-turbo-0613": {
-    id: "openai/gpt-3.5-turbo-0613",
-    name: "OpenAI: GPT-3.5 Turbo (older v0613)",
+  "openai/gpt-4-turbo-preview": {
+    id: "openai/gpt-4-turbo-preview",
+    name: "OpenAI: GPT-4 Turbo Preview",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 1,
-      output: 2,
+      input: 10,
+      output: 30,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow: 4095,
+    contextWindow: 128000,
     maxTokens: 4096,
   },
-  "openai/gpt-4-turbo-preview": {
-    id: "openai/gpt-4-turbo-preview",
-    name: "OpenAI: GPT-4 Turbo Preview",
+  "openai/gpt-3.5-turbo-0613": {
+    id: "openai/gpt-3.5-turbo-0613",
+    name: "OpenAI: GPT-3.5 Turbo (older v0613)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 10,
-      output: 30,
+      input: 1,
+      output: 2,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow: 128000,
+    contextWindow: 4095,
     maxTokens: 4096,
   },
   "mistralai/mistral-small": {
@@ -5609,38 +5609,38 @@ export const MODELS = {
     contextWindow: 8191,
     maxTokens: 4096,
   },
-  "openai/gpt-4": {
-    id: "openai/gpt-4",
-    name: "OpenAI: GPT-4",
+  "openai/gpt-3.5-turbo": {
+    id: "openai/gpt-3.5-turbo",
+    name: "OpenAI: GPT-3.5 Turbo",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 30,
-      output: 60,
+      input: 0.5,
+      output: 1.5,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow: 8191,
+    contextWindow: 16385,
     maxTokens: 4096,
   },
-  "openai/gpt-3.5-turbo": {
-    id: "openai/gpt-3.5-turbo",
-    name: "OpenAI: GPT-3.5 Turbo",
+  "openai/gpt-4": {
+    id: "openai/gpt-4",
+    name: "OpenAI: GPT-4",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 0.5,
-      output: 1.5,
+      input: 30,
+      output: 60,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow: 16385,
+    contextWindow: 8191,
     maxTokens: 4096,
   },
   "openrouter/auto": {