@mariozechner/pi-ai 0.12.13 → 0.12.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2224,7 +2224,7 @@ export const MODELS = {
  cacheWrite: 6.25,
  },
  contextWindow: 200000,
- maxTokens: 64000,
+ maxTokens: 32000,
  },
  "allenai/olmo-3-7b-instruct": {
  id: "allenai/olmo-3-7b-instruct",
@@ -5150,34 +5150,34 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 4096,
  },
- "cohere/command-r-08-2024": {
- id: "cohere/command-r-08-2024",
- name: "Cohere: Command R (08-2024)",
+ "cohere/command-r-plus-08-2024": {
+ id: "cohere/command-r-plus-08-2024",
+ name: "Cohere: Command R+ (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.15,
- output: 0.6,
+ input: 2.5,
+ output: 10,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 128000,
  maxTokens: 4000,
  },
- "cohere/command-r-plus-08-2024": {
- id: "cohere/command-r-plus-08-2024",
- name: "Cohere: Command R+ (08-2024)",
+ "cohere/command-r-08-2024": {
+ id: "cohere/command-r-08-2024",
+ name: "Cohere: Command R (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 2.5,
- output: 10,
+ input: 0.15,
+ output: 0.6,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -5252,38 +5252,38 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 16384,
  },
- "meta-llama/llama-3.1-405b-instruct": {
- id: "meta-llama/llama-3.1-405b-instruct",
- name: "Meta: Llama 3.1 405B Instruct",
+ "meta-llama/llama-3.1-70b-instruct": {
+ id: "meta-llama/llama-3.1-70b-instruct",
+ name: "Meta: Llama 3.1 70B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 3.5,
- output: 3.5,
+ input: 0.39999999999999997,
+ output: 0.39999999999999997,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 130815,
+ contextWindow: 131072,
  maxTokens: 4096,
  },
- "meta-llama/llama-3.1-70b-instruct": {
- id: "meta-llama/llama-3.1-70b-instruct",
- name: "Meta: Llama 3.1 70B Instruct",
+ "meta-llama/llama-3.1-405b-instruct": {
+ id: "meta-llama/llama-3.1-405b-instruct",
+ name: "Meta: Llama 3.1 405B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.39999999999999997,
- output: 0.39999999999999997,
+ input: 3.5,
+ output: 3.5,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 131072,
+ contextWindow: 130815,
  maxTokens: 4096,
  },
  "mistralai/mistral-nemo": {
@@ -5303,9 +5303,9 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 16384,
  },
- "openai/gpt-4o-mini-2024-07-18": {
- id: "openai/gpt-4o-mini-2024-07-18",
- name: "OpenAI: GPT-4o-mini (2024-07-18)",
+ "openai/gpt-4o-mini": {
+ id: "openai/gpt-4o-mini",
+ name: "OpenAI: GPT-4o-mini",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
@@ -5320,9 +5320,9 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
- "openai/gpt-4o-mini": {
- id: "openai/gpt-4o-mini",
- name: "OpenAI: GPT-4o-mini",
+ "openai/gpt-4o-mini-2024-07-18": {
+ id: "openai/gpt-4o-mini-2024-07-18",
+ name: "OpenAI: GPT-4o-mini (2024-07-18)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
@@ -5422,23 +5422,6 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/gpt-4o-2024-05-13": {
- id: "openai/gpt-4o-2024-05-13",
- name: "OpenAI: GPT-4o (2024-05-13)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text", "image"],
- cost: {
- input: 5,
- output: 15,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 128000,
- maxTokens: 4096,
- },
  "openai/gpt-4o": {
  id: "openai/gpt-4o",
  name: "OpenAI: GPT-4o",
@@ -5473,22 +5456,22 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 64000,
  },
- "meta-llama/llama-3-70b-instruct": {
- id: "meta-llama/llama-3-70b-instruct",
- name: "Meta: Llama 3 70B Instruct",
+ "openai/gpt-4o-2024-05-13": {
+ id: "openai/gpt-4o-2024-05-13",
+ name: "OpenAI: GPT-4o (2024-05-13)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
- input: ["text"],
+ input: ["text", "image"],
  cost: {
- input: 0.3,
- output: 0.39999999999999997,
+ input: 5,
+ output: 15,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 8192,
- maxTokens: 16384,
+ contextWindow: 128000,
+ maxTokens: 4096,
  },
  "meta-llama/llama-3-8b-instruct": {
  id: "meta-llama/llama-3-8b-instruct",
@@ -5507,6 +5490,23 @@ export const MODELS = {
  contextWindow: 8192,
  maxTokens: 16384,
  },
+ "meta-llama/llama-3-70b-instruct": {
+ id: "meta-llama/llama-3-70b-instruct",
+ name: "Meta: Llama 3 70B Instruct",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.3,
+ output: 0.39999999999999997,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 8192,
+ maxTokens: 16384,
+ },
  "mistralai/mixtral-8x22b-instruct": {
  id: "mistralai/mixtral-8x22b-instruct",
  name: "Mistral: Mixtral 8x22B Instruct",
@@ -5592,38 +5592,38 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/gpt-3.5-turbo-0613": {
- id: "openai/gpt-3.5-turbo-0613",
- name: "OpenAI: GPT-3.5 Turbo (older v0613)",
+ "openai/gpt-4-turbo-preview": {
+ id: "openai/gpt-4-turbo-preview",
+ name: "OpenAI: GPT-4 Turbo Preview",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 1,
- output: 2,
+ input: 10,
+ output: 30,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 4095,
+ contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/gpt-4-turbo-preview": {
- id: "openai/gpt-4-turbo-preview",
- name: "OpenAI: GPT-4 Turbo Preview",
+ "openai/gpt-3.5-turbo-0613": {
+ id: "openai/gpt-3.5-turbo-0613",
+ name: "OpenAI: GPT-3.5 Turbo (older v0613)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 10,
- output: 30,
+ input: 1,
+ output: 2,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 128000,
+ contextWindow: 4095,
  maxTokens: 4096,
  },
  "mistralai/mistral-tiny": {