@mariozechner/pi-ai 0.10.2 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/models.generated.js
CHANGED
@@ -2039,6 +2039,23 @@ export const MODELS = {
         contextWindow: 200000,
         maxTokens: 64000,
     },
+    "openrouter/bert-nebulon-alpha": {
+        id: "openrouter/bert-nebulon-alpha",
+        name: "Bert-Nebulon Alpha",
+        api: "openai-completions",
+        provider: "openrouter",
+        baseUrl: "https://openrouter.ai/api/v1",
+        reasoning: false,
+        input: ["text", "image"],
+        cost: {
+            input: 0,
+            output: 0,
+            cacheRead: 0,
+            cacheWrite: 0,
+        },
+        contextWindow: 256000,
+        maxTokens: 4096,
+    },
     "allenai/olmo-3-7b-instruct": {
         id: "allenai/olmo-3-7b-instruct",
         name: "AllenAI: Olmo 3 7B Instruct",
@@ -2507,13 +2524,13 @@ export const MODELS = {
         reasoning: true,
         input: ["text"],
         cost: {
-            input: 0.
-            output: 1.
+            input: 0.44,
+            output: 1.76,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
-        maxTokens:
+        contextWindow: 204800,
+        maxTokens: 131072,
     },
     "anthropic/claude-sonnet-4.5": {
         id: "anthropic/claude-sonnet-4.5",
@@ -3375,7 +3392,7 @@ export const MODELS = {
         input: ["text"],
         cost: {
             input: 0.35,
-            output: 1.
+            output: 1.55,
             cacheRead: 0,
             cacheWrite: 0,
         },
@@ -5014,34 +5031,34 @@ export const MODELS = {
         contextWindow: 32768,
         maxTokens: 4096,
     },
-    "cohere/command-r-
-        id: "cohere/command-r-
-        name: "Cohere: Command R
+    "cohere/command-r-08-2024": {
+        id: "cohere/command-r-08-2024",
+        name: "Cohere: Command R (08-2024)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 0.15,
+            output: 0.6,
             cacheRead: 0,
             cacheWrite: 0,
         },
         contextWindow: 128000,
         maxTokens: 4000,
     },
-    "cohere/command-r-08-2024": {
-        id: "cohere/command-r-08-2024",
-        name: "Cohere: Command R (08-2024)",
+    "cohere/command-r-plus-08-2024": {
+        id: "cohere/command-r-plus-08-2024",
+        name: "Cohere: Command R+ (08-2024)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 2.5,
+            output: 10,
             cacheRead: 0,
             cacheWrite: 0,
         },
@@ -5082,23 +5099,6 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
-    "nousresearch/hermes-3-llama-3.1-70b": {
-        id: "nousresearch/hermes-3-llama-3.1-70b",
-        name: "Nous: Hermes 3 70B Instruct",
-        api: "openai-completions",
-        provider: "openrouter",
-        baseUrl: "https://openrouter.ai/api/v1",
-        reasoning: false,
-        input: ["text"],
-        cost: {
-            input: 0.3,
-            output: 0.3,
-            cacheRead: 0,
-            cacheWrite: 0,
-        },
-        contextWindow: 65536,
-        maxTokens: 4096,
-    },
     "openai/gpt-4o-2024-08-06": {
         id: "openai/gpt-4o-2024-08-06",
         name: "OpenAI: GPT-4o (2024-08-06)",
@@ -5184,9 +5184,9 @@ export const MODELS = {
         contextWindow: 131072,
         maxTokens: 16384,
     },
-    "openai/gpt-4o-mini": {
-        id: "openai/gpt-4o-mini",
-        name: "OpenAI: GPT-4o-mini",
+    "openai/gpt-4o-mini-2024-07-18": {
+        id: "openai/gpt-4o-mini-2024-07-18",
+        name: "OpenAI: GPT-4o-mini (2024-07-18)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
@@ -5201,9 +5201,9 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 16384,
     },
-    "openai/gpt-4o-mini
-        id: "openai/gpt-4o-mini
-        name: "OpenAI: GPT-4o-mini
+    "openai/gpt-4o-mini": {
+        id: "openai/gpt-4o-mini",
+        name: "OpenAI: GPT-4o-mini",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
@@ -5303,6 +5303,23 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
+    "openai/gpt-4o-2024-05-13": {
+        id: "openai/gpt-4o-2024-05-13",
+        name: "OpenAI: GPT-4o (2024-05-13)",
+        api: "openai-completions",
+        provider: "openrouter",
+        baseUrl: "https://openrouter.ai/api/v1",
+        reasoning: false,
+        input: ["text", "image"],
+        cost: {
+            input: 5,
+            output: 15,
+            cacheRead: 0,
+            cacheWrite: 0,
+        },
+        contextWindow: 128000,
+        maxTokens: 4096,
+    },
     "openai/gpt-4o": {
         id: "openai/gpt-4o",
         name: "OpenAI: GPT-4o",
@@ -5337,22 +5354,22 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 64000,
     },
-    "
-        id: "
-        name: "
+    "meta-llama/llama-3-70b-instruct": {
+        id: "meta-llama/llama-3-70b-instruct",
+        name: "Meta: Llama 3 70B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
-        input: ["text"
+        input: ["text"],
         cost: {
-            input:
-            output:
+            input: 0.3,
+            output: 0.39999999999999997,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
-        maxTokens:
+        contextWindow: 8192,
+        maxTokens: 16384,
     },
     "meta-llama/llama-3-8b-instruct": {
         id: "meta-llama/llama-3-8b-instruct",
@@ -5371,23 +5388,6 @@ export const MODELS = {
         contextWindow: 8192,
         maxTokens: 16384,
     },
-    "meta-llama/llama-3-70b-instruct": {
-        id: "meta-llama/llama-3-70b-instruct",
-        name: "Meta: Llama 3 70B Instruct",
-        api: "openai-completions",
-        provider: "openrouter",
-        baseUrl: "https://openrouter.ai/api/v1",
-        reasoning: false,
-        input: ["text"],
-        cost: {
-            input: 0.3,
-            output: 0.39999999999999997,
-            cacheRead: 0,
-            cacheWrite: 0,
-        },
-        contextWindow: 8192,
-        maxTokens: 16384,
-    },
     "mistralai/mixtral-8x22b-instruct": {
         id: "mistralai/mixtral-8x22b-instruct",
         name: "Mistral: Mixtral 8x22B Instruct",
@@ -5473,38 +5473,38 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-3.5-turbo-0613": {
+        id: "openai/gpt-3.5-turbo-0613",
+        name: "OpenAI: GPT-3.5 Turbo (older v0613)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 1,
+            output: 2,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 4095,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-4-turbo-preview": {
+        id: "openai/gpt-4-turbo-preview",
+        name: "OpenAI: GPT-4 Turbo Preview",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 10,
+            output: 30,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 128000,
         maxTokens: 4096,
     },
     "mistralai/mistral-small": {
@@ -5609,38 +5609,38 @@ export const MODELS = {
         contextWindow: 8191,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-4": {
+        id: "openai/gpt-4",
+        name: "OpenAI: GPT-4",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 30,
+            output: 60,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 8191,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-3.5-turbo": {
+        id: "openai/gpt-3.5-turbo",
+        name: "OpenAI: GPT-3.5 Turbo",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 0.5,
+            output: 1.5,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 16385,
         maxTokens: 4096,
     },
     "openrouter/auto": {
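For orientation, a minimal usage sketch (not part of the diff) of how an entry in the generated MODELS map, such as the newly added openrouter/bert-nebulon-alpha, might be consumed. It assumes the cost.input / cost.output figures are USD per million tokens and imports MODELS directly from dist/models.generated.js; the estimateCostUsd helper is hypothetical, not an export of the package.

// Hypothetical sketch; import path and cost units (USD per 1M tokens) are assumptions.
import { MODELS } from "./models.generated.js";

const model = MODELS["openrouter/bert-nebulon-alpha"];

// Illustrative helper: per-request cost from per-million-token prices.
function estimateCostUsd(m, inputTokens, outputTokens) {
    return (inputTokens * m.cost.input + outputTokens * m.cost.output) / 1_000_000;
}

console.log(model.name, model.contextWindow, model.maxTokens);
console.log("estimated cost:", estimateCostUsd(model, 12000, 800));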