@mariozechner/pi-ai 0.14.2 → 0.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/models.generated.js
CHANGED
@@ -2005,6 +2005,23 @@ export const MODELS = {
     contextWindow: 256000,
     maxTokens: 128000,
   },
+  "z-ai/glm-4.6v": {
+    id: "z-ai/glm-4.6v",
+    name: "Z.AI: GLM 4.6V",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: true,
+    input: ["text", "image"],
+    cost: {
+      input: 0.3,
+      output: 0.8999999999999999,
+      cacheRead: 0.049999999999999996,
+      cacheWrite: 0,
+    },
+    contextWindow: 131072,
+    maxTokens: 24000,
+  },
   "openai/gpt-5.1-codex-max": {
     id: "openai/gpt-5.1-codex-max",
     name: "OpenAI: GPT-5.1-Codex-Max",
@@ -4445,13 +4462,13 @@ export const MODELS = {
     reasoning: true,
     input: ["text"],
     cost: {
-      input: 0.
-      output: 0.
-      cacheRead: 0,
+      input: 0.19999999999999998,
+      output: 0.88,
+      cacheRead: 0.106,
       cacheWrite: 0,
     },
-    contextWindow:
-    maxTokens:
+    contextWindow: 163840,
+    maxTokens: 4096,
   },
   "mistralai/mistral-small-3.1-24b-instruct:free": {
     id: "mistralai/mistral-small-3.1-24b-instruct:free",
@@ -4496,8 +4513,8 @@ export const MODELS = {
     reasoning: false,
     input: ["text", "image"],
     cost: {
-      input: 0.
-      output: 0.
+      input: 0.049999999999999996,
+      output: 0.22,
       cacheRead: 0,
       cacheWrite: 0,
     },
@@ -5031,34 +5048,34 @@ export const MODELS = {
     contextWindow: 200000,
     maxTokens: 8192,
   },
-  "mistralai/ministral-
-    id: "mistralai/ministral-
-    name: "Mistral: Ministral
+  "mistralai/ministral-3b": {
+    id: "mistralai/ministral-3b",
+    name: "Mistral: Ministral 3B",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 0.
-      output: 0.
+      input: 0.04,
+      output: 0.04,
       cacheRead: 0,
       cacheWrite: 0,
     },
     contextWindow: 131072,
     maxTokens: 4096,
   },
-  "mistralai/ministral-
-    id: "mistralai/ministral-
-    name: "Mistral: Ministral
+  "mistralai/ministral-8b": {
+    id: "mistralai/ministral-8b",
+    name: "Mistral: Ministral 8B",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 0.
-      output: 0.
+      input: 0.09999999999999999,
+      output: 0.09999999999999999,
       cacheRead: 0,
       cacheWrite: 0,
     },
@@ -5252,38 +5269,38 @@ export const MODELS = {
     contextWindow: 131072,
     maxTokens: 16384,
   },
-  "meta-llama/llama-3.1-
-    id: "meta-llama/llama-3.1-
-    name: "Meta: Llama 3.1
+  "meta-llama/llama-3.1-70b-instruct": {
+    id: "meta-llama/llama-3.1-70b-instruct",
+    name: "Meta: Llama 3.1 70B Instruct",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input:
-      output:
+      input: 0.39999999999999997,
+      output: 0.39999999999999997,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow:
+    contextWindow: 131072,
     maxTokens: 4096,
   },
-  "meta-llama/llama-3.1-
-    id: "meta-llama/llama-3.1-
-    name: "Meta: Llama 3.1
+  "meta-llama/llama-3.1-405b-instruct": {
+    id: "meta-llama/llama-3.1-405b-instruct",
+    name: "Meta: Llama 3.1 405B Instruct",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input:
-      output:
+      input: 3.5,
+      output: 3.5,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow:
+    contextWindow: 130815,
     maxTokens: 4096,
   },
   "mistralai/mistral-nemo": {
@@ -5303,9 +5320,9 @@ export const MODELS = {
     contextWindow: 131072,
     maxTokens: 16384,
   },
-  "openai/gpt-4o-mini
-    id: "openai/gpt-4o-mini
-    name: "OpenAI: GPT-4o-mini
+  "openai/gpt-4o-mini": {
+    id: "openai/gpt-4o-mini",
+    name: "OpenAI: GPT-4o-mini",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
@@ -5320,9 +5337,9 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 16384,
   },
-  "openai/gpt-4o-mini": {
-    id: "openai/gpt-4o-mini",
-    name: "OpenAI: GPT-4o-mini",
+  "openai/gpt-4o-mini-2024-07-18": {
+    id: "openai/gpt-4o-mini-2024-07-18",
+    name: "OpenAI: GPT-4o-mini (2024-07-18)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
@@ -5422,23 +5439,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   },
-  "openai/gpt-4o-2024-05-13": {
-    id: "openai/gpt-4o-2024-05-13",
-    name: "OpenAI: GPT-4o (2024-05-13)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text", "image"],
-    cost: {
-      input: 5,
-      output: 15,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 128000,
-    maxTokens: 4096,
-  },
   "openai/gpt-4o": {
     id: "openai/gpt-4o",
     name: "OpenAI: GPT-4o",
@@ -5473,6 +5473,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 64000,
   },
+  "openai/gpt-4o-2024-05-13": {
+    id: "openai/gpt-4o-2024-05-13",
+    name: "OpenAI: GPT-4o (2024-05-13)",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text", "image"],
+    cost: {
+      input: 5,
+      output: 15,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 128000,
+    maxTokens: 4096,
+  },
   "meta-llama/llama-3-70b-instruct": {
     id: "meta-llama/llama-3-70b-instruct",
     name: "Meta: Llama 3 70B Instruct",
@@ -5592,38 +5609,38 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   },
-  "openai/gpt-
-    id: "openai/gpt-
-    name: "OpenAI: GPT-
+  "openai/gpt-4-turbo-preview": {
+    id: "openai/gpt-4-turbo-preview",
+    name: "OpenAI: GPT-4 Turbo Preview",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input:
-      output:
+      input: 10,
+      output: 30,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow:
+    contextWindow: 128000,
     maxTokens: 4096,
   },
-  "openai/gpt-
-    id: "openai/gpt-
-    name: "OpenAI: GPT-
+  "openai/gpt-3.5-turbo-0613": {
+    id: "openai/gpt-3.5-turbo-0613",
+    name: "OpenAI: GPT-3.5 Turbo (older v0613)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input:
-      output:
+      input: 1,
+      output: 2,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow:
+    contextWindow: 4095,
     maxTokens: 4096,
   },
   "mistralai/mistral-tiny": {
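The cost fields in these entries read as USD per million tokens, which would explain floating-point artifacts such as 0.8999999999999999 (a per-token price scaled up by the generator). Below is a minimal consumption sketch under two assumptions: that MODELS is re-exported from the package root (it is defined in dist/models.generated.js), and that the per-million-token reading is correct. estimateCostUsd is a hypothetical helper, not part of the package.

// Sketch only: assumes MODELS is importable from the package root and that
// cost values are USD per million tokens. estimateCostUsd is hypothetical.
import { MODELS } from "@mariozechner/pi-ai";

function estimateCostUsd(
  entry: { cost: { input: number; output: number; cacheRead: number } },
  inputTokens: number,
  outputTokens: number,
  cachedInputTokens = 0,
): number {
  // Cached prompt tokens are billed at the cacheRead rate instead of the input rate.
  const uncachedInput = inputTokens - cachedInputTokens;
  return (
    (uncachedInput * entry.cost.input +
      cachedInputTokens * entry.cost.cacheRead +
      outputTokens * entry.cost.output) /
    1_000_000
  );
}

// New in 0.16.0: "z-ai/glm-4.6v" at $0.3/M input and ~$0.9/M output.
const glm = MODELS["z-ai/glm-4.6v"];
console.log(estimateCostUsd(glm, 10_000, 2_000)); // ≈ 0.0048 USD

Under the same assumptions, the cacheWrite: 0 values simply mean no separate charge is listed for writing to the prompt cache on these OpenRouter-hosted models.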