@mariozechner/pi-ai 0.10.0 → 0.10.1
package/dist/models.generated.js
CHANGED
@@ -1954,6 +1954,23 @@ export const MODELS = {
         },
     },
     openrouter: {
+        "prime-intellect/intellect-3": {
+            id: "prime-intellect/intellect-3",
+            name: "Prime Intellect: INTELLECT-3",
+            api: "openai-completions",
+            provider: "openrouter",
+            baseUrl: "https://openrouter.ai/api/v1",
+            reasoning: true,
+            input: ["text"],
+            cost: {
+                input: 0.19999999999999998,
+                output: 1.1,
+                cacheRead: 0,
+                cacheWrite: 0,
+            },
+            contextWindow: 131072,
+            maxTokens: 131072,
+        },
         "tngtech/tng-r1t-chimera:free": {
             id: "tngtech/tng-r1t-chimera:free",
             name: "TNG: R1T Chimera (free)",
@@ -2235,8 +2252,8 @@ export const MODELS = {
             reasoning: true,
             input: ["text"],
             cost: {
-                input: 0.
-                output:
+                input: 0.255,
+                output: 1.02,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
@@ -4980,34 +4997,34 @@ export const MODELS = {
             contextWindow: 32768,
             maxTokens: 4096,
         },
-        "cohere/command-r-08-2024": {
-            id: "cohere/command-r-08-2024",
-            name: "Cohere: Command R (08-2024)",
+        "cohere/command-r-plus-08-2024": {
+            id: "cohere/command-r-plus-08-2024",
+            name: "Cohere: Command R+ (08-2024)",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 2.5,
+                output: 10,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
             contextWindow: 128000,
             maxTokens: 4000,
         },
-        "cohere/command-r-
-            id: "cohere/command-r-
-            name: "Cohere: Command R
+        "cohere/command-r-08-2024": {
+            id: "cohere/command-r-08-2024",
+            name: "Cohere: Command R (08-2024)",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 0.15,
+                output: 0.6,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
@@ -5150,9 +5167,9 @@ export const MODELS = {
             contextWindow: 131072,
             maxTokens: 16384,
         },
-        "openai/gpt-4o-mini
-            id: "openai/gpt-4o-mini
-            name: "OpenAI: GPT-4o-mini
+        "openai/gpt-4o-mini": {
+            id: "openai/gpt-4o-mini",
+            name: "OpenAI: GPT-4o-mini",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
@@ -5167,9 +5184,9 @@ export const MODELS = {
             contextWindow: 128000,
             maxTokens: 16384,
         },
-        "openai/gpt-4o-mini": {
-            id: "openai/gpt-4o-mini",
-            name: "OpenAI: GPT-4o-mini",
+        "openai/gpt-4o-mini-2024-07-18": {
+            id: "openai/gpt-4o-mini-2024-07-18",
+            name: "OpenAI: GPT-4o-mini (2024-07-18)",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
@@ -5269,23 +5286,6 @@ export const MODELS = {
             contextWindow: 128000,
             maxTokens: 4096,
         },
-        "openai/gpt-4o-2024-05-13": {
-            id: "openai/gpt-4o-2024-05-13",
-            name: "OpenAI: GPT-4o (2024-05-13)",
-            api: "openai-completions",
-            provider: "openrouter",
-            baseUrl: "https://openrouter.ai/api/v1",
-            reasoning: false,
-            input: ["text", "image"],
-            cost: {
-                input: 5,
-                output: 15,
-                cacheRead: 0,
-                cacheWrite: 0,
-            },
-            contextWindow: 128000,
-            maxTokens: 4096,
-        },
         "openai/gpt-4o": {
             id: "openai/gpt-4o",
             name: "OpenAI: GPT-4o",
@@ -5320,22 +5320,22 @@ export const MODELS = {
             contextWindow: 128000,
             maxTokens: 64000,
         },
-        "
-            id: "
-            name: "
+        "openai/gpt-4o-2024-05-13": {
+            id: "openai/gpt-4o-2024-05-13",
+            name: "OpenAI: GPT-4o (2024-05-13)",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
-            input: ["text"],
+            input: ["text", "image"],
             cost: {
-                input:
-                output:
+                input: 5,
+                output: 15,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
-            maxTokens:
+            contextWindow: 128000,
+            maxTokens: 4096,
         },
         "meta-llama/llama-3-8b-instruct": {
             id: "meta-llama/llama-3-8b-instruct",
@@ -5354,6 +5354,23 @@ export const MODELS = {
             contextWindow: 8192,
             maxTokens: 16384,
         },
+        "meta-llama/llama-3-70b-instruct": {
+            id: "meta-llama/llama-3-70b-instruct",
+            name: "Meta: Llama 3 70B Instruct",
+            api: "openai-completions",
+            provider: "openrouter",
+            baseUrl: "https://openrouter.ai/api/v1",
+            reasoning: false,
+            input: ["text"],
+            cost: {
+                input: 0.3,
+                output: 0.39999999999999997,
+                cacheRead: 0,
+                cacheWrite: 0,
+            },
+            contextWindow: 8192,
+            maxTokens: 16384,
+        },
         "mistralai/mixtral-8x22b-instruct": {
             id: "mistralai/mixtral-8x22b-instruct",
             name: "Mistral: Mixtral 8x22B Instruct",
@@ -5439,38 +5456,38 @@ export const MODELS = {
             contextWindow: 128000,
             maxTokens: 4096,
         },
-        "openai/gpt-
-            id: "openai/gpt-
-            name: "OpenAI: GPT-
+        "openai/gpt-4-turbo-preview": {
+            id: "openai/gpt-4-turbo-preview",
+            name: "OpenAI: GPT-4 Turbo Preview",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 10,
+                output: 30,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
+            contextWindow: 128000,
             maxTokens: 4096,
         },
-        "openai/gpt-
-            id: "openai/gpt-
-            name: "OpenAI: GPT-
+        "openai/gpt-3.5-turbo-0613": {
+            id: "openai/gpt-3.5-turbo-0613",
+            name: "OpenAI: GPT-3.5 Turbo (older v0613)",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 1,
+                output: 2,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
+            contextWindow: 4095,
             maxTokens: 4096,
         },
         "mistralai/mistral-small": {
@@ -5575,38 +5592,38 @@ export const MODELS = {
             contextWindow: 8191,
             maxTokens: 4096,
         },
-        "openai/gpt-
-            id: "openai/gpt-
-            name: "OpenAI: GPT-
+        "openai/gpt-3.5-turbo": {
+            id: "openai/gpt-3.5-turbo",
+            name: "OpenAI: GPT-3.5 Turbo",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 0.5,
+                output: 1.5,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
+            contextWindow: 16385,
             maxTokens: 4096,
         },
-        "openai/gpt-
-            id: "openai/gpt-
-            name: "OpenAI: GPT-
+        "openai/gpt-4": {
+            id: "openai/gpt-4",
+            name: "OpenAI: GPT-4",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 30,
+                output: 60,
             cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
+            contextWindow: 8191,
             maxTokens: 4096,
         },
         "openrouter/auto": {
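
The new and changed entries share the record shape visible in the context lines above (id, name, api, provider, baseUrl, reasoning, input, cost, contextWindow, maxTokens). Below is a minimal consumption sketch, assuming the cost fields are USD per million tokens and that the generated module is importable at the path shown; both are assumptions for illustration, not confirmed by this diff, and estimateCostUsd is a hypothetical helper rather than package API.

// Sketch only: import path and per-million-token pricing are assumptions.
import { MODELS } from "@mariozechner/pi-ai/dist/models.generated.js";

// Look up the entry added in the first hunk.
const model = MODELS.openrouter["prime-intellect/intellect-3"];

// Estimate request cost in USD from token counts (hypothetical helper).
function estimateCostUsd(entry, inputTokens, outputTokens) {
    return (inputTokens * entry.cost.input + outputTokens * entry.cost.output) / 1_000_000;
}

console.log(model.contextWindow);                   // 131072
console.log(estimateCostUsd(model, 10_000, 2_000)); // ~0.0042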