@mariozechner/pi-ai 0.11.4 → 0.11.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/models.generated.js
CHANGED
@@ -1971,6 +1971,23 @@ export const MODELS = {
         },
     },
     openrouter: {
+        "deepseek/deepseek-v3.2": {
+            id: "deepseek/deepseek-v3.2",
+            name: "DeepSeek: DeepSeek V3.2",
+            api: "openai-completions",
+            provider: "openrouter",
+            baseUrl: "https://openrouter.ai/api/v1",
+            reasoning: true,
+            input: ["text"],
+            cost: {
+                input: 0.28,
+                output: 0.42,
+                cacheRead: 0.028,
+                cacheWrite: 0,
+            },
+            contextWindow: 131072,
+            maxTokens: 64000,
+        },
         "prime-intellect/intellect-3": {
             id: "prime-intellect/intellect-3",
             name: "Prime Intellect: INTELLECT-3",
@@ -5116,6 +5133,23 @@ export const MODELS = {
             contextWindow: 128000,
             maxTokens: 16384,
         },
+        "meta-llama/llama-3.1-70b-instruct": {
+            id: "meta-llama/llama-3.1-70b-instruct",
+            name: "Meta: Llama 3.1 70B Instruct",
+            api: "openai-completions",
+            provider: "openrouter",
+            baseUrl: "https://openrouter.ai/api/v1",
+            reasoning: false,
+            input: ["text"],
+            cost: {
+                input: 0.39999999999999997,
+                output: 0.39999999999999997,
+                cacheRead: 0,
+                cacheWrite: 0,
+            },
+            contextWindow: 131072,
+            maxTokens: 4096,
+        },
         "meta-llama/llama-3.1-8b-instruct": {
             id: "meta-llama/llama-3.1-8b-instruct",
             name: "Meta: Llama 3.1 8B Instruct",
@@ -5150,23 +5184,6 @@ export const MODELS = {
             contextWindow: 130815,
             maxTokens: 4096,
         },
-        "meta-llama/llama-3.1-70b-instruct": {
-            id: "meta-llama/llama-3.1-70b-instruct",
-            name: "Meta: Llama 3.1 70B Instruct",
-            api: "openai-completions",
-            provider: "openrouter",
-            baseUrl: "https://openrouter.ai/api/v1",
-            reasoning: false,
-            input: ["text"],
-            cost: {
-                input: 0.39999999999999997,
-                output: 0.39999999999999997,
-                cacheRead: 0,
-                cacheWrite: 0,
-            },
-            contextWindow: 131072,
-            maxTokens: 4096,
-        },
         "mistralai/mistral-nemo": {
             id: "mistralai/mistral-nemo",
             name: "Mistral: Mistral Nemo",
@@ -5303,6 +5320,23 @@ export const MODELS = {
             contextWindow: 128000,
             maxTokens: 4096,
         },
+        "openai/gpt-4o-2024-05-13": {
+            id: "openai/gpt-4o-2024-05-13",
+            name: "OpenAI: GPT-4o (2024-05-13)",
+            api: "openai-completions",
+            provider: "openrouter",
+            baseUrl: "https://openrouter.ai/api/v1",
+            reasoning: false,
+            input: ["text", "image"],
+            cost: {
+                input: 5,
+                output: 15,
+                cacheRead: 0,
+                cacheWrite: 0,
+            },
+            contextWindow: 128000,
+            maxTokens: 4096,
+        },
         "openai/gpt-4o": {
             id: "openai/gpt-4o",
             name: "OpenAI: GPT-4o",
@@ -5337,22 +5371,22 @@ export const MODELS = {
             contextWindow: 128000,
             maxTokens: 64000,
         },
-        "
-            id: "
-            name: "
+        "meta-llama/llama-3-70b-instruct": {
+            id: "meta-llama/llama-3-70b-instruct",
+            name: "Meta: Llama 3 70B Instruct",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
-            input: ["text"
+            input: ["text"],
             cost: {
-                input:
-                output:
+                input: 0.3,
+                output: 0.39999999999999997,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
-            maxTokens:
+            contextWindow: 8192,
+            maxTokens: 16384,
         },
         "meta-llama/llama-3-8b-instruct": {
             id: "meta-llama/llama-3-8b-instruct",
@@ -5371,23 +5405,6 @@ export const MODELS = {
             contextWindow: 8192,
             maxTokens: 16384,
         },
-        "meta-llama/llama-3-70b-instruct": {
-            id: "meta-llama/llama-3-70b-instruct",
-            name: "Meta: Llama 3 70B Instruct",
-            api: "openai-completions",
-            provider: "openrouter",
-            baseUrl: "https://openrouter.ai/api/v1",
-            reasoning: false,
-            input: ["text"],
-            cost: {
-                input: 0.3,
-                output: 0.39999999999999997,
-                cacheRead: 0,
-                cacheWrite: 0,
-            },
-            contextWindow: 8192,
-            maxTokens: 16384,
-        },
         "mistralai/mixtral-8x22b-instruct": {
             id: "mistralai/mixtral-8x22b-instruct",
             name: "Mistral: Mixtral 8x22B Instruct",
@@ -5473,38 +5490,38 @@ export const MODELS = {
             contextWindow: 128000,
             maxTokens: 4096,
         },
-        "openai/gpt-
-            id: "openai/gpt-
-            name: "OpenAI: GPT-
+        "openai/gpt-3.5-turbo-0613": {
+            id: "openai/gpt-3.5-turbo-0613",
+            name: "OpenAI: GPT-3.5 Turbo (older v0613)",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 1,
+                output: 2,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
+            contextWindow: 4095,
             maxTokens: 4096,
         },
-        "openai/gpt-
-            id: "openai/gpt-
-            name: "OpenAI: GPT-
+        "openai/gpt-4-turbo-preview": {
+            id: "openai/gpt-4-turbo-preview",
+            name: "OpenAI: GPT-4 Turbo Preview",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 10,
+                output: 30,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
+            contextWindow: 128000,
             maxTokens: 4096,
         },
         "mistralai/mistral-small": {
@@ -5592,38 +5609,38 @@ export const MODELS = {
             contextWindow: 16385,
             maxTokens: 4096,
         },
-        "openai/gpt-
-            id: "openai/gpt-
-            name: "OpenAI: GPT-
+        "openai/gpt-3.5-turbo": {
+            id: "openai/gpt-3.5-turbo",
+            name: "OpenAI: GPT-3.5 Turbo",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 0.5,
+                output: 1.5,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
+            contextWindow: 16385,
             maxTokens: 4096,
         },
-        "openai/gpt-
-            id: "openai/gpt-
-            name: "OpenAI: GPT-
+        "openai/gpt-4-0314": {
+            id: "openai/gpt-4-0314",
+            name: "OpenAI: GPT-4 (older v0314)",
             api: "openai-completions",
             provider: "openrouter",
             baseUrl: "https://openrouter.ai/api/v1",
             reasoning: false,
             input: ["text"],
             cost: {
-                input:
-                output:
+                input: 30,
+                output: 60,
                 cacheRead: 0,
                 cacheWrite: 0,
             },
-            contextWindow:
+            contextWindow: 8191,
             maxTokens: 4096,
         },
         "openai/gpt-4": {
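All of the hunks above touch only the flat MODELS lookup table (provider → model id → metadata), so downstream code that resolves models by key picks up the added and renamed entries without changes. The following is a minimal consumption sketch, not code from this package: it assumes MODELS is importable from the package entry point (the import path is illustrative, only dist/models.generated.js is shown in this diff) and that the cost fields are USD per million tokens, which matches the magnitudes above but is not stated here.

// Sketch under the assumptions stated above; estimateCostUsd is a hypothetical helper.
import { MODELS } from "@mariozechner/pi-ai";

// The entry added in the first hunk of this release.
const model = MODELS.openrouter["deepseek/deepseek-v3.2"];

// Rough request-cost estimate, treating cost fields as USD per million tokens.
function estimateCostUsd(m, inputTokens, outputTokens, cachedTokens = 0) {
    const uncachedInput = inputTokens - cachedTokens;
    return (
        (uncachedInput * m.cost.input +
            cachedTokens * m.cost.cacheRead +
            outputTokens * m.cost.output) /
        1_000_000
    );
}

console.log(model.contextWindow); // 131072
console.log(model.maxTokens); // 64000
console.log(estimateCostUsd(model, 4000, 1200)); // ~0.0016 under the per-million assumption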