@mariozechner/pi-ai 0.29.0 → 0.30.0
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
package/dist/models.generated.js
CHANGED
@@ -6356,23 +6356,6 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 16384,
     },
-    "meta-llama/llama-3.1-70b-instruct": {
-        id: "meta-llama/llama-3.1-70b-instruct",
-        name: "Meta: Llama 3.1 70B Instruct",
-        api: "openai-completions",
-        provider: "openrouter",
-        baseUrl: "https://openrouter.ai/api/v1",
-        reasoning: false,
-        input: ["text"],
-        cost: {
-            input: 0.39999999999999997,
-            output: 0.39999999999999997,
-            cacheRead: 0,
-            cacheWrite: 0,
-        },
-        contextWindow: 131072,
-        maxTokens: 4096,
-    },
     "meta-llama/llama-3.1-8b-instruct": {
         id: "meta-llama/llama-3.1-8b-instruct",
         name: "Meta: Llama 3.1 8B Instruct",
@@ -6407,6 +6390,23 @@ export const MODELS = {
         contextWindow: 10000,
         maxTokens: 4096,
     },
+    "meta-llama/llama-3.1-70b-instruct": {
+        id: "meta-llama/llama-3.1-70b-instruct",
+        name: "Meta: Llama 3.1 70B Instruct",
+        api: "openai-completions",
+        provider: "openrouter",
+        baseUrl: "https://openrouter.ai/api/v1",
+        reasoning: false,
+        input: ["text"],
+        cost: {
+            input: 0.39999999999999997,
+            output: 0.39999999999999997,
+            cacheRead: 0,
+            cacheWrite: 0,
+        },
+        contextWindow: 131072,
+        maxTokens: 4096,
+    },
     "mistralai/mistral-nemo": {
         id: "mistralai/mistral-nemo",
         name: "Mistral: Mistral Nemo",
@@ -6543,6 +6543,23 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
+    "openai/gpt-4o-2024-05-13": {
+        id: "openai/gpt-4o-2024-05-13",
+        name: "OpenAI: GPT-4o (2024-05-13)",
+        api: "openai-completions",
+        provider: "openrouter",
+        baseUrl: "https://openrouter.ai/api/v1",
+        reasoning: false,
+        input: ["text", "image"],
+        cost: {
+            input: 5,
+            output: 15,
+            cacheRead: 0,
+            cacheWrite: 0,
+        },
+        contextWindow: 128000,
+        maxTokens: 4096,
+    },
     "openai/gpt-4o": {
         id: "openai/gpt-4o",
         name: "OpenAI: GPT-4o",
@@ -6577,23 +6594,6 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 64000,
     },
-    "openai/gpt-4o-2024-05-13": {
-        id: "openai/gpt-4o-2024-05-13",
-        name: "OpenAI: GPT-4o (2024-05-13)",
-        api: "openai-completions",
-        provider: "openrouter",
-        baseUrl: "https://openrouter.ai/api/v1",
-        reasoning: false,
-        input: ["text", "image"],
-        cost: {
-            input: 5,
-            output: 15,
-            cacheRead: 0,
-            cacheWrite: 0,
-        },
-        contextWindow: 128000,
-        maxTokens: 4096,
-    },
     "meta-llama/llama-3-70b-instruct": {
         id: "meta-llama/llama-3-70b-instruct",
         name: "Meta: Llama 3 70B Instruct",
@@ -6713,38 +6713,38 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-3.5-turbo-0613": {
+        id: "openai/gpt-3.5-turbo-0613",
+        name: "OpenAI: GPT-3.5 Turbo (older v0613)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 1,
+            output: 2,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 4095,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-4-turbo-preview": {
+        id: "openai/gpt-4-turbo-preview",
+        name: "OpenAI: GPT-4 Turbo Preview",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 10,
+            output: 30,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 128000,
         maxTokens: 4096,
     },
     "mistralai/mistral-tiny": {
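Every entry added, removed, or moved in this diff follows the same shape used throughout models.generated.js: a model id, display name, API flavor, provider, base URL, reasoning flag, accepted input modalities, a cost table, and context/output limits. The TypeScript sketch below restates that shape and shows one way a consumer might estimate request cost from the cost fields. It is a minimal sketch, not part of pi-ai's exported API: the ModelEntry interface and the estimateCostUsd helper are illustrative names, and the assumption that cost.input/cost.output are USD per million tokens is mine, not stated in the diff.

// Hypothetical sketch of the entry shape visible in this diff; field names match
// the generated data, but this interface is not exported by pi-ai.
interface ModelEntry {
    id: string;
    name: string;
    api: string;
    provider: string;
    baseUrl: string;
    reasoning: boolean;
    input: ("text" | "image")[];
    cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
    contextWindow: number;
    maxTokens: number;
}

// Example entry copied from the hunk that re-adds GPT-4o (2024-05-13).
const gpt4o20240513: ModelEntry = {
    id: "openai/gpt-4o-2024-05-13",
    name: "OpenAI: GPT-4o (2024-05-13)",
    api: "openai-completions",
    provider: "openrouter",
    baseUrl: "https://openrouter.ai/api/v1",
    reasoning: false,
    input: ["text", "image"],
    cost: { input: 5, output: 15, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 128000,
    maxTokens: 4096,
};

// Illustrative helper, assuming cost.input/cost.output are USD per million tokens.
function estimateCostUsd(model: ModelEntry, inputTokens: number, outputTokens: number): number {
    return (inputTokens * model.cost.input + outputTokens * model.cost.output) / 1_000_000;
}

// 10,000 prompt tokens plus 1,000 completion tokens at 5/15 per million ≈ 0.065.
console.log(estimateCostUsd(gpt4o20240513, 10_000, 1_000).toFixed(3));

Under that per-million-token assumption, the GPT-4o (2024-05-13) pricing shown in the hunks above (input 5, output 15) puts a 10,000-token prompt with a 1,000-token completion at roughly $0.065.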