@mariozechner/pi-ai 0.30.1 → 0.30.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/models.generated.js
CHANGED
@@ -6356,6 +6356,23 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 16384,
     },
+    "meta-llama/llama-3.1-70b-instruct": {
+        id: "meta-llama/llama-3.1-70b-instruct",
+        name: "Meta: Llama 3.1 70B Instruct",
+        api: "openai-completions",
+        provider: "openrouter",
+        baseUrl: "https://openrouter.ai/api/v1",
+        reasoning: false,
+        input: ["text"],
+        cost: {
+            input: 0.39999999999999997,
+            output: 0.39999999999999997,
+            cacheRead: 0,
+            cacheWrite: 0,
+        },
+        contextWindow: 131072,
+        maxTokens: 4096,
+    },
     "meta-llama/llama-3.1-8b-instruct": {
         id: "meta-llama/llama-3.1-8b-instruct",
         name: "Meta: Llama 3.1 8B Instruct",
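Note: each entry in this generated table is a plain object keyed by model ID. The sketch below shows one way such an entry might be consumed; the import path and the helper name estimateCostUsd are illustrative, and the assumption that cost.input and cost.output are USD per million tokens is not confirmed by this diff.

// Illustrative only: the import path and helper below are hypothetical, not a documented API of the package.
import { MODELS } from "@mariozechner/pi-ai/dist/models.generated.js";

const model = MODELS["meta-llama/llama-3.1-70b-instruct"];

// Assumed unit: USD per 1,000,000 tokens (an assumption, not stated in the diff).
function estimateCostUsd(entry, inputTokens, outputTokens) {
    return (inputTokens * entry.cost.input + outputTokens * entry.cost.output) / 1_000_000;
}

console.log(model.contextWindow); // 131072
console.log(estimateCostUsd(model, 2000, 500)); // ~0.001 under the assumed unit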
@@ -6390,23 +6407,6 @@ export const MODELS = {
         contextWindow: 10000,
         maxTokens: 4096,
     },
-    "meta-llama/llama-3.1-70b-instruct": {
-        id: "meta-llama/llama-3.1-70b-instruct",
-        name: "Meta: Llama 3.1 70B Instruct",
-        api: "openai-completions",
-        provider: "openrouter",
-        baseUrl: "https://openrouter.ai/api/v1",
-        reasoning: false,
-        input: ["text"],
-        cost: {
-            input: 0.39999999999999997,
-            output: 0.39999999999999997,
-            cacheRead: 0,
-            cacheWrite: 0,
-        },
-        contextWindow: 131072,
-        maxTokens: 4096,
-    },
     "mistralai/mistral-nemo": {
         id: "mistralai/mistral-nemo",
         name: "Mistral: Mistral Nemo",
@@ -6543,23 +6543,6 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
-    "openai/gpt-4o-2024-05-13": {
-        id: "openai/gpt-4o-2024-05-13",
-        name: "OpenAI: GPT-4o (2024-05-13)",
-        api: "openai-completions",
-        provider: "openrouter",
-        baseUrl: "https://openrouter.ai/api/v1",
-        reasoning: false,
-        input: ["text", "image"],
-        cost: {
-            input: 5,
-            output: 15,
-            cacheRead: 0,
-            cacheWrite: 0,
-        },
-        contextWindow: 128000,
-        maxTokens: 4096,
-    },
     "openai/gpt-4o": {
         id: "openai/gpt-4o",
         name: "OpenAI: GPT-4o",
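The input array on each entry lists accepted modalities ("text" and "image" for the GPT-4o snapshot removed here and re-added later in this diff). A minimal, hypothetical filter over the same table, reusing the illustrative import path from the sketch above:

// Illustrative only: list OpenRouter entries that accept image input.
import { MODELS } from "@mariozechner/pi-ai/dist/models.generated.js"; // assumed path

const imageCapable = Object.values(MODELS).filter(
    (m) => m.provider === "openrouter" && m.input.includes("image"),
);

console.log(imageCapable.map((m) => m.id)); // includes "openai/gpt-4o-2024-05-13"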
@@ -6594,6 +6577,23 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 64000,
     },
+    "openai/gpt-4o-2024-05-13": {
+        id: "openai/gpt-4o-2024-05-13",
+        name: "OpenAI: GPT-4o (2024-05-13)",
+        api: "openai-completions",
+        provider: "openrouter",
+        baseUrl: "https://openrouter.ai/api/v1",
+        reasoning: false,
+        input: ["text", "image"],
+        cost: {
+            input: 5,
+            output: 15,
+            cacheRead: 0,
+            cacheWrite: 0,
+        },
+        contextWindow: 128000,
+        maxTokens: 4096,
+    },
     "meta-llama/llama-3-70b-instruct": {
         id: "meta-llama/llama-3-70b-instruct",
         name: "Meta: Llama 3 70B Instruct",
@@ -6713,38 +6713,38 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-4-turbo-preview": {
+        id: "openai/gpt-4-turbo-preview",
+        name: "OpenAI: GPT-4 Turbo Preview",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 10,
+            output: 30,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 128000,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-3.5-turbo-0613": {
+        id: "openai/gpt-3.5-turbo-0613",
+        name: "OpenAI: GPT-3.5 Turbo (older v0613)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 1,
+            output: 2,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 4095,
         maxTokens: 4096,
     },
     "mistralai/mistral-tiny": {
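The last hunk replaces two openai/gpt-* entries whose removed lines are truncated in this rendering; only the new values are visible: "openai/gpt-4-turbo-preview" (cost 10/30, contextWindow 128000) and "openai/gpt-3.5-turbo-0613" (cost 1/2, contextWindow 4095). A hypothetical helper like the one below, given two snapshots of the MODELS table, would surface exactly this kind of field-level change; the name diffModels and its output shape are illustrative, not part of the package.

// Illustrative only: compare two snapshots of the MODELS table and report
// added, removed, or updated entries (cost or contextWindow changes).
function diffModels(oldModels, newModels) {
    const changes = [];
    for (const [id, next] of Object.entries(newModels)) {
        const prev = oldModels[id];
        if (!prev) {
            changes.push({ id, kind: "added" });
        } else if (
            prev.cost.input !== next.cost.input ||
            prev.cost.output !== next.cost.output ||
            prev.contextWindow !== next.contextWindow
        ) {
            changes.push({ id, kind: "updated" });
        }
    }
    for (const id of Object.keys(oldModels)) {
        if (!newModels[id]) changes.push({ id, kind: "removed" });
    }
    return changes;
}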