@mariozechner/pi-ai 0.19.2 → 0.20.1
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
package/dist/models.generated.js
CHANGED
@@ -5730,34 +5730,34 @@ export const MODELS = {
         contextWindow: 32768,
         maxTokens: 4096,
     },
-    "cohere/command-r-
-        id: "cohere/command-r-
-        name: "Cohere: Command R
+    "cohere/command-r-08-2024": {
+        id: "cohere/command-r-08-2024",
+        name: "Cohere: Command R (08-2024)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 0.15,
+            output: 0.6,
             cacheRead: 0,
             cacheWrite: 0,
         },
         contextWindow: 128000,
         maxTokens: 4000,
     },
-    "cohere/command-r-08-2024": {
-        id: "cohere/command-r-08-2024",
-        name: "Cohere: Command R (08-2024)",
+    "cohere/command-r-plus-08-2024": {
+        id: "cohere/command-r-plus-08-2024",
+        name: "Cohere: Command R+ (08-2024)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 2.5,
+            output: 10,
             cacheRead: 0,
             cacheWrite: 0,
         },
@@ -5832,38 +5832,38 @@ export const MODELS = {
         contextWindow: 131072,
         maxTokens: 16384,
     },
-    "meta-llama/llama-3.1-
-        id: "meta-llama/llama-3.1-
-        name: "Meta: Llama 3.1
+    "meta-llama/llama-3.1-405b-instruct": {
+        id: "meta-llama/llama-3.1-405b-instruct",
+        name: "Meta: Llama 3.1 405B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 3.5,
+            output: 3.5,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 130815,
         maxTokens: 4096,
     },
-    "meta-llama/llama-3.1-
-        id: "meta-llama/llama-3.1-
-        name: "Meta: Llama 3.1
+    "meta-llama/llama-3.1-70b-instruct": {
+        id: "meta-llama/llama-3.1-70b-instruct",
+        name: "Meta: Llama 3.1 70B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 0.39999999999999997,
+            output: 0.39999999999999997,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 131072,
         maxTokens: 4096,
     },
     "mistralai/mistral-nemo": {
@@ -5883,9 +5883,9 @@ export const MODELS = {
         contextWindow: 131072,
         maxTokens: 16384,
     },
-    "openai/gpt-4o-mini": {
-        id: "openai/gpt-4o-mini",
-        name: "OpenAI: GPT-4o-mini",
+    "openai/gpt-4o-mini-2024-07-18": {
+        id: "openai/gpt-4o-mini-2024-07-18",
+        name: "OpenAI: GPT-4o-mini (2024-07-18)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
@@ -5900,9 +5900,9 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 16384,
     },
-    "openai/gpt-4o-mini
-        id: "openai/gpt-4o-mini
-        name: "OpenAI: GPT-4o-mini
+    "openai/gpt-4o-mini": {
+        id: "openai/gpt-4o-mini",
+        name: "OpenAI: GPT-4o-mini",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
@@ -6053,34 +6053,34 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 64000,
     },
-    "meta-llama/llama-3-
-        id: "meta-llama/llama-3-
-        name: "Meta: Llama 3
+    "meta-llama/llama-3-70b-instruct": {
+        id: "meta-llama/llama-3-70b-instruct",
+        name: "Meta: Llama 3 70B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input: 0.
-            output: 0.
+            input: 0.3,
+            output: 0.39999999999999997,
             cacheRead: 0,
             cacheWrite: 0,
         },
         contextWindow: 8192,
         maxTokens: 16384,
     },
-    "meta-llama/llama-3-
-        id: "meta-llama/llama-3-
-        name: "Meta: Llama 3
+    "meta-llama/llama-3-8b-instruct": {
+        id: "meta-llama/llama-3-8b-instruct",
+        name: "Meta: Llama 3 8B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input: 0.
-            output: 0.
+            input: 0.03,
+            output: 0.06,
             cacheRead: 0,
             cacheWrite: 0,
         },
@@ -6172,38 +6172,38 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-3.5-turbo-0613": {
+        id: "openai/gpt-3.5-turbo-0613",
+        name: "OpenAI: GPT-3.5 Turbo (older v0613)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 1,
+            output: 2,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 4095,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-4-turbo-preview": {
+        id: "openai/gpt-4-turbo-preview",
+        name: "OpenAI: GPT-4 Turbo Preview",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 10,
+            output: 30,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 128000,
         maxTokens: 4096,
     },
     "mistralai/mistral-tiny": {
@@ -6274,9 +6274,9 @@ export const MODELS = {
         contextWindow: 16385,
         maxTokens: 4096,
     },
-    "openai/gpt-4": {
-        id: "openai/gpt-4",
-        name: "OpenAI: GPT-4",
+    "openai/gpt-4-0314": {
+        id: "openai/gpt-4-0314",
+        name: "OpenAI: GPT-4 (older v0314)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
@@ -6291,9 +6291,9 @@ export const MODELS = {
         contextWindow: 8191,
         maxTokens: 4096,
     },
-    "openai/gpt-4
-        id: "openai/gpt-4
-        name: "OpenAI: GPT-4
+    "openai/gpt-4": {
+        id: "openai/gpt-4",
+        name: "OpenAI: GPT-4",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
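
Each entry in MODELS follows the same shape: an OpenRouter model id mapped to an object with api, provider, baseUrl, reasoning, input, cost, contextWindow, and maxTokens. The sketch below is not part of the package diff; it only illustrates how one of the entries added in 0.20.1 might be consumed. It assumes MODELS can be imported from the package and that cost.input / cost.output are USD per million tokens (the 0.15 / 0.6 figures for cohere/command-r-08-2024 match OpenRouter's published per-million pricing); the estimateCost helper and the import path are hypothetical.

// Assumption: MODELS is reachable from the package; the actual export surface may differ.
import { MODELS } from "@mariozechner/pi-ai";

// Hypothetical helper: rough per-request cost, assuming cost fields are USD per 1M tokens.
function estimateCost(modelId, inputTokens, outputTokens) {
    const model = MODELS[modelId];
    if (!model) throw new Error(`Unknown model: ${modelId}`);
    return (inputTokens * model.cost.input + outputTokens * model.cost.output) / 1_000_000;
}

// One of the ids added in this release (first hunk above):
console.log(estimateCost("cohere/command-r-08-2024", 2000, 500)); // ≈ 0.0006 USD at 0.15 / 0.6 per 1M tokens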