@mariozechner/pi-ai 0.19.1 → 0.20.0
This diff shows the changes between publicly released versions of the package as they appear in the public registry; it is provided for informational purposes only.
package/dist/models.generated.js
CHANGED
@@ -3605,23 +3605,6 @@ export const MODELS = {
         contextWindow: 262144,
         maxTokens: 4096,
     },
-    "meituan/longcat-flash-chat:free": {
-        id: "meituan/longcat-flash-chat:free",
-        name: "Meituan: LongCat Flash Chat (free)",
-        api: "openai-completions",
-        provider: "openrouter",
-        baseUrl: "https://openrouter.ai/api/v1",
-        reasoning: false,
-        input: ["text"],
-        cost: {
-            input: 0,
-            output: 0,
-            cacheRead: 0,
-            cacheWrite: 0,
-        },
-        contextWindow: 131072,
-        maxTokens: 131072,
-    },
     "qwen/qwen-plus-2025-07-28": {
         id: "qwen/qwen-plus-2025-07-28",
         name: "Qwen: Qwen Plus 0728",
@@ -5747,34 +5730,34 @@ export const MODELS = {
         contextWindow: 32768,
         maxTokens: 4096,
     },
-    "cohere/command-r-08-2024": {
-        id: "cohere/command-r-08-2024",
-        name: "Cohere: Command R (08-2024)",
+    "cohere/command-r-plus-08-2024": {
+        id: "cohere/command-r-plus-08-2024",
+        name: "Cohere: Command R+ (08-2024)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 2.5,
+            output: 10,
             cacheRead: 0,
             cacheWrite: 0,
         },
         contextWindow: 128000,
         maxTokens: 4000,
     },
-    "cohere/command-r-
-        id: "cohere/command-r-
-        name: "Cohere: Command R
+    "cohere/command-r-08-2024": {
+        id: "cohere/command-r-08-2024",
+        name: "Cohere: Command R (08-2024)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 0.15,
+            output: 0.6,
             cacheRead: 0,
             cacheWrite: 0,
         },
@@ -5849,38 +5832,38 @@ export const MODELS = {
         contextWindow: 131072,
         maxTokens: 16384,
     },
-    "meta-llama/llama-3.1-
-        id: "meta-llama/llama-3.1-
-        name: "Meta: Llama 3.1
+    "meta-llama/llama-3.1-70b-instruct": {
+        id: "meta-llama/llama-3.1-70b-instruct",
+        name: "Meta: Llama 3.1 70B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 0.39999999999999997,
+            output: 0.39999999999999997,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 131072,
         maxTokens: 4096,
     },
-    "meta-llama/llama-3.1-
-        id: "meta-llama/llama-3.1-
-        name: "Meta: Llama 3.1
+    "meta-llama/llama-3.1-405b-instruct": {
+        id: "meta-llama/llama-3.1-405b-instruct",
+        name: "Meta: Llama 3.1 405B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 3.5,
+            output: 3.5,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 130815,
         maxTokens: 4096,
     },
     "mistralai/mistral-nemo": {
@@ -5900,9 +5883,9 @@ export const MODELS = {
         contextWindow: 131072,
         maxTokens: 16384,
     },
-    "openai/gpt-4o-mini
-        id: "openai/gpt-4o-mini
-        name: "OpenAI: GPT-4o-mini
+    "openai/gpt-4o-mini": {
+        id: "openai/gpt-4o-mini",
+        name: "OpenAI: GPT-4o-mini",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
@@ -5917,9 +5900,9 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 16384,
     },
-    "openai/gpt-4o-mini": {
-        id: "openai/gpt-4o-mini",
-        name: "OpenAI: GPT-4o-mini",
+    "openai/gpt-4o-mini-2024-07-18": {
+        id: "openai/gpt-4o-mini-2024-07-18",
+        name: "OpenAI: GPT-4o-mini (2024-07-18)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
@@ -6070,34 +6053,34 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 64000,
     },
-    "meta-llama/llama-3-
-        id: "meta-llama/llama-3-
-        name: "Meta: Llama 3
+    "meta-llama/llama-3-8b-instruct": {
+        id: "meta-llama/llama-3-8b-instruct",
+        name: "Meta: Llama 3 8B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input: 0.
-            output: 0.
+            input: 0.03,
+            output: 0.06,
             cacheRead: 0,
             cacheWrite: 0,
         },
         contextWindow: 8192,
         maxTokens: 16384,
     },
-    "meta-llama/llama-3-
-        id: "meta-llama/llama-3-
-        name: "Meta: Llama 3
+    "meta-llama/llama-3-70b-instruct": {
+        id: "meta-llama/llama-3-70b-instruct",
+        name: "Meta: Llama 3 70B Instruct",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input: 0.
-            output: 0.
+            input: 0.3,
+            output: 0.39999999999999997,
             cacheRead: 0,
             cacheWrite: 0,
         },
@@ -6189,38 +6172,38 @@ export const MODELS = {
         contextWindow: 128000,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-4-turbo-preview": {
+        id: "openai/gpt-4-turbo-preview",
+        name: "OpenAI: GPT-4 Turbo Preview",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 10,
+            output: 30,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 128000,
         maxTokens: 4096,
     },
-    "openai/gpt-
-        id: "openai/gpt-
-        name: "OpenAI: GPT-
+    "openai/gpt-3.5-turbo-0613": {
+        id: "openai/gpt-3.5-turbo-0613",
+        name: "OpenAI: GPT-3.5 Turbo (older v0613)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
         reasoning: false,
         input: ["text"],
         cost: {
-            input:
-            output:
+            input: 1,
+            output: 2,
             cacheRead: 0,
             cacheWrite: 0,
         },
-        contextWindow:
+        contextWindow: 4095,
         maxTokens: 4096,
     },
     "mistralai/mistral-tiny": {
@@ -6291,9 +6274,9 @@ export const MODELS = {
         contextWindow: 16385,
         maxTokens: 4096,
     },
-    "openai/gpt-4
-        id: "openai/gpt-4
-        name: "OpenAI: GPT-4
+    "openai/gpt-4": {
+        id: "openai/gpt-4",
+        name: "OpenAI: GPT-4",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",
@@ -6308,9 +6291,9 @@ export const MODELS = {
         contextWindow: 8191,
         maxTokens: 4096,
     },
-    "openai/gpt-4": {
-        id: "openai/gpt-4",
-        name: "OpenAI: GPT-4",
+    "openai/gpt-4-0314": {
+        id: "openai/gpt-4-0314",
+        name: "OpenAI: GPT-4 (older v0314)",
         api: "openai-completions",
         provider: "openrouter",
         baseUrl: "https://openrouter.ai/api/v1",