@mariozechner/pi-ai 0.22.2 → 0.22.3

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -2802,6 +2802,40 @@ export const MODELS = {
  },
  },
  "openrouter": {
+ "mistralai/mistral-small-creative": {
+ id: "mistralai/mistral-small-creative",
+ name: "Mistral: Mistral Small Creative",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.09999999999999999,
+ output: 0.3,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 32768,
+ maxTokens: 4096,
+ },
+ "xiaomi/mimo-v2-flash:free": {
+ id: "xiaomi/mimo-v2-flash:free",
+ name: "Xiaomi: MiMo-V2-Flash (free)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 262144,
+ },
  "nvidia/nemotron-3-nano-30b-a3b:free": {
  id: "nvidia/nemotron-3-nano-30b-a3b:free",
  name: "NVIDIA: Nemotron 3 Nano 30B A3B (free)",
@@ -3425,7 +3459,7 @@ export const MODELS = {
  cost: {
  input: 0.19999999999999998,
  output: 1,
- cacheRead: 0,
+ cacheRead: 0.03,
  cacheWrite: 0,
  },
  contextWindow: 196608,
@@ -3644,13 +3678,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.38,
- output: 1.69,
- cacheRead: 0.06,
+ input: 0.39,
+ output: 1.9,
+ cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 202752,
- maxTokens: 202752,
+ contextWindow: 204800,
+ maxTokens: 204800,
  },
  "z-ai/glm-4.6:exacto": {
  id: "z-ai/glm-4.6:exacto",
@@ -6100,34 +6134,34 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 4096,
  },
- "cohere/command-r-08-2024": {
- id: "cohere/command-r-08-2024",
- name: "Cohere: Command R (08-2024)",
+ "cohere/command-r-plus-08-2024": {
+ id: "cohere/command-r-plus-08-2024",
+ name: "Cohere: Command R+ (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.15,
- output: 0.6,
+ input: 2.5,
+ output: 10,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 128000,
  maxTokens: 4000,
  },
- "cohere/command-r-plus-08-2024": {
- id: "cohere/command-r-plus-08-2024",
- name: "Cohere: Command R+ (08-2024)",
+ "cohere/command-r-08-2024": {
+ id: "cohere/command-r-08-2024",
+ name: "Cohere: Command R (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 2.5,
- output: 10,
+ input: 0.15,
+ output: 0.6,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -6185,22 +6219,22 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
- "meta-llama/llama-3.1-8b-instruct": {
- id: "meta-llama/llama-3.1-8b-instruct",
- name: "Meta: Llama 3.1 8B Instruct",
+ "meta-llama/llama-3.1-405b-instruct": {
+ id: "meta-llama/llama-3.1-405b-instruct",
+ name: "Meta: Llama 3.1 405B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.02,
- output: 0.03,
+ input: 3.5,
+ output: 3.5,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 131072,
- maxTokens: 16384,
+ contextWindow: 130815,
+ maxTokens: 4096,
  },
  "meta-llama/llama-3.1-70b-instruct": {
  id: "meta-llama/llama-3.1-70b-instruct",
@@ -6219,22 +6253,22 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 4096,
  },
- "meta-llama/llama-3.1-405b-instruct": {
- id: "meta-llama/llama-3.1-405b-instruct",
- name: "Meta: Llama 3.1 405B Instruct",
+ "meta-llama/llama-3.1-8b-instruct": {
+ id: "meta-llama/llama-3.1-8b-instruct",
+ name: "Meta: Llama 3.1 8B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 3.5,
- output: 3.5,
+ input: 0.02,
+ output: 0.03,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 130815,
- maxTokens: 4096,
+ contextWindow: 131072,
+ maxTokens: 16384,
  },
  "mistralai/mistral-nemo": {
  id: "mistralai/mistral-nemo",
@@ -6372,6 +6406,23 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
+ "openai/gpt-4o-2024-05-13": {
+ id: "openai/gpt-4o-2024-05-13",
+ name: "OpenAI: GPT-4o (2024-05-13)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 5,
+ output: 15,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 4096,
+ },
  "openai/gpt-4o": {
  id: "openai/gpt-4o",
  name: "OpenAI: GPT-4o",
@@ -6406,23 +6457,6 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 64000,
  },
- "openai/gpt-4o-2024-05-13": {
- id: "openai/gpt-4o-2024-05-13",
- name: "OpenAI: GPT-4o (2024-05-13)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text", "image"],
- cost: {
- input: 5,
- output: 15,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 128000,
- maxTokens: 4096,
- },
  "meta-llama/llama-3-8b-instruct": {
  id: "meta-llama/llama-3-8b-instruct",
  name: "Meta: Llama 3 8B Instruct",
@@ -6661,38 +6695,38 @@ export const MODELS = {
  contextWindow: 8191,
  maxTokens: 4096,
  },
- "openai/gpt-3.5-turbo": {
- id: "openai/gpt-3.5-turbo",
- name: "OpenAI: GPT-3.5 Turbo",
+ "openai/gpt-4": {
+ id: "openai/gpt-4",
+ name: "OpenAI: GPT-4",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.5,
- output: 1.5,
+ input: 30,
+ output: 60,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 16385,
+ contextWindow: 8191,
  maxTokens: 4096,
  },
- "openai/gpt-4": {
- id: "openai/gpt-4",
- name: "OpenAI: GPT-4",
+ "openai/gpt-3.5-turbo": {
+ id: "openai/gpt-3.5-turbo",
+ name: "OpenAI: GPT-3.5 Turbo",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 30,
- output: 60,
+ input: 0.5,
+ output: 1.5,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 8191,
+ contextWindow: 16385,
  maxTokens: 4096,
  },
  "openrouter/auto": {