@mariozechner/pi-ai 0.7.9 → 0.7.11

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -1852,6 +1852,40 @@ export const MODELS = {
  },
  },
  openrouter: {
+ "openrouter/sherlock-dash-alpha": {
+ id: "openrouter/sherlock-dash-alpha",
+ name: "Sherlock Dash Alpha",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 1840000,
+ maxTokens: 64000,
+ },
+ "openrouter/sherlock-think-alpha": {
+ id: "openrouter/sherlock-think-alpha",
+ name: "Sherlock Think Alpha",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 1840000,
+ maxTokens: 64000,
+ },
  "openai/gpt-5.1": {
  id: "openai/gpt-5.1",
  name: "OpenAI: GPT-5.1",
@@ -3017,13 +3051,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0,
- output: 0,
+ input: 0.04,
+ output: 0.39999999999999997,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 4096,
+ maxTokens: 131072,
  },
  "openai/gpt-oss-120b:exacto": {
  id: "openai/gpt-oss-120b:exacto",
@@ -3722,23 +3756,6 @@ export const MODELS = {
  contextWindow: 200000,
  maxTokens: 100000,
  },
- "meta-llama/llama-3.3-8b-instruct:free": {
- id: "meta-llama/llama-3.3-8b-instruct:free",
- name: "Meta: Llama 3.3 8B Instruct (free)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 128000,
- maxTokens: 4028,
- },
  "nousresearch/deephermes-3-mistral-24b-preview": {
  id: "nousresearch/deephermes-3-mistral-24b-preview",
  name: "Nous: DeepHermes 3 Mistral 24B Preview",
@@ -4079,23 +4096,6 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 4096,
  },
- "meta-llama/llama-4-maverick:free": {
- id: "meta-llama/llama-4-maverick:free",
- name: "Meta: Llama 4 Maverick (free)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text", "image"],
- cost: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 128000,
- maxTokens: 4028,
- },
  "meta-llama/llama-4-maverick": {
  id: "meta-llama/llama-4-maverick",
  name: "Meta: Llama 4 Maverick",
@@ -4113,23 +4113,6 @@ export const MODELS = {
  contextWindow: 1048576,
  maxTokens: 16384,
  },
- "meta-llama/llama-4-scout:free": {
- id: "meta-llama/llama-4-scout:free",
- name: "Meta: Llama 4 Scout (free)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text", "image"],
- cost: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 128000,
- maxTokens: 4028,
- },
  "meta-llama/llama-4-scout": {
  id: "meta-llama/llama-4-scout",
  name: "Meta: Llama 4 Scout",
@@ -4776,34 +4759,34 @@ export const MODELS = {
  contextWindow: 200000,
  maxTokens: 8192,
  },
- "mistralai/ministral-8b": {
- id: "mistralai/ministral-8b",
- name: "Mistral: Ministral 8B",
+ "mistralai/ministral-3b": {
+ id: "mistralai/ministral-3b",
+ name: "Mistral: Ministral 3B",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.09999999999999999,
- output: 0.09999999999999999,
+ input: 0.04,
+ output: 0.04,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
  maxTokens: 4096,
  },
- "mistralai/ministral-3b": {
- id: "mistralai/ministral-3b",
- name: "Mistral: Ministral 3B",
+ "mistralai/ministral-8b": {
+ id: "mistralai/ministral-8b",
+ name: "Mistral: Ministral 8B",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.04,
- output: 0.04,
+ input: 0.09999999999999999,
+ output: 0.09999999999999999,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -4836,8 +4819,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.6,
- output: 0.6,
+ input: 1.2,
+ output: 1.2,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -4912,34 +4895,34 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 4096,
  },
- "cohere/command-r-08-2024": {
- id: "cohere/command-r-08-2024",
- name: "Cohere: Command R (08-2024)",
+ "cohere/command-r-plus-08-2024": {
+ id: "cohere/command-r-plus-08-2024",
+ name: "Cohere: Command R+ (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.15,
- output: 0.6,
+ input: 2.5,
+ output: 10,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 128000,
  maxTokens: 4000,
  },
- "cohere/command-r-plus-08-2024": {
- id: "cohere/command-r-plus-08-2024",
- name: "Cohere: Command R+ (08-2024)",
+ "cohere/command-r-08-2024": {
+ id: "cohere/command-r-08-2024",
+ name: "Cohere: Command R (08-2024)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 2.5,
- output: 10,
+ input: 0.15,
+ output: 0.6,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -5014,22 +4997,22 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
- "meta-llama/llama-3.1-8b-instruct": {
- id: "meta-llama/llama-3.1-8b-instruct",
- name: "Meta: Llama 3.1 8B Instruct",
+ "meta-llama/llama-3.1-70b-instruct": {
+ id: "meta-llama/llama-3.1-70b-instruct",
+ name: "Meta: Llama 3.1 70B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.02,
- output: 0.03,
+ input: 0.39999999999999997,
+ output: 0.39999999999999997,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 16384,
+ maxTokens: 4096,
  },
  "meta-llama/llama-3.1-405b-instruct": {
  id: "meta-llama/llama-3.1-405b-instruct",
@@ -5048,22 +5031,22 @@ export const MODELS = {
  contextWindow: 130815,
  maxTokens: 4096,
  },
- "meta-llama/llama-3.1-70b-instruct": {
- id: "meta-llama/llama-3.1-70b-instruct",
- name: "Meta: Llama 3.1 70B Instruct",
+ "meta-llama/llama-3.1-8b-instruct": {
+ id: "meta-llama/llama-3.1-8b-instruct",
+ name: "Meta: Llama 3.1 8B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.39999999999999997,
- output: 0.39999999999999997,
+ input: 0.02,
+ output: 0.03,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 4096,
+ maxTokens: 16384,
  },
  "mistralai/mistral-nemo": {
  id: "mistralai/mistral-nemo",
@@ -5116,23 +5099,6 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
- "anthropic/claude-3.5-sonnet-20240620": {
- id: "anthropic/claude-3.5-sonnet-20240620",
- name: "Anthropic: Claude 3.5 Sonnet (2024-06-20)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text", "image"],
- cost: {
- input: 3,
- output: 15,
- cacheRead: 0.3,
- cacheWrite: 3.75,
- },
- contextWindow: 200000,
- maxTokens: 8192,
- },
  "sao10k/l3-euryale-70b": {
  id: "sao10k/l3-euryale-70b",
  name: "Sao10k: Llama 3 Euryale 70B v2.1",
@@ -5218,23 +5184,6 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/gpt-4o-2024-05-13": {
- id: "openai/gpt-4o-2024-05-13",
- name: "OpenAI: GPT-4o (2024-05-13)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text", "image"],
- cost: {
- input: 5,
- output: 15,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 128000,
- maxTokens: 4096,
- },
  "openai/gpt-4o": {
  id: "openai/gpt-4o",
  name: "OpenAI: GPT-4o",
@@ -5269,6 +5218,23 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 64000,
  },
+ "openai/gpt-4o-2024-05-13": {
+ id: "openai/gpt-4o-2024-05-13",
+ name: "OpenAI: GPT-4o (2024-05-13)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 5,
+ output: 15,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 4096,
+ },
  "meta-llama/llama-3-70b-instruct": {
  id: "meta-llama/llama-3-70b-instruct",
  name: "Meta: Llama 3 70B Instruct",
@@ -5388,38 +5354,38 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/gpt-3.5-turbo-0613": {
- id: "openai/gpt-3.5-turbo-0613",
- name: "OpenAI: GPT-3.5 Turbo (older v0613)",
+ "openai/gpt-4-turbo-preview": {
+ id: "openai/gpt-4-turbo-preview",
+ name: "OpenAI: GPT-4 Turbo Preview",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 1,
- output: 2,
+ input: 10,
+ output: 30,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 4095,
+ contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/gpt-4-turbo-preview": {
- id: "openai/gpt-4-turbo-preview",
- name: "OpenAI: GPT-4 Turbo Preview",
+ "openai/gpt-3.5-turbo-0613": {
+ id: "openai/gpt-3.5-turbo-0613",
+ name: "OpenAI: GPT-3.5 Turbo (older v0613)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 10,
- output: 30,
+ input: 1,
+ output: 2,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 128000,
+ contextWindow: 4095,
  maxTokens: 4096,
  },
  "mistralai/mistral-small": {