@mariozechner/pi-ai 0.7.5 → 0.7.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -805,6 +805,23 @@ export const MODELS = {
       contextWindow: 200000,
       maxTokens: 100000,
     },
+    "gpt-5.1": {
+      id: "gpt-5.1",
+      name: "GPT-5.1",
+      api: "openai-responses",
+      provider: "openai",
+      baseUrl: "https://api.openai.com/v1",
+      reasoning: true,
+      input: ["text", "image"],
+      cost: {
+        input: 1.25,
+        output: 10,
+        cacheRead: 0.13,
+        cacheWrite: 0,
+      },
+      contextWindow: 400000,
+      maxTokens: 128000,
+    },
     "codex-mini-latest": {
       id: "codex-mini-latest",
       name: "Codex Mini",
@@ -1835,31 +1852,65 @@ export const MODELS = {
     },
   },
   openrouter: {
-    "kwaipilot/kat-coder-pro:free": {
-      id: "kwaipilot/kat-coder-pro:free",
-      name: "Kwaipilot: KAT-Coder-Pro V1 (free)",
+    "openai/gpt-5.1": {
+      id: "openai/gpt-5.1",
+      name: "OpenAI: GPT-5.1",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
-      reasoning: false,
-      input: ["text"],
+      reasoning: true,
+      input: ["text", "image"],
       cost: {
-        input: 0,
-        output: 0,
-        cacheRead: 0,
+        input: 1.25,
+        output: 10,
+        cacheRead: 0.125,
         cacheWrite: 0,
       },
-      contextWindow: 256000,
-      maxTokens: 32000,
+      contextWindow: 400000,
+      maxTokens: 128000,
     },
-    "openrouter/polaris-alpha": {
-      id: "openrouter/polaris-alpha",
-      name: "Polaris Alpha",
+    "openai/gpt-5.1-codex": {
+      id: "openai/gpt-5.1-codex",
+      name: "OpenAI: GPT-5.1-Codex",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
-      reasoning: false,
+      reasoning: true,
+      input: ["text", "image"],
+      cost: {
+        input: 1.25,
+        output: 10,
+        cacheRead: 0.125,
+        cacheWrite: 0,
+      },
+      contextWindow: 400000,
+      maxTokens: 128000,
+    },
+    "openai/gpt-5.1-codex-mini": {
+      id: "openai/gpt-5.1-codex-mini",
+      name: "OpenAI: GPT-5.1-Codex-Mini",
+      api: "openai-completions",
+      provider: "openrouter",
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: true,
       input: ["text", "image"],
+      cost: {
+        input: 1.5,
+        output: 6,
+        cacheRead: 0.375,
+        cacheWrite: 0,
+      },
+      contextWindow: 400000,
+      maxTokens: 100000,
+    },
+    "kwaipilot/kat-coder-pro:free": {
+      id: "kwaipilot/kat-coder-pro:free",
+      name: "Kwaipilot: KAT-Coder-Pro V1 (free)",
+      api: "openai-completions",
+      provider: "openrouter",
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: false,
+      input: ["text"],
       cost: {
         input: 0,
         output: 0,
@@ -1867,7 +1918,7 @@ export const MODELS = {
         cacheWrite: 0,
       },
       contextWindow: 256000,
-      maxTokens: 128000,
+      maxTokens: 32000,
     },
     "moonshotai/kimi-k2-thinking": {
       id: "moonshotai/kimi-k2-thinking",
@@ -2966,13 +3017,13 @@ export const MODELS = {
       reasoning: true,
       input: ["text"],
       cost: {
-        input: 0.04,
-        output: 0.39999999999999997,
+        input: 0,
+        output: 0,
         cacheRead: 0,
         cacheWrite: 0,
       },
       contextWindow: 131072,
-      maxTokens: 131072,
+      maxTokens: 4096,
     },
     "openai/gpt-oss-120b:exacto": {
       id: "openai/gpt-oss-120b:exacto",
@@ -3103,7 +3154,7 @@ export const MODELS = {
       input: ["text"],
       cost: {
         input: 0.35,
-        output: 1.55,
+        output: 1.5,
         cacheRead: 0,
         cacheWrite: 0,
       },
@@ -4963,6 +5014,23 @@ export const MODELS = {
       contextWindow: 128000,
       maxTokens: 16384,
     },
+    "meta-llama/llama-3.1-8b-instruct": {
+      id: "meta-llama/llama-3.1-8b-instruct",
+      name: "Meta: Llama 3.1 8B Instruct",
+      api: "openai-completions",
+      provider: "openrouter",
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: false,
+      input: ["text"],
+      cost: {
+        input: 0.02,
+        output: 0.03,
+        cacheRead: 0,
+        cacheWrite: 0,
+      },
+      contextWindow: 131072,
+      maxTokens: 16384,
+    },
     "meta-llama/llama-3.1-405b-instruct": {
       id: "meta-llama/llama-3.1-405b-instruct",
       name: "Meta: Llama 3.1 405B Instruct",
@@ -4997,23 +5065,6 @@ export const MODELS = {
       contextWindow: 131072,
       maxTokens: 4096,
     },
-    "meta-llama/llama-3.1-8b-instruct": {
-      id: "meta-llama/llama-3.1-8b-instruct",
-      name: "Meta: Llama 3.1 8B Instruct",
-      api: "openai-completions",
-      provider: "openrouter",
-      baseUrl: "https://openrouter.ai/api/v1",
-      reasoning: false,
-      input: ["text"],
-      cost: {
-        input: 0.02,
-        output: 0.03,
-        cacheRead: 0,
-        cacheWrite: 0,
-      },
-      contextWindow: 131072,
-      maxTokens: 16384,
-    },
     "mistralai/mistral-nemo": {
       id: "mistralai/mistral-nemo",
       name: "Mistral: Mistral Nemo",
@@ -5167,23 +5218,6 @@ export const MODELS = {
       contextWindow: 128000,
       maxTokens: 4096,
     },
-    "openai/gpt-4o-2024-05-13": {
-      id: "openai/gpt-4o-2024-05-13",
-      name: "OpenAI: GPT-4o (2024-05-13)",
-      api: "openai-completions",
-      provider: "openrouter",
-      baseUrl: "https://openrouter.ai/api/v1",
-      reasoning: false,
-      input: ["text", "image"],
-      cost: {
-        input: 5,
-        output: 15,
-        cacheRead: 0,
-        cacheWrite: 0,
-      },
-      contextWindow: 128000,
-      maxTokens: 4096,
-    },
     "openai/gpt-4o": {
       id: "openai/gpt-4o",
       name: "OpenAI: GPT-4o",
@@ -5218,22 +5252,22 @@ export const MODELS = {
       contextWindow: 128000,
       maxTokens: 64000,
     },
-    "meta-llama/llama-3-70b-instruct": {
-      id: "meta-llama/llama-3-70b-instruct",
-      name: "Meta: Llama 3 70B Instruct",
+    "openai/gpt-4o-2024-05-13": {
+      id: "openai/gpt-4o-2024-05-13",
+      name: "OpenAI: GPT-4o (2024-05-13)",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
       reasoning: false,
-      input: ["text"],
+      input: ["text", "image"],
       cost: {
-        input: 0.3,
-        output: 0.39999999999999997,
+        input: 5,
+        output: 15,
         cacheRead: 0,
         cacheWrite: 0,
       },
-      contextWindow: 8192,
-      maxTokens: 16384,
+      contextWindow: 128000,
+      maxTokens: 4096,
     },
     "meta-llama/llama-3-8b-instruct": {
       id: "meta-llama/llama-3-8b-instruct",
@@ -5252,6 +5286,23 @@ export const MODELS = {
       contextWindow: 8192,
       maxTokens: 16384,
     },
+    "meta-llama/llama-3-70b-instruct": {
+      id: "meta-llama/llama-3-70b-instruct",
+      name: "Meta: Llama 3 70B Instruct",
+      api: "openai-completions",
+      provider: "openrouter",
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: false,
+      input: ["text"],
+      cost: {
+        input: 0.3,
+        output: 0.39999999999999997,
+        cacheRead: 0,
+        cacheWrite: 0,
+      },
+      contextWindow: 8192,
+      maxTokens: 16384,
+    },
     "mistralai/mixtral-8x22b-instruct": {
       id: "mistralai/mixtral-8x22b-instruct",
       name: "Mistral: Mixtral 8x22B Instruct",
@@ -5473,9 +5524,9 @@ export const MODELS = {
       contextWindow: 16385,
       maxTokens: 4096,
     },
-    "openai/gpt-4": {
-      id: "openai/gpt-4",
-      name: "OpenAI: GPT-4",
+    "openai/gpt-4-0314": {
+      id: "openai/gpt-4-0314",
+      name: "OpenAI: GPT-4 (older v0314)",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
@@ -5507,9 +5558,9 @@ export const MODELS = {
       contextWindow: 16385,
       maxTokens: 4096,
     },
-    "openai/gpt-4-0314": {
-      id: "openai/gpt-4-0314",
-      name: "OpenAI: GPT-4 (older v0314)",
+    "openai/gpt-4": {
+      id: "openai/gpt-4",
+      name: "OpenAI: GPT-4",
       api: "openai-completions",
       provider: "openrouter",
       baseUrl: "https://openrouter.ai/api/v1",
@@ -5524,6 +5575,23 @@ export const MODELS = {
       contextWindow: 8191,
       maxTokens: 4096,
     },
+    "openrouter/auto": {
+      id: "openrouter/auto",
+      name: "OpenRouter: Auto Router",
+      api: "openai-completions",
+      provider: "openrouter",
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: true,
+      input: ["text", "image"],
+      cost: {
+        input: 0,
+        output: 0,
+        cacheRead: 0,
+        cacheWrite: 0,
+      },
+      contextWindow: 2000000,
+      maxTokens: 30000,
+    },
   },
 };
 //# sourceMappingURL=models.generated.js.map
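
Every entry touched by this diff shares the same shape: id, name, api, provider, baseUrl, reasoning, input, cost (input, output, cacheRead, cacheWrite), contextWindow, and maxTokens. As a rough illustration only, the sketch below copies the new "gpt-5.1" entry verbatim and estimates a per-request price from it, assuming the cost figures are USD per million tokens; that unit, the ModelEntry type, and the estimateCostUsd helper are assumptions made for this example and are not part of the package.

// Sketch only: ModelEntry and estimateCostUsd are hypothetical helpers, not package API.
// Assumption: cost figures are USD per 1M tokens (the diff itself does not state the unit).
type ModelEntry = {
  id: string;
  name: string;
  api: string;
  provider: string;
  baseUrl: string;
  reasoning: boolean;
  input: ("text" | "image")[];
  cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
  contextWindow: number;
  maxTokens: number;
};

// Values copied from the "gpt-5.1" entry added in this diff.
const gpt51: ModelEntry = {
  id: "gpt-5.1",
  name: "GPT-5.1",
  api: "openai-responses",
  provider: "openai",
  baseUrl: "https://api.openai.com/v1",
  reasoning: true,
  input: ["text", "image"],
  cost: { input: 1.25, output: 10, cacheRead: 0.13, cacheWrite: 0 },
  contextWindow: 400000,
  maxTokens: 128000,
};

// Rough per-request estimate under the $/1M-token assumption.
function estimateCostUsd(m: ModelEntry, inputTokens: number, outputTokens: number): number {
  return (inputTokens / 1e6) * m.cost.input + (outputTokens / 1e6) * m.cost.output;
}

console.log(estimateCostUsd(gpt51, 10_000, 2_000)); // 0.0325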