@mariozechner/pi-ai 0.7.5 → 0.7.8

This diff compares the contents of publicly released versions of this package as they appear in their respective registries, and is provided for informational purposes only.
models.generated.d.ts

@@ -803,6 +803,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "gpt-5.1": {
+            id: string;
+            name: string;
+            api: "openai-responses";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "codex-mini-latest": {
             id: string;
             name: string;
@@ -1833,14 +1850,14 @@ export declare const MODELS: {
         };
     };
     readonly openrouter: {
-        readonly "kwaipilot/kat-coder-pro:free": {
+        readonly "openai/gpt-5.1": {
             id: string;
             name: string;
             api: "openai-completions";
             provider: string;
             baseUrl: string;
-            reasoning: false;
-            input: "text"[];
+            reasoning: true;
+            input: ("image" | "text")[];
             cost: {
                 input: number;
                 output: number;
@@ -1850,13 +1867,13 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openrouter/polaris-alpha": {
+        readonly "openai/gpt-5.1-codex": {
             id: string;
             name: string;
             api: "openai-completions";
             provider: string;
             baseUrl: string;
-            reasoning: false;
+            reasoning: true;
             input: ("image" | "text")[];
             cost: {
                 input: number;
@@ -1867,6 +1884,40 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "openai/gpt-5.1-codex-mini": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
+        readonly "kwaipilot/kat-coder-pro:free": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: "text"[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "moonshotai/kimi-k2-thinking": {
             id: string;
             name: string;
@@ -4961,7 +5012,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.1-405b-instruct": {
+        readonly "meta-llama/llama-3.1-8b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -4978,7 +5029,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.1-70b-instruct": {
+        readonly "meta-llama/llama-3.1-405b-instruct": {
            id: string;
            name: string;
            api: "openai-completions";
@@ -4995,7 +5046,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.1-8b-instruct": {
+        readonly "meta-llama/llama-3.1-70b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5165,7 +5216,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o-2024-05-13": {
+        readonly "openai/gpt-4o": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5182,7 +5233,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o": {
+        readonly "openai/gpt-4o:extended": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5199,7 +5250,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o:extended": {
+        readonly "openai/gpt-4o-2024-05-13": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5216,7 +5267,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3-70b-instruct": {
+        readonly "meta-llama/llama-3-8b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5233,7 +5284,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3-8b-instruct": {
+        readonly "meta-llama/llama-3-70b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5471,7 +5522,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4": {
+        readonly "openai/gpt-4-0314": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5505,7 +5556,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4-0314": {
+        readonly "openai/gpt-4": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5522,6 +5573,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "openrouter/auto": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
     };
 };
 //# sourceMappingURL=models.generated.d.ts.map
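The declarations above only describe the shape of each MODELS entry; the concrete values behind the number and string fields (costs, context window sizes, identifiers) live in the generated implementation, not in this diff. As a minimal TypeScript sketch of how one of the newly added entries might be consumed, assuming MODELS is importable from the package root (the diff shows only the declaration file, so the exact import path is an assumption):

import { MODELS } from "@mariozechner/pi-ai";

// "openai/gpt-5.1" under the openrouter group is one of the keys added in this range of versions.
const model = MODELS.openrouter["openai/gpt-5.1"];

// Every field accessed here is typed in the declarations above. The units of the
// cost fields (per token vs. per million tokens) are not expressed by the types,
// so consult the package documentation before doing arithmetic with them.
console.log(model.id, model.name);
console.log("context window:", model.contextWindow, "max tokens:", model.maxTokens);
console.log("input cost:", model.cost.input, "output cost:", model.cost.output);
console.log("supports images:", model.input.includes("image"));

Because the model tables are emitted as readonly literal types, lookups like the one above are fully type-checked: referencing a key that does not exist in this version of the package is a compile-time error rather than a runtime surprise.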