@mariozechner/pi-ai 0.7.4 → 0.7.7

@@ -803,6 +803,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "gpt-5.1": {
+            id: string;
+            name: string;
+            api: "openai-responses";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "codex-mini-latest": {
             id: string;
             name: string;
@@ -1833,14 +1850,14 @@ export declare const MODELS: {
         };
     };
     readonly openrouter: {
-        readonly "kwaipilot/kat-coder-pro:free": {
+        readonly "openai/gpt-5.1": {
             id: string;
             name: string;
             api: "openai-completions";
             provider: string;
             baseUrl: string;
-            reasoning: false;
-            input: "text"[];
+            reasoning: true;
+            input: ("image" | "text")[];
             cost: {
                 input: number;
                 output: number;
@@ -1850,13 +1867,13 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openrouter/polaris-alpha": {
+        readonly "openai/gpt-5.1-codex": {
             id: string;
             name: string;
             api: "openai-completions";
             provider: string;
             baseUrl: string;
-            reasoning: false;
+            reasoning: true;
             input: ("image" | "text")[];
             cost: {
                 input: number;
@@ -1867,6 +1884,40 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "openai/gpt-5.1-codex-mini": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
+        readonly "kwaipilot/kat-coder-pro:free": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: "text"[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "moonshotai/kimi-k2-thinking": {
             id: string;
             name: string;
@@ -4723,7 +4774,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "mistralai/ministral-8b": {
+        readonly "mistralai/ministral-3b": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -4740,7 +4791,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "mistralai/ministral-3b": {
+        readonly "mistralai/ministral-8b": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5029,7 +5080,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o-mini-2024-07-18": {
+        readonly "openai/gpt-4o-mini": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5046,7 +5097,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o-mini": {
+        readonly "openai/gpt-4o-mini-2024-07-18": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5165,7 +5216,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o-2024-05-13": {
+        readonly "openai/gpt-4o": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5182,7 +5233,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o": {
+        readonly "openai/gpt-4o:extended": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5199,7 +5250,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o:extended": {
+        readonly "openai/gpt-4o-2024-05-13": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5216,7 +5267,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3-70b-instruct": {
+        readonly "meta-llama/llama-3-8b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5233,7 +5284,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3-8b-instruct": {
+        readonly "meta-llama/llama-3-70b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5488,7 +5539,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4": {
+        readonly "openai/gpt-3.5-turbo": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5505,7 +5556,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-3.5-turbo": {
+        readonly "openai/gpt-4": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5522,6 +5573,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "openrouter/auto": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
     };
 };
 //# sourceMappingURL=models.generated.d.ts.map
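
For orientation, a minimal sketch of how the newly added OpenRouter entries could be read from the MODELS constant declared above. This is an illustrative assumption, not documented usage: it presumes MODELS is re-exported from the package root (the declaration itself lives in models.generated.d.ts); the key and field names come directly from the diff.

// Sketch only: assumes the generated MODELS constant is re-exported from the package root.
import { MODELS } from "@mariozechner/pi-ai";

// New in this release: GPT-5.1 entries under the `openrouter` provider key.
const gpt51 = MODELS.openrouter["openai/gpt-5.1"];

// Fields follow the declared shape: string metadata, numeric limits, and a cost object.
console.log(gpt51.id, gpt51.name, gpt51.baseUrl);
console.log(gpt51.contextWindow, gpt51.maxTokens);
console.log(gpt51.cost.input, gpt51.cost.output, gpt51.cost.cacheRead, gpt51.cost.cacheWrite);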