@mariozechner/pi-ai 0.7.10 → 0.7.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/models.generated.d.ts +45 -79
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +101 -135
- package/dist/models.generated.js.map +1 -1
- package/package.json +1 -1
@@ -1850,6 +1850,40 @@ export declare const MODELS: {
         };
     };
     readonly openrouter: {
+        readonly "openrouter/sherlock-dash-alpha": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
+        readonly "openrouter/sherlock-think-alpha": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "openai/gpt-5.1": {
             id: string;
             name: string;
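For code that consumes the generated `MODELS` table, the two new `openrouter/sherlock-*` entries are read like any other declared model. A minimal sketch, assuming `MODELS` is re-exported from the package root (the import path is an assumption; only the entry shape shown in the declarations above is taken from the diff):

```ts
// Sketch only: the import path and the token counts below are assumptions;
// the entry shape (id, api, reasoning, cost, contextWindow, ...) comes from
// the generated .d.ts shown above.
import { MODELS } from "@mariozechner/pi-ai";

const sherlock = MODELS.openrouter["openrouter/sherlock-think-alpha"];

// `reasoning` is declared as the literal `true` for the think variant and
// `false` for the dash variant, so this check narrows at compile time.
if (sherlock.reasoning) {
  console.log(`${sherlock.name} supports reasoning`);
}

// Cost fields are plain numbers; the declaration does not state their unit,
// so none is assumed here.
const estimate = 1_000 * sherlock.cost.input + 500 * sherlock.cost.output;
console.log(`context window: ${sherlock.contextWindow}, est. cost units: ${estimate}`);
```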
@@ -3720,23 +3754,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.3-8b-instruct:free": {
-            id: string;
-            name: string;
-            api: "openai-completions";
-            provider: string;
-            baseUrl: string;
-            reasoning: false;
-            input: "text"[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "nousresearch/deephermes-3-mistral-24b-preview": {
             id: string;
             name: string;
@@ -4077,23 +4094,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-4-maverick:free": {
-            id: string;
-            name: string;
-            api: "openai-completions";
-            provider: string;
-            baseUrl: string;
-            reasoning: false;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "meta-llama/llama-4-maverick": {
             id: string;
             name: string;
@@ -4111,23 +4111,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-4-scout:free": {
-            id: string;
-            name: string;
-            api: "openai-completions";
-            provider: string;
-            baseUrl: string;
-            reasoning: false;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "meta-llama/llama-4-scout": {
             id: string;
             name: string;
@@ -4910,7 +4893,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "cohere/command-r-08-2024": {
+        readonly "cohere/command-r-plus-08-2024": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -4927,7 +4910,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "cohere/command-r-
+        readonly "cohere/command-r-08-2024": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5012,7 +4995,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.1-
+        readonly "meta-llama/llama-3.1-70b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5046,7 +5029,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.1-
+        readonly "meta-llama/llama-3.1-8b-instruct": {
            id: string;
             name: string;
             api: "openai-completions";
@@ -5080,23 +5063,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o-mini": {
-            id: string;
-            name: string;
-            api: "openai-completions";
-            provider: string;
-            baseUrl: string;
-            reasoning: false;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "openai/gpt-4o-mini-2024-07-18": {
             id: string;
             name: string;
@@ -5114,7 +5080,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "
+        readonly "openai/gpt-4o-mini": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5267,7 +5233,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3-
+        readonly "meta-llama/llama-3-70b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5284,7 +5250,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3-
+        readonly "meta-llama/llama-3-8b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5386,7 +5352,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-
+        readonly "openai/gpt-4-turbo-preview": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5403,7 +5369,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-
+        readonly "openai/gpt-3.5-turbo-0613": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5539,7 +5505,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-
+        readonly "openai/gpt-4": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5556,7 +5522,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-
+        readonly "openai/gpt-3.5-turbo": {
             id: string;
             name: string;
             api: "openai-completions";
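The removed `:free` variants (`meta-llama/llama-3.3-8b-instruct:free`, `meta-llama/llama-4-maverick:free`, `meta-llama/llama-4-scout:free`) mean that lookups by those string keys no longer type-check against the new declarations. One way to guard dynamic lookups is sketched below, using a hypothetical `hasModel` helper that is not part of the package:

```ts
// Sketch only: `hasModel` is a hypothetical helper, and the root import of
// MODELS is an assumption; the key names come from the diff above.
import { MODELS } from "@mariozechner/pi-ai";

type OpenRouterModels = (typeof MODELS)["openrouter"];

function hasModel(key: string): key is keyof OpenRouterModels {
  return key in MODELS.openrouter;
}

// "meta-llama/llama-4-scout:free" is gone in 0.7.11, so this takes the
// fallback branch instead of indexing a missing key.
const requested: string = "meta-llama/llama-4-scout:free";
const model = hasModel(requested)
  ? MODELS.openrouter[requested]
  : MODELS.openrouter["meta-llama/llama-4-scout"];

console.log(model.contextWindow);
```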