@mariozechner/pi-ai 0.18.1 → 0.18.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/models.generated.d.ts +169 -16
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +246 -93
- package/dist/models.generated.js.map +1 -1
- package/dist/providers/anthropic.d.ts.map +1 -1
- package/dist/providers/anthropic.js +2 -0
- package/dist/providers/anthropic.js.map +1 -1
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +8 -0
- package/dist/providers/openai-completions.js.map +1 -1
- package/package.json +1 -1
package/dist/models.generated.d.ts (+169 -16):

```diff
@@ -837,6 +837,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "gpt-5.2-pro": {
+            id: string;
+            name: string;
+            api: "openai-responses";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "gpt-4-turbo": {
             id: string;
             name: string;
@@ -888,6 +905,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "gpt-5.2-chat-latest": {
+            id: string;
+            name: string;
+            api: "openai-responses";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "gpt-5.1": {
             id: string;
             name: string;
@@ -1177,6 +1211,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "gpt-5.2": {
+            id: string;
+            name: string;
+            api: "openai-responses";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "gpt-5.1-chat-latest": {
             id: string;
             name: string;
@@ -2088,6 +2139,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "mistral-small-2506": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "ministral-3b-latest": {
             id: string;
             name: string;
@@ -2396,6 +2464,57 @@ export declare const MODELS: {
         };
     };
     readonly openrouter: {
+        readonly "openai/gpt-5.2-chat": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
+        readonly "openai/gpt-5.2-pro": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
+        readonly "openai/gpt-5.2": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "mistralai/devstral-2512:free": {
             id: string;
             name: string;
@@ -2413,6 +2532,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "mistralai/devstral-2512": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: "text"[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "relace/relace-search": {
             id: string;
             name: string;
@@ -2447,6 +2583,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "nex-agi/deepseek-v3.1-nex-n1:free": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: "text"[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "openai/gpt-5.1-codex-max": {
             id: string;
             name: string;
@@ -4187,7 +4340,7 @@ export declare const MODELS: {
             api: "openai-completions";
             provider: string;
             baseUrl: string;
-            reasoning:
+            reasoning: false;
             input: "text"[];
             cost: {
                 input: number;
@@ -5473,7 +5626,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "mistralai/ministral-
+        readonly "mistralai/ministral-8b": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5490,7 +5643,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "mistralai/ministral-
+        readonly "mistralai/ministral-3b": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5677,7 +5830,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.1-
+        readonly "meta-llama/llama-3.1-8b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5694,7 +5847,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.1-
+        readonly "meta-llama/llama-3.1-405b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5711,7 +5864,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3.1-
+        readonly "meta-llama/llama-3.1-70b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5864,7 +6017,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o": {
+        readonly "openai/gpt-4o-2024-05-13": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5881,7 +6034,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o
+        readonly "openai/gpt-4o": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5898,7 +6051,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4o
+        readonly "openai/gpt-4o:extended": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5915,7 +6068,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3-
+        readonly "meta-llama/llama-3-70b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -5932,7 +6085,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "meta-llama/llama-3-
+        readonly "meta-llama/llama-3-8b-instruct": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -6034,7 +6187,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-
+        readonly "openai/gpt-3.5-turbo-0613": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -6051,7 +6204,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-
+        readonly "openai/gpt-4-turbo-preview": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -6136,7 +6289,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-4": {
+        readonly "openai/gpt-4-0314": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -6153,7 +6306,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-
+        readonly "openai/gpt-4": {
             id: string;
             name: string;
             api: "openai-completions";
@@ -6170,7 +6323,7 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "openai/gpt-
+        readonly "openai/gpt-3.5-turbo": {
            id: string;
            name: string;
            api: "openai-completions";
```