@mariozechner/pi-ai 0.18.2 → 0.18.4
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/dist/models.generated.d.ts +121 -2
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +134 -15
- package/dist/models.generated.js.map +1 -1
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +8 -0
- package/dist/providers/openai-completions.js.map +1 -1
- package/package.json +1 -1
@@ -837,6 +837,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "gpt-5.2-pro": {
+            id: string;
+            name: string;
+            api: "openai-responses";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "gpt-4-turbo": {
             id: string;
             name: string;
@@ -888,6 +905,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "gpt-5.2-chat-latest": {
+            id: string;
+            name: string;
+            api: "openai-responses";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "gpt-5.1": {
             id: string;
             name: string;
@@ -1177,6 +1211,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "gpt-5.2": {
+            id: string;
+            name: string;
+            api: "openai-responses";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "gpt-5.1-chat-latest": {
             id: string;
             name: string;
@@ -2088,6 +2139,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "mistral-small-2506": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "ministral-3b-latest": {
             id: string;
             name: string;
@@ -2396,6 +2464,57 @@ export declare const MODELS: {
         };
     };
     readonly openrouter: {
+        readonly "openai/gpt-5.2-chat": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: false;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
+        readonly "openai/gpt-5.2-pro": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
+        readonly "openai/gpt-5.2": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: ("image" | "text")[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "mistralai/devstral-2512:free": {
             id: string;
             name: string;
@@ -2470,7 +2589,7 @@ export declare const MODELS: {
             api: "openai-completions";
             provider: string;
             baseUrl: string;
-            reasoning:
+            reasoning: false;
             input: "text"[];
             cost: {
                 input: number;
@@ -4221,7 +4340,7 @@ export declare const MODELS: {
             api: "openai-completions";
             provider: string;
             baseUrl: string;
-            reasoning:
+            reasoning: false;
             input: "text"[];
             cost: {
                 input: number;
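
The added entries all share the same record shape (id, name, api, provider, baseUrl, reasoning, input, cost, contextWindow, maxTokens), so a consumer reads them like any existing model. The sketch below is illustrative only: it assumes MODELS is re-exported from the package root and that the first-party gpt-5.2 entries sit under an "openai" provider group, neither of which is visible in this diff.

// Sketch only: assumes MODELS is exported from the package root of
// @mariozechner/pi-ai; the diff above only shows its type declaration.
import { MODELS } from "@mariozechner/pi-ai";

// The OpenRouter mirrors are declared against the chat-completions API;
// "openai/gpt-5.2-chat" has reasoning: false, the other two reasoning: true.
const routed = MODELS.openrouter["openai/gpt-5.2-pro"];
console.log(routed.id, routed.contextWindow, routed.maxTokens);
if (routed.reasoning) {
    // reasoning-capable: a caller could enable reasoning-specific options here
}

// Assumption: the gpt-5.2 family lives under an "openai" provider group;
// the group name itself does not appear in the hunks above.
const gpt52 = MODELS.openai["gpt-5.2"];

// Cost fields are plain numbers in the declarations; their unit (for example
// USD per million tokens) is not shown in this diff.
const { input, output, cacheRead, cacheWrite } = gpt52.cost;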