@mariozechner/pi-ai 0.36.0 → 0.37.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/models.d.ts +1 -1
- package/dist/models.d.ts.map +1 -1
- package/dist/models.generated.d.ts +17 -425
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +20 -428
- package/dist/models.generated.js.map +1 -1
- package/dist/models.js +3 -2
- package/dist/models.js.map +1 -1
- package/dist/providers/openai-codex/request-transformer.d.ts.map +1 -1
- package/dist/providers/openai-codex/request-transformer.js +10 -5
- package/dist/providers/openai-codex/request-transformer.js.map +1 -1
- package/dist/providers/openai-codex-responses.d.ts.map +1 -1
- package/dist/providers/openai-codex-responses.js +2 -0
- package/dist/providers/openai-codex-responses.js.map +1 -1
- package/dist/stream.d.ts.map +1 -1
- package/dist/stream.js.map +1 -1
- package/dist/types.d.ts +2 -2
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/oauth/github-copilot.d.ts +2 -0
- package/dist/utils/oauth/github-copilot.d.ts.map +1 -1
- package/dist/utils/oauth/github-copilot.js +28 -5
- package/dist/utils/oauth/github-copilot.js.map +1 -1
- package/dist/utils/oauth/google-antigravity.d.ts +3 -1
- package/dist/utils/oauth/google-antigravity.d.ts.map +1 -1
- package/dist/utils/oauth/google-antigravity.js +100 -19
- package/dist/utils/oauth/google-antigravity.js.map +1 -1
- package/dist/utils/oauth/google-gemini-cli.d.ts +3 -1
- package/dist/utils/oauth/google-gemini-cli.d.ts.map +1 -1
- package/dist/utils/oauth/google-gemini-cli.js +100 -19
- package/dist/utils/oauth/google-gemini-cli.js.map +1 -1
- package/dist/utils/oauth/index.d.ts.map +1 -1
- package/dist/utils/oauth/index.js +5 -5
- package/dist/utils/oauth/index.js.map +1 -1
- package/dist/utils/oauth/openai-codex.d.ts +8 -0
- package/dist/utils/oauth/openai-codex.d.ts.map +1 -1
- package/dist/utils/oauth/openai-codex.js +67 -3
- package/dist/utils/oauth/openai-codex.js.map +1 -1
- package/package.json +1 -1
package/dist/models.d.ts
CHANGED
@@ -9,7 +9,7 @@ export declare function getModels<TProvider extends KnownProvider>(provider: TPr
 export declare function calculateCost<TApi extends Api>(model: Model<TApi>, usage: Usage): Usage["cost"];
 /**
  * Check if a model supports xhigh thinking level.
- * Currently only certain OpenAI models support this.
+ * Currently only certain OpenAI Codex models support this.
  */
 export declare function supportsXhigh<TApi extends Api>(model: Model<TApi>): boolean;
 /**
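The only textual change in models.d.ts is the doc comment on supportsXhigh, which now matches the allow-list in the bundled source (gpt-5.1-codex-max, gpt-5.2, gpt-5.2-codex). A minimal usage sketch against these declarations; the provider key "openai-codex" and the root re-export are assumptions, not shown in this hunk:

import { getModel, supportsXhigh } from "@mariozechner/pi-ai";

// Assumed provider/model ids; "gpt-5.2-codex" is listed in XHIGH_MODELS in the
// source embedded in the sourcemap below, so this would resolve to "xhigh".
const model = getModel("openai-codex", "gpt-5.2-codex");
const effort = supportsXhigh(model) ? "xhigh" : "high";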
package/dist/models.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"models.d.ts","sourceRoot":"","sources":["../src/models.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,MAAM,uBAAuB,CAAC;AAC/C,OAAO,KAAK,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,KAAK,EAAE,MAAM,YAAY,CAAC;AAanE,KAAK,QAAQ,CACZ,SAAS,SAAS,aAAa,EAC/B,QAAQ,SAAS,MAAM,CAAC,OAAO,MAAM,CAAC,CAAC,SAAS,CAAC,IAC9C,CAAC,OAAO,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,QAAQ,CAAC,SAAS;IAAE,GAAG,EAAE,MAAM,IAAI,CAAA;CAAE,GAAG,CAAC,IAAI,SAAS,GAAG,GAAG,IAAI,GAAG,KAAK,CAAC,GAAG,KAAK,CAAC;AAEjH,wBAAgB,QAAQ,CAAC,SAAS,SAAS,aAAa,EAAE,QAAQ,SAAS,MAAM,CAAC,OAAO,MAAM,CAAC,CAAC,SAAS,CAAC,EAC1G,QAAQ,EAAE,SAAS,EACnB,OAAO,EAAE,QAAQ,GACf,KAAK,CAAC,QAAQ,CAAC,SAAS,EAAE,QAAQ,CAAC,CAAC,
+{"version":3,"file":"models.d.ts","sourceRoot":"","sources":["../src/models.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,MAAM,uBAAuB,CAAC;AAC/C,OAAO,KAAK,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,KAAK,EAAE,MAAM,YAAY,CAAC;AAanE,KAAK,QAAQ,CACZ,SAAS,SAAS,aAAa,EAC/B,QAAQ,SAAS,MAAM,CAAC,OAAO,MAAM,CAAC,CAAC,SAAS,CAAC,IAC9C,CAAC,OAAO,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,QAAQ,CAAC,SAAS;IAAE,GAAG,EAAE,MAAM,IAAI,CAAA;CAAE,GAAG,CAAC,IAAI,SAAS,GAAG,GAAG,IAAI,GAAG,KAAK,CAAC,GAAG,KAAK,CAAC;AAEjH,wBAAgB,QAAQ,CAAC,SAAS,SAAS,aAAa,EAAE,QAAQ,SAAS,MAAM,CAAC,OAAO,MAAM,CAAC,CAAC,SAAS,CAAC,EAC1G,QAAQ,EAAE,SAAS,EACnB,OAAO,EAAE,QAAQ,GACf,KAAK,CAAC,QAAQ,CAAC,SAAS,EAAE,QAAQ,CAAC,CAAC,CAGtC;AAED,wBAAgB,YAAY,IAAI,aAAa,EAAE,CAE9C;AAED,wBAAgB,SAAS,CAAC,SAAS,SAAS,aAAa,EACxD,QAAQ,EAAE,SAAS,GACjB,KAAK,CAAC,QAAQ,CAAC,SAAS,EAAE,MAAM,CAAC,OAAO,MAAM,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAGhE;AAED,wBAAgB,aAAa,CAAC,IAAI,SAAS,GAAG,EAAE,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,EAAE,KAAK,EAAE,KAAK,GAAG,KAAK,CAAC,MAAM,CAAC,CAO/F;AAKD;;;GAGG;AACH,wBAAgB,aAAa,CAAC,IAAI,SAAS,GAAG,EAAE,KAAK,EAAE,KAAK,CAAC,IAAI,CAAC,GAAG,OAAO,CAE3E;AAED;;;GAGG;AACH,wBAAgB,cAAc,CAAC,IAAI,SAAS,GAAG,EAC9C,CAAC,EAAE,KAAK,CAAC,IAAI,CAAC,GAAG,IAAI,GAAG,SAAS,EACjC,CAAC,EAAE,KAAK,CAAC,IAAI,CAAC,GAAG,IAAI,GAAG,SAAS,GAC/B,OAAO,CAGT","sourcesContent":["import { MODELS } from \"./models.generated.js\";\nimport type { Api, KnownProvider, Model, Usage } from \"./types.js\";\n\nconst modelRegistry: Map<string, Map<string, Model<Api>>> = new Map();\n\n// Initialize registry from MODELS on module load\nfor (const [provider, models] of Object.entries(MODELS)) {\n\tconst providerModels = new Map<string, Model<Api>>();\n\tfor (const [id, model] of Object.entries(models)) {\n\t\tproviderModels.set(id, model as Model<Api>);\n\t}\n\tmodelRegistry.set(provider, providerModels);\n}\n\ntype ModelApi<\n\tTProvider extends KnownProvider,\n\tTModelId extends keyof (typeof MODELS)[TProvider],\n> = (typeof MODELS)[TProvider][TModelId] extends { api: infer TApi } ? (TApi extends Api ? TApi : never) : never;\n\nexport function getModel<TProvider extends KnownProvider, TModelId extends keyof (typeof MODELS)[TProvider]>(\n\tprovider: TProvider,\n\tmodelId: TModelId,\n): Model<ModelApi<TProvider, TModelId>> {\n\tconst providerModels = modelRegistry.get(provider);\n\treturn providerModels?.get(modelId as string) as Model<ModelApi<TProvider, TModelId>>;\n}\n\nexport function getProviders(): KnownProvider[] {\n\treturn Array.from(modelRegistry.keys()) as KnownProvider[];\n}\n\nexport function getModels<TProvider extends KnownProvider>(\n\tprovider: TProvider,\n): Model<ModelApi<TProvider, keyof (typeof MODELS)[TProvider]>>[] {\n\tconst models = modelRegistry.get(provider);\n\treturn models ? (Array.from(models.values()) as Model<ModelApi<TProvider, keyof (typeof MODELS)[TProvider]>>[]) : [];\n}\n\nexport function calculateCost<TApi extends Api>(model: Model<TApi>, usage: Usage): Usage[\"cost\"] {\n\tusage.cost.input = (model.cost.input / 1000000) * usage.input;\n\tusage.cost.output = (model.cost.output / 1000000) * usage.output;\n\tusage.cost.cacheRead = (model.cost.cacheRead / 1000000) * usage.cacheRead;\n\tusage.cost.cacheWrite = (model.cost.cacheWrite / 1000000) * usage.cacheWrite;\n\tusage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;\n\treturn usage.cost;\n}\n\n/** Models that support xhigh thinking level */\nconst XHIGH_MODELS = new Set([\"gpt-5.1-codex-max\", \"gpt-5.2\", \"gpt-5.2-codex\"]);\n\n/**\n * Check if a model supports xhigh thinking level.\n * Currently only certain OpenAI Codex models support this.\n */\nexport function supportsXhigh<TApi extends Api>(model: Model<TApi>): boolean {\n\treturn XHIGH_MODELS.has(model.id);\n}\n\n/**\n * Check if two models are equal by comparing both their id and provider.\n * Returns false if either model is null or undefined.\n */\nexport function modelsAreEqual<TApi extends Api>(\n\ta: Model<TApi> | null | undefined,\n\tb: Model<TApi> | null | undefined,\n): boolean {\n\tif (!a || !b) return false;\n\treturn a.id === b.id && a.provider === b.provider;\n}\n"]}
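The new map also embeds models.ts via sourcesContent, which makes the cost accounting visible: calculateCost treats model.cost.* as USD per million tokens. A worked example with hypothetical rates (the numbers below are illustrative, not taken from the registry):

// Mirrors the embedded source: usage.cost.input = (model.cost.input / 1000000) * usage.input
const inputRate = 1.25;  // hypothetical USD per 1M input tokens
const outputRate = 10;   // hypothetical USD per 1M output tokens
const inputCost = (inputRate / 1_000_000) * 2_000;  // 0.0025 USD for 2,000 input tokens
const outputCost = (outputRate / 1_000_000) * 500;  // 0.0050 USD for 500 output tokens
const total = inputCost + outputCost;                // 0.0075 USD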
package/dist/models.generated.d.ts
CHANGED
@@ -2972,40 +2972,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "gpt-5-codex-mini-high": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5-codex-mini-medium": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "gpt-5-mini": {
             id: string;
             name: string;
@@ -3091,40 +3057,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "gpt-5.1-codex-high": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-codex-low": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "gpt-5.1-codex-max": {
             id: string;
             name: string;
@@ -3142,91 +3074,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "gpt-5.1-codex-max-high": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-codex-max-low": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-codex-max-medium": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-codex-max-xhigh": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-codex-medium": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "gpt-5.1-codex-mini": {
             id: string;
             name: string;
@@ -3244,108 +3091,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "gpt-5.1-codex-mini-high": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-codex-mini-medium": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-high": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-low": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-medium": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.1-none": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "gpt-5.2": {
             id: string;
             name: string;
@@ -3380,159 +3125,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "gpt-5.2-codex-high": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.2-codex-low": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.2-codex-medium": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.2-codex-xhigh": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.2-high": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.2-low": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.2-medium": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.2-none": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
-        readonly "gpt-5.2-xhigh": {
-            id: string;
-            name: string;
-            api: "openai-codex-responses";
-            provider: string;
-            baseUrl: string;
-            reasoning: true;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
     };
     readonly openrouter: {
         readonly "ai21/jamba-large-1.7": {
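The hunks above drop the effort-suffixed entries (-low, -medium, -high, -xhigh, and the 5.1 -none variants) from the generated MODELS map; only base ids such as gpt-5.1-codex-max and gpt-5.2-codex remain. Since getModel resolves ids through a Map (see the source embedded in the sourcemap above), a stale suffixed id now comes back undefined at runtime. A hedged migration sketch; the root import path and the idea of carrying effort as a separate thinking-level setting are assumptions, not shown in this diff:

import { getModel } from "@mariozechner/pi-ai";

// Old id removed in 0.37.0 -> the Map lookup yields undefined at runtime.
const stale = getModel("openai-codex", "gpt-5.1-codex-max-high" as never);

// Assumed migration path: keep the base id and express effort separately.
const model = getModel("openai-codex", "gpt-5.1-codex-max");
const thinkingLevel = "high"; // passed via whatever reasoning option the caller uses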
@@ -3705,23 +3297,6 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
-        readonly "anthropic/claude-3-opus": {
-            id: string;
-            name: string;
-            api: "openai-completions";
-            provider: string;
-            baseUrl: string;
-            reasoning: false;
-            input: ("image" | "text")[];
-            cost: {
-                input: number;
-                output: number;
-                cacheRead: number;
-                cacheWrite: number;
-            };
-            contextWindow: number;
-            maxTokens: number;
-        };
         readonly "anthropic/claude-3.5-haiku": {
             id: string;
             name: string;
@@ -7207,6 +6782,23 @@ export declare const MODELS: {
             contextWindow: number;
             maxTokens: number;
         };
+        readonly "tngtech/tng-r1t-chimera:free": {
+            id: string;
+            name: string;
+            api: "openai-completions";
+            provider: string;
+            baseUrl: string;
+            reasoning: true;
+            input: "text"[];
+            cost: {
+                input: number;
+                output: number;
+                cacheRead: number;
+                cacheWrite: number;
+            };
+            contextWindow: number;
+            maxTokens: number;
+        };
         readonly "x-ai/grok-3": {
             id: string;
             name: string;
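On the OpenRouter side, the only change visible here is the new free TNG R1T Chimera entry (text-only input, reasoning enabled). A quick lookup sketch, assuming the provider key is "openrouter" as the surrounding declarations suggest and that the package root re-exports these helpers:

import { getModel } from "@mariozechner/pi-ai";

// New in 0.37.0 per the hunk above; concrete cost and window values live in MODELS.
const chimera = getModel("openrouter", "tngtech/tng-r1t-chimera:free");
console.log(chimera.contextWindow, chimera.cost.input);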