@mariozechner/pi-ai 0.35.0 → 0.36.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/dist/cli.d.ts.map +1 -1
  2. package/dist/cli.js +17 -1
  3. package/dist/cli.js.map +1 -1
  4. package/dist/models.generated.d.ts +640 -43
  5. package/dist/models.generated.d.ts.map +1 -1
  6. package/dist/models.generated.js +673 -76
  7. package/dist/models.generated.js.map +1 -1
  8. package/dist/providers/openai-codex/constants.d.ts +21 -0
  9. package/dist/providers/openai-codex/constants.d.ts.map +1 -0
  10. package/dist/providers/openai-codex/constants.js +21 -0
  11. package/dist/providers/openai-codex/constants.js.map +1 -0
  12. package/dist/providers/openai-codex/prompts/codex-instructions.md +105 -0
  13. package/dist/providers/openai-codex/prompts/codex.d.ts +11 -0
  14. package/dist/providers/openai-codex/prompts/codex.d.ts.map +1 -0
  15. package/dist/providers/openai-codex/prompts/codex.js +184 -0
  16. package/dist/providers/openai-codex/prompts/codex.js.map +1 -0
  17. package/dist/providers/openai-codex/prompts/pi-codex-bridge.d.ts +6 -0
  18. package/dist/providers/openai-codex/prompts/pi-codex-bridge.d.ts.map +1 -0
  19. package/dist/providers/openai-codex/prompts/pi-codex-bridge.js +48 -0
  20. package/dist/providers/openai-codex/prompts/pi-codex-bridge.js.map +1 -0
  21. package/dist/providers/openai-codex/request-transformer.d.ts +41 -0
  22. package/dist/providers/openai-codex/request-transformer.d.ts.map +1 -0
  23. package/dist/providers/openai-codex/request-transformer.js +242 -0
  24. package/dist/providers/openai-codex/request-transformer.js.map +1 -0
  25. package/dist/providers/openai-codex/response-handler.d.ts +19 -0
  26. package/dist/providers/openai-codex/response-handler.d.ts.map +1 -0
  27. package/dist/providers/openai-codex/response-handler.js +107 -0
  28. package/dist/providers/openai-codex/response-handler.js.map +1 -0
  29. package/dist/providers/openai-codex-responses.d.ts +10 -0
  30. package/dist/providers/openai-codex-responses.d.ts.map +1 -0
  31. package/dist/providers/openai-codex-responses.js +528 -0
  32. package/dist/providers/openai-codex-responses.js.map +1 -0
  33. package/dist/stream.d.ts.map +1 -1
  34. package/dist/stream.js +27 -1
  35. package/dist/stream.js.map +1 -1
  36. package/dist/types.d.ts +4 -2
  37. package/dist/types.d.ts.map +1 -1
  38. package/dist/types.js.map +1 -1
  39. package/dist/utils/oauth/index.d.ts +1 -0
  40. package/dist/utils/oauth/index.d.ts.map +1 -1
  41. package/dist/utils/oauth/index.js +11 -0
  42. package/dist/utils/oauth/index.js.map +1 -1
  43. package/dist/utils/oauth/openai-codex.d.ts +20 -0
  44. package/dist/utils/oauth/openai-codex.d.ts.map +1 -0
  45. package/dist/utils/oauth/openai-codex.js +278 -0
  46. package/dist/utils/oauth/openai-codex.js.map +1 -0
  47. package/dist/utils/oauth/types.d.ts +2 -1
  48. package/dist/utils/oauth/types.d.ts.map +1 -1
  49. package/dist/utils/oauth/types.js.map +1 -1
  50. package/package.json +2 -2
@@ -2770,6 +2770,637 @@ export const MODELS = {
  maxTokens: 100000,
  },
  },
+ "openai-codex": {
+ "codex-mini-latest": {
+ id: "codex-mini-latest",
+ name: "Codex Mini Latest",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5": {
+ id: "gpt-5",
+ name: "gpt-5",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5-codex": {
+ id: "gpt-5-codex",
+ name: "gpt-5-codex",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5-codex-mini": {
+ id: "gpt-5-codex-mini",
+ name: "gpt-5-codex-mini",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5-codex-mini-high": {
+ id: "gpt-5-codex-mini-high",
+ name: "gpt-5-codex-mini-high",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5-codex-mini-medium": {
+ id: "gpt-5-codex-mini-medium",
+ name: "gpt-5-codex-mini-medium",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5-mini": {
+ id: "gpt-5-mini",
+ name: "gpt-5-mini",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5-nano": {
+ id: "gpt-5-nano",
+ name: "gpt-5-nano",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1": {
+ id: "gpt-5.1",
+ name: "GPT-5.1",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-chat-latest": {
+ id: "gpt-5.1-chat-latest",
+ name: "gpt-5.1-chat-latest",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex": {
+ id: "gpt-5.1-codex",
+ name: "GPT-5.1 Codex",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-high": {
+ id: "gpt-5.1-codex-high",
+ name: "gpt-5.1-codex-high",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-low": {
+ id: "gpt-5.1-codex-low",
+ name: "gpt-5.1-codex-low",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-max": {
+ id: "gpt-5.1-codex-max",
+ name: "GPT-5.1 Codex Max",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-max-high": {
+ id: "gpt-5.1-codex-max-high",
+ name: "gpt-5.1-codex-max-high",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-max-low": {
+ id: "gpt-5.1-codex-max-low",
+ name: "gpt-5.1-codex-max-low",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-max-medium": {
+ id: "gpt-5.1-codex-max-medium",
+ name: "gpt-5.1-codex-max-medium",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-max-xhigh": {
+ id: "gpt-5.1-codex-max-xhigh",
+ name: "gpt-5.1-codex-max-xhigh",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-medium": {
+ id: "gpt-5.1-codex-medium",
+ name: "gpt-5.1-codex-medium",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-mini": {
+ id: "gpt-5.1-codex-mini",
+ name: "GPT-5.1 Codex Mini",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-mini-high": {
+ id: "gpt-5.1-codex-mini-high",
+ name: "gpt-5.1-codex-mini-high",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-codex-mini-medium": {
+ id: "gpt-5.1-codex-mini-medium",
+ name: "gpt-5.1-codex-mini-medium",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-high": {
+ id: "gpt-5.1-high",
+ name: "gpt-5.1-high",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-low": {
+ id: "gpt-5.1-low",
+ name: "gpt-5.1-low",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-medium": {
+ id: "gpt-5.1-medium",
+ name: "gpt-5.1-medium",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.1-none": {
+ id: "gpt-5.1-none",
+ name: "gpt-5.1-none",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2": {
+ id: "gpt-5.2",
+ name: "GPT-5.2",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-codex": {
+ id: "gpt-5.2-codex",
+ name: "GPT-5.2 Codex",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-codex-high": {
+ id: "gpt-5.2-codex-high",
+ name: "gpt-5.2-codex-high",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-codex-low": {
+ id: "gpt-5.2-codex-low",
+ name: "gpt-5.2-codex-low",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-codex-medium": {
+ id: "gpt-5.2-codex-medium",
+ name: "gpt-5.2-codex-medium",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-codex-xhigh": {
+ id: "gpt-5.2-codex-xhigh",
+ name: "gpt-5.2-codex-xhigh",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-high": {
+ id: "gpt-5.2-high",
+ name: "gpt-5.2-high",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-low": {
+ id: "gpt-5.2-low",
+ name: "gpt-5.2-low",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-medium": {
+ id: "gpt-5.2-medium",
+ name: "gpt-5.2-medium",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-none": {
+ id: "gpt-5.2-none",
+ name: "gpt-5.2-none",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ "gpt-5.2-xhigh": {
+ id: "gpt-5.2-xhigh",
+ name: "gpt-5.2-xhigh",
+ api: "openai-codex-responses",
+ provider: "openai-codex",
+ baseUrl: "https://chatgpt.com/backend-api",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 400000,
+ maxTokens: 128000,
+ },
+ },
  "openrouter": {
  "ai21/jamba-large-1.7": {
  id: "ai21/jamba-large-1.7",
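The hunk above registers a new "openai-codex" provider whose entries all use the "openai-codex-responses" API against the https://chatgpt.com/backend-api base URL, with zeroed cost fields, a 400000-token context window, and 128000 max output tokens. A minimal consumer-side sketch, assuming MODELS is re-exported from the package entry point (per this diff it is defined in dist/models.generated.js; adjust the import path if it is not re-exported):

```typescript
// Minimal sketch: reading one of the new openai-codex entries from the generated registry.
// Assumption: MODELS is re-exported from the package root.
import { MODELS } from "@mariozechner/pi-ai";

const model = MODELS["openai-codex"]["gpt-5.1-codex"];

console.log(model.api); // "openai-codex-responses"
console.log(model.baseUrl); // "https://chatgpt.com/backend-api"
console.log(model.contextWindow); // 400000
console.log(model.maxTokens); // 128000
```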
@@ -2822,23 +3453,6 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 131072,
  },
- "alibaba/tongyi-deepresearch-30b-a3b:free": {
- id: "alibaba/tongyi-deepresearch-30b-a3b:free",
- name: "Tongyi DeepResearch 30B A3B (free)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: true,
- input: ["text"],
- cost: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 131072,
- maxTokens: 131072,
- },
  "allenai/olmo-3-7b-instruct": {
  id: "allenai/olmo-3-7b-instruct",
  name: "AllenAI: Olmo 3 7B Instruct",
@@ -3392,13 +4006,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.19999999999999998,
- output: 0.88,
- cacheRead: 0.106,
+ input: 0.19,
+ output: 0.87,
+ cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 163840,
- maxTokens: 4096,
+ maxTokens: 65536,
  },
  "deepseek/deepseek-chat-v3.1": {
  id: "deepseek/deepseek-chat-v3.1",
@@ -3426,13 +4040,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.3,
- output: 1.2,
+ input: 0.7,
+ output: 2.4,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 163840,
- maxTokens: 4096,
+ maxTokens: 163840,
  },
  "deepseek/deepseek-r1-0528": {
  id: "deepseek/deepseek-r1-0528",
@@ -3808,6 +4422,23 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 16384,
  },
+ "kwaipilot/kat-coder-pro": {
+ id: "kwaipilot/kat-coder-pro",
+ name: "Kwaipilot: KAT-Coder-Pro V1",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.207,
+ output: 0.828,
+ cacheRead: 0.0414,
+ cacheWrite: 0,
+ },
+ contextWindow: 256000,
+ maxTokens: 128000,
+ },
  "kwaipilot/kat-coder-pro:free": {
  id: "kwaipilot/kat-coder-pro:free",
  name: "Kwaipilot: KAT-Coder-Pro V1 (free)",
@@ -4633,13 +5264,13 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.456,
- output: 1.8399999999999999,
+ input: 0.5,
+ output: 2.4,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 131072,
+ maxTokens: 4096,
  },
  "moonshotai/kimi-k2-0905": {
  id: "moonshotai/kimi-k2-0905",
@@ -4684,13 +5315,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.39999999999999997,
- output: 1.75,
+ input: 0.32,
+ output: 0.48,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 262144,
- maxTokens: 65535,
+ maxTokens: 4096,
  },
  "nex-agi/deepseek-v3.1-nex-n1:free": {
  id: "nex-agi/deepseek-v3.1-nex-n1:free",
@@ -4726,23 +5357,6 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 32768,
  },
- "nousresearch/hermes-4-405b": {
- id: "nousresearch/hermes-4-405b",
- name: "Nous: Hermes 4 405B",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: true,
- input: ["text"],
- cost: {
- input: 0.3,
- output: 1.2,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 131072,
- maxTokens: 131072,
- },
  "nousresearch/hermes-4-70b": {
  id: "nousresearch/hermes-4-70b",
  name: "Nous: Hermes 4 70B",
@@ -5557,7 +6171,7 @@ export const MODELS = {
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 131072,
+ maxTokens: 4096,
  },
  "openai/gpt-oss-safeguard-20b": {
  id: "openai/gpt-oss-safeguard-20b",
@@ -6214,8 +6828,8 @@ export const MODELS = {
  reasoning: false,
  input: ["text", "image"],
  cost: {
- input: 0.19999999999999998,
- output: 1.2,
+ input: 0.12,
+ output: 0.56,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -6231,8 +6845,8 @@ export const MODELS = {
  reasoning: true,
  input: ["text", "image"],
  cost: {
- input: 0.3,
- output: 1.2,
+ input: 0.44999999999999996,
+ output: 3.5,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -6460,23 +7074,6 @@ export const MODELS = {
  contextWindow: 163840,
  maxTokens: 65536,
  },
- "tngtech/tng-r1t-chimera:free": {
- id: "tngtech/tng-r1t-chimera:free",
- name: "TNG: R1T Chimera (free)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: true,
- input: ["text"],
- cost: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 163840,
- maxTokens: 163840,
- },
  "x-ai/grok-3": {
  id: "x-ai/grok-3",
  name: "xAI: Grok 3",
@@ -6673,13 +7270,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.13,
- output: 0.85,
+ input: 0.049999999999999996,
+ output: 0.22,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 98304,
+ maxTokens: 131072,
  },
  "z-ai/glm-4.5-air:free": {
  id: "z-ai/glm-4.5-air:free",
@@ -6696,7 +7293,7 @@ export const MODELS = {
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 131072,
+ maxTokens: 96000,
  },
  "z-ai/glm-4.5v": {
  id: "z-ai/glm-4.5v",
@@ -6775,13 +7372,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.39999999999999997,
- output: 1.5,
+ input: 0.16,
+ output: 0.7999999999999999,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 202752,
- maxTokens: 65535,
+ maxTokens: 4096,
  },
  },
  "xai": {