@mariozechner/pi-ai 0.37.8 → 0.38.0

This diff shows the contents of publicly available package versions as they were released to their respective public registries. It is provided for informational purposes only.
@@ -412,6 +412,23 @@ export const MODELS = {
       contextWindow: 131072,
       maxTokens: 40960,
     },
+    "zai-glm-4.7": {
+      id: "zai-glm-4.7",
+      name: "Z.AI GLM-4.7",
+      api: "openai-completions",
+      provider: "cerebras",
+      baseUrl: "https://api.cerebras.ai/v1",
+      reasoning: false,
+      input: ["text"],
+      cost: {
+        input: 0,
+        output: 0,
+        cacheRead: 0,
+        cacheWrite: 0,
+      },
+      contextWindow: 131072,
+      maxTokens: 40000,
+    },
   },
   "github-copilot": {
     "claude-haiku-4.5": {
@@ -2771,108 +2788,6 @@ export const MODELS = {
     },
   },
   "openai-codex": {
-    "codex-mini-latest": {
-      id: "codex-mini-latest",
-      name: "Codex Mini Latest",
-      api: "openai-codex-responses",
-      provider: "openai-codex",
-      baseUrl: "https://chatgpt.com/backend-api",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 1.5,
-        output: 6,
-        cacheRead: 0.375,
-        cacheWrite: 0,
-      },
-      contextWindow: 400000,
-      maxTokens: 128000,
-    },
-    "gpt-5": {
-      id: "gpt-5",
-      name: "gpt-5",
-      api: "openai-codex-responses",
-      provider: "openai-codex",
-      baseUrl: "https://chatgpt.com/backend-api",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 1.25,
-        output: 10,
-        cacheRead: 0.125,
-        cacheWrite: 0,
-      },
-      contextWindow: 400000,
-      maxTokens: 128000,
-    },
-    "gpt-5-codex": {
-      id: "gpt-5-codex",
-      name: "gpt-5-codex",
-      api: "openai-codex-responses",
-      provider: "openai-codex",
-      baseUrl: "https://chatgpt.com/backend-api",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 1.25,
-        output: 10,
-        cacheRead: 0.125,
-        cacheWrite: 0,
-      },
-      contextWindow: 400000,
-      maxTokens: 128000,
-    },
-    "gpt-5-codex-mini": {
-      id: "gpt-5-codex-mini",
-      name: "gpt-5-codex-mini",
-      api: "openai-codex-responses",
-      provider: "openai-codex",
-      baseUrl: "https://chatgpt.com/backend-api",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 0.25,
-        output: 2,
-        cacheRead: 0.025,
-        cacheWrite: 0,
-      },
-      contextWindow: 400000,
-      maxTokens: 128000,
-    },
-    "gpt-5-mini": {
-      id: "gpt-5-mini",
-      name: "gpt-5-mini",
-      api: "openai-codex-responses",
-      provider: "openai-codex",
-      baseUrl: "https://chatgpt.com/backend-api",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 0.25,
-        output: 2,
-        cacheRead: 0.025,
-        cacheWrite: 0,
-      },
-      contextWindow: 400000,
-      maxTokens: 128000,
-    },
-    "gpt-5-nano": {
-      id: "gpt-5-nano",
-      name: "gpt-5-nano",
-      api: "openai-codex-responses",
-      provider: "openai-codex",
-      baseUrl: "https://chatgpt.com/backend-api",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 0.05,
-        output: 0.4,
-        cacheRead: 0.005,
-        cacheWrite: 0,
-      },
-      contextWindow: 400000,
-      maxTokens: 128000,
-    },
     "gpt-5.1": {
       id: "gpt-5.1",
       name: "GPT-5.1",
@@ -2887,41 +2802,7 @@ export const MODELS = {
         cacheRead: 0.125,
         cacheWrite: 0,
       },
-      contextWindow: 400000,
-      maxTokens: 128000,
-    },
-    "gpt-5.1-chat-latest": {
-      id: "gpt-5.1-chat-latest",
-      name: "gpt-5.1-chat-latest",
-      api: "openai-codex-responses",
-      provider: "openai-codex",
-      baseUrl: "https://chatgpt.com/backend-api",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 1.25,
-        output: 10,
-        cacheRead: 0.125,
-        cacheWrite: 0,
-      },
-      contextWindow: 400000,
-      maxTokens: 128000,
-    },
-    "gpt-5.1-codex": {
-      id: "gpt-5.1-codex",
-      name: "GPT-5.1 Codex",
-      api: "openai-codex-responses",
-      provider: "openai-codex",
-      baseUrl: "https://chatgpt.com/backend-api",
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 1.25,
-        output: 10,
-        cacheRead: 0.125,
-        cacheWrite: 0,
-      },
-      contextWindow: 400000,
+      contextWindow: 272000,
       maxTokens: 128000,
     },
     "gpt-5.1-codex-max": {
@@ -2938,7 +2819,7 @@ export const MODELS = {
         cacheRead: 0.125,
         cacheWrite: 0,
       },
-      contextWindow: 400000,
+      contextWindow: 272000,
       maxTokens: 128000,
     },
     "gpt-5.1-codex-mini": {
@@ -2955,7 +2836,7 @@ export const MODELS = {
         cacheRead: 0.025,
         cacheWrite: 0,
       },
-      contextWindow: 400000,
+      contextWindow: 272000,
       maxTokens: 128000,
     },
     "gpt-5.2": {
@@ -2972,7 +2853,7 @@ export const MODELS = {
         cacheRead: 0.175,
         cacheWrite: 0,
       },
-      contextWindow: 400000,
+      contextWindow: 272000,
       maxTokens: 128000,
     },
     "gpt-5.2-codex": {
@@ -2989,7 +2870,7 @@ export const MODELS = {
         cacheRead: 0.175,
        cacheWrite: 0,
       },
-      contextWindow: 400000,
+      contextWindow: 272000,
       maxTokens: 128000,
     },
   },
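The remaining hunks all make the same change: contextWindow drops from 400000 to 272000 for the openai-codex GPT-5.1 and GPT-5.2 entries, while maxTokens stays at 128000. A minimal sketch of what that means for callers that budget prompt size against the window; the helper and the 1024-token safety margin are hypothetical, not pi-ai code:

// Hypothetical helper, not pi-ai code: prompt room left once the reply budget
// (maxTokens) and a small safety margin are reserved out of the context window.
function promptBudget(contextWindow: number, maxTokens: number, reserve = 1_024): number {
  return Math.max(0, contextWindow - maxTokens - reserve);
}

// Before this release: 400000 - 128000 - 1024 = 270976 tokens for the prompt.
// After this release:  272000 - 128000 - 1024 = 142976 tokens for the prompt.
console.log(promptBudget(400_000, 128_000), promptBudget(272_000, 128_000));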