@mariozechner/pi-ai 0.67.6 → 0.67.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1944,23 +1944,6 @@ export const MODELS = {
1944
1944
  },
1945
1945
  },
1946
1946
  "azure-openai-responses": {
1947
- "codex-mini-latest": {
1948
- id: "codex-mini-latest",
1949
- name: "Codex Mini",
1950
- api: "azure-openai-responses",
1951
- provider: "azure-openai-responses",
1952
- baseUrl: "",
1953
- reasoning: true,
1954
- input: ["text"],
1955
- cost: {
1956
- input: 1.5,
1957
- output: 6,
1958
- cacheRead: 0.375,
1959
- cacheWrite: 0,
1960
- },
1961
- contextWindow: 200000,
1962
- maxTokens: 100000,
1963
- },
1964
1947
  "gpt-4": {
1965
1948
  id: "gpt-4",
1966
1949
  name: "GPT-4",
@@ -2767,6 +2750,24 @@ export const MODELS = {
2767
2750
  contextWindow: 1000000,
2768
2751
  maxTokens: 64000,
2769
2752
  },
2753
+ "claude-opus-4.7": {
2754
+ id: "claude-opus-4.7",
2755
+ name: "Claude Opus 4.7",
2756
+ api: "anthropic-messages",
2757
+ provider: "github-copilot",
2758
+ baseUrl: "https://api.individual.githubcopilot.com",
2759
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
2760
+ reasoning: true,
2761
+ input: ["text", "image"],
2762
+ cost: {
2763
+ input: 0,
2764
+ output: 0,
2765
+ cacheRead: 0,
2766
+ cacheWrite: 0,
2767
+ },
2768
+ contextWindow: 144000,
2769
+ maxTokens: 64000,
2770
+ },
2770
2771
  "claude-sonnet-4": {
2771
2772
  id: "claude-sonnet-4",
2772
2773
  name: "Claude Sonnet 4",
@@ -5319,23 +5320,6 @@ export const MODELS = {
5319
5320
  },
5320
5321
  },
5321
5322
  "openai": {
5322
- "codex-mini-latest": {
5323
- id: "codex-mini-latest",
5324
- name: "Codex Mini",
5325
- api: "openai-responses",
5326
- provider: "openai",
5327
- baseUrl: "https://api.openai.com/v1",
5328
- reasoning: true,
5329
- input: ["text"],
5330
- cost: {
5331
- input: 1.5,
5332
- output: 6,
5333
- cacheRead: 0.375,
5334
- cacheWrite: 0,
5335
- },
5336
- contextWindow: 200000,
5337
- maxTokens: 100000,
5338
- },
5339
5323
  "gpt-4": {
5340
5324
  id: "gpt-4",
5341
5325
  name: "GPT-4",
@@ -6841,9 +6825,9 @@ export const MODELS = {
6841
6825
  "minimax-m2.5": {
6842
6826
  id: "minimax-m2.5",
6843
6827
  name: "MiniMax M2.5",
6844
- api: "anthropic-messages",
6828
+ api: "openai-completions",
6845
6829
  provider: "opencode-go",
6846
- baseUrl: "https://opencode.ai/zen/go",
6830
+ baseUrl: "https://opencode.ai/zen/go/v1",
6847
6831
  reasoning: true,
6848
6832
  input: ["text"],
6849
6833
  cost: {
@@ -6853,7 +6837,7 @@ export const MODELS = {
6853
6837
  cacheWrite: 0,
6854
6838
  },
6855
6839
  contextWindow: 204800,
6856
- maxTokens: 131072,
6840
+ maxTokens: 65536,
6857
6841
  },
6858
6842
  "minimax-m2.7": {
6859
6843
  id: "minimax-m2.7",
@@ -7526,7 +7510,7 @@ export const MODELS = {
7526
7510
  api: "openai-completions",
7527
7511
  provider: "openrouter",
7528
7512
  baseUrl: "https://openrouter.ai/api/v1",
7529
- reasoning: true,
7513
+ reasoning: false,
7530
7514
  input: ["text"],
7531
7515
  cost: {
7532
7516
  input: 0.19999999999999998,
@@ -7614,13 +7598,13 @@ export const MODELS = {
7614
7598
  reasoning: true,
7615
7599
  input: ["text"],
7616
7600
  cost: {
7617
- input: 0.26,
7618
- output: 0.38,
7619
- cacheRead: 0.13,
7601
+ input: 0.25899999999999995,
7602
+ output: 0.42,
7603
+ cacheRead: 0.135,
7620
7604
  cacheWrite: 0,
7621
7605
  },
7622
7606
  contextWindow: 163840,
7623
- maxTokens: 4096,
7607
+ maxTokens: 163840,
7624
7608
  },
7625
7609
  "deepseek/deepseek-v3.2-exp": {
7626
7610
  id: "deepseek/deepseek-v3.2-exp",
@@ -7670,7 +7654,7 @@ export const MODELS = {
7670
7654
  cacheRead: 0.024999999999999998,
7671
7655
  cacheWrite: 0.08333333333333334,
7672
7656
  },
7673
- contextWindow: 1048576,
7657
+ contextWindow: 1000000,
7674
7658
  maxTokens: 8192,
7675
7659
  },
7676
7660
  "google/gemini-2.0-flash-lite-001": {
@@ -7869,13 +7853,13 @@ export const MODELS = {
7869
7853
  reasoning: true,
7870
7854
  input: ["text", "image"],
7871
7855
  cost: {
7872
- input: 0.07,
7873
- output: 0.39999999999999997,
7874
- cacheRead: 0.04,
7856
+ input: 0.08,
7857
+ output: 0.35,
7858
+ cacheRead: 0.01,
7875
7859
  cacheWrite: 0,
7876
7860
  },
7877
7861
  contextWindow: 262144,
7878
- maxTokens: 262144,
7862
+ maxTokens: 4096,
7879
7863
  },
7880
7864
  "google/gemma-4-26b-a4b-it:free": {
7881
7865
  id: "google/gemma-4-26b-a4b-it:free",
@@ -8622,7 +8606,7 @@ export const MODELS = {
8622
8606
  cacheRead: 0.07,
8623
8607
  cacheWrite: 0,
8624
8608
  },
8625
- contextWindow: 262144,
8609
+ contextWindow: 256000,
8626
8610
  maxTokens: 4096,
8627
8611
  },
8628
8612
  "nex-agi/deepseek-v3.1-nex-n1": {
@@ -8719,9 +8703,9 @@ export const MODELS = {
8719
8703
  reasoning: true,
8720
8704
  input: ["text"],
8721
8705
  cost: {
8722
- input: 0.09999999999999999,
8723
- output: 0.5,
8724
- cacheRead: 0.09999999999999999,
8706
+ input: 0.09,
8707
+ output: 0.44999999999999996,
8708
+ cacheRead: 0,
8725
8709
  cacheWrite: 0,
8726
8710
  },
8727
8711
  contextWindow: 262144,
@@ -10504,13 +10488,13 @@ export const MODELS = {
10504
10488
  reasoning: true,
10505
10489
  input: ["text", "image"],
10506
10490
  cost: {
10507
- input: 0.049999999999999996,
10491
+ input: 0.09999999999999999,
10508
10492
  output: 0.15,
10509
10493
  cacheRead: 0,
10510
10494
  cacheWrite: 0,
10511
10495
  },
10512
- contextWindow: 256000,
10513
- maxTokens: 32768,
10496
+ contextWindow: 262144,
10497
+ maxTokens: 4096,
10514
10498
  },
10515
10499
  "qwen/qwen3.5-flash-02-23": {
10516
10500
  id: "qwen/qwen3.5-flash-02-23",
@@ -13066,7 +13050,7 @@ export const MODELS = {
13066
13050
  },
13067
13051
  "openai/gpt-oss-20b": {
13068
13052
  id: "openai/gpt-oss-20b",
13069
- name: "gpt-oss-20b",
13053
+ name: "GPT OSS 20B",
13070
13054
  api: "anthropic-messages",
13071
13055
  provider: "vercel-ai-gateway",
13072
13056
  baseUrl: "https://ai-gateway.vercel.sh",
@@ -13083,7 +13067,7 @@ export const MODELS = {
13083
13067
  },
13084
13068
  "openai/gpt-oss-safeguard-20b": {
13085
13069
  id: "openai/gpt-oss-safeguard-20b",
13086
- name: "gpt-oss-safeguard-20b",
13070
+ name: "GPT OSS Safeguard 20B",
13087
13071
  api: "anthropic-messages",
13088
13072
  provider: "vercel-ai-gateway",
13089
13073
  baseUrl: "https://ai-gateway.vercel.sh",
@@ -13343,7 +13327,7 @@ export const MODELS = {
13343
13327
  provider: "vercel-ai-gateway",
13344
13328
  baseUrl: "https://ai-gateway.vercel.sh",
13345
13329
  reasoning: false,
13346
- input: ["text"],
13330
+ input: ["text", "image"],
13347
13331
  cost: {
13348
13332
  input: 0.19999999999999998,
13349
13333
  output: 0.5,
@@ -13360,7 +13344,7 @@ export const MODELS = {
13360
13344
  provider: "vercel-ai-gateway",
13361
13345
  baseUrl: "https://ai-gateway.vercel.sh",
13362
13346
  reasoning: true,
13363
- input: ["text"],
13347
+ input: ["text", "image"],
13364
13348
  cost: {
13365
13349
  input: 0.19999999999999998,
13366
13350
  output: 0.5,
@@ -13377,7 +13361,7 @@ export const MODELS = {
13377
13361
  provider: "vercel-ai-gateway",
13378
13362
  baseUrl: "https://ai-gateway.vercel.sh",
13379
13363
  reasoning: false,
13380
- input: ["text"],
13364
+ input: ["text", "image"],
13381
13365
  cost: {
13382
13366
  input: 0.19999999999999998,
13383
13367
  output: 0.5,
@@ -13394,7 +13378,7 @@ export const MODELS = {
13394
13378
  provider: "vercel-ai-gateway",
13395
13379
  baseUrl: "https://ai-gateway.vercel.sh",
13396
13380
  reasoning: true,
13397
- input: ["text"],
13381
+ input: ["text", "image"],
13398
13382
  cost: {
13399
13383
  input: 0.19999999999999998,
13400
13384
  output: 0.5,
@@ -13411,7 +13395,7 @@ export const MODELS = {
13411
13395
  provider: "vercel-ai-gateway",
13412
13396
  baseUrl: "https://ai-gateway.vercel.sh",
13413
13397
  reasoning: true,
13414
- input: ["text"],
13398
+ input: ["text", "image"],
13415
13399
  cost: {
13416
13400
  input: 2,
13417
13401
  output: 6,
@@ -13428,7 +13412,7 @@ export const MODELS = {
13428
13412
  provider: "vercel-ai-gateway",
13429
13413
  baseUrl: "https://ai-gateway.vercel.sh",
13430
13414
  reasoning: true,
13431
- input: ["text"],
13415
+ input: ["text", "image"],
13432
13416
  cost: {
13433
13417
  input: 2,
13434
13418
  output: 6,