@mariozechner/pi-ai 0.46.0 → 0.48.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/dist/constants.d.ts +6 -0
  2. package/dist/constants.d.ts.map +1 -0
  3. package/dist/constants.js +14 -0
  4. package/dist/constants.js.map +1 -0
  5. package/dist/index.d.ts +1 -1
  6. package/dist/index.d.ts.map +1 -1
  7. package/dist/index.js +1 -1
  8. package/dist/index.js.map +1 -1
  9. package/dist/models.generated.d.ts +20 -111
  10. package/dist/models.generated.d.ts.map +1 -1
  11. package/dist/models.generated.js +50 -136
  12. package/dist/models.generated.js.map +1 -1
  13. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  14. package/dist/providers/amazon-bedrock.js +7 -3
  15. package/dist/providers/amazon-bedrock.js.map +1 -1
  16. package/dist/providers/google-shared.d.ts.map +1 -1
  17. package/dist/providers/google-shared.js +2 -1
  18. package/dist/providers/google-shared.js.map +1 -1
  19. package/dist/providers/openai-codex-responses.d.ts +0 -2
  20. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  21. package/dist/providers/openai-codex-responses.js +476 -489
  22. package/dist/providers/openai-codex-responses.js.map +1 -1
  23. package/dist/providers/openai-completions.d.ts.map +1 -1
  24. package/dist/providers/openai-completions.js +18 -10
  25. package/dist/providers/openai-completions.js.map +1 -1
  26. package/dist/providers/transform-messages.d.ts.map +1 -1
  27. package/dist/providers/transform-messages.js +7 -0
  28. package/dist/providers/transform-messages.js.map +1 -1
  29. package/dist/utils/validation.d.ts +1 -1
  30. package/dist/utils/validation.d.ts.map +1 -1
  31. package/dist/utils/validation.js +7 -4
  32. package/dist/utils/validation.js.map +1 -1
  33. package/package.json +1 -1
  34. package/dist/providers/openai-codex/constants.d.ts +0 -21
  35. package/dist/providers/openai-codex/constants.d.ts.map +0 -1
  36. package/dist/providers/openai-codex/constants.js +0 -21
  37. package/dist/providers/openai-codex/constants.js.map +0 -1
  38. package/dist/providers/openai-codex/index.d.ts +0 -7
  39. package/dist/providers/openai-codex/index.d.ts.map +0 -1
  40. package/dist/providers/openai-codex/index.js +0 -7
  41. package/dist/providers/openai-codex/index.js.map +0 -1
  42. package/dist/providers/openai-codex/prompts/codex.d.ts +0 -3
  43. package/dist/providers/openai-codex/prompts/codex.d.ts.map +0 -1
  44. package/dist/providers/openai-codex/prompts/codex.js +0 -323
  45. package/dist/providers/openai-codex/prompts/codex.js.map +0 -1
  46. package/dist/providers/openai-codex/prompts/pi-codex-bridge.d.ts +0 -7
  47. package/dist/providers/openai-codex/prompts/pi-codex-bridge.d.ts.map +0 -1
  48. package/dist/providers/openai-codex/prompts/pi-codex-bridge.js +0 -50
  49. package/dist/providers/openai-codex/prompts/pi-codex-bridge.js.map +0 -1
  50. package/dist/providers/openai-codex/prompts/system-prompt.d.ts +0 -10
  51. package/dist/providers/openai-codex/prompts/system-prompt.d.ts.map +0 -1
  52. package/dist/providers/openai-codex/prompts/system-prompt.js +0 -15
  53. package/dist/providers/openai-codex/prompts/system-prompt.js.map +0 -1
  54. package/dist/providers/openai-codex/request-transformer.d.ts +0 -44
  55. package/dist/providers/openai-codex/request-transformer.d.ts.map +0 -1
  56. package/dist/providers/openai-codex/request-transformer.js +0 -99
  57. package/dist/providers/openai-codex/request-transformer.js.map +0 -1
  58. package/dist/providers/openai-codex/response-handler.d.ts +0 -19
  59. package/dist/providers/openai-codex/response-handler.d.ts.map +0 -1
  60. package/dist/providers/openai-codex/response-handler.js +0 -107
  61. package/dist/providers/openai-codex/response-handler.js.map +0 -1
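Most of the churn sits in the generated model registry (models.generated.js and its typings) and in the deletion of the dist/providers/openai-codex/ directory, whose prompts, request transformer, and response handler appear to have been folded into the rewritten openai-codex-responses.js. The hunks reproduced below are from models.generated.js and only touch entries of the exported MODELS map. For orientation, here is a hedged TypeScript sketch of the entry shape as it can be read off the diff; the ModelEntry interface and the estimateCost helper are illustrative assumptions, not typings or APIs exported by the package, and the per-million-token unit for the cost fields is a guess based on common convention:

// Illustrative only: an approximate shape for one entry of the generated
// MODELS map, with field names taken from the hunks below. The package does
// not necessarily export such an interface.
interface ModelEntry {
    id: string;
    name: string;
    api: string;                 // e.g. "openai-completions", "openai-responses"
    provider: string;            // e.g. "openrouter", "github-copilot"
    baseUrl: string;
    headers?: Record<string, string>;
    reasoning: boolean;
    input: ("text" | "image")[];
    cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
    contextWindow: number;
    maxTokens: number;
}

// Hypothetical helper, assuming the cost fields are USD per million tokens
// (a common convention; the unit is not stated in the diff itself).
function estimateCost(model: ModelEntry, inputTokens: number, outputTokens: number): number {
    return (inputTokens * model.cost.input + outputTokens * model.cost.output) / 1_000_000;
}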
@@ -1720,24 +1720,6 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 64000,
  },
- "oswe-vscode-prime": {
- id: "oswe-vscode-prime",
- name: "Raptor Mini (Preview)",
- api: "openai-responses",
- provider: "github-copilot",
- baseUrl: "https://api.individual.githubcopilot.com",
- headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
- reasoning: true,
- input: ["text", "image"],
- cost: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 200000,
- maxTokens: 64000,
- },
  },
  "google": {
  "gemini-1.5-flash": {
@@ -4431,23 +4413,6 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 131072,
  },
- "allenai/olmo-3-7b-instruct": {
- id: "allenai/olmo-3-7b-instruct",
- name: "AllenAI: Olmo 3 7B Instruct",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: {
- input: 0.09999999999999999,
- output: 0.19999999999999998,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 65536,
- maxTokens: 65536,
- },
  "allenai/olmo-3.1-32b-instruct": {
  id: "allenai/olmo-3.1-32b-instruct",
  name: "AllenAI: Olmo 3.1 32B Instruct",
@@ -5002,12 +4967,12 @@ export const MODELS = {
  input: ["text"],
  cost: {
  input: 0.7,
- output: 2.4,
+ output: 2.5,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 163840,
- maxTokens: 163840,
+ contextWindow: 64000,
+ maxTokens: 16000,
  },
  "deepseek/deepseek-r1-0528": {
  id: "deepseek/deepseek-r1-0528",
@@ -5207,12 +5172,29 @@ export const MODELS = {
  cost: {
  input: 0.09999999999999999,
  output: 0.39999999999999997,
- cacheRead: 0,
- cacheWrite: 0,
+ cacheRead: 0.01,
+ cacheWrite: 1,
  },
  contextWindow: 1048576,
  maxTokens: 65536,
  },
+ "google/gemini-2.5-flash-preview-09-2025": {
+ id: "google/gemini-2.5-flash-preview-09-2025",
+ name: "Google: Gemini 2.5 Flash Preview 09-2025",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0.3,
+ output: 2.5,
+ cacheRead: 0.075,
+ cacheWrite: 0.3833,
+ },
+ contextWindow: 1048576,
+ maxTokens: 65535,
+ },
  "google/gemini-2.5-pro": {
  id: "google/gemini-2.5-pro",
  name: "Google: Gemini 2.5 Pro",
@@ -5383,23 +5365,6 @@ export const MODELS = {
  contextWindow: 256000,
  maxTokens: 128000,
  },
- "meta-llama/llama-3-70b-instruct": {
- id: "meta-llama/llama-3-70b-instruct",
- name: "Meta: Llama 3 70B Instruct",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: {
- input: 0.3,
- output: 0.39999999999999997,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 8192,
- maxTokens: 16384,
- },
  "meta-llama/llama-3-8b-instruct": {
  id: "meta-llama/llama-3-8b-instruct",
  name: "Meta: Llama 3 8B Instruct",
@@ -5664,12 +5629,12 @@ export const MODELS = {
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.07,
- output: 0.28,
+ input: 0.09999999999999999,
+ output: 0.3,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 128000,
+ contextWindow: 131072,
  maxTokens: 4096,
  },
  "mistralai/ministral-14b-2512": {
@@ -5757,40 +5722,6 @@ export const MODELS = {
  contextWindow: 262144,
  maxTokens: 4096,
  },
- "mistralai/mistral-7b-instruct": {
- id: "mistralai/mistral-7b-instruct",
- name: "Mistral: Mistral 7B Instruct",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: {
- input: 0.028,
- output: 0.054,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 32768,
- maxTokens: 16384,
- },
- "mistralai/mistral-7b-instruct:free": {
- id: "mistralai/mistral-7b-instruct:free",
- name: "Mistral: Mistral 7B Instruct (free)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text"],
- cost: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 32768,
- maxTokens: 16384,
- },
  "mistralai/mistral-large": {
  id: "mistralai/mistral-large",
  name: "Mistral Large",
@@ -5908,7 +5839,7 @@ export const MODELS = {
  cacheWrite: 0,
  },
  contextWindow: 131072,
- maxTokens: 16384,
+ maxTokens: 131072,
  },
  "mistralai/mistral-saba": {
  id: "mistralai/mistral-saba",
@@ -6352,23 +6283,6 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/codex-mini": {
- id: "openai/codex-mini",
- name: "OpenAI: Codex Mini",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: true,
- input: ["text", "image"],
- cost: {
- input: 1.5,
- output: 6,
- cacheRead: 0.375,
- cacheWrite: 0,
- },
- contextWindow: 200000,
- maxTokens: 100000,
- },
  "openai/gpt-3.5-turbo": {
  id: "openai/gpt-3.5-turbo",
  name: "OpenAI: GPT-3.5 Turbo",
@@ -7415,13 +7329,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.18,
- output: 0.54,
+ input: 0.19999999999999998,
+ output: 0.6,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 40960,
- maxTokens: 40960,
+ maxTokens: 4096,
  },
  "qwen/qwen3-235b-a22b-2507": {
  id: "qwen/qwen3-235b-a22b-2507",
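Several entries in these hunks pair a large contextWindow with a much smaller maxTokens (above, the context window stays at 40960 while maxTokens drops to 4096). A consumer that sizes its completion budget from this registry presumably clamps against maxTokens rather than the context window; a minimal sketch, reusing the hypothetical ModelEntry shape from earlier:

// Hypothetical helper: cap a requested completion length at the model's
// advertised output budget (maxTokens), independent of its contextWindow.
function clampCompletion(model: { maxTokens: number }, requested: number): number {
    return Math.min(requested, model.maxTokens);
}

clampCompletion({ maxTokens: 4096 }, 40_960); // -> 4096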
@@ -7695,6 +7609,23 @@ export const MODELS = {
  contextWindow: 262144,
  maxTokens: 4096,
  },
+ "qwen/qwen3-next-80b-a3b-instruct:free": {
+ id: "qwen/qwen3-next-80b-a3b-instruct:free",
+ name: "Qwen: Qwen3 Next 80B A3B Instruct (free)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 262144,
+ maxTokens: 4096,
+ },
  "qwen/qwen3-next-80b-a3b-thinking": {
  id: "qwen/qwen3-next-80b-a3b-thinking",
  name: "Qwen: Qwen3 Next 80B A3B Thinking",
@@ -7729,23 +7660,6 @@ export const MODELS = {
  contextWindow: 262144,
  maxTokens: 4096,
  },
- "qwen/qwen3-vl-235b-a22b-thinking": {
- id: "qwen/qwen3-vl-235b-a22b-thinking",
- name: "Qwen: Qwen3 VL 235B A22B Thinking",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: true,
- input: ["text", "image"],
- cost: {
- input: 0.44999999999999996,
- output: 3.5,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 262144,
- maxTokens: 262144,
- },
  "qwen/qwen3-vl-30b-a3b-instruct": {
  id: "qwen/qwen3-vl-30b-a3b-instruct",
  name: "Qwen: Qwen3 VL 30B A3B Instruct",
@@ -8129,13 +8043,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.09999999999999999,
- output: 0.3,
- cacheRead: 0.02,
+ input: 0.09,
+ output: 0.29,
+ cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 262144,
- maxTokens: 32000,
+ maxTokens: 4096,
  },
  "xiaomi/mimo-v2-flash:free": {
  id: "xiaomi/mimo-v2-flash:free",
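A side note on the odd-looking prices throughout these hunks (0.09999999999999999, 0.19999999999999998, 0.44999999999999996): these are ordinary IEEE-754 double-precision artifacts of whatever arithmetic the generator performs on the rates, not deliberate price points. A quick illustration in TypeScript:

// Classic double-precision behaviour: 0.1 and 0.2 have no exact binary
// representation, so simple arithmetic prints with a long decimal tail.
console.log(0.1 + 0.2);              // 0.30000000000000004
console.log((0.1 + 0.2).toFixed(2)); // "0.30" (rounding for display hides it)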