@mariozechner/pi-ai 0.48.0 → 0.49.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +23 -2
  2. package/dist/index.d.ts +0 -1
  3. package/dist/index.d.ts.map +1 -1
  4. package/dist/index.js +0 -1
  5. package/dist/index.js.map +1 -1
  6. package/dist/models.generated.d.ts +0 -17
  7. package/dist/models.generated.d.ts.map +1 -1
  8. package/dist/models.generated.js +17 -34
  9. package/dist/models.generated.js.map +1 -1
  10. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  11. package/dist/providers/amazon-bedrock.js +9 -7
  12. package/dist/providers/amazon-bedrock.js.map +1 -1
  13. package/dist/providers/anthropic.d.ts.map +1 -1
  14. package/dist/providers/anthropic.js +33 -19
  15. package/dist/providers/anthropic.js.map +1 -1
  16. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  17. package/dist/providers/google-gemini-cli.js +2 -28
  18. package/dist/providers/google-gemini-cli.js.map +1 -1
  19. package/dist/providers/google-shared.d.ts +1 -1
  20. package/dist/providers/google-shared.d.ts.map +1 -1
  21. package/dist/providers/google-shared.js +8 -3
  22. package/dist/providers/google-shared.js.map +1 -1
  23. package/dist/providers/google-vertex.d.ts.map +1 -1
  24. package/dist/providers/google-vertex.js +1 -0
  25. package/dist/providers/google-vertex.js.map +1 -1
  26. package/dist/providers/google.d.ts.map +1 -1
  27. package/dist/providers/google.js +1 -0
  28. package/dist/providers/google.js.map +1 -1
  29. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  30. package/dist/providers/openai-codex-responses.js +21 -26
  31. package/dist/providers/openai-codex-responses.js.map +1 -1
  32. package/dist/providers/openai-completions.d.ts.map +1 -1
  33. package/dist/providers/openai-completions.js +17 -7
  34. package/dist/providers/openai-completions.js.map +1 -1
  35. package/dist/providers/openai-responses.d.ts.map +1 -1
  36. package/dist/providers/openai-responses.js +36 -3
  37. package/dist/providers/openai-responses.js.map +1 -1
  38. package/dist/providers/transform-messages.d.ts +7 -2
  39. package/dist/providers/transform-messages.d.ts.map +1 -1
  40. package/dist/providers/transform-messages.js +37 -29
  41. package/dist/providers/transform-messages.js.map +1 -1
  42. package/dist/stream.d.ts.map +1 -1
  43. package/dist/stream.js +48 -19
  44. package/dist/stream.js.map +1 -1
  45. package/dist/types.d.ts +13 -4
  46. package/dist/types.d.ts.map +1 -1
  47. package/dist/types.js.map +1 -1
  48. package/package.json +1 -1
  49. package/dist/constants.d.ts +0 -6
  50. package/dist/constants.d.ts.map +0 -1
  51. package/dist/constants.js +0 -14
  52. package/dist/constants.js.map +0 -1
@@ -1332,23 +1332,6 @@ export const MODELS = {
1332
1332
  contextWindow: 131000,
1333
1333
  maxTokens: 32000,
1334
1334
  },
1335
- "zai-glm-4.6": {
1336
- id: "zai-glm-4.6",
1337
- name: "Z.AI GLM-4.6",
1338
- api: "openai-completions",
1339
- provider: "cerebras",
1340
- baseUrl: "https://api.cerebras.ai/v1",
1341
- reasoning: false,
1342
- input: ["text"],
1343
- cost: {
1344
- input: 0,
1345
- output: 0,
1346
- cacheRead: 0,
1347
- cacheWrite: 0,
1348
- },
1349
- contextWindow: 131072,
1350
- maxTokens: 40960,
1351
- },
1352
1335
  "zai-glm-4.7": {
1353
1336
  id: "zai-glm-4.7",
1354
1337
  name: "Z.AI GLM-4.7",
@@ -4983,13 +4966,13 @@ export const MODELS = {
4983
4966
  reasoning: true,
4984
4967
  input: ["text"],
4985
4968
  cost: {
4986
- input: 0.44999999999999996,
4987
- output: 2.1500000000000004,
4969
+ input: 0.39999999999999997,
4970
+ output: 1.75,
4988
4971
  cacheRead: 0,
4989
4972
  cacheWrite: 0,
4990
4973
  },
4991
- contextWindow: 131072,
4992
- maxTokens: 32768,
4974
+ contextWindow: 163840,
4975
+ maxTokens: 65536,
4993
4976
  },
4994
4977
  "deepseek/deepseek-r1-distill-llama-70b": {
4995
4978
  id: "deepseek/deepseek-r1-distill-llama-70b",
@@ -5088,7 +5071,7 @@ export const MODELS = {
5088
5071
  input: 0.09999999999999999,
5089
5072
  output: 0.39999999999999997,
5090
5073
  cacheRead: 0.024999999999999998,
5091
- cacheWrite: 0.18330000000000002,
5074
+ cacheWrite: 0.0833,
5092
5075
  },
5093
5076
  contextWindow: 1048576,
5094
5077
  maxTokens: 8192,
@@ -5139,7 +5122,7 @@ export const MODELS = {
5139
5122
  input: 0.3,
5140
5123
  output: 2.5,
5141
5124
  cacheRead: 0.03,
5142
- cacheWrite: 0.3833,
5125
+ cacheWrite: 0.08333333333333334,
5143
5126
  },
5144
5127
  contextWindow: 1048576,
5145
5128
  maxTokens: 65535,
@@ -5156,7 +5139,7 @@ export const MODELS = {
5156
5139
  input: 0.09999999999999999,
5157
5140
  output: 0.39999999999999997,
5158
5141
  cacheRead: 0.01,
5159
- cacheWrite: 0.18330000000000002,
5142
+ cacheWrite: 0.0833,
5160
5143
  },
5161
5144
  contextWindow: 1048576,
5162
5145
  maxTokens: 65535,
@@ -5173,7 +5156,7 @@ export const MODELS = {
5173
5156
  input: 0.09999999999999999,
5174
5157
  output: 0.39999999999999997,
5175
5158
  cacheRead: 0.01,
5176
- cacheWrite: 1,
5159
+ cacheWrite: 0.0833,
5177
5160
  },
5178
5161
  contextWindow: 1048576,
5179
5162
  maxTokens: 65536,
@@ -5190,7 +5173,7 @@ export const MODELS = {
5190
5173
  input: 0.3,
5191
5174
  output: 2.5,
5192
5175
  cacheRead: 0.075,
5193
- cacheWrite: 0.3833,
5176
+ cacheWrite: 0.0833,
5194
5177
  },
5195
5178
  contextWindow: 1048576,
5196
5179
  maxTokens: 65535,
@@ -5207,7 +5190,7 @@ export const MODELS = {
5207
5190
  input: 1.25,
5208
5191
  output: 10,
5209
5192
  cacheRead: 0.125,
5210
- cacheWrite: 1.625,
5193
+ cacheWrite: 0.375,
5211
5194
  },
5212
5195
  contextWindow: 1048576,
5213
5196
  maxTokens: 65536,
@@ -5224,7 +5207,7 @@ export const MODELS = {
5224
5207
  input: 1.25,
5225
5208
  output: 10,
5226
5209
  cacheRead: 0.31,
5227
- cacheWrite: 1.625,
5210
+ cacheWrite: 0.375,
5228
5211
  },
5229
5212
  contextWindow: 1048576,
5230
5213
  maxTokens: 65536,
@@ -5241,7 +5224,7 @@ export const MODELS = {
5241
5224
  input: 1.25,
5242
5225
  output: 10,
5243
5226
  cacheRead: 0.31,
5244
- cacheWrite: 1.625,
5227
+ cacheWrite: 0.375,
5245
5228
  },
5246
5229
  contextWindow: 1048576,
5247
5230
  maxTokens: 65535,
@@ -5275,7 +5258,7 @@ export const MODELS = {
5275
5258
  input: 2,
5276
5259
  output: 12,
5277
5260
  cacheRead: 0.19999999999999998,
5278
- cacheWrite: 2.375,
5261
+ cacheWrite: 0.375,
5279
5262
  },
5280
5263
  contextWindow: 1048576,
5281
5264
  maxTokens: 65536,
@@ -7640,8 +7623,8 @@ export const MODELS = {
7640
7623
  cacheRead: 0,
7641
7624
  cacheWrite: 0,
7642
7625
  },
7643
- contextWindow: 262144,
7644
- maxTokens: 262144,
7626
+ contextWindow: 128000,
7627
+ maxTokens: 4096,
7645
7628
  },
7646
7629
  "qwen/qwen3-vl-235b-a22b-instruct": {
7647
7630
  id: "qwen/qwen3-vl-235b-a22b-instruct",
@@ -10204,8 +10187,8 @@ export const MODELS = {
10204
10187
  reasoning: true,
10205
10188
  input: ["text"],
10206
10189
  cost: {
10207
- input: 0.098,
10208
- output: 0.293,
10190
+ input: 0.09,
10191
+ output: 0.29,
10209
10192
  cacheRead: 0,
10210
10193
  cacheWrite: 0,
10211
10194
  },