@draht/ai 2026.3.3 → 2026.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/README.md +3 -0
  2. package/bedrock-provider.d.ts +1 -0
  3. package/bedrock-provider.js +1 -0
  4. package/dist/bedrock-provider.d.ts +5 -0
  5. package/dist/bedrock-provider.d.ts.map +1 -0
  6. package/dist/bedrock-provider.js +6 -0
  7. package/dist/bedrock-provider.js.map +1 -0
  8. package/dist/env-api-keys.d.ts.map +1 -1
  9. package/dist/env-api-keys.js +8 -3
  10. package/dist/env-api-keys.js.map +1 -1
  11. package/dist/index.d.ts +1 -1
  12. package/dist/index.d.ts.map +1 -1
  13. package/dist/index.js +0 -1
  14. package/dist/index.js.map +1 -1
  15. package/dist/models.generated.d.ts +106 -2
  16. package/dist/models.generated.d.ts.map +1 -1
  17. package/dist/models.generated.js +150 -46
  18. package/dist/models.generated.js.map +1 -1
  19. package/dist/oauth.d.ts +2 -0
  20. package/dist/oauth.d.ts.map +1 -0
  21. package/dist/oauth.js +2 -0
  22. package/dist/oauth.js.map +1 -0
  23. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  24. package/dist/providers/amazon-bedrock.js +15 -3
  25. package/dist/providers/amazon-bedrock.js.map +1 -1
  26. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  27. package/dist/providers/google-gemini-cli.js +12 -3
  28. package/dist/providers/google-gemini-cli.js.map +1 -1
  29. package/dist/providers/google-vertex.d.ts.map +1 -1
  30. package/dist/providers/google-vertex.js +2 -2
  31. package/dist/providers/google-vertex.js.map +1 -1
  32. package/dist/providers/google.d.ts.map +1 -1
  33. package/dist/providers/google.js +2 -2
  34. package/dist/providers/google.js.map +1 -1
  35. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  36. package/dist/providers/openai-codex-responses.js +4 -2
  37. package/dist/providers/openai-codex-responses.js.map +1 -1
  38. package/dist/providers/openai-completions.d.ts.map +1 -1
  39. package/dist/providers/openai-completions.js +16 -1
  40. package/dist/providers/openai-completions.js.map +1 -1
  41. package/dist/providers/register-builtins.d.ts +7 -0
  42. package/dist/providers/register-builtins.d.ts.map +1 -1
  43. package/dist/providers/register-builtins.js +72 -3
  44. package/dist/providers/register-builtins.js.map +1 -1
  45. package/dist/stream.d.ts +0 -1
  46. package/dist/stream.d.ts.map +1 -1
  47. package/dist/stream.js +0 -1
  48. package/dist/stream.js.map +1 -1
  49. package/dist/types.d.ts +3 -1
  50. package/dist/types.d.ts.map +1 -1
  51. package/dist/types.js.map +1 -1
  52. package/dist/utils/oauth/index.d.ts +0 -1
  53. package/dist/utils/oauth/index.d.ts.map +1 -1
  54. package/dist/utils/oauth/index.js +0 -2
  55. package/dist/utils/oauth/index.js.map +1 -1
  56. package/oauth.d.ts +1 -0
  57. package/oauth.js +1 -0
  58. package/package.json +22 -4
  59. package/dist/utils/http-proxy.d.ts +0 -2
  60. package/dist/utils/http-proxy.d.ts.map +0 -1
  61. package/dist/utils/http-proxy.js +0 -15
  62. package/dist/utils/http-proxy.js.map +0 -1
@@ -3168,6 +3168,23 @@ export const MODELS = {
3168
3168
  contextWindow: 1000000,
3169
3169
  maxTokens: 64000,
3170
3170
  },
3171
+ "gemini-3.1-flash-lite-preview": {
3172
+ id: "gemini-3.1-flash-lite-preview",
3173
+ name: "Gemini 3.1 Flash Lite Preview",
3174
+ api: "google-generative-ai",
3175
+ provider: "google",
3176
+ baseUrl: "https://generativelanguage.googleapis.com/v1beta",
3177
+ reasoning: true,
3178
+ input: ["text", "image"],
3179
+ cost: {
3180
+ input: 0,
3181
+ output: 0,
3182
+ cacheRead: 0,
3183
+ cacheWrite: 0,
3184
+ },
3185
+ contextWindow: 1048576,
3186
+ maxTokens: 65536,
3187
+ },
3171
3188
  "gemini-3.1-pro-preview": {
3172
3189
  id: "gemini-3.1-pro-preview",
3173
3190
  name: "Gemini 3.1 Pro Preview",
@@ -3357,9 +3374,9 @@ export const MODELS = {
3357
3374
  contextWindow: 1048576,
3358
3375
  maxTokens: 65535,
3359
3376
  },
3360
- "gemini-3-pro-high": {
3361
- id: "gemini-3-pro-high",
3362
- name: "Gemini 3 Pro High (Antigravity)",
3377
+ "gemini-3.1-pro-high": {
3378
+ id: "gemini-3.1-pro-high",
3379
+ name: "Gemini 3.1 Pro High (Antigravity)",
3363
3380
  api: "google-gemini-cli",
3364
3381
  provider: "google-antigravity",
3365
3382
  baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com",
@@ -3374,9 +3391,9 @@ export const MODELS = {
3374
3391
  contextWindow: 1048576,
3375
3392
  maxTokens: 65535,
3376
3393
  },
3377
- "gemini-3-pro-low": {
3378
- id: "gemini-3-pro-low",
3379
- name: "Gemini 3 Pro Low (Antigravity)",
3394
+ "gemini-3.1-pro-low": {
3395
+ id: "gemini-3.1-pro-low",
3396
+ name: "Gemini 3.1 Pro Low (Antigravity)",
3380
3397
  api: "google-gemini-cli",
3381
3398
  provider: "google-antigravity",
3382
3399
  baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com",
@@ -6152,6 +6169,59 @@ export const MODELS = {
6152
6169
  maxTokens: 131072,
6153
6170
  },
6154
6171
  },
6172
+ "opencode-go": {
6173
+ "glm-5": {
6174
+ id: "glm-5",
6175
+ name: "GLM-5",
6176
+ api: "openai-completions",
6177
+ provider: "opencode-go",
6178
+ baseUrl: "https://opencode.ai/zen/go/v1",
6179
+ reasoning: true,
6180
+ input: ["text"],
6181
+ cost: {
6182
+ input: 1,
6183
+ output: 3.2,
6184
+ cacheRead: 0.2,
6185
+ cacheWrite: 0,
6186
+ },
6187
+ contextWindow: 204800,
6188
+ maxTokens: 131072,
6189
+ },
6190
+ "kimi-k2.5": {
6191
+ id: "kimi-k2.5",
6192
+ name: "Kimi K2.5",
6193
+ api: "openai-completions",
6194
+ provider: "opencode-go",
6195
+ baseUrl: "https://opencode.ai/zen/go/v1",
6196
+ reasoning: true,
6197
+ input: ["text", "image"],
6198
+ cost: {
6199
+ input: 0.6,
6200
+ output: 3,
6201
+ cacheRead: 0.1,
6202
+ cacheWrite: 0,
6203
+ },
6204
+ contextWindow: 262144,
6205
+ maxTokens: 65536,
6206
+ },
6207
+ "minimax-m2.5": {
6208
+ id: "minimax-m2.5",
6209
+ name: "MiniMax M2.5",
6210
+ api: "anthropic-messages",
6211
+ provider: "opencode-go",
6212
+ baseUrl: "https://opencode.ai/zen/go",
6213
+ reasoning: true,
6214
+ input: ["text"],
6215
+ cost: {
6216
+ input: 0.3,
6217
+ output: 1.2,
6218
+ cacheRead: 0.03,
6219
+ cacheWrite: 0,
6220
+ },
6221
+ contextWindow: 204800,
6222
+ maxTokens: 131072,
6223
+ },
6224
+ },
6155
6225
  "openrouter": {
6156
6226
  "ai21/jamba-large-1.7": {
6157
6227
  id: "ai21/jamba-large-1.7",
@@ -7149,8 +7219,8 @@ export const MODELS = {
7149
7219
  input: ["text"],
7150
7220
  cost: {
7151
7221
  input: 0.25,
7152
- output: 1,
7153
- cacheRead: 0,
7222
+ output: 0.75,
7223
+ cacheRead: 0.024999999999999998,
7154
7224
  cacheWrite: 0,
7155
7225
  },
7156
7226
  contextWindow: 128000,
@@ -7166,8 +7236,8 @@ export const MODELS = {
7166
7236
  input: ["text"],
7167
7237
  cost: {
7168
7238
  input: 0.25,
7169
- output: 1,
7170
- cacheRead: 0,
7239
+ output: 0.75,
7240
+ cacheRead: 0.024999999999999998,
7171
7241
  cacheWrite: 0,
7172
7242
  },
7173
7243
  contextWindow: 128000,
@@ -8618,6 +8688,23 @@ export const MODELS = {
8618
8688
  contextWindow: 400000,
8619
8689
  maxTokens: 128000,
8620
8690
  },
8691
+ "openai/gpt-5.3-chat": {
8692
+ id: "openai/gpt-5.3-chat",
8693
+ name: "OpenAI: GPT-5.3 Chat",
8694
+ api: "openai-completions",
8695
+ provider: "openrouter",
8696
+ baseUrl: "https://openrouter.ai/api/v1",
8697
+ reasoning: false,
8698
+ input: ["text", "image"],
8699
+ cost: {
8700
+ input: 1.75,
8701
+ output: 14,
8702
+ cacheRead: 0.175,
8703
+ cacheWrite: 0,
8704
+ },
8705
+ contextWindow: 128000,
8706
+ maxTokens: 16384,
8707
+ },
8621
8708
  "openai/gpt-5.3-codex": {
8622
8709
  id: "openai/gpt-5.3-codex",
8623
8710
  name: "OpenAI: GPT-5.3-Codex",
@@ -8984,9 +9071,9 @@ export const MODELS = {
8984
9071
  reasoning: false,
8985
9072
  input: ["text"],
8986
9073
  cost: {
8987
- input: 1.5999999999999999,
8988
- output: 6.3999999999999995,
8989
- cacheRead: 0.32,
9074
+ input: 1.04,
9075
+ output: 4.16,
9076
+ cacheRead: 0.20800000000000002,
8990
9077
  cacheWrite: 0,
8991
9078
  },
8992
9079
  contextWindow: 32768,
@@ -9018,8 +9105,8 @@ export const MODELS = {
9018
9105
  reasoning: false,
9019
9106
  input: ["text"],
9020
9107
  cost: {
9021
- input: 0.39999999999999997,
9022
- output: 1.2,
9108
+ input: 0.26,
9109
+ output: 0.78,
9023
9110
  cacheRead: 0,
9024
9111
  cacheWrite: 0,
9025
9112
  },
@@ -9035,8 +9122,8 @@ export const MODELS = {
9035
9122
  reasoning: true,
9036
9123
  input: ["text"],
9037
9124
  cost: {
9038
- input: 0.39999999999999997,
9039
- output: 1.2,
9125
+ input: 0.26,
9126
+ output: 0.78,
9040
9127
  cacheRead: 0,
9041
9128
  cacheWrite: 0,
9042
9129
  },
@@ -9052,9 +9139,9 @@ export const MODELS = {
9052
9139
  reasoning: false,
9053
9140
  input: ["text"],
9054
9141
  cost: {
9055
- input: 0.049999999999999996,
9056
- output: 0.19999999999999998,
9057
- cacheRead: 0.01,
9142
+ input: 0.0325,
9143
+ output: 0.13,
9144
+ cacheRead: 0.006500000000000001,
9058
9145
  cacheWrite: 0,
9059
9146
  },
9060
9147
  contextWindow: 131072,
@@ -9290,9 +9377,9 @@ export const MODELS = {
9290
9377
  reasoning: false,
9291
9378
  input: ["text"],
9292
9379
  cost: {
9293
- input: 0.3,
9294
- output: 1.5,
9295
- cacheRead: 0.06,
9380
+ input: 0.195,
9381
+ output: 0.975,
9382
+ cacheRead: 0.039,
9296
9383
  cacheWrite: 0,
9297
9384
  },
9298
9385
  contextWindow: 1000000,
@@ -9324,9 +9411,9 @@ export const MODELS = {
9324
9411
  reasoning: false,
9325
9412
  input: ["text"],
9326
9413
  cost: {
9327
- input: 1,
9328
- output: 5,
9329
- cacheRead: 0.19999999999999998,
9414
+ input: 0.65,
9415
+ output: 3.25,
9416
+ cacheRead: 0.13,
9330
9417
  cacheWrite: 0,
9331
9418
  },
9332
9419
  contextWindow: 1000000,
@@ -9392,8 +9479,8 @@ export const MODELS = {
9392
9479
  reasoning: true,
9393
9480
  input: ["text"],
9394
9481
  cost: {
9395
- input: 1.2,
9396
- output: 6,
9482
+ input: 0.78,
9483
+ output: 3.9,
9397
9484
  cacheRead: 0,
9398
9485
  cacheWrite: 0,
9399
9486
  },
@@ -9579,9 +9666,9 @@ export const MODELS = {
9579
9666
  reasoning: true,
9580
9667
  input: ["text", "image"],
9581
9668
  cost: {
9582
- input: 0.3,
9583
- output: 2.4,
9584
- cacheRead: 0.3,
9669
+ input: 0.26,
9670
+ output: 2.08,
9671
+ cacheRead: 0,
9585
9672
  cacheWrite: 0,
9586
9673
  },
9587
9674
  contextWindow: 262144,
@@ -9596,9 +9683,9 @@ export const MODELS = {
9596
9683
  reasoning: true,
9597
9684
  input: ["text", "image"],
9598
9685
  cost: {
9599
- input: 0.27,
9600
- output: 2.16,
9601
- cacheRead: 0.27,
9686
+ input: 0.195,
9687
+ output: 1.56,
9688
+ cacheRead: 0,
9602
9689
  cacheWrite: 0,
9603
9690
  },
9604
9691
  contextWindow: 262144,
@@ -9613,9 +9700,9 @@ export const MODELS = {
9613
9700
  reasoning: true,
9614
9701
  input: ["text", "image"],
9615
9702
  cost: {
9616
- input: 0.22499999999999998,
9617
- output: 1.7999999999999998,
9618
- cacheRead: 0.22499999999999998,
9703
+ input: 0.1625,
9704
+ output: 1.3,
9705
+ cacheRead: 0,
9619
9706
  cacheWrite: 0,
9620
9707
  },
9621
9708
  contextWindow: 262144,
@@ -9630,9 +9717,9 @@ export const MODELS = {
9630
9717
  reasoning: true,
9631
9718
  input: ["text", "image"],
9632
9719
  cost: {
9633
- input: 0.55,
9634
- output: 3.5,
9635
- cacheRead: 0.55,
9720
+ input: 0.39,
9721
+ output: 2.34,
9722
+ cacheRead: 0,
9636
9723
  cacheWrite: 0,
9637
9724
  },
9638
9725
  contextWindow: 262144,
@@ -9664,8 +9751,8 @@ export const MODELS = {
9664
9751
  reasoning: true,
9665
9752
  input: ["text", "image"],
9666
9753
  cost: {
9667
- input: 0.39999999999999997,
9668
- output: 2.4,
9754
+ input: 0.26,
9755
+ output: 1.56,
9669
9756
  cacheRead: 0,
9670
9757
  cacheWrite: 0,
9671
9758
  },
@@ -10089,13 +10176,13 @@ export const MODELS = {
10089
10176
  reasoning: true,
10090
10177
  input: ["text"],
10091
10178
  cost: {
10092
- input: 0.35,
10093
- output: 1.71,
10179
+ input: 0.39,
10180
+ output: 1.9,
10094
10181
  cacheRead: 0,
10095
10182
  cacheWrite: 0,
10096
10183
  },
10097
- contextWindow: 202752,
10098
- maxTokens: 131072,
10184
+ contextWindow: 204800,
10185
+ maxTokens: 204800,
10099
10186
  },
10100
10187
  "z-ai/glm-4.6:exacto": {
10101
10188
  id: "z-ai/glm-4.6:exacto",
@@ -10932,6 +11019,23 @@ export const MODELS = {
10932
11019
  contextWindow: 1000000,
10933
11020
  maxTokens: 64000,
10934
11021
  },
11022
+ "inception/mercury-2": {
11023
+ id: "inception/mercury-2",
11024
+ name: "Mercury 2",
11025
+ api: "anthropic-messages",
11026
+ provider: "vercel-ai-gateway",
11027
+ baseUrl: "https://ai-gateway.vercel.sh",
11028
+ reasoning: true,
11029
+ input: ["text"],
11030
+ cost: {
11031
+ input: 0.25,
11032
+ output: 0.75,
11033
+ cacheRead: 0.024999999999999998,
11034
+ cacheWrite: 0,
11035
+ },
11036
+ contextWindow: 128000,
11037
+ maxTokens: 128000,
11038
+ },
10935
11039
  "inception/mercury-coder-small": {
10936
11040
  id: "inception/mercury-coder-small",
10937
11041
  name: "Mercury Coder Small Beta",