@hyperspaceng/neural-ai 0.68.2 → 0.70.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/README.md +1 -1
  2. package/dist/env-api-keys.d.ts +9 -0
  3. package/dist/env-api-keys.d.ts.map +1 -1
  4. package/dist/env-api-keys.js +41 -31
  5. package/dist/env-api-keys.js.map +1 -1
  6. package/dist/index.d.ts +2 -2
  7. package/dist/index.d.ts.map +1 -1
  8. package/dist/index.js +1 -1
  9. package/dist/index.js.map +1 -1
  10. package/dist/models.d.ts +1 -1
  11. package/dist/models.d.ts.map +1 -1
  12. package/dist/models.generated.d.ts +409 -0
  13. package/dist/models.generated.d.ts.map +1 -1
  14. package/dist/models.generated.js +436 -39
  15. package/dist/models.generated.js.map +1 -1
  16. package/dist/models.js +5 -2
  17. package/dist/models.js.map +1 -1
  18. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  19. package/dist/providers/amazon-bedrock.js.map +1 -1
  20. package/dist/providers/anthropic.d.ts.map +1 -1
  21. package/dist/providers/anthropic.js +28 -11
  22. package/dist/providers/anthropic.js.map +1 -1
  23. package/dist/providers/google-vertex.d.ts.map +1 -1
  24. package/dist/providers/google-vertex.js +34 -13
  25. package/dist/providers/google-vertex.js.map +1 -1
  26. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  27. package/dist/providers/openai-codex-responses.js +8 -7
  28. package/dist/providers/openai-codex-responses.js.map +1 -1
  29. package/dist/providers/openai-completions.d.ts.map +1 -1
  30. package/dist/providers/openai-completions.js +53 -31
  31. package/dist/providers/openai-completions.js.map +1 -1
  32. package/dist/providers/openai-responses.d.ts.map +1 -1
  33. package/dist/providers/openai-responses.js +19 -19
  34. package/dist/providers/openai-responses.js.map +1 -1
  35. package/dist/providers/transform-messages.d.ts.map +1 -1
  36. package/dist/providers/transform-messages.js +2 -0
  37. package/dist/providers/transform-messages.js.map +1 -1
  38. package/dist/types.d.ts +21 -2
  39. package/dist/types.d.ts.map +1 -1
  40. package/dist/types.js.map +1 -1
  41. package/dist/utils/typebox-helpers.d.ts +1 -1
  42. package/dist/utils/typebox-helpers.d.ts.map +1 -1
  43. package/dist/utils/typebox-helpers.js +1 -1
  44. package/dist/utils/typebox-helpers.js.map +1 -1
  45. package/dist/utils/validation.d.ts.map +1 -1
  46. package/dist/utils/validation.js +242 -41
  47. package/dist/utils/validation.js.map +1 -1
  48. package/package.json +2 -4
@@ -323,6 +323,40 @@ export declare const MODELS: {
323
323
  contextWindow: number;
324
324
  maxTokens: number;
325
325
  };
326
+ readonly "au.anthropic.claude-opus-4-6-v1": {
327
+ id: string;
328
+ name: string;
329
+ api: "bedrock-converse-stream";
330
+ provider: string;
331
+ baseUrl: string;
332
+ reasoning: true;
333
+ input: ("image" | "text")[];
334
+ cost: {
335
+ input: number;
336
+ output: number;
337
+ cacheRead: number;
338
+ cacheWrite: number;
339
+ };
340
+ contextWindow: number;
341
+ maxTokens: number;
342
+ };
343
+ readonly "au.anthropic.claude-sonnet-4-6": {
344
+ id: string;
345
+ name: string;
346
+ api: "bedrock-converse-stream";
347
+ provider: string;
348
+ baseUrl: string;
349
+ reasoning: true;
350
+ input: ("image" | "text")[];
351
+ cost: {
352
+ input: number;
353
+ output: number;
354
+ cacheRead: number;
355
+ cacheWrite: number;
356
+ };
357
+ contextWindow: number;
358
+ maxTokens: number;
359
+ };
326
360
  readonly "deepseek.r1-v1:0": {
327
361
  id: string;
328
362
  name: string;
@@ -2486,6 +2520,23 @@ export declare const MODELS: {
2486
2520
  contextWindow: number;
2487
2521
  maxTokens: number;
2488
2522
  };
2523
+ readonly "gpt-5.5": {
2524
+ id: string;
2525
+ name: string;
2526
+ api: "azure-openai-responses";
2527
+ provider: string;
2528
+ baseUrl: string;
2529
+ reasoning: true;
2530
+ input: ("image" | "text")[];
2531
+ cost: {
2532
+ input: number;
2533
+ output: number;
2534
+ cacheRead: number;
2535
+ cacheWrite: number;
2536
+ };
2537
+ contextWindow: number;
2538
+ maxTokens: number;
2539
+ };
2489
2540
  readonly o1: {
2490
2541
  id: string;
2491
2542
  name: string;
@@ -3014,6 +3065,9 @@ export declare const MODELS: {
3014
3065
  "Editor-Plugin-Version": string;
3015
3066
  "Copilot-Integration-Id": string;
3016
3067
  };
3068
+ compat: {
3069
+ supportsEagerToolInputStreaming: false;
3070
+ };
3017
3071
  reasoning: true;
3018
3072
  input: ("image" | "text")[];
3019
3073
  cost: {
@@ -3106,6 +3160,9 @@ export declare const MODELS: {
3106
3160
  "Editor-Plugin-Version": string;
3107
3161
  "Copilot-Integration-Id": string;
3108
3162
  };
3163
+ compat: {
3164
+ supportsEagerToolInputStreaming: false;
3165
+ };
3109
3166
  reasoning: true;
3110
3167
  input: ("image" | "text")[];
3111
3168
  cost: {
@@ -3129,6 +3186,9 @@ export declare const MODELS: {
3129
3186
  "Editor-Plugin-Version": string;
3130
3187
  "Copilot-Integration-Id": string;
3131
3188
  };
3189
+ compat: {
3190
+ supportsEagerToolInputStreaming: false;
3191
+ };
3132
3192
  reasoning: true;
3133
3193
  input: ("image" | "text")[];
3134
3194
  cost: {
@@ -4315,6 +4375,23 @@ export declare const MODELS: {
4315
4375
  contextWindow: number;
4316
4376
  maxTokens: number;
4317
4377
  };
4378
+ readonly "gemini-3.1-flash-lite-preview": {
4379
+ id: string;
4380
+ name: string;
4381
+ api: "google-gemini-cli";
4382
+ provider: string;
4383
+ baseUrl: string;
4384
+ reasoning: true;
4385
+ input: ("image" | "text")[];
4386
+ cost: {
4387
+ input: number;
4388
+ output: number;
4389
+ cacheRead: number;
4390
+ cacheWrite: number;
4391
+ };
4392
+ contextWindow: number;
4393
+ maxTokens: number;
4394
+ };
4318
4395
  readonly "gemini-3.1-pro-preview": {
4319
4396
  id: string;
4320
4397
  name: string;
@@ -5293,6 +5370,9 @@ export declare const MODELS: {
5293
5370
  api: "anthropic-messages";
5294
5371
  provider: string;
5295
5372
  baseUrl: string;
5373
+ headers: {
5374
+ "User-Agent": string;
5375
+ };
5296
5376
  reasoning: true;
5297
5377
  input: ("image" | "text")[];
5298
5378
  cost: {
@@ -5310,6 +5390,9 @@ export declare const MODELS: {
5310
5390
  api: "anthropic-messages";
5311
5391
  provider: string;
5312
5392
  baseUrl: string;
5393
+ headers: {
5394
+ "User-Agent": string;
5395
+ };
5313
5396
  reasoning: true;
5314
5397
  input: ("image" | "text")[];
5315
5398
  cost: {
@@ -5327,6 +5410,9 @@ export declare const MODELS: {
5327
5410
  api: "anthropic-messages";
5328
5411
  provider: string;
5329
5412
  baseUrl: string;
5413
+ headers: {
5414
+ "User-Agent": string;
5415
+ };
5330
5416
  reasoning: true;
5331
5417
  input: "text"[];
5332
5418
  cost: {
@@ -6400,6 +6486,23 @@ export declare const MODELS: {
6400
6486
  contextWindow: number;
6401
6487
  maxTokens: number;
6402
6488
  };
6489
+ readonly "gpt-5.5": {
6490
+ id: string;
6491
+ name: string;
6492
+ api: "openai-responses";
6493
+ provider: string;
6494
+ baseUrl: string;
6495
+ reasoning: true;
6496
+ input: ("image" | "text")[];
6497
+ cost: {
6498
+ input: number;
6499
+ output: number;
6500
+ cacheRead: number;
6501
+ cacheWrite: number;
6502
+ };
6503
+ contextWindow: number;
6504
+ maxTokens: number;
6505
+ };
6403
6506
  readonly o1: {
6404
6507
  id: string;
6405
6508
  name: string;
@@ -6691,6 +6794,23 @@ export declare const MODELS: {
6691
6794
  contextWindow: number;
6692
6795
  maxTokens: number;
6693
6796
  };
6797
+ readonly "gpt-5.5": {
6798
+ id: string;
6799
+ name: string;
6800
+ api: "openai-codex-responses";
6801
+ provider: string;
6802
+ baseUrl: string;
6803
+ reasoning: true;
6804
+ input: ("image" | "text")[];
6805
+ cost: {
6806
+ input: number;
6807
+ output: number;
6808
+ cacheRead: number;
6809
+ cacheWrite: number;
6810
+ };
6811
+ contextWindow: number;
6812
+ maxTokens: number;
6813
+ };
6694
6814
  };
6695
6815
  readonly opencode: {
6696
6816
  readonly "big-pickle": {
@@ -7169,6 +7289,57 @@ export declare const MODELS: {
7169
7289
  contextWindow: number;
7170
7290
  maxTokens: number;
7171
7291
  };
7292
+ readonly "gpt-5.5": {
7293
+ id: string;
7294
+ name: string;
7295
+ api: "openai-responses";
7296
+ provider: string;
7297
+ baseUrl: string;
7298
+ reasoning: true;
7299
+ input: ("image" | "text")[];
7300
+ cost: {
7301
+ input: number;
7302
+ output: number;
7303
+ cacheRead: number;
7304
+ cacheWrite: number;
7305
+ };
7306
+ contextWindow: number;
7307
+ maxTokens: number;
7308
+ };
7309
+ readonly "gpt-5.5-pro": {
7310
+ id: string;
7311
+ name: string;
7312
+ api: "openai-responses";
7313
+ provider: string;
7314
+ baseUrl: string;
7315
+ reasoning: true;
7316
+ input: ("image" | "text")[];
7317
+ cost: {
7318
+ input: number;
7319
+ output: number;
7320
+ cacheRead: number;
7321
+ cacheWrite: number;
7322
+ };
7323
+ contextWindow: number;
7324
+ maxTokens: number;
7325
+ };
7326
+ readonly "hy3-preview-free": {
7327
+ id: string;
7328
+ name: string;
7329
+ api: "openai-completions";
7330
+ provider: string;
7331
+ baseUrl: string;
7332
+ reasoning: true;
7333
+ input: "text"[];
7334
+ cost: {
7335
+ input: number;
7336
+ output: number;
7337
+ cacheRead: number;
7338
+ cacheWrite: number;
7339
+ };
7340
+ contextWindow: number;
7341
+ maxTokens: number;
7342
+ };
7172
7343
  readonly "kimi-k2.5": {
7173
7344
  id: string;
7174
7345
  name: string;
@@ -7324,6 +7495,40 @@ export declare const MODELS: {
7324
7495
  };
7325
7496
  };
7326
7497
  readonly "opencode-go": {
7498
+ readonly "deepseek-v4-flash": {
7499
+ id: string;
7500
+ name: string;
7501
+ api: "openai-completions";
7502
+ provider: string;
7503
+ baseUrl: string;
7504
+ reasoning: true;
7505
+ input: "text"[];
7506
+ cost: {
7507
+ input: number;
7508
+ output: number;
7509
+ cacheRead: number;
7510
+ cacheWrite: number;
7511
+ };
7512
+ contextWindow: number;
7513
+ maxTokens: number;
7514
+ };
7515
+ readonly "deepseek-v4-pro": {
7516
+ id: string;
7517
+ name: string;
7518
+ api: "openai-completions";
7519
+ provider: string;
7520
+ baseUrl: string;
7521
+ reasoning: true;
7522
+ input: "text"[];
7523
+ cost: {
7524
+ input: number;
7525
+ output: number;
7526
+ cacheRead: number;
7527
+ cacheWrite: number;
7528
+ };
7529
+ contextWindow: number;
7530
+ maxTokens: number;
7531
+ };
7327
7532
  readonly "glm-5": {
7328
7533
  id: string;
7329
7534
  name: string;
@@ -8261,6 +8466,40 @@ export declare const MODELS: {
8261
8466
  contextWindow: number;
8262
8467
  maxTokens: number;
8263
8468
  };
8469
+ readonly "deepseek/deepseek-v4-flash": {
8470
+ id: string;
8471
+ name: string;
8472
+ api: "openai-completions";
8473
+ provider: string;
8474
+ baseUrl: string;
8475
+ reasoning: true;
8476
+ input: "text"[];
8477
+ cost: {
8478
+ input: number;
8479
+ output: number;
8480
+ cacheRead: number;
8481
+ cacheWrite: number;
8482
+ };
8483
+ contextWindow: number;
8484
+ maxTokens: number;
8485
+ };
8486
+ readonly "deepseek/deepseek-v4-pro": {
8487
+ id: string;
8488
+ name: string;
8489
+ api: "openai-completions";
8490
+ provider: string;
8491
+ baseUrl: string;
8492
+ reasoning: true;
8493
+ input: "text"[];
8494
+ cost: {
8495
+ input: number;
8496
+ output: number;
8497
+ cacheRead: number;
8498
+ cacheWrite: number;
8499
+ };
8500
+ contextWindow: number;
8501
+ maxTokens: number;
8502
+ };
8264
8503
  readonly "essentialai/rnj-1-instruct": {
8265
8504
  id: string;
8266
8505
  name: string;
@@ -8482,6 +8721,40 @@ export declare const MODELS: {
8482
8721
  contextWindow: number;
8483
8722
  maxTokens: number;
8484
8723
  };
8724
+ readonly "google/gemma-3-12b-it": {
8725
+ id: string;
8726
+ name: string;
8727
+ api: "openai-completions";
8728
+ provider: string;
8729
+ baseUrl: string;
8730
+ reasoning: false;
8731
+ input: ("image" | "text")[];
8732
+ cost: {
8733
+ input: number;
8734
+ output: number;
8735
+ cacheRead: number;
8736
+ cacheWrite: number;
8737
+ };
8738
+ contextWindow: number;
8739
+ maxTokens: number;
8740
+ };
8741
+ readonly "google/gemma-3-27b-it": {
8742
+ id: string;
8743
+ name: string;
8744
+ api: "openai-completions";
8745
+ provider: string;
8746
+ baseUrl: string;
8747
+ reasoning: false;
8748
+ input: ("image" | "text")[];
8749
+ cost: {
8750
+ input: number;
8751
+ output: number;
8752
+ cacheRead: number;
8753
+ cacheWrite: number;
8754
+ };
8755
+ contextWindow: number;
8756
+ maxTokens: number;
8757
+ };
8485
8758
  readonly "google/gemma-4-26b-a4b-it": {
8486
8759
  id: string;
8487
8760
  name: string;
@@ -8567,6 +8840,23 @@ export declare const MODELS: {
8567
8840
  contextWindow: number;
8568
8841
  maxTokens: number;
8569
8842
  };
8843
+ readonly "inclusionai/ling-2.6-1t:free": {
8844
+ id: string;
8845
+ name: string;
8846
+ api: "openai-completions";
8847
+ provider: string;
8848
+ baseUrl: string;
8849
+ reasoning: false;
8850
+ input: "text"[];
8851
+ cost: {
8852
+ input: number;
8853
+ output: number;
8854
+ cacheRead: number;
8855
+ cacheWrite: number;
8856
+ };
8857
+ contextWindow: number;
8858
+ maxTokens: number;
8859
+ };
8570
8860
  readonly "inclusionai/ling-2.6-flash:free": {
8571
8861
  id: string;
8572
8862
  name: string;
@@ -10080,6 +10370,40 @@ export declare const MODELS: {
10080
10370
  contextWindow: number;
10081
10371
  maxTokens: number;
10082
10372
  };
10373
+ readonly "openai/gpt-5.5": {
10374
+ id: string;
10375
+ name: string;
10376
+ api: "openai-completions";
10377
+ provider: string;
10378
+ baseUrl: string;
10379
+ reasoning: true;
10380
+ input: ("image" | "text")[];
10381
+ cost: {
10382
+ input: number;
10383
+ output: number;
10384
+ cacheRead: number;
10385
+ cacheWrite: number;
10386
+ };
10387
+ contextWindow: number;
10388
+ maxTokens: number;
10389
+ };
10390
+ readonly "openai/gpt-5.5-pro": {
10391
+ id: string;
10392
+ name: string;
10393
+ api: "openai-completions";
10394
+ provider: string;
10395
+ baseUrl: string;
10396
+ reasoning: true;
10397
+ input: ("image" | "text")[];
10398
+ cost: {
10399
+ input: number;
10400
+ output: number;
10401
+ cacheRead: number;
10402
+ cacheWrite: number;
10403
+ };
10404
+ contextWindow: number;
10405
+ maxTokens: number;
10406
+ };
10083
10407
  readonly "openai/gpt-audio": {
10084
10408
  id: string;
10085
10409
  name: string;
@@ -11236,6 +11560,23 @@ export declare const MODELS: {
11236
11560
  contextWindow: number;
11237
11561
  maxTokens: number;
11238
11562
  };
11563
+ readonly "tencent/hy3-preview:free": {
11564
+ id: string;
11565
+ name: string;
11566
+ api: "openai-completions";
11567
+ provider: string;
11568
+ baseUrl: string;
11569
+ reasoning: true;
11570
+ input: "text"[];
11571
+ cost: {
11572
+ input: number;
11573
+ output: number;
11574
+ cacheRead: number;
11575
+ cacheWrite: number;
11576
+ };
11577
+ contextWindow: number;
11578
+ maxTokens: number;
11579
+ };
11239
11580
  readonly "thedrummer/rocinante-12b": {
11240
11581
  id: string;
11241
11582
  name: string;
@@ -12445,6 +12786,40 @@ export declare const MODELS: {
12445
12786
  contextWindow: number;
12446
12787
  maxTokens: number;
12447
12788
  };
12789
+ readonly "deepseek/deepseek-v4-flash": {
12790
+ id: string;
12791
+ name: string;
12792
+ api: "anthropic-messages";
12793
+ provider: string;
12794
+ baseUrl: string;
12795
+ reasoning: true;
12796
+ input: "text"[];
12797
+ cost: {
12798
+ input: number;
12799
+ output: number;
12800
+ cacheRead: number;
12801
+ cacheWrite: number;
12802
+ };
12803
+ contextWindow: number;
12804
+ maxTokens: number;
12805
+ };
12806
+ readonly "deepseek/deepseek-v4-pro": {
12807
+ id: string;
12808
+ name: string;
12809
+ api: "anthropic-messages";
12810
+ provider: string;
12811
+ baseUrl: string;
12812
+ reasoning: true;
12813
+ input: "text"[];
12814
+ cost: {
12815
+ input: number;
12816
+ output: number;
12817
+ cacheRead: number;
12818
+ cacheWrite: number;
12819
+ };
12820
+ contextWindow: number;
12821
+ maxTokens: number;
12822
+ };
12448
12823
  readonly "google/gemini-2.0-flash": {
12449
12824
  id: string;
12450
12825
  name: string;
@@ -13720,6 +14095,40 @@ export declare const MODELS: {
13720
14095
  contextWindow: number;
13721
14096
  maxTokens: number;
13722
14097
  };
14098
+ readonly "openai/gpt-5.5": {
14099
+ id: string;
14100
+ name: string;
14101
+ api: "anthropic-messages";
14102
+ provider: string;
14103
+ baseUrl: string;
14104
+ reasoning: true;
14105
+ input: ("image" | "text")[];
14106
+ cost: {
14107
+ input: number;
14108
+ output: number;
14109
+ cacheRead: number;
14110
+ cacheWrite: number;
14111
+ };
14112
+ contextWindow: number;
14113
+ maxTokens: number;
14114
+ };
14115
+ readonly "openai/gpt-5.5-pro": {
14116
+ id: string;
14117
+ name: string;
14118
+ api: "anthropic-messages";
14119
+ provider: string;
14120
+ baseUrl: string;
14121
+ reasoning: true;
14122
+ input: ("image" | "text")[];
14123
+ cost: {
14124
+ input: number;
14125
+ output: number;
14126
+ cacheRead: number;
14127
+ cacheWrite: number;
14128
+ };
14129
+ contextWindow: number;
14130
+ maxTokens: number;
14131
+ };
13723
14132
  readonly "openai/gpt-oss-20b": {
13724
14133
  id: string;
13725
14134
  name: string;