@hyperspaceng/neural-ai 0.69.1 → 0.70.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +4 -1
  2. package/dist/env-api-keys.d.ts +9 -0
  3. package/dist/env-api-keys.d.ts.map +1 -1
  4. package/dist/env-api-keys.js +42 -31
  5. package/dist/env-api-keys.js.map +1 -1
  6. package/dist/models.d.ts +2 -1
  7. package/dist/models.d.ts.map +1 -1
  8. package/dist/models.generated.d.ts +298 -195
  9. package/dist/models.generated.d.ts.map +1 -1
  10. package/dist/models.generated.js +291 -200
  11. package/dist/models.generated.js.map +1 -1
  12. package/dist/models.js +7 -2
  13. package/dist/models.js.map +1 -1
  14. package/dist/providers/anthropic.d.ts.map +1 -1
  15. package/dist/providers/anthropic.js +35 -15
  16. package/dist/providers/anthropic.js.map +1 -1
  17. package/dist/providers/azure-openai-responses.d.ts.map +1 -1
  18. package/dist/providers/azure-openai-responses.js +7 -4
  19. package/dist/providers/azure-openai-responses.js.map +1 -1
  20. package/dist/providers/google-vertex.d.ts.map +1 -1
  21. package/dist/providers/google-vertex.js +34 -13
  22. package/dist/providers/google-vertex.js.map +1 -1
  23. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  24. package/dist/providers/openai-codex-responses.js +9 -8
  25. package/dist/providers/openai-codex-responses.js.map +1 -1
  26. package/dist/providers/openai-completions.d.ts.map +1 -1
  27. package/dist/providers/openai-completions.js +97 -45
  28. package/dist/providers/openai-completions.js.map +1 -1
  29. package/dist/providers/openai-responses.d.ts.map +1 -1
  30. package/dist/providers/openai-responses.js +26 -23
  31. package/dist/providers/openai-responses.js.map +1 -1
  32. package/dist/providers/simple-options.d.ts.map +1 -1
  33. package/dist/providers/simple-options.js +2 -0
  34. package/dist/providers/simple-options.js.map +1 -1
  35. package/dist/types.d.ts +35 -4
  36. package/dist/types.d.ts.map +1 -1
  37. package/dist/types.js.map +1 -1
  38. package/package.json +1 -1
@@ -2744,6 +2744,64 @@ export declare const MODELS: {
2744
2744
  maxTokens: number;
2745
2745
  };
2746
2746
  };
2747
+ readonly deepseek: {
2748
+ readonly "deepseek-v4-flash": {
2749
+ id: string;
2750
+ name: string;
2751
+ api: "openai-completions";
2752
+ provider: string;
2753
+ baseUrl: string;
2754
+ compat: {
2755
+ requiresReasoningContentOnAssistantMessages: true;
2756
+ thinkingFormat: "deepseek";
2757
+ reasoningEffortMap: {
2758
+ minimal: string;
2759
+ low: string;
2760
+ medium: string;
2761
+ high: string;
2762
+ xhigh: string;
2763
+ };
2764
+ };
2765
+ reasoning: true;
2766
+ input: "text"[];
2767
+ cost: {
2768
+ input: number;
2769
+ output: number;
2770
+ cacheRead: number;
2771
+ cacheWrite: number;
2772
+ };
2773
+ contextWindow: number;
2774
+ maxTokens: number;
2775
+ };
2776
+ readonly "deepseek-v4-pro": {
2777
+ id: string;
2778
+ name: string;
2779
+ api: "openai-completions";
2780
+ provider: string;
2781
+ baseUrl: string;
2782
+ compat: {
2783
+ requiresReasoningContentOnAssistantMessages: true;
2784
+ thinkingFormat: "deepseek";
2785
+ reasoningEffortMap: {
2786
+ minimal: string;
2787
+ low: string;
2788
+ medium: string;
2789
+ high: string;
2790
+ xhigh: string;
2791
+ };
2792
+ };
2793
+ reasoning: true;
2794
+ input: "text"[];
2795
+ cost: {
2796
+ input: number;
2797
+ output: number;
2798
+ cacheRead: number;
2799
+ cacheWrite: number;
2800
+ };
2801
+ contextWindow: number;
2802
+ maxTokens: number;
2803
+ };
2804
+ };
2747
2805
  readonly fireworks: {
2748
2806
  readonly "accounts/fireworks/models/deepseek-v3p1": {
2749
2807
  id: string;
@@ -3065,6 +3123,9 @@ export declare const MODELS: {
3065
3123
  "Editor-Plugin-Version": string;
3066
3124
  "Copilot-Integration-Id": string;
3067
3125
  };
3126
+ compat: {
3127
+ supportsEagerToolInputStreaming: false;
3128
+ };
3068
3129
  reasoning: true;
3069
3130
  input: ("image" | "text")[];
3070
3131
  cost: {
@@ -3157,6 +3218,9 @@ export declare const MODELS: {
3157
3218
  "Editor-Plugin-Version": string;
3158
3219
  "Copilot-Integration-Id": string;
3159
3220
  };
3221
+ compat: {
3222
+ supportsEagerToolInputStreaming: false;
3223
+ };
3160
3224
  reasoning: true;
3161
3225
  input: ("image" | "text")[];
3162
3226
  cost: {
@@ -3180,6 +3244,9 @@ export declare const MODELS: {
3180
3244
  "Editor-Plugin-Version": string;
3181
3245
  "Copilot-Integration-Id": string;
3182
3246
  };
3247
+ compat: {
3248
+ supportsEagerToolInputStreaming: false;
3249
+ };
3183
3250
  reasoning: true;
3184
3251
  input: ("image" | "text")[];
3185
3252
  cost: {
@@ -3635,6 +3702,29 @@ export declare const MODELS: {
3635
3702
  contextWindow: number;
3636
3703
  maxTokens: number;
3637
3704
  };
3705
+ readonly "gpt-5.5": {
3706
+ id: string;
3707
+ name: string;
3708
+ api: "openai-responses";
3709
+ provider: string;
3710
+ baseUrl: string;
3711
+ headers: {
3712
+ "User-Agent": string;
3713
+ "Editor-Version": string;
3714
+ "Editor-Plugin-Version": string;
3715
+ "Copilot-Integration-Id": string;
3716
+ };
3717
+ reasoning: true;
3718
+ input: ("image" | "text")[];
3719
+ cost: {
3720
+ input: number;
3721
+ output: number;
3722
+ cacheRead: number;
3723
+ cacheWrite: number;
3724
+ };
3725
+ contextWindow: number;
3726
+ maxTokens: number;
3727
+ };
3638
3728
  readonly "grok-code-fast-1": {
3639
3729
  id: string;
3640
3730
  name: string;
@@ -5361,6 +5451,9 @@ export declare const MODELS: {
5361
5451
  api: "anthropic-messages";
5362
5452
  provider: string;
5363
5453
  baseUrl: string;
5454
+ headers: {
5455
+ "User-Agent": string;
5456
+ };
5364
5457
  reasoning: true;
5365
5458
  input: ("image" | "text")[];
5366
5459
  cost: {
@@ -5378,6 +5471,9 @@ export declare const MODELS: {
5378
5471
  api: "anthropic-messages";
5379
5472
  provider: string;
5380
5473
  baseUrl: string;
5474
+ headers: {
5475
+ "User-Agent": string;
5476
+ };
5381
5477
  reasoning: true;
5382
5478
  input: ("image" | "text")[];
5383
5479
  cost: {
@@ -5395,6 +5491,9 @@ export declare const MODELS: {
5395
5491
  api: "anthropic-messages";
5396
5492
  provider: string;
5397
5493
  baseUrl: string;
5494
+ headers: {
5495
+ "User-Agent": string;
5496
+ };
5398
5497
  reasoning: true;
5399
5498
  input: "text"[];
5400
5499
  cost: {
@@ -6776,6 +6875,23 @@ export declare const MODELS: {
6776
6875
  contextWindow: number;
6777
6876
  maxTokens: number;
6778
6877
  };
6878
+ readonly "gpt-5.5": {
6879
+ id: string;
6880
+ name: string;
6881
+ api: "openai-codex-responses";
6882
+ provider: string;
6883
+ baseUrl: string;
6884
+ reasoning: true;
6885
+ input: ("image" | "text")[];
6886
+ cost: {
6887
+ input: number;
6888
+ output: number;
6889
+ cacheRead: number;
6890
+ cacheWrite: number;
6891
+ };
6892
+ contextWindow: number;
6893
+ maxTokens: number;
6894
+ };
6779
6895
  };
6780
6896
  readonly opencode: {
6781
6897
  readonly "big-pickle": {
@@ -7254,6 +7370,40 @@ export declare const MODELS: {
7254
7370
  contextWindow: number;
7255
7371
  maxTokens: number;
7256
7372
  };
7373
+ readonly "gpt-5.5": {
7374
+ id: string;
7375
+ name: string;
7376
+ api: "openai-responses";
7377
+ provider: string;
7378
+ baseUrl: string;
7379
+ reasoning: true;
7380
+ input: ("image" | "text")[];
7381
+ cost: {
7382
+ input: number;
7383
+ output: number;
7384
+ cacheRead: number;
7385
+ cacheWrite: number;
7386
+ };
7387
+ contextWindow: number;
7388
+ maxTokens: number;
7389
+ };
7390
+ readonly "gpt-5.5-pro": {
7391
+ id: string;
7392
+ name: string;
7393
+ api: "openai-responses";
7394
+ provider: string;
7395
+ baseUrl: string;
7396
+ reasoning: true;
7397
+ input: ("image" | "text")[];
7398
+ cost: {
7399
+ input: number;
7400
+ output: number;
7401
+ cacheRead: number;
7402
+ cacheWrite: number;
7403
+ };
7404
+ contextWindow: number;
7405
+ maxTokens: number;
7406
+ };
7257
7407
  readonly "hy3-preview-free": {
7258
7408
  id: string;
7259
7409
  name: string;
@@ -7426,6 +7576,46 @@ export declare const MODELS: {
7426
7576
  };
7427
7577
  };
7428
7578
  readonly "opencode-go": {
7579
+ readonly "deepseek-v4-flash": {
7580
+ id: string;
7581
+ name: string;
7582
+ api: "openai-completions";
7583
+ provider: string;
7584
+ baseUrl: string;
7585
+ compat: {
7586
+ requiresReasoningContentOnAssistantMessages: true;
7587
+ };
7588
+ reasoning: true;
7589
+ input: "text"[];
7590
+ cost: {
7591
+ input: number;
7592
+ output: number;
7593
+ cacheRead: number;
7594
+ cacheWrite: number;
7595
+ };
7596
+ contextWindow: number;
7597
+ maxTokens: number;
7598
+ };
7599
+ readonly "deepseek-v4-pro": {
7600
+ id: string;
7601
+ name: string;
7602
+ api: "openai-completions";
7603
+ provider: string;
7604
+ baseUrl: string;
7605
+ compat: {
7606
+ requiresReasoningContentOnAssistantMessages: true;
7607
+ };
7608
+ reasoning: true;
7609
+ input: "text"[];
7610
+ cost: {
7611
+ input: number;
7612
+ output: number;
7613
+ cacheRead: number;
7614
+ cacheWrite: number;
7615
+ };
7616
+ contextWindow: number;
7617
+ maxTokens: number;
7618
+ };
7429
7619
  readonly "glm-5": {
7430
7620
  id: string;
7431
7621
  name: string;
@@ -8369,6 +8559,9 @@ export declare const MODELS: {
8369
8559
  api: "openai-completions";
8370
8560
  provider: string;
8371
8561
  baseUrl: string;
8562
+ compat: {
8563
+ requiresReasoningContentOnAssistantMessages: true;
8564
+ };
8372
8565
  reasoning: true;
8373
8566
  input: "text"[];
8374
8567
  cost: {
@@ -8386,6 +8579,9 @@ export declare const MODELS: {
8386
8579
  api: "openai-completions";
8387
8580
  provider: string;
8388
8581
  baseUrl: string;
8582
+ compat: {
8583
+ requiresReasoningContentOnAssistantMessages: true;
8584
+ };
8389
8585
  reasoning: true;
8390
8586
  input: "text"[];
8391
8587
  cost: {
@@ -8618,6 +8814,40 @@ export declare const MODELS: {
8618
8814
  contextWindow: number;
8619
8815
  maxTokens: number;
8620
8816
  };
8817
+ readonly "google/gemma-3-12b-it": {
8818
+ id: string;
8819
+ name: string;
8820
+ api: "openai-completions";
8821
+ provider: string;
8822
+ baseUrl: string;
8823
+ reasoning: false;
8824
+ input: ("image" | "text")[];
8825
+ cost: {
8826
+ input: number;
8827
+ output: number;
8828
+ cacheRead: number;
8829
+ cacheWrite: number;
8830
+ };
8831
+ contextWindow: number;
8832
+ maxTokens: number;
8833
+ };
8834
+ readonly "google/gemma-3-27b-it": {
8835
+ id: string;
8836
+ name: string;
8837
+ api: "openai-completions";
8838
+ provider: string;
8839
+ baseUrl: string;
8840
+ reasoning: false;
8841
+ input: ("image" | "text")[];
8842
+ cost: {
8843
+ input: number;
8844
+ output: number;
8845
+ cacheRead: number;
8846
+ cacheWrite: number;
8847
+ };
8848
+ contextWindow: number;
8849
+ maxTokens: number;
8850
+ };
8621
8851
  readonly "google/gemma-4-26b-a4b-it": {
8622
8852
  id: string;
8623
8853
  name: string;
@@ -10233,6 +10463,40 @@ export declare const MODELS: {
10233
10463
  contextWindow: number;
10234
10464
  maxTokens: number;
10235
10465
  };
10466
+ readonly "openai/gpt-5.5": {
10467
+ id: string;
10468
+ name: string;
10469
+ api: "openai-completions";
10470
+ provider: string;
10471
+ baseUrl: string;
10472
+ reasoning: true;
10473
+ input: ("image" | "text")[];
10474
+ cost: {
10475
+ input: number;
10476
+ output: number;
10477
+ cacheRead: number;
10478
+ cacheWrite: number;
10479
+ };
10480
+ contextWindow: number;
10481
+ maxTokens: number;
10482
+ };
10483
+ readonly "openai/gpt-5.5-pro": {
10484
+ id: string;
10485
+ name: string;
10486
+ api: "openai-completions";
10487
+ provider: string;
10488
+ baseUrl: string;
10489
+ reasoning: true;
10490
+ input: ("image" | "text")[];
10491
+ cost: {
10492
+ input: number;
10493
+ output: number;
10494
+ cacheRead: number;
10495
+ cacheWrite: number;
10496
+ };
10497
+ contextWindow: number;
10498
+ maxTokens: number;
10499
+ };
10236
10500
  readonly "openai/gpt-audio": {
10237
10501
  id: string;
10238
10502
  name: string;
@@ -13924,6 +14188,40 @@ export declare const MODELS: {
13924
14188
  contextWindow: number;
13925
14189
  maxTokens: number;
13926
14190
  };
14191
+ readonly "openai/gpt-5.5": {
14192
+ id: string;
14193
+ name: string;
14194
+ api: "anthropic-messages";
14195
+ provider: string;
14196
+ baseUrl: string;
14197
+ reasoning: true;
14198
+ input: ("image" | "text")[];
14199
+ cost: {
14200
+ input: number;
14201
+ output: number;
14202
+ cacheRead: number;
14203
+ cacheWrite: number;
14204
+ };
14205
+ contextWindow: number;
14206
+ maxTokens: number;
14207
+ };
14208
+ readonly "openai/gpt-5.5-pro": {
14209
+ id: string;
14210
+ name: string;
14211
+ api: "anthropic-messages";
14212
+ provider: string;
14213
+ baseUrl: string;
14214
+ reasoning: true;
14215
+ input: ("image" | "text")[];
14216
+ cost: {
14217
+ input: number;
14218
+ output: number;
14219
+ cacheRead: number;
14220
+ cacheWrite: number;
14221
+ };
14222
+ contextWindow: number;
14223
+ maxTokens: number;
14224
+ };
13927
14225
  readonly "openai/gpt-oss-20b": {
13928
14226
  id: string;
13929
14227
  name: string;
@@ -15050,27 +15348,6 @@ export declare const MODELS: {
15050
15348
  };
15051
15349
  };
15052
15350
  readonly zai: {
15053
- readonly "glm-4.5": {
15054
- id: string;
15055
- name: string;
15056
- api: "openai-completions";
15057
- provider: string;
15058
- baseUrl: string;
15059
- compat: {
15060
- supportsDeveloperRole: false;
15061
- thinkingFormat: "zai";
15062
- };
15063
- reasoning: true;
15064
- input: "text"[];
15065
- cost: {
15066
- input: number;
15067
- output: number;
15068
- cacheRead: number;
15069
- cacheWrite: number;
15070
- };
15071
- contextWindow: number;
15072
- maxTokens: number;
15073
- };
15074
15351
  readonly "glm-4.5-air": {
15075
15352
  id: string;
15076
15353
  name: string;
@@ -15092,92 +15369,6 @@ export declare const MODELS: {
15092
15369
  contextWindow: number;
15093
15370
  maxTokens: number;
15094
15371
  };
15095
- readonly "glm-4.5-flash": {
15096
- id: string;
15097
- name: string;
15098
- api: "openai-completions";
15099
- provider: string;
15100
- baseUrl: string;
15101
- compat: {
15102
- supportsDeveloperRole: false;
15103
- thinkingFormat: "zai";
15104
- };
15105
- reasoning: true;
15106
- input: "text"[];
15107
- cost: {
15108
- input: number;
15109
- output: number;
15110
- cacheRead: number;
15111
- cacheWrite: number;
15112
- };
15113
- contextWindow: number;
15114
- maxTokens: number;
15115
- };
15116
- readonly "glm-4.5v": {
15117
- id: string;
15118
- name: string;
15119
- api: "openai-completions";
15120
- provider: string;
15121
- baseUrl: string;
15122
- compat: {
15123
- supportsDeveloperRole: false;
15124
- thinkingFormat: "zai";
15125
- };
15126
- reasoning: true;
15127
- input: ("image" | "text")[];
15128
- cost: {
15129
- input: number;
15130
- output: number;
15131
- cacheRead: number;
15132
- cacheWrite: number;
15133
- };
15134
- contextWindow: number;
15135
- maxTokens: number;
15136
- };
15137
- readonly "glm-4.6": {
15138
- id: string;
15139
- name: string;
15140
- api: "openai-completions";
15141
- provider: string;
15142
- baseUrl: string;
15143
- compat: {
15144
- supportsDeveloperRole: false;
15145
- thinkingFormat: "zai";
15146
- zaiToolStream: true;
15147
- };
15148
- reasoning: true;
15149
- input: "text"[];
15150
- cost: {
15151
- input: number;
15152
- output: number;
15153
- cacheRead: number;
15154
- cacheWrite: number;
15155
- };
15156
- contextWindow: number;
15157
- maxTokens: number;
15158
- };
15159
- readonly "glm-4.6v": {
15160
- id: string;
15161
- name: string;
15162
- api: "openai-completions";
15163
- provider: string;
15164
- baseUrl: string;
15165
- compat: {
15166
- supportsDeveloperRole: false;
15167
- thinkingFormat: "zai";
15168
- zaiToolStream: true;
15169
- };
15170
- reasoning: true;
15171
- input: ("image" | "text")[];
15172
- cost: {
15173
- input: number;
15174
- output: number;
15175
- cacheRead: number;
15176
- cacheWrite: number;
15177
- };
15178
- contextWindow: number;
15179
- maxTokens: number;
15180
- };
15181
15372
  readonly "glm-4.7": {
15182
15373
  id: string;
15183
15374
  name: string;
@@ -15200,72 +15391,6 @@ export declare const MODELS: {
15200
15391
  contextWindow: number;
15201
15392
  maxTokens: number;
15202
15393
  };
15203
- readonly "glm-4.7-flash": {
15204
- id: string;
15205
- name: string;
15206
- api: "openai-completions";
15207
- provider: string;
15208
- baseUrl: string;
15209
- compat: {
15210
- supportsDeveloperRole: false;
15211
- thinkingFormat: "zai";
15212
- zaiToolStream: true;
15213
- };
15214
- reasoning: true;
15215
- input: "text"[];
15216
- cost: {
15217
- input: number;
15218
- output: number;
15219
- cacheRead: number;
15220
- cacheWrite: number;
15221
- };
15222
- contextWindow: number;
15223
- maxTokens: number;
15224
- };
15225
- readonly "glm-4.7-flashx": {
15226
- id: string;
15227
- name: string;
15228
- api: "openai-completions";
15229
- provider: string;
15230
- baseUrl: string;
15231
- compat: {
15232
- supportsDeveloperRole: false;
15233
- thinkingFormat: "zai";
15234
- zaiToolStream: true;
15235
- };
15236
- reasoning: true;
15237
- input: "text"[];
15238
- cost: {
15239
- input: number;
15240
- output: number;
15241
- cacheRead: number;
15242
- cacheWrite: number;
15243
- };
15244
- contextWindow: number;
15245
- maxTokens: number;
15246
- };
15247
- readonly "glm-5": {
15248
- id: string;
15249
- name: string;
15250
- api: "openai-completions";
15251
- provider: string;
15252
- baseUrl: string;
15253
- compat: {
15254
- supportsDeveloperRole: false;
15255
- thinkingFormat: "zai";
15256
- zaiToolStream: true;
15257
- };
15258
- reasoning: true;
15259
- input: "text"[];
15260
- cost: {
15261
- input: number;
15262
- output: number;
15263
- cacheRead: number;
15264
- cacheWrite: number;
15265
- };
15266
- contextWindow: number;
15267
- maxTokens: number;
15268
- };
15269
15394
  readonly "glm-5-turbo": {
15270
15395
  id: string;
15271
15396
  name: string;
@@ -15310,28 +15435,6 @@ export declare const MODELS: {
15310
15435
  contextWindow: number;
15311
15436
  maxTokens: number;
15312
15437
  };
15313
- readonly "glm-5v-turbo": {
15314
- id: string;
15315
- name: string;
15316
- api: "openai-completions";
15317
- provider: string;
15318
- baseUrl: string;
15319
- compat: {
15320
- supportsDeveloperRole: false;
15321
- thinkingFormat: "zai";
15322
- zaiToolStream: true;
15323
- };
15324
- reasoning: true;
15325
- input: ("image" | "text")[];
15326
- cost: {
15327
- input: number;
15328
- output: number;
15329
- cacheRead: number;
15330
- cacheWrite: number;
15331
- };
15332
- contextWindow: number;
15333
- maxTokens: number;
15334
- };
15335
15438
  };
15336
15439
  };
15337
15440
  //# sourceMappingURL=models.generated.d.ts.map