@hyperspaceng/neural-ai 0.67.4 → 0.67.69

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56):
  1. package/dist/index.d.ts +2 -2
  2. package/dist/index.d.ts.map +1 -1
  3. package/dist/index.js.map +1 -1
  4. package/dist/models.d.ts +1 -1
  5. package/dist/models.d.ts.map +1 -1
  6. package/dist/models.generated.d.ts +27 -72
  7. package/dist/models.generated.d.ts.map +1 -1
  8. package/dist/models.generated.js +43 -93
  9. package/dist/models.generated.js.map +1 -1
  10. package/dist/models.js +5 -2
  11. package/dist/models.js.map +1 -1
  12. package/dist/providers/amazon-bedrock.d.ts +18 -0
  13. package/dist/providers/amazon-bedrock.d.ts.map +1 -1
  14. package/dist/providers/amazon-bedrock.js +37 -3
  15. package/dist/providers/amazon-bedrock.js.map +1 -1
  16. package/dist/providers/anthropic.d.ts +16 -2
  17. package/dist/providers/anthropic.d.ts.map +1 -1
  18. package/dist/providers/anthropic.js +36 -15
  19. package/dist/providers/anthropic.js.map +1 -1
  20. package/dist/providers/azure-openai-responses.d.ts.map +1 -1
  21. package/dist/providers/azure-openai-responses.js +5 -1
  22. package/dist/providers/azure-openai-responses.js.map +1 -1
  23. package/dist/providers/faux.d.ts.map +1 -1
  24. package/dist/providers/faux.js +1 -0
  25. package/dist/providers/faux.js.map +1 -1
  26. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  27. package/dist/providers/google-gemini-cli.js +3 -0
  28. package/dist/providers/google-gemini-cli.js.map +1 -1
  29. package/dist/providers/mistral.d.ts +3 -0
  30. package/dist/providers/mistral.d.ts.map +1 -1
  31. package/dist/providers/mistral.js +31 -4
  32. package/dist/providers/mistral.js.map +1 -1
  33. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  34. package/dist/providers/openai-codex-responses.js +10 -7
  35. package/dist/providers/openai-codex-responses.js.map +1 -1
  36. package/dist/providers/openai-completions.d.ts.map +1 -1
  37. package/dist/providers/openai-completions.js +9 -2
  38. package/dist/providers/openai-completions.js.map +1 -1
  39. package/dist/providers/openai-responses-shared.d.ts +1 -0
  40. package/dist/providers/openai-responses-shared.d.ts.map +1 -1
  41. package/dist/providers/openai-responses-shared.js +3 -1
  42. package/dist/providers/openai-responses-shared.js.map +1 -1
  43. package/dist/providers/openai-responses.d.ts.map +1 -1
  44. package/dist/providers/openai-responses.js +6 -2
  45. package/dist/providers/openai-responses.js.map +1 -1
  46. package/dist/providers/simple-options.d.ts.map +1 -1
  47. package/dist/providers/simple-options.js +1 -0
  48. package/dist/providers/simple-options.js.map +1 -1
  49. package/dist/types.d.ts +9 -0
  50. package/dist/types.d.ts.map +1 -1
  51. package/dist/types.js.map +1 -1
  52. package/dist/utils/headers.d.ts +2 -0
  53. package/dist/utils/headers.d.ts.map +1 -0
  54. package/dist/utils/headers.js +8 -0
  55. package/dist/utils/headers.js.map +1 -0
  56. package/package.json +4 -4
@@ -1944,23 +1944,6 @@ export const MODELS = {
1944
1944
  },
1945
1945
  },
1946
1946
  "azure-openai-responses": {
1947
- "codex-mini-latest": {
1948
- id: "codex-mini-latest",
1949
- name: "Codex Mini",
1950
- api: "azure-openai-responses",
1951
- provider: "azure-openai-responses",
1952
- baseUrl: "",
1953
- reasoning: true,
1954
- input: ["text"],
1955
- cost: {
1956
- input: 1.5,
1957
- output: 6,
1958
- cacheRead: 0.375,
1959
- cacheWrite: 0,
1960
- },
1961
- contextWindow: 200000,
1962
- maxTokens: 100000,
1963
- },
1964
1947
  "gpt-4": {
1965
1948
  id: "gpt-4",
1966
1949
  name: "GPT-4",
@@ -2767,6 +2750,24 @@ export const MODELS = {
2767
2750
  contextWindow: 1000000,
2768
2751
  maxTokens: 64000,
2769
2752
  },
2753
+ "claude-opus-4.7": {
2754
+ id: "claude-opus-4.7",
2755
+ name: "Claude Opus 4.7",
2756
+ api: "anthropic-messages",
2757
+ provider: "github-copilot",
2758
+ baseUrl: "https://api.individual.githubcopilot.com",
2759
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
2760
+ reasoning: true,
2761
+ input: ["text", "image"],
2762
+ cost: {
2763
+ input: 0,
2764
+ output: 0,
2765
+ cacheRead: 0,
2766
+ cacheWrite: 0,
2767
+ },
2768
+ contextWindow: 144000,
2769
+ maxTokens: 64000,
2770
+ },
2770
2771
  "claude-sonnet-4": {
2771
2772
  id: "claude-sonnet-4",
2772
2773
  name: "Claude Sonnet 4",
@@ -4767,9 +4768,9 @@ export const MODELS = {
4767
4768
  },
4768
4769
  },
4769
4770
  "kimi-coding": {
4770
- "k2p5": {
4771
- id: "k2p5",
4772
- name: "Kimi K2.5",
4771
+ "kimi-for-coding": {
4772
+ id: "kimi-for-coding",
4773
+ name: "Kimi For Coding",
4773
4774
  api: "anthropic-messages",
4774
4775
  provider: "kimi-coding",
4775
4776
  baseUrl: "https://api.kimi.com/coding",
@@ -5319,23 +5320,6 @@ export const MODELS = {
5319
5320
  },
5320
5321
  },
5321
5322
  "openai": {
5322
- "codex-mini-latest": {
5323
- id: "codex-mini-latest",
5324
- name: "Codex Mini",
5325
- api: "openai-responses",
5326
- provider: "openai",
5327
- baseUrl: "https://api.openai.com/v1",
5328
- reasoning: true,
5329
- input: ["text"],
5330
- cost: {
5331
- input: 1.5,
5332
- output: 6,
5333
- cacheRead: 0.375,
5334
- cacheWrite: 0,
5335
- },
5336
- contextWindow: 200000,
5337
- maxTokens: 100000,
5338
- },
5339
5323
  "gpt-4": {
5340
5324
  id: "gpt-4",
5341
5325
  name: "GPT-4",
@@ -6841,9 +6825,9 @@ export const MODELS = {
6841
6825
  "minimax-m2.5": {
6842
6826
  id: "minimax-m2.5",
6843
6827
  name: "MiniMax M2.5",
6844
- api: "anthropic-messages",
6828
+ api: "openai-completions",
6845
6829
  provider: "opencode-go",
6846
- baseUrl: "https://opencode.ai/zen/go",
6830
+ baseUrl: "https://opencode.ai/zen/go/v1",
6847
6831
  reasoning: true,
6848
6832
  input: ["text"],
6849
6833
  cost: {
@@ -6853,7 +6837,7 @@ export const MODELS = {
6853
6837
  cacheWrite: 0,
6854
6838
  },
6855
6839
  contextWindow: 204800,
6856
- maxTokens: 131072,
6840
+ maxTokens: 65536,
6857
6841
  },
6858
6842
  "minimax-m2.7": {
6859
6843
  id: "minimax-m2.7",
@@ -7526,7 +7510,7 @@ export const MODELS = {
7526
7510
  api: "openai-completions",
7527
7511
  provider: "openrouter",
7528
7512
  baseUrl: "https://openrouter.ai/api/v1",
7529
- reasoning: true,
7513
+ reasoning: false,
7530
7514
  input: ["text"],
7531
7515
  cost: {
7532
7516
  input: 0.19999999999999998,
@@ -7614,13 +7598,13 @@ export const MODELS = {
7614
7598
  reasoning: true,
7615
7599
  input: ["text"],
7616
7600
  cost: {
7617
- input: 0.26,
7618
- output: 0.38,
7619
- cacheRead: 0.13,
7601
+ input: 0.25899999999999995,
7602
+ output: 0.42,
7603
+ cacheRead: 0.135,
7620
7604
  cacheWrite: 0,
7621
7605
  },
7622
7606
  contextWindow: 163840,
7623
- maxTokens: 4096,
7607
+ maxTokens: 163840,
7624
7608
  },
7625
7609
  "deepseek/deepseek-v3.2-exp": {
7626
7610
  id: "deepseek/deepseek-v3.2-exp",
@@ -7869,13 +7853,13 @@ export const MODELS = {
7869
7853
  reasoning: true,
7870
7854
  input: ["text", "image"],
7871
7855
  cost: {
7872
- input: 0.07,
7873
- output: 0.39999999999999997,
7874
- cacheRead: 0.04,
7856
+ input: 0.08,
7857
+ output: 0.35,
7858
+ cacheRead: 0.01,
7875
7859
  cacheWrite: 0,
7876
7860
  },
7877
7861
  contextWindow: 262144,
7878
- maxTokens: 262144,
7862
+ maxTokens: 4096,
7879
7863
  },
7880
7864
  "google/gemma-4-26b-a4b-it:free": {
7881
7865
  id: "google/gemma-4-26b-a4b-it:free",
@@ -8022,13 +8006,13 @@ export const MODELS = {
8022
8006
  reasoning: false,
8023
8007
  input: ["text"],
8024
8008
  cost: {
8025
- input: 0.09999999999999999,
8026
- output: 0.32,
8009
+ input: 0.12,
8010
+ output: 0.38,
8027
8011
  cacheRead: 0,
8028
8012
  cacheWrite: 0,
8029
8013
  },
8030
8014
  contextWindow: 131072,
8031
- maxTokens: 16384,
8015
+ maxTokens: 131072,
8032
8016
  },
8033
8017
  "meta-llama/llama-3.3-70b-instruct:free": {
8034
8018
  id: "meta-llama/llama-3.3-70b-instruct:free",
@@ -8047,23 +8031,6 @@ export const MODELS = {
8047
8031
  contextWindow: 65536,
8048
8032
  maxTokens: 4096,
8049
8033
  },
8050
- "meta-llama/llama-4-maverick": {
8051
- id: "meta-llama/llama-4-maverick",
8052
- name: "Meta: Llama 4 Maverick",
8053
- api: "openai-completions",
8054
- provider: "openrouter",
8055
- baseUrl: "https://openrouter.ai/api/v1",
8056
- reasoning: false,
8057
- input: ["text", "image"],
8058
- cost: {
8059
- input: 0.15,
8060
- output: 0.6,
8061
- cacheRead: 0,
8062
- cacheWrite: 0,
8063
- },
8064
- contextWindow: 1048576,
8065
- maxTokens: 16384,
8066
- },
8067
8034
  "meta-llama/llama-4-scout": {
8068
8035
  id: "meta-llama/llama-4-scout",
8069
8036
  name: "Meta: Llama 4 Scout",
@@ -8719,9 +8686,9 @@ export const MODELS = {
8719
8686
  reasoning: true,
8720
8687
  input: ["text"],
8721
8688
  cost: {
8722
- input: 0.09999999999999999,
8723
- output: 0.5,
8724
- cacheRead: 0.09999999999999999,
8689
+ input: 0.09,
8690
+ output: 0.44999999999999996,
8691
+ cacheRead: 0,
8725
8692
  cacheWrite: 0,
8726
8693
  },
8727
8694
  contextWindow: 262144,
@@ -9101,23 +9068,6 @@ export const MODELS = {
9101
9068
  contextWindow: 128000,
9102
9069
  maxTokens: 16384,
9103
9070
  },
9104
- "openai/gpt-4o:extended": {
9105
- id: "openai/gpt-4o:extended",
9106
- name: "OpenAI: GPT-4o (extended)",
9107
- api: "openai-completions",
9108
- provider: "openrouter",
9109
- baseUrl: "https://openrouter.ai/api/v1",
9110
- reasoning: false,
9111
- input: ["text", "image"],
9112
- cost: {
9113
- input: 6,
9114
- output: 18,
9115
- cacheRead: 0,
9116
- cacheWrite: 0,
9117
- },
9118
- contextWindow: 128000,
9119
- maxTokens: 64000,
9120
- },
9121
9071
  "openai/gpt-5": {
9122
9072
  id: "openai/gpt-5",
9123
9073
  name: "OpenAI: GPT-5",
@@ -10008,7 +9958,7 @@ export const MODELS = {
10008
9958
  api: "openai-completions",
10009
9959
  provider: "openrouter",
10010
9960
  baseUrl: "https://openrouter.ai/api/v1",
10011
- reasoning: true,
9961
+ reasoning: false,
10012
9962
  input: ["text"],
10013
9963
  cost: {
10014
9964
  input: 0.071,
@@ -10504,13 +10454,13 @@ export const MODELS = {
10504
10454
  reasoning: true,
10505
10455
  input: ["text", "image"],
10506
10456
  cost: {
10507
- input: 0.049999999999999996,
10457
+ input: 0.09999999999999999,
10508
10458
  output: 0.15,
10509
10459
  cacheRead: 0,
10510
10460
  cacheWrite: 0,
10511
10461
  },
10512
- contextWindow: 256000,
10513
- maxTokens: 32768,
10462
+ contextWindow: 262144,
10463
+ maxTokens: 4096,
10514
10464
  },
10515
10465
  "qwen/qwen3.5-flash-02-23": {
10516
10466
  id: "qwen/qwen3.5-flash-02-23",