@botpress/cognitive 0.1.44 → 0.1.46

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
@@ -1,28 +1,28 @@
 
- > @botpress/cognitive@0.1.44 build /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.46 build /home/runner/work/botpress/botpress/packages/cognitive
  > pnpm build:type && pnpm build:neutral && size-limit
 
 
- > @botpress/cognitive@0.1.44 build:type /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.46 build:type /home/runner/work/botpress/botpress/packages/cognitive
  > tsup --tsconfig tsconfig.build.json ./src/index.ts --dts-resolve --dts-only --clean
 
  CLI Building entry: ./src/index.ts
  CLI Using tsconfig: tsconfig.build.json
  CLI tsup v8.0.2
  DTS Build start
- DTS ⚡️ Build success in 9218ms
- DTS dist/index.d.ts 625.93 KB
+ DTS ⚡️ Build success in 9559ms
+ DTS dist/index.d.ts 627.32 KB
 
- > @botpress/cognitive@0.1.44 build:neutral /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.46 build:neutral /home/runner/work/botpress/botpress/packages/cognitive
  > ts-node -T ./build.ts --neutral
 
  Done
 
  dist/index.cjs
  Size limit: 50 kB
- Size: 14.49 kB brotlied
+ Size: 14.65 kB brotlied
 
  dist/index.mjs
  Size limit: 50 kB
- Size: 14.33 kB brotlied
+ Size: 14.48 kB brotlied
 
package/dist/index.cjs CHANGED
@@ -665,7 +665,8 @@ var models = {
  costPer1MTokens: 10
  },
  tags: ["recommended", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5"]
  },
  "openai:gpt-5-mini-2025-08-07": {
  id: "openai:gpt-5-mini-2025-08-07",
@@ -680,7 +681,8 @@ var models = {
  costPer1MTokens: 2
  },
  tags: ["recommended", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5-mini"]
  },
  "openai:gpt-5-nano-2025-08-07": {
  id: "openai:gpt-5-nano-2025-08-07",
@@ -695,7 +697,8 @@ var models = {
  costPer1MTokens: 0.4
  },
  tags: ["low-cost", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5-nano"]
  },
  "openai:o4-mini-2025-04-16": {
  id: "openai:o4-mini-2025-04-16",
@@ -710,7 +713,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "vision", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o4-mini"]
  },
  "openai:o3-2025-04-16": {
  id: "openai:o3-2025-04-16",
@@ -725,7 +729,8 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["reasoning", "vision", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o3"]
  },
  "openai:gpt-4.1-2025-04-14": {
  id: "openai:gpt-4.1-2025-04-14",
@@ -740,7 +745,8 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["recommended", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1"]
  },
  "openai:gpt-4.1-mini-2025-04-14": {
  id: "openai:gpt-4.1-mini-2025-04-14",
@@ -755,7 +761,8 @@ var models = {
  costPer1MTokens: 1.6
  },
  tags: ["recommended", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1-mini"]
  },
  "openai:gpt-4.1-nano-2025-04-14": {
  id: "openai:gpt-4.1-nano-2025-04-14",
@@ -770,7 +777,8 @@ var models = {
  costPer1MTokens: 0.4
  },
  tags: ["low-cost", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1-nano"]
  },
  "openai:o3-mini-2025-01-31": {
  id: "openai:o3-mini-2025-01-31",
@@ -785,7 +793,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "general-purpose", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o3-mini"]
  },
  "openai:o1-2024-12-17": {
  id: "openai:o1-2024-12-17",
@@ -815,7 +824,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o1-mini"]
  },
  "openai:gpt-4o-mini-2024-07-18": {
  id: "openai:gpt-4o-mini-2024-07-18",
@@ -830,7 +840,8 @@ var models = {
  costPer1MTokens: 0.6
  },
  tags: ["recommended", "vision", "low-cost", "general-purpose", "function-calling"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4o-mini"]
  },
  "openai:gpt-4o-2024-11-20": {
  id: "openai:gpt-4o-2024-11-20",
@@ -845,7 +856,8 @@ var models = {
  costPer1MTokens: 10
  },
  tags: ["recommended", "vision", "general-purpose", "coding", "agents", "function-calling"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4o"]
  },
  "openai:gpt-4o-2024-08-06": {
  id: "openai:gpt-4o-2024-08-06",
@@ -907,6 +919,22 @@ var models = {
  tags: ["deprecated", "general-purpose", "low-cost"],
  lifecycle: "deprecated"
  },
+ "anthropic:claude-sonnet-4-5-2025092": {
+ id: "anthropic:claude-sonnet-4-5-2025092",
+ name: "Claude Sonnet 4.5",
+ description: "Claude Sonnet 4.5 is Anthropic's most advanced Sonnet model to date, optimized for real-world agents and coding workflows. It delivers state-of-the-art performance on coding benchmarks, with improvements across system design, code security, and specification adherence.",
+ input: {
+ maxTokens: 2e5,
+ costPer1MTokens: 3
+ },
+ output: {
+ maxTokens: 64e3,
+ costPer1MTokens: 15
+ },
+ tags: ["recommended", "reasoning", "agents", "vision", "general-purpose", "coding"],
+ lifecycle: "live",
+ aliases: ["claude-sonnet-4-5"]
+ },
  "anthropic:claude-sonnet-4-20250514": {
  id: "anthropic:claude-sonnet-4-20250514",
  name: "Claude Sonnet 4",
@@ -920,7 +948,8 @@ var models = {
  costPer1MTokens: 15
  },
  tags: ["recommended", "reasoning", "agents", "vision", "general-purpose", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["claude-sonnet-4"]
  },
  "anthropic:claude-sonnet-4-reasoning-20250514": {
  id: "anthropic:claude-sonnet-4-reasoning-20250514",
@@ -935,7 +964,8 @@ var models = {
  costPer1MTokens: 15
  },
  tags: ["deprecated", "vision", "reasoning", "general-purpose", "agents", "coding"],
- lifecycle: "deprecated"
+ lifecycle: "deprecated",
+ aliases: ["claude-sonnet-4-reasoning"]
  },
  "anthropic:claude-3-7-sonnet-20250219": {
  id: "anthropic:claude-3-7-sonnet-20250219",
@@ -1147,8 +1177,8 @@ var models = {
  tags: ["general-purpose"],
  lifecycle: "live"
  },
- "groq:openai/gpt-oss-20b": {
- id: "groq:openai/gpt-oss-20b",
+ "groq:gpt-oss-20b": {
+ id: "groq:gpt-oss-20b",
  name: "GPT-OSS 20B (Preview)",
  description: "gpt-oss-20b is a compact, open-weight language model optimized for low-latency. It shares the same training foundation and capabilities as the GPT-OSS 120B model, with faster responses and lower cost.",
  input: {
@@ -1160,10 +1190,11 @@ var models = {
  costPer1MTokens: 0.5
  },
  tags: ["preview", "general-purpose", "reasoning", "low-cost"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["openai/gpt-oss-20b"]
  },
- "groq:openai/gpt-oss-120b": {
- id: "groq:openai/gpt-oss-120b",
+ "groq:gpt-oss-120b": {
+ id: "groq:gpt-oss-120b",
  name: "GPT-OSS 120B (Preview)",
  description: "gpt-oss-120b is a high-performance, open-weight language model designed for production-grade, general-purpose use cases. It excels at complex reasoning and supports configurable reasoning effort, full chain-of-thought transparency for easier debugging and trust, and native agentic capabilities for function calling, tool use, and structured outputs.",
  input: {
@@ -1175,7 +1206,8 @@ var models = {
  costPer1MTokens: 0.75
  },
  tags: ["preview", "general-purpose", "reasoning"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["openai/gpt-oss-120b"]
  },
  "groq:deepseek-r1-distill-llama-70b": {
  id: "groq:deepseek-r1-distill-llama-70b",
@@ -1557,7 +1589,7 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["reasoning", "general-purpose", "coding"],
- lifecycle: "live",
+ lifecycle: "deprecated",
  aliases: ["accounts/fireworks/models/deepseek-r1"]
  },
  "fireworks-ai:deepseek-r1-basic": {
@@ -1572,8 +1604,8 @@ var models = {
  maxTokens: 32768,
  costPer1MTokens: 2.19
  },
- tags: ["recommended", "reasoning", "general-purpose", "coding"],
- lifecycle: "live",
+ tags: ["reasoning", "general-purpose", "coding"],
+ lifecycle: "deprecated",
  aliases: ["accounts/fireworks/models/deepseek-r1-basic"]
  },
  "fireworks-ai:deepseek-v3": {
@@ -1652,8 +1684,8 @@ var models = {
  maxTokens: 65536,
  costPer1MTokens: 1.2
  },
- tags: ["general-purpose"],
- lifecycle: "live",
+ tags: ["deprecated", "general-purpose"],
+ lifecycle: "deprecated",
  aliases: ["accounts/fireworks/models/mixtral-8x22b-instruct"]
  },
  "fireworks-ai:mixtral-8x7b-instruct": {
@@ -1705,25 +1737,6 @@ var models = {
  aliases: ["accounts/fireworks/models/gemma2-9b-it"]
  }
  };
- var knownTags = [
- "auto",
- "best",
- "fast",
- "reasoning",
- "cheapest",
- "balance",
- "recommended",
- "reasoning",
- "general-purpose",
- "low-cost",
- "vision",
- "coding",
- "function-calling",
- "agents",
- "storytelling",
- "preview",
- "roleplay"
- ];
  var defaultModel = {
  id: "",
  name: "",
@@ -1742,12 +1755,13 @@ var defaultModel = {
 
  // src/cognitive-v2/index.ts
  var isBrowser = () => typeof window !== "undefined" && typeof window.fetch === "function";
- var CognitiveBeta = class {
+ var CognitiveBeta = class _CognitiveBeta {
  _axiosClient;
  _apiUrl;
  _timeout;
  _withCredentials;
  _headers;
+ _debug = false;
  constructor(props) {
  this._apiUrl = props.apiUrl || "https://api.botpress.cloud";
  this._timeout = props.timeout || 60001;
@@ -1759,12 +1773,25 @@ var CognitiveBeta = class {
  if (props.token) {
  this._headers["Authorization"] = `Bearer ${props.token}`;
  }
+ if (props.debug) {
+ this._debug = true;
+ this._headers["X-Debug"] = "1";
+ }
  this._axiosClient = import_axios.default.create({
  headers: this._headers,
  withCredentials: this._withCredentials,
  baseURL: this._apiUrl
  });
  }
+ clone() {
+ return new _CognitiveBeta({
+ apiUrl: this._apiUrl,
+ timeout: this._timeout,
+ withCredentials: this._withCredentials,
+ headers: this._headers,
+ debug: this._debug
+ });
+ }
  async generateText(input, options = {}) {
  const signal = options.signal ?? AbortSignal.timeout(this._timeout);
  const { data } = await this._withServerRetry(
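Together, the two hunks above give CognitiveBeta an opt-in debug mode and a clone() method. A minimal usage sketch based only on the constructor options and methods visible in this diff; the generateText payload shape is not shown here, so it is left as a placeholder, and the call assumes an async context:

  const client = new CognitiveBeta({
    apiUrl: "https://api.botpress.cloud", // default shown in the constructor above
    token: process.env.BOTPRESS_TOKEN, // any bearer token; the env var name is just a placeholder
    debug: true // new: also sends an "X-Debug: 1" header
  });

  const copy = client.clone(); // new: copies apiUrl, timeout, withCredentials, headers and debug

  const result = await client.generateText(
    { /* request payload; shape not part of this diff */ },
    { signal: AbortSignal.timeout(30000) } // optional; otherwise the client timeout applies
  );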
@@ -1897,7 +1924,7 @@ var getCognitiveV2Model = (model) => {
  if (alias) {
  return alias;
  }
- if (knownTags.includes(model)) {
+ if (["auto", "fast", "best"].includes(model)) {
  return { ...defaultModel, id: model, name: model };
  }
  return void 0;
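The net effect of this hunk plus the earlier knownTags removal: only "auto", "fast" and "best" still act as generic model shorthands; every other string has to match a model id or one of the new aliases. Expected behavior after the change (the alias lookup happens just above this hunk and is not shown, so the first line is an assumption):

  getCognitiveV2Model("gpt-4o"); // presumably resolved through the new aliases (lookup not shown here)
  getCognitiveV2Model("best"); // { ...defaultModel, id: "best", name: "best" }
  getCognitiveV2Model("cheapest"); // undefined (previously matched via knownTags)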
@@ -2156,6 +2183,7 @@ var Cognitive = class _Cognitive {
  _provider;
  _downtimes = [];
  _useBeta = false;
+ _debug = false;
  _events = createNanoEvents();
  constructor(props) {
  this._client = getExtendedClient(props.client);
@@ -2172,7 +2200,9 @@ var Cognitive = class _Cognitive {
  client: this._client.clone(),
  provider: this._provider,
  timeout: this._timeoutMs,
- maxRetries: this._maxRetries
+ maxRetries: this._maxRetries,
+ __debug: this._debug,
+ __experimental_beta: this._useBeta
  });
  copy._models = [...this._models];
  copy._preferences = this._preferences ? { ...this._preferences } : null;
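Finally, a short sketch of what the updated clone() on Cognitive now preserves, using the property names visible in this hunk; how the constructor consumes __debug and __experimental_beta is outside this diff, so the constructor call below is an assumption:

  const bot = new Cognitive({ client, __experimental_beta: true, __debug: true }); // assumed constructor options
  const copy = bot.clone();
  // After this change the copy keeps the original's beta and debug flags,
  // in addition to the client, provider, timeout and maxRetries it already carried over.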