@botpress/cognitive 0.1.45 → 0.1.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,28 +1,28 @@
  
- > @botpress/cognitive@0.1.45 build /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.47 build /home/runner/work/botpress/botpress/packages/cognitive
  > pnpm build:type && pnpm build:neutral && size-limit
  
  
- > @botpress/cognitive@0.1.45 build:type /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.47 build:type /home/runner/work/botpress/botpress/packages/cognitive
  > tsup --tsconfig tsconfig.build.json ./src/index.ts --dts-resolve --dts-only --clean
  
  CLI Building entry: ./src/index.ts
  CLI Using tsconfig: tsconfig.build.json
  CLI tsup v8.0.2
  DTS Build start
- DTS ⚡️ Build success in 9468ms
- DTS dist/index.d.ts 625.92 KB
+ DTS ⚡️ Build success in 8896ms
+ DTS dist/index.d.ts 627.35 KB
  
- > @botpress/cognitive@0.1.45 build:neutral /home/runner/work/botpress/botpress/packages/cognitive
+ > @botpress/cognitive@0.1.47 build:neutral /home/runner/work/botpress/botpress/packages/cognitive
  > ts-node -T ./build.ts --neutral
  
  Done
  
  dist/index.cjs
  Size limit: 50 kB
- Size: 14.55 kB brotlied
+ Size: 14.68 kB brotlied
  
  dist/index.mjs
  Size limit: 50 kB
- Size: 14.43 kB brotlied
+ Size: 14.56 kB brotlied
  
package/dist/index.cjs CHANGED
@@ -665,7 +665,8 @@ var models = {
  costPer1MTokens: 10
  },
  tags: ["recommended", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5"]
  },
  "openai:gpt-5-mini-2025-08-07": {
  id: "openai:gpt-5-mini-2025-08-07",
@@ -680,7 +681,8 @@ var models = {
  costPer1MTokens: 2
  },
  tags: ["recommended", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5-mini"]
  },
  "openai:gpt-5-nano-2025-08-07": {
  id: "openai:gpt-5-nano-2025-08-07",
@@ -695,7 +697,8 @@ var models = {
  costPer1MTokens: 0.4
  },
  tags: ["low-cost", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5-nano"]
  },
  "openai:o4-mini-2025-04-16": {
  id: "openai:o4-mini-2025-04-16",
@@ -710,7 +713,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "vision", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o4-mini"]
  },
  "openai:o3-2025-04-16": {
  id: "openai:o3-2025-04-16",
@@ -725,7 +729,8 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["reasoning", "vision", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o3"]
  },
  "openai:gpt-4.1-2025-04-14": {
  id: "openai:gpt-4.1-2025-04-14",
@@ -740,7 +745,8 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["recommended", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1"]
  },
  "openai:gpt-4.1-mini-2025-04-14": {
  id: "openai:gpt-4.1-mini-2025-04-14",
@@ -755,7 +761,8 @@ var models = {
  costPer1MTokens: 1.6
  },
  tags: ["recommended", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1-mini"]
  },
  "openai:gpt-4.1-nano-2025-04-14": {
  id: "openai:gpt-4.1-nano-2025-04-14",
@@ -770,7 +777,8 @@ var models = {
  costPer1MTokens: 0.4
  },
  tags: ["low-cost", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1-nano"]
  },
  "openai:o3-mini-2025-01-31": {
  id: "openai:o3-mini-2025-01-31",
@@ -785,7 +793,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "general-purpose", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o3-mini"]
  },
  "openai:o1-2024-12-17": {
  id: "openai:o1-2024-12-17",
@@ -815,7 +824,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o1-mini"]
  },
  "openai:gpt-4o-mini-2024-07-18": {
  id: "openai:gpt-4o-mini-2024-07-18",
@@ -830,7 +840,8 @@ var models = {
  costPer1MTokens: 0.6
  },
  tags: ["recommended", "vision", "low-cost", "general-purpose", "function-calling"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4o-mini"]
  },
  "openai:gpt-4o-2024-11-20": {
  id: "openai:gpt-4o-2024-11-20",
@@ -845,7 +856,8 @@ var models = {
  costPer1MTokens: 10
  },
  tags: ["recommended", "vision", "general-purpose", "coding", "agents", "function-calling"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4o"]
  },
  "openai:gpt-4o-2024-08-06": {
  id: "openai:gpt-4o-2024-08-06",
@@ -907,6 +919,22 @@ var models = {
  tags: ["deprecated", "general-purpose", "low-cost"],
  lifecycle: "deprecated"
  },
+ "anthropic:claude-sonnet-4-5-20250929": {
+ id: "anthropic:claude-sonnet-4-5-20250929",
+ name: "Claude Sonnet 4.5",
+ description: "Claude Sonnet 4.5 is Anthropic's most advanced Sonnet model to date, optimized for real-world agents and coding workflows. It delivers state-of-the-art performance on coding benchmarks, with improvements across system design, code security, and specification adherence.",
+ input: {
+ maxTokens: 2e5,
+ costPer1MTokens: 3
+ },
+ output: {
+ maxTokens: 64e3,
+ costPer1MTokens: 15
+ },
+ tags: ["recommended", "reasoning", "agents", "vision", "general-purpose", "coding"],
+ lifecycle: "live",
+ aliases: ["claude-sonnet-4-5"]
+ },
  "anthropic:claude-sonnet-4-20250514": {
  id: "anthropic:claude-sonnet-4-20250514",
  name: "Claude Sonnet 4",
@@ -920,7 +948,8 @@ var models = {
  costPer1MTokens: 15
  },
  tags: ["recommended", "reasoning", "agents", "vision", "general-purpose", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["claude-sonnet-4"]
  },
  "anthropic:claude-sonnet-4-reasoning-20250514": {
  id: "anthropic:claude-sonnet-4-reasoning-20250514",
@@ -935,7 +964,8 @@ var models = {
  costPer1MTokens: 15
  },
  tags: ["deprecated", "vision", "reasoning", "general-purpose", "agents", "coding"],
- lifecycle: "deprecated"
+ lifecycle: "deprecated",
+ aliases: ["claude-sonnet-4-reasoning"]
  },
  "anthropic:claude-3-7-sonnet-20250219": {
  id: "anthropic:claude-3-7-sonnet-20250219",
@@ -1057,8 +1087,8 @@ var models = {
  tags: ["recommended", "reasoning", "agents", "general-purpose", "vision", "coding"],
  lifecycle: "live"
  },
- "google-ai:models/gemini-2.0-flash": {
- id: "google-ai:models/gemini-2.0-flash",
+ "google-ai:gemini-2.0-flash": {
+ id: "google-ai:gemini-2.0-flash",
  name: "Gemini 2.0 Flash",
  description: "Next-gen Gemini model with improved capabilities, superior speed, native tool use, multimodal generation, and 1M token context window.",
  input: {
@@ -1070,7 +1100,8 @@ var models = {
  costPer1MTokens: 0.4
  },
  tags: ["low-cost", "general-purpose", "vision"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["models/gemini-2.0-flash"]
  },
  "cerebras:gpt-oss-120b": {
  id: "cerebras:gpt-oss-120b",
@@ -1147,8 +1178,8 @@ var models = {
  tags: ["general-purpose"],
  lifecycle: "live"
  },
- "groq:openai/gpt-oss-20b": {
- id: "groq:openai/gpt-oss-20b",
+ "groq:gpt-oss-20b": {
+ id: "groq:gpt-oss-20b",
  name: "GPT-OSS 20B (Preview)",
  description: "gpt-oss-20b is a compact, open-weight language model optimized for low-latency. It shares the same training foundation and capabilities as the GPT-OSS 120B model, with faster responses and lower cost.",
  input: {
@@ -1160,10 +1191,11 @@ var models = {
  costPer1MTokens: 0.5
  },
  tags: ["preview", "general-purpose", "reasoning", "low-cost"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["openai/gpt-oss-20b"]
  },
- "groq:openai/gpt-oss-120b": {
- id: "groq:openai/gpt-oss-120b",
+ "groq:gpt-oss-120b": {
+ id: "groq:gpt-oss-120b",
  name: "GPT-OSS 120B (Preview)",
  description: "gpt-oss-120b is a high-performance, open-weight language model designed for production-grade, general-purpose use cases. It excels at complex reasoning and supports configurable reasoning effort, full chain-of-thought transparency for easier debugging and trust, and native agentic capabilities for function calling, tool use, and structured outputs.",
  input: {
@@ -1175,7 +1207,8 @@ var models = {
  costPer1MTokens: 0.75
  },
  tags: ["preview", "general-purpose", "reasoning"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["openai/gpt-oss-120b"]
  },
  "groq:deepseek-r1-distill-llama-70b": {
  id: "groq:deepseek-r1-distill-llama-70b",
@@ -1557,7 +1590,7 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["reasoning", "general-purpose", "coding"],
- lifecycle: "live",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/deepseek-r1"]
  },
  "fireworks-ai:deepseek-r1-basic": {
@@ -1572,8 +1605,8 @@ var models = {
  maxTokens: 32768,
  costPer1MTokens: 2.19
  },
- tags: ["recommended", "reasoning", "general-purpose", "coding"],
- lifecycle: "live",
+ tags: ["reasoning", "general-purpose", "coding"],
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/deepseek-r1-basic"]
  },
  "fireworks-ai:deepseek-v3": {
@@ -1589,7 +1622,7 @@ var models = {
  costPer1MTokens: 0.9
  },
  tags: ["deprecated", "general-purpose"],
- lifecycle: "deprecated",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/deepseek-v3"]
  },
  "fireworks-ai:llama-v3p1-405b-instruct": {
@@ -1605,7 +1638,7 @@ var models = {
  costPer1MTokens: 3
  },
  tags: ["deprecated", "general-purpose"],
- lifecycle: "deprecated",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/llama-v3p1-405b-instruct"]
  },
  "fireworks-ai:llama-v3p1-70b-instruct": {
@@ -1621,7 +1654,7 @@ var models = {
  costPer1MTokens: 0.9
  },
  tags: ["deprecated", "general-purpose"],
- lifecycle: "deprecated",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/llama-v3p1-70b-instruct"]
  },
  "fireworks-ai:llama-v3p1-8b-instruct": {
@@ -1652,8 +1685,8 @@ var models = {
  maxTokens: 65536,
  costPer1MTokens: 1.2
  },
- tags: ["general-purpose"],
- lifecycle: "live",
+ tags: ["deprecated", "general-purpose"],
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/mixtral-8x22b-instruct"]
  },
  "fireworks-ai:mixtral-8x7b-instruct": {
@@ -1701,29 +1734,10 @@ var models = {
  costPer1MTokens: 0.2
  },
  tags: ["deprecated", "low-cost", "general-purpose"],
- lifecycle: "deprecated",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/gemma2-9b-it"]
  }
  };
- var knownTags = [
- "auto",
- "best",
- "fast",
- "reasoning",
- "cheapest",
- "balance",
- "recommended",
- "reasoning",
- "general-purpose",
- "low-cost",
- "vision",
- "coding",
- "function-calling",
- "agents",
- "storytelling",
- "preview",
- "roleplay"
- ];
  var defaultModel = {
  id: "",
  name: "",
@@ -1907,11 +1921,14 @@ var getCognitiveV2Model = (model) => {
  if (models[model]) {
  return models[model];
  }
- const alias = Object.values(models).find((x) => x.aliases?.includes(model));
+ const [_provider, baseModel] = model.split(":");
+ const alias = Object.values(models).find(
+ (x) => x.aliases ? x.aliases.includes(model) || baseModel && x.aliases.includes(baseModel) : false
+ );
  if (alias) {
  return alias;
  }
- if (knownTags.includes(model)) {
+ if (["auto", "fast", "best"].includes(model)) {
  return { ...defaultModel, id: model, name: model };
  }
  return void 0;
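
Note on the getCognitiveV2Model change above: the compiled output now also matches a model's aliases against the bare name after the "provider:" prefix, and the tag-style fallback is narrowed from the removed knownTags list to just "auto", "fast" and "best". The following is a minimal TypeScript sketch of that lookup behaviour, based only on this diff; the names ModelEntry and resolveModel, and the two-entry model table, are illustrative and are not the package's public API.

// Minimal sketch of the 0.1.47 model resolution path (assumed names, trimmed data).
type ModelEntry = { id: string; name: string; aliases?: string[] }

const models: Record<string, ModelEntry> = {
  'openai:gpt-4o-2024-11-20': { id: 'openai:gpt-4o-2024-11-20', name: 'GPT-4o', aliases: ['gpt-4o'] },
  'groq:gpt-oss-120b': { id: 'groq:gpt-oss-120b', name: 'GPT-OSS 120B (Preview)', aliases: ['openai/gpt-oss-120b'] },
}

const resolveModel = (model: string): ModelEntry | undefined => {
  // 1. Exact id match.
  if (models[model]) {
    return models[model]
  }
  // 2. New in 0.1.47: try aliases against the full ref *and* the bare name after "provider:",
  //    so "openai:gpt-4o" resolves to the dated "openai:gpt-4o-2024-11-20" entry.
  const [_provider, baseModel] = model.split(':')
  const alias = Object.values(models).find(
    (x) => (x.aliases ? x.aliases.includes(model) || (!!baseModel && x.aliases.includes(baseModel)) : false)
  )
  if (alias) {
    return alias
  }
  // 3. Tag-style fallback, now limited to "auto", "fast" and "best".
  if (['auto', 'fast', 'best'].includes(model)) {
    return { id: model, name: model }
  }
  return undefined
}

console.log(resolveModel('openai:gpt-4o')?.id)            // openai:gpt-4o-2024-11-20
console.log(resolveModel('groq:openai/gpt-oss-120b')?.id) // groq:gpt-oss-120b (old-style ref kept working via aliases)
console.log(resolveModel('auto')?.id)                      // auto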