@botpress/cognitive 0.1.45 → 0.1.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -18983,7 +18983,7 @@ declare class Cognitive {
  private _generateContent;
  }
 
- type Models = 'auto' | 'best' | 'fast' | 'anthropic:claude-3-5-haiku-20241022' | 'anthropic:claude-3-5-sonnet-20240620' | 'anthropic:claude-3-5-sonnet-20241022' | 'anthropic:claude-3-7-sonnet-20250219' | 'anthropic:claude-3-haiku-20240307' | 'anthropic:claude-sonnet-4-20250514' | 'cerebras:gpt-oss-120b' | 'cerebras:llama-4-scout-17b-16e-instruct' | 'cerebras:llama3.1-8b' | 'cerebras:llama3.3-70b' | 'cerebras:qwen-3-32b' | 'fireworks-ai:deepseek-r1' | 'fireworks-ai:deepseek-r1-0528' | 'fireworks-ai:deepseek-r1-basic' | 'fireworks-ai:deepseek-v3-0324' | 'fireworks-ai:gpt-oss-120b' | 'fireworks-ai:gpt-oss-20b' | 'fireworks-ai:llama-v3p1-8b-instruct' | 'fireworks-ai:llama-v3p3-70b-instruct' | 'fireworks-ai:llama4-maverick-instruct-basic' | 'fireworks-ai:llama4-scout-instruct-basic' | 'fireworks-ai:mixtral-8x22b-instruct' | 'fireworks-ai:mixtral-8x7b-instruct' | 'fireworks-ai:mythomax-l2-13b' | 'google-ai:gemini-2.5-flash' | 'google-ai:gemini-2.5-pro' | 'google-ai:models/gemini-2.0-flash' | 'groq:deepseek-r1-distill-llama-70b' | 'groq:gemma2-9b-it' | 'groq:llama-3.1-8b-instant' | 'groq:llama-3.3-70b-versatile' | 'groq:openai/gpt-oss-120b' | 'groq:openai/gpt-oss-20b' | 'openai:gpt-4.1-2025-04-14' | 'openai:gpt-4.1-mini-2025-04-14' | 'openai:gpt-4.1-nano-2025-04-14' | 'openai:gpt-4o-2024-11-20' | 'openai:gpt-4o-mini-2024-07-18' | 'openai:gpt-5-2025-08-07' | 'openai:gpt-5-mini-2025-08-07' | 'openai:gpt-5-nano-2025-08-07' | 'openai:o1-2024-12-17' | 'openai:o1-mini-2024-09-12' | 'openai:o3-2025-04-16' | 'openai:o3-mini-2025-01-31' | 'openai:o4-mini-2025-04-16' | 'openrouter:gpt-oss-120b' | 'xai:grok-3' | 'xai:grok-3-mini' | 'xai:grok-4-0709' | 'xai:grok-4-fast-non-reasoning' | 'xai:grok-4-fast-reasoning' | 'xai:grok-code-fast-1' | ({} & string);
+ type Models = 'auto' | 'best' | 'fast' | 'anthropic:claude-3-5-haiku-20241022' | 'anthropic:claude-3-5-sonnet-20240620' | 'anthropic:claude-3-5-sonnet-20241022' | 'anthropic:claude-3-7-sonnet-20250219' | 'anthropic:claude-3-haiku-20240307' | 'anthropic:claude-sonnet-4-20250514' | 'anthropic:claude-sonnet-4-5-20250929' | 'cerebras:gpt-oss-120b' | 'cerebras:llama-4-scout-17b-16e-instruct' | 'cerebras:llama3.1-8b' | 'cerebras:llama3.3-70b' | 'cerebras:qwen-3-32b' | 'fireworks-ai:deepseek-r1-0528' | 'fireworks-ai:deepseek-v3-0324' | 'fireworks-ai:gpt-oss-120b' | 'fireworks-ai:gpt-oss-20b' | 'fireworks-ai:llama-v3p1-8b-instruct' | 'fireworks-ai:llama-v3p3-70b-instruct' | 'fireworks-ai:llama4-maverick-instruct-basic' | 'fireworks-ai:llama4-scout-instruct-basic' | 'fireworks-ai:mixtral-8x7b-instruct' | 'fireworks-ai:mythomax-l2-13b' | 'google-ai:gemini-2.0-flash' | 'google-ai:gemini-2.5-flash' | 'google-ai:gemini-2.5-pro' | 'groq:deepseek-r1-distill-llama-70b' | 'groq:gemma2-9b-it' | 'groq:gpt-oss-120b' | 'groq:gpt-oss-20b' | 'groq:llama-3.1-8b-instant' | 'groq:llama-3.3-70b-versatile' | 'openai:gpt-4.1-2025-04-14' | 'openai:gpt-4.1-mini-2025-04-14' | 'openai:gpt-4.1-nano-2025-04-14' | 'openai:gpt-4o-2024-11-20' | 'openai:gpt-4o-mini-2024-07-18' | 'openai:gpt-5-2025-08-07' | 'openai:gpt-5-mini-2025-08-07' | 'openai:gpt-5-nano-2025-08-07' | 'openai:o1-2024-12-17' | 'openai:o1-mini-2024-09-12' | 'openai:o3-2025-04-16' | 'openai:o3-mini-2025-01-31' | 'openai:o4-mini-2025-04-16' | 'openrouter:gpt-oss-120b' | 'xai:grok-3' | 'xai:grok-3-mini' | 'xai:grok-4-0709' | 'xai:grok-4-fast-non-reasoning' | 'xai:grok-4-fast-reasoning' | 'xai:grok-code-fast-1' | 'openai:gpt-5' | 'openai:gpt-5-mini' | 'openai:gpt-5-nano' | 'openai:o4-mini' | 'openai:o3' | 'openai:gpt-4.1' | 'openai:gpt-4.1-mini' | 'openai:gpt-4.1-nano' | 'openai:o3-mini' | 'openai:o1-mini' | 'openai:gpt-4o-mini' | 'openai:gpt-4o' | 'anthropic:claude-sonnet-4-5' | 'anthropic:claude-sonnet-4' | 'anthropic:claude-sonnet-4-reasoning' | 'google-ai:models/gemini-2.0-flash' | 'groq:openai/gpt-oss-20b' | 'groq:openai/gpt-oss-120b' | 'fireworks-ai:accounts/fireworks/models/gpt-oss-20b' | 'fireworks-ai:accounts/fireworks/models/gpt-oss-120b' | 'fireworks-ai:accounts/fireworks/models/deepseek-r1-0528' | 'fireworks-ai:accounts/fireworks/models/deepseek-v3-0324' | 'fireworks-ai:accounts/fireworks/models/llama4-maverick-instruct-basic' | 'fireworks-ai:accounts/fireworks/models/llama4-scout-instruct-basic' | 'fireworks-ai:accounts/fireworks/models/llama-v3p3-70b-instruct' | 'fireworks-ai:accounts/fireworks/models/deepseek-r1' | 'fireworks-ai:accounts/fireworks/models/deepseek-r1-basic' | 'fireworks-ai:accounts/fireworks/models/deepseek-v3' | 'fireworks-ai:accounts/fireworks/models/llama-v3p1-405b-instruct' | 'fireworks-ai:accounts/fireworks/models/llama-v3p1-70b-instruct' | 'fireworks-ai:accounts/fireworks/models/llama-v3p1-8b-instruct' | 'fireworks-ai:accounts/fireworks/models/mixtral-8x22b-instruct' | 'fireworks-ai:accounts/fireworks/models/mixtral-8x7b-instruct' | 'fireworks-ai:accounts/fireworks/models/mythomax-l2-13b' | 'fireworks-ai:accounts/fireworks/models/gemma2-9b-it' | ({} & string);
  type CognitiveRequest = {
  /**
  * @minItems 1
@@ -19054,6 +19054,7 @@ type CognitiveStreamChunk = {
  };
  type CognitiveResponse = {
  output: string;
+ reasoning?: string;
  metadata: {
  provider: string;
  model?: string;
@@ -19072,7 +19073,7 @@ type CognitiveResponse = {
  stopReason?: 'stop' | 'length' | 'content_filter' | 'error';
  reasoningEffort?: string;
  warnings?: {
- type: 'parameter_ignored' | 'provider_limitation' | 'deprecated_model' | 'fallback_used';
+ type: 'parameter_ignored' | 'provider_limitation' | 'deprecated_model' | 'discontinued_model' | 'fallback_used';
  message: string;
  }[];
  /**
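
For consumers of the typings above, the practical effect is that CognitiveResponse can now carry the model's reasoning text and that metadata.warnings may flag a discontinued model. A minimal TypeScript sketch of how calling code might account for both; only the field names and the warning union come from index.d.ts, while the handler itself is hypothetical:

// Hypothetical handler; field names and the warning union mirror the declarations above.
type Warning = {
  type: 'parameter_ignored' | 'provider_limitation' | 'deprecated_model' | 'discontinued_model' | 'fallback_used'
  message: string
}

type ResponseSubset = {
  output: string
  reasoning?: string // new in 0.1.47
  metadata: { provider: string; model?: string; warnings?: Warning[] }
}

function handleResponse(res: ResponseSubset): string {
  for (const warning of res.metadata.warnings ?? []) {
    if (warning.type === 'discontinued_model') {
      // The selected model no longer exists upstream; surface this loudly.
      console.warn(`[cognitive] ${warning.message}`)
    }
  }
  if (res.reasoning) {
    console.debug('[cognitive] reasoning:', res.reasoning)
  }
  return res.output
}
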
package/dist/index.mjs CHANGED
@@ -649,7 +649,8 @@ var models = {
  costPer1MTokens: 10
  },
  tags: ["recommended", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5"]
  },
  "openai:gpt-5-mini-2025-08-07": {
  id: "openai:gpt-5-mini-2025-08-07",
@@ -664,7 +665,8 @@ var models = {
  costPer1MTokens: 2
  },
  tags: ["recommended", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5-mini"]
  },
  "openai:gpt-5-nano-2025-08-07": {
  id: "openai:gpt-5-nano-2025-08-07",
@@ -679,7 +681,8 @@ var models = {
  costPer1MTokens: 0.4
  },
  tags: ["low-cost", "reasoning", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-5-nano"]
  },
  "openai:o4-mini-2025-04-16": {
  id: "openai:o4-mini-2025-04-16",
@@ -694,7 +697,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "vision", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o4-mini"]
  },
  "openai:o3-2025-04-16": {
  id: "openai:o3-2025-04-16",
@@ -709,7 +713,8 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["reasoning", "vision", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o3"]
  },
  "openai:gpt-4.1-2025-04-14": {
  id: "openai:gpt-4.1-2025-04-14",
@@ -724,7 +729,8 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["recommended", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1"]
  },
  "openai:gpt-4.1-mini-2025-04-14": {
  id: "openai:gpt-4.1-mini-2025-04-14",
@@ -739,7 +745,8 @@ var models = {
  costPer1MTokens: 1.6
  },
  tags: ["recommended", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1-mini"]
  },
  "openai:gpt-4.1-nano-2025-04-14": {
  id: "openai:gpt-4.1-nano-2025-04-14",
@@ -754,7 +761,8 @@ var models = {
  costPer1MTokens: 0.4
  },
  tags: ["low-cost", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4.1-nano"]
  },
  "openai:o3-mini-2025-01-31": {
  id: "openai:o3-mini-2025-01-31",
@@ -769,7 +777,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "general-purpose", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o3-mini"]
  },
  "openai:o1-2024-12-17": {
  id: "openai:o1-2024-12-17",
@@ -799,7 +808,8 @@ var models = {
  costPer1MTokens: 4.4
  },
  tags: ["reasoning", "vision", "general-purpose"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["o1-mini"]
  },
  "openai:gpt-4o-mini-2024-07-18": {
  id: "openai:gpt-4o-mini-2024-07-18",
@@ -814,7 +824,8 @@ var models = {
  costPer1MTokens: 0.6
  },
  tags: ["recommended", "vision", "low-cost", "general-purpose", "function-calling"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4o-mini"]
  },
  "openai:gpt-4o-2024-11-20": {
  id: "openai:gpt-4o-2024-11-20",
@@ -829,7 +840,8 @@ var models = {
  costPer1MTokens: 10
  },
  tags: ["recommended", "vision", "general-purpose", "coding", "agents", "function-calling"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["gpt-4o"]
  },
  "openai:gpt-4o-2024-08-06": {
  id: "openai:gpt-4o-2024-08-06",
@@ -891,6 +903,22 @@ var models = {
  tags: ["deprecated", "general-purpose", "low-cost"],
  lifecycle: "deprecated"
  },
+ "anthropic:claude-sonnet-4-5-20250929": {
+ id: "anthropic:claude-sonnet-4-5-20250929",
+ name: "Claude Sonnet 4.5",
+ description: "Claude Sonnet 4.5 is Anthropic's most advanced Sonnet model to date, optimized for real-world agents and coding workflows. It delivers state-of-the-art performance on coding benchmarks, with improvements across system design, code security, and specification adherence.",
+ input: {
+ maxTokens: 2e5,
+ costPer1MTokens: 3
+ },
+ output: {
+ maxTokens: 64e3,
+ costPer1MTokens: 15
+ },
+ tags: ["recommended", "reasoning", "agents", "vision", "general-purpose", "coding"],
+ lifecycle: "live",
+ aliases: ["claude-sonnet-4-5"]
+ },
  "anthropic:claude-sonnet-4-20250514": {
  id: "anthropic:claude-sonnet-4-20250514",
  name: "Claude Sonnet 4",
@@ -904,7 +932,8 @@ var models = {
  costPer1MTokens: 15
  },
  tags: ["recommended", "reasoning", "agents", "vision", "general-purpose", "coding"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["claude-sonnet-4"]
  },
  "anthropic:claude-sonnet-4-reasoning-20250514": {
  id: "anthropic:claude-sonnet-4-reasoning-20250514",
@@ -919,7 +948,8 @@ var models = {
  costPer1MTokens: 15
  },
  tags: ["deprecated", "vision", "reasoning", "general-purpose", "agents", "coding"],
- lifecycle: "deprecated"
+ lifecycle: "deprecated",
+ aliases: ["claude-sonnet-4-reasoning"]
  },
  "anthropic:claude-3-7-sonnet-20250219": {
  id: "anthropic:claude-3-7-sonnet-20250219",
@@ -1041,8 +1071,8 @@ var models = {
  tags: ["recommended", "reasoning", "agents", "general-purpose", "vision", "coding"],
  lifecycle: "live"
  },
- "google-ai:models/gemini-2.0-flash": {
- id: "google-ai:models/gemini-2.0-flash",
+ "google-ai:gemini-2.0-flash": {
+ id: "google-ai:gemini-2.0-flash",
  name: "Gemini 2.0 Flash",
  description: "Next-gen Gemini model with improved capabilities, superior speed, native tool use, multimodal generation, and 1M token context window.",
  input: {
@@ -1054,7 +1084,8 @@ var models = {
  costPer1MTokens: 0.4
  },
  tags: ["low-cost", "general-purpose", "vision"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["models/gemini-2.0-flash"]
  },
  "cerebras:gpt-oss-120b": {
  id: "cerebras:gpt-oss-120b",
@@ -1131,8 +1162,8 @@ var models = {
  tags: ["general-purpose"],
  lifecycle: "live"
  },
- "groq:openai/gpt-oss-20b": {
- id: "groq:openai/gpt-oss-20b",
+ "groq:gpt-oss-20b": {
+ id: "groq:gpt-oss-20b",
  name: "GPT-OSS 20B (Preview)",
  description: "gpt-oss-20b is a compact, open-weight language model optimized for low-latency. It shares the same training foundation and capabilities as the GPT-OSS 120B model, with faster responses and lower cost.",
  input: {
@@ -1144,10 +1175,11 @@ var models = {
  costPer1MTokens: 0.5
  },
  tags: ["preview", "general-purpose", "reasoning", "low-cost"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["openai/gpt-oss-20b"]
  },
- "groq:openai/gpt-oss-120b": {
- id: "groq:openai/gpt-oss-120b",
+ "groq:gpt-oss-120b": {
+ id: "groq:gpt-oss-120b",
  name: "GPT-OSS 120B (Preview)",
  description: "gpt-oss-120b is a high-performance, open-weight language model designed for production-grade, general-purpose use cases. It excels at complex reasoning and supports configurable reasoning effort, full chain-of-thought transparency for easier debugging and trust, and native agentic capabilities for function calling, tool use, and structured outputs.",
  input: {
@@ -1159,7 +1191,8 @@ var models = {
  costPer1MTokens: 0.75
  },
  tags: ["preview", "general-purpose", "reasoning"],
- lifecycle: "live"
+ lifecycle: "live",
+ aliases: ["openai/gpt-oss-120b"]
  },
  "groq:deepseek-r1-distill-llama-70b": {
  id: "groq:deepseek-r1-distill-llama-70b",
@@ -1541,7 +1574,7 @@ var models = {
  costPer1MTokens: 8
  },
  tags: ["reasoning", "general-purpose", "coding"],
- lifecycle: "live",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/deepseek-r1"]
  },
  "fireworks-ai:deepseek-r1-basic": {
@@ -1556,8 +1589,8 @@ var models = {
  maxTokens: 32768,
  costPer1MTokens: 2.19
  },
- tags: ["recommended", "reasoning", "general-purpose", "coding"],
- lifecycle: "live",
+ tags: ["reasoning", "general-purpose", "coding"],
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/deepseek-r1-basic"]
  },
  "fireworks-ai:deepseek-v3": {
@@ -1573,7 +1606,7 @@ var models = {
  costPer1MTokens: 0.9
  },
  tags: ["deprecated", "general-purpose"],
- lifecycle: "deprecated",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/deepseek-v3"]
  },
  "fireworks-ai:llama-v3p1-405b-instruct": {
@@ -1589,7 +1622,7 @@ var models = {
  costPer1MTokens: 3
  },
  tags: ["deprecated", "general-purpose"],
- lifecycle: "deprecated",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/llama-v3p1-405b-instruct"]
  },
  "fireworks-ai:llama-v3p1-70b-instruct": {
@@ -1605,7 +1638,7 @@ var models = {
  costPer1MTokens: 0.9
  },
  tags: ["deprecated", "general-purpose"],
- lifecycle: "deprecated",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/llama-v3p1-70b-instruct"]
  },
  "fireworks-ai:llama-v3p1-8b-instruct": {
@@ -1636,8 +1669,8 @@ var models = {
  maxTokens: 65536,
  costPer1MTokens: 1.2
  },
- tags: ["general-purpose"],
- lifecycle: "live",
+ tags: ["deprecated", "general-purpose"],
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/mixtral-8x22b-instruct"]
  },
  "fireworks-ai:mixtral-8x7b-instruct": {
@@ -1685,29 +1718,10 @@ var models = {
  costPer1MTokens: 0.2
  },
  tags: ["deprecated", "low-cost", "general-purpose"],
- lifecycle: "deprecated",
+ lifecycle: "discontinued",
  aliases: ["accounts/fireworks/models/gemma2-9b-it"]
  }
  };
- var knownTags = [
- "auto",
- "best",
- "fast",
- "reasoning",
- "cheapest",
- "balance",
- "recommended",
- "reasoning",
- "general-purpose",
- "low-cost",
- "vision",
- "coding",
- "function-calling",
- "agents",
- "storytelling",
- "preview",
- "roleplay"
- ];
  var defaultModel = {
  id: "",
  name: "",
@@ -1891,11 +1905,14 @@ var getCognitiveV2Model = (model) => {
  if (models[model]) {
  return models[model];
  }
- const alias = Object.values(models).find((x) => x.aliases?.includes(model));
+ const [_provider, baseModel] = model.split(":");
+ const alias = Object.values(models).find(
+ (x) => x.aliases ? x.aliases.includes(model) || baseModel && x.aliases.includes(baseModel) : false
+ );
  if (alias) {
  return alias;
  }
- if (knownTags.includes(model)) {
+ if (["auto", "fast", "best"].includes(model)) {
  return { ...defaultModel, id: model, name: model };
  }
  return void 0;
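
Taken together, the aliases added throughout the models map and this change to getCognitiveV2Model mean a lookup can now succeed on the full dated ID, the full alias, or the alias portion after the "provider:" prefix, while the removed knownTags list is narrowed to the three routing keywords. A minimal sketch of the new lookup behaviour, using a trimmed two-entry models map (the lookup logic mirrors the diff above; the map contents and usage lines are illustrative):

// Illustrative sketch; the lookup logic mirrors the diff above, the models map is trimmed down.
type ModelEntry = { id: string; name: string; aliases?: string[] }

const models: Record<string, ModelEntry> = {
  'openai:gpt-5-2025-08-07': { id: 'openai:gpt-5-2025-08-07', name: 'GPT-5', aliases: ['gpt-5'] },
  'groq:gpt-oss-120b': { id: 'groq:gpt-oss-120b', name: 'GPT-OSS 120B (Preview)', aliases: ['openai/gpt-oss-120b'] }
}

const getCognitiveV2Model = (model: string): ModelEntry | undefined => {
  if (models[model]) {
    return models[model]
  }
  // New in 0.1.47: also match aliases against the part after the "provider:" prefix.
  const [, baseModel] = model.split(':')
  const alias = Object.values(models).find(
    (x) => (x.aliases ? x.aliases.includes(model) || (!!baseModel && x.aliases.includes(baseModel)) : false)
  )
  if (alias) {
    return alias
  }
  // knownTags is gone; only the three routing keywords fall back to the default model.
  if (['auto', 'fast', 'best'].includes(model)) {
    return { id: model, name: model }
  }
  return undefined
}

// Both of these now resolve to the same entries as their dated/renamed IDs:
// getCognitiveV2Model('openai:gpt-5')             -> models['openai:gpt-5-2025-08-07']
// getCognitiveV2Model('groq:openai/gpt-oss-120b') -> models['groq:gpt-oss-120b']
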