@botpress/cognitive 0.1.44 → 0.1.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +7 -7
- package/dist/index.cjs +77 -47
- package/dist/index.cjs.map +2 -2
- package/dist/index.d.ts +14 -5
- package/dist/index.mjs +77 -47
- package/dist/index.mjs.map +2 -2
- package/package.json +1 -1
- package/refresh-models.ts +9 -16
package/dist/index.d.ts
CHANGED
@@ -214,6 +214,7 @@ type CognitiveProps = {
     maxRetries?: number;
     /** Whether to use the beta client. Restricted to authorized users. */
     __experimental_beta?: boolean;
+    __debug?: boolean;
 };
 type Events = {
     aborted: (req: Request, reason?: string) => void;
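CognitiveProps now accepts a `__debug` flag alongside `__experimental_beta`. A minimal sketch of passing it, assuming a standard @botpress/client instance (the client setup and environment variables are illustrative, not part of this diff):

import { Client } from '@botpress/client'
import { Cognitive } from '@botpress/cognitive'

// Illustrative client setup; the token/botId placeholders are assumptions.
const client = new Client({ token: process.env.BP_TOKEN, botId: process.env.BP_BOT_ID })

const cognitive = new Cognitive({
  client,
  maxRetries: 3,
  __experimental_beta: true, // beta client, restricted to authorized users
  __debug: true              // new in this release
})
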
@@ -1026,6 +1027,7 @@ type ClientConfig = {
     headers: Headers;
     withCredentials: boolean;
     timeout: number;
+    debug: boolean;
 };
 
 type CommonClientProps = {
@@ -1033,6 +1035,7 @@ type CommonClientProps = {
     timeout?: number;
     headers?: Headers;
     retry?: RetryConfig;
+    debug?: boolean;
 };
 type SimplifyTuple<T> = T extends [...infer A] ? {
     [K in keyof A]: Simplify<A[K]>;
@@ -18069,12 +18072,12 @@ declare class Client extends Client$1 implements IClient {
         pictureUrl?: string;
     }>;
     events: (props: {
+        status?: "pending" | "ignored" | "processed" | "failed" | "scheduled" | undefined;
         type?: string | undefined;
         userId?: string | undefined;
         conversationId?: string | undefined;
         workflowId?: string | undefined;
         messageId?: string | undefined;
-        status?: "pending" | "ignored" | "processed" | "failed" | "scheduled" | undefined;
     }) => AsyncCollection<{
         id: string;
         createdAt: string;
@@ -18125,13 +18128,13 @@ declare class Client extends Client$1 implements IClient {
         pictureUrl?: string;
     }>;
     tasks: (props: {
+        status?: ("timeout" | "pending" | "failed" | "in_progress" | "completed" | "blocked" | "paused" | "cancelled")[] | undefined;
         tags?: {
             [x: string]: string;
         } | undefined;
         type?: string | undefined;
         userId?: string | undefined;
         conversationId?: string | undefined;
-        status?: ("pending" | "failed" | "in_progress" | "completed" | "blocked" | "paused" | "timeout" | "cancelled")[] | undefined;
         parentTaskId?: string | undefined;
     }) => AsyncCollection<{
         id: string;
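In both listing signatures above, the optional `status` filter moves to the front of the props object; the accepted values are unchanged apart from ordering. A sketch of using these filters, assuming they correspond to the `client.list.events` / `client.list.tasks` helpers re-exported from @botpress/client (the access path and the `collect()` call are assumptions):

// Hypothetical usage of the filter shapes shown above.
const failedEvents = await client.list.events({ status: 'failed' }).collect()
const openTasks = await client.list.tasks({ status: ['pending', 'in_progress'] }).collect()
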
@@ -18964,6 +18967,7 @@ declare class Cognitive {
    protected _provider: ModelProvider;
    protected _downtimes: ModelPreferences['downtimes'];
    protected _useBeta: boolean;
+    protected _debug: boolean;
    private _events;
    constructor(props: CognitiveProps);
    get client(): ExtendedClient;
@@ -18979,7 +18983,7 @@ declare class Cognitive {
    private _generateContent;
 }
 
-type Models = 'auto' | 'best' | 'fast' | '
+type Models = 'auto' | 'best' | 'fast' | 'anthropic:claude-3-5-haiku-20241022' | 'anthropic:claude-3-5-sonnet-20240620' | 'anthropic:claude-3-5-sonnet-20241022' | 'anthropic:claude-3-7-sonnet-20250219' | 'anthropic:claude-3-haiku-20240307' | 'anthropic:claude-sonnet-4-20250514' | 'anthropic:claude-sonnet-4-5-2025092' | 'cerebras:gpt-oss-120b' | 'cerebras:llama-4-scout-17b-16e-instruct' | 'cerebras:llama3.1-8b' | 'cerebras:llama3.3-70b' | 'cerebras:qwen-3-32b' | 'fireworks-ai:deepseek-r1-0528' | 'fireworks-ai:deepseek-v3-0324' | 'fireworks-ai:gpt-oss-120b' | 'fireworks-ai:gpt-oss-20b' | 'fireworks-ai:llama-v3p1-8b-instruct' | 'fireworks-ai:llama-v3p3-70b-instruct' | 'fireworks-ai:llama4-maverick-instruct-basic' | 'fireworks-ai:llama4-scout-instruct-basic' | 'fireworks-ai:mixtral-8x7b-instruct' | 'fireworks-ai:mythomax-l2-13b' | 'google-ai:gemini-2.5-flash' | 'google-ai:gemini-2.5-pro' | 'google-ai:models/gemini-2.0-flash' | 'groq:deepseek-r1-distill-llama-70b' | 'groq:gemma2-9b-it' | 'groq:gpt-oss-120b' | 'groq:gpt-oss-20b' | 'groq:llama-3.1-8b-instant' | 'groq:llama-3.3-70b-versatile' | 'openai:gpt-4.1-2025-04-14' | 'openai:gpt-4.1-mini-2025-04-14' | 'openai:gpt-4.1-nano-2025-04-14' | 'openai:gpt-4o-2024-11-20' | 'openai:gpt-4o-mini-2024-07-18' | 'openai:gpt-5-2025-08-07' | 'openai:gpt-5-mini-2025-08-07' | 'openai:gpt-5-nano-2025-08-07' | 'openai:o1-2024-12-17' | 'openai:o1-mini-2024-09-12' | 'openai:o3-2025-04-16' | 'openai:o3-mini-2025-01-31' | 'openai:o4-mini-2025-04-16' | 'openrouter:gpt-oss-120b' | 'xai:grok-3' | 'xai:grok-3-mini' | 'xai:grok-4-0709' | 'xai:grok-4-fast-non-reasoning' | 'xai:grok-4-fast-reasoning' | 'xai:grok-code-fast-1' | 'openai:gpt-5' | 'openai:gpt-5-mini' | 'openai:gpt-5-nano' | 'openai:o4-mini' | 'openai:o3' | 'openai:gpt-4.1' | 'openai:gpt-4.1-mini' | 'openai:gpt-4.1-nano' | 'openai:o3-mini' | 'openai:o1-mini' | 'openai:gpt-4o-mini' | 'openai:gpt-4o' | 'anthropic:claude-sonnet-4-5' | 'anthropic:claude-sonnet-4' | 'anthropic:claude-sonnet-4-reasoning' | 'groq:openai/gpt-oss-20b' | 'groq:openai/gpt-oss-120b' | 'fireworks-ai:accounts/fireworks/models/gpt-oss-20b' | 'fireworks-ai:accounts/fireworks/models/gpt-oss-120b' | 'fireworks-ai:accounts/fireworks/models/deepseek-r1-0528' | 'fireworks-ai:accounts/fireworks/models/deepseek-v3-0324' | 'fireworks-ai:accounts/fireworks/models/llama4-maverick-instruct-basic' | 'fireworks-ai:accounts/fireworks/models/llama4-scout-instruct-basic' | 'fireworks-ai:accounts/fireworks/models/llama-v3p3-70b-instruct' | 'fireworks-ai:accounts/fireworks/models/deepseek-r1' | 'fireworks-ai:accounts/fireworks/models/deepseek-r1-basic' | 'fireworks-ai:accounts/fireworks/models/deepseek-v3' | 'fireworks-ai:accounts/fireworks/models/llama-v3p1-405b-instruct' | 'fireworks-ai:accounts/fireworks/models/llama-v3p1-70b-instruct' | 'fireworks-ai:accounts/fireworks/models/llama-v3p1-8b-instruct' | 'fireworks-ai:accounts/fireworks/models/mixtral-8x22b-instruct' | 'fireworks-ai:accounts/fireworks/models/mixtral-8x7b-instruct' | 'fireworks-ai:accounts/fireworks/models/mythomax-l2-13b' | 'fireworks-ai:accounts/fireworks/models/gemma2-9b-it' | ({} & string);
 type CognitiveRequest = {
     /**
      * @minItems 1
@@ -19050,6 +19054,7 @@ type CognitiveStreamChunk = {
 };
 type CognitiveResponse = {
     output: string;
+    reasoning?: string;
     metadata: {
         provider: string;
         model?: string;
@@ -19068,7 +19073,7 @@ type CognitiveResponse = {
     stopReason?: 'stop' | 'length' | 'content_filter' | 'error';
     reasoningEffort?: string;
     warnings?: {
-        type: 'parameter_ignored' | 'provider_limitation' | 'deprecated_model' | 'fallback_used';
+        type: 'parameter_ignored' | 'provider_limitation' | 'deprecated_model' | 'discontinued_model' | 'fallback_used';
         message: string;
     }[];
     /**
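Together, the two hunks above add an optional `reasoning` string to `CognitiveResponse` and a `discontinued_model` variant to the warning union. A sketch of consuming both (whether `warnings` sits at the top level or under `metadata` is not visible in this excerpt, so the helper takes the array directly):

import type { CognitiveResponse } from '@botpress/cognitive'

// Assume `res` was returned by CognitiveBeta.generateText (request omitted).
declare const res: CognitiveResponse

if (res.reasoning) {
  console.log('model reasoning:', res.reasoning)
}

type Warning = {
  type: 'parameter_ignored' | 'provider_limitation' | 'deprecated_model' | 'discontinued_model' | 'fallback_used'
  message: string
}

const logLifecycleWarnings = (warnings: Warning[] = []) => {
  for (const w of warnings) {
    if (w.type === 'deprecated_model' || w.type === 'discontinued_model') {
      console.warn(`model lifecycle warning: ${w.message}`)
    }
  }
}
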
@@ -19116,19 +19121,23 @@ type ClientProps = {
     botId?: string;
     token?: string;
     withCredentials?: boolean;
+    debug?: boolean;
     headers?: Record<string, string>;
 };
 type RequestOptions = {
     signal?: AbortSignal;
     timeout?: number;
 };
+
 declare class CognitiveBeta {
     private _axiosClient;
     private readonly _apiUrl;
     private readonly _timeout;
     private readonly _withCredentials;
     private readonly _headers;
+    private readonly _debug;
     constructor(props: ClientProps);
+    clone(): CognitiveBeta;
     generateText(input: CognitiveRequest, options?: RequestOptions): Promise<CognitiveResponse>;
     listModels(): Promise<Model[]>;
     generateTextStream(request: CognitiveRequest, options?: RequestOptions): AsyncGenerator<CognitiveStreamChunk, void, unknown>;
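ClientProps gains a `debug` option, and `CognitiveBeta` gains a `_debug` field plus a `clone()` method. A sketch with placeholder credentials (the `X-Debug: 1` behaviour comes from the constructor change shown in index.mjs below):

import { CognitiveBeta } from '@botpress/cognitive'

const beta = new CognitiveBeta({
  botId: process.env.BP_BOT_ID,
  token: process.env.BP_TOKEN,
  debug: true // new option; per the index.mjs change below it also sends an X-Debug: 1 header
})

// clone() is new and returns a CognitiveBeta with the same apiUrl/timeout/headers/debug settings.
const copy = beta.clone()
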
@@ -19138,4 +19147,4 @@ declare class CognitiveBeta {
 }
 declare const getCognitiveV2Model: (model: string) => Model | undefined;
 
-export { type BotpressClientLike, Cognitive, CognitiveBeta, type CognitiveRequest, type CognitiveResponse, type CognitiveStreamChunk, type Events, type GenerateContentInput, type GenerateContentOutput, type Model$1 as Model, type ModelPreferences, ModelProvider, RemoteModelProvider, getCognitiveV2Model };
+export { type BotpressClientLike, Cognitive, CognitiveBeta, type CognitiveRequest, type CognitiveResponse, type CognitiveStreamChunk, type Events, type GenerateContentInput, type GenerateContentOutput, type Model$1 as Model, type ModelPreferences, ModelProvider, type Models, RemoteModelProvider, getCognitiveV2Model };
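The `Models` union is now part of the public exports, so model ids can be checked at compile time while arbitrary strings remain allowed through the `({} & string)` escape hatch. A small sketch:

import type { Models } from '@botpress/cognitive'

const primary: Models = 'openai:gpt-4o-mini-2024-07-18' // known id, autocompleted
const reserved: Models = 'auto'                          // reserved names are part of the union
const custom: Models = 'my-provider:my-model'            // still accepted via ({} & string)
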
package/dist/index.mjs
CHANGED
@@ -649,7 +649,8 @@ var models = {
       costPer1MTokens: 10
     },
     tags: ["recommended", "reasoning", "general-purpose"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["gpt-5"]
   },
   "openai:gpt-5-mini-2025-08-07": {
     id: "openai:gpt-5-mini-2025-08-07",
@@ -664,7 +665,8 @@ var models = {
       costPer1MTokens: 2
     },
     tags: ["recommended", "reasoning", "general-purpose"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["gpt-5-mini"]
   },
   "openai:gpt-5-nano-2025-08-07": {
     id: "openai:gpt-5-nano-2025-08-07",
@@ -679,7 +681,8 @@ var models = {
       costPer1MTokens: 0.4
     },
     tags: ["low-cost", "reasoning", "general-purpose"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["gpt-5-nano"]
   },
   "openai:o4-mini-2025-04-16": {
     id: "openai:o4-mini-2025-04-16",
@@ -694,7 +697,8 @@ var models = {
       costPer1MTokens: 4.4
     },
     tags: ["reasoning", "vision", "coding"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["o4-mini"]
   },
   "openai:o3-2025-04-16": {
     id: "openai:o3-2025-04-16",
@@ -709,7 +713,8 @@ var models = {
       costPer1MTokens: 8
     },
     tags: ["reasoning", "vision", "coding"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["o3"]
   },
   "openai:gpt-4.1-2025-04-14": {
     id: "openai:gpt-4.1-2025-04-14",
@@ -724,7 +729,8 @@ var models = {
       costPer1MTokens: 8
     },
     tags: ["recommended", "vision", "general-purpose"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["gpt-4.1"]
   },
   "openai:gpt-4.1-mini-2025-04-14": {
     id: "openai:gpt-4.1-mini-2025-04-14",
@@ -739,7 +745,8 @@ var models = {
       costPer1MTokens: 1.6
     },
     tags: ["recommended", "vision", "general-purpose"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["gpt-4.1-mini"]
   },
   "openai:gpt-4.1-nano-2025-04-14": {
     id: "openai:gpt-4.1-nano-2025-04-14",
@@ -754,7 +761,8 @@ var models = {
       costPer1MTokens: 0.4
     },
     tags: ["low-cost", "vision", "general-purpose"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["gpt-4.1-nano"]
   },
   "openai:o3-mini-2025-01-31": {
     id: "openai:o3-mini-2025-01-31",
@@ -769,7 +777,8 @@ var models = {
       costPer1MTokens: 4.4
     },
     tags: ["reasoning", "general-purpose", "coding"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["o3-mini"]
   },
   "openai:o1-2024-12-17": {
     id: "openai:o1-2024-12-17",
@@ -799,7 +808,8 @@ var models = {
       costPer1MTokens: 4.4
     },
     tags: ["reasoning", "vision", "general-purpose"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["o1-mini"]
   },
   "openai:gpt-4o-mini-2024-07-18": {
     id: "openai:gpt-4o-mini-2024-07-18",
@@ -814,7 +824,8 @@ var models = {
       costPer1MTokens: 0.6
     },
     tags: ["recommended", "vision", "low-cost", "general-purpose", "function-calling"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["gpt-4o-mini"]
   },
   "openai:gpt-4o-2024-11-20": {
     id: "openai:gpt-4o-2024-11-20",
@@ -829,7 +840,8 @@ var models = {
       costPer1MTokens: 10
     },
     tags: ["recommended", "vision", "general-purpose", "coding", "agents", "function-calling"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["gpt-4o"]
   },
   "openai:gpt-4o-2024-08-06": {
     id: "openai:gpt-4o-2024-08-06",
@@ -891,6 +903,22 @@ var models = {
     tags: ["deprecated", "general-purpose", "low-cost"],
     lifecycle: "deprecated"
   },
+  "anthropic:claude-sonnet-4-5-2025092": {
+    id: "anthropic:claude-sonnet-4-5-2025092",
+    name: "Claude Sonnet 4.5",
+    description: "Claude Sonnet 4.5 is Anthropic's most advanced Sonnet model to date, optimized for real-world agents and coding workflows. It delivers state-of-the-art performance on coding benchmarks, with improvements across system design, code security, and specification adherence.",
+    input: {
+      maxTokens: 2e5,
+      costPer1MTokens: 3
+    },
+    output: {
+      maxTokens: 64e3,
+      costPer1MTokens: 15
+    },
+    tags: ["recommended", "reasoning", "agents", "vision", "general-purpose", "coding"],
+    lifecycle: "live",
+    aliases: ["claude-sonnet-4-5"]
+  },
   "anthropic:claude-sonnet-4-20250514": {
     id: "anthropic:claude-sonnet-4-20250514",
     name: "Claude Sonnet 4",
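The new entry registers Claude Sonnet 4.5 under its dated id with a `claude-sonnet-4-5` alias and $3 / $15 per-million-token pricing. A sketch of what those numbers mean in practice (the `models` table itself is internal to the bundle, and the token counts below are illustrative):

// Hypothetical cost estimate for one request against the entry above.
const inputCostPer1M = 3   // input.costPer1MTokens
const outputCostPer1M = 15 // output.costPer1MTokens

const inputTokens = 12_000
const outputTokens = 800
const estimatedUsd =
  (inputTokens / 1_000_000) * inputCostPer1M +
  (outputTokens / 1_000_000) * outputCostPer1M // ≈ $0.048
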
@@ -904,7 +932,8 @@ var models = {
       costPer1MTokens: 15
     },
     tags: ["recommended", "reasoning", "agents", "vision", "general-purpose", "coding"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["claude-sonnet-4"]
   },
   "anthropic:claude-sonnet-4-reasoning-20250514": {
     id: "anthropic:claude-sonnet-4-reasoning-20250514",
@@ -919,7 +948,8 @@ var models = {
       costPer1MTokens: 15
     },
     tags: ["deprecated", "vision", "reasoning", "general-purpose", "agents", "coding"],
-    lifecycle: "deprecated"
+    lifecycle: "deprecated",
+    aliases: ["claude-sonnet-4-reasoning"]
   },
   "anthropic:claude-3-7-sonnet-20250219": {
     id: "anthropic:claude-3-7-sonnet-20250219",
@@ -1131,8 +1161,8 @@ var models = {
     tags: ["general-purpose"],
     lifecycle: "live"
   },
-  "groq:
-    id: "groq:
+  "groq:gpt-oss-20b": {
+    id: "groq:gpt-oss-20b",
     name: "GPT-OSS 20B (Preview)",
     description: "gpt-oss-20b is a compact, open-weight language model optimized for low-latency. It shares the same training foundation and capabilities as the GPT-OSS 120B model, with faster responses and lower cost.",
     input: {
@@ -1144,10 +1174,11 @@ var models = {
       costPer1MTokens: 0.5
     },
     tags: ["preview", "general-purpose", "reasoning", "low-cost"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["openai/gpt-oss-20b"]
   },
-  "groq:
-    id: "groq:
+  "groq:gpt-oss-120b": {
+    id: "groq:gpt-oss-120b",
     name: "GPT-OSS 120B (Preview)",
     description: "gpt-oss-120b is a high-performance, open-weight language model designed for production-grade, general-purpose use cases. It excels at complex reasoning and supports configurable reasoning effort, full chain-of-thought transparency for easier debugging and trust, and native agentic capabilities for function calling, tool use, and structured outputs.",
     input: {
@@ -1159,7 +1190,8 @@ var models = {
       costPer1MTokens: 0.75
     },
     tags: ["preview", "general-purpose", "reasoning"],
-    lifecycle: "live"
+    lifecycle: "live",
+    aliases: ["openai/gpt-oss-120b"]
   },
   "groq:deepseek-r1-distill-llama-70b": {
     id: "groq:deepseek-r1-distill-llama-70b",
@@ -1541,7 +1573,7 @@ var models = {
       costPer1MTokens: 8
     },
     tags: ["reasoning", "general-purpose", "coding"],
-    lifecycle: "
+    lifecycle: "deprecated",
     aliases: ["accounts/fireworks/models/deepseek-r1"]
   },
   "fireworks-ai:deepseek-r1-basic": {
@@ -1556,8 +1588,8 @@ var models = {
       maxTokens: 32768,
       costPer1MTokens: 2.19
     },
-    tags: ["
-    lifecycle: "
+    tags: ["reasoning", "general-purpose", "coding"],
+    lifecycle: "deprecated",
     aliases: ["accounts/fireworks/models/deepseek-r1-basic"]
   },
   "fireworks-ai:deepseek-v3": {
@@ -1636,8 +1668,8 @@ var models = {
       maxTokens: 65536,
       costPer1MTokens: 1.2
     },
-    tags: ["general-purpose"],
-    lifecycle: "
+    tags: ["deprecated", "general-purpose"],
+    lifecycle: "deprecated",
     aliases: ["accounts/fireworks/models/mixtral-8x22b-instruct"]
   },
   "fireworks-ai:mixtral-8x7b-instruct": {
@@ -1689,25 +1721,6 @@ var models = {
     aliases: ["accounts/fireworks/models/gemma2-9b-it"]
   }
 };
-var knownTags = [
-  "auto",
-  "best",
-  "fast",
-  "reasoning",
-  "cheapest",
-  "balance",
-  "recommended",
-  "reasoning",
-  "general-purpose",
-  "low-cost",
-  "vision",
-  "coding",
-  "function-calling",
-  "agents",
-  "storytelling",
-  "preview",
-  "roleplay"
-];
 var defaultModel = {
   id: "",
   name: "",
@@ -1726,12 +1739,13 @@ var defaultModel = {
 
 // src/cognitive-v2/index.ts
 var isBrowser = () => typeof window !== "undefined" && typeof window.fetch === "function";
-var CognitiveBeta = class {
+var CognitiveBeta = class _CognitiveBeta {
   _axiosClient;
   _apiUrl;
   _timeout;
   _withCredentials;
   _headers;
+  _debug = false;
   constructor(props) {
     this._apiUrl = props.apiUrl || "https://api.botpress.cloud";
     this._timeout = props.timeout || 60001;
@@ -1743,12 +1757,25 @@ var CognitiveBeta = class {
     if (props.token) {
       this._headers["Authorization"] = `Bearer ${props.token}`;
     }
+    if (props.debug) {
+      this._debug = true;
+      this._headers["X-Debug"] = "1";
+    }
     this._axiosClient = axios.create({
       headers: this._headers,
       withCredentials: this._withCredentials,
       baseURL: this._apiUrl
     });
   }
+  clone() {
+    return new _CognitiveBeta({
+      apiUrl: this._apiUrl,
+      timeout: this._timeout,
+      withCredentials: this._withCredentials,
+      headers: this._headers,
+      debug: this._debug
+    });
+  }
   async generateText(input, options = {}) {
     const signal = options.signal ?? AbortSignal.timeout(this._timeout);
     const { data } = await this._withServerRetry(
@@ -1881,7 +1908,7 @@ var getCognitiveV2Model = (model) => {
   if (alias) {
     return alias;
   }
-  if (
+  if (["auto", "fast", "best"].includes(model)) {
     return { ...defaultModel, id: model, name: model };
   }
   return void 0;
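The restored branch above makes the reserved names `auto`, `fast`, and `best` resolve to a stub based on `defaultModel` instead of falling through to `undefined`. A usage sketch of the exported lookup (alias resolution for non-reserved names is inferred from the `aliases` entries added earlier and is not guaranteed to use this exact spelling):

import { getCognitiveV2Model } from '@botpress/cognitive'

getCognitiveV2Model('auto')                     // stub Model with id/name "auto"
getCognitiveV2Model('openai:gpt-4o-2024-11-20') // full entry from the model table
getCognitiveV2Model('openai:gpt-4o')            // assumed to resolve through the new aliases
getCognitiveV2Model('not-a-model')              // undefined
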
@@ -2140,6 +2167,7 @@ var Cognitive = class _Cognitive {
   _provider;
   _downtimes = [];
   _useBeta = false;
+  _debug = false;
   _events = createNanoEvents();
   constructor(props) {
     this._client = getExtendedClient(props.client);
@@ -2156,7 +2184,9 @@ var Cognitive = class _Cognitive {
       client: this._client.clone(),
       provider: this._provider,
       timeout: this._timeoutMs,
-      maxRetries: this._maxRetries
+      maxRetries: this._maxRetries,
+      __debug: this._debug,
+      __experimental_beta: this._useBeta
     });
     copy._models = [...this._models];
     copy._preferences = this._preferences ? { ...this._preferences } : null;