@botpress/cognitive 0.1.35 → 0.1.37
This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/.turbo/turbo-build.log +7 -7
- package/dist/index.cjs +212 -7
- package/dist/index.cjs.map +4 -4
- package/dist/index.d.ts +172 -13
- package/dist/index.mjs +211 -7
- package/dist/index.mjs.map +4 -4
- package/eslint.config.mjs +13 -0
- package/package.json +1 -1
package/dist/index.d.ts
CHANGED
@@ -127,7 +127,7 @@ type GenerateContentOutput = {
         cost: number;
     };
 };
-type Model$1 = {
+type Model$2 = {
     id: string;
     name: string;
     description: string;
@@ -144,7 +144,7 @@ type Model$1 = {
     };
 };
 
-type Model = Model$1 & {
+type Model$1 = Model$2 & {
     ref: ModelRef;
     integration: string;
 };
@@ -159,7 +159,7 @@ type ModelPreferences = {
     }>;
 };
 declare abstract class ModelProvider {
-    abstract fetchInstalledModels(): Promise<Model[]>;
+    abstract fetchInstalledModels(): Promise<Model$1[]>;
     abstract fetchModelPreferences(): Promise<ModelPreferences | null>;
     abstract saveModelPreferences(preferences: ModelPreferences): Promise<void>;
     abstract deleteModelPreferences(): Promise<void>;
@@ -168,7 +168,7 @@ declare class RemoteModelProvider extends ModelProvider {
     private _client;
     constructor(client: BotpressClientLike);
     private _fetchInstalledIntegrationNames;
-    fetchInstalledModels(): Promise<Model[]>;
+    fetchInstalledModels(): Promise<Model$1[]>;
     fetchModelPreferences(): Promise<ModelPreferences | null>;
     saveModelPreferences(preferences: ModelPreferences): Promise<void>;
     deleteModelPreferences(): Promise<void>;
@@ -212,6 +212,8 @@ type CognitiveProps = {
     timeout?: number;
     /** Max retry attempts */
     maxRetries?: number;
+    /** Whether to use the beta client. Restricted to authorized users. */
+    __experimental_beta?: boolean;
 };
 type Events = {
     aborted: (req: Request, reason?: string) => void;
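A sketch of opting in to the new flag (the `client` value is an assumed BotpressClientLike instance, not shown in this diff; per the doc comment, the beta is restricted to authorized users):

    import { Cognitive } from '@botpress/cognitive'

    // `client` is an assumed BotpressClientLike instance
    const cognitive = new Cognitive({
      client,
      timeout: 30_000,
      maxRetries: 2,
      __experimental_beta: true, // routes generateContent() through the beta service
    })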
@@ -1297,6 +1299,10 @@ interface UpsertFileResponse$1 {
             */
            name?: string;
        };
+        /**
+         * Indicates the indexing stack used to index this file. Present only when file has been successfully indexed. A value of "v2" denotes the latest stack, "v1" denotes the legacy stack.
+         */
+        indexingStack?: "v1" | "v2";
        /**
         * URL to upload the file content. File content needs to be sent to this URL via a PUT request.
         */
@@ -1842,6 +1848,10 @@ interface CreateEventRequestBody {
      * ID of the [User](#schema_user) to link the event to.
      */
     userId?: string;
+    /**
+     * ID of the [Workflow](#schema_workflow) to link the event to.
+     */
+    workflowId?: string;
     /**
      * ID of the [Message](#schema_message) to link the event to.
      */
@@ -1952,6 +1962,7 @@ interface ListEventsRequestQuery {
     conversationId?: string;
     userId?: string;
     messageId?: string;
+    workflowId?: string;
     status?: "pending" | "ignored" | "processed" | "failed" | "scheduled";
 }
 interface ListEventsRequestParams {
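With the new `workflowId` field, events can be created against a workflow and later filtered by it. A sketch (the `client` instance, payload, and ID values are placeholders; the lister shape follows the `list` getter further down in this diff):

    // Create an event linked to a workflow (placeholder type/payload/ID)
    await client.createEvent({ type: 'custom', payload: {}, workflowId: 'wf_123' })

    // List pending events for that workflow
    for await (const event of client.list.events({ workflowId: 'wf_123', status: 'pending' })) {
      console.log(event.id)
    }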
@@ -6491,7 +6502,7 @@ interface CreateBotResponse {
      */
     signingSecret: string;
     /**
-     * A mapping of integrations to their configuration
+     * A mapping of integrations to their configuration. If the `x-multiple-integrations` header is present, this object is keyed by integration aliases. Otherwise, this object is keyed by integration ids.
      */
     integrations: {
         [k: string]: {
@@ -7122,7 +7133,7 @@ interface UpdateBotResponse {
      */
     signingSecret: string;
     /**
-     * A mapping of integrations to their configuration
+     * A mapping of integrations to their configuration. If the `x-multiple-integrations` header is present, this object is keyed by integration aliases. Otherwise, this object is keyed by integration ids.
      */
     integrations: {
         [k: string]: {
@@ -7585,7 +7596,7 @@ interface GetBotResponse {
      */
     signingSecret: string;
     /**
-     * A mapping of integrations to their configuration
+     * A mapping of integrations to their configuration. If the `x-multiple-integrations` header is present, this object is keyed by integration aliases. Otherwise, this object is keyed by integration ids.
      */
     integrations: {
         [k: string]: {
@@ -15352,6 +15363,10 @@ interface UpsertFileResponse {
             */
            name?: string;
        };
+        /**
+         * Indicates the indexing stack used to index this file. Present only when file has been successfully indexed. A value of "v2" denotes the latest stack, "v1" denotes the legacy stack.
+         */
+        indexingStack?: "v1" | "v2";
        /**
         * URL to upload the file content. File content needs to be sent to this URL via a PUT request.
         */
@@ -15474,6 +15489,10 @@ interface ListFilesResponse {
             */
            name?: string;
        };
+        /**
+         * Indicates the indexing stack used to index this file. Present only when file has been successfully indexed. A value of "v2" denotes the latest stack, "v1" denotes the legacy stack.
+         */
+        indexingStack?: "v1" | "v2";
     }[];
     meta: {
         /**
@@ -15578,6 +15597,10 @@ interface GetFileResponse {
             */
            name?: string;
        };
+        /**
+         * Indicates the indexing stack used to index this file. Present only when file has been successfully indexed. A value of "v2" denotes the latest stack, "v1" denotes the legacy stack.
+         */
+        indexingStack?: "v1" | "v2";
     };
 }
 
@@ -15696,6 +15719,10 @@ interface UpdateFileMetadataResponse {
             */
            name?: string;
        };
+        /**
+         * Indicates the indexing stack used to index this file. Present only when file has been successfully indexed. A value of "v2" denotes the latest stack, "v1" denotes the legacy stack.
+         */
+        indexingStack?: "v1" | "v2";
     };
 }
 
@@ -15802,6 +15829,10 @@ interface CopyFileResponse {
             */
            name?: string;
        };
+        /**
+         * Indicates the indexing stack used to index this file. Present only when file has been successfully indexed. A value of "v2" denotes the latest stack, "v1" denotes the legacy stack.
+         */
+        indexingStack?: "v1" | "v2";
     };
 }
 
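The same optional `indexingStack` field now appears on every file-shaped response. A consumer might branch on it like this sketch (the `getFile` call shape and the ID are assumptions based on the response interfaces above):

    const { file } = await client.getFile({ id: 'file_123' }) // placeholder ID
    if (file.indexingStack === undefined) {
      // not (yet) successfully indexed — the field is only present after indexing
    } else if (file.indexingStack === 'v2') {
      // indexed with the latest stack
    } else {
      // 'v1': indexed with the legacy stack
    }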
@@ -17972,7 +18003,7 @@ declare function toApiError(err: unknown): Error;
 type IClient = Simplify<Client$1 & {
     uploadFile: (input: UploadFileInput) => Promise<UploadFileOutput>;
 }>;
-type ClientProps = CommonClientProps & {
+type ClientProps$2 = CommonClientProps & {
     integrationId?: string;
     workspaceId?: string;
     botId?: string;
@@ -17980,7 +18011,7 @@ type ClientProps = CommonClientProps & {
 };
 declare class Client extends Client$1 implements IClient {
     readonly config: Readonly<ClientConfig>;
-    constructor(clientProps?: ClientProps);
+    constructor(clientProps?: ClientProps$2);
     get list(): {
         conversations: (props: {
             tags?: {
@@ -18019,6 +18050,7 @@ declare class Client extends Client$1 implements IClient {
            type?: string | undefined;
            userId?: string | undefined;
            conversationId?: string | undefined;
+            workflowId?: string | undefined;
            messageId?: string | undefined;
            status?: "pending" | "ignored" | "processed" | "failed" | "scheduled" | undefined;
        }) => AsyncCollection<{
@@ -18323,6 +18355,7 @@ declare class Client extends Client$1 implements IClient {
            id?: string;
            name?: string;
        };
+        indexingStack?: "v1" | "v2";
     }>;
     filePassages: (props: {
         id: string;
@@ -18900,25 +18933,151 @@ declare class Cognitive {
         request: InterceptorManager<Request>;
         response: InterceptorManager<Response>;
     };
-    protected _models: Model[];
+    protected _models: Model$1[];
     protected _timeoutMs: number;
     protected _maxRetries: number;
     protected _client: ExtendedClient;
     protected _preferences: ModelPreferences | null;
     protected _provider: ModelProvider;
     protected _downtimes: ModelPreferences['downtimes'];
+    protected _useBeta: boolean;
     private _events;
     constructor(props: CognitiveProps);
     get client(): ExtendedClient;
     clone(): Cognitive;
     on<K extends keyof Events>(this: this, event: K, cb: Events[K]): Unsubscribe;
-    fetchInstalledModels(): Promise<Model[]>;
+    fetchInstalledModels(): Promise<Model$1[]>;
     fetchPreferences(): Promise<ModelPreferences>;
     setPreferences(preferences: ModelPreferences, save?: boolean): Promise<void>;
     private _cleanupOldDowntimes;
     private _selectModel;
-    getModelDetails(model: string): Promise<Model>;
+    getModelDetails(model: string): Promise<Model$1>;
     generateContent(input: InputProps): Promise<Response>;
+    private _generateContent;
 }
 
-
+type CognitiveRequest = {
+    /**
+     * @minItems 1
+     */
+    messages: {
+        role: 'user' | 'assistant' | 'system';
+        content: string | {
+            type: 'text' | 'image';
+            text?: string;
+            url?: string;
+            mimeType?: string;
+            [k: string]: any;
+        }[];
+        type?: string;
+    }[];
+    /**
+     * Model ID or routing goal for automatic selection.
+     */
+    model?: 'auto' | 'auto-best' | 'auto-fast' | 'auto-reasoning' | 'auto-cheapest' | 'auto-balance' | 'anthropic/claude-3-5-haiku-20241022' | 'anthropic/claude-3-5-sonnet-20240620' | 'anthropic/claude-3-5-sonnet-20241022' | 'anthropic/claude-3-7-sonnet-20250219' | 'anthropic/claude-3-haiku-20240307' | 'anthropic/claude-sonnet-4-20250514' | 'cerebras/gpt-oss-120b' | 'cerebras/llama-4-scout-17b-16e-instruct' | 'cerebras/llama3.1-8b' | 'cerebras/llama3.3-70b' | 'cerebras/qwen-3-32b' | 'google-ai/gemini-2.5-flash' | 'google-ai/gemini-2.5-pro' | 'google-ai/models/gemini-2.0-flash' | 'groq/openai/gpt-oss-120b' | 'groq/openai/gpt-oss-20b' | 'openai/gpt-4.1-2025-04-14' | 'openai/gpt-4.1-mini-2025-04-14' | 'openai/gpt-4.1-nano-2025-04-14' | 'openai/gpt-4o-2024-11-20' | 'openai/gpt-4o-mini-2024-07-18' | 'openai/gpt-5-2025-08-07' | 'openai/gpt-5-mini-2025-08-07' | 'openai/gpt-5-nano-2025-08-07' | 'openai/o1-2024-12-17' | 'openai/o1-mini-2024-09-12' | 'openai/o3-2025-04-16' | 'openai/o3-mini-2025-01-31' | 'openai/o4-mini-2025-04-16' | 'openrouter/gpt-oss-120b';
+    systemPrompt?: string;
+    temperature?: number;
+    maxTokens?: number;
+    stopSequences?: string | string[];
+    stream?: boolean;
+    responseFormat?: 'text' | 'json';
+    reasoningEffort?: 'low' | 'medium' | 'high';
+};
+type CognitiveStreamChunk = {
+    output?: string;
+    created: number;
+    finished?: boolean;
+    metadata?: {
+        provider: string;
+        model?: string;
+        usage: {
+            inputTokens: number;
+            outputTokens: number;
+            reasoningTokens?: number;
+        };
+        cost?: number;
+        cached?: boolean;
+        latency?: number;
+        stopReason?: string;
+        reasoningEffort?: string;
+        warnings?: {
+            type: 'parameter_ignored' | 'provider_limitation' | 'deprecated_model' | 'fallback_used';
+            message: string;
+        }[];
+        /**
+         * List of models that were tried and failed
+         */
+        fallbackPath?: string[];
+    };
+};
+type CognitiveResponse = {
+    output: string;
+    metadata: {
+        provider: string;
+        model?: string;
+        usage: {
+            inputTokens: number;
+            outputTokens: number;
+            reasoningTokens?: number;
+        };
+        cost?: number;
+        cached?: boolean;
+        latency?: number;
+        stopReason?: string;
+        reasoningEffort?: string;
+        warnings?: {
+            type: 'parameter_ignored' | 'provider_limitation' | 'deprecated_model' | 'fallback_used';
+            message: string;
+        }[];
+        /**
+         * List of models that were tried and failed
+         */
+        fallbackPath?: string[];
+    };
+    error?: string;
+};
+type Model = {
+    id: string;
+    name: string;
+    description: string;
+    tags?: ('recommended' | 'deprecated' | 'general-purpose' | 'low-cost' | 'flagship' | 'vision' | 'coding' | 'agents' | 'function-calling' | 'roleplay' | 'storytelling' | 'reasoning' | 'preview')[];
+    input: {
+        maxTokens: number;
+        /**
+         * Cost per 1 million tokens, in U.S. dollars
+         */
+        costPer1MTokens: number;
+    };
+    output: {
+        maxTokens: number;
+        /**
+         * Cost per 1 million tokens, in U.S. dollars
+         */
+        costPer1MTokens: number;
+    };
+};
+
+type ClientProps = {
+    baseUrl?: string;
+    timeout?: number;
+    botId?: string;
+    token?: string;
+    headers?: Record<string, string>;
+};
+type RequestOptions = {
+    signal?: AbortSignal;
+    timeout?: number;
+};
+declare class CognitiveBeta {
+    private _axiosClient;
+    private readonly _config;
+    constructor(props: ClientProps);
+    generateText(input: CognitiveRequest, options?: RequestOptions): Promise<CognitiveResponse>;
+    listModels(input: void, options?: RequestOptions): Promise<Model[]>;
+    generateTextStream(request: CognitiveRequest, options?: RequestOptions): AsyncGenerator<CognitiveStreamChunk, void, unknown>;
+    private _ndjson;
+    private _isRetryableServerError;
+    private _withServerRetry;
+}
+
+export { type BotpressClientLike, Cognitive, CognitiveBeta, type CognitiveRequest, type CognitiveResponse, type CognitiveStreamChunk, type Events, type GenerateContentInput, type GenerateContentOutput, type Model$1 as Model, type ModelPreferences, ModelProvider, RemoteModelProvider };
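Taken together, the new declarations allow direct use of the beta client. A sketch (credentials are placeholders, and availability is restricted per the `__experimental_beta` doc comment):

    import { CognitiveBeta } from '@botpress/cognitive'

    const beta = new CognitiveBeta({ token: 'bp_pat_...', botId: 'bot_123' }) // placeholder credentials
    const res = await beta.generateText({
      model: 'auto-fast',
      messages: [{ role: 'user', content: 'One-line summary of NDJSON, please.' }],
    })
    console.log(res.output, res.metadata.usage, res.metadata.fallbackPath)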
package/dist/index.mjs
CHANGED
@@ -497,25 +497,25 @@ var require_backoff = __commonJS({
     Object.defineProperty(exports, "__esModule", { value: true });
     var options_1 = require_options();
     var delay_factory_1 = require_delay_factory();
-    function backOff2(request, options) {
+    function backOff3(request, options) {
       if (options === void 0) {
         options = {};
       }
       return __awaiter(this, void 0, void 0, function() {
-        var sanitizedOptions, backOff3;
+        var sanitizedOptions, backOff4;
         return __generator(this, function(_a) {
           switch (_a.label) {
             case 0:
               sanitizedOptions = options_1.getSanitizedOptions(options);
-              backOff3 = new BackOff(request, sanitizedOptions);
-              return [4, backOff3.execute()];
+              backOff4 = new BackOff(request, sanitizedOptions);
+              return [4, backOff4.execute()];
             case 1:
               return [2, _a.sent()];
           }
         });
       });
     }
-    exports.backOff = backOff2;
+    exports.backOff = backOff3;
     var BackOff = (
       /** @class */
       function() {
@@ -591,7 +591,7 @@ var require_backoff = __commonJS({
 });
 
 // src/client.ts
-var import_exponential_backoff = __toESM(require_backoff());
+var import_exponential_backoff2 = __toESM(require_backoff());
 
 // ../../node_modules/.pnpm/nanoevents@9.1.0/node_modules/nanoevents/index.js
 var createNanoEvents = () => ({
@@ -650,6 +650,159 @@ var getExtendedClient = (_client) => {
   };
 };
 
+// src/cognitive_beta/index.ts
+var import_exponential_backoff = __toESM(require_backoff());
+import axios from "axios";
+var isBrowser = () => typeof window !== "undefined" && typeof window.fetch === "function";
+var CognitiveBeta = class {
+  _axiosClient;
+  _config;
+  constructor(props) {
+    this._config = {
+      baseUrl: props.baseUrl || "https://cognitive.botpress.cloud",
+      timeout: props.timeout || 60001,
+      token: props.token || "",
+      botId: props.botId || "",
+      headers: props.headers || {}
+    };
+    this._axiosClient = axios.create({
+      headers: {
+        Authorization: `Bearer ${this._config.token}`,
+        "X-Bot-Id": this._config.botId,
+        ...this._config.headers
+      },
+      baseURL: this._config.baseUrl
+    });
+  }
+  async generateText(input, options = {}) {
+    const signal = options.signal ?? AbortSignal.timeout(this._config.timeout);
+    const { data } = await this._withServerRetry(
+      () => this._axiosClient.post("/v1/generate-text", input, {
+        signal,
+        timeout: options.timeout ?? this._config.timeout
+      })
+    );
+    return data;
+  }
+  async listModels(input, options = {}) {
+    const signal = options.signal ?? AbortSignal.timeout(this._config.timeout);
+    const { data } = await this._withServerRetry(
+      () => this._axiosClient.post("/v1/models", input, {
+        signal,
+        timeout: options.timeout ?? this._config.timeout
+      })
+    );
+    return data;
+  }
+  async *generateTextStream(request, options = {}) {
+    const signal = options.signal ?? AbortSignal.timeout(this._config.timeout);
+    if (isBrowser()) {
+      const res2 = await fetch(`${this._config.baseUrl}/v1/generate-text-stream`, {
+        method: "POST",
+        headers: {
+          Authorization: `Bearer ${this._config.token}`,
+          "X-Bot-Id": this._config.botId,
+          "Content-Type": "application/json"
+        },
+        body: JSON.stringify({ ...request, stream: true }),
+        signal
+      });
+      if (!res2.ok) {
+        const text = await res2.text().catch(() => "");
+        const err = new Error(`HTTP ${res2.status}: ${text || res2.statusText}`);
+        err.response = { status: res2.status, data: text };
+        throw err;
+      }
+      const body = res2.body;
+      if (!body) {
+        throw new Error("No response body received for streaming request");
+      }
+      const reader = body.getReader();
+      const iterable = async function* () {
+        for (; ; ) {
+          const { value, done } = await reader.read();
+          if (done) {
+            break;
+          }
+          if (value) {
+            yield value;
+          }
+        }
+      }();
+      for await (const obj of this._ndjson(iterable)) {
+        yield obj;
+      }
+      return;
+    }
+    const res = await this._withServerRetry(
+      () => this._axiosClient.post(
+        "/v1/generate-text-stream",
+        { ...request, stream: true },
+        {
+          responseType: "stream",
+          signal,
+          timeout: options.timeout ?? this._config.timeout
+        }
+      )
+    );
+    const nodeStream = res.data;
+    if (!nodeStream) {
+      throw new Error("No response body received for streaming request");
+    }
+    for await (const obj of this._ndjson(nodeStream)) {
+      yield obj;
+    }
+  }
+  async *_ndjson(stream) {
+    const decoder = new TextDecoder("utf-8");
+    let buffer = "";
+    for await (const chunk of stream) {
+      buffer += decoder.decode(chunk, { stream: true });
+      for (; ; ) {
+        const i = buffer.indexOf("\n");
+        if (i < 0) {
+          break;
+        }
+        const line = buffer.slice(0, i).replace(/\r$/, "");
+        buffer = buffer.slice(i + 1);
+        if (!line) {
+          continue;
+        }
+        yield JSON.parse(line);
+      }
+    }
+    buffer += decoder.decode();
+    const tail = buffer.trim();
+    if (tail) {
+      yield JSON.parse(tail);
+    }
+  }
+  _isRetryableServerError(error) {
+    if (axios.isAxiosError(error)) {
+      if (!error.response) {
+        return true;
+      }
+      const status = error.response?.status;
+      if (status && [502, 503, 504].includes(status)) {
+        return true;
+      }
+      if (error.code && ["ECONNABORTED", "ECONNRESET", "ETIMEDOUT", "EAI_AGAIN", "ENOTFOUND", "EPIPE"].includes(error.code)) {
+        return true;
+      }
+    }
+    return false;
+  }
+  async _withServerRetry(fn) {
+    return (0, import_exponential_backoff.backOff)(fn, {
+      numOfAttempts: 3,
+      startingDelay: 300,
+      timeMultiple: 2,
+      jitter: "full",
+      retry: (e) => this._isRetryableServerError(e)
+    });
+  }
+};
+
 // src/errors.ts
 var getActionFromError = (error) => {
   if (!isBotpressError(error)) {
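The stream endpoint emits NDJSON, one JSON object per newline-delimited line, which `_ndjson` above parses into `CognitiveStreamChunk`s. Consuming it could look like this sketch (the `beta` instance from the earlier sketch and the prompt are placeholders):

    for await (const chunk of beta.generateTextStream({
      messages: [{ role: 'user', content: 'Stream a haiku.' }], // placeholder prompt
    })) {
      if (chunk.output) process.stdout.write(chunk.output)
      if (chunk.finished && chunk.metadata) console.log('\ndone:', chunk.metadata.usage)
    }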
@@ -902,12 +1055,14 @@ var Cognitive = class {
   _preferences = null;
   _provider;
   _downtimes = [];
+  _useBeta = false;
   _events = createNanoEvents();
   constructor(props) {
     this._client = getExtendedClient(props.client);
     this._provider = props.provider ?? new RemoteModelProvider(props.client);
     this._timeoutMs = props.timeout ?? this._timeoutMs;
     this._maxRetries = props.maxRetries ?? this._maxRetries;
+    this._useBeta = props.__experimental_beta ?? false;
   }
   get client() {
     return this._client;
@@ -994,6 +1149,54 @@ var Cognitive = class {
     return def;
   }
   async generateContent(input) {
+    if (!this._useBeta) {
+      return this._generateContent(input);
+    }
+    const betaClient = new CognitiveBeta({
+      headers: this._client.config.headers,
+      baseUrl: this._client.config.apiUrl.includes(".cloud") ? "https://cognitive.botpress.cloud" : "https://cognitive.botpress.dev"
+    });
+    const response = await betaClient.generateText(input);
+    return {
+      output: {
+        id: "beta-output",
+        provider: response.metadata.provider,
+        model: response.metadata.model,
+        choices: [
+          {
+            type: "text",
+            content: response.output,
+            role: "assistant",
+            index: 0,
+            stopReason: response.metadata.stopReason
+          }
+        ],
+        usage: {
+          inputTokens: response.metadata.usage.inputTokens,
+          inputCost: 0,
+          outputTokens: response.metadata.usage.outputTokens,
+          outputCost: response.metadata.cost ?? 0
+        },
+        botpress: {
+          cost: response.metadata.cost ?? 0
+        }
+      },
+      meta: {
+        cached: response.metadata.cached,
+        model: { integration: response.metadata.provider, model: response.metadata.model },
+        latency: response.metadata.latency,
+        cost: {
+          input: 0,
+          output: response.metadata.cost || 0
+        },
+        tokens: {
+          input: response.metadata.usage.inputTokens,
+          output: response.metadata.usage.outputTokens
+        }
+      }
+    };
+  }
+  async _generateContent(input) {
     const start = Date.now();
     const signal = input.signal ?? AbortSignal.timeout(this._timeoutMs);
     const client = this._client.abortable(signal);
@@ -1001,7 +1204,7 @@ var Cognitive = class {
     let integration;
     let model;
     this._events.emit("request", props);
-    const { output, meta } = await (0, import_exponential_backoff.backOff)(
+    const { output, meta } = await (0, import_exponential_backoff2.backOff)(
       async () => {
         const selection = await this._selectModel(input.model ?? "best");
         integration = selection.integration;
@@ -1066,6 +1269,7 @@ var Cognitive = class {
 };
 export {
   Cognitive,
+  CognitiveBeta,
   ModelProvider,
   RemoteModelProvider
 };
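End to end, the beta flag leaves existing call sites intact: `generateContent` checks `_useBeta`, calls `CognitiveBeta.generateText`, and maps the result back into the usual `{ output, meta }` shape, so callers keep reading `output.choices` and `meta.tokens`. A sketch (the `client` value is an assumed BotpressClientLike):

    const cognitive = new Cognitive({ client, __experimental_beta: true })
    const res = await cognitive.generateContent({
      model: 'auto',
      messages: [{ role: 'user', content: 'Hello' }], // placeholder input
    })
    console.log(res.output.choices[0].content, res.meta.tokens) // same shape as the non-beta path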