@cognigy/rest-api-client 2025.16.0 → 2025.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/CHANGELOG.md +5 -0
  2. package/build/shared/charts/descriptors/data/debugMessage.js +13 -3
  3. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -49
  4. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +31 -2
  5. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +11 -2
  6. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +12 -3
  7. package/build/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  8. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  9. package/build/shared/generativeAI/getPrompt.js +75 -0
  10. package/build/shared/generativeAI/utils/generativeAIPrompts.js +613 -0
  11. package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +84 -0
  12. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  13. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  14. package/build/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  15. package/build/test.js +39 -0
  16. package/dist/esm/shared/charts/descriptors/data/debugMessage.js +13 -3
  17. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -50
  18. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +29 -1
  19. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +11 -2
  20. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +12 -3
  21. package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  22. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  23. package/dist/esm/shared/generativeAI/getPrompt.js +68 -0
  24. package/dist/esm/shared/generativeAI/utils/generativeAIPrompts.js +610 -0
  25. package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +81 -0
  26. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  27. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  28. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  29. package/dist/esm/shared/interfaces/restAPI/management/authentication/ICreateJWTToken.js +1 -0
  30. package/dist/esm/test.js +39 -0
  31. package/package.json +1 -1
  32. package/types/index.d.ts +42 -19
package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js ADDED
@@ -0,0 +1,81 @@
+ const contextAwareUserQueryRephrasingBasePrompt = `You are classifying and rephrasing user queries. Your rephrased user queries will be used as input for RAG and other LLM calls.
+
+ Instructions:
+ - Do not respond to the user query as in a real conversation.
+ - Determine whether the latest user query relates to the recent chat history.
+ - If it does, rephrase the latest user query, possibly including details from the previous chat history.
+ - If it does not, respond with "false".
+
+ What follows are some example conversations, followed last by the real conversation which you are working on.`;
+ const example1 = [
+ { role: "user", content: "Hi, my name is Micheal. I'm looking for support regarding an issue." },
+ { role: "assistant", content: "Great, let's get you connected with an agent. What is your customer ID?" },
+ { role: "user", content: "My ID is S0-F45T" },
+ { role: "assistant", content: "false" },
+ ];
+ const example2 = [
+ { role: "user", content: "The Toyota Proace City looks quite nice. I'm looking to fit a bunch of stuff in the car. How much capacity does it have?" },
+ { role: "assistant", content: "There are two variants: L1 has 3,8 m3 loading volume and L2 has 4,4 m3 loading volume." },
+ { role: "user", content: "And how much can I load?" },
+ { role: "assistant", content: "What is the maximum payload and towing capacity of the Toyota Proace City?" },
+ ];
+ const example3 = [
+ { role: "user", content: "I am looking for a new smartphone." },
+ { role: "assistant", content: "What features are you interested in?" },
+ { role: "user", content: "I want a good camera and long battery life." },
+ { role: "assistant", content: "Great! Are you looking for a specific brand or operating system, like Android or iOS?" },
+ { role: "user", content: "I prefer Android devices." },
+ { role: "assistant", content: "Do you have a budget in mind?" },
+ { role: "user", content: "I would like to keep it under $800." },
+ { role: "user", content: "Can you recommend a model?" },
+ { role: "assistant", content: "Can you suggest an Android smartphone under $800 with a good camera and long battery life?" },
+ ];
+ export const contextAwareUserQueryRephrasingChatPrompt = [
+ {
+ role: "system",
+ content: contextAwareUserQueryRephrasingBasePrompt,
+ },
+ ...example1,
+ ...example2,
+ ...example3,
+ ];
+ const mapExampleToPrompt = (example) => {
+ return example.map(message => message.role === "user" ? `User: ${message.content}` : `Assistant: ${message.content}`).join("\n");
+ };
+ export const alternativeContextAwareUserQueryRephrasingChatPrompt = [
+ {
+ role: "system",
+ content: `# Role and Objective
+ You are classifying and rephrasing user queries. Your rephrased user queries will be used as input for RAG and other LLM calls.
+
+ # Instructions
+ - Do not respond to the user query as in a real conversation.
+ - Determine whether the latest user query relates to the previous messages.
+ - If it does relate, rephrase the latest user query, possibly including details from the previous messages.
+ - If it does not relate, respond with "false".
+
+ ## Rephrasing
+ - View the previous messages and look at related context in the immediate past.
+ - Pull relevant context from those messages and include them in the rephrased user query.
+ - Such context include, but is not limited to, user or product information, names, and dates.
+
+ # Output Format
+ - Rephrased user query
+ - or false, if unrelated to the previous messages
+
+ # Examples
+
+ ## Example 1
+ ${mapExampleToPrompt(example1)}
+
+ ## Example 2
+ ${mapExampleToPrompt(example2)}
+
+ ## Example 3
+ ${mapExampleToPrompt(example3)}
+
+ # Final instructions and prompt to think step by step
+ - Let’s think step-by-step.`,
+ },
+ ];
+ //# sourceMappingURL=contextAwareUserQueryRephrasing.js.map
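The module above ships a few-shot chat prompt: a system instruction followed by three worked example conversations, each ending with the assistant returning either a rephrased query or the literal string "false". A minimal consumption sketch in TypeScript; callLLM is a hypothetical chat-completion client and the declare lines stand in for the exports shown above:

type ChatMessage = { role: "system" | "user" | "assistant"; content: string };

// Hypothetical LLM client; not part of this package.
declare function callLLM(messages: ChatMessage[]): Promise<string>;
// The few-shot prompt exported by the new module above.
declare const contextAwareUserQueryRephrasingChatPrompt: ChatMessage[];

async function rephraseQuery(history: ChatMessage[], latestQuery: string): Promise<string | false> {
  const messages: ChatMessage[] = [
    ...contextAwareUserQueryRephrasingChatPrompt, // system prompt + three example conversations
    ...history,                                   // the real conversation so far
    { role: "user", content: latestQuery },       // the turn to classify and rephrase
  ];
  const answer = await callLLM(messages);
  // Per the prompt contract, the model answers "false" for standalone queries.
  return answer.trim() === "false" ? false : answer;
}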
package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js CHANGED
@@ -33,6 +33,7 @@ export const generativeAIModels = [
  "claude-sonnet-4-0",
  "text-bison@001",
  "custom-model",
+ "custom-embedding-model",
  "gemini-1.0-pro",
  "gemini-1.5-pro",
  "gemini-1.5-flash",
package/dist/esm/shared/interfaces/messageAPI/handover.js CHANGED
@@ -130,6 +130,12 @@ export const createHandoverRequestDataSchema = {
  enableHandoverConnectMessageRingCentralEngage: {
  type: "boolean"
  },
+ enableHandoverDisconnectMessageSalesforceMIAW: {
+ type: "boolean"
+ },
+ enableHandoverConnectMessageSalesforceMIAW: {
+ type: "boolean"
+ },
  "notifySessionId": {
  "type": "string",
  },
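The handover request schema gains connect/disconnect message toggles for Salesforce Messaging for In-App and Web (MIAW), symmetric with the existing per-provider flags such as the RingCentral Engage one above. A payload sketch; only the two new flags and notifySessionId come from this diff, the rest of the request shape is assumed:

// Hedged sketch of a handover request body using the new flags.
const handoverRequestData = {
  enableHandoverConnectMessageSalesforceMIAW: true,    // message the user when the MIAW agent connects
  enableHandoverDisconnectMessageSalesforceMIAW: true, // message the user when the MIAW agent disconnects
  notifySessionId: "session-1234",                     // existing schema field, hypothetical value
};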
package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js CHANGED
@@ -54,6 +54,7 @@ export const openAICompatibleMetaSchema = {
  customModel: { type: "string" },
  baseCustomUrl: { type: "string" },
  customAuthHeader: { type: "string" },
+ embeddingVectorSize: { type: "number" },
  }
  };
  export const azureOpenAIMetaSchema = {
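The new embeddingVectorSize field pairs with the "custom-embedding-model" entry added to generativeAIModels: an OpenAI-compatible endpoint that serves embeddings can now declare the dimensionality of its vectors. A sketch of the meta object, typed after the IOpenAICompatibleMeta interface updated later in this diff; the URL and header name are placeholders:

const openAICompatibleMeta: {
  customModel: string;
  baseCustomUrl: string;
  customAuthHeader?: string;
  embeddingVectorSize?: number; // new in 2025.17.0
} = {
  customModel: "custom-embedding-model",
  baseCustomUrl: "https://llm.example.com/v1", // hypothetical endpoint
  customAuthHeader: "X-Api-Key",               // hypothetical auth header name
  embeddingVectorSize: 1536,                   // dimensionality of the returned embedding vectors
};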
package/dist/esm/shared/interfaces/restAPI/management/authentication/ICreateJWTToken.js ADDED
@@ -0,0 +1 @@
+ //# sourceMappingURL=ICreateJWTToken.js.map
package/dist/esm/test.js ADDED
@@ -0,0 +1,39 @@
+ /* import { RestAPIClient, TRestAPIClient } from "./RestAPIClient";
+ import * as fs from "fs";
+ const FormData = require("form-data");
+
+ const OAUTH_CLIENT_ID = "cognigy-ui";
+ const OAUTH_CLIENT_SECRET =
+ "KR7yxR3rAhZ9sEn923dZ5KeNs9SVuwBjHxXKpmqtvSNXw5xWz35Y5YRtTBt96Jaa";
+ const baseUrl = "https://api.test";
+
+ const instance = new RestAPIClient({
+ numberOfRetries: 2,
+ baseUrl,
+ versions: {
+ administration: "2.0",
+ external: "2.0",
+ metrics: "2.0",
+ resources: "2.0",
+ sessions: "2.0"
+ },
+ timeout: 10000
+ });
+
+ (async () => {
+
+ const base64SnapshotString = fs.readFileSync('./src/IDE/fixtures/snapshots/overrideSnapshotConnections_project.csnap')
+ const form = new FormData();
+
+ form.append("projectId", projectId);
+ form.append("file", base64SnapshotString, "snapshot.csnap");
+
+ const slot = await instance.uploadExtension({
+ projectId: "your-project-id"
+ fs.readFileSync('./src/IDE/fixtures/snapshots/overrideSnapshotConnections_project.csnap'),
+ user: "your-user-id",
+ });
+ console.log(slot);
+ })();
+ */
+ //# sourceMappingURL=test.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@cognigy/rest-api-client",
- "version": "2025.16.0",
+ "version": "2025.17.0",
  "description": "Cognigy REST-Client",
  "main": "build/index.js",
  "module": "dist/esm/index.js",
package/types/index.d.ts CHANGED
@@ -4372,6 +4372,7 @@ declare const generativeAIModels: readonly [
  "claude-sonnet-4-0",
  "text-bison@001",
  "custom-model",
+ "custom-embedding-model",
  "gemini-1.0-pro",
  "gemini-1.5-pro",
  "gemini-1.5-flash",
@@ -4418,6 +4419,7 @@ declare const generativeAIModels: readonly [
  * - claude-instant-v1
  * - claude-3-opus-20240229
  * - custom-model
+ * - custom-embedding-model
  * - gemini-2.0-flash
  * - gemini-2.0-flash-lite
  * - mistral-large-2411
@@ -8756,6 +8758,7 @@ export interface IOpenAICompatibleMeta {
  customModel: string;
  baseCustomUrl: string;
  customAuthHeader?: string;
+ embeddingVectorSize?: number;
  }
  export interface IAlephAlphaMeta {
  customModel?: string;
@@ -9913,6 +9916,22 @@ export interface INodeToExecute {
  };
  type?: "stopExecution" | "resetCognigyScriptInput" | "trackAnalyticsStep";
  }
+ export interface OpenAIChatMessage {
+ role: "system" | "user" | "assistant";
+ content: string;
+ }
+ export interface IGenerativeSlot {
+ tag: string;
+ description: string;
+ value: string;
+ optional?: boolean;
+ validation?: {
+ type: string;
+ value: string;
+ invalidReason: string;
+ };
+ invalid?: boolean;
+ }
  export declare type TCompletionPrompt = {
  prompt: string;
  };
@@ -9924,6 +9943,8 @@ export declare type TChatMessage = {
  export declare type TChatPrompt = {
  messages: Array<TChatMessage>;
  };
+ export declare type TALLPrompts = TCompletionPrompt | TChatPrompt | Array<OpenAIChatMessage>;
+ export declare type TPromptParserFun = (originalMsg: TALLPrompts) => TALLPrompts;
  export declare type TSessionUsageInformation = {
  [key: string]: {
  llmDisplayName: string;
@@ -10077,11 +10098,15 @@ export declare type ValueOf<T> = T[keyof T];
  * @param limit: the maximum number of entries to read
  * @param rolesWhiteList: (optional) the roles for which the entries should be included, if not provided or empty all entries will be read
  * @param excludeDataOnlyMessagesFilter: (optional) the roles for which data only messages (emtpy text field) will be excluded if type is input/output
+ * @param useTextAlternativeForLLM: (optional) if true, graphical outputs will be included in the transcript, either using the value from the graphical description / fallbackText field, or by converting the data where possible
+ * @param includeTextAlternativeInTranscript: (optional) if true, the text alternative for LLM will be included in the transcript
  */
  export declare type TReadTranscriptOptions = {
  limit: number;
  rolesWhiteList?: TTranscriptRoles[];
  excludeDataOnlyMessagesFilter?: (TranscriptRole.USER | TranscriptRole.AGENT | TranscriptRole.ASSISTANT)[];
+ useTextAlternativeForLLM?: boolean;
+ includeTextAlternativeInTranscript?: boolean;
  };
  declare enum TranscriptRole {
  ASSISTANT = "assistant",
@@ -10119,6 +10144,7 @@ export declare type TTranscriptAgentOutput = {
  payload: {
  text?: string;
  data?: any;
+ textAlternative?: string;
  };
  };
  export declare type TTranscriptAssistantOutput = {
@@ -10128,6 +10154,7 @@ export declare type TTranscriptAssistantOutput = {
  payload: {
  text?: string;
  data?: any;
+ textAlternative?: string;
  };
  };
  export declare type TTranscriptAssistantToolCall = {
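Taken together, the three hunks above let transcripts carry an LLM-friendly text stand-in for graphical output: TReadTranscriptOptions gains two opt-in flags, and the agent/assistant payloads gain a textAlternative field. A sketch of how a caller might use them; readTranscript is a hypothetical accessor, while the option names and payload fields come from this diff:

type TranscriptEntry = {
  role: "user" | "agent" | "assistant";
  payload: { text?: string; data?: any; textAlternative?: string };
};

// Hypothetical reader; the real package exposes transcripts through its own APIs.
declare function readTranscript(options: {
  limit: number;
  useTextAlternativeForLLM?: boolean;
  includeTextAlternativeInTranscript?: boolean;
}): Promise<TranscriptEntry[]>;

async function transcriptForLLM(): Promise<string> {
  const entries = await readTranscript({
    limit: 50,
    useTextAlternativeForLLM: true,           // derive text from fallbackText / data for graphical outputs
    includeTextAlternativeInTranscript: true, // keep that derived text on the entry payload
  });
  // Prefer plain text; fall back to the alternative for cards, galleries, and similar outputs.
  return entries
    .map(e => `${e.role}: ${e.payload.text ?? e.payload.textAlternative ?? ""}`)
    .join("\n");
}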
@@ -10189,22 +10216,6 @@ export interface ITool {
  export interface IToolParameters {
  [key: string]: any;
  }
- export interface OpenAIChatMessage {
- role: "system" | "user" | "assistant";
- content: string;
- }
- export interface IGenerativeSlot {
- tag: string;
- description: string;
- value: string;
- optional?: boolean;
- validation?: {
- type: string;
- value: string;
- invalidReason: string;
- };
- invalid?: boolean;
- }
  export interface IRunGenerativeAIPromptOptions {
  /**
  * @deprecated should not inject the prompt anymore, use getPrompt() instead
@@ -10231,7 +10242,7 @@ export interface IRunGenerativeAIPromptOptions {
  */
  toolChoice?: "auto" | "required" | "none";
  /** promptFiller - Injected Function to replace the possible prompt variable for the use case data on runtime . */
- promptParser?: (rawPrompt: TCompletionPrompt | TChatPrompt) => TCompletionPrompt | TChatPrompt;
+ promptParser?: (rawPrompt: TALLPrompts) => TALLPrompts;
  /** temperature - (Optional) The temperature range to determine how much the OpenAI should vary its response. Defaults to 0.7 */
  temperature?: number;
  /** model - (Optional) The OpenAI model to use. Defaults to 'gpt-4o' */
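Widening promptParser from TCompletionPrompt | TChatPrompt to TALLPrompts means a parser may now also receive a bare Array<OpenAIChatMessage>, so it has to discriminate three shapes. A sketch under that assumption, with the types copied from this diff (TChatMessage simplified to the OpenAIChatMessage shape):

type OpenAIChatMessage = { role: "system" | "user" | "assistant"; content: string };
type TCompletionPrompt = { prompt: string };
type TChatPrompt = { messages: Array<OpenAIChatMessage> }; // TChatMessage simplified here
type TALLPrompts = TCompletionPrompt | TChatPrompt | Array<OpenAIChatMessage>;
type TPromptParserFun = (originalMsg: TALLPrompts) => TALLPrompts;

// Example parser: fills a {{topic}} placeholder in whichever prompt shape arrives.
const fillTopic = (topic: string): TPromptParserFun => (raw) => {
  if (Array.isArray(raw)) {
    // New in 2025.17.0: a bare OpenAI-style message array is a valid prompt.
    return raw.map(m => ({ ...m, content: m.content.replace("{{topic}}", topic) }));
  }
  if ("prompt" in raw) {
    return { prompt: raw.prompt.replace("{{topic}}", topic) };
  }
  return { messages: raw.messages.map(m => ({ ...m, content: m.content.replace("{{topic}}", topic) })) };
};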
@@ -10401,10 +10412,13 @@ export declare type ISearchTagsFilterOps = "and" | "or";
  export interface IProviderOauth2Fields extends IAzureOpenAIProviderOauth2Fields {
  tokenCacheKey: string;
  }
+ export interface ILLMProviderMeta extends IAzureOpenAIMeta {
+ customAuthHeader?: string;
+ }
  export interface ISearchLLMCredentials {
  provider: TGenerativeAIProviders;
  connectionFields: IAzureOpenAIProviderFieldsV2 | IProviderOauth2Fields | IAwsBedrockProviderFields | IAwsBedrockIamProviderFields | IAlephAlphaProviderFields;
- providerMetaData: IAzureOpenAIMeta;
+ providerMetaData: ILLMProviderMeta;
  }
  export interface ISearchTagsData {
  tags: string[];
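ILLMProviderMeta layers a single optional customAuthHeader on top of IAzureOpenAIMeta, and providerMetaData is retyped to it, so existing IAzureOpenAIMeta values keep type-checking. A sketch; IAzureOpenAIMeta's own fields are not part of this diff and are elided:

interface IAzureOpenAIMeta { /* provider fields elided; not shown in this diff */ }
interface ILLMProviderMeta extends IAzureOpenAIMeta {
  customAuthHeader?: string; // new: name of a custom auth header for the provider
}

const providerMetaData: ILLMProviderMeta = {
  customAuthHeader: "X-Api-Key", // hypothetical header name
};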
@@ -10678,6 +10692,7 @@ export interface IActions {
  rephraseSentenceWithAI(sentence: string, options: IRephraseSentenceWithAIOptions): Promise<string>;
  rephraseMultipleSentencesWithAI(sentences: string[], options: IRephraseSentenceWithAIOptions): Promise<string[]>;
  requestHandover?: (text: string, cancel: string, userId: string, sessionId: string, requestHandover: string, inputAnalyticsData: IAnalyticsSourceData, handoverVersion?: IHandoverRequestStatus["handoverVersion"], repeatHandoverMessage?: boolean, sendResolveEvent?: boolean, resolveBehavior?: IHandoverRequestStatus["resolveBehavior"], nodeType?: IHandoverRequestStatus["nodeType"], providerResponse?: any, sendOnQueueEvent?: boolean, sendOnActiveEvent?: boolean) => void;
+ runGenerativeAIPromptForUseCase?: (options: IRunGenerativeAIPromptOptions, useCase: TGenerativeAIUseCases, subUseCase?: string, promptParser?: TPromptParserFun, nodeAnalyticsParams?: TNodeAnalyticsParams) => Promise<any>;
  runGenerativeAIPrompt?: (options: IRunGenerativeAIPromptOptions, useCase: TGenerativeAIUseCases, nodeAnalyticsParams?: TNodeAnalyticsParams) => Promise<any>;
  resetContext?: () => object;
  resetFormBrain?: () => Promise<void>;
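The new optional runGenerativeAIPromptForUseCase action mirrors runGenerativeAIPrompt but adds a subUseCase discriminator and a per-call promptParser (typed TPromptParserFun, defined earlier in this file). A hedged invocation sketch; the handle and both use-case strings are illustrative, only the signature comes from this diff:

// Abbreviated stand-ins for the package types referenced by the signature.
declare const actions: {
  runGenerativeAIPromptForUseCase?: (
    options: { temperature?: number },        // IRunGenerativeAIPromptOptions, abbreviated
    useCase: string,                          // TGenerativeAIUseCases, abbreviated
    subUseCase?: string,
    promptParser?: (raw: unknown) => unknown, // TPromptParserFun, abbreviated
  ) => Promise<any>;
};

async function runRephrasing(): Promise<any> {
  return actions.runGenerativeAIPromptForUseCase?.(
    { temperature: 0.2 },
    "knowledgeSearch",                 // hypothetical use case
    "contextAwareUserQueryRephrasing", // hypothetical sub use case
    raw => raw,                        // pass-through parser
  );
}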
@@ -23321,14 +23336,16 @@ export interface IScheduleSimulationRestDataParams_2_0 {
  }
  export interface IScheduleSimulationRestDataBody_2_0 {
  name: string;
- largeLanguageModelReferenceId: string;
  runConfig: {
  flowReferenceId: string;
  localeReferenceId?: string;
  entrypoint: string;
+ largeLanguageModelReferenceId?: string;
+ userId?: string;
  };
  projectReference: string;
  numberOfExecutions: number;
+ endPointType: string;
  }
  export interface IScheduleSimulationRestData_2_0 extends IScheduleSimulationRestDataParams_2_0, IScheduleSimulationRestDataBody_2_0 {
  }
@@ -23496,12 +23513,18 @@ export interface IGetPersonaOptionsRestData_2_0 {
  export interface IMissionType_2_0 {
  name: string;
  description: string;
+ successCriteria: SuccessCriteria[];
+ }
+ export interface SuccessCriteria {
+ name: string;
+ description: string;
  }
  export interface IPersonaType_2_0 {
  name: string;
  description: string;
  }
  export interface IGetPersonaOptionsRestReturnValue_2_0 {
+ simulationName: string;
  missionTypes: IMissionType_2_0[];
  personaTypes: IPersonaType_2_0[];
  }
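With these changes, the persona-options payload names the simulation and attaches measurable success criteria to each mission type. A conforming value, sketched with the interfaces copied from this diff (all concrete strings are hypothetical):

interface SuccessCriteria { name: string; description: string }
interface IMissionType_2_0 { name: string; description: string; successCriteria: SuccessCriteria[] }
interface IPersonaType_2_0 { name: string; description: string }
interface IGetPersonaOptionsRestReturnValue_2_0 {
  simulationName: string; // new in 2025.17.0
  missionTypes: IMissionType_2_0[];
  personaTypes: IPersonaType_2_0[];
}

const personaOptions: IGetPersonaOptionsRestReturnValue_2_0 = {
  simulationName: "checkout-regression",
  missionTypes: [{
    name: "cancel-order",
    description: "The user tries to cancel a placed order.",
    successCriteria: [{ name: "order-cancelled", description: "The bot confirms the cancellation." }],
  }],
  personaTypes: [{ name: "impatient-customer", description: "Gives short, terse replies." }],
};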