@cognigy/rest-api-client 2025.16.0 → 2025.17.0
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +5 -0
- package/build/shared/charts/descriptors/data/debugMessage.js +13 -3
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -49
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +31 -2
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +11 -2
- package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +12 -3
- package/build/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
- package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
- package/build/shared/generativeAI/getPrompt.js +75 -0
- package/build/shared/generativeAI/utils/generativeAIPrompts.js +613 -0
- package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +84 -0
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
- package/build/shared/interfaces/messageAPI/handover.js +6 -0
- package/build/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
- package/build/test.js +39 -0
- package/dist/esm/shared/charts/descriptors/data/debugMessage.js +13 -3
- package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -50
- package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +29 -1
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +11 -2
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +12 -3
- package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
- package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
- package/dist/esm/shared/generativeAI/getPrompt.js +68 -0
- package/dist/esm/shared/generativeAI/utils/generativeAIPrompts.js +610 -0
- package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +81 -0
- package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
- package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
- package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
- package/dist/esm/shared/interfaces/restAPI/management/authentication/ICreateJWTToken.js +1 -0
- package/dist/esm/test.js +39 -0
- package/package.json +1 -1
- package/types/index.d.ts +42 -19
package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js
ADDED

@@ -0,0 +1,84 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.alternativeContextAwareUserQueryRephrasingChatPrompt = exports.contextAwareUserQueryRephrasingChatPrompt = void 0;
+const contextAwareUserQueryRephrasingBasePrompt = `You are classifying and rephrasing user queries. Your rephrased user queries will be used as input for RAG and other LLM calls.
+
+Instructions:
+- Do not respond to the user query as in a real conversation.
+- Determine whether the latest user query relates to the recent chat history.
+- If it does, rephrase the latest user query, possibly including details from the previous chat history.
+- If it does not, respond with "false".
+
+What follows are some example conversations, followed last by the real conversation which you are working on.`;
+const example1 = [
+{ role: "user", content: "Hi, my name is Micheal. I'm looking for support regarding an issue." },
+{ role: "assistant", content: "Great, let's get you connected with an agent. What is your customer ID?" },
+{ role: "user", content: "My ID is S0-F45T" },
+{ role: "assistant", content: "false" },
+];
+const example2 = [
+{ role: "user", content: "The Toyota Proace City looks quite nice. I'm looking to fit a bunch of stuff in the car. How much capacity does it have?" },
+{ role: "assistant", content: "There are two variants: L1 has 3,8 m3 loading volume and L2 has 4,4 m3 loading volume." },
+{ role: "user", content: "And how much can I load?" },
+{ role: "assistant", content: "What is the maximum payload and towing capacity of the Toyota Proace City?" },
+];
+const example3 = [
+{ role: "user", content: "I am looking for a new smartphone." },
+{ role: "assistant", content: "What features are you interested in?" },
+{ role: "user", content: "I want a good camera and long battery life." },
+{ role: "assistant", content: "Great! Are you looking for a specific brand or operating system, like Android or iOS?" },
+{ role: "user", content: "I prefer Android devices." },
+{ role: "assistant", content: "Do you have a budget in mind?" },
+{ role: "user", content: "I would like to keep it under $800." },
+{ role: "user", content: "Can you recommend a model?" },
+{ role: "assistant", content: "Can you suggest an Android smartphone under $800 with a good camera and long battery life?" },
+];
+exports.contextAwareUserQueryRephrasingChatPrompt = [
+{
+role: "system",
+content: contextAwareUserQueryRephrasingBasePrompt,
+},
+...example1,
+...example2,
+...example3,
+];
+const mapExampleToPrompt = (example) => {
+return example.map(message => message.role === "user" ? `User: ${message.content}` : `Assistant: ${message.content}`).join("\n");
+};
+exports.alternativeContextAwareUserQueryRephrasingChatPrompt = [
+{
+role: "system",
+content: `# Role and Objective
+You are classifying and rephrasing user queries. Your rephrased user queries will be used as input for RAG and other LLM calls.
+
+# Instructions
+- Do not respond to the user query as in a real conversation.
+- Determine whether the latest user query relates to the previous messages.
+- If it does relate, rephrase the latest user query, possibly including details from the previous messages.
+- If it does not relate, respond with "false".
+
+## Rephrasing
+- View the previous messages and look at related context in the immediate past.
+- Pull relevant context from those messages and include them in the rephrased user query.
+- Such context include, but is not limited to, user or product information, names, and dates.
+
+# Output Format
+- Rephrased user query
+- or false, if unrelated to the previous messages
+
+# Examples
+
+## Example 1
+${mapExampleToPrompt(example1)}
+
+## Example 2
+${mapExampleToPrompt(example2)}
+
+## Example 3
+${mapExampleToPrompt(example3)}
+
+# Final instructions and prompt to think step by step
+- Let’s think step-by-step.`,
+},
+];
+//# sourceMappingURL=contextAwareUserQueryRephrasing.js.map
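The two exports above are few-shot chat prompts: `contextAwareUserQueryRephrasingChatPrompt` keeps the examples as literal user/assistant turns, while the alternative variant folds them into a single Markdown-structured system message via `mapExampleToPrompt`. A rough sketch of how such an array could be consumed (not part of the package; the deep require path and the appended live turns are assumptions):

```js
// Hypothetical consumer of the exported prompt array (deep require into the build output
// is assumed to resolve; the live conversation turns below are made up).
const {
    contextAwareUserQueryRephrasingChatPrompt,
} = require("@cognigy/rest-api-client/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing");

// The array starts with the system instruction and the three few-shot examples;
// appending the live turns yields a complete chat-completion message list.
const messages = [
    ...contextAwareUserQueryRephrasingChatPrompt,
    { role: "user", content: "I'd like to book a table for Friday." },
    { role: "assistant", content: "For how many people?" },
    { role: "user", content: "Four, please." },
];
// A completion of "false" means the last user turn does not depend on the history;
// anything else is the rephrased, self-contained user query.
```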
package/build/shared/interfaces/messageAPI/handover.js

@@ -133,6 +133,12 @@ exports.createHandoverRequestDataSchema = {
 enableHandoverConnectMessageRingCentralEngage: {
 type: "boolean"
 },
+enableHandoverDisconnectMessageSalesforceMIAW: {
+type: "boolean"
+},
+enableHandoverConnectMessageSalesforceMIAW: {
+type: "boolean"
+},
 "notifySessionId": {
 "type": "string",
 },
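The schema gains two Salesforce MIAW (Messaging for In-App and Web) booleans alongside the existing RingCentral Engage flags. A hypothetical handover request body using them (values and surrounding fields are illustrative only):

```js
// Hypothetical data for a createHandoverRequest call; only fields relevant to the new flags are shown.
const handoverRequestData = {
    enableHandoverConnectMessageSalesforceMIAW: true,    // presumably: announce when the MIAW conversation connects
    enableHandoverDisconnectMessageSalesforceMIAW: true, // presumably: announce when the handover is disconnected
    notifySessionId: "session-1234",
};
```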
package/build/test.js
ADDED

@@ -0,0 +1,39 @@
+/* import { RestAPIClient, TRestAPIClient } from "./RestAPIClient";
+import * as fs from "fs";
+const FormData = require("form-data");
+
+const OAUTH_CLIENT_ID = "cognigy-ui";
+const OAUTH_CLIENT_SECRET =
+"KR7yxR3rAhZ9sEn923dZ5KeNs9SVuwBjHxXKpmqtvSNXw5xWz35Y5YRtTBt96Jaa";
+const baseUrl = "https://api.test";
+
+const instance = new RestAPIClient({
+numberOfRetries: 2,
+baseUrl,
+versions: {
+administration: "2.0",
+external: "2.0",
+metrics: "2.0",
+resources: "2.0",
+sessions: "2.0"
+},
+timeout: 10000
+});
+
+(async () => {
+
+const base64SnapshotString = fs.readFileSync('./src/IDE/fixtures/snapshots/overrideSnapshotConnections_project.csnap')
+const form = new FormData();
+
+form.append("projectId", projectId);
+form.append("file", base64SnapshotString, "snapshot.csnap");
+
+const slot = await instance.uploadExtension({
+projectId: "your-project-id"
+fs.readFileSync('./src/IDE/fixtures/snapshots/overrideSnapshotConnections_project.csnap'),
+user: "your-user-id",
+});
+console.log(slot);
+})();
+*/
+//# sourceMappingURL=test.js.map
package/dist/esm/shared/charts/descriptors/data/debugMessage.js

@@ -58,12 +58,22 @@ export const DEBUG_MESSAGE = createNodeDescriptor({
 function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
 const { api } = cognigy;
 const { level, message, header } = config;
-
+let messageToOutput = message;
+//Atp message can be of type any since cognigyScript can return any type
+// whereas logDebugMessage expects a string or object
+// so we need to change the type of message to string if not string or object
+if (message === undefined || message === null) {
+return;
+}
+else if (typeof message !== "string" && typeof message !== "object") {
+messageToOutput = JSON.stringify(message);
+}
 if (level) {
 if (level === "info") {
-api.logDebugMessage(
+api.logDebugMessage(messageToOutput, header);
 }
 if (level === "error") {
-api.logDebugError(
+api.logDebugError(messageToOutput, header);
 }
 }
 })
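The new guard normalizes the Debug Message node's input before it reaches `logDebugMessage`/`logDebugError`. A standalone restatement of that coercion (the helper name is hypothetical):

```js
// Mirrors the logic added in the diff: undefined/null is dropped, strings and objects
// pass through unchanged, and any other type is stringified before logging.
function normalizeDebugMessage(message) {
    if (message === undefined || message === null) {
        return undefined; // the node simply returns without logging in this case
    }
    if (typeof message !== "string" && typeof message !== "object") {
        return JSON.stringify(message); // e.g. 42 -> "42", true -> "true"
    }
    return message;
}
```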
package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js

@@ -4,6 +4,7 @@ import { createNodeDescriptor } from "../../createNodeDescriptor";
 import { GO_TO } from "../logic";
 import { writeLLMDebugLogs } from "../nlu/generativeSlotFiller/prompt";
 import { randomUUID } from 'crypto';
+import { isOpenAIChatPrompt } from "../../../generativeAI/getPrompt";
 import { InternalServerError } from "../../../errors";
 /**
  * Returns the simplified english name for a language given a language code
@@ -547,7 +548,7 @@ export const SEARCH_EXTRACT_OUTPUT = createNodeDescriptor({
 },
 tags: ["ai", "knowledgeSearch", "knowledge", "search"],
 function: (knowledgeSearchParams) => __awaiter(void 0, void 0, void 0, function* () {
-var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k
+var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
 const { cognigy, config, nodeId } = knowledgeSearchParams;
 const { input, api } = cognigy;
 const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, searchSourceTagsFilterOp, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
@@ -574,53 +575,24 @@
 // check if follow up detection is active and if yes, handle accordingly
 // this is "context aware search"
 if (followUpDetection === "transcript") {
-let prompt;
-let lastRoundTrip;
-// this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
-followUpDetectionSteps = followUpDetectionSteps || 2;
 // check whether we're in an flow execution that's not the first
 // as it doesn't make sense to check for follow ups in the first execution
 if (input.execution > 1) {
+// this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
+followUpDetectionSteps = followUpDetectionSteps || 2;
 // always remember the last thing the user said (needed later)
-
-.lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(
-// if follow up detection is set to 2 or more, we use the conversation transcript
-// as reference. Start at the second entry, because the first one is the current
-const recentConversation = (_c = cognigy
-.lastConversationEntries) === null || _c === void 0 ? void 0 : _c.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
-prompt = `Below is the transcript of a conversation:
-${recentConversation}
-USER: ${searchString}
-Does the last USER input refer to the conversation before?
-Answer with "true" or "false". Answer:`;
-let promptResponse;
-// set the detailed results to true to get the token usage
-const returnDetailedResults = true;
+const chatHistory = (_b = cognigy
+.lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(0, followUpDetectionSteps + 1).reverse();
 try {
-const
-
+const promptData = {
+// set the detailed results to true to get the token usage
+detailedResults: true
+};
+const rephrasedUserQueryResponse = yield api.runGenerativeAIPromptForUseCase(promptData, "answerExtraction", "contextAwareUserQueryRephrasing", getContextAwareUserQueryRephrasingPromptParser(chatHistory));
+const promptResponse = rephrasedUserQueryResponse.result;
 // if we're in adminconsole, process debugging options
-writeLLMDebugLogs("Search Extract Output Follow Up Detection", prompt,
-
-if (promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase().includes("true")) {
-prompt = `You are tasked to rewrite a question based on a context, so that the question is clearer.
-
-Example:
-Context:
-USER: Where is Germany?
-BOT: Germany is in Europe.
-Question: Is that a continent?
-New: Is Europe a continent?
-
-Task:
-Context:
-${lastRoundTrip}
-Question: ${searchString}
-New: `;
-const secondFollowUpResponse = yield api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
-promptResponse = secondFollowUpResponse.result;
-// if we're in adminconsole, process debugging options
-writeLLMDebugLogs("Search Extract Output Follow Up Detection 2", prompt, secondFollowUpResponse, debugLogTokenCount, false, cognigy);
+writeLLMDebugLogs("Search Extract Output Follow Up Detection", prompt, rephrasedUserQueryResponse, debugLogTokenCount, false, cognigy);
+if ((promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase()) !== "false") {
 // the actual search string to now use is the rewritten question
 actualSearchString = promptResponse;
 api.logDebugMessage(`UI__DEBUG_MODE__SEO__MESSAGE '${actualSearchString}'`);
@@ -634,7 +606,7 @@ New: `;
 let knowledgeSearchResponseData;
 // handle errors from external services, depending on the settings
 const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
-var
+var _l;
 const compactError = {
 message: (error === null || error === void 0 ? void 0 : error.message) || error,
 };
@@ -670,7 +642,7 @@ New: `;
 isSnapshotError: !!(metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId),
 });
 }
-if ((
+if ((_l = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _l === void 0 ? void 0 : _l.code) {
 compactError["code"] = error.originalErrorDetails.code;
 }
 let searchStoreDataWithError = {
@@ -743,7 +715,7 @@ New: `;
 // Perform knowledge search
 try {
 // Set understood to true so that the interaction doesn't look false in our analytics
-(
+(_c = api.setAnalyticsData) === null || _c === void 0 ? void 0 : _c.call(api, "understood", "true");
 input.understood = true;
 const knowledgeSearchResponse = yield api.knowledgeSearch(data);
 writeLLMDebugLogs("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
@@ -776,7 +748,7 @@ New: `;
 }
 // #endregion 1 Perform Search
 // #region 2 Perform Answer Extraction
-let documents = (
+let documents = (_d = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _d === void 0 ? void 0 : _d.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
 const replacedUserInput = input.text + (actualSearchString !== input.text ? ` possibly meaning "${actualSearchString}"` : "");
 prompt = prompt.replace(/@userInput/g, replacedUserInput);
 prompt = prompt.replace(/@foundDocuments/g, documents);
@@ -981,7 +953,7 @@ New: `;
 {
 "separator": true,
 "type": "TextBlock",
-"text": (
+"text": (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e[0].text,
 "wrap": true,
 "spacing": "Padding"
 }
@@ -1002,7 +974,7 @@ New: `;
 "version": "1.6"
 };
 // @ts-ignore
-if ((
+if ((_h = (_g = (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0]) === null || _g === void 0 ? void 0 : _g.chunkMetaData) === null || _h === void 0 ? void 0 : _h.url) {
 ADAPTIVE_CARD_RESULT.body[2].items[0].columns[1].items.push({
 "type": "ActionSet",
 "actions": [
@@ -1010,7 +982,7 @@ New: `;
 "type": "Action.OpenUrl",
 "title": "Open Source",
 // @ts-ignore
-"url": (
+"url": (_j = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _j === void 0 ? void 0 : _j[0].chunkMetaData.url
 }
 ],
 "separator": true
@@ -1040,7 +1012,7 @@ New: `;
 yield api.output(promptResponse, null);
 }
 else if (mainPromptResponse.finishReason) {
-(
+(_k = api.output) === null || _k === void 0 ? void 0 : _k.call(api, "", {
 _cognigy: {
 _preventTranscript: true,
 _messageId,
@@ -1065,4 +1037,30 @@ New: `;
 }
 })
 });
+/**
+ * Parses the prompt for the context-aware user query rephrasing.
+ * It replaces the "@@chatHistory" variable with the chat history messages.
+ * It replaces the "@@userQuery" variable with the last user message.
+ *
+ * @param chatHistory - The chat history to be used for context.
+ * @return A function that takes a raw prompt and returns the modified prompt.
+ */
+export function getContextAwareUserQueryRephrasingPromptParser(chatHistory) {
+return (rawPrompt) => {
+if (isOpenAIChatPrompt(rawPrompt)) {
+const modifiedPrompt = [...rawPrompt];
+for (const message of chatHistory) {
+const role = message.source === "user" ? "user" : "assistant";
+modifiedPrompt.push({
+role,
+content: message.text
+});
+}
+return modifiedPrompt;
+}
+else {
+throw new InternalServerError(`Invalid prompt type for context-aware user query rephrasing. Expected a chat prompt.`);
+}
+};
+}
 //# sourceMappingURL=searchExtractOutput.js.map
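The follow-up detection above now delegates rephrasing to `runGenerativeAIPromptForUseCase` together with the exported `getContextAwareUserQueryRephrasingPromptParser`, which appends the recent conversation to an OpenAI-style chat prompt. A usage sketch (the deep import path is an assumption; the history entries are made up but follow the `source`/`text` shape used in the diff):

```js
// Sketch of the new prompt parser: it only accepts arrays of { role, content } messages
// and appends the chat history as user/assistant turns; anything else throws.
import { getContextAwareUserQueryRephrasingPromptParser } from "@cognigy/rest-api-client/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput";

const chatHistory = [
    { source: "user", text: "Where is Germany?" },
    { source: "bot", text: "Germany is in Europe." },
];
const parser = getContextAwareUserQueryRephrasingPromptParser(chatHistory);

const rawPrompt = [{ role: "system", content: "You are classifying and rephrasing user queries." }];
const parsed = parser(rawPrompt);
// parsed now ends with:
//   { role: "user", content: "Where is Germany?" }
//   { role: "assistant", content: "Germany is in Europe." }
```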
package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js

@@ -151,6 +151,10 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
 completionTokenMessage = ` (${completionTokens} Tokens)`;
 }
 }
+let promptString = prompt;
+if (typeof prompt != "string") {
+promptString = promptToString(prompt);
+}
 let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
 let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
 if (nodeType === "llmPromptV2") {
@@ -158,7 +162,7 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
 headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
 }
 ;
-api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${
+api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${promptString}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
 }
 catch (err) { }
 }
@@ -198,4 +202,28 @@ export const convertChatToPrompt = (chat) => {
 prompt += "assistant: ";
 return prompt;
 };
+/**
+ * Converts a TALLPrompts object into a string representation.
+ * @param prompt The prompt to convert to a string
+ * @returns The string representation of the prompt
+ */
+export function promptToString(prompt) {
+if ("prompt" in prompt) {
+// TCompletionPrompt
+return prompt.prompt;
+}
+else if ("messages" in prompt) {
+// TChatPrompt
+return prompt.messages
+.map((msg) => `[${msg.role}] ${msg.content}`)
+.join("\n");
+}
+else if (Array.isArray(prompt)) {
+// OpenAIChatMessage[]
+return prompt
+.map((msg) => `[${msg.role}] ${msg.content}`)
+.join("\n");
+}
+return "";
+}
 //# sourceMappingURL=prompt.js.map
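`writeLLMDebugLogs` now stringifies non-string prompts through the new `promptToString` helper, which covers the three prompt shapes used in the package. A small usage sketch (the deep import path is an assumption):

```js
import { promptToString } from "@cognigy/rest-api-client/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt";

// Completion prompt ({ prompt }): returned as-is.
promptToString({ prompt: "Translate 'hello' to German." });
// -> "Translate 'hello' to German."

// Chat prompt ({ messages }): each message rendered as "[role] content".
promptToString({ messages: [{ role: "system", content: "Be brief." }, { role: "user", content: "Hi" }] });
// -> "[system] Be brief.\n[user] Hi"

// OpenAI-style message array: same "[role] content" rendering.
promptToString([{ role: "user", content: "Hi" }, { role: "assistant", content: "Hello!" }]);
// -> "[user] Hi\n[assistant] Hello!"
```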
package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js

@@ -408,6 +408,13 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 step: 0.1
 }
 },
+{
+key: "useTextAlternativeForLLM",
+label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+type: "toggle",
+description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+defaultValue: true,
+},
 {
 key: "logErrorToSystem",
 label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TO_SYSTEM__LABEL",
@@ -803,6 +810,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 "timeoutInMs",
 "maxTokens",
 "temperature",
+"useTextAlternativeForLLM",
 ],
 },
 {
@@ -850,7 +858,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
 var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
 const { api, context, input, profile, flowReferenceId } = cognigy;
-const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
 try {
 if (!aiAgent) {
 throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -1128,7 +1136,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 const transcript = yield api.getTranscript({
 limit: 50,
 rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
-excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT]
+excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT],
+useTextAlternativeForLLM,
 });
 // For knowledgeSearch "always", we enhance the user input with the knowledge search response data
 if (knowledgeSearchBehavior === "always" &&
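Both the AI Agent Job node above and the LLM Prompt v2 node below thread the new `useTextAlternativeForLLM` toggle (default `true`) from node config into `api.getTranscript`. A condensed, self-contained sketch of that option building (hypothetical helper; `TranscriptRole` is passed in because it lives in Cognigy's internals):

```js
// Hypothetical helper restating what both nodes now pass to api.getTranscript.
function buildTranscriptOptions(config, TranscriptRole) {
    const { useTextAlternativeForLLM } = config; // toggle added in this release, defaultValue: true
    return {
        limit: 50,
        rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
        excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT],
        useTextAlternativeForLLM,
    };
}
```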
package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js

@@ -388,6 +388,13 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 ]
 }
 },
+{
+key: "useTextAlternativeForLLM",
+label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+type: "toggle",
+description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+defaultValue: true,
+},
 {
 key: "customModelOptions",
 label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
@@ -539,7 +546,8 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 "frequencyPenalty",
 "useStop",
 "stop",
-"seed"
+"seed",
+"useTextAlternativeForLLM",
 ]
 },
 {
@@ -628,7 +636,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
 const { api, input, flowReferenceId } = cognigy;
 const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
-errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
 let prompt = config.prompt || "";
 const { traceId } = input;
 // check if custom variables are used and if they have a length modifier
@@ -773,7 +781,8 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 const transcript = yield api.getTranscript({
 limit: chatTranscriptSteps || 50,
 rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
-excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT]
+excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT],
+useTextAlternativeForLLM,
 });
 llmPromptOptions["transcript"] = transcript;
 llmPromptOptions["chat"] = [{
package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js

@@ -68,6 +68,13 @@ export const GET_TRANSCRIPT = createNodeDescriptor({
 value: "context",
 }
 },
+{
+key: "includeTextAlternativeInTranscript",
+type: "toggle",
+label: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__LABEL",
+description: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__DESCRIPTION",
+defaultValue: true,
+},
 ],
 sections: [
 {
@@ -79,18 +86,31 @@ export const GET_TRANSCRIPT = createNodeDescriptor({
 "inputKey",
 "contextKey",
 ]
-}
+},
+{
+key: "advanced",
+label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
+defaultCollapsed: true,
+fields: [
+"includeTextAlternativeInTranscript",
+],
+},
 ],
 form: [
 { type: "field", key: "limit" },
 { type: "section", key: "storage" },
+{ type: "section", key: "advanced" },
 ],
 tags: ["service", "transcripts"],
 function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
 var _a;
-const { limit, storeLocation, inputKey, contextKey } = config;
+const { limit, storeLocation, inputKey, contextKey, includeTextAlternativeInTranscript } = config;
 const { api } = cognigy;
-const transcript = yield api.getTranscript({
+const transcript = yield api.getTranscript({
+limit,
+excludeDataOnlyMessagesFilter: [TranscriptRole.AGENT],
+includeTextAlternativeInTranscript,
+});
 if (storeLocation === "context") {
 (_a = api.addToContext) === null || _a === void 0 ? void 0 : _a.call(api, contextKey, transcript, "simple");
 }
package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js

@@ -143,6 +143,9 @@ class SessionConfigMapper extends BaseMapper {
 const timeout = Number(spAsrTimeout || asrTimeout);
 recognizer.asrTimeout = timeout / 1000 || undefined;
 }
+else if (asrEnabled === false || spAsrEnabled === false) {
+recognizer.asrTimeout = 0;
+}
 return recognizer;
 }
 isDtmfEnabled(sessionParams, dtmf) {
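The mapper change forces the ASR timeout to `0` when ASR is explicitly disabled and no timeout applies. A standalone sketch (hypothetical helper; the opening `if` condition is not visible in the hunk and is assumed to guard the explicit-timeout branch):

```js
function mapAsrTimeout(recognizer, { asrTimeout, spAsrTimeout, asrEnabled, spAsrEnabled }) {
    const timeout = Number(spAsrTimeout || asrTimeout);
    if (timeout) { // assumed condition; the hunk only shows this branch's body
        recognizer.asrTimeout = timeout / 1000 || undefined; // milliseconds -> seconds
    }
    else if (asrEnabled === false || spAsrEnabled === false) {
        recognizer.asrTimeout = 0; // new: disabling ASR now zeroes the timeout instead of leaving it unset
    }
    return recognizer;
}
```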
package/dist/esm/shared/generativeAI/getPrompt.js
ADDED

@@ -0,0 +1,68 @@
+/** Custom Modules */
+import { generativeAIPrompts } from "./utils/generativeAIPrompts";
+import { InternalServerError } from "../errors/internalServerError";
+export const isCompletionPrompt = (data) => {
+return typeof data === "object" && data !== null && "prompt" in data;
+};
+export const isChatPrompt = (data) => {
+return typeof data === "object" && data !== null && "messages" in data;
+};
+export const isOpenAIChatPrompt = (data) => {
+return Array.isArray(data) &&
+data.every((item) => typeof item === "object" &&
+item !== null &&
+("role" in item) &&
+("content" in item) &&
+(item.role === "system" || item.role === "user" || item.role === "assistant") &&
+(typeof item.content === "string"));
+};
+/**
+ * Gets the current prompts for the passed model/useCase
+ * @param model - The model to get the prompt for
+ * @param useCase - The use case to get the prompt for
+ * @param subUseCase - Optional sub-use case to get a specific prompt
+ * @param promptParser - Optional function to modify the prompt before returning it
+ * @returns {TALLPrompts}
+ */
+export const getPrompt = (model, useCase, subUseCase, promptParser) => {
+var _a;
+const loggerMeta = {
+module: "getPrompt.ts",
+label: "generativeAI",
+function: "getPrompt",
+model,
+useCase,
+subUseCase
+};
+let modelPrompts = (_a = generativeAIPrompts[`${model}`]) !== null && _a !== void 0 ? _a : generativeAIPrompts["default"];
+if (!modelPrompts) {
+throw new InternalServerError(`Neither the model "${model}" nor the default fallback have predefined prompts`, undefined, loggerMeta);
+}
+let prompt = modelPrompts[`${useCase}`];
+// generativeAIPrompts[model] has no prompt for use case, so try to fallback to default prompt
+if (!prompt) {
+modelPrompts = generativeAIPrompts["default"];
+if (!modelPrompts) {
+throw new InternalServerError(`The default fallback has no predefined prompts`, undefined, loggerMeta);
+}
+prompt = modelPrompts[`${useCase}`];
+}
+if (!prompt) {
+throw new InternalServerError(`Neither the model "${model}" nor the default fallback define a prompt for useCase "${useCase}"`, undefined, loggerMeta);
+}
+if (subUseCase && prompt && typeof prompt === "object" && `${subUseCase}` in prompt) {
+prompt = prompt[`${subUseCase}`];
+}
+if (!prompt) {
+throw new InternalServerError(`The prompt defined for the model "${model}" or the default fallback, useCase "${useCase}", and subUseCase "${subUseCase}" is invalid`, undefined, loggerMeta);
+}
+try {
+return promptParser
+? promptParser(JSON.parse(JSON.stringify(prompt)))
+: JSON.parse(JSON.stringify(prompt));
+}
+catch (error) {
+throw new InternalServerError(`Error while parsing prompt for model: ${model} and useCase: ${useCase} and subUseCase: ${subUseCase}`, undefined, Object.assign({ originalError: error }, loggerMeta));
+}
+};
+//# sourceMappingURL=getPrompt.js.map
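`getPrompt` resolves a predefined prompt by model, use case and optional sub-use case, falling back to the `"default"` prompt set, and hands a deep copy to an optional parser before returning it. A usage sketch (the deep import path and the model key are assumptions; the use-case strings mirror those used in searchExtractOutput.js):

```js
import { getPrompt, isOpenAIChatPrompt } from "@cognigy/rest-api-client/dist/esm/shared/generativeAI/getPrompt";

const prompt = getPrompt(
    "gpt-4",                            // hypothetical model key; unknown models fall back to "default"
    "answerExtraction",                 // use case
    "contextAwareUserQueryRephrasing",  // sub-use case
    (rawPrompt) => {
        // optional parser: receives a deep copy and may reshape it before it is returned
        if (isOpenAIChatPrompt(rawPrompt)) {
            rawPrompt.push({ role: "user", content: "And how much can I load?" });
        }
        return rawPrompt;
    }
);
```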