@cognigy/rest-api-client 0.19.0 → 0.20.0
This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
- package/build/RestAPIClient.js +7 -0
- package/build/apigroups/AdministrationAPIGroup_2_0.js +6 -1
- package/build/apigroups/JWTAuthAPIGroup_2_0.js +12 -0
- package/build/apigroups/ResourcesAPIGroup_2_0.js +19 -8
- package/build/apigroups/index.js +3 -1
- package/build/shared/charts/descriptors/agentAssist/index.js +3 -1
- package/build/shared/charts/descriptors/agentAssist/sendData.js +74 -0
- package/build/shared/charts/descriptors/analytics/addMemory.js +51 -0
- package/build/shared/charts/descriptors/analytics/completeGoal.js +4 -3
- package/build/shared/charts/descriptors/analytics/helper.js +20 -0
- package/build/shared/charts/descriptors/analytics/index.js +5 -3
- package/build/shared/charts/descriptors/analytics/{trackMilestone.js → trackGoal.js} +32 -25
- package/build/shared/charts/descriptors/data/copySlotsToContext.js +1 -1
- package/build/shared/charts/descriptors/index.js +14 -2
- package/build/shared/charts/descriptors/knowledgeSearch/knowledgeSearchV2.js +1 -1
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +26 -16
- package/build/shared/charts/descriptors/logic/disableSlotFillers.js +1 -1
- package/build/shared/charts/descriptors/logic/enableSlotFillers.js +1 -1
- package/build/shared/charts/descriptors/logic/thinkV2.js +111 -4
- package/build/shared/charts/descriptors/nlu/cleanText.js +1 -1
- package/build/shared/charts/descriptors/nlu/executeCognigyNLU.js +1 -1
- package/build/shared/charts/descriptors/nlu/fuzzySearch.js +1 -1
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFiller.js +1 -1
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFillerFallback.js +1 -1
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFillerSuccess.js +1 -1
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +4 -5
- package/build/shared/charts/descriptors/nlu/matchPattern.js +1 -1
- package/build/shared/charts/descriptors/nlu/regexSlotFiller.js +1 -1
- package/build/shared/charts/descriptors/service/GPTConversation.js +1 -1
- package/build/shared/charts/descriptors/service/GPTPrompt.js +26 -32
- package/build/shared/charts/descriptors/service/LLMEntityExtract.js +1 -1
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentHandover.js +92 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +1146 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobDefault.js +31 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +139 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentToolAnswer.js +120 -0
- package/build/shared/charts/descriptors/service/aiAgent/helper.js +222 -0
- package/build/shared/charts/descriptors/service/handoverV2.js +68 -13
- package/build/shared/charts/descriptors/service/index.js +11 -1
- package/build/shared/charts/descriptors/transcripts/addTranscriptStep.js +413 -0
- package/build/shared/charts/descriptors/transcripts/getTranscript.js +104 -0
- package/build/shared/charts/descriptors/transcripts/index.js +8 -0
- package/build/shared/charts/descriptors/voice/mappers/base.mapper.js +20 -0
- package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +183 -107
- package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +6 -9
- package/build/shared/charts/descriptors/voice/nodes/continuousAsr.js +3 -4
- package/build/shared/charts/descriptors/voicegateway/nodes/callRecording.js +8 -1
- package/build/shared/charts/descriptors/voicegateway/nodes/handover.js +4 -2
- package/build/shared/charts/descriptors/voicegateway/nodes/hangup.js +4 -2
- package/build/shared/charts/descriptors/voicegateway/nodes/helper/utils.js +14 -0
- package/build/shared/charts/descriptors/voicegateway/nodes/playURL.js +4 -5
- package/build/shared/charts/descriptors/voicegateway/nodes/sendMessage.js +8 -1
- package/build/shared/charts/descriptors/voicegateway/nodes/sendMetaData.js +7 -3
- package/build/shared/charts/descriptors/voicegateway/nodes/setSessionParams.js +8 -1
- package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +15 -4
- package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +15 -3
- package/build/shared/constants.js +7 -1
- package/build/shared/handoverClients/interfaces/THandoverEventType.js +1 -0
- package/build/shared/helper/logFullConfigToDebugMode.js +1 -1
- package/build/shared/helper/nlu/textCleaner.js +1 -1
- package/build/shared/interfaces/{restAPI/resources/milestone/v2.0/IMilestoneStep_2_0.js → IEndpointSettings.js} +1 -1
- package/build/shared/interfaces/IProfile.js +1 -0
- package/build/shared/interfaces/IProfileSchema.js +1 -0
- package/build/shared/interfaces/analytics/IAnalyticsSourceData.js +22 -20
- package/build/shared/interfaces/{restAPI/resources/milestone/v2.0/IMilestone_2_0.js → analytics/IGoalAnalytics.js} +1 -1
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +19 -2
- package/build/shared/interfaces/generativeAI/tools.js +3 -0
- package/build/shared/interfaces/handover.js +3 -1
- package/build/shared/interfaces/messageAPI/endpoints.js +6 -1
- package/build/shared/interfaces/messageAPI/handover.js +2 -0
- package/build/shared/interfaces/resources/IAiAgent.js +52 -0
- package/build/shared/interfaces/resources/IAuditEvent.js +2 -1
- package/build/shared/interfaces/resources/IEndpoint.js +2 -1
- package/build/shared/interfaces/resources/{IMilestone.js → IGoal.js} +14 -14
- package/build/shared/interfaces/resources/ILargeLanguageModel.js +29 -2
- package/build/shared/interfaces/resources/INodeDescriptorSet.js +4 -1
- package/build/shared/interfaces/resources/TResourceType.js +16 -8
- package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -1
- package/build/shared/interfaces/restAPI/{resources/milestone/v2.0/ICloneMilestoneRest_2_0.js → administration/user/v2.0/IGetPinnedResources_2_0.js} +1 -1
- package/build/shared/interfaces/restAPI/administration/user/v2.0/IPinResourceRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentHiringTemplate_2_0.js +32 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgent_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/ICreateAiAgentRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IDeleteAiAgentRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentHiringTemplates_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IHireAiAgent_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IIndexAiAgentRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IReadAiAgentRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IUpdateAiAgentRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/{milestone/v2.0/ICreateMilestoneRest_2_0.js → aiAgent/v2.0/IValidateAiAgentNameRest_2_0.js} +1 -1
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/ICloneGoalRest_2_0.js +3 -0
- package/build/shared/interfaces/{analytics/IMilestoneAnalytics.js → restAPI/resources/goal/v2.0/ICreateGoalRest_2_0.js} +1 -1
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/IDeleteGoalRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/IGoalIndexItem_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/IGoalStepMetric_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/IGoalStep_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/IGoal_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/IIndexGoalsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/IReadGoalRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/goal/v2.0/IUpdateGoalRest_2_0.js +3 -0
- package/build/shared/interfaces/security/IPermission.js +6 -2
- package/build/shared/interfaces/security/IPinnedResource.js +3 -0
- package/build/shared/interfaces/security/index.js +1 -1
- package/build/shared/interfaces/transcripts/transcripts.js +33 -0
- package/package.json +1 -1
- package/types/index.d.ts +931 -224
- package/build/shared/interfaces/license.js.map +0 -1
- package/build/shared/interfaces/restAPI/resources/knowledgeSearchIndex/v2.0/ICreateKnowledgeSearchIndexRest_2_0.js +0 -3
- package/build/shared/interfaces/restAPI/resources/knowledgeSearchIndex/v2.0/IDeleteKnowledgeSearchIndexRest_2_0.js +0 -3
- package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IDeleteMilestoneRest_2_0.js +0 -3
- package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IIndexMilestonesRest_2_0.js +0 -3
- package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneIndexItem_2_0.js +0 -3
- package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneStepMetric_2_0.js +0 -3
- package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IReadMilestoneRest_2_0.js +0 -3
- package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IUpdateMilestoneRest_2_0.js +0 -3
- package/build/test.js +0 -27
- /package/build/shared/interfaces/restAPI/resources/{knowledgeSearchIndex → aiAgent}/v2.0/index.js +0 -0
- /package/build/shared/interfaces/restAPI/resources/{milestone → goal}/v2.0/index.js +0 -0
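The headline changes in 0.20.0 are the rename of the "milestone" resource to "goal" and the new "aiAgent" resource, REST interfaces, and flow nodes. A minimal sketch of what the rename means for a consumer who deep-imports the compiled interface modules; the paths are taken from the file list above, while the assumption that a consumer imports them this way at all is ours:

```js
// 0.19.0 path, removed in 0.20.0:
// require("@cognigy/rest-api-client/build/shared/interfaces/resources/IMilestone");

// 0.20.0 path, per the rename in the file list:
require("@cognigy/rest-api-client/build/shared/interfaces/resources/IGoal");
```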
package/build/shared/charts/descriptors/logic/disableSlotFillers.js

```diff
@@ -29,7 +29,7 @@ exports.DISABLE_SLOT_FILLERS = (0, createNodeDescriptor_1.createNodeDescriptor)(
             label: "UI__NODE_EDITOR__DISABLE_SLOT_FILLERS__RESET__LABEL",
         }
     ],
-    tags: ["nlu"],
+    tags: ["ai", "nlu"],
     function: async ({ cognigy, config }) => {
         const { slotFillersToDisable, reset } = config;
         const { api, flowReferenceId } = cognigy;
```

package/build/shared/charts/descriptors/logic/enableSlotFillers.js

```diff
@@ -24,7 +24,7 @@ exports.ENABLE_SLOT_FILLERS = (0, createNodeDescriptor_1.createNodeDescriptor)({
             description: "UI__NODE_EDITOR__ENABLE_SLOT_FILLERS__SLOT_FILLERS_TO_ENABLE__DESCRIPTION"
         }
     ],
-    tags: ["nlu"],
+    tags: ["ai", "nlu"],
     function: async ({ cognigy, config }) => {
         const { slotFillersToEnable } = config;
         const { api, flowReferenceId } = cognigy;
```
package/build/shared/charts/descriptors/logic/thinkV2.js

```diff
@@ -3,7 +3,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.THINK_V2 = void 0;
 /* Custom modules */
 const createNodeDescriptor_1 = require("../../createNodeDescriptor");
+const logic_1 = require("../logic");
 const logFullConfigToDebugMode_1 = require("../../../helper/logFullConfigToDebugMode");
+const errors_1 = require("../../../errors");
 /**
  * Node name: 'think'
  *
@@ -69,6 +71,56 @@ exports.THINK_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 value: "intent"
             }
         },
+        {
+            key: "logErrorToSystem",
+            label: "UI__NODE_EDITOR__THINK_V2__LOG_ERROR_TO_SYSTEM__LABEL",
+            description: "UI__NODE_EDITOR__THINK_V2__LOG_ERROR_TO_SYSTEM__DESCRIPTION",
+            type: "toggle",
+            defaultValue: false,
+        },
+        {
+            key: "errorHandling",
+            type: "select",
+            label: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__LABEL",
+            description: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__DESCRIPTION",
+            defaultValue: "continue",
+            params: {
+                options: [
+                    {
+                        label: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__OPTIONS__STOP__LABEL",
+                        value: "stop"
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__OPTIONS__CONTINUE__LABEL",
+                        value: "continue"
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__ERROR__OPTIONS__GOTO__LABEL",
+                        value: "goto"
+                    },
+                ]
+            }
+        },
+        {
+            key: "errorMessage",
+            label: "UI__NODE_EDITOR__THINK_V2__ERROR_MESSAGE__LABEL",
+            type: "cognigyText",
+            description: "UI__NODE_EDITOR__THINK_V2__ERROR_MESSAGE__DESCRIPTION",
+            defaultValue: "",
+            condition: {
+                key: "errorHandling",
+                value: "continue"
+            }
+        },
+        {
+            key: "errorHandlingGotoTarget",
+            type: "flowNode",
+            label: "UI__NODE_EDITOR__THINK_V2__ERROR__GOTO_NODE__LABEL",
+            condition: {
+                key: "errorHandling",
+                value: "goto"
+            }
+        },
     ],
     sections: [
         {
@@ -79,23 +131,78 @@ exports.THINK_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 "thinkType",
                 "data"
             ]
+        },
+        {
+            key: "errors",
+            label: "UI__NODE_EDITOR__THINK_V2__SECTIONS__ERRORS__LABEL",
+            defaultCollapsed: true,
+            fields: [
+                "logErrorToSystem",
+                "errorHandling",
+                "errorMessage",
+                "errorHandlingGotoTarget",
+            ]
         }
     ],
     form: [
         { type: "field", key: "text" },
         { type: "field", key: "intent" },
         { type: "section", key: "advanced" },
+        { type: "section", key: "errors" },
     ],
     preview: {
         key: "text",
         type: "text",
     },
     tags: ["basic", "logic", "recursion", "inject"],
-    function: async ({ cognigy, config }) => {
-        const { text, data, intent, thinkType, } = config;
-        const { api } = cognigy;
+    function: async ({ cognigy, config, nodeId }) => {
+        const { text, data, intent, thinkType, errorHandling = "continue", errorHandlingGotoTarget, errorMessage, logErrorToSystem, } = config;
+        const { api, input } = cognigy;
+        const { traceId } = input;
         (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, config);
-        if (thinkType === "
+        if ((thinkType === "default" && typeof text !== "string")
+            || (thinkType === "intent" && typeof intent !== "string")) {
+            const errorDetails = {
+                message: "Think input should be string",
+                originalInput: JSON.stringify(text || intent)
+            };
+            if (logErrorToSystem) {
+                api.log("error", JSON.stringify(errorDetails));
+            }
+            api.logDebugError(errorDetails, "UI__DEBUG_MODE__THINK_NODE__ERROR");
+            // handle error depending on the settings
+            if (errorHandling === "continue") {
+                // output the provided error message
+                if (errorMessage) {
+                    api.output(errorMessage, null);
+                }
+            }
+            else if (errorHandling === "goto") {
+                if (!errorHandlingGotoTarget) {
+                    throw new Error("GoTo Target is required");
+                }
+                const gotoParams = {
+                    cognigy,
+                    childConfigs: [],
+                    nodeId,
+                    config: {
+                        flowNode: {
+                            flow: errorHandlingGotoTarget.flow,
+                            node: errorHandlingGotoTarget.node,
+                        },
+                        injectedText: undefined,
+                        injectedData: undefined,
+                        executionMode: "continue",
+                        absorbContext: false
+                    }
+                };
+                await logic_1.GO_TO.function(gotoParams);
+            }
+            else {
+                throw new errors_1.InternalServerError(errorDetails.message, { traceId });
+            }
+        }
+        else if (thinkType === "intent") {
             api.thinkV2(`cIntent:${intent}`, null);
         }
         else if (text || data) {
```
package/build/shared/charts/descriptors/nlu/cleanText.js

```diff
@@ -8,7 +8,7 @@ exports.CLEAN_TEXT = (0, createNodeDescriptor_1.createNodeDescriptor)({
     type: "cleanText",
     defaultLabel: "Clean Text",
     summary: "UI__NODE_EDITOR__NLU__CLEAN_TEXT__DEFAULT_SUMMARY",
-    tags: ["nlu", "clean", "cleaning", "parser", "parsing"],
+    tags: ["ai", "nlu", "clean", "cleaning", "parser", "parsing"],
     preview: {
         key: "textToClean",
         type: "text"
```

package/build/shared/charts/descriptors/nlu/executeCognigyNLU.js

```diff
@@ -127,7 +127,7 @@ exports.EXECUTE_COGNIGY_NLU = (0, createNodeDescriptor_1.createNodeDescriptor)({
         { type: "field", key: "inputKey" },
         { type: "section", key: "advanced" }
     ],
-    tags: ["nlu"],
+    tags: ["ai", "nlu"],
     function: async ({ cognigy, config }) => {
         const { api, input } = cognigy;
         const { text, data, mode, contextKey, inputKey, parseIntents, parseSlots, parseSystemSlots, findType, processDefaultReply } = config;
```

package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFiller.js

```diff
@@ -129,7 +129,7 @@ exports.GENERATIVE_SLOT_FILLER = (0, createNodeDescriptor_1.createNodeDescriptor)({
         { type: "section", key: "storage" },
     ],
     appearance: {},
-    tags: ["nlu"],
+    tags: ["ai", "nlu"],
     function: async ({ cognigy, config, childConfigs, nodeId, organisationId }) => {
         var _a;
         const { api, lastConversationEntries } = cognigy;
```
package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js

```diff
@@ -142,7 +142,6 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
         return;
     }
     // stringify the response if it is an object
-    const responseOutput = typeof response === "object" ? JSON.stringify(response.result || response) : response;
     const responseOutputFormatted = typeof response === "object" ? JSON.stringify(response.result || response, null, 4) : response;
     // debug logs are only processed for the interaction panel
     if (debugLogRequestAndCompletion) {
@@ -151,11 +150,11 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
     let completionTokenMessage = "";
     if (debugLogTokenCount) {
         if (prompt) {
-            const requestTokens = (
+            const requestTokens = (_a = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _a === void 0 ? void 0 : _a.inputTokens;
             requestTokenMessage = ` (${requestTokens} Tokens)`;
         }
         if (response) {
-            const completionTokens = (
+            const completionTokens = (_b = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _b === void 0 ? void 0 : _b.outputTokens;
             completionTokenMessage = ` (${completionTokens} Tokens)`;
         }
     }
@@ -167,8 +166,8 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
     try {
         let requestTokens = 0;
         let completionTokens = 0;
-        requestTokens = (
-        completionTokens = (
+        requestTokens = (_c = response.tokenUsage) === null || _c === void 0 ? void 0 : _c.inputTokens;
+        completionTokens = (_d = response.tokenUsage) === null || _d === void 0 ? void 0 : _d.outputTokens;
         const requestTokenMessage = requestTokens || "unknown";
         const completionTokenMessage = completionTokens || "unknown";
         const totalTokens = (requestTokens + completionTokens) || "unknown";
```
package/build/shared/charts/descriptors/nlu/matchPattern.js

```diff
@@ -11,7 +11,7 @@ exports.MATCH_PATTERN = (0, createNodeDescriptor_1.createNodeDescriptor)({
         type: "text"
     },
     summary: "UI__NODE_EDITOR__NLU__MATCH_PATTERN__SUMMARY",
-    tags: ["nlu", "pattern", "patterns"],
+    tags: ["ai", "nlu", "pattern", "patterns"],
     fields: [
         {
             key: "patterns",
```

package/build/shared/charts/descriptors/nlu/regexSlotFiller.js

```diff
@@ -36,7 +36,7 @@ exports.REGEX_SLOT_FILLER = (0, createNodeDescriptor_1.createNodeDescriptor)({
             label: "UI__NODE_EDITOR__NLU__REGEX_SLOT_FILLER__FIELDS__SLOT__LABEL"
         },
     ],
-    tags: ["nlu", "regular", "expression", "slot"],
+    tags: ["ai", "nlu", "regular", "expression", "slot"],
     function: async (regexSlotFillerParams) => {
         const { cognigy, config } = regexSlotFillerParams;
         const { api } = cognigy;
```

package/build/shared/charts/descriptors/service/GPTConversation.js

```diff
@@ -379,7 +379,7 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
         { type: "section", key: "advanced" },
         { type: "section", key: "timeouts" }
     ],
-    tags: ["
+    tags: ["ai"],
     function: async ({ cognigy, config }) => {
         const { api, context, input } = cognigy;
         const { debug, timeoutMessage } = config;
```
package/build/shared/charts/descriptors/service/GPTPrompt.js

```diff
@@ -112,7 +112,7 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__LABEL",
             type: "slider",
             description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__DESCRIPTION",
-            defaultValue:
+            defaultValue: 1000,
             params: {
                 min: 1,
                 max: 4000,
@@ -195,7 +195,7 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             key: "immediateOutput",
             label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__IMMEDIATEOUTPUT__LABEL",
             type: "toggle",
-            defaultValue:
+            defaultValue: true,
             condition: {
                 or: [
                     {
@@ -407,6 +407,13 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             type: "json",
             defaultValue: {}
         },
+        {
+            key: "logErrorToSystem",
+            label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__LABEL",
+            description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__DESCRIPTION",
+            type: "toggle",
+            defaultValue: false,
+        },
         {
             key: "errorHandling",
             type: "select",
@@ -494,6 +501,7 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ERROR_HANDLING__LABEL",
             defaultCollapsed: true,
             fields: [
+                "logErrorToSystem",
                 "errorHandling",
                 "errorMessage",
                 "errorHandlingGotoTarget",
```
package/build/shared/charts/descriptors/service/GPTPrompt.js (continued)

```diff
@@ -531,12 +539,12 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
         { type: "section", key: "debugging" },
     ],
     appearance: {},
-    tags: ["
+    tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
     function: async ({ cognigy, config, nodeId }) => {
         var _a, _b, _c;
         const { api, input } = cognigy;
         const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, llmProviderReferenceId, useChatMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, detailedResults, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
-        errorHandlingGotoTarget, errorMessage, } = config;
+        errorHandlingGotoTarget, errorMessage, logErrorToSystem, } = config;
         let prompt = config.prompt;
         const { traceId } = input;
         // check if custom variables are used and if they have a length modifier
@@ -636,7 +644,8 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             },
             streamStopTokens,
             streamStopTokenOverrides,
-
+            // set to true in order to get token usage
+            detailedResults: true,
             seed: Number(seed) ? Number(seed) : undefined,
             customModelOptions,
             customRequestOptions
@@ -659,35 +668,17 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
         }
         const response = await api.runGenerativeAIPrompt(data, "gptPromptNode");
         const isFollowSessionActive = api.getMetadata().isFollowSessionActive;
-        if (detailedResults) {
-            if ((response === null || response === void 0 ? void 0 : response.usage) && response.usage.promptTokens && response.usage.completionTokens) {
-                // the api has usage results, but in Azure format - reformat to OpenAI format
-                const u = response.usage;
-                response.usage = {
-                    prompt_tokens: u.promptTokens,
-                    completion_tokens: u.completionTokens,
-                    total_tokens: u.promptTokens + u.completionTokens,
-                    calculation_method: "api"
-                };
-            }
-            else if (!response.usage) {
-                // if the api didn't return native usage results, compute them
-                const promptContent = (response.messages) ? JSON.stringify(response.messages) : prompt;
-                delete response.messages;
-                const prompt_tokens = await api.countGPTTokens(promptContent);
-                const completion_tokens = await api.countGPTTokens((typeof response.result === "object") ? JSON.stringify(response.result) : response.result);
-                response.usage = {
-                    prompt_tokens,
-                    completion_tokens,
-                    total_tokens: prompt_tokens + completion_tokens,
-                    calculation_method: "estimate"
-                };
-            }
-        }
         // if we're in adminconsole or following a session, process debugging options
         (input.endpointType === "adminconsole" || isFollowSessionActive) && (0, prompt_1.writeLLMDebugLogs)("LLM Prompt", debugPrompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
+        let responseToStore;
+        if (detailedResults) {
+            responseToStore = response;
+        }
+        else {
+            responseToStore = response.result;
+        }
         if (storeLocation === "context") {
-            api.addToContext(contextKey,
+            api.addToContext(contextKey, responseToStore, "simple");
             // output result immediately if toggle is set
             if (immediateOutput) {
                 const resultToOutput = typeof ((response === null || response === void 0 ? void 0 : response.result) || response) === "object" ? JSON.stringify((response === null || response === void 0 ? void 0 : response.result) || response, undefined, 2) : (response === null || response === void 0 ? void 0 : response.result) || response;
@@ -696,7 +687,7 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
         }
         else if (storeLocation === "input" || (storeLocation === "stream" && streamStoreCopyInInput)) {
             // @ts-ignore
-            api.addToInput(inputKey,
+            api.addToInput(inputKey, responseToStore);
             // output result immediately if toggle is set and we're storing into input
             // this means we don't output the result again if we streamed
             if (storeLocation === "input" && immediateOutput) {
```
package/build/shared/charts/descriptors/service/GPTPrompt.js (continued)

```diff
@@ -718,6 +709,9 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 requestId: (_c = error.meta) === null || _c === void 0 ? void 0 : _c.requestId
             };
         }
+        if (logErrorToSystem) {
+            api.log("error", JSON.stringify(errorDetailsBase));
+        }
         api.logDebugError(errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
         await handleServiceError(errorDetails);
         return;
```
package/build/shared/charts/descriptors/service/LLMEntityExtract.js

```diff
@@ -201,7 +201,7 @@ exports.LLM_ENTITY_EXTRACT = (0, createNodeDescriptor_1.createNodeDescriptor)({
         { type: "section", key: "debugging" }
     ],
     appearance: {},
-    tags: ["
+    tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt", "entity", "extract"],
     function: async ({ cognigy, config }) => {
         var _a, _b;
         const { api, input } = cognigy;
```
package/build/shared/charts/descriptors/service/aiAgent/aiAgentHandover.js (new file)

```diff
@@ -0,0 +1,92 @@
+"use strict";
+/* Custom modules */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AI_AGENT_HANDOVER = void 0;
+/* Interfaces */
+const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
+exports.AI_AGENT_HANDOVER = (0, createNodeDescriptor_1.createNodeDescriptor)({
+    type: "aiAgentHandover",
+    defaultLabel: "AI Agent Handover",
+    summary: "UI__NODE_EDITOR__SERVICE__AI_AGENT_HANDOVER__SUMMARY",
+    appearance: {
+        showIcon: false,
+        color: "#7F199B",
+    },
+    behavior: {
+        stopping: true
+    },
+    fields: [
+        {
+            key: "flowNode",
+            type: "flowNode",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_HANDOVER__FIELDS__FLOW_NODE__LABEL",
+            params: {
+                required: true
+            }
+        },
+        {
+            key: "parseIntents",
+            type: "toggle",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_HANDOVER__FIELDS__PARSE_INTENTS__LABEL",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_HANDOVER__FIELDS__PARSE_INTENTS__DESCRIPTION",
+            defaultValue: false,
+        },
+        {
+            key: "parseKeyphrases",
+            type: "toggle",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_HANDOVER__FIELDS__PARSE_KEYPHRASES__LABEL",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_HANDOVER__FIELDS__PARSE_KEYPHRASES__DESCRIPTION",
+            defaultValue: false,
+        },
+    ],
+    sections: [
+        {
+            key: "advanced",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_HANDOVER__SECTIONS__ADVANCED__LABEL",
+            defaultCollapsed: true,
+            fields: [
+                "parseIntents",
+                "parseKeyphrases",
+            ]
+        }
+    ],
+    form: [
+        { type: "field", key: "flowNode" },
+        { type: "section", key: "advanced" }
+    ],
+    preview: {
+        key: "flowNode",
+        type: "resource",
+    },
+    tags: ["ai", "aiAgent"],
+    function: async ({ cognigy, config, nodeId: thisNodeId }) => {
+        var _a, _b;
+        const { flowNode: { flow: flowId, node: nodeId }, parseIntents, parseKeyphrases } = config;
+        const { api } = cognigy;
+        if (!flowId) {
+            throw new Error("flowId is required");
+        }
+        if (!nodeId) {
+            throw new Error("nodeId is required");
+        }
+        if (!((_a = api.checkThink) === null || _a === void 0 ? void 0 : _a.call(api, thisNodeId))) {
+            api.resetNextNodes();
+            (_b = api.setThinkMarker) === null || _b === void 0 ? void 0 : _b.call(api, config.flowNode.flow);
+            // If Execution is to continue, execute Flow
+            await api.executeFlow({
+                flowNode: {
+                    flow: flowId,
+                    node: nodeId,
+                    isGoto: true,
+                },
+                absorbContext: true,
+                parseIntents,
+                parseKeyphrases
+            });
+        }
+        else {
+            throw new Error("Infinite Loop Detected");
+        }
+    },
+});
+//# sourceMappingURL=aiAgentHandover.js.map
```