@cognigy/rest-api-client 2025.15.1 → 2025.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/build/apigroups/MetricsAPIGroup_2_0.js +10 -0
- package/build/apigroups/ResourcesAPIGroup_2_0.js +6 -0
- package/build/apigroups/SimulationAPIGroup_2_0.js +4 -0
- package/build/shared/charts/descriptors/data/debugMessage.js +13 -3
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -49
- package/build/shared/charts/descriptors/logic/if/if.js +2 -2
- package/build/shared/charts/descriptors/logic/switch/switch.js +30 -21
- package/build/shared/charts/descriptors/message/question/question.js +3 -3
- package/build/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +2 -2
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +31 -2
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +40 -24
- package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +4 -4
- package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +36 -21
- package/build/shared/charts/descriptors/transcripts/addTranscriptStep.js +3 -3
- package/build/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
- package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
- package/build/shared/generativeAI/getPrompt.js +75 -0
- package/build/shared/generativeAI/utils/generativeAIPrompts.js +613 -0
- package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +84 -0
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +2 -0
- package/build/shared/interfaces/messageAPI/handover.js +6 -0
- package/build/shared/interfaces/resources/IGetAiAgentJobsTools.js +3 -0
- package/build/shared/interfaces/resources/IKnowledgeDescriptor.js +38 -5
- package/build/shared/interfaces/resources/ILargeLanguageModel.js +16 -1
- package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/ICallCounterPreAggregatedValue_3_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterOrganisationRest_3_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterRest_3_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/index.js +3 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentJobNodeWithTools_2_0.js +65 -0
- package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentJobAndToolsRest_2_0 .js +4 -0
- package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/IIndexKnowledgeDescriptorsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/extension/IRunKnowledgeExtensionRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/index.js +16 -0
- package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IStopSimulationRunBatchRest_2_0.js +3 -0
- package/build/shared/interfaces/security/ICallCounterPreAggregatedValue.js +3 -0
- package/build/test.js +39 -0
- package/dist/esm/apigroups/MetricsAPIGroup_2_0.js +10 -0
- package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +6 -0
- package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -0
- package/dist/esm/shared/charts/descriptors/data/debugMessage.js +13 -3
- package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -50
- package/dist/esm/shared/charts/descriptors/logic/if/if.js +2 -2
- package/dist/esm/shared/charts/descriptors/logic/switch/switch.js +30 -21
- package/dist/esm/shared/charts/descriptors/message/question/question.js +3 -3
- package/dist/esm/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +4 -3
- package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +29 -1
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +39 -23
- package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +4 -4
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +43 -28
- package/dist/esm/shared/charts/descriptors/transcripts/addTranscriptStep.js +3 -3
- package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
- package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
- package/dist/esm/shared/generativeAI/getPrompt.js +68 -0
- package/dist/esm/shared/generativeAI/utils/generativeAIPrompts.js +610 -0
- package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +81 -0
- package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +2 -0
- package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
- package/dist/esm/shared/interfaces/resources/IGetAiAgentJobsTools.js +2 -0
- package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js +37 -5
- package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +14 -0
- package/dist/esm/shared/interfaces/restAPI/management/authentication/ICreateJWTToken.js +1 -0
- package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/ICallCounterPreAggregatedValue_3_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterOrganisationRest_3_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterRest_3_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/index.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentJobNodeWithTools_2_0.js +65 -0
- package/dist/esm/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentJobAndToolsRest_2_0 .js +3 -0
- package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/IIndexKnowledgeDescriptorsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/extension/IRunKnowledgeExtensionRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/index.js +2 -1
- package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IStopSimulationRunBatchRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/security/ICallCounterPreAggregatedValue.js +2 -0
- package/dist/esm/test.js +39 -0
- package/package.json +1 -1
- package/types/index.d.ts +299 -42
package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js

```diff
@@ -11,7 +11,7 @@ var __rest = (this && this.__rest) || function (s, e) {
     return t;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.AI_AGENT_JOB = void 0;
+exports.AI_AGENT_JOB = exports.AI_AGENT_TOOLS_WHITELIST = void 0;
 /* Custom modules */
 const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
 const crypto_1 = require("crypto");
@@ -23,6 +23,7 @@ const generateSearchPrompt_1 = require("./helpers/generateSearchPrompt");
 const getUserMemory_1 = require("./helpers/getUserMemory");
 const createToolDefinitions_1 = require("./helpers/createToolDefinitions");
 const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
+exports.AI_AGENT_TOOLS_WHITELIST = ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"];
 exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
     type: "aiAgentJob",
     defaultLabel: "AI Agent",
@@ -31,7 +32,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
     collapsable: true,
     placement: {
         children: {
-            whitelist: ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"],
+            whitelist: exports.AI_AGENT_TOOLS_WHITELIST,
         },
     },
 },
```
```diff
@@ -420,6 +421,13 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             step: 0.1
         }
     },
+    {
+        key: "useTextAlternativeForLLM",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+        type: "toggle",
+        description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+        defaultValue: true,
+    },
     {
         key: "logErrorToSystem",
         label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TO_SYSTEM__LABEL",
@@ -815,6 +823,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             "timeoutInMs",
             "maxTokens",
             "temperature",
+            "useTextAlternativeForLLM",
         ],
     },
     {
```
```diff
@@ -860,9 +869,9 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
     ],
     tags: ["ai", "aiAgent"],
     function: async ({ cognigy, config, childConfigs, nodeId }) => {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
         const { api, context, input, profile, flowReferenceId } = cognigy;
-        const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+        const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
         try {
             if (!aiAgent) {
                 throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -910,7 +919,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
             }
         }
-        const _22 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _22, cleanedProfile = __rest(_22, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
+        const _24 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _24, cleanedProfile = __rest(_24, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
         const userMemory = (0, getUserMemory_1.getUserMemory)(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
         /**
          * ----- Knowledge Search Section -----
```
```diff
@@ -1140,7 +1149,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             const transcript = await api.getTranscript({
                 limit: 50,
                 rolesWhiteList: [transcripts_1.TranscriptRole.USER, transcripts_1.TranscriptRole.ASSISTANT, transcripts_1.TranscriptRole.TOOL],
-                excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT]
+                excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT],
+                useTextAlternativeForLLM,
             });
             // For knowledgeSearch "always", we enhance the user input with the knowledge search response data
             if (knowledgeSearchBehavior === "always" &&
```
```diff
@@ -1225,14 +1235,20 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             const mainToolCall = llmResult.toolCalls[0];
             let isMcpToolCall = false;
             // Find the child node with the toolId of the tool call
-            let toolChild =
+            let toolChild = undefined;
+            for (const child of childConfigs) {
+                if (child.type === "aiAgentJobTool" && ((_5 = child.config) === null || _5 === void 0 ? void 0 : _5.toolId) && await api.parseCognigyScriptText((_6 = child.config) === null || _6 === void 0 ? void 0 : _6.toolId) === mainToolCall.function.name) {
+                    toolChild = child;
+                    break;
+                }
+            }
             if (!toolChild && toolMap.has(mainToolCall.function.name)) {
                 // If the tool call is from an MCP tool, set the next node to the corresponding child node
                 toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
                 isMcpToolCall = true;
             }
             if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
-                (_5 = api.logDebugError) === null || _5 === void 0 ? void 0 : _5.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+                (_7 = api.logDebugError) === null || _7 === void 0 ? void 0 : _7.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
             }
             // Add last tool call to session state for loading it from Tool Answer Node
             api.updateSessionStateValues({
```
```diff
@@ -1240,21 +1256,21 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                     flow: flowReferenceId,
                     node: nodeId,
                 } }, (isMcpToolCall && {
-                    mcpServerUrl: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.mcpServerUrl,
-                    timeout: (_7 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _7 === void 0 ? void 0 : _7.timeout,
+                    mcpServerUrl: (_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.mcpServerUrl,
+                    timeout: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.timeout,
                     mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
                 })), { toolCall: mainToolCall }),
             });
             // if there are any parameters/arguments, add them to the input slots
             if (mainToolCall.function.arguments) {
-                input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_9 = (_8 = input.aiAgent) === null || _8 === void 0 ? void 0 : _8.toolArgs) !== null && _9 !== void 0 ? _9 : {}), mainToolCall.function.arguments) });
+                input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_11 = (_10 = input.aiAgent) === null || _10 === void 0 ? void 0 : _10.toolArgs) !== null && _11 !== void 0 ? _11 : {}), mainToolCall.function.arguments) });
             }
             // Debug Message for Tool Calls, configured in the Tool Node
-            if ((_10 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _10 === void 0 ? void 0 : _10.debugMessage) {
-                const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
+            if ((_12 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _12 === void 0 ? void 0 : _12.debugMessage) {
+                const toolId = isMcpToolCall ? mainToolCall.function.name : await api.parseCognigyScriptText(toolChild.config.toolId);
                 const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
                 // Arguments / Parameters Slots
-                const slots = ((_11 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _11 === void 0 ? void 0 : _11.arguments) && Object.keys(mainToolCall.function.arguments);
+                const slots = ((_13 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _13 === void 0 ? void 0 : _13.arguments) && Object.keys(mainToolCall.function.arguments);
                 const hasSlots = slots && slots.length > 0;
                 messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
                 if (hasSlots) {
@@ -1269,7 +1285,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                         messageLines.push(`- ${slot}: ${slotValueAsString}`);
                     });
                 }
-                (_12 = api.logDebugMessage) === null || _12 === void 0 ? void 0 : _12.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+                (_14 = api.logDebugMessage) === null || _14 === void 0 ? void 0 : _14.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
             }
             if (toolChild) {
                 api.setNextNode(toolChild.id);
@@ -1294,11 +1310,11 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             }
             // Optionally output the result immediately
             if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
-                await ((_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, llmResult.result, {}));
+                await ((_15 = api.output) === null || _15 === void 0 ? void 0 : _15.call(api, llmResult.result, {}));
             }
             else if (llmResult.finishReason && llmPromptOptions.stream) {
                 // send the finishReason as last output for a stream
-                (_14 = api.output) === null || _14 === void 0 ? void 0 : _14.call(api, "", {
+                (_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, "", {
                     _cognigy: {
                         _preventTranscript: true,
                         _messageId,
```
```diff
@@ -1321,7 +1337,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             }
             // Add response to Cognigy Input/Context for further usage
             if (storeLocation === "context") {
-                (_15 = api.addToContext) === null || _15 === void 0 ? void 0 : _15.call(api, contextKey, llmResult, "simple");
+                (_17 = api.addToContext) === null || _17 === void 0 ? void 0 : _17.call(api, contextKey, llmResult, "simple");
             }
             else if (storeLocation === "input") {
                 api.addToInput(inputKey, llmResult);
@@ -1334,14 +1350,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             const errorDetails = {
                 name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
                 code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
-                message: (error === null || error === void 0 ? void 0 : error.message) || ((_16 = error.originalErrorDetails) === null || _16 === void 0 ? void 0 : _16.message),
+                message: (error === null || error === void 0 ? void 0 : error.message) || ((_18 = error.originalErrorDetails) === null || _18 === void 0 ? void 0 : _18.message),
             };
-            (_17 = api.emitEvent) === null || _17 === void 0 ? void 0 : _17.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
+            (_19 = api.emitEvent) === null || _19 === void 0 ? void 0 : _19.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
             if (logErrorToSystem) {
-                (_18 = api.log) === null || _18 === void 0 ? void 0 : _18.call(api, "error", JSON.stringify(errorDetails));
+                (_20 = api.log) === null || _20 === void 0 ? void 0 : _20.call(api, "error", JSON.stringify(errorDetails));
             }
             if (errorHandling !== "stop") {
-                (_19 = api.logDebugError) === null || _19 === void 0 ? void 0 : _19.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
+                (_21 = api.logDebugError) === null || _21 === void 0 ? void 0 : _21.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
             }
             if (storeErrorInInput) {
                 input.aiAgent = input.aiAgent || {};
@@ -1350,7 +1366,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             if (errorHandling === "continue") {
                 // output the timeout message
                 if (errorMessage) {
-                    await ((_20 = api.output) === null || _20 === void 0 ? void 0 : _20.call(api, errorMessage, null));
+                    await ((_22 = api.output) === null || _22 === void 0 ? void 0 : _22.call(api, errorMessage, null));
                 }
                 // Set default node as next node
                 const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1362,7 +1378,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 if (!errorHandlingGotoTarget) {
                     throw new Error("GoTo Target is required");
                 }
-                if (!((_21 = api.checkThink) === null || _21 === void 0 ? void 0 : _21.call(api, nodeId))) {
+                if (!((_23 = api.checkThink) === null || _23 === void 0 ? void 0 : _23.call(api, nodeId))) {
                     api.resetNextNodes();
                     await api.executeFlow({
                         flowNode: {
```
package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js

```diff
@@ -36,11 +36,11 @@ const createToolDefinitions = async (childConfigs, api, useStrict) => {
         }
         const toolId = child.config.toolId;
         if ((child.type === "aiAgentJobTool" || child.type === "llmPromptTool") &&
-            (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
+            (!child.config.condition || !!await api.parseCognigyScriptCondition(child.config.condition))) {
             if (!toolId) {
                 throw new Error(`Tool ID is missing in Tool Node configuration.`);
             }
-            const parsedToolId = api.parseCognigyScriptText(toolId);
+            const parsedToolId = await api.parseCognigyScriptText(toolId);
             if (!(0, createSystemMessage_1.validateToolId)(parsedToolId)) {
                 throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
             }
@@ -53,7 +53,7 @@ const createToolDefinitions = async (childConfigs, api, useStrict) => {
                 type: "function",
                 function: {
                     name: parsedToolId,
-                    description: api.parseCognigyScriptText(child.config.description),
+                    description: await api.parseCognigyScriptText(child.config.description),
                 },
             };
             if (useStrict) {
@@ -65,7 +65,7 @@ const createToolDefinitions = async (childConfigs, api, useStrict) => {
             tools.push(tool);
         }
         if ((child.type === "aiAgentJobMCPTool" || child.type === "llmPromptMCPTool") &&
-            (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
+            (!child.config.condition || !!await api.parseCognigyScriptCondition(child.config.condition))) {
            if (!child.config.mcpServerUrl) {
                throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
            }
```
package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js

```diff
@@ -401,6 +401,13 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
             ]
         }
     },
+    {
+        key: "useTextAlternativeForLLM",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+        type: "toggle",
+        description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+        defaultValue: true,
+    },
     {
         key: "customModelOptions",
         label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
@@ -552,7 +559,8 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
             "frequencyPenalty",
             "useStop",
             "stop",
-            "seed"
+            "seed",
+            "useTextAlternativeForLLM",
         ]
     },
     {
@@ -638,10 +646,10 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
     },
     tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
     function: async ({ cognigy, config, childConfigs, nodeId }) => {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
         const { api, input, flowReferenceId } = cognigy;
         const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
-        errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+        errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
         let prompt = config.prompt || "";
         const { traceId } = input;
         // check if custom variables are used and if they have a length modifier
```
```diff
@@ -786,7 +794,8 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
             const transcript = await api.getTranscript({
                 limit: chatTranscriptSteps || 50,
                 rolesWhiteList: [transcripts_1.TranscriptRole.USER, transcripts_1.TranscriptRole.ASSISTANT, transcripts_1.TranscriptRole.TOOL],
-                excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT]
+                excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT],
+                useTextAlternativeForLLM,
             });
             llmPromptOptions["transcript"] = transcript;
             llmPromptOptions["chat"] = [{
```
```diff
@@ -830,14 +839,20 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
             const mainToolCall = llmResult.toolCalls[0];
             let isMcpToolCall = false;
             // Find the child node with the toolId of the tool call
-            let toolChild =
+            let toolChild = undefined;
+            for (const child of childConfigs) {
+                if (child.type === "llmPromptTool" && ((_e = child.config) === null || _e === void 0 ? void 0 : _e.toolId) && await api.parseCognigyScriptText((_f = child.config) === null || _f === void 0 ? void 0 : _f.toolId) === mainToolCall.function.name) {
+                    toolChild = child;
+                    break;
+                }
+            }
             if (!toolChild && toolMap.has(mainToolCall.function.name)) {
                 // If the tool call is from an MCP tool, set the next node to the corresponding child node
                 toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
                 isMcpToolCall = true;
             }
             if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
-                (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+                (_g = api.logDebugError) === null || _g === void 0 ? void 0 : _g.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
             }
             // Add last tool call to session state for loading it from Tool Answer Node
             api.updateSessionStateValues({
@@ -845,20 +860,20 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
                     flow: flowReferenceId,
                     node: nodeId,
                 } }, (isMcpToolCall && {
-                    mcpServerUrl: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.mcpServerUrl,
-                    timeout: (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.timeout,
+                    mcpServerUrl: (_h = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _h === void 0 ? void 0 : _h.mcpServerUrl,
+                    timeout: (_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.timeout,
                     mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
                 })), { toolCall: mainToolCall }),
             });
             // if there are any parameters/arguments, add them to the input slots
             if (mainToolCall.function.arguments) {
-                input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_j = (_h = input.llmPrompt) === null || _h === void 0 ? void 0 : _h.toolArgs) !== null && _j !== void 0 ? _j : {}), mainToolCall.function.arguments) });
+                input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_l = (_k = input.llmPrompt) === null || _k === void 0 ? void 0 : _k.toolArgs) !== null && _l !== void 0 ? _l : {}), mainToolCall.function.arguments) });
             }
             // Debug Message for Tool Calls, configured in the Tool Node
-            if ((_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.debugMessage) {
-                const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
+            if ((_m = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _m === void 0 ? void 0 : _m.debugMessage) {
+                const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${await api.parseCognigyScriptText(toolChild.config.toolId)}`];
                 // Arguments / Parameters Slots
-                const slots = ((_l = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _l === void 0 ? void 0 : _l.arguments) && Object.keys(mainToolCall.function.arguments);
+                const slots = ((_o = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _o === void 0 ? void 0 : _o.arguments) && Object.keys(mainToolCall.function.arguments);
                 const hasSlots = slots && slots.length > 0;
                 messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
                 if (hasSlots) {
```
```diff
@@ -873,7 +888,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
                         messageLines.push(`- ${slot}: ${slotValueAsString}`);
                     });
                 }
-                (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+                (_p = api.logDebugMessage) === null || _p === void 0 ? void 0 : _p.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
             }
             if (toolChild) {
                 api.setNextNode(toolChild.id);
@@ -898,11 +913,11 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 // we stringify objects (e.g. results coming from JSON Mode)
                 // so that the transcript only contains text
                 const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
-                await ((_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, resultToOutput, {}));
+                await ((_q = api.output) === null || _q === void 0 ? void 0 : _q.call(api, resultToOutput, {}));
             }
             else if (llmResult.finishReason && llmPromptOptions.stream) {
                 // send the finishReason as last output for a stream
-                (_p = api.output) === null || _p === void 0 ? void 0 : _p.call(api, "", {
+                (_r = api.output) === null || _r === void 0 ? void 0 : _r.call(api, "", {
                     _cognigy: {
                         _preventTranscript: true,
                         _messageId,
@@ -925,7 +940,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
             }
             // Add response to Cognigy Input/Context for further usage
             if (storeLocation === "context") {
-                (_q = api.addToContext) === null || _q === void 0 ? void 0 : _q.call(api, contextKey, llmResult, "simple");
+                (_s = api.addToContext) === null || _s === void 0 ? void 0 : _s.call(api, contextKey, llmResult, "simple");
             }
             else if (storeLocation === "input") {
                 api.addToInput(inputKey, llmResult);
@@ -938,19 +953,19 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
             const errorDetailsBase = {
                 name: error === null || error === void 0 ? void 0 : error.name,
                 code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
-                message: (error === null || error === void 0 ? void 0 : error.message) || ((_r = error.originalErrorDetails) === null || _r === void 0 ? void 0 : _r.message),
+                message: (error === null || error === void 0 ? void 0 : error.message) || ((_t = error.originalErrorDetails) === null || _t === void 0 ? void 0 : _t.message),
             };
             const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
             // return the requestId if it exist in the error obj.
-            if ((_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId) {
+            if ((_u = error.meta) === null || _u === void 0 ? void 0 : _u.requestId) {
                 errorDetails["meta"] = {
-                    requestId: (_t = error.meta) === null || _t === void 0 ? void 0 : _t.requestId
+                    requestId: (_v = error.meta) === null || _v === void 0 ? void 0 : _v.requestId
                 };
             }
             if (logErrorToSystem) {
-                (_u = api.log) === null || _u === void 0 ? void 0 : _u.call(api, "error", JSON.stringify(errorDetailsBase));
+                (_w = api.log) === null || _w === void 0 ? void 0 : _w.call(api, "error", JSON.stringify(errorDetailsBase));
             }
-            (_v = api.logDebugError) === null || _v === void 0 ? void 0 : _v.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+            (_x = api.logDebugError) === null || _x === void 0 ? void 0 : _x.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
             await handleServiceError(errorDetails);
             return;
         }
```
package/build/shared/charts/descriptors/transcripts/addTranscriptStep.js

```diff
@@ -328,7 +328,7 @@ exports.ADD_TRANSCRIPT_STEP = (0, createNodeDescriptor_1.createNodeDescriptor)({
     },
     tags: ["service", "transcripts"],
     function: async ({ cognigy, config }) => {
-        const { role,
+        const { role, text, data, name, id, input, toolCallId, assistantType, content, header, message, metadata } = config;
         const { api } = cognigy;
         let log = null;
         switch (role) {
@@ -344,7 +344,7 @@ exports.ADD_TRANSCRIPT_STEP = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 };
                 break;
             case transcripts_1.TranscriptRole.ASSISTANT:
-                if (
+                if (assistantType === transcripts_1.TranscriptEntryType.OUTPUT) {
                     log = {
                         role: transcripts_1.TranscriptRole.ASSISTANT,
                         type: transcripts_1.TranscriptEntryType.OUTPUT,
@@ -355,7 +355,7 @@ exports.ADD_TRANSCRIPT_STEP = (0, createNodeDescriptor_1.createNodeDescriptor)({
                         }
                     };
                 }
-                else if (
+                else if (assistantType === transcripts_1.TranscriptEntryType.TOOL_CALL) {
                     log = {
                         role: transcripts_1.TranscriptRole.ASSISTANT,
                         type: transcripts_1.TranscriptEntryType.TOOL_CALL,
```
package/build/shared/charts/descriptors/transcripts/getTranscript.js

```diff
@@ -70,6 +70,13 @@ exports.GET_TRANSCRIPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             value: "context",
         }
     },
+    {
+        key: "includeTextAlternativeInTranscript",
+        type: "toggle",
+        label: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__LABEL",
+        description: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__DESCRIPTION",
+        defaultValue: true,
+    },
 ],
 sections: [
     {
@@ -81,18 +88,31 @@ exports.GET_TRANSCRIPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             "inputKey",
             "contextKey",
         ]
-    }
+    },
+    {
+        key: "advanced",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
+        defaultCollapsed: true,
+        fields: [
+            "includeTextAlternativeInTranscript",
+        ],
+    },
 ],
 form: [
     { type: "field", key: "limit" },
     { type: "section", key: "storage" },
+    { type: "section", key: "advanced" },
 ],
 tags: ["service", "transcripts"],
 function: async ({ cognigy, config }) => {
     var _a;
-    const { limit, storeLocation, inputKey, contextKey } = config;
+    const { limit, storeLocation, inputKey, contextKey, includeTextAlternativeInTranscript } = config;
     const { api } = cognigy;
-    const transcript = await api.getTranscript({
+    const transcript = await api.getTranscript({
+        limit,
+        excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.AGENT],
+        includeTextAlternativeInTranscript,
+    });
     if (storeLocation === "context") {
         (_a = api.addToContext) === null || _a === void 0 ? void 0 : _a.call(api, contextKey, transcript, "simple");
     }
```
package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js

```diff
@@ -146,6 +146,9 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
             const timeout = Number(spAsrTimeout || asrTimeout);
             recognizer.asrTimeout = timeout / 1000 || undefined;
         }
+        else if (asrEnabled === false || spAsrEnabled === false) {
+            recognizer.asrTimeout = 0;
+        }
         return recognizer;
     }
     isDtmfEnabled(sessionParams, dtmf) {
```
package/build/shared/generativeAI/getPrompt.js (new file)

```diff
@@ -0,0 +1,75 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getPrompt = exports.isOpenAIChatPrompt = exports.isChatPrompt = exports.isCompletionPrompt = void 0;
+/** Custom Modules */
+const generativeAIPrompts_1 = require("./utils/generativeAIPrompts");
+const internalServerError_1 = require("../errors/internalServerError");
+const isCompletionPrompt = (data) => {
+    return typeof data === "object" && data !== null && "prompt" in data;
+};
+exports.isCompletionPrompt = isCompletionPrompt;
+const isChatPrompt = (data) => {
+    return typeof data === "object" && data !== null && "messages" in data;
+};
+exports.isChatPrompt = isChatPrompt;
+const isOpenAIChatPrompt = (data) => {
+    return Array.isArray(data) &&
+        data.every((item) => typeof item === "object" &&
+            item !== null &&
+            ("role" in item) &&
+            ("content" in item) &&
+            (item.role === "system" || item.role === "user" || item.role === "assistant") &&
+            (typeof item.content === "string"));
+};
+exports.isOpenAIChatPrompt = isOpenAIChatPrompt;
+/**
+ * Gets the current prompts for the passed model/useCase
+ * @param model - The model to get the prompt for
+ * @param useCase - The use case to get the prompt for
+ * @param subUseCase - Optional sub-use case to get a specific prompt
+ * @param promptParser - Optional function to modify the prompt before returning it
+ * @returns {TALLPrompts}
+ */
+const getPrompt = (model, useCase, subUseCase, promptParser) => {
+    var _a;
+    const loggerMeta = {
+        module: "getPrompt.ts",
+        label: "generativeAI",
+        function: "getPrompt",
+        model,
+        useCase,
+        subUseCase
+    };
+    let modelPrompts = (_a = generativeAIPrompts_1.generativeAIPrompts[`${model}`]) !== null && _a !== void 0 ? _a : generativeAIPrompts_1.generativeAIPrompts["default"];
+    if (!modelPrompts) {
+        throw new internalServerError_1.InternalServerError(`Neither the model "${model}" nor the default fallback have predefined prompts`, undefined, loggerMeta);
+    }
+    let prompt = modelPrompts[`${useCase}`];
+    // generativeAIPrompts[model] has no prompt for use case, so try to fallback to default prompt
+    if (!prompt) {
+        modelPrompts = generativeAIPrompts_1.generativeAIPrompts["default"];
+        if (!modelPrompts) {
+            throw new internalServerError_1.InternalServerError(`The default fallback has no predefined prompts`, undefined, loggerMeta);
+        }
+        prompt = modelPrompts[`${useCase}`];
+    }
+    if (!prompt) {
+        throw new internalServerError_1.InternalServerError(`Neither the model "${model}" nor the default fallback define a prompt for useCase "${useCase}"`, undefined, loggerMeta);
+    }
+    if (subUseCase && prompt && typeof prompt === "object" && `${subUseCase}` in prompt) {
+        prompt = prompt[`${subUseCase}`];
+    }
+    if (!prompt) {
+        throw new internalServerError_1.InternalServerError(`The prompt defined for the model "${model}" or the default fallback, useCase "${useCase}", and subUseCase "${subUseCase}" is invalid`, undefined, loggerMeta);
+    }
+    try {
+        return promptParser
+            ? promptParser(JSON.parse(JSON.stringify(prompt)))
+            : JSON.parse(JSON.stringify(prompt));
+    }
+    catch (error) {
+        throw new internalServerError_1.InternalServerError(`Error while parsing prompt for model: ${model} and useCase: ${useCase} and subUseCase: ${subUseCase}`, undefined, Object.assign({ originalError: error }, loggerMeta));
+    }
+};
+exports.getPrompt = getPrompt;
+//# sourceMappingURL=getPrompt.js.map
```