@cognigy/rest-api-client 4.98.0 → 4.100.0
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/CHANGELOG.md +10 -0
- package/build/RestAPIClient.js +7 -0
- package/build/apigroups/AIOpsCenterAPIGroup_2_0.js +22 -0
- package/build/apigroups/ResourcesAPIGroup_2_0.js +11 -9
- package/build/apigroups/index.js +3 -1
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +0 -44
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage0.js +0 -45
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage1.js +0 -43
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stageError.js +0 -45
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +6 -2
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/mistralProviderConnection.js +11 -0
- package/build/shared/charts/descriptors/index.js +7 -1
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -1
- package/build/shared/charts/descriptors/service/GPTPrompt.js +14 -4
- package/build/shared/charts/descriptors/service/LLMEntityExtract.js +1 -1
- package/build/shared/charts/descriptors/service/LLMModerate.js +275 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +185 -63
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +180 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +196 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentToolAnswer.js +5 -1
- package/build/shared/charts/descriptors/service/index.js +9 -1
- package/build/shared/charts/descriptors/service/niceCXOneAAHAuthenticationConnection.js +12 -0
- package/build/shared/charts/descriptors/voice/mappers/hangup.mapper.js +9 -8
- package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +9 -10
- package/build/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
- package/build/shared/charts/descriptors/voicegateway/nodes/hangup.js +1 -1
- package/build/shared/charts/descriptors/voicegateway2/nodes/hangup.js +24 -2
- package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +25 -5
- package/build/shared/interfaces/amqpInterface.js +3 -0
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +10 -1
- package/build/shared/interfaces/messageAPI/endpoints.js +44 -119
- package/build/shared/interfaces/resources/IEndpoint.js +23 -4
- package/build/shared/interfaces/resources/ILargeLanguageModel.js +10 -1
- package/build/shared/interfaces/resources/ISipConnectivityInfo.js +16 -0
- package/build/shared/interfaces/resources/IWebrtcWidgetConfig.js +11 -0
- package/build/shared/interfaces/restAPI/metrics/conversationCounter/v3.0/IConversationCounterPreAggregatedValue_3_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/conversationCounter/v3.0/IGetConversationCounterOrganisationRest_3_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/conversationCounter/v3.0/IGetConversationCounterRest_3_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/conversationCounter/v3.0/index.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/alerts/IIndexOpsCenterAlertsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/alerts/IOpsCenterAlert.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/component/IOpsCenterComponents.js +34 -0
- package/build/shared/interfaces/restAPI/opsCenter/errors/IDeleteOpsCenterErrorRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/errors/IGetOpsCenterErrorRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/errors/IIndexOpsCenterErrorsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/errors/IOpsCenterError.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetrics.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsConfigRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRangeRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IOpsCenterMetricsConfig.js +6 -0
- package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IGetOpsCenterObservationConfigRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +4 -0
- package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IPatchOpsCenterObservationConfigRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/observationConfig/ISetupOpsCenterObservationConfigRest_2_0.js +3 -0
- package/build/shared/interfaces/security/IConversationCounterPreAggregatedValue.js +3 -0
- package/build/shared/interfaces/security/IIdentityProvider.js +1 -0
- package/build/shared/json-schema.js +2 -1
- package/dist/esm/RestAPIClient.js +7 -0
- package/dist/esm/apigroups/AIOpsCenterAPIGroup_2_0.js +18 -0
- package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +11 -9
- package/dist/esm/apigroups/index.js +1 -0
- package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +0 -44
- package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage0.js +0 -45
- package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage1.js +0 -43
- package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stageError.js +0 -45
- package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +4 -1
- package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/mistralProviderConnection.js +8 -0
- package/dist/esm/shared/charts/descriptors/index.js +8 -2
- package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +12 -3
- package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +18 -8
- package/dist/esm/shared/charts/descriptors/service/LLMEntityExtract.js +1 -1
- package/dist/esm/shared/charts/descriptors/service/LLMModerate.js +273 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +185 -63
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +178 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +193 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentToolAnswer.js +5 -1
- package/dist/esm/shared/charts/descriptors/service/index.js +4 -0
- package/dist/esm/shared/charts/descriptors/service/niceCXOneAAHAuthenticationConnection.js +9 -0
- package/dist/esm/shared/charts/descriptors/voice/mappers/hangup.mapper.js +9 -8
- package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +9 -10
- package/dist/esm/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
- package/dist/esm/shared/charts/descriptors/voicegateway/nodes/hangup.js +1 -1
- package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/hangup.js +24 -2
- package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +25 -5
- package/dist/esm/shared/interfaces/amqpInterface.js +3 -0
- package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +10 -1
- package/dist/esm/shared/interfaces/messageAPI/endpoints.js +43 -118
- package/dist/esm/shared/interfaces/resources/IEndpoint.js +26 -7
- package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +9 -0
- package/dist/esm/shared/interfaces/resources/ISipConnectivityInfo.js +13 -0
- package/dist/esm/shared/interfaces/resources/IWebrtcWidgetConfig.js +8 -0
- package/dist/esm/shared/interfaces/restAPI/metrics/conversationCounter/v3.0/IConversationCounterPreAggregatedValue_3_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/metrics/conversationCounter/v3.0/IGetConversationCounterOrganisationRest_3_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/metrics/conversationCounter/v3.0/IGetConversationCounterRest_3_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/metrics/conversationCounter/v3.0/index.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/alerts/IIndexOpsCenterAlertsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/alerts/IOpsCenterAlert.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/component/IOpsCenterComponents.js +31 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IDeleteOpsCenterErrorRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IGetOpsCenterErrorRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IIndexOpsCenterErrorsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IOpsCenterError.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetrics.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsConfigRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRangeRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IOpsCenterMetricsConfig.js +5 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IGetOpsCenterObservationConfigRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +3 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IPatchOpsCenterObservationConfigRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/ISetupOpsCenterObservationConfigRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/security/IConversationCounterPreAggregatedValue.js +2 -0
- package/dist/esm/shared/interfaces/security/IIdentityProvider.js +1 -0
- package/dist/esm/shared/json-schema.js +2 -1
- package/package.json +1 -1
- package/types/index.d.ts +392 -43
@@ -17,7 +17,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
         collapsable: true,
         placement: {
             children: {
-                whitelist: ["aiAgentJobDefault", "aiAgentJobTool"],
+                whitelist: ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"],
             },
         },
     },
@@ -822,7 +822,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
     ],
     tags: ["ai", "aiAgent"],
     function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19;
         const { api, context, input, profile, flowReferenceId } = cognigy;
         const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
         try {
@@ -862,7 +862,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                 const voiceSettings = voiceConfigParamsToVoiceSettings(config, api);
                 const payload = setSessionConfig.handleVGInput(voiceSettings, sessionParams || {}, api);
                 if (payload && Object.keys(payload).length > 0) {
-                    api.say(null, {
+                    (_b = api.say) === null || _b === void 0 ? void 0 : _b.call(api, null, {
                         _cognigy: payload,
                     });
                 }
@@ -872,16 +872,16 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                     throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
                 }
             }
-            const
+            const _20 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _20, cleanedProfile = __rest(_20, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
             const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
             /**
              * ----- Knowledge Search Section -----
              */
             let knowledgeSearchResponseData;
             const sessionState = yield api.loadSessionState();
-            const lastToolCall = (
+            const lastToolCall = (_c = sessionState.lastToolCall) === null || _c === void 0 ? void 0 : _c.toolCall;
             if (knowledgeSearchBehavior === "always" ||
-                (knowledgeSearchBehavior === "onDemand" && ((
+                (knowledgeSearchBehavior === "onDemand" && ((_d = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _d === void 0 ? void 0 : _d.name) === "retrieve_knowledge")) {
                 const knowledgeStoreIds = [];
                 if (knowledgeSearchAiAgentKnowledge && aiAgent.knowledgeReferenceId) {
                     knowledgeStoreIds.push(aiAgent.knowledgeReferenceId);
@@ -889,8 +889,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                 if (knowledgeSearchJobKnowledge && knowledgeSearchJobStore) {
                     knowledgeStoreIds.push(knowledgeSearchJobStore);
                 }
-                if (knowledgeStoreIds.length > 0 && (input.text || ((
-                    let query = ((
+                if (knowledgeStoreIds.length > 0 && (input.text || ((_f = (_e = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _e === void 0 ? void 0 : _e.arguments) === null || _f === void 0 ? void 0 : _f.generated_prompt))) {
+                    let query = ((_h = (_g = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _g === void 0 ? void 0 : _g.arguments) === null || _h === void 0 ? void 0 : _h.generated_prompt) || input.text;
                     if (knowledgeSearchBehavior === "always" && knowledgeSearchGenerateSearchPrompt) {
                         const generatedSearchPrompt = yield generateSearchPrompt({ api, userMemory, llmProviderReferenceId, debugLogTokenCount, memoryContextInjection });
                         if (generatedSearchPrompt === null || generatedSearchPrompt === void 0 ? void 0 : generatedSearchPrompt.generated_prompt) {
@@ -906,10 +906,10 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                         knowledgeStoreIds,
                     };
                     if (knowledgeSearchBehavior === "onDemand") {
-                        const generated_buffer_phrase = (
+                        const generated_buffer_phrase = (_k = (_j = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _j === void 0 ? void 0 : _j.arguments) === null || _k === void 0 ? void 0 : _k.generated_buffer_phrase;
                         if (generated_buffer_phrase) {
                             // output the generated buffer phrase. Don't add it to the transcript, else the LLM will repeat it next time.
-                            yield ((
+                            yield ((_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, generated_buffer_phrase, {
                                 _cognigy: {
                                     _preventTranscript: true
                                 }
@@ -939,7 +939,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                         if (query) {
                             messageLines.push(`\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__SEARCH_PROMPT</b> ${query}`);
                         }
-                        if ((
+                        if ((_m = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _m === void 0 ? void 0 : _m.length) {
                            knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK.forEach((result, index) => {
                                var _a;
                                messageLines.push(`\nTop ${index + 1}:`);
@@ -951,7 +951,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                         else {
                             messageLines.push("UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__NO_RESULTS");
                         }
-                        (
+                        (_o = api.logDebugMessage) === null || _o === void 0 ? void 0 : _o.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
                     }
                     // Knowledge Search "onDemand" responds to a tool call
                     if (knowledgeSearchBehavior === "onDemand") {
@@ -980,7 +980,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                            yield api.addTranscriptStep(toolAnswer);
                        }
                    }
-                    if (((
+                    if (((_p = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _p === void 0 ? void 0 : _p.name) === "retrieve_knowledge") {
                        // remove the retrieve_knowledge toolCall from session state to avoid infinite loops
                        api.updateSessionStateValues({
                            lastToolCall: undefined
@@ -992,32 +992,39 @@ export const AI_AGENT_JOB = createNodeDescriptor({
             // create the system Message from the AI Agent resource and this Node's config storage
             const systemMessage = createSystemMessage(aiAgent, input, jobName, jobDescription, jobInstructions, userMemory, memoryContextInjection, isOnDemandKnowledgeStoreConfigured ? "onDemand" : "none");
             // Create Tools JSON
+            /** This is the list of tools that are used in the AI Agent Job */
             const tools = [];
+            /** Array of tool IDs for deduping */
             const toolIds = [];
-
+            /** Map of MCP tool IDs to their respective node IDs they were loaded from */
+            const toolMap = new Map();
+            /** Array of tool names for listing in the debug message */
+            const toolNames = [];
+            for (const child of childConfigs) {
                 if (child.type === "aiAgentJobDefault") {
-
+                    continue;
                 }
-                ;
                 const toolId = child.config.toolId;
-                if (
-
-
-
-
-
-
-
-
-
-
+                if (child.type === "aiAgentJobTool" &&
+                    (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
+                    if (!toolId) {
+                        throw new Error(`Tool ID is missing in Tool Node configuration.`);
+                    }
+                    const parsedToolId = api.parseCognigyScriptText(toolId);
+                    if (!validateToolId(parsedToolId)) {
+                        throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
+                    }
+                    if (toolIds.includes(parsedToolId)) {
+                        throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
+                    }
+                    toolIds.push(parsedToolId);
+                    toolNames.push(parsedToolId);
                     const tool = {
                         type: "function",
                         function: {
                             name: parsedToolId,
                             description: api.parseCognigyScriptText(child.config.description),
-                        }
+                        },
                     };
                     if (useStrict) {
                         tool.function.strict = true;
@@ -1027,7 +1034,106 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                    }
                    tools.push(tool);
                }
-
+                if (child.type === "aiAgentJobMCPTool" &&
+                    (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
+                    if (!child.config.mcpServerUrl) {
+                        throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
+                    }
+                    const mcpServerUrl = child.config.mcpServerUrl;
+                    const timeout = child.config.timeout;
+                    const cacheTools = child.config.cacheTools;
+                    const sendDebug = child.config.debugMessageFetchedTools;
+                    const toolFilter = child.config.toolFilter;
+                    let mcpTools = null;
+                    try {
+                        mcpTools = yield api.fetchMcpTools({
+                            mcpServerUrl,
+                            timeout,
+                            cacheTools,
+                        });
+                    }
+                    catch (error) {
+                        const errorDetails = error instanceof Error
+                            ? {
+                                name: error.name,
+                                message: error.message,
+                            }
+                            : error;
+                        (_q = api.logDebugError) === null || _q === void 0 ? void 0 : _q.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
+                    }
+                    if (mcpTools) {
+                        if (sendDebug) {
+                            if (mcpTools.length === 0) {
+                                (_r = api.logDebugMessage) === null || _r === void 0 ? void 0 : _r.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
+                            }
+                            if (mcpTools.length > 0) {
+                                const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
+                                mcpTools.forEach((tool) => {
+                                    messageLines.push(`<br>- <b>${tool.name}</b>: ${tool.description}`);
+                                    if (child.config.debugMessageParameters && tool.inputSchema) {
+                                        messageLines.push(` <b>Parameters</b>:`);
+                                        Object.keys(tool.inputSchema.properties).forEach((key) => {
+                                            const parameter = tool.inputSchema.properties[key];
+                                            const requiredText = tool.inputSchema.required && !tool.inputSchema.required.includes(key) ? " (optional)" : "";
+                                            if (parameter.description) {
+                                                messageLines.push(` - ${key} (${parameter.type}): ${parameter.description}${requiredText}`);
+                                            }
+                                            else {
+                                                messageLines.push(` - ${key}: ${parameter.type}${requiredText}`);
+                                            }
+                                        });
+                                    }
+                                });
+                                (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "MCP Tool");
+                            }
+                        }
+                        const filteredMcpTools = mcpTools.filter((tool) => {
+                            if (toolFilter && toolFilter !== "none") {
+                                if (toolFilter === "whitelist" && child.config.whitelist) {
+                                    const whitelist = child.config.whitelist.map((item) => item.trim());
+                                    return whitelist.includes(tool.name);
+                                }
+                                else if (toolFilter === "blacklist") {
+                                    // If the blacklist is falsy, all tools are allowed
+                                    if (!child.config.blacklist) {
+                                        return true;
+                                    }
+                                    const blacklist = child.config.blacklist.map((item) => item.trim());
+                                    return !blacklist.includes(tool.name);
+                                }
+                            }
+                            else {
+                                return true;
+                            }
+                        });
+                        const structuredMcpTools = [];
+                        filteredMcpTools.forEach((tool) => {
+                            var _a;
+                            if (toolIds.includes(tool.name)) {
+                                (_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Tool "${tool.name}" from MCP Tool "${child.config.name}" is not unique and will not be added. Please ensure each tool has a unique id.`);
+                                return;
+                            }
+                            // add tool to the list of tool ids to prevent duplicates
+                            toolIds.push(tool.name);
+                            toolNames.push(`${tool.name} (${child.config.name})`);
+                            toolMap.set(tool.name, child.id);
+                            const structuredTool = {
+                                type: "function",
+                                function: {
+                                    name: tool.name,
+                                    description: tool.description,
+                                },
+                            };
+                            if (tool.inputSchema) {
+                                structuredTool.function.parameters = tool.inputSchema;
+                            }
+                            structuredMcpTools.push(structuredTool);
+                        });
+                        tools.push(...structuredMcpTools);
+                    }
+                }
+            }
+            ;
             // Optional Debug Message with the config
             if (debugConfig) {
                 const messageLines = [];
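
Note on the hunk above: after fetching tools with api.fetchMcpTools, the Node narrows them with an optional whitelist/blacklist filter before exposing them to the LLM. The filter rule, pulled out as a standalone sketch for readability (the function name and TypeScript types are illustrative and not exports of this package):

    // Minimal sketch of the MCP tool filter applied in the hunk above.
    // `McpTool` and `filterMcpTools` are illustrative names, not part of @cognigy/rest-api-client.
    interface McpTool {
        name: string;
        description?: string;
        inputSchema?: Record<string, unknown>;
    }

    type ToolFilter = "none" | "whitelist" | "blacklist";

    function filterMcpTools(
        tools: McpTool[],
        toolFilter: ToolFilter | undefined,
        whitelist?: string[],
        blacklist?: string[],
    ): McpTool[] {
        return tools.filter((tool) => {
            if (toolFilter && toolFilter !== "none") {
                if (toolFilter === "whitelist" && whitelist) {
                    // only explicitly listed tool names pass
                    return whitelist.map((item) => item.trim()).includes(tool.name);
                }
                if (toolFilter === "blacklist") {
                    // a missing blacklist allows every tool
                    if (!blacklist) {
                        return true;
                    }
                    return !blacklist.map((item) => item.trim()).includes(tool.name);
                }
                // mirrors the original's implicit undefined: filter set but no matching list keeps nothing
                return false;
            }
            // no filter configured: keep everything
            return true;
        });
    }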
@@ -1035,14 +1141,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
                // Safety settings
-                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(
-                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(
-                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(
-                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(
+                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_t = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _t === void 0 ? void 0 : _t.avoidHarmfulContent}`);
+                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_u = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _u === void 0 ? void 0 : _u.avoidUngroundedContent}`);
+                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_v = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _v === void 0 ? void 0 : _v.avoidCopyrightInfringements}`);
+                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_w = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _w === void 0 ? void 0 : _w.preventJailbreakAndManipulation}`);
                // Tools
-                if (
+                if (toolNames.length > 0) {
                    messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
-
+                    toolNames.forEach(toolName => messageLines.push(`- ${toolName}`));
                }
                // Memory
                if (memoryType !== "none") {
@@ -1094,7 +1200,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                    messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
                    messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LABEL ${config.ttsLabel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
                    messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_DISABLE_CACHE ${config.ttsDisableCache || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
-                (
+                (_x = api.logDebugMessage) === null || _x === void 0 ? void 0 : _x.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
            }
            // keep this after the debug message since the "retrieve_knowledge" tool is implicit
            // we only add this tool if at least one knowledge source is enabled
@@ -1138,14 +1244,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                transcript.length > 0 &&
                transcript[transcript.length - 1].role === TranscriptRole.USER) {
                const userInput = transcript[transcript.length - 1];
-                const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((
+                const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_y = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _y === void 0 ? void 0 : _y.text) || input.text}`;
                transcript[transcript.length - 1].payload.text = enhancedInput;
            }
            const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
            const _messageId = randomUUID();
            const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
                // Temp fix to override the transcript if needed
-                transcript: ((
+                transcript: ((_z = context === null || context === void 0 ? void 0 : context._cognigy) === null || _z === void 0 ? void 0 : _z.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
                var _a;
                text = isStreamingChannel ? text : text.trim();
                if (text) {
@@ -1169,45 +1275,51 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                };
            }
            // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
-            (
+            (_0 = api.setAnalyticsData) === null || _0 === void 0 ? void 0 : _0.call(api, "understood", "true");
            input.understood = true;
-            const fullLlmResult = yield ((
+            const fullLlmResult = yield ((_1 = api.runGenerativeAIPrompt) === null || _1 === void 0 ? void 0 : _1.call(api, llmPromptOptions, "aiAgent"));
            const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
            const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
            const tokenUsage = fullLlmResult.tokenUsage;
            // Send optional debug message with token usage
            if (debugLogTokenCount && tokenUsage) {
-                (
+                (_2 = api.logDebugMessage) === null || _2 === void 0 ? void 0 : _2.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
            }
            // Identify if the result is a tool call
            // If response is a tool call, set next node for Tools
            if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
                const mainToolCall = llmResult.toolCalls[0];
+                let isMcpToolCall = false;
                // Find the child node with the toolId of the tool call
-
+                let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "aiAgentJobTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
+                if (!toolChild && toolMap.has(mainToolCall.function.name)) {
+                    // If the tool call is from an MCP tool, set the next node to the corresponding child node
+                    toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
+                    isMcpToolCall = true;
+                }
                if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
-                    (
+                    (_3 = api.logDebugError) === null || _3 === void 0 ? void 0 : _3.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
                }
                // Add last tool call to session state for loading it from Tool Answer Node
                api.updateSessionStateValues({
-                    lastToolCall: {
-                        llmProvider,
-                        aiAgentJobNode: {
+                    lastToolCall: Object.assign(Object.assign({ llmProvider, aiAgentJobNode: {
                            flow: flowReferenceId,
-                            node: nodeId
-                        },
-
-
+                            node: nodeId,
+                        } }, (isMcpToolCall && {
+                        mcpServerUrl: (_4 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _4 === void 0 ? void 0 : _4.mcpServerUrl,
+                        timeout: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.timeout,
+                        mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+                    })), { toolCall: mainToolCall }),
                });
                // if there are any parameters/arguments, add them to the input slots
                if (mainToolCall.function.arguments) {
-                    input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (
+                    input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_7 = (_6 = input.aiAgent) === null || _6 === void 0 ? void 0 : _6.toolArgs) !== null && _7 !== void 0 ? _7 : {}), mainToolCall.function.arguments) });
                }
                // Debug Message for Tool Calls, configured in the Tool Node
-                if ((
+                if ((_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.debugMessage) {
                    const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
                    // Arguments / Parameters Slots
-                    const slots = ((
+                    const slots = ((_9 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _9 === void 0 ? void 0 : _9.arguments) && Object.keys(mainToolCall.function.arguments);
                    const hasSlots = slots && slots.length > 0;
                    messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
                    if (hasSlots) {
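
Note on the hunk above: the session-state entry written by api.updateSessionStateValues now optionally carries MCP routing data in addition to the tool call itself. A rough TypeScript shape of what gets stored, inferred from the diff (the interface name and the primitive types are assumptions; the package's own typings may differ):

    // Inferred shape of the `lastToolCall` session-state entry written in the hunk above.
    // Illustrative only; not copied from the package's type definitions.
    interface LastToolCallState {
        llmProvider?: string;
        aiAgentJobNode: {
            flow: string; // flowReferenceId of the AI Agent Job node
            node: string; // nodeId of the AI Agent Job node
        };
        // Present only when the call targets a tool fetched from an MCP server:
        mcpServerUrl?: string;
        timeout?: number;
        mcpToolNode?: string; // id of the aiAgentJobMCPTool child node the tool was loaded from
        // The raw tool call returned by the LLM:
        toolCall: {
            id: string;
            function: {
                name: string;
                arguments?: Record<string, unknown>;
            };
        };
    }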
@@ -1222,7 +1334,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                        messageLines.push(`- ${slot}: ${slotValueAsString}`);
                    });
                }
-                (
+                (_10 = api.logDebugMessage) === null || _10 === void 0 ? void 0 : _10.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
            }
            if (toolChild) {
                api.setNextNode(toolChild.id);
@@ -1247,7 +1359,17 @@ export const AI_AGENT_JOB = createNodeDescriptor({
            }
            // Optionally output the result immediately
            if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
-                yield ((
+                yield ((_11 = api.output) === null || _11 === void 0 ? void 0 : _11.call(api, llmResult.result, {}));
+            }
+            else if (llmResult.finishReason && llmPromptOptions.stream) {
+                // send the finishReason as last output for a stream
+                (_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, "", {
+                    _cognigy: {
+                        _preventTranscript: true,
+                        _messageId,
+                        _finishReason: llmResult.finishReason,
+                    }
+                });
            }
            // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
            if (llmResult.result && llmPromptOptions.stream) {
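
Note on the hunk above: when streaming, the Node now emits one final empty output whose _cognigy payload carries the finish reason. A hedged sketch of how a consumer might detect the end of a streamed answer; the _cognigy, _messageId, _finishReason and _preventTranscript keys come from the diff, while the wrapper type and helper name are assumptions:

    // Sketch of a consumer-side check for the end-of-stream marker added above.
    // `StreamedOutput` and `isEndOfStream` are illustrative helpers, not package exports.
    interface StreamedOutput {
        text: string;
        data?: {
            _cognigy?: {
                _messageId?: string;
                _finishReason?: string;
                _preventTranscript?: boolean;
            };
        };
    }

    function isEndOfStream(output: StreamedOutput): boolean {
        // The final chunk is an empty text output that carries a finish reason.
        return output.text === "" && Boolean(output.data?._cognigy?._finishReason);
    }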
@@ -1264,7 +1386,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
            }
            // Add response to Cognigy Input/Context for further usage
            if (storeLocation === "context") {
-                (
+                (_13 = api.addToContext) === null || _13 === void 0 ? void 0 : _13.call(api, contextKey, llmResult, "simple");
            }
            else if (storeLocation === "input") {
                api.addToInput(inputKey, llmResult);
@@ -1277,14 +1399,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
            const errorDetails = {
                name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
                code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
-                message: (error === null || error === void 0 ? void 0 : error.message) || ((
+                message: (error === null || error === void 0 ? void 0 : error.message) || ((_14 = error.originalErrorDetails) === null || _14 === void 0 ? void 0 : _14.message),
            };
-            (
+            (_15 = api.emitEvent) === null || _15 === void 0 ? void 0 : _15.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
            if (logErrorToSystem) {
-                (
+                (_16 = api.log) === null || _16 === void 0 ? void 0 : _16.call(api, "error", JSON.stringify(errorDetails));
            }
            if (errorHandling !== "stop") {
-                (
+                (_17 = api.logDebugError) === null || _17 === void 0 ? void 0 : _17.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
            }
            if (storeErrorInInput) {
                input.aiAgent = input.aiAgent || {};
@@ -1293,7 +1415,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
            if (errorHandling === "continue") {
                // output the timeout message
                if (errorMessage) {
-                    yield ((
+                    yield ((_18 = api.output) === null || _18 === void 0 ? void 0 : _18.call(api, errorMessage, null));
                }
                // Set default node as next node
                const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1305,7 +1427,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                if (!errorHandlingGotoTarget) {
                    throw new Error("GoTo Target is required");
                }
-                if (!((
+                if (!((_19 = api.checkThink) === null || _19 === void 0 ? void 0 : _19.call(api, nodeId))) {
                    api.resetNextNodes();
                    yield api.executeFlow({
                        flowNode: {
@@ -0,0 +1,178 @@
+import { __awaiter } from "tslib";
+/* Custom modules */
+import { createNodeDescriptor } from "../../../createNodeDescriptor";
+import { TranscriptEntryType, TranscriptRole, } from "../../../../interfaces/transcripts/transcripts";
+export const AI_AGENT_JOB_CALL_MCP_TOOL = createNodeDescriptor({
+    type: "aiAgentJobCallMCPTool",
+    defaultLabel: "Call MCP Tool",
+    summary: "UI__NODE_EDITOR__SERVICE__AI_AGENT_CALL_MCP_TOOL__SUMMARY",
+    parentType: "",
+    tokens: [
+        {
+            type: "input",
+            label: "MCP Tool Result",
+            script: "input.aiAgent.toolResult",
+        },
+    ],
+    fields: [
+        {
+            key: "storeLocation",
+            type: "select",
+            label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__LABEL",
+            defaultValue: "input",
+            params: {
+                options: [
+                    {
+                        label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__INPUT__LABEL",
+                        value: "input",
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__CONTEXT__LABEL",
+                        value: "context",
+                    },
+                ],
+            },
+        },
+        {
+            key: "inputKey",
+            type: "cognigyText",
+            label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__INPUT_KEY__LABEL",
+            defaultValue: "aiAgent.toolResult",
+            condition: {
+                key: "storeLocation",
+                value: "input",
+            },
+        },
+        {
+            key: "contextKey",
+            type: "cognigyText",
+            label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CONTEXT_KEY__LABEL",
+            defaultValue: "aiAgent.toolResult",
+            condition: {
+                key: "storeLocation",
+                value: "context",
+            },
+        },
+        {
+            key: "resolveImmediately",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_CALL_MCP_TOOL__FIELDS__RESOLVE_IMMEDIATELY__LABEL",
+            type: "toggle",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_CALL_MCP_TOOL__FIELDS__RESOLVE_IMMEDIATELY__DESCRIPTION",
+            defaultValue: false,
+        },
+        {
+            key: "debugToolResult",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_CALL_MCP_TOOL__FIELDS__DEBUG_TOOL_RESULT__LABEL",
+            type: "toggle",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_CALL_MCP_TOOL__FIELDS__DEBUG_TOOL_RESULT__DESCRIPTION",
+            defaultValue: false,
+        },
+    ],
+    sections: [
+        {
+            key: "storage",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_CALL_MCP_TOOL__SECTIONS__STORAGE__LABEL",
+            defaultCollapsed: false,
+            fields: ["storeLocation", "inputKey", "contextKey"],
+        },
+        {
+            key: "debugging",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__DEBUG_SETTINGS__LABEL",
+            defaultCollapsed: true,
+            fields: ["debugToolResult"],
+        },
+    ],
+    form: [
+        { type: "field", key: "resolveImmediately" },
+        { type: "section", key: "storage" },
+        { type: "section", key: "debugging" },
+    ],
+    appearance: {
+        color: "#252525",
+    },
+    tags: ["ai", "aiAgent"],
+    function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
+        const { api } = cognigy;
+        const { storeLocation, contextKey, inputKey, resolveImmediately, debugToolResult } = config;
+        const sessionState = yield api.loadSessionState();
+        const toolCall = (_a = sessionState.lastToolCall) === null || _a === void 0 ? void 0 : _a.toolCall;
+        const aiAgentJobNode = (_b = sessionState.lastToolCall) === null || _b === void 0 ? void 0 : _b.aiAgentJobNode;
+        const mcpServerUrl = (_c = sessionState.lastToolCall) === null || _c === void 0 ? void 0 : _c.mcpServerUrl;
+        const timeout = (_d = sessionState.lastToolCall) === null || _d === void 0 ? void 0 : _d.timeout;
+        if (!(toolCall === null || toolCall === void 0 ? void 0 : toolCall.id)) {
+            (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, "UI__DEBUG_MODE__AI_AGENT_ANSWER__ERROR__MESSAGE");
+        }
+        if (toolCall && aiAgentJobNode && mcpServerUrl && timeout) {
+            let toolResult = null;
+            let fullResult = null;
+            try {
+                toolResult = yield api.executeMcpTool({
+                    mcpServerUrl,
+                    toolName: toolCall.function.name,
+                    toolArgs: toolCall.function.arguments,
+                    timeout,
+                });
+                fullResult = JSON.stringify(toolResult, null, 2);
+                if (debugToolResult) {
+                    (_f = api.logDebugMessage) === null || _f === void 0 ? void 0 : _f.call(api, `Tool <b>${(_g = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _g === void 0 ? void 0 : _g.name}</b> called successfully.<br><br><b>Result:</b><br>${fullResult}`);
+                }
+            }
+            catch (error) {
+                const errorDetails = error instanceof Error
+                    ? {
+                        name: error.name,
+                        message: error.message,
+                    }
+                    : error;
+                (_h = api.logDebugError) === null || _h === void 0 ? void 0 : _h.call(api, `Failed to execute MCP Tool ${(_j = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _j === void 0 ? void 0 : _j.name}:<br>${JSON.stringify(errorDetails, null, 2)}`);
+            }
+            // Add result to Cognigy Input/Context for further usage
+            if (storeLocation === "context") {
+                (_k = api.addToContext) === null || _k === void 0 ? void 0 : _k.call(api, contextKey, toolResult, "simple");
+            }
+            else if (storeLocation === "input") {
+                api.addToInput(inputKey, toolResult);
+            }
+            const { flow, node } = aiAgentJobNode;
+            if (resolveImmediately && fullResult && flow && node) {
+                // Add Tool Call Message to Transcript
+                const toolCallTranscriptStep = {
+                    role: TranscriptRole.ASSISTANT,
+                    type: TranscriptEntryType.TOOL_CALL,
+                    source: "system",
+                    payload: {
+                        name: toolCall.function.name,
+                        id: toolCall.id,
+                        input: toolCall.function.arguments,
+                    },
+                };
+                yield api.addTranscriptStep(toolCallTranscriptStep);
+                // Add Tool Answer Message to Transcript
+                const toolAnswer = {
+                    role: TranscriptRole.TOOL,
+                    type: TranscriptEntryType.TOOL_ANSWER,
+                    source: "system",
+                    payload: {
+                        toolCallId: toolCall.id,
+                        content: fullResult,
+                    },
+                };
+                yield api.addTranscriptStep(toolAnswer);
+                api.resetNextNodes();
+                // remove the call from the session state, because the call has been answered
+                api.updateSessionStateValues({
+                    lastToolCall: undefined,
+                });
+                yield api.executeFlow({
+                    flowNode: {
+                        flow,
+                        node,
+                    },
+                    absorbContext: true,
+                });
+            }
+        }
+    }),
+});
+//# sourceMappingURL=aiAgentJobCallMCPTool.js.map
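
Note on the new file above: the Call MCP Tool node stores the tool result under the configured key (default "aiAgent.toolResult") in either the Input or the Context object. A minimal sketch of reading it back in a later step of the Flow, assuming those default keys; the parameter types are illustrative and the real IInput/IContext interfaces live in the package typings:

    // Illustrative helper, not a package export.
    function readMcpToolResult(
        input: { aiAgent?: { toolResult?: unknown } },
        context: { aiAgent?: { toolResult?: unknown } },
    ): unknown {
        // storeLocation "input" writes to input.aiAgent.toolResult,
        // storeLocation "context" writes to context.aiAgent.toolResult
        return input.aiAgent?.toolResult ?? context.aiAgent?.toolResult;
    }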