@cognigy/rest-api-client 4.97.0 → 4.99.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/build/RestAPIClient.js +7 -0
- package/build/apigroups/AIOpsCenterAPIGroup_2_0.js +22 -0
- package/build/apigroups/ResourcesAPIGroup_2_0.js +8 -8
- package/build/apigroups/index.js +3 -1
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +0 -44
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage0.js +0 -45
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage1.js +0 -43
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stageError.js +0 -45
- package/build/shared/charts/descriptors/analytics/completeGoal.js +2 -2
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +6 -2
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/mistralProviderConnection.js +11 -0
- package/build/shared/charts/descriptors/index.js +2 -0
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -1
- package/build/shared/charts/descriptors/logic/think.js +2 -4
- package/build/shared/charts/descriptors/message/say.js +6 -3
- package/build/shared/charts/descriptors/service/GPTPrompt.js +14 -4
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +226 -66
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +180 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +196 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentToolAnswer.js +5 -1
- package/build/shared/charts/descriptors/service/index.js +5 -1
- package/build/shared/charts/descriptors/voice/mappers/hangup.mapper.js +9 -8
- package/build/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
- package/build/shared/charts/descriptors/voicegateway/nodes/hangup.js +1 -1
- package/build/shared/charts/descriptors/voicegateway2/nodes/hangup.js +24 -2
- package/build/shared/constants.js +8 -1
- package/build/shared/interfaces/IProfileSchema.js +1 -1
- package/build/shared/interfaces/amqpInterface.js +3 -0
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +10 -1
- package/build/shared/interfaces/messageAPI/endpoints.js +6 -1
- package/build/shared/interfaces/messageAPI/handover.js +6 -1
- package/build/shared/interfaces/resources/IAiAgent.js +3 -1
- package/build/shared/interfaces/resources/ILargeLanguageModel.js +10 -1
- package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -0
- package/build/shared/interfaces/restAPI/opsCenter/alerts/IIndexOpsCenterAlertsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/alerts/IOpsCenterAlert.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/component/IOpsCenterComponents.js +34 -0
- package/build/shared/interfaces/restAPI/opsCenter/errors/IDeleteOpsCenterErrorRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/errors/IGetOpsCenterErrorRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/errors/IIndexOpsCenterErrorsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/errors/IOpsCenterError.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetrics.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsConfigRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRangeRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/metrics/IOpsCenterMetricsConfig.js +6 -0
- package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IGetOpsCenterObservationConfigRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +4 -0
- package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IPatchOpsCenterObservationConfigRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/opsCenter/observationConfig/ISetupOpsCenterObservationConfigRest_2_0.js +3 -0
- package/build/shared/interfaces/security/IIdentityProvider.js +1 -0
- package/dist/esm/RestAPIClient.js +7 -0
- package/dist/esm/apigroups/AIOpsCenterAPIGroup_2_0.js +18 -0
- package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +8 -8
- package/dist/esm/apigroups/index.js +1 -0
- package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +0 -44
- package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage0.js +0 -45
- package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage1.js +0 -43
- package/dist/esm/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stageError.js +0 -45
- package/dist/esm/shared/charts/descriptors/analytics/completeGoal.js +2 -2
- package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +4 -1
- package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/mistralProviderConnection.js +8 -0
- package/dist/esm/shared/charts/descriptors/index.js +3 -1
- package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +12 -3
- package/dist/esm/shared/charts/descriptors/logic/think.js +2 -4
- package/dist/esm/shared/charts/descriptors/message/say.js +6 -3
- package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +18 -8
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +226 -66
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +178 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +193 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentToolAnswer.js +5 -1
- package/dist/esm/shared/charts/descriptors/service/index.js +2 -0
- package/dist/esm/shared/charts/descriptors/voice/mappers/hangup.mapper.js +9 -8
- package/dist/esm/shared/charts/descriptors/voice/nodes/hangup.js +1 -1
- package/dist/esm/shared/charts/descriptors/voicegateway/nodes/hangup.js +1 -1
- package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/hangup.js +24 -2
- package/dist/esm/shared/constants.js +7 -0
- package/dist/esm/shared/interfaces/IProfileSchema.js +1 -1
- package/dist/esm/shared/interfaces/amqpInterface.js +3 -0
- package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +10 -1
- package/dist/esm/shared/interfaces/messageAPI/endpoints.js +6 -1
- package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -1
- package/dist/esm/shared/interfaces/resources/IAiAgent.js +3 -1
- package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +9 -0
- package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/alerts/IIndexOpsCenterAlertsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/alerts/IOpsCenterAlert.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/component/IOpsCenterComponents.js +31 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IDeleteOpsCenterErrorRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IGetOpsCenterErrorRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IIndexOpsCenterErrorsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/errors/IOpsCenterError.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetrics.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsConfigRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRangeRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IGetOpsCenterMetricsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/metrics/IOpsCenterMetricsConfig.js +5 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IGetOpsCenterObservationConfigRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +3 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IPatchOpsCenterObservationConfigRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/ISetupOpsCenterObservationConfigRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/security/IIdentityProvider.js +1 -0
- package/package.json +2 -2
- package/types/index.d.ts +336 -7
package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +226 -66

@@ -30,7 +30,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
         collapsable: true,
         placement: {
             children: {
-                whitelist: ["aiAgentJobDefault", "aiAgentJobTool"],
+                whitelist: ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"],
             },
         },
     },
@@ -652,11 +652,10 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             defaultValue: "",
             type: "ttsSelect",
             label: "_unused_",
-            description: "_unused_",
             params: {
                 languageKey: "config.ttsLanguage",
                 modelKey: "config.ttsModel",
-                voiceKey: "config.ttsVoice"
+                voiceKey: "config.ttsVoice",
             },
             condition: {
                 key: "voiceSetting",
@@ -681,6 +680,37 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             type: "ttsSelect",
             defaultValue: "",
             label: "_unused_",
+        },
+        {
+            key: "ttsLabel",
+            type: "cognigyText",
+            defaultValue: "",
+            label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__TTS_LABEL__LABEL",
+            description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__TTS_LABEL__DESCRIPTION",
+            condition: {
+                and: [
+                    {
+                        negate: true,
+                        key: "ttsVendor",
+                        value: "", // We show the field for all vendors as soon as it is defined
+                    },
+                    {
+                        key: "voiceSetting",
+                        value: "jobVoice"
+                    }
+                ]
+            },
+        },
+        {
+            key: "ttsDisableCache",
+            type: "toggle",
+            defaultValue: false,
+            label: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__TTS_DISABLE_CACHE__LABEL",
+            description: "UI__NODE_EDITOR__VOICEGATEWAY2__SET_SESSION_CONFIG__TTS_DISABLE_CACHE__DESCRIPTION",
+            condition: {
+                key: "voiceSetting",
+                value: "jobVoice"
+            },
         }
     ],
     sections: [
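Taken together, the two new fields are only shown once a TTS vendor has been selected and the job-level voice setting is active. A minimal TypeScript sketch of how a condition object of this shape could be evaluated (the FieldCondition type and evaluateCondition helper are illustrative assumptions, not part of this package):

// Illustrative sketch only: shapes and evaluation semantics are inferred from the
// descriptor above, not taken from @cognigy/rest-api-client itself.
type FieldCondition =
    | { key: string; value: unknown; negate?: boolean }
    | { and: FieldCondition[] };

function evaluateCondition(condition: FieldCondition, config: Record<string, unknown>): boolean {
    if ("and" in condition) {
        // every sub-condition must hold
        return condition.and.every((c) => evaluateCondition(c, config));
    }
    const matches = config[condition.key] === condition.value;
    return condition.negate ? !matches : matches;
}

// With the "ttsLabel" condition above: show the field when ttsVendor is not ""
// AND voiceSetting is "jobVoice".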
@@ -731,6 +761,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             fields: [
                 "voiceSetting",
                 "ttsVendor",
+                "ttsLabel",
+                "ttsDisableCache",
             ],
         },
         {
@@ -803,7 +835,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
     ],
     tags: ["ai", "aiAgent"],
     function: async ({ cognigy, config, childConfigs, nodeId }) => {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19;
         const { api, context, input, profile, flowReferenceId } = cognigy;
         const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
         try {
@@ -819,6 +851,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                         ttsLanguage: aiAgent.voiceConfigs.ttsLanguage,
                         ttsModel: aiAgent.voiceConfigs.ttsModel,
                         ttsVoice: aiAgent.voiceConfigs.ttsVoice,
+                        ttsLabel: aiAgent.voiceConfigs.ttsLabel,
+                        ttsDisableCache: aiAgent.voiceConfigs.ttsDisableCache,
                     });
                 }
                 else {
@@ -828,6 +862,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                         ttsLanguage: "",
                         ttsModel: "",
                         ttsVoice: "",
+                        ttsLabel: "",
+                        ttsDisableCache: false,
                     });
                 }
             }
@@ -839,7 +875,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 const voiceSettings = (0, setSessionConfig_mapper_2.voiceConfigParamsToVoiceSettings)(config, api);
                 const payload = setSessionConfig_mapper_1.setSessionConfig.handleVGInput(voiceSettings, sessionParams || {}, api);
                 if (payload && Object.keys(payload).length > 0) {
-                    api.say(null, {
+                    (_b = api.say) === null || _b === void 0 ? void 0 : _b.call(api, null, {
                         _cognigy: payload,
                     });
                 }
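The replaced api.say(null, ...) call is the down-leveled form of an optional call, a pattern that repeats through the rest of this file with the _b through _19 temporaries. Assuming the published JS is tsc output, the changed lines correspond roughly to this source form:

// Sketch of the pre-compilation shape of the changed call above (assumption: the
// published build is tsc output of optional chaining).
if (payload && Object.keys(payload).length > 0) {
    api.say?.(null, {
        _cognigy: payload,
    });
}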
@@ -849,16 +885,16 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                     throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
                 }
             }
-            const
+            const _20 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _20, cleanedProfile = __rest(_20, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
             const userMemory = (0, getUserMemory_1.getUserMemory)(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
             /**
              * ----- Knowledge Search Section -----
              */
             let knowledgeSearchResponseData;
             const sessionState = await api.loadSessionState();
-            const lastToolCall = (
+            const lastToolCall = (_c = sessionState.lastToolCall) === null || _c === void 0 ? void 0 : _c.toolCall;
             if (knowledgeSearchBehavior === "always" ||
-                (knowledgeSearchBehavior === "onDemand" && ((
+                (knowledgeSearchBehavior === "onDemand" && ((_d = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _d === void 0 ? void 0 : _d.name) === "retrieve_knowledge")) {
                 const knowledgeStoreIds = [];
                 if (knowledgeSearchAiAgentKnowledge && aiAgent.knowledgeReferenceId) {
                     knowledgeStoreIds.push(aiAgent.knowledgeReferenceId);
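The new const _20 = profile line is the compiled form of an object rest destructuring that strips privacy-related keys before the profile reaches getUserMemory. A sketch of the uncompiled equivalent, using the same variable names as the hunk:

// Uncompiled equivalent of the __rest(...) call added above (sketch; same in-scope
// variables as the surrounding function body).
const { profileId, accepted_gdpr, prevent_data_collection, privacy_policy, ...cleanedProfile } = profile;
const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);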
@@ -866,8 +902,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 if (knowledgeSearchJobKnowledge && knowledgeSearchJobStore) {
                     knowledgeStoreIds.push(knowledgeSearchJobStore);
                 }
-                if (knowledgeStoreIds.length > 0 && (input.text || ((
-                    let query = ((
+                if (knowledgeStoreIds.length > 0 && (input.text || ((_f = (_e = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _e === void 0 ? void 0 : _e.arguments) === null || _f === void 0 ? void 0 : _f.generated_prompt))) {
+                    let query = ((_h = (_g = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _g === void 0 ? void 0 : _g.arguments) === null || _h === void 0 ? void 0 : _h.generated_prompt) || input.text;
                     if (knowledgeSearchBehavior === "always" && knowledgeSearchGenerateSearchPrompt) {
                         const generatedSearchPrompt = await (0, generateSearchPrompt_1.generateSearchPrompt)({ api, userMemory, llmProviderReferenceId, debugLogTokenCount, memoryContextInjection });
                         if (generatedSearchPrompt === null || generatedSearchPrompt === void 0 ? void 0 : generatedSearchPrompt.generated_prompt) {
@@ -883,10 +919,10 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                         knowledgeStoreIds,
                     };
                     if (knowledgeSearchBehavior === "onDemand") {
-                        const generated_buffer_phrase = (
+                        const generated_buffer_phrase = (_k = (_j = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _j === void 0 ? void 0 : _j.arguments) === null || _k === void 0 ? void 0 : _k.generated_buffer_phrase;
                         if (generated_buffer_phrase) {
                             // output the generated buffer phrase. Don't add it to the transcript, else the LLM will repeat it next time.
-                            await ((
+                            await ((_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, generated_buffer_phrase, {
                                 _cognigy: {
                                     _preventTranscript: true
                                 }
@@ -916,7 +952,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                         if (query) {
                             messageLines.push(`\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__SEARCH_PROMPT</b> ${query}`);
                         }
-                        if ((
+                        if ((_m = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _m === void 0 ? void 0 : _m.length) {
                            knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK.forEach((result, index) => {
                                var _a;
                                messageLines.push(`\nTop ${index + 1}:`);
@@ -928,7 +964,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                        else {
                            messageLines.push("UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__NO_RESULTS");
                        }
-                        (
+                        (_o = api.logDebugMessage) === null || _o === void 0 ? void 0 : _o.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
                    }
                    // Knowledge Search "onDemand" responds to a tool call
                    if (knowledgeSearchBehavior === "onDemand") {
@@ -957,7 +993,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                            await api.addTranscriptStep(toolAnswer);
                        }
                    }
-                    if (((
+                    if (((_p = lastToolCall === null || lastToolCall === void 0 ? void 0 : lastToolCall.function) === null || _p === void 0 ? void 0 : _p.name) === "retrieve_knowledge") {
                        // remove the retrieve_knowledge toolCall from session state to avoid infinite loops
                        api.updateSessionStateValues({
                            lastToolCall: undefined
@@ -969,32 +1005,39 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
             // create the system Message from the AI Agent resource and this Node's config storage
             const systemMessage = (0, createSystemMessage_1.createSystemMessage)(aiAgent, input, jobName, jobDescription, jobInstructions, userMemory, memoryContextInjection, isOnDemandKnowledgeStoreConfigured ? "onDemand" : "none");
             // Create Tools JSON
+            /** This is the list of tools that are used in the AI Agent Job */
             const tools = [];
+            /** Array of tool IDs for deduping */
             const toolIds = [];
-
+            /** Map of MCP tool IDs to their respective node IDs they were loaded from */
+            const toolMap = new Map();
+            /** Array of tool names for listing in the debug message */
+            const toolNames = [];
+            for (const child of childConfigs) {
                 if (child.type === "aiAgentJobDefault") {
-
+                    continue;
                 }
-                ;
                 const toolId = child.config.toolId;
-                if (
-
-
-
-
-
-
-
-
-
-
-
+                if (child.type === "aiAgentJobTool" &&
+                    (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
+                    if (!toolId) {
+                        throw new Error(`Tool ID is missing in Tool Node configuration.`);
+                    }
+                    const parsedToolId = api.parseCognigyScriptText(toolId);
+                    if (!(0, createSystemMessage_1.validateToolId)(parsedToolId)) {
+                        throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
+                    }
+                    if (toolIds.includes(parsedToolId)) {
+                        throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
+                    }
+                    toolIds.push(parsedToolId);
+                    toolNames.push(parsedToolId);
                    const tool = {
                        type: "function",
                        function: {
                            name: parsedToolId,
                            description: api.parseCognigyScriptText(child.config.description),
-                        }
+                        },
                    };
                    if (useStrict) {
                        tool.function.strict = true;
|
|
|
1004
1047
|
}
|
|
1005
1048
|
tools.push(tool);
|
|
1006
1049
|
}
|
|
1007
|
-
|
|
1050
|
+
if (child.type === "aiAgentJobMCPTool" &&
|
|
1051
|
+
(!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
|
|
1052
|
+
if (!child.config.mcpServerUrl) {
|
|
1053
|
+
throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
|
|
1054
|
+
}
|
|
1055
|
+
const mcpServerUrl = child.config.mcpServerUrl;
|
|
1056
|
+
const timeout = child.config.timeout;
|
|
1057
|
+
const cacheTools = child.config.cacheTools;
|
|
1058
|
+
const sendDebug = child.config.debugMessageFetchedTools;
|
|
1059
|
+
const toolFilter = child.config.toolFilter;
|
|
1060
|
+
let mcpTools = null;
|
|
1061
|
+
try {
|
|
1062
|
+
mcpTools = await api.fetchMcpTools({
|
|
1063
|
+
mcpServerUrl,
|
|
1064
|
+
timeout,
|
|
1065
|
+
cacheTools,
|
|
1066
|
+
});
|
|
1067
|
+
}
|
|
1068
|
+
catch (error) {
|
|
1069
|
+
const errorDetails = error instanceof Error
|
|
1070
|
+
? {
|
|
1071
|
+
name: error.name,
|
|
1072
|
+
message: error.message,
|
|
1073
|
+
}
|
|
1074
|
+
: error;
|
|
1075
|
+
(_q = api.logDebugError) === null || _q === void 0 ? void 0 : _q.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
|
|
1076
|
+
}
|
|
1077
|
+
if (mcpTools) {
|
|
1078
|
+
if (sendDebug) {
|
|
1079
|
+
if (mcpTools.length === 0) {
|
|
1080
|
+
(_r = api.logDebugMessage) === null || _r === void 0 ? void 0 : _r.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
|
|
1081
|
+
}
|
|
1082
|
+
if (mcpTools.length > 0) {
|
|
1083
|
+
const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
|
|
1084
|
+
mcpTools.forEach((tool) => {
|
|
1085
|
+
messageLines.push(`<br>- <b>${tool.name}</b>: ${tool.description}`);
|
|
1086
|
+
if (child.config.debugMessageParameters && tool.inputSchema) {
|
|
1087
|
+
messageLines.push(` <b>Parameters</b>:`);
|
|
1088
|
+
Object.keys(tool.inputSchema.properties).forEach((key) => {
|
|
1089
|
+
const parameter = tool.inputSchema.properties[key];
|
|
1090
|
+
const requiredText = tool.inputSchema.required && !tool.inputSchema.required.includes(key) ? " (optional)" : "";
|
|
1091
|
+
if (parameter.description) {
|
|
1092
|
+
messageLines.push(` - ${key} (${parameter.type}): ${parameter.description}${requiredText}`);
|
|
1093
|
+
}
|
|
1094
|
+
else {
|
|
1095
|
+
messageLines.push(` - ${key}: ${parameter.type}${requiredText}`);
|
|
1096
|
+
}
|
|
1097
|
+
});
|
|
1098
|
+
}
|
|
1099
|
+
});
|
|
1100
|
+
(_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "MCP Tool");
|
|
1101
|
+
}
|
|
1102
|
+
}
|
|
1103
|
+
const filteredMcpTools = mcpTools.filter((tool) => {
|
|
1104
|
+
if (toolFilter && toolFilter !== "none") {
|
|
1105
|
+
if (toolFilter === "whitelist" && child.config.whitelist) {
|
|
1106
|
+
const whitelist = child.config.whitelist.map((item) => item.trim());
|
|
1107
|
+
return whitelist.includes(tool.name);
|
|
1108
|
+
}
|
|
1109
|
+
else if (toolFilter === "blacklist") {
|
|
1110
|
+
// If the blacklist is falsy, all tools are allowed
|
|
1111
|
+
if (!child.config.blacklist) {
|
|
1112
|
+
return true;
|
|
1113
|
+
}
|
|
1114
|
+
const blacklist = child.config.blacklist.map((item) => item.trim());
|
|
1115
|
+
return !blacklist.includes(tool.name);
|
|
1116
|
+
}
|
|
1117
|
+
}
|
|
1118
|
+
else {
|
|
1119
|
+
return true;
|
|
1120
|
+
}
|
|
1121
|
+
});
|
|
1122
|
+
const structuredMcpTools = [];
|
|
1123
|
+
filteredMcpTools.forEach((tool) => {
|
|
1124
|
+
var _a;
|
|
1125
|
+
if (toolIds.includes(tool.name)) {
|
|
1126
|
+
(_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Tool "${tool.name}" from MCP Tool "${child.config.name}" is not unique and will not be added. Please ensure each tool has a unique id.`);
|
|
1127
|
+
return;
|
|
1128
|
+
}
|
|
1129
|
+
// add tool to the list of tool ids to prevent duplicates
|
|
1130
|
+
toolIds.push(tool.name);
|
|
1131
|
+
toolNames.push(`${tool.name} (${child.config.name})`);
|
|
1132
|
+
toolMap.set(tool.name, child.id);
|
|
1133
|
+
const structuredTool = {
|
|
1134
|
+
type: "function",
|
|
1135
|
+
function: {
|
|
1136
|
+
name: tool.name,
|
|
1137
|
+
description: tool.description,
|
|
1138
|
+
},
|
|
1139
|
+
};
|
|
1140
|
+
if (tool.inputSchema) {
|
|
1141
|
+
structuredTool.function.parameters = tool.inputSchema;
|
|
1142
|
+
}
|
|
1143
|
+
structuredMcpTools.push(structuredTool);
|
|
1144
|
+
});
|
|
1145
|
+
tools.push(...structuredMcpTools);
|
|
1146
|
+
}
|
|
1147
|
+
}
|
|
1148
|
+
}
|
|
1149
|
+
;
|
|
1008
1150
|
// Optional Debug Message with the config
|
|
1009
1151
|
if (debugConfig) {
|
|
1010
1152
|
const messageLines = [];
|
|
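The whitelist/blacklist handling inside the new MCP branch can be read as a small pure filter. The sketch below mirrors that logic; the McpTool and McpToolFilterConfig shapes are illustrative assumptions rather than exported types of the package:

// Sketch of the MCP tool filter added in this hunk. McpTool and McpToolFilterConfig
// are illustrative shapes for this example only.
interface McpTool { name: string; description?: string; inputSchema?: unknown; }
interface McpToolFilterConfig { toolFilter?: "none" | "whitelist" | "blacklist"; whitelist?: string[]; blacklist?: string[]; }

function filterMcpTools(tools: McpTool[], config: McpToolFilterConfig): McpTool[] {
    return tools.filter((tool) => {
        if (config.toolFilter && config.toolFilter !== "none") {
            if (config.toolFilter === "whitelist" && config.whitelist) {
                return config.whitelist.map((item) => item.trim()).includes(tool.name);
            }
            if (config.toolFilter === "blacklist") {
                // A missing blacklist lets every tool through.
                if (!config.blacklist) {
                    return true;
                }
                return !config.blacklist.map((item) => item.trim()).includes(tool.name);
            }
            // "whitelist" without a configured list: as in the compiled code, nothing is
            // returned for this branch, so the tool is dropped.
            return false;
        }
        return true;
    });
}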
@@ -1012,14 +1154,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
                // Safety settings
-                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(
-                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(
-                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(
-                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(
+                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_t = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _t === void 0 ? void 0 : _t.avoidHarmfulContent}`);
+                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_u = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _u === void 0 ? void 0 : _u.avoidUngroundedContent}`);
+                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_v = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _v === void 0 ? void 0 : _v.avoidCopyrightInfringements}`);
+                messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_w = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _w === void 0 ? void 0 : _w.preventJailbreakAndManipulation}`);
                // Tools
-                if (
+                if (toolNames.length > 0) {
                    messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
-
+                    toolNames.forEach(toolName => messageLines.push(`- ${toolName}`));
                }
                // Memory
                if (memoryType !== "none") {
@@ -1069,7 +1211,9 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LANGUAGE ${config.ttsLanguage || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
                messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_MODEL ${config.ttsModel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
                messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
-                (
+                messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LABEL ${config.ttsLabel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
+                messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_DISABLE_CACHE ${config.ttsDisableCache || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
+                (_x = api.logDebugMessage) === null || _x === void 0 ? void 0 : _x.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
            }
            // keep this after the debug message since the "retrieve_knowledge" tool is implicit
            // we only add this tool if at least one knowledge source is enabled
@@ -1113,14 +1257,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                transcript.length > 0 &&
                transcript[transcript.length - 1].role === transcripts_1.TranscriptRole.USER) {
                const userInput = transcript[transcript.length - 1];
-                const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((
+                const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_y = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _y === void 0 ? void 0 : _y.text) || input.text}`;
                transcript[transcript.length - 1].payload.text = enhancedInput;
            }
            const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
            const _messageId = (0, crypto_1.randomUUID)();
            const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
                // Temp fix to override the transcript if needed
-                transcript: ((
+                transcript: ((_z = context === null || context === void 0 ? void 0 : context._cognigy) === null || _z === void 0 ? void 0 : _z.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
                var _a;
                text = isStreamingChannel ? text : text.trim();
                if (text) {
@@ -1144,45 +1288,51 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                };
            }
            // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
-            (
+            (_0 = api.setAnalyticsData) === null || _0 === void 0 ? void 0 : _0.call(api, "understood", "true");
            input.understood = true;
-            const fullLlmResult = await ((
+            const fullLlmResult = await ((_1 = api.runGenerativeAIPrompt) === null || _1 === void 0 ? void 0 : _1.call(api, llmPromptOptions, "aiAgent"));
            const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
            const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
            const tokenUsage = fullLlmResult.tokenUsage;
            // Send optional debug message with token usage
            if (debugLogTokenCount && tokenUsage) {
-                (
+                (_2 = api.logDebugMessage) === null || _2 === void 0 ? void 0 : _2.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
            }
            // Identify if the result is a tool call
            // If response is a tool call, set next node for Tools
            if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
                const mainToolCall = llmResult.toolCalls[0];
+                let isMcpToolCall = false;
                // Find the child node with the toolId of the tool call
-
-                if (toolChild
-
+                let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "aiAgentJobTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
+                if (!toolChild && toolMap.has(mainToolCall.function.name)) {
+                    // If the tool call is from an MCP tool, set the next node to the corresponding child node
+                    toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
+                    isMcpToolCall = true;
+                }
+                if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
+                    (_3 = api.logDebugError) === null || _3 === void 0 ? void 0 : _3.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
                }
                // Add last tool call to session state for loading it from Tool Answer Node
                api.updateSessionStateValues({
-                    lastToolCall: {
-                        llmProvider,
-                        aiAgentJobNode: {
+                    lastToolCall: Object.assign(Object.assign({ llmProvider, aiAgentJobNode: {
                            flow: flowReferenceId,
-                            node: nodeId
-                        },
-
-
+                            node: nodeId,
+                        } }, (isMcpToolCall && {
+                        mcpServerUrl: (_4 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _4 === void 0 ? void 0 : _4.mcpServerUrl,
+                        timeout: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.timeout,
+                        mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+                    })), { toolCall: mainToolCall }),
                });
                // if there are any parameters/arguments, add them to the input slots
                if (mainToolCall.function.arguments) {
-                    input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (
+                    input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_7 = (_6 = input.aiAgent) === null || _6 === void 0 ? void 0 : _6.toolArgs) !== null && _7 !== void 0 ? _7 : {}), mainToolCall.function.arguments) });
                }
                // Debug Message for Tool Calls, configured in the Tool Node
-                if ((
+                if ((_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.debugMessage) {
                    const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
                    // Arguments / Parameters Slots
-                    const slots = ((
+                    const slots = ((_9 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _9 === void 0 ? void 0 : _9.arguments) && Object.keys(mainToolCall.function.arguments);
                    const hasSlots = slots && slots.length > 0;
                    messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
                    if (hasSlots) {
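The session-state write above is compiled object-spread syntax. De-sugared, the shape stored for the Tool Answer node (using the same in-scope variables as the hunk) looks roughly like this:

// Sketch: uncompiled equivalent of the lastToolCall session-state update added above.
api.updateSessionStateValues({
    lastToolCall: {
        llmProvider,
        aiAgentJobNode: {
            flow: flowReferenceId,
            node: nodeId,
        },
        // Only present when the call targets a tool fetched from an MCP server:
        ...(isMcpToolCall && {
            mcpServerUrl: toolChild?.config?.mcpServerUrl,
            timeout: toolChild?.config?.timeout,
            mcpToolNode: toolChild?.id,
        }),
        toolCall: mainToolCall,
    },
});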
@@ -1197,7 +1347,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                            messageLines.push(`- ${slot}: ${slotValueAsString}`);
                        });
                    }
-                    (
+                    (_10 = api.logDebugMessage) === null || _10 === void 0 ? void 0 : _10.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
                }
                if (toolChild) {
                    api.setNextNode(toolChild.id);
@@ -1222,7 +1372,17 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
            }
            // Optionally output the result immediately
            if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
-                await ((
+                await ((_11 = api.output) === null || _11 === void 0 ? void 0 : _11.call(api, llmResult.result, {}));
+            }
+            else if (llmResult.finishReason && llmPromptOptions.stream) {
+                // send the finishReason as last output for a stream
+                (_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, "", {
+                    _cognigy: {
+                        _preventTranscript: true,
+                        _messageId,
+                        _finishReason: llmResult.finishReason,
+                    }
+                });
            }
            // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
            if (llmResult.result && llmPromptOptions.stream) {
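When streaming, the node now emits one final, transcript-suppressed message that carries only the finish reason. De-sugared, the added branch is roughly equivalent to the following, with the same in-scope variables as the hunk:

// Sketch of the uncompiled equivalent of the streaming finish-reason output added above
// (in the actual code this runs as the else-if branch after the outputImmediately case).
if (llmResult.finishReason && llmPromptOptions.stream) {
    api.output?.("", {
        _cognigy: {
            _preventTranscript: true,
            _messageId,
            _finishReason: llmResult.finishReason,
        },
    });
}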
@@ -1239,7 +1399,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
            }
            // Add response to Cognigy Input/Context for further usage
            if (storeLocation === "context") {
-                (
+                (_13 = api.addToContext) === null || _13 === void 0 ? void 0 : _13.call(api, contextKey, llmResult, "simple");
            }
            else if (storeLocation === "input") {
                api.addToInput(inputKey, llmResult);
@@ -1252,14 +1412,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
            const errorDetails = {
                name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
                code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
-                message: (error === null || error === void 0 ? void 0 : error.message) || ((
+                message: (error === null || error === void 0 ? void 0 : error.message) || ((_14 = error.originalErrorDetails) === null || _14 === void 0 ? void 0 : _14.message),
            };
-            (
+            (_15 = api.emitEvent) === null || _15 === void 0 ? void 0 : _15.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
            if (logErrorToSystem) {
-                (
+                (_16 = api.log) === null || _16 === void 0 ? void 0 : _16.call(api, "error", JSON.stringify(errorDetails));
            }
            if (errorHandling !== "stop") {
-                (
+                (_17 = api.logDebugError) === null || _17 === void 0 ? void 0 : _17.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
            }
            if (storeErrorInInput) {
                input.aiAgent = input.aiAgent || {};
@@ -1268,7 +1428,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
            if (errorHandling === "continue") {
                // output the timeout message
                if (errorMessage) {
-                    await ((
+                    await ((_18 = api.output) === null || _18 === void 0 ? void 0 : _18.call(api, errorMessage, null));
                }
                // Set default node as next node
                const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1280,7 +1440,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                if (!errorHandlingGotoTarget) {
                    throw new Error("GoTo Target is required");
                }
-                if (!((
+                if (!((_19 = api.checkThink) === null || _19 === void 0 ? void 0 : _19.call(api, nodeId))) {
                    api.resetNextNodes();
                    await api.executeFlow({
                        flowNode: {