@cognigy/rest-api-client 2025.12.0 → 2025.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +5 -0
- package/build/apigroups/ResourcesAPIGroup_2_0.js +4 -0
- package/build/shared/charts/descriptors/analytics/trackGoal.js +3 -1
- package/build/shared/charts/descriptors/index.js +5 -0
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +4 -2
- package/build/shared/charts/descriptors/message/question/question.js +12 -1
- package/build/shared/charts/descriptors/service/GPTPrompt.js +15 -1
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +32 -173
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
- package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +175 -0
- package/build/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +194 -0
- package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
- package/build/shared/charts/descriptors/service/index.js +11 -1
- package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +922 -0
- package/build/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +31 -0
- package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +196 -0
- package/build/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +139 -0
- package/build/shared/constants.js +1 -5
- package/build/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +3 -0
- package/build/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
- package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
- package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -18
- package/build/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +3 -0
- package/build/shared/interfaces/security/IPermission.js +2 -0
- package/build/shared/interfaces/security/IRole.js +3 -1
- package/build/shared/interfaces/security/index.js +1 -1
- package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +4 -0
- package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +3 -1
- package/dist/esm/shared/charts/descriptors/index.js +6 -1
- package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +4 -2
- package/dist/esm/shared/charts/descriptors/message/question/question.js +12 -1
- package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +15 -1
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +33 -174
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
- package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +172 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +192 -0
- package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
- package/dist/esm/shared/charts/descriptors/service/index.js +5 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +909 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +28 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +193 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +136 -0
- package/dist/esm/shared/constants.js +1 -5
- package/dist/esm/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +2 -0
- package/dist/esm/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
- package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
- package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
- package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -17
- package/dist/esm/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +2 -0
- package/dist/esm/shared/interfaces/security/IPermission.js +2 -0
- package/dist/esm/shared/interfaces/security/IRole.js +3 -1
- package/dist/esm/shared/interfaces/security/index.js +1 -1
- package/package.json +1 -1
- package/types/index.d.ts +83 -42

package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js

@@ -5,9 +5,10 @@ import { GO_TO } from "../logic";
 import { randomUUID } from 'crypto';
 import { createLastConverationString, createLastConversationChatObject, createLastUserInputString, writeLLMDebugLogs } from "../nlu/generativeSlotFiller/prompt";
 import { InternalServerError } from "../../../errors";
+import { TranscriptEntryType, TranscriptRole } from "../../../interfaces/transcripts/transcripts";
 export const GPT_PROMPT = createNodeDescriptor({
     type: "completeText",
-    defaultLabel: "LLM Prompt",
+    defaultLabel: "LLM Prompt (legacy)",
     summary: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__DESCRIPTION",
     fields: [
         {
@@ -643,6 +644,7 @@ export const GPT_PROMPT = createNodeDescriptor({
                 api.output(text, {
                     _cognigy: {
                         _messageId,
+                        _preventTranscript: true
                     }
                 });
             }
@@ -700,6 +702,18 @@ export const GPT_PROMPT = createNodeDescriptor({
             const resultToOutput = typeof ((response === null || response === void 0 ? void 0 : response.result) || response) === "object" ? JSON.stringify((response === null || response === void 0 ? void 0 : response.result) || response, undefined, 2) : (response === null || response === void 0 ? void 0 : response.result) || response;
             yield api.output(resultToOutput, null);
         }
+        else if (storeLocation === "stream") {
+            const transcriptContent = {
+                role: TranscriptRole.ASSISTANT,
+                type: TranscriptEntryType.OUTPUT,
+                source: "assistant",
+                payload: {
+                    text: ((response === null || response === void 0 ? void 0 : response.result) || response),
+                    data: {},
+                }
+            };
+            yield api.addTranscriptStep(transcriptContent);
+        }
         if (storeLocation === "stream" && responseToStore.finishReason) {
             // send the finishReason as last output for a stream
             (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, "", {
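Aside: the new `else if (storeLocation === "stream")` branch above records the assembled stream result as an assistant entry in the session transcript instead of re-emitting it as output. A minimal TypeScript sketch of the entry shape it builds; only the field names come from the diff, while the interface name and the literal enum values are assumptions for illustration:

```typescript
// Sketch of the transcript entry built in the "stream" branch above.
// The string literals stand in for TranscriptRole.ASSISTANT and
// TranscriptEntryType.OUTPUT; their concrete values are an assumption.
interface TranscriptEntrySketch {
    role: "assistant";
    type: "output";
    source: "assistant";
    payload: {
        text: string;                  // the full streamed LLM result
        data: Record<string, unknown>; // left empty by this node
    };
}

const transcriptContent: TranscriptEntrySketch = {
    role: "assistant",
    type: "output",
    source: "assistant",
    payload: { text: "…assembled stream result…", data: {} },
};
// In the node this object is passed to api.addTranscriptStep(transcriptContent).
```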

package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js

@@ -5,9 +5,10 @@ import { randomUUID } from 'crypto';
 import { setSessionConfig } from "../../voice/mappers/setSessionConfig.mapper";
 import { voiceConfigParamsToVoiceSettings } from "../../voice/mappers/setSessionConfig.mapper";
 import { logFullConfigToDebugMode } from "../../../../helper/logFullConfigToDebugMode";
-import { createSystemMessage,
+import { createSystemMessage, getCognigyBrandMessage } from "./helpers/createSystemMessage";
 import { generateSearchPrompt } from "./helpers/generateSearchPrompt";
 import { getUserMemory } from "./helpers/getUserMemory";
+import { createToolDefinitions } from "./helpers/createToolDefinitions";
 import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
 export const AI_AGENT_JOB = createNodeDescriptor({
     type: "aiAgentJob",
@@ -65,7 +66,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
         {
             key: "name",
             label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__JOB_NAME__LABEL",
-            type: "
+            type: "cognigyLLMText",
             defaultValue: "Customer Support Specialist",
             params: {
                 required: true,
@@ -838,7 +839,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
     ],
     tags: ["ai", "aiAgent"],
     function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20;
         const { api, context, input, profile, flowReferenceId } = cognigy;
         const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
         try {
@@ -888,7 +889,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                 throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
             }
         }
-        const
+        const _21 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _21, cleanedProfile = __rest(_21, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
         const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
         /**
          * ----- Knowledge Search Section -----
@@ -1013,149 +1014,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
             const debugSystemMessage = (_r = (_q = systemMessage[0]) === null || _q === void 0 ? void 0 : _q.content) === null || _r === void 0 ? void 0 : _r.replace(`${getCognigyBrandMessage()}\n`, "");
             (_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, debugSystemMessage, "UI__DEBUG_MODE__AI_AGENT_JOB__SYSTEM_PROMPT__HEADER");
         }
-
-        /** This is the list of tools that are used in the AI Agent Job */
-        const tools = [];
-        /** Array of tool IDs for deduping */
-        const toolIds = [];
-        /** Map of MCP tool IDs to their respective node IDs they were loaded from */
-        const toolMap = new Map();
-        /** Array of tool names for listing in the debug message */
-        const toolNames = [];
-        for (const child of childConfigs) {
-            if (child.type === "aiAgentJobDefault") {
-                continue;
-            }
-            const toolId = child.config.toolId;
-            if (child.type === "aiAgentJobTool" &&
-                (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
-                if (!toolId) {
-                    throw new Error(`Tool ID is missing in Tool Node configuration.`);
-                }
-                const parsedToolId = api.parseCognigyScriptText(toolId);
-                if (!validateToolId(parsedToolId)) {
-                    throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
-                }
-                if (toolIds.includes(parsedToolId)) {
-                    throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
-                }
-                toolIds.push(parsedToolId);
-                toolNames.push(parsedToolId);
-                const tool = {
-                    type: "function",
-                    function: {
-                        name: parsedToolId,
-                        description: api.parseCognigyScriptText(child.config.description),
-                    },
-                };
-                if (useStrict) {
-                    tool.function.strict = true;
-                }
-                if (child.config.useParameters) {
-                    tool.function.parameters = child.config.parameters;
-                }
-                tools.push(tool);
-            }
-            if (child.type === "aiAgentJobMCPTool" &&
-                (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
-                if (!child.config.mcpServerUrl) {
-                    throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
-                }
-                const mcpServerUrl = child.config.mcpServerUrl;
-                const timeout = child.config.timeout;
-                const cacheTools = child.config.cacheTools;
-                const sendDebug = child.config.debugMessageFetchedTools;
-                const toolFilter = child.config.toolFilter;
-                let mcpTools = null;
-                try {
-                    mcpTools = yield api.fetchMcpTools({
-                        mcpServerUrl,
-                        timeout,
-                        cacheTools,
-                    });
-                }
-                catch (error) {
-                    const errorDetails = error instanceof Error
-                        ? {
-                            name: error.name,
-                            message: error.message,
-                        }
-                        : error;
-                    (_t = api.logDebugError) === null || _t === void 0 ? void 0 : _t.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
-                }
-                if (mcpTools) {
-                    if (sendDebug) {
-                        if (mcpTools.length === 0) {
-                            (_u = api.logDebugMessage) === null || _u === void 0 ? void 0 : _u.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
-                        }
-                        if (mcpTools.length > 0) {
-                            const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
-                            mcpTools.forEach((tool) => {
-                                messageLines.push(`<br>- <b>${tool.name}</b>: ${tool.description}`);
-                                if (child.config.debugMessageParameters && tool.inputSchema) {
-                                    messageLines.push(` <b>Parameters</b>:`);
-                                    Object.keys(tool.inputSchema.properties).forEach((key) => {
-                                        const parameter = tool.inputSchema.properties[key];
-                                        const requiredText = tool.inputSchema.required && !tool.inputSchema.required.includes(key) ? " (optional)" : "";
-                                        if (parameter.description) {
-                                            messageLines.push(` - ${key} (${parameter.type}): ${parameter.description}${requiredText}`);
-                                        }
-                                        else {
-                                            messageLines.push(` - ${key}: ${parameter.type}${requiredText}`);
-                                        }
-                                    });
-                                }
-                            });
-                            (_v = api.logDebugMessage) === null || _v === void 0 ? void 0 : _v.call(api, messageLines.join("\n"), "MCP Tool");
-                        }
-                    }
-                    const filteredMcpTools = mcpTools.filter((tool) => {
-                        if (toolFilter && toolFilter !== "none") {
-                            if (toolFilter === "whitelist" && child.config.whitelist) {
-                                const whitelist = child.config.whitelist.map((item) => item.trim());
-                                return whitelist.includes(tool.name);
-                            }
-                            else if (toolFilter === "blacklist") {
-                                // If the blacklist is falsy, all tools are allowed
-                                if (!child.config.blacklist) {
-                                    return true;
-                                }
-                                const blacklist = child.config.blacklist.map((item) => item.trim());
-                                return !blacklist.includes(tool.name);
-                            }
-                        }
-                        else {
-                            return true;
-                        }
-                    });
-                    const structuredMcpTools = [];
-                    filteredMcpTools.forEach((tool) => {
-                        var _a;
-                        if (toolIds.includes(tool.name)) {
-                            (_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Tool "${tool.name}" from MCP Tool "${child.config.name}" is not unique and will not be added. Please ensure each tool has a unique id.`);
-                            return;
-                        }
-                        // add tool to the list of tool ids to prevent duplicates
-                        toolIds.push(tool.name);
-                        toolNames.push(`${tool.name} (${child.config.name})`);
-                        toolMap.set(tool.name, child.id);
-                        const structuredTool = {
-                            type: "function",
-                            function: {
-                                name: tool.name,
-                                description: tool.description,
-                            },
-                        };
-                        if (tool.inputSchema) {
-                            structuredTool.function.parameters = tool.inputSchema;
-                        }
-                        structuredMcpTools.push(structuredTool);
-                    });
-                    tools.push(...structuredMcpTools);
-                }
-            }
-        }
-        ;
+        const { toolIds, toolNames, toolMap, tools } = yield createToolDefinitions(childConfigs, api, useStrict);
         // we only add this tool if at least one knowledge source is enabled
         if (isOnDemandKnowledgeStoreConfigured) {
             const knowledgeTool = {
@@ -1187,7 +1046,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
             tools.push(knowledgeTool);
         }
         if (debugLogToolDefinitions) {
-            (
+            (_t = api.logDebugMessage) === null || _t === void 0 ? void 0 : _t.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
         }
         // Optional Debug Message with the config
         if (debugConfig) {
@@ -1196,10 +1055,10 @@ export const AI_AGENT_JOB = createNodeDescriptor({
             messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__AI_AGENT_NAME__LABEL</b> ${aiAgent.name}`);
             messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__JOB_NAME__LABEL</b> ${jobName}`);
             // Safety settings
-            messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(
-            messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(
-            messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(
-            messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(
+            messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_HARMFUL_CONTENT</b> ${(_u = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _u === void 0 ? void 0 : _u.avoidHarmfulContent}`);
+            messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_UNGROUNDED_CONTENT</b> ${(_v = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _v === void 0 ? void 0 : _v.avoidUngroundedContent}`);
+            messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_AVOID_COPYRIGHT_INFRINGEMENTS</b> ${(_w = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _w === void 0 ? void 0 : _w.avoidCopyrightInfringements}`);
+            messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__SAFETY_SETTINGS_PREVENT_JAILBREAK_AND_MANIPULATION</b> ${(_x = aiAgent === null || aiAgent === void 0 ? void 0 : aiAgent.safetySettings) === null || _x === void 0 ? void 0 : _x.preventJailbreakAndManipulation}`);
             // Tools
             if (toolNames.length > 0) {
                 messageLines.push("<b>UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__TOOLS__LABEL</b>");
@@ -1255,7 +1114,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                 messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_VOICE ${config.ttsVoice || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
                 messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_LABEL ${config.ttsLabel || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
                 messageLines.push(`UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__TTS_DISABLE_CACHE ${config.ttsDisableCache || 'UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__VOICE_SETTING__NOT_SET'}`);
-            (
+            (_y = api.logDebugMessage) === null || _y === void 0 ? void 0 : _y.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__CONFIGURATION__HEADER");
         }
         const transcript = yield api.getTranscript({
             limit: 50,
@@ -1269,14 +1128,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
             transcript.length > 0 &&
             transcript[transcript.length - 1].role === TranscriptRole.USER) {
             const userInput = transcript[transcript.length - 1];
-            const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((
+            const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_z = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _z === void 0 ? void 0 : _z.text) || input.text}`;
             transcript[transcript.length - 1].payload.text = enhancedInput;
         }
         const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
         const _messageId = randomUUID();
         const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
             // Temp fix to override the transcript if needed
-            transcript: ((
+            transcript: ((_0 = context === null || context === void 0 ? void 0 : context._cognigy) === null || _0 === void 0 ? void 0 : _0.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
                 var _a;
                 text = isStreamingChannel ? text : text.trim();
                 if (text) {
@@ -1300,15 +1159,15 @@ export const AI_AGENT_JOB = createNodeDescriptor({
             };
         }
         // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
-        (
+        (_1 = api.setAnalyticsData) === null || _1 === void 0 ? void 0 : _1.call(api, "understood", "true");
         input.understood = true;
-        const fullLlmResult = yield ((
+        const fullLlmResult = yield ((_2 = api.runGenerativeAIPrompt) === null || _2 === void 0 ? void 0 : _2.call(api, llmPromptOptions, "aiAgent"));
         const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
         const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
         const tokenUsage = fullLlmResult.tokenUsage;
         // Send optional debug message with token usage
         if (debugLogTokenCount && tokenUsage) {
-            (
+            (_3 = api.logDebugMessage) === null || _3 === void 0 ? void 0 : _3.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
         }
         // Identify if the result is a tool call
         // If response is a tool call, set next node for Tools
@@ -1323,7 +1182,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                 isMcpToolCall = true;
             }
             if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
-                (
+                (_4 = api.logDebugError) === null || _4 === void 0 ? void 0 : _4.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
             }
             // Add last tool call to session state for loading it from Tool Answer Node
             api.updateSessionStateValues({
@@ -1331,21 +1190,21 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                     flow: flowReferenceId,
                     node: nodeId,
                 } }, (isMcpToolCall && {
-                mcpServerUrl: (
-                timeout: (
+                mcpServerUrl: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.mcpServerUrl,
+                timeout: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.timeout,
                 mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
                 })), { toolCall: mainToolCall }),
             });
             // if there are any parameters/arguments, add them to the input slots
             if (mainToolCall.function.arguments) {
-                input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (
+                input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_8 = (_7 = input.aiAgent) === null || _7 === void 0 ? void 0 : _7.toolArgs) !== null && _8 !== void 0 ? _8 : {}), mainToolCall.function.arguments) });
             }
             // Debug Message for Tool Calls, configured in the Tool Node
-            if ((
+            if ((_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.debugMessage) {
                 const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
                 const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
                 // Arguments / Parameters Slots
-                const slots = ((
+                const slots = ((_10 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _10 === void 0 ? void 0 : _10.arguments) && Object.keys(mainToolCall.function.arguments);
                 const hasSlots = slots && slots.length > 0;
                 messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
                 if (hasSlots) {
@@ -1360,7 +1219,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                         messageLines.push(`- ${slot}: ${slotValueAsString}`);
                     });
                 }
-                (
+                (_11 = api.logDebugMessage) === null || _11 === void 0 ? void 0 : _11.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
             }
             if (toolChild) {
                 api.setNextNode(toolChild.id);
@@ -1385,11 +1244,11 @@ export const AI_AGENT_JOB = createNodeDescriptor({
         }
         // Optionally output the result immediately
         if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
-            yield ((
+            yield ((_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, llmResult.result, {}));
         }
         else if (llmResult.finishReason && llmPromptOptions.stream) {
             // send the finishReason as last output for a stream
-            (
+            (_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, "", {
                 _cognigy: {
                     _preventTranscript: true,
                     _messageId,
@@ -1412,7 +1271,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
         }
         // Add response to Cognigy Input/Context for further usage
        if (storeLocation === "context") {
-            (
+            (_14 = api.addToContext) === null || _14 === void 0 ? void 0 : _14.call(api, contextKey, llmResult, "simple");
         }
         else if (storeLocation === "input") {
             api.addToInput(inputKey, llmResult);
@@ -1425,14 +1284,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
             const errorDetails = {
                 name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
                 code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
-                message: (error === null || error === void 0 ? void 0 : error.message) || ((
+                message: (error === null || error === void 0 ? void 0 : error.message) || ((_15 = error.originalErrorDetails) === null || _15 === void 0 ? void 0 : _15.message),
             };
-            (
+            (_16 = api.emitEvent) === null || _16 === void 0 ? void 0 : _16.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
             if (logErrorToSystem) {
-                (
+                (_17 = api.log) === null || _17 === void 0 ? void 0 : _17.call(api, "error", JSON.stringify(errorDetails));
             }
             if (errorHandling !== "stop") {
-                (
+                (_18 = api.logDebugError) === null || _18 === void 0 ? void 0 : _18.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
             }
             if (storeErrorInInput) {
                 input.aiAgent = input.aiAgent || {};
@@ -1441,7 +1300,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
             if (errorHandling === "continue") {
                 // output the timeout message
                 if (errorMessage) {
-                    yield ((
+                    yield ((_19 = api.output) === null || _19 === void 0 ? void 0 : _19.call(api, errorMessage, null));
                 }
                 // Set default node as next node
                 const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1453,7 +1312,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
                 if (!errorHandlingGotoTarget) {
                     throw new Error("GoTo Target is required");
                 }
-                if (!((
+                if (!((_20 = api.checkThink) === null || _20 === void 0 ? void 0 : _20.call(api, nodeId))) {
                     api.resetNextNodes();
                     yield api.executeFlow({
                         flowNode: {

package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js

@@ -26,7 +26,7 @@ export const AI_AGENT_JOB_TOOL = createNodeDescriptor({
             key: "toolId",
             label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__TOOL_ID__LABEL",
             description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__TOOL_ID__DESCRIPTION",
-            type: "
+            type: "cognigyLLMText",
             defaultValue: "unlock_account",
             params: {
                 required: true,
@@ -39,7 +39,7 @@ export const AI_AGENT_JOB_TOOL = createNodeDescriptor({
             key: "description",
             label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DESCRIPTION__LABEL",
             description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DESCRIPTION__DESCRIPTION",
-            type: "
+            type: "cognigyLLMText",
             defaultValue: "This tool unlocks a locked user account.",
             params: {
                 required: true,

package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js

@@ -0,0 +1,172 @@
+import { __awaiter } from "tslib";
+import { validateToolId } from "./createSystemMessage";
+/**
+ * Creates the tool definitions for the AI Agent Job and LLM Prompt v2 Nodes
+ * @param childConfigs Child node configurations
+ * @param api Cognigy API
+ * @param useStrict Whether to use strict mode for the tools
+ * @returns An object containing the tool definitions
+ */
+export const createToolDefinitions = (childConfigs, api, useStrict) => __awaiter(void 0, void 0, void 0, function* () {
+    var _a, _b, _c;
+    // Create Tools JSON
+    /** This is the list of tools that are used in the AI Agent Job */
+    const tools = [];
+    /** Array of tool IDs for deduping */
+    const toolIds = [];
+    /** Map of MCP tool IDs to their respective node IDs they were loaded from */
+    const toolMap = new Map();
+    /** Array of tool names for listing in the debug message */
+    const toolNames = [];
+    // if no child configs are provided, return empty tool definitions
+    if (!childConfigs || childConfigs.length === 0) {
+        return {
+            toolIds: [],
+            toolNames: [],
+            toolMap: new Map(),
+            tools: [],
+        };
+    }
+    // Loop through all child nodes and create the tools
+    for (const child of childConfigs) {
+        if (child.type === "aiAgentJobDefault" || child.type === "llmPromptDefault") {
+            continue;
+        }
+        const toolId = child.config.toolId;
+        if ((child.type === "aiAgentJobTool" || child.type === "llmPromptTool") &&
+            (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
+            if (!toolId) {
+                throw new Error(`Tool ID is missing in Tool Node configuration.`);
+            }
+            const parsedToolId = api.parseCognigyScriptText(toolId);
+            if (!validateToolId(parsedToolId)) {
+                throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
+            }
+            if (toolIds.includes(parsedToolId)) {
+                throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
+            }
+            toolIds.push(parsedToolId);
+            toolNames.push(parsedToolId);
+            const tool = {
+                type: "function",
+                function: {
+                    name: parsedToolId,
+                    description: api.parseCognigyScriptText(child.config.description),
+                },
+            };
+            if (useStrict) {
+                tool.function.strict = true;
+            }
+            if (child.config.useParameters) {
+                tool.function.parameters = child.config.parameters;
+            }
+            tools.push(tool);
+        }
+        if ((child.type === "aiAgentJobMCPTool" || child.type === "llmPromptMCPTool") &&
+            (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
+            if (!child.config.mcpServerUrl) {
+                throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
+            }
+            const mcpServerUrl = child.config.mcpServerUrl;
+            const timeout = child.config.timeout;
+            const cacheTools = child.config.cacheTools;
+            const sendDebug = child.config.debugMessageFetchedTools;
+            const toolFilter = child.config.toolFilter;
+            let mcpTools = null;
+            try {
+                mcpTools = yield api.fetchMcpTools({
+                    mcpServerUrl,
+                    timeout,
+                    cacheTools,
+                });
+            }
+            catch (error) {
+                const errorDetails = error instanceof Error
+                    ? {
+                        name: error.name,
+                        message: error.message,
+                    }
+                    : error;
+                (_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Unable to connect to MCP Server:<br>${JSON.stringify(errorDetails, null, 2)}`, child.config.name);
+            }
+            if (mcpTools) {
+                if (sendDebug) {
+                    if (mcpTools.length === 0) {
+                        (_b = api.logDebugMessage) === null || _b === void 0 ? void 0 : _b.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
+                    }
+                    if (mcpTools.length > 0) {
+                        const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
+                        mcpTools.forEach((tool) => {
+                            messageLines.push(`<br>- <b>${tool.name}</b>: ${tool.description}`);
+                            if (child.config.debugMessageParameters && tool.inputSchema) {
+                                messageLines.push(` <b>Parameters</b>:`);
+                                Object.keys(tool.inputSchema.properties).forEach((key) => {
+                                    const parameter = tool.inputSchema.properties[key];
+                                    const requiredText = tool.inputSchema.required && !tool.inputSchema.required.includes(key) ? " (optional)" : "";
+                                    if (parameter.description) {
+                                        messageLines.push(` - ${key} (${parameter.type}): ${parameter.description}${requiredText}`);
+                                    }
+                                    else {
+                                        messageLines.push(` - ${key}: ${parameter.type}${requiredText}`);
+                                    }
+                                });
+                            }
+                        });
+                        (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, messageLines.join("\n"), "MCP Tool");
+                    }
+                }
+                const filteredMcpTools = mcpTools.filter((tool) => {
+                    if (toolFilter && toolFilter !== "none") {
+                        if (toolFilter === "whitelist" && child.config.whitelist) {
+                            const whitelist = child.config.whitelist.map((item) => item.trim());
+                            return whitelist.includes(tool.name);
+                        }
+                        else if (toolFilter === "blacklist") {
+                            // If the blacklist is falsy, all tools are allowed
+                            if (!child.config.blacklist) {
+                                return true;
+                            }
+                            const blacklist = child.config.blacklist.map((item) => item.trim());
+                            return !blacklist.includes(tool.name);
+                        }
+                    }
+                    else {
+                        return true;
+                    }
+                });
+                const structuredMcpTools = [];
+                filteredMcpTools.forEach((tool) => {
+                    var _a;
+                    if (toolIds.includes(tool.name)) {
+                        (_a = api.logDebugError) === null || _a === void 0 ? void 0 : _a.call(api, `Tool "${tool.name}" from MCP Tool "${child.config.name}" is not unique and will not be added. Please ensure each tool has a unique id.`);
+                        return;
+                    }
+                    // add tool to the list of tool ids to prevent duplicates
+                    toolIds.push(tool.name);
+                    toolNames.push(`${tool.name} (${child.config.name})`);
+                    toolMap.set(tool.name, child.id);
+                    const structuredTool = {
+                        type: "function",
+                        function: {
+                            name: tool.name,
+                            description: tool.description,
+                        },
+                    };
+                    if (tool.inputSchema) {
+                        structuredTool.function.parameters = tool.inputSchema;
+                    }
+                    structuredMcpTools.push(structuredTool);
+                });
+                tools.push(...structuredMcpTools);
+            }
+        }
+    }
+    ;
+    return {
+        toolIds,
+        toolNames,
+        toolMap,
+        tools,
+    };
+});
+//# sourceMappingURL=createToolDefinitions.js.map
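For orientation, this helper now backs both the AI Agent Job Node and the new LLM Prompt v2 tool children (per its JSDoc), replacing the inline block removed from aiAgentJob.js above. A rough usage sketch with a hand-written child config and a stubbed `api` object; the stub members and sample config values are illustrative assumptions, while the call signature and the returned `{ tools, toolIds, toolNames, toolMap }` shape come from the file above:

```typescript
// Assumes createToolDefinitions from the file above is in scope.
// Stubbed Cognigy API exposing only the members the helper may touch (assumption).
const api = {
    parseCognigyScriptText: (text: string) => text,
    parseCognigyScriptCondition: (_condition: string) => true,
    fetchMcpTools: async () => [],
    logDebugMessage: (..._args: unknown[]) => undefined,
    logDebugError: (..._args: unknown[]) => undefined,
};

// One child Tool Node, shaped like the configs the loop above reads (sample values).
const childConfigs = [
    {
        id: "node-1",
        type: "llmPromptTool",
        config: {
            toolId: "unlock_account",
            description: "This tool unlocks a locked user account.",
            useParameters: true,
            parameters: { type: "object", properties: { userId: { type: "string" } } },
        },
    },
];

const { tools, toolIds, toolNames, toolMap } = await createToolDefinitions(childConfigs, api, true);
// tools    -> one "function" tool named "unlock_account" with strict: true and the parameters schema
// toolIds  -> ["unlock_account"], toolNames -> ["unlock_account"]
// toolMap  -> empty Map, since no MCP tool children were passed
```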