@cognigy/rest-api-client 2025.21.0 → 2025.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/build/apigroups/SimulationAPIGroup_2_0.js +6 -1
- package/build/shared/charts/descriptors/analytics/trackGoal.js +2 -2
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderOauth2Connection.js +3 -1
- package/build/shared/charts/descriptors/index.js +1 -0
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +5 -5
- package/build/shared/charts/descriptors/service/GPTPrompt.js +4 -4
- package/build/shared/charts/descriptors/service/agentTools/knowledgeTool.js +340 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +7 -5
- package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +35 -32
- package/build/shared/charts/descriptors/service/index.js +3 -1
- package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +7 -5
- package/build/shared/charts/descriptors/voice/mappers/msTeamsTransfer.mapper.js +36 -0
- package/build/shared/charts/descriptors/voicegateway2/index.js +16 -11
- package/build/shared/charts/descriptors/voicegateway2/nodes/msTeamsTransfer.js +126 -0
- package/build/shared/interfaces/IProfile.js +5 -1
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +3 -0
- package/build/shared/interfaces/resources/IAuditEvent.js +6 -1
- package/build/shared/interfaces/resources/IChartNode.js +7 -1
- package/build/shared/interfaces/resources/INodeDescriptorSet.js +11 -1
- package/build/shared/interfaces/resources/settings/IAgentSettings.js +12 -7
- package/build/shared/interfaces/resources/settings/IPiiDataRedactionSettings.js +142 -0
- package/build/shared/interfaces/resources/settings/index.js +4 -1
- package/build/shared/interfaces/restAPI/resources/project/v2.0/settings/IAgentSettings_2_0.js +104 -0
- package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaFromTranscriptRest_2_0.js +20 -0
- package/build/shared/interfaces/restAPI/simulation/simulation/ICloneSimulationRest_2_0.js +3 -0
- package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +6 -1
- package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +2 -2
- package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderOauth2Connection.js +3 -1
- package/dist/esm/shared/charts/descriptors/index.js +2 -1
- package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +5 -5
- package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +4 -4
- package/dist/esm/shared/charts/descriptors/service/agentTools/knowledgeTool.js +338 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +7 -5
- package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +35 -32
- package/dist/esm/shared/charts/descriptors/service/index.js +1 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +7 -5
- package/dist/esm/shared/charts/descriptors/voice/mappers/msTeamsTransfer.mapper.js +33 -0
- package/dist/esm/shared/charts/descriptors/voicegateway2/index.js +16 -11
- package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/msTeamsTransfer.js +124 -0
- package/dist/esm/shared/interfaces/IProfile.js +4 -0
- package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +3 -0
- package/dist/esm/shared/interfaces/resources/IAuditEvent.js +6 -1
- package/dist/esm/shared/interfaces/resources/IChartNode.js +6 -0
- package/dist/esm/shared/interfaces/resources/INodeDescriptorSet.js +11 -1
- package/dist/esm/shared/interfaces/resources/settings/IAgentSettings.js +16 -11
- package/dist/esm/shared/interfaces/resources/settings/IPiiDataRedactionSettings.js +139 -0
- package/dist/esm/shared/interfaces/resources/settings/index.js +5 -4
- package/dist/esm/shared/interfaces/restAPI/resources/project/v2.0/settings/IAgentSettings_2_0.js +103 -1
- package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaFromTranscriptRest_2_0.js +19 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/simulation/ICloneSimulationRest_2_0.js +2 -0
- package/package.json +1 -1
- package/types/index.d.ts +317 -36
package/CHANGELOG.md
CHANGED

@@ -1,8 +1,23 @@
+# 2025.23.0
+Released: November 11th, 2025
+
+Released state of package up to date with Cognigy.AI v2025.23.0
+
+# 2025.22.0
+Released: October 28th, 2025
+
+Released state of package up to date with Cognigy.AI v2025.22.0
+
 # 2025.21.0
 Released: October 14th, 2025
 
 Released state of package up to date with Cognigy.AI v2025.21.0
 
+# 2025.20.0
+Released: October 02nd, 2025
+
+Released state of package up to date with Cognigy.AI v2025.20.0
+
 # 2025.19.0
 Released: September 16th, 2025
 

package/build/apigroups/SimulationAPIGroup_2_0.js
CHANGED

@@ -35,6 +35,10 @@ function SimulationAPIGroup_2_0(instance) {
            var { simulationReference } = _a, args = __rest(_a, ["simulationReference"]);
            return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/schedule`, "POST", self)(args, options);
        },
+        cloneSimulation: (_a, options) => {
+            var { simulationReference } = _a, args = __rest(_a, ["simulationReference"]);
+            return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/clone`, "POST", self)(args, options);
+        },
        indexSimulationRunBatches: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/batches?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
        getAllSimulationRunBatches: (_a, options) => {
            var { simulationReference } = _a, args = __rest(_a, ["simulationReference"]);

@@ -59,7 +63,8 @@
        getPersonaOptions: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/options", "POST", self)(args, options),
        generatePersona: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/generate", "POST", self)(args, options),
        regeneratePersonaField: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/regenerate-field", "POST", self)(args, options),
-        generateBulkPersona: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/generate-bulk", "POST", self)(args, options)
+        generateBulkPersona: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/generate-bulk", "POST", self)(args, options),
+        generatePersonaFromTranscript: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/from-transcript", "POST", self)(args, options)
    };
}
exports.SimulationAPIGroup_2_0 = SimulationAPIGroup_2_0;
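
The two Simulation API group methods added above are thin wrappers around plain POST requests. A minimal usage sketch, assuming the new methods are exposed on a client instance the same way the existing ones (for example generateBulkPersona) are; the argument values are illustrative, and the exact payloads are defined by ICloneSimulationRest_2_0 and IGeneratePersonaFromTranscriptRest_2_0 in the typings:

    // Hypothetical usage; "client" stands for an already instantiated @cognigy/rest-api-client.
    // POST /testing/beta/simulations/{simulationReference}/clone
    const clonedSimulation = await client.cloneSimulation({ simulationReference: "<simulation-id>" });
    // POST /testing/beta/personas/from-transcript
    const persona = await client.generatePersonaFromTranscript({ transcript: "<conversation transcript>" });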

package/build/shared/charts/descriptors/analytics/trackGoal.js
CHANGED

@@ -4,7 +4,7 @@ exports.TRACK_GOAL = void 0;
 /* Custom modules */
 const createNodeDescriptor_1 = require("../../createNodeDescriptor");
 /* Npm modules */
-const
+const crypto = require("crypto");
 /**
  * Node name: 'trackGoal'
  *

@@ -58,7 +58,7 @@ exports.TRACK_GOAL = (0, createNodeDescriptor_1.createNodeDescriptor)({
        }
        // Create a new cycle id if there is a start step
        if (hasStartStep) {
-            cycleId =
+            cycleId = crypto.randomUUID();
            activeCycleIds[goalId] = cycleId;
            const sessionStateAnalyticsUpdate = Object.assign(Object.assign({}, sessionState.analytics), { goalCycleIds: activeCycleIds });
            api.setSessionState("analytics", sessionStateAnalyticsUpdate);

package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderOauth2Connection.js
CHANGED

@@ -8,7 +8,9 @@ exports.AZURE_OPEN_AI_OAUTH2_PROVIDER_CONNECTION = {
        { fieldName: "clientId", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_ID" },
        { fieldName: "clientSecret", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_SECRET" },
        { fieldName: "oauthUrl", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_URL" },
-        { fieldName: "scope", label: "UI__CONNECTION_EDITOR__FIELD_SCOPE" }
+        { fieldName: "scope", label: "UI__CONNECTION_EDITOR__FIELD_SCOPE" },
+        { fieldName: "additionalHeaderName", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_ADDITIONAL_HEADER_NAME", required: false },
+        { fieldName: "additionalHeaderValue", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_ADDITIONAL_HEADER_VALUE", required: false }
    ]
};
//# sourceMappingURL=azureOpenAIProviderOauth2Connection.js.map
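
For the Azure OpenAI OAuth2 provider this adds one optional extra HTTP header next to the existing credentials. A sketch of the resulting connection values with purely illustrative data (only the field names come from the descriptor above):

    // Illustrative values only; the field names match the connection descriptor above.
    const azureOpenAIOauth2Connection = {
        clientId: "<app-client-id>",
        clientSecret: "<app-client-secret>",
        oauthUrl: "https://login.microsoftonline.com/<tenant>/oauth2/v2.0/token",
        scope: "https://cognitiveservices.azure.com/.default",
        additionalHeaderName: "Ocp-Apim-Subscription-Key", // optional, added in this diff
        additionalHeaderValue: "<subscription-key>"        // optional, added in this diff
    };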

package/build/shared/charts/descriptors/index.js
CHANGED

@@ -149,6 +149,7 @@ if (process.env.DISABLE_FEATURE_TRANSCRIPT_MANAGER !== "true") {
    nodes.push(service_1.AI_AGENT_JOB_MCP_TOOL);
    nodes.push(service_1.AI_AGENT_JOB_CALL_MCP_TOOL);
    nodes.push(service_1.AI_AGENT_TOOL_ANSWER);
+    nodes.push(service_1.KNOWLEDGE_TOOL);
    nodes.push(service_1.AI_AGENT_HANDOVER);
    nodes.push(service_1.LOAD_AI_AGENT);
}

package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js
CHANGED

@@ -23,8 +23,8 @@ const getLanguageName = (languageCode) => {
    // remove the "region" part from language codes (e.g. en-US becomes en, the -US gets stripped)
    const languageOnly = languageCode.split('-')[0];
    // our runtimes support this, but it's not reflected in every project's typings!
-    // this flag is used to reset a breaking-change behavior (returning "flamish" instead of "dutch (belgium)") to its former behavior
-    // @ts-ignore
+    // this flag is used to reset a breaking-change behavior (returning "flamish" instead of "dutch (belgium)") to its former behavior
+    // @ts-ignore
    const languageName = (new Intl.DisplayNames(["en-US"], { type: "language", languageDisplay: "standard", fallback: 'none' }).of(languageOnly));
    return languageName;
};

@@ -656,7 +656,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
        // add error to context or input
        switch (searchStoreLocation) {
            case "context":
-                // @ts-ignore
+                // @ts-ignore
                api.addToContext(searchStoreLocationContextKey, searchStoreDataWithError, "simple");
                break;
            default:

@@ -738,7 +738,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
        // Save result
        switch (searchStoreLocation) {
            case "context":
-                // @ts-ignore
+                // @ts-ignore
                api.addToContext(searchStoreLocationContextKey, knowledgeSearchResponseData, "simple");
                break;
            case "default":

@@ -767,7 +767,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
        if (documents && (mode !== "s")) {
            // check if we received streamed output at all
            let streamedOutput = false;
-            const isStreamingChannel = input.
+            const isStreamingChannel = input.endpointType === "webchat3" || input.channel === "adminconsole";
            const _messageId = (0, crypto_1.randomUUID)();
            const promptData = {
                prompt,

package/build/shared/charts/descriptors/service/GPTPrompt.js
CHANGED

@@ -4,7 +4,7 @@ exports.GPT_PROMPT = void 0;
 /* Custom modules */
 const createNodeDescriptor_1 = require("../../createNodeDescriptor");
 const logic_1 = require("../logic");
-const
+const crypto = require("crypto");
 const prompt_1 = require("../nlu/generativeSlotFiller/prompt");
 const errors_1 = require("../../../errors");
 const transcripts_1 = require("../../../interfaces/transcripts/transcripts");

@@ -621,7 +621,7 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
        // add error to context or input
        switch (storeLocation) {
            case "context":
-                // @ts-ignore
+                // @ts-ignore
                api.addToContext(contextKey, errorResponse, "simple");
                break;
            default:

@@ -659,8 +659,8 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
        }
    };
    try {
-        const isStreamingChannel = input.
-        const _messageId =
+        const isStreamingChannel = input.endpointType === "webchat3" || input.channel === "adminconsole";
+        const _messageId = crypto.randomUUID();
        const data = {
            prompt,
            temperature,

package/build/shared/charts/descriptors/service/agentTools/knowledgeTool.js
ADDED

@@ -0,0 +1,340 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.KNOWLEDGE_TOOL = void 0;
+/* Custom modules */
+const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
+const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
+exports.KNOWLEDGE_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)({
+    type: "knowledgeTool",
+    defaultLabel: "Knowledge Tool",
+    parentType: ["aiAgentJob", "llmPromptV2"],
+    constraints: {
+        editable: true,
+        deletable: true,
+        collapsable: true,
+        creatable: true,
+        movable: true,
+        placement: {
+            predecessor: {
+                whitelist: []
+            }
+        },
+        childFlowCreatable: false
+    },
+    behavior: {
+        stopping: true
+    },
+    preview: {
+        type: "text",
+        key: "toolId"
+    },
+    fields: [
+        {
+            key: "knowledgeStoreId",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__KNOWLEDGE_STORE__LABEL",
+            type: "knowledgeStoreSelect",
+        },
+        {
+            key: "toolId",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__TOOL_ID__LABEL",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__TOOL_ID__DESCRIPTION",
+            type: "cognigyLLMText",
+            defaultValue: "retrieve_knowledge_and_data",
+            params: {
+                required: true,
+                rows: 1,
+                multiline: false,
+                maxLength: 64,
+            }
+        },
+        {
+            key: "description",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DESCRIPTION__LABEL",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DESCRIPTION__DESCRIPTION",
+            type: "cognigyLLMText",
+            defaultValue: "Find the answer to general prompts or questions searching the attached data sources. It focuses exclusively on a knowledge search and does not execute tasks like small talk, calculations, or script running.",
+            params: {
+                required: true,
+                rows: 5,
+                multiline: true
+            }
+        },
+        {
+            key: "parameters",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__PARAMETERS__LABEL",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__PARAMETERS__DESCRIPTION",
+            type: "toolParameters",
+            defaultValue: `{
+"type": "object",
+"properties": {
+"generated_prompt": {
+"type": "string",
+"description": "Generated question including the context of the conversation (I want to know...)."
+},
+"generated_buffer_phrase": {
+"type": "string",
+"description": "A generated delay or stalling phrase. Consider the context. Adapt to your speech style and language."
+},
+},
+"required": ["generated_prompt", "generated_buffer_phrase"],
+"additionalProperties": false
+}`,
+            params: {
+                required: false,
+            },
+        },
+        {
+            key: "debugMessage",
+            type: "toggle",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DEBUG_MESSAGE__LABEL",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DEBUG_MESSAGE__DESCRIPTION",
+            defaultValue: true,
+        },
+        {
+            key: "condition",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__CONDITION__LABEL",
+            description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__CONDITION__DESCRIPTION",
+            type: "cognigyText",
+            defaultValue: "",
+        },
+        {
+            key: "topK",
+            type: "slider",
+            label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__TOP_K__LABEL",
+            description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__TOP_K__DESCRIPTION",
+            defaultValue: 5,
+            params: {
+                min: 1,
+                max: 10
+            }
+        },
+        {
+            key: "storeLocation",
+            type: "select",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__STORE_LOCATION__LABEL",
+            params: {
+                options: [
+                    {
+                        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__STORE_LOCATION__OPTIONS__NONE__LABEL",
+                        value: "none"
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__STORE_LOCATION__OPTIONS__INPUT__LABEL",
+                        value: "input"
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__STORE_LOCATION__OPTIONS__CONTEXT__LABEL",
+                        value: "context"
+                    }
+                ],
+            },
+            defaultValue: "none"
+        },
+        {
+            key: "storeLocationInputKey",
+            type: "cognigyText",
+            label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__INPUT_KEY__LABEL",
+            description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__INPUT_KEY__DESCRIPTION",
+            defaultValue: "knowledgeSearch",
+            condition: {
+                key: "storeLocation",
+                value: "input"
+            }
+        },
+        {
+            key: "storeLocationContextKey",
+            type: "cognigyText",
+            label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__CONTEXT_KEY__LABEL",
+            description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__CONTEXT_KEY__DESCRIPTION",
+            defaultValue: "knowledgeSearch",
+            condition: {
+                key: "storeLocation",
+                value: "context"
+            }
+        },
+        {
+            key: "sourceTags",
+            type: "knowledgeSourceTags",
+            label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__SOURCE_TAGS__LABEL",
+            description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__SOURCE_TAGS__DESCRIPTION",
+            params: {
+                tagLimit: 5
+            }
+        },
+        {
+            key: "sourceTagsFilterOp",
+            type: "select",
+            label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__SOURCE_TAGS_FILTER_OP__LABEL",
+            description: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__DESCRIPTION",
+            defaultValue: "and",
+            params: {
+                options: [
+                    {
+                        label: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__OPTIONS__AND__LABEL",
+                        value: "and"
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__OPTIONS__OR__LABEL",
+                        value: "or"
+                    },
+                ]
+            }
+        },
+    ],
+    sections: [
+        {
+            key: "debugging",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__DEBUG_SETTINGS__LABEL",
+            defaultCollapsed: true,
+            fields: [
+                "debugMessage",
+            ],
+        },
+        {
+            key: "advanced",
+            label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
+            defaultCollapsed: true,
+            fields: [
+                "topK",
+                "storeLocation",
+                "storeLocationInputKey",
+                "storeLocationContextKey",
+                "sourceTags",
+                "sourceTagsFilterOp",
+                "condition",
+            ],
+        },
+    ],
+    form: [
+        { type: "field", key: "knowledgeStoreId" },
+        { type: "field", key: "toolId" },
+        { type: "field", key: "description" },
+        { type: "section", key: "debugging" },
+        { type: "section", key: "advanced" },
+    ],
+    appearance: {
+        color: "white",
+        textColor: "#252525",
+        variant: "mini",
+    },
+    function: async ({ cognigy, config, nodeId: thisNodeId }) => {
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o;
+        const { api, context, input } = cognigy;
+        const { knowledgeStoreId, debugMessage, topK, storeLocation, storeLocationInputKey, storeLocationContextKey, sourceTags, sourceTagsFilterOp, } = config;
+        const sessionState = await api.loadSessionState();
+        const toolCall = (_a = sessionState.lastToolCall) === null || _a === void 0 ? void 0 : _a.toolCall;
+        const aiAgentJobNode = (_b = sessionState.lastToolCall) === null || _b === void 0 ? void 0 : _b.aiAgentJobNode;
+        if (!(toolCall === null || toolCall === void 0 ? void 0 : toolCall.id)) {
+            (_c = api.logDebugError) === null || _c === void 0 ? void 0 : _c.call(api, "UI__DEBUG_MODE__AI_AGENT_ANSWER__ERROR__MESSAGE");
+        }
+        if (toolCall && aiAgentJobNode && knowledgeStoreId && (input.text || ((_e = (_d = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _d === void 0 ? void 0 : _d.arguments) === null || _e === void 0 ? void 0 : _e.generated_prompt))) {
+            if (!((_f = api.checkThink) === null || _f === void 0 ? void 0 : _f.call(api, thisNodeId))) {
+                let query = ((_h = (_g = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _g === void 0 ? void 0 : _g.arguments) === null || _h === void 0 ? void 0 : _h.generated_prompt) || input.text;
+                const data = {
+                    language: input.language,
+                    query,
+                    topK,
+                    traceId: input.traceId,
+                    disableSensitiveLogging: false,
+                    knowledgeStoreIds: [knowledgeStoreId],
+                };
+                const generated_buffer_phrase = (_k = (_j = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _j === void 0 ? void 0 : _j.arguments) === null || _k === void 0 ? void 0 : _k.generated_buffer_phrase;
+                if (generated_buffer_phrase) {
+                    // output the generated buffer phrase. Don't add it to the transcript, else the LLM will repeat it next time.
+                    await ((_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, generated_buffer_phrase, {
+                        _cognigy: {
+                            _preventTranscript: true
+                        }
+                    }));
+                }
+                if (sourceTags && sourceTags.length > 0) {
+                    // convert each knowledgeSourceTag to a string
+                    sourceTags.forEach((tag, index) => {
+                        sourceTags[index] = tag.toString();
+                    });
+                    data.tagsData = {
+                        tags: sourceTags,
+                        op: sourceTagsFilterOp
+                    };
+                }
+                const knowledgeSearchResponse = await api.knowledgeSearch(data);
+                // Handle possible response errors
+                if ((knowledgeSearchResponse === null || knowledgeSearchResponse === void 0 ? void 0 : knowledgeSearchResponse.status) !== "success") {
+                    const errorMessage = (knowledgeSearchResponse === null || knowledgeSearchResponse === void 0 ? void 0 : knowledgeSearchResponse.error) || "empty";
+                    throw new Error(`Error while performing knowledge search. Remote returned error: ${errorMessage}`);
+                }
+                // Store full response data in input or context
+                if (storeLocation === "input" && storeLocationInputKey) {
+                    input[storeLocationInputKey] = knowledgeSearchResponse;
+                }
+                else if (storeLocation === "context" && storeLocationContextKey) {
+                    context[storeLocationContextKey] = knowledgeSearchResponse;
+                }
+                const knowledgeSearchResponseData = knowledgeSearchResponse.data;
+                // Optional Debug Message of Knowledge Search Results
+                if (debugMessage) {
+                    const messageLines = [];
+                    if (query) {
+                        messageLines.push(`\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__SEARCH_PROMPT</b> ${query}`);
+                    }
+                    if ((_m = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _m === void 0 ? void 0 : _m.length) {
+                        knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK.forEach((result, index) => {
+                            var _a;
+                            messageLines.push(`\nTop ${index + 1}:`);
+                            messageLines.push(`Distance: ${result.distance}`);
+                            messageLines.push(`Source Name: ${(_a = result.sourceMetaData) === null || _a === void 0 ? void 0 : _a.sourceName}`);
+                            messageLines.push(`Text: ${result.text}`);
+                        });
+                    }
+                    else {
+                        messageLines.push("UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__NO_RESULTS");
+                    }
+                    (_o = api.logDebugMessage) === null || _o === void 0 ? void 0 : _o.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
+                }
+                const { flow, node } = aiAgentJobNode;
+                if (flow && node) {
+                    // Add Tool Call Message to Transcript
+                    const toolCallTranscriptStep = {
+                        role: transcripts_1.TranscriptRole.ASSISTANT,
+                        type: transcripts_1.TranscriptEntryType.TOOL_CALL,
+                        source: "system",
+                        payload: {
+                            name: toolCall.function.name,
+                            id: toolCall.id,
+                            input: toolCall.function.arguments,
+                        }
+                    };
+                    await api.addTranscriptStep(toolCallTranscriptStep);
+                    // Add Tool Answer Message to Transcript
+                    const toolAnswer = {
+                        role: transcripts_1.TranscriptRole.TOOL,
+                        type: transcripts_1.TranscriptEntryType.TOOL_ANSWER,
+                        source: "system",
+                        payload: {
+                            toolCallId: toolCall.id,
+                            name: toolCall.function.name,
+                            content: `We have this context as answer from the knowledge source:\n${JSON.stringify(knowledgeSearchResponseData)}`,
+                        }
+                    };
+                    await api.addTranscriptStep(toolAnswer);
+                    api.resetNextNodes();
+                    // remove the call from the session state, because the call has been answered
+                    api.updateSessionStateValues({
+                        lastToolCall: undefined
+                    });
+                    await api.executeFlow({
+                        flowNode: {
+                            flow,
+                            node,
+                        },
+                        absorbContext: true,
+                    });
+                }
+            }
+            else {
+                throw new Error("Infinite Loop Detected");
+            }
+        }
+    }
+});
+//# sourceMappingURL=knowledgeTool.js.map
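
The default "parameters" schema above makes the model supply both a search prompt and a buffer phrase whenever it calls the tool. Illustrative tool-call arguments that satisfy that schema (values invented for the example):

    // Example arguments for a "retrieve_knowledge_and_data" tool call; values are illustrative.
    {
        "generated_prompt": "I want to know which payment methods are supported for business accounts.",
        "generated_buffer_phrase": "One moment, let me check that for you."
    }

The node then uses generated_prompt (falling back to input.text) as the knowledge search query, outputs the buffer phrase without adding it to the transcript, stores the search response in the input or context object if configured, and finally appends a TOOL_CALL / TOOL_ANSWER pair to the transcript before executing the parent AI Agent Job node again.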

package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js
CHANGED

@@ -12,6 +12,8 @@ var __rest = (this && this.__rest) || function (s, e) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.AI_AGENT_JOB = exports.AI_AGENT_TOOLS_WHITELIST = void 0;
+/* Npm modules */
+const crypto = require("crypto");
 /* Custom modules */
 const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
 const setSessionConfig_mapper_1 = require("../../voice/mappers/setSessionConfig.mapper");

@@ -21,9 +23,8 @@ const createSystemMessage_1 = require("./helpers/createSystemMessage");
 const generateSearchPrompt_1 = require("./helpers/generateSearchPrompt");
 const getUserMemory_1 = require("./helpers/getUserMemory");
 const createToolDefinitions_1 = require("./helpers/createToolDefinitions");
-const uuid_1 = require("uuid");
 const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
-exports.AI_AGENT_TOOLS_WHITELIST = ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"];
+exports.AI_AGENT_TOOLS_WHITELIST = ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool", "knowledgeTool"];
 exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
     type: "aiAgentJob",
     defaultLabel: "AI Agent",

@@ -1207,6 +1208,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
            rolesWhiteList: [transcripts_1.TranscriptRole.USER, transcripts_1.TranscriptRole.ASSISTANT, transcripts_1.TranscriptRole.TOOL],
            excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT],
            useTextAlternativeForLLM,
+            excludeUserEventMessages: true,
        });
        // For knowledgeSearch "always", we enhance the user input with the knowledge search response data
        if (knowledgeSearchBehavior === "always" &&

@@ -1218,8 +1220,8 @@
            const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_z = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _z === void 0 ? void 0 : _z.text) || input.text}`;
            transcript[transcript.length - 1].payload.text = enhancedInput;
        }
-        const isStreamingChannel = input.
-        const _messageId =
+        const isStreamingChannel = input.endpointType === "webchat3" || input.channel === "adminconsole";
+        const _messageId = crypto.randomUUID();
        const enableAdvancedLogging = advancedLogging && loggingWebhookUrl && (conditionForLogging === "" || !!conditionForLogging);
        const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt: "", chat: systemMessage,
        // Temp fix to override the transcript if needed

@@ -1296,7 +1298,7 @@
        // Find the child node with the toolId of the tool call
        let toolChild = undefined;
        for (const child of childConfigs) {
-            if (child.type
+            if (!["aiAgentJobDefault", "aiAgentJobMCPTool"].includes(child.type) && ((_5 = child.config) === null || _5 === void 0 ? void 0 : _5.toolId) && await api.parseCognigyScriptText((_6 = child.config) === null || _6 === void 0 ? void 0 : _6.toolId) === mainToolCall.function.name) {
                toolChild = child;
                break;
            }

package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js
CHANGED

@@ -35,35 +35,6 @@ const createToolDefinitions = async (childConfigs, api, useStrict) => {
            continue;
        }
        const toolId = child.config.toolId;
-        if ((child.type === "aiAgentJobTool" || child.type === "llmPromptTool") &&
-            (!child.config.condition || !!await api.parseCognigyScriptCondition(child.config.condition))) {
-            if (!toolId) {
-                throw new Error(`Tool ID is missing in Tool Node configuration.`);
-            }
-            const parsedToolId = await api.parseCognigyScriptText(toolId);
-            if (!(0, createSystemMessage_1.validateToolId)(parsedToolId)) {
-                throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
-            }
-            if (toolIds.includes(parsedToolId)) {
-                throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
-            }
-            toolIds.push(parsedToolId);
-            toolNames.push(parsedToolId);
-            const tool = {
-                type: "function",
-                function: {
-                    name: parsedToolId,
-                    description: await api.parseCognigyScriptText(child.config.description),
-                },
-            };
-            if (useStrict) {
-                tool.function.strict = true;
-            }
-            if (child.config.useParameters) {
-                tool.function.parameters = child.config.parameters;
-            }
-            tools.push(tool);
-        }
        if ((child.type === "aiAgentJobMCPTool" || child.type === "llmPromptMCPTool") &&
            (!child.config.condition || !!await api.parseCognigyScriptCondition(child.config.condition))) {
            if (!child.config.mcpServerUrl) {

@@ -76,13 +47,16 @@ const createToolDefinitions = async (childConfigs, api, useStrict) => {
            const toolFilter = child.config.toolFilter;
            const mcpHeaders = child.config.mcpHeaders;
            let mcpTools = null;
+            let fetchedFromCache = null;
            try {
-
+                const fetched = await api.fetchMcpTools({
                    mcpServerUrl,
                    timeout,
                    cacheTools,
                    mcpHeaders,
                });
+                mcpTools = fetched.tools;
+                fetchedFromCache = fetched.fromCache;
            }
            catch (error) {
                const errorDetails = error instanceof Error
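
The hunk above also changes how the result of api.fetchMcpTools is consumed: the helper now reads an object that carries the tool list together with a cache flag, instead of a bare array. A minimal sketch of the consuming side under that assumption (the exact return type is defined in the package typings, not in this diff):

    // Shape inferred from this diff; illustrative only, meant to run inside the helper's async scope.
    const fetched = await api.fetchMcpTools({ mcpServerUrl, timeout, cacheTools, mcpHeaders });
    const mcpTools = fetched.tools;             // MCP tool definitions
    const fetchedFromCache = fetched.fromCache; // true when the list was served from the cache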

@@ -95,11 +69,12 @@ const createToolDefinitions = async (childConfigs, api, useStrict) => {
            }
            if (mcpTools) {
                if (sendDebug) {
+                    const sourceLabel = fetchedFromCache ? "from cache" : "from MCP server";
                    if (mcpTools.length === 0) {
-                        (_b = api.logDebugMessage) === null || _b === void 0 ? void 0 : _b.call(api, `No tools fetched from MCP Tool "${child.config.name}".`, "MCP Tool");
+                        (_b = api.logDebugMessage) === null || _b === void 0 ? void 0 : _b.call(api, `No tools fetched from MCP Tool "${child.config.name}" (${sourceLabel}).`, "MCP Tool");
                    }
                    if (mcpTools.length > 0) {
-                        const messageLines = [`Fetched tools from MCP Tool "${child.config.name}"`];
+                        const messageLines = [`Fetched tools from MCP Tool "${child.config.name}" (${sourceLabel})`];
                        mcpTools.forEach((tool) => {
                            messageLines.push(`<br>- <b>${tool.name}</b>: ${tool.description}`);
                            if (child.config.debugMessageParameters && tool.inputSchema) {

@@ -164,6 +139,34 @@ const createToolDefinitions = async (childConfigs, api, useStrict) => {
                tools.push(...structuredMcpTools);
            }
        }
+        if (!["llmPromptMCPTool", "aiAgentJobMCPTool"].includes(child.type) && (!child.config.condition || !!await api.parseCognigyScriptCondition(child.config.condition))) {
+            if (!toolId) {
+                throw new Error(`Tool ID is missing in Tool Node configuration.`);
+            }
+            const parsedToolId = await api.parseCognigyScriptText(toolId);
+            if (!(0, createSystemMessage_1.validateToolId)(parsedToolId)) {
+                throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
+            }
+            if (toolIds.includes(parsedToolId)) {
+                throw new Error(`Tool ID ${parsedToolId} is not unique. Please ensure each tool has a unique id.`);
+            }
+            toolIds.push(parsedToolId);
+            toolNames.push(parsedToolId);
+            const tool = {
+                type: "function",
+                function: {
+                    name: parsedToolId,
+                    description: await api.parseCognigyScriptText(child.config.description),
+                },
+            };
+            if (useStrict) {
+                tool.function.strict = true;
+            }
+            if (child.config.useParameters) {
+                tool.function.parameters = child.config.parameters;
+            }
+            tools.push(tool);
+        }
    }
    ;
    return {