@cognigy/rest-api-client 2025.21.0 → 2025.23.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/CHANGELOG.md +15 -0
  2. package/build/apigroups/SimulationAPIGroup_2_0.js +6 -1
  3. package/build/shared/charts/descriptors/analytics/trackGoal.js +2 -2
  4. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderOauth2Connection.js +3 -1
  5. package/build/shared/charts/descriptors/index.js +1 -0
  6. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +5 -5
  7. package/build/shared/charts/descriptors/service/GPTPrompt.js +4 -4
  8. package/build/shared/charts/descriptors/service/agentTools/knowledgeTool.js +340 -0
  9. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +7 -5
  10. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +35 -32
  11. package/build/shared/charts/descriptors/service/index.js +3 -1
  12. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +7 -5
  13. package/build/shared/charts/descriptors/voice/mappers/msTeamsTransfer.mapper.js +36 -0
  14. package/build/shared/charts/descriptors/voicegateway2/index.js +16 -11
  15. package/build/shared/charts/descriptors/voicegateway2/nodes/msTeamsTransfer.js +126 -0
  16. package/build/shared/interfaces/IProfile.js +5 -1
  17. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +3 -0
  18. package/build/shared/interfaces/resources/IAuditEvent.js +6 -1
  19. package/build/shared/interfaces/resources/IChartNode.js +7 -1
  20. package/build/shared/interfaces/resources/INodeDescriptorSet.js +11 -1
  21. package/build/shared/interfaces/resources/settings/IAgentSettings.js +12 -7
  22. package/build/shared/interfaces/resources/settings/IPiiDataRedactionSettings.js +142 -0
  23. package/build/shared/interfaces/resources/settings/index.js +4 -1
  24. package/build/shared/interfaces/restAPI/resources/project/v2.0/settings/IAgentSettings_2_0.js +104 -0
  25. package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaFromTranscriptRest_2_0.js +20 -0
  26. package/build/shared/interfaces/restAPI/simulation/simulation/ICloneSimulationRest_2_0.js +3 -0
  27. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +6 -1
  28. package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +2 -2
  29. package/dist/esm/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderOauth2Connection.js +3 -1
  30. package/dist/esm/shared/charts/descriptors/index.js +2 -1
  31. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +5 -5
  32. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +4 -4
  33. package/dist/esm/shared/charts/descriptors/service/agentTools/knowledgeTool.js +338 -0
  34. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +7 -5
  35. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +35 -32
  36. package/dist/esm/shared/charts/descriptors/service/index.js +1 -0
  37. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +7 -5
  38. package/dist/esm/shared/charts/descriptors/voice/mappers/msTeamsTransfer.mapper.js +33 -0
  39. package/dist/esm/shared/charts/descriptors/voicegateway2/index.js +16 -11
  40. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/msTeamsTransfer.js +124 -0
  41. package/dist/esm/shared/interfaces/IProfile.js +4 -0
  42. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +3 -0
  43. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +6 -1
  44. package/dist/esm/shared/interfaces/resources/IChartNode.js +6 -0
  45. package/dist/esm/shared/interfaces/resources/INodeDescriptorSet.js +11 -1
  46. package/dist/esm/shared/interfaces/resources/settings/IAgentSettings.js +16 -11
  47. package/dist/esm/shared/interfaces/resources/settings/IPiiDataRedactionSettings.js +139 -0
  48. package/dist/esm/shared/interfaces/resources/settings/index.js +5 -4
  49. package/dist/esm/shared/interfaces/restAPI/resources/project/v2.0/settings/IAgentSettings_2_0.js +103 -1
  50. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaFromTranscriptRest_2_0.js +19 -0
  51. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/ICloneSimulationRest_2_0.js +2 -0
  52. package/package.json +1 -1
  53. package/types/index.d.ts +317 -36
@@ -1,3 +1,107 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.EPiiBehaviorType = void 0;
+ /**
+ * @openapi
+ *
+ * components:
+ * schemas:
+ * EPiiBehaviorType:
+ * type: string
+ * description: PII redaction behavior types
+ * enum:
+ * - predefined-alias
+ * - custom-alias
+ *
+ * IRedactionBehavior:
+ * oneOf:
+ * - type: object
+ * properties:
+ * type:
+ * type: string
+ * enum: [predefined-alias]
+ * customAlias:
+ * type: string
+ * nullable: true
+ * required: [type, customAlias]
+ * - type: object
+ * properties:
+ * type:
+ * type: string
+ * enum: [custom-alias]
+ * customAlias:
+ * type: string
+ * description: Custom alias to replace PII data with
+ * required: [type, customAlias]
+ *
+ * IRedactionScope:
+ * type: object
+ * properties:
+ * logs:
+ * type: boolean
+ * description: Whether to redact PII in logs
+ * analytics:
+ * type: boolean
+ * description: Whether to redact PII in analytics
+ * required: [logs, analytics]
+ *
+ * IPiiFieldSettings:
+ * type: object
+ * properties:
+ * enabled:
+ * type: boolean
+ * description: Whether PII redaction is enabled for this field type
+ * behavior:
+ * $ref: '#/components/schemas/IRedactionBehavior'
+ * scope:
+ * $ref: '#/components/schemas/IRedactionScope'
+ * name:
+ * type: string
+ * description: Display name for the PII field type
+ * required: [enabled, behavior, scope, name]
+ *
+ * ICustomPatternSettings:
+ * type: object
+ * properties:
+ * behavior:
+ * $ref: '#/components/schemas/IRedactionBehavior'
+ * scope:
+ * $ref: '#/components/schemas/IRedactionScope'
+ * name:
+ * type: string
+ * description: Display name for the custom PII field type
+ * regex:
+ * type: string
+ * description: Regular expression pattern to match custom PII data (RE2 syntax)
+ * minLength: 1
+ * maxLength: 2000
+ * required: [behavior, scope, name, regex]
+ *
+ * IPiiDataRedactionSettings_2_0:
+ * type: object
+ * properties:
+ * emailAddress:
+ * $ref: '#/components/schemas/IPiiFieldSettings'
+ * phoneNumber:
+ * $ref: '#/components/schemas/IPiiFieldSettings'
+ * creditCard:
+ * $ref: '#/components/schemas/IPiiFieldSettings'
+ * ssn:
+ * $ref: '#/components/schemas/IPiiFieldSettings'
+ * ipAddressV4:
+ * $ref: '#/components/schemas/IPiiFieldSettings'
+ * ipAddressV6:
+ * $ref: '#/components/schemas/IPiiFieldSettings'
+ * customTypes:
+ * type: object
+ * additionalProperties:
+ * $ref: '#/components/schemas/ICustomPatternSettings'
+ * description: User-defined custom PII patterns with regex matching
+ * required: [emailAddress, phoneNumber, creditCard, ssn, ipAddressV4, ipAddressV6]
+ */
+ var EPiiBehaviorType;
+ (function (EPiiBehaviorType) {
+ EPiiBehaviorType["PREDEFINED_ALIAS"] = "predefined-alias";
+ EPiiBehaviorType["CUSTOM_ALIAS"] = "custom-alias";
+ })(EPiiBehaviorType = exports.EPiiBehaviorType || (exports.EPiiBehaviorType = {}));
  //# sourceMappingURL=IAgentSettings_2_0.js.map
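
Note: for orientation, a settings object conforming to the IPiiDataRedactionSettings_2_0 schema documented above might look like the following sketch (TypeScript; all values, the employeeId key, and its regex are illustrative assumptions, not package defaults):

    // Sketch only: shape follows the OpenAPI comment above, values are made up.
    const piiFieldDefaults = {
        enabled: true,
        behavior: { type: "predefined-alias" as const, customAlias: null },
        scope: { logs: true, analytics: true },
        name: "Email Address"
    };
    const piiDataRedactionSettings = {
        emailAddress: piiFieldDefaults,
        phoneNumber: { ...piiFieldDefaults, name: "Phone Number" },
        creditCard: { ...piiFieldDefaults, name: "Credit Card" },
        ssn: { ...piiFieldDefaults, name: "SSN" },
        ipAddressV4: { ...piiFieldDefaults, name: "IPv4 Address" },
        ipAddressV6: { ...piiFieldDefaults, name: "IPv6 Address" },
        customTypes: {
            employeeId: {
                behavior: { type: "custom-alias" as const, customAlias: "[EMPLOYEE_ID]" },
                scope: { logs: true, analytics: false },
                name: "Employee ID",
                regex: "EMP-[0-9]{6}" // RE2 syntax, 1-2000 chars per the schema
            }
        }
    };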
@@ -0,0 +1,20 @@
+ "use strict";
+ /**
+ * POST /testing/beta/personas/from-transcript
+ *
+ * Create a scenario draft by analyzing a conversation transcript. The system uses LLM to extract user intent,
+ * conversation goals, and user characteristics from the transcript, then generates an appropriate persona package.
+ *
+ * The endpoint supports:
+ * - Fetching transcript via sessionId from analytics service (RPC call)
+ * - Direct transcript content via transcriptContent
+ * - Optional flow context for enhanced persona generation
+ * - Content safety validation (warnings only, non-blocking)
+ *
+ * Priority: If both sessionId and transcriptContent are provided, sessionId takes precedence.
+ *
+ * operationId: createScenarioFromTranscript
+ * tags: Personas
+ */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ //# sourceMappingURL=IGeneratePersonaFromTranscriptRest_2_0.js.map
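
Note: based on the comment above, a request body for the new endpoint might look like one of the following sketches (only sessionId and transcriptContent are named in the comment; the concrete values here are illustrative):

    // Variant 1: let the service fetch the transcript by session id (RPC to analytics).
    const bySession = { sessionId: "my-session-id" };
    // Variant 2: pass the transcript text directly.
    const byContent = { transcriptContent: "User: I want to cancel my order.\nAgent: ..." };
    // If both fields are sent, sessionId takes precedence.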
@@ -0,0 +1,3 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ //# sourceMappingURL=ICloneSimulationRest_2_0.js.map
@@ -22,6 +22,10 @@ export function SimulationAPIGroup_2_0(instance) {
  var { simulationReference } = _a, args = __rest(_a, ["simulationReference"]);
  return GenericAPIFn(`/testing/beta/simulations/${simulationReference}/schedule`, "POST", self)(args, options);
  },
+ cloneSimulation: (_a, options) => {
+ var { simulationReference } = _a, args = __rest(_a, ["simulationReference"]);
+ return GenericAPIFn(`/testing/beta/simulations/${simulationReference}/clone`, "POST", self)(args, options);
+ },
  indexSimulationRunBatches: (args, options) => GenericAPIFn(`/testing/beta/simulations/batches?${stringifyQuery(args)}`, "GET", self)(undefined, options),
  getAllSimulationRunBatches: (_a, options) => {
  var { simulationReference } = _a, args = __rest(_a, ["simulationReference"]);
@@ -46,7 +50,8 @@ export function SimulationAPIGroup_2_0(instance) {
  getPersonaOptions: (args, options) => GenericAPIFn("/testing/beta/personas/options", "POST", self)(args, options),
  getPersonaOptions (continued context omitted in source) 
  generatePersona: (args, options) => GenericAPIFn("/testing/beta/personas/generate", "POST", self)(args, options),
  regeneratePersonaField: (args, options) => GenericAPIFn("/testing/beta/personas/regenerate-field", "POST", self)(args, options),
- generateBulkPersona: (args, options) => GenericAPIFn("/testing/beta/personas/generate-bulk", "POST", self)(args, options)
+ generateBulkPersona: (args, options) => GenericAPIFn("/testing/beta/personas/generate-bulk", "POST", self)(args, options),
+ generatePersonaFromTranscript: (args, options) => GenericAPIFn("/testing/beta/personas/from-transcript", "POST", self)(args, options)
  };
  }
  //# sourceMappingURL=SimulationAPIGroup_2_0.js.map
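
Note: a hedged usage sketch of the two methods added to the simulation API group (it assumes the group's methods are reachable on an initialized rest-api-client instance named client; the argument shapes beyond simulationReference are not shown in this diff and are assumptions):

    // POST /testing/beta/simulations/:simulationReference/clone
    await client.cloneSimulation({ simulationReference: "my-simulation-id" });
    // POST /testing/beta/personas/from-transcript
    await client.generatePersonaFromTranscript({ transcriptContent: "User: Hello ..." });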
@@ -2,7 +2,7 @@ import { __awaiter } from "tslib";
  /* Custom modules */
  import { createNodeDescriptor } from "../../createNodeDescriptor";
  /* Npm modules */
- import { v4 as uuidv4 } from "uuid";
+ import * as crypto from "crypto";
  /**
  * Node name: 'trackGoal'
  *
@@ -56,7 +56,7 @@ export const TRACK_GOAL = createNodeDescriptor({
  }
  // Create a new cycle id if there is a start step
  if (hasStartStep) {
- cycleId = uuidv4();
+ cycleId = crypto.randomUUID();
  activeCycleIds[goalId] = cycleId;
  const sessionStateAnalyticsUpdate = Object.assign(Object.assign({}, sessionState.analytics), { goalCycleIds: activeCycleIds });
  api.setSessionState("analytics", sessionStateAnalyticsUpdate);
@@ -5,7 +5,9 @@ export const AZURE_OPEN_AI_OAUTH2_PROVIDER_CONNECTION = {
  { fieldName: "clientId", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_ID" },
  { fieldName: "clientSecret", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_SECRET" },
  { fieldName: "oauthUrl", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_URL" },
- { fieldName: "scope", label: "UI__CONNECTION_EDITOR__FIELD_SCOPE" }
+ { fieldName: "scope", label: "UI__CONNECTION_EDITOR__FIELD_SCOPE" },
+ { fieldName: "additionalHeaderName", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_ADDITIONAL_HEADER_NAME", required: false },
+ { fieldName: "additionalHeaderValue", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_ADDITIONAL_HEADER_VALUE", required: false }
  ]
  };
  //# sourceMappingURL=azureOpenAIProviderOauth2Connection.js.map
@@ -11,7 +11,7 @@ import { REGEX_SLOT_FILLER, EXECUTE_COGNIGY_NLU, ADD_LEXICON_KEYPHRASE, FUZZY_SE
  import { KNOWLEDGE_SEARCH, KNOWLEDGE_SEARCH_V2, SEARCH_EXTRACT_OUTPUT } from "./knowledgeSearch";
  import { CONTINUOUS_ASR, DTMF, HANG_UP, PLAY, TRANSFER_VOICE, SESSION_SPEECH_PARAMETERS, USER_INPUT_TIMEOUT, SEND_METADATA, BARGE_IN, MUTE_SPEECH_INPUT, } from "./voice";
  import { ACTIVATE_PROFILE, COMPLETE_GOAL, DEACTIVATE_PROFILE, DELETE_PROFILE, MERGE_PROFILE, UPDATE_PROFILE, ADD_MEMORY, BLIND_MODE, OVERWRITE_ANALYTICS, SET_RATING, REQUEST_RATING, TRACK_GOAL, } from "./analytics";
- import { HANDOVER, HANDOVER_V2, CHECK_AGENT_AVAILABILITY, HTTP_REQUEST, HTTP_CONNECTION_BASIC, HTTP_CONNECTION_APIKEYAUTHKEY, HTTP_CONNECTION_APIKEYXKEY, HTTP_CONNECTION_OAUTH2, JWT_SECRET_CONNECTION, TRIGGER_FUNCTION, ON_SCHEDULING_ERROR, ON_SCHEDULED, GPT_PROMPT, LLM_PROMPT_V2, LLM_PROMPT_DEFAULT, LLM_PROMPT_MCP_TOOL, LLM_PROMPT_TOOL, CLOSE_HANDOVER, HANDOVER_INACTIVITY_TIMER, GPT_CONVERSATION, GPT_CONVERSATION_SUMMARY, LLM_ENTITY_EXTRACT, AI_AGENT_JOB, AI_AGENT_JOB_DEFAULT, AI_AGENT_JOB_TOOL, AI_AGENT_JOB_MCP_TOOL, AI_AGENT_JOB_CALL_MCP_TOOL, AI_AGENT_TOOL_ANSWER, AI_AGENT_HANDOVER, LIVE_AGENT_CONNECTION, RINGCENTRAL_ENGAGE_CONNECTION, CHATWOOT_CONNECTION, EIGHT_BY_EIGHT_CONNECTION, GENESYS_CLOUD_CONNECTION, GENESYS_CLOUD_CONNECTION_OM, LLM_MODERATE, NICECXONEAAH_AUTHENTICATION_CONNECTION, LOAD_AI_AGENT, AIOPS_CENTER_WEBHOOKS_CONNECTION } from "./service";
+ import { HANDOVER, HANDOVER_V2, CHECK_AGENT_AVAILABILITY, HTTP_REQUEST, HTTP_CONNECTION_BASIC, HTTP_CONNECTION_APIKEYAUTHKEY, HTTP_CONNECTION_APIKEYXKEY, HTTP_CONNECTION_OAUTH2, JWT_SECRET_CONNECTION, TRIGGER_FUNCTION, ON_SCHEDULING_ERROR, ON_SCHEDULED, GPT_PROMPT, LLM_PROMPT_V2, LLM_PROMPT_DEFAULT, LLM_PROMPT_MCP_TOOL, LLM_PROMPT_TOOL, CLOSE_HANDOVER, HANDOVER_INACTIVITY_TIMER, GPT_CONVERSATION, GPT_CONVERSATION_SUMMARY, LLM_ENTITY_EXTRACT, AI_AGENT_JOB, AI_AGENT_JOB_DEFAULT, AI_AGENT_JOB_TOOL, AI_AGENT_JOB_MCP_TOOL, AI_AGENT_JOB_CALL_MCP_TOOL, AI_AGENT_TOOL_ANSWER, KNOWLEDGE_TOOL, AI_AGENT_HANDOVER, LIVE_AGENT_CONNECTION, RINGCENTRAL_ENGAGE_CONNECTION, CHATWOOT_CONNECTION, EIGHT_BY_EIGHT_CONNECTION, GENESYS_CLOUD_CONNECTION, GENESYS_CLOUD_CONNECTION_OM, LLM_MODERATE, NICECXONEAAH_AUTHENTICATION_CONNECTION, LOAD_AI_AGENT, AIOPS_CENTER_WEBHOOKS_CONNECTION } from "./service";
  import { INIT_APP_SESSION, GET_APP_SESSION_PIN, SET_HTML_APP_STATE, SET_ADAPTIVE_CARD_APP_STATE, } from "./apps";
  import { SET_IFRAME_TILE, SET_HTML_TILE, SEND_TILE_DATA, SET_SECURE_FORMS_TILE, SET_ADAPTIVE_CARD_TILE, SET_AGENT_ASSIST_GRID, NEXT_ACTION_ASSIST, SENTIMENT_ASSIST, TRANSCRIPT_ASSIST, IDENTITY_ASSIST, KNOWLEDGE_ASSIST, } from "./agentAssist";
  import { ASSIST_INFO } from "./liveAgent";
@@ -146,6 +146,7 @@ if (process.env.DISABLE_FEATURE_TRANSCRIPT_MANAGER !== "true") {
  nodes.push(AI_AGENT_JOB_MCP_TOOL);
  nodes.push(AI_AGENT_JOB_CALL_MCP_TOOL);
  nodes.push(AI_AGENT_TOOL_ANSWER);
+ nodes.push(KNOWLEDGE_TOOL);
  nodes.push(AI_AGENT_HANDOVER);
  nodes.push(LOAD_AI_AGENT);
  }
@@ -21,8 +21,8 @@ const getLanguageName = (languageCode) => {
  // remove the "region" part from language codes (e.g. en-US becomes en, the -US gets stripped)
  const languageOnly = languageCode.split('-')[0];
  // our runtimes support this, but it's not reflected in every project's typings!
- // this flag is used to reset a breaking-change behavior (returning "flamish" instead of "dutch (belgium)") to its former behavior
- // @ts-ignore
+ // this flag is used to reset a breaking-change behavior (returning "flamish" instead of "dutch (belgium)") to its former behavior
+ // @ts-ignore
  const languageName = (new Intl.DisplayNames(["en-US"], { type: "language", languageDisplay: "standard", fallback: 'none' }).of(languageOnly));
  return languageName;
  };
@@ -654,7 +654,7 @@ export const SEARCH_EXTRACT_OUTPUT = createNodeDescriptor({
  // add error to context or input
  switch (searchStoreLocation) {
  case "context":
- // @ts-ignore
+ // @ts-ignore
  api.addToContext(searchStoreLocationContextKey, searchStoreDataWithError, "simple");
  break;
  default:
@@ -736,7 +736,7 @@ export const SEARCH_EXTRACT_OUTPUT = createNodeDescriptor({
  // Save result
  switch (searchStoreLocation) {
  case "context":
- // @ts-ignore
+ // @ts-ignore
  api.addToContext(searchStoreLocationContextKey, knowledgeSearchResponseData, "simple");
  break;
  case "default":
@@ -765,7 +765,7 @@ export const SEARCH_EXTRACT_OUTPUT = createNodeDescriptor({
  if (documents && (mode !== "s")) {
  // check if we received streamed output at all
  let streamedOutput = false;
- const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
+ const isStreamingChannel = input.endpointType === "webchat3" || input.channel === "adminconsole";
  const _messageId = randomUUID();
  const promptData = {
  prompt,
@@ -2,7 +2,7 @@ import { __awaiter } from "tslib";
  /* Custom modules */
  import { createNodeDescriptor } from "../../createNodeDescriptor";
  import { GO_TO } from "../logic";
- import { v4 as randomUUID } from "uuid";
+ import * as crypto from "crypto";
  import { createLastConverationString, createLastConversationChatObject, createLastUserInputString, writeLLMDebugLogs } from "../nlu/generativeSlotFiller/prompt";
  import { InternalServerError } from "../../../errors";
  import { TranscriptEntryType, TranscriptRole } from "../../../interfaces/transcripts/transcripts";
@@ -619,7 +619,7 @@ export const GPT_PROMPT = createNodeDescriptor({
  // add error to context or input
  switch (storeLocation) {
  case "context":
- // @ts-ignore
+ // @ts-ignore
  api.addToContext(contextKey, errorResponse, "simple");
  break;
  default:
@@ -657,8 +657,8 @@ export const GPT_PROMPT = createNodeDescriptor({
  }
  });
  try {
- const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
- const _messageId = randomUUID();
+ const isStreamingChannel = input.endpointType === "webchat3" || input.channel === "adminconsole";
+ const _messageId = crypto.randomUUID();
  const data = {
  prompt,
  temperature,
@@ -0,0 +1,338 @@
+ import { __awaiter } from "tslib";
+ /* Custom modules */
+ import { createNodeDescriptor } from "../../../createNodeDescriptor";
+ import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
+ export const KNOWLEDGE_TOOL = createNodeDescriptor({
+ type: "knowledgeTool",
+ defaultLabel: "Knowledge Tool",
+ parentType: ["aiAgentJob", "llmPromptV2"],
+ constraints: {
+ editable: true,
+ deletable: true,
+ collapsable: true,
+ creatable: true,
+ movable: true,
+ placement: {
+ predecessor: {
+ whitelist: []
+ }
+ },
+ childFlowCreatable: false
+ },
+ behavior: {
+ stopping: true
+ },
+ preview: {
+ type: "text",
+ key: "toolId"
+ },
+ fields: [
+ {
+ key: "knowledgeStoreId",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__KNOWLEDGE_STORE__LABEL",
+ type: "knowledgeStoreSelect",
+ },
+ {
+ key: "toolId",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__TOOL_ID__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__TOOL_ID__DESCRIPTION",
+ type: "cognigyLLMText",
+ defaultValue: "retrieve_knowledge_and_data",
+ params: {
+ required: true,
+ rows: 1,
+ multiline: false,
+ maxLength: 64,
+ }
+ },
+ {
+ key: "description",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DESCRIPTION__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DESCRIPTION__DESCRIPTION",
+ type: "cognigyLLMText",
+ defaultValue: "Find the answer to general prompts or questions searching the attached data sources. It focuses exclusively on a knowledge search and does not execute tasks like small talk, calculations, or script running.",
+ params: {
+ required: true,
+ rows: 5,
+ multiline: true
+ }
+ },
+ {
+ key: "parameters",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__PARAMETERS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__PARAMETERS__DESCRIPTION",
+ type: "toolParameters",
+ defaultValue: `{
+ "type": "object",
+ "properties": {
+ "generated_prompt": {
+ "type": "string",
+ "description": "Generated question including the context of the conversation (I want to know...)."
+ },
+ "generated_buffer_phrase": {
+ "type": "string",
+ "description": "A generated delay or stalling phrase. Consider the context. Adapt to your speech style and language."
+ },
+ },
+ "required": ["generated_prompt", "generated_buffer_phrase"],
+ "additionalProperties": false
+ }`,
+ params: {
+ required: false,
+ },
+ },
+ {
+ key: "debugMessage",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DEBUG_MESSAGE__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__DEBUG_MESSAGE__DESCRIPTION",
+ defaultValue: true,
+ },
+ {
+ key: "condition",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__CONDITION__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_TOOL__FIELDS__CONDITION__DESCRIPTION",
+ type: "cognigyText",
+ defaultValue: "",
+ },
+ {
+ key: "topK",
+ type: "slider",
+ label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__TOP_K__LABEL",
+ description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__TOP_K__DESCRIPTION",
+ defaultValue: 5,
+ params: {
+ min: 1,
+ max: 10
+ }
+ },
+ {
+ key: "storeLocation",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__STORE_LOCATION__LABEL",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__STORE_LOCATION__OPTIONS__NONE__LABEL",
+ value: "none"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__STORE_LOCATION__OPTIONS__INPUT__LABEL",
+ value: "input"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_KNOWLEDGE_TOOL__FIELDS__STORE_LOCATION__OPTIONS__CONTEXT__LABEL",
+ value: "context"
+ }
+ ],
+ },
+ defaultValue: "none"
+ },
+ {
+ key: "storeLocationInputKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__INPUT_KEY__LABEL",
+ description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__INPUT_KEY__DESCRIPTION",
+ defaultValue: "knowledgeSearch",
+ condition: {
+ key: "storeLocation",
+ value: "input"
+ }
+ },
+ {
+ key: "storeLocationContextKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__CONTEXT_KEY__LABEL",
+ description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__CONTEXT_KEY__DESCRIPTION",
+ defaultValue: "knowledgeSearch",
+ condition: {
+ key: "storeLocation",
+ value: "context"
+ }
+ },
+ {
+ key: "sourceTags",
+ type: "knowledgeSourceTags",
+ label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__SOURCE_TAGS__LABEL",
+ description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__SOURCE_TAGS__DESCRIPTION",
+ params: {
+ tagLimit: 5
+ }
+ },
+ {
+ key: "sourceTagsFilterOp",
+ type: "select",
+ label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__SOURCE_TAGS_FILTER_OP__LABEL",
+ description: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__DESCRIPTION",
+ defaultValue: "and",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__OPTIONS__AND__LABEL",
+ value: "and"
+ },
+ {
+ label: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__OPTIONS__OR__LABEL",
+ value: "or"
+ },
+ ]
+ }
+ },
+ ],
+ sections: [
+ {
+ key: "debugging",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__DEBUG_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "debugMessage",
+ ],
+ },
+ {
+ key: "advanced",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "topK",
+ "storeLocation",
+ "storeLocationInputKey",
+ "storeLocationContextKey",
+ "sourceTags",
+ "sourceTagsFilterOp",
+ "condition",
+ ],
+ },
+ ],
+ form: [
+ { type: "field", key: "knowledgeStoreId" },
+ { type: "field", key: "toolId" },
+ { type: "field", key: "description" },
+ { type: "section", key: "debugging" },
+ { type: "section", key: "advanced" },
+ ],
+ appearance: {
+ color: "white",
+ textColor: "#252525",
+ variant: "mini",
+ },
+ function: ({ cognigy, config, nodeId: thisNodeId }) => __awaiter(void 0, void 0, void 0, function* () {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o;
+ const { api, context, input } = cognigy;
+ const { knowledgeStoreId, debugMessage, topK, storeLocation, storeLocationInputKey, storeLocationContextKey, sourceTags, sourceTagsFilterOp, } = config;
+ const sessionState = yield api.loadSessionState();
+ const toolCall = (_a = sessionState.lastToolCall) === null || _a === void 0 ? void 0 : _a.toolCall;
+ const aiAgentJobNode = (_b = sessionState.lastToolCall) === null || _b === void 0 ? void 0 : _b.aiAgentJobNode;
+ if (!(toolCall === null || toolCall === void 0 ? void 0 : toolCall.id)) {
+ (_c = api.logDebugError) === null || _c === void 0 ? void 0 : _c.call(api, "UI__DEBUG_MODE__AI_AGENT_ANSWER__ERROR__MESSAGE");
+ }
+ if (toolCall && aiAgentJobNode && knowledgeStoreId && (input.text || ((_e = (_d = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _d === void 0 ? void 0 : _d.arguments) === null || _e === void 0 ? void 0 : _e.generated_prompt))) {
+ if (!((_f = api.checkThink) === null || _f === void 0 ? void 0 : _f.call(api, thisNodeId))) {
+ let query = ((_h = (_g = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _g === void 0 ? void 0 : _g.arguments) === null || _h === void 0 ? void 0 : _h.generated_prompt) || input.text;
+ const data = {
+ language: input.language,
+ query,
+ topK,
+ traceId: input.traceId,
+ disableSensitiveLogging: false,
+ knowledgeStoreIds: [knowledgeStoreId],
+ };
+ const generated_buffer_phrase = (_k = (_j = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _j === void 0 ? void 0 : _j.arguments) === null || _k === void 0 ? void 0 : _k.generated_buffer_phrase;
+ if (generated_buffer_phrase) {
+ // output the generated buffer phrase. Don't add it to the transcript, else the LLM will repeat it next time.
+ yield ((_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, generated_buffer_phrase, {
+ _cognigy: {
+ _preventTranscript: true
+ }
+ }));
+ }
+ if (sourceTags && sourceTags.length > 0) {
+ // convert each knowledgeSourceTag to a string
+ sourceTags.forEach((tag, index) => {
+ sourceTags[index] = tag.toString();
+ });
+ data.tagsData = {
+ tags: sourceTags,
+ op: sourceTagsFilterOp
+ };
+ }
+ const knowledgeSearchResponse = yield api.knowledgeSearch(data);
+ // Handle possible response errors
+ if ((knowledgeSearchResponse === null || knowledgeSearchResponse === void 0 ? void 0 : knowledgeSearchResponse.status) !== "success") {
+ const errorMessage = (knowledgeSearchResponse === null || knowledgeSearchResponse === void 0 ? void 0 : knowledgeSearchResponse.error) || "empty";
+ throw new Error(`Error while performing knowledge search. Remote returned error: ${errorMessage}`);
+ }
+ // Store full response data in input or context
+ if (storeLocation === "input" && storeLocationInputKey) {
+ input[storeLocationInputKey] = knowledgeSearchResponse;
+ }
+ else if (storeLocation === "context" && storeLocationContextKey) {
+ context[storeLocationContextKey] = knowledgeSearchResponse;
+ }
+ const knowledgeSearchResponseData = knowledgeSearchResponse.data;
+ // Optional Debug Message of Knowledge Search Results
+ if (debugMessage) {
+ const messageLines = [];
+ if (query) {
+ messageLines.push(`\n<b>UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__SEARCH_PROMPT</b> ${query}`);
+ }
+ if ((_m = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _m === void 0 ? void 0 : _m.length) {
+ knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK.forEach((result, index) => {
+ var _a;
+ messageLines.push(`\nTop ${index + 1}:`);
+ messageLines.push(`Distance: ${result.distance}`);
+ messageLines.push(`Source Name: ${(_a = result.sourceMetaData) === null || _a === void 0 ? void 0 : _a.sourceName}`);
+ messageLines.push(`Text: ${result.text}`);
+ });
+ }
+ else {
+ messageLines.push("UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__NO_RESULTS");
+ }
+ (_o = api.logDebugMessage) === null || _o === void 0 ? void 0 : _o.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__KNOWLEDGE_SEARCH__HEADER");
+ }
+ const { flow, node } = aiAgentJobNode;
+ if (flow && node) {
+ // Add Tool Call Message to Transcript
+ const toolCallTranscriptStep = {
+ role: TranscriptRole.ASSISTANT,
+ type: TranscriptEntryType.TOOL_CALL,
+ source: "system",
+ payload: {
+ name: toolCall.function.name,
+ id: toolCall.id,
+ input: toolCall.function.arguments,
+ }
+ };
+ yield api.addTranscriptStep(toolCallTranscriptStep);
+ // Add Tool Answer Message to Transcript
+ const toolAnswer = {
+ role: TranscriptRole.TOOL,
+ type: TranscriptEntryType.TOOL_ANSWER,
+ source: "system",
+ payload: {
+ toolCallId: toolCall.id,
+ name: toolCall.function.name,
+ content: `We have this context as answer from the knowledge source:\n${JSON.stringify(knowledgeSearchResponseData)}`,
+ }
+ };
+ yield api.addTranscriptStep(toolAnswer);
+ api.resetNextNodes();
+ // remove the call from the session state, because the call has been answered
+ api.updateSessionStateValues({
+ lastToolCall: undefined
+ });
+ yield api.executeFlow({
+ flowNode: {
+ flow,
+ node,
+ },
+ absorbContext: true,
+ });
+ }
+ }
+ else {
+ throw new Error("Infinite Loop Detected");
+ }
+ }
+ })
+ });
+ //# sourceMappingURL=knowledgeTool.js.map
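
Note: the handler above resolves the pending tool call from session state and reads generated_prompt and generated_buffer_phrase from its arguments, so the tool call it expects to service looks roughly like this sketch (the shape is inferred from the property accesses in the code; the id value is made up):

    const exampleToolCall = {
        id: "call_abc123", // hypothetical id
        function: {
            name: "retrieve_knowledge_and_data", // the node's default toolId
            arguments: {
                generated_prompt: "I want to know how I can reset my password.",
                generated_buffer_phrase: "Let me look that up for you."
            }
        }
    };
    // The node outputs the buffer phrase without adding it to the transcript, runs
    // api.knowledgeSearch with the generated prompt, appends tool-call and tool-answer
    // transcript steps, clears lastToolCall, and resumes the parent AI Agent job flow.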
@@ -1,4 +1,6 @@
  import { __awaiter, __rest } from "tslib";
+ /* Npm modules */
+ import * as crypto from "crypto";
  /* Custom modules */
  import { createNodeDescriptor } from "../../../createNodeDescriptor";
  import { setSessionConfig } from "../../voice/mappers/setSessionConfig.mapper";
@@ -8,9 +10,8 @@ import { createSystemMessage, getCognigyBrandMessage } from "./helpers/createSys
  import { generateSearchPrompt } from "./helpers/generateSearchPrompt";
  import { getUserMemory } from "./helpers/getUserMemory";
  import { createToolDefinitions } from "./helpers/createToolDefinitions";
- import { v4 as randomUUID } from "uuid";
  import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
- export const AI_AGENT_TOOLS_WHITELIST = ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"];
+ export const AI_AGENT_TOOLS_WHITELIST = ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool", "knowledgeTool"];
  export const AI_AGENT_JOB = createNodeDescriptor({
  type: "aiAgentJob",
  defaultLabel: "AI Agent",
@@ -1194,6 +1195,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
  excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT],
  useTextAlternativeForLLM,
+ excludeUserEventMessages: true,
  });
  // For knowledgeSearch "always", we enhance the user input with the knowledge search response data
  if (knowledgeSearchBehavior === "always" &&
@@ -1205,8 +1207,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  const enhancedInput = `## Knowledge Source Context\nAdditional Context from the knowledge source: \n${JSON.stringify(knowledgeSearchResponseData)}\n\n\n${((_z = userInput === null || userInput === void 0 ? void 0 : userInput.payload) === null || _z === void 0 ? void 0 : _z.text) || input.text}`;
  transcript[transcript.length - 1].payload.text = enhancedInput;
  }
- const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
- const _messageId = randomUUID();
+ const isStreamingChannel = input.endpointType === "webchat3" || input.channel === "adminconsole";
+ const _messageId = crypto.randomUUID();
  const enableAdvancedLogging = advancedLogging && loggingWebhookUrl && (conditionForLogging === "" || !!conditionForLogging);
  const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt: "", chat: systemMessage,
  // Temp fix to override the transcript if needed
@@ -1283,7 +1285,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  // Find the child node with the toolId of the tool call
  let toolChild = undefined;
  for (const child of childConfigs) {
- if (child.type === "aiAgentJobTool" && ((_5 = child.config) === null || _5 === void 0 ? void 0 : _5.toolId) && (yield api.parseCognigyScriptText((_6 = child.config) === null || _6 === void 0 ? void 0 : _6.toolId)) === mainToolCall.function.name) {
+ if (!["aiAgentJobDefault", "aiAgentJobMCPTool"].includes(child.type) && ((_5 = child.config) === null || _5 === void 0 ? void 0 : _5.toolId) && (yield api.parseCognigyScriptText((_6 = child.config) === null || _6 === void 0 ? void 0 : _6.toolId)) === mainToolCall.function.name) {
  toolChild = child;
  break;
  }