@cognigy/rest-api-client 2025.12.0 → 2025.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/CHANGELOG.md +10 -0
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +8 -1
  3. package/build/apigroups/SimulationAPIGroup_2_0.js +4 -1
  4. package/build/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  5. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  6. package/build/shared/charts/descriptors/index.js +5 -0
  7. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
  8. package/build/shared/charts/descriptors/message/question/question.js +12 -1
  9. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  10. package/build/shared/charts/descriptors/service/GPTPrompt.js +21 -1
  11. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +71 -175
  12. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  13. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +175 -0
  14. package/build/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +194 -0
  15. package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
  16. package/build/shared/charts/descriptors/service/index.js +11 -1
  17. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +959 -0
  18. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +31 -0
  19. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +196 -0
  20. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +139 -0
  21. package/build/shared/constants.js +1 -5
  22. package/build/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +3 -0
  23. package/build/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  24. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  25. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  26. package/build/shared/interfaces/resources/ISimulation.js +9 -0
  27. package/build/shared/interfaces/resources/TResourceType.js +3 -0
  28. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  29. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -18
  30. package/build/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +3 -0
  31. package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
  32. package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
  33. package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
  34. package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
  35. package/build/shared/interfaces/security/IPermission.js +4 -0
  36. package/build/shared/interfaces/security/IRole.js +5 -1
  37. package/build/shared/interfaces/security/index.js +1 -1
  38. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +8 -1
  39. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -1
  40. package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  41. package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  42. package/dist/esm/shared/charts/descriptors/index.js +6 -1
  43. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
  44. package/dist/esm/shared/charts/descriptors/message/question/question.js +12 -1
  45. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  46. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +21 -1
  47. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +72 -176
  48. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  49. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +172 -0
  50. package/dist/esm/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +192 -0
  51. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
  52. package/dist/esm/shared/charts/descriptors/service/index.js +5 -0
  53. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +946 -0
  54. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +28 -0
  55. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +193 -0
  56. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +136 -0
  57. package/dist/esm/shared/constants.js +1 -5
  58. package/dist/esm/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +2 -0
  59. package/dist/esm/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  60. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  61. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  62. package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
  63. package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
  64. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  65. package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -17
  66. package/dist/esm/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +2 -0
  67. package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
  68. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
  69. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
  71. package/dist/esm/shared/interfaces/security/IPermission.js +4 -0
  72. package/dist/esm/shared/interfaces/security/IRole.js +5 -1
  73. package/dist/esm/shared/interfaces/security/index.js +1 -1
  74. package/package.json +1 -1
  75. package/types/index.d.ts +2093 -1927
@@ -38,7 +38,10 @@ export function SimulationAPIGroup_2_0(instance) {
         readSimulationRun: (_a, options) => {
             var { simulationReference, simulationRunBatchReference, simulationRunReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference", "simulationRunReference"]);
             return GenericAPIFn(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/runs/${simulationRunReference}?${stringifyQuery(args)}`, "GET", self)(undefined, options);
-        }
+        },
+        getPersonaOptions: (args, options) => GenericAPIFn("/testing/beta/personas/options", "POST", self)(args, options),
+        generatePersona: (args, options) => GenericAPIFn("/testing/beta/personas/generate", "POST", self)(args, options),
+        regeneratePersonaField: (args, options) => GenericAPIFn("/testing/beta/personas/regenerate-field", "POST", self)(args, options)
     };
 }
 //# sourceMappingURL=SimulationAPIGroup_2_0.js.map
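
For orientation, a minimal sketch of how the three new persona endpoints might be called through the factory shown above; the `client` instance and every payload field are assumptions, not documented signatures.

    // A minimal sketch, assuming `client` is a configured rest-api-client
    // instance accepted by SimulationAPIGroup_2_0; all payload fields below
    // are hypothetical placeholders.
    async function draftPersona(client: any): Promise<void> {
        const simulation = SimulationAPIGroup_2_0(client);
        // POST /testing/beta/personas/options
        const personaOptions = await simulation.getPersonaOptions({}, undefined);
        // POST /testing/beta/personas/generate
        const persona = await simulation.generatePersona({ description: "impatient first-time caller" }, undefined);
        // POST /testing/beta/personas/regenerate-field
        await simulation.regeneratePersonaField({ personaId: "...", field: "tone" }, undefined);
    }
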
@@ -45,7 +45,7 @@ export const TRACK_GOAL = createNodeDescriptor({
             return;
         }
         const sessionState = api.getSessionStateCopy();
-        const { selectedSteps, version, name, goalId, } = config.goal;
+        const { selectedSteps, referenceId, version, name, goalId, } = config.goal;
         const activeCycleIds = ((_a = sessionState.analytics) === null || _a === void 0 ? void 0 : _a.goalCycleIds) || {};
         let cycleId = activeCycleIds[goalId];
         const hasStartStep = selectedSteps.some(step => step.type === "start");
@@ -73,10 +73,12 @@ export const TRACK_GOAL = createNodeDescriptor({
                 organisationId,
                 sessionId,
                 version,
+                referenceId,
                 timestamp: new Date(),
                 goalCycleId: cycleId,
                 goalId,
                 stepId: step.stepId,
+                stepType: step.type,
                 endpointUrlToken,
                 endpointName,
                 endpointType,
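
Taken together, the two TRACK_GOAL hunks mean each tracked goal step now carries the goal's `referenceId` and the step's `type`. A rough sketch of the resulting event shape; the field list comes from the hunk above, while all types are guesses rather than the package's actual declarations:

    // Assumed event shape; only the field names are confirmed by the diff.
    interface IGoalStepEventSketch {
        organisationId: string;
        sessionId: string;
        version: number;
        referenceId: string;   // new in 2025.14.0
        timestamp: Date;
        goalCycleId: string;
        goalId: string;
        stepId: string;
        stepType: string;      // new in 2025.14.0, e.g. "start" (see the hasStartStep check)
        endpointUrlToken: string;
        endpointName: string;
        endpointType: string;
    }
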
@@ -6,6 +6,12 @@ export const AMAZON_STORAGE_PROVIDER_CONNECTION = {
         { fieldName: "secretAccessKey", label: "UI__CONNECTION_EDITOR__FIELD_SECRET_ACCESS_KEY" },
         { fieldName: "region", label: "UI__CONNECTION_EDITOR__FIELD_REGION" },
         { fieldName: "bucketName", label: "UI__CONNECTION_EDITOR__FIELD_BUCKET_NAME" },
-    ]
+        {
+            fieldName: "customUrl",
+            label: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL",
+            required: false,
+            description: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL_AWS__DESCRIPTION"
+        },
+    ],
 };
 //# sourceMappingURL=amazonStorageProviderConnection.js.map
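
The new optional `customUrl` field suggests the Amazon storage connection can now target S3-compatible endpoints rather than AWS only. A hedged sketch of such a connection payload; only the field names come from the descriptor, and the `accessKeyId` field (presumably declared just above this hunk) plus all values are assumptions:

    const amazonStorageConnection = {
        accessKeyId: "AKIA...",        // assumed field, not visible in the hunk
        secretAccessKey: "...",
        region: "eu-central-1",
        bucketName: "my-bucket",
        // new, optional: custom endpoint, e.g. a MinIO or other
        // S3-compatible deployment
        customUrl: "https://s3.internal.example.com",
    };
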
@@ -11,7 +11,7 @@ import { REGEX_SLOT_FILLER, EXECUTE_COGNIGY_NLU, ADD_LEXICON_KEYPHRASE, FUZZY_SE
 import { KNOWLEDGE_SEARCH, KNOWLEDGE_SEARCH_V2, SEARCH_EXTRACT_OUTPUT } from "./knowledgeSearch";
 import { CONTINUOUS_ASR, DTMF, HANG_UP, PLAY, TRANSFER_VOICE, SESSION_SPEECH_PARAMETERS, USER_INPUT_TIMEOUT, SEND_METADATA, BARGE_IN, MUTE_SPEECH_INPUT, } from "./voice";
 import { ACTIVATE_PROFILE, COMPLETE_GOAL, DEACTIVATE_PROFILE, DELETE_PROFILE, MERGE_PROFILE, UPDATE_PROFILE, ADD_MEMORY, BLIND_MODE, OVERWRITE_ANALYTICS, SET_RATING, REQUEST_RATING, TRACK_GOAL, } from "./analytics";
-import { HANDOVER, HANDOVER_V2, CHECK_AGENT_AVAILABILITY, HTTP_REQUEST, HTTP_CONNECTION_BASIC, HTTP_CONNECTION_APIKEYAUTHKEY, HTTP_CONNECTION_APIKEYXKEY, HTTP_CONNECTION_OAUTH2, JWT_SECRET_CONNECTION, TRIGGER_FUNCTION, ON_SCHEDULING_ERROR, ON_SCHEDULED, GPT_PROMPT, CLOSE_HANDOVER, HANDOVER_INACTIVITY_TIMER, GPT_CONVERSATION, GPT_CONVERSATION_SUMMARY, LLM_ENTITY_EXTRACT, AI_AGENT_JOB, AI_AGENT_JOB_DEFAULT, AI_AGENT_JOB_TOOL, AI_AGENT_JOB_MCP_TOOL, AI_AGENT_JOB_CALL_MCP_TOOL, AI_AGENT_TOOL_ANSWER, AI_AGENT_HANDOVER, LIVE_AGENT_CONNECTION, RINGCENTRAL_ENGAGE_CONNECTION, CHATWOOT_CONNECTION, EIGHT_BY_EIGHT_CONNECTION, GENESYS_CLOUD_CONNECTION, GENESYS_CLOUD_CONNECTION_OM, LLM_MODERATE, NICECXONEAAH_AUTHENTICATION_CONNECTION, AIOPS_CENTER_WEBHOOKS_CONNECTION } from "./service";
+import { HANDOVER, HANDOVER_V2, CHECK_AGENT_AVAILABILITY, HTTP_REQUEST, HTTP_CONNECTION_BASIC, HTTP_CONNECTION_APIKEYAUTHKEY, HTTP_CONNECTION_APIKEYXKEY, HTTP_CONNECTION_OAUTH2, JWT_SECRET_CONNECTION, TRIGGER_FUNCTION, ON_SCHEDULING_ERROR, ON_SCHEDULED, GPT_PROMPT, LLM_PROMPT_V2, LLM_PROMPT_DEFAULT, LLM_PROMPT_MCP_TOOL, LLM_PROMPT_TOOL, CLOSE_HANDOVER, HANDOVER_INACTIVITY_TIMER, GPT_CONVERSATION, GPT_CONVERSATION_SUMMARY, LLM_ENTITY_EXTRACT, AI_AGENT_JOB, AI_AGENT_JOB_DEFAULT, AI_AGENT_JOB_TOOL, AI_AGENT_JOB_MCP_TOOL, AI_AGENT_JOB_CALL_MCP_TOOL, AI_AGENT_TOOL_ANSWER, AI_AGENT_HANDOVER, LIVE_AGENT_CONNECTION, RINGCENTRAL_ENGAGE_CONNECTION, CHATWOOT_CONNECTION, EIGHT_BY_EIGHT_CONNECTION, GENESYS_CLOUD_CONNECTION, GENESYS_CLOUD_CONNECTION_OM, LLM_MODERATE, NICECXONEAAH_AUTHENTICATION_CONNECTION, LOAD_AI_AGENT, AIOPS_CENTER_WEBHOOKS_CONNECTION } from "./service";
 import { INIT_APP_SESSION, GET_APP_SESSION_PIN, SET_HTML_APP_STATE, SET_ADAPTIVE_CARD_APP_STATE, } from "./apps";
 import { SET_IFRAME_TILE, SET_HTML_TILE, SEND_TILE_DATA, SET_SECURE_FORMS_TILE, SET_ADAPTIVE_CARD_TILE, SET_AGENT_ASSIST_GRID, NEXT_ACTION_ASSIST, SENTIMENT_ASSIST, TRANSCRIPT_ASSIST, IDENTITY_ASSIST, KNOWLEDGE_ASSIST, } from "./agentAssist";
 import { ASSIST_INFO } from "./liveAgent";
@@ -88,6 +88,10 @@ const nodes = [
     CHECK_AGENT_AVAILABILITY,
     HTTP_REQUEST,
     GPT_PROMPT,
+    LLM_PROMPT_V2,
+    LLM_PROMPT_DEFAULT,
+    LLM_PROMPT_MCP_TOOL,
+    LLM_PROMPT_TOOL,
     GPT_CONVERSATION,
     GPT_CONVERSATION_SUMMARY,
     LLM_ENTITY_EXTRACT,
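
The four `LLM_PROMPT_*` entries are the newly added llmPrompt descriptor files from the list above. For readers unfamiliar with the package, a stripped-down skeleton of what such a descriptor looks like, with keys taken from the GPT_PROMPT and QUESTION hunks elsewhere in this diff and placeholder values everywhere else:

    // Illustrative only: the real LLM_PROMPT_V2 descriptor spans ~950 lines.
    export const EXAMPLE_NODE = createNodeDescriptor({
        type: "exampleNode",
        defaultLabel: "Example Node",
        summary: "UI__NODE_EDITOR__SERVICE__EXAMPLE__DESCRIPTION",
        fields: [
            {
                key: "someToggle",
                type: "toggle",
                label: "UI__...__LABEL",
                defaultValue: false,
                // fields may render conditionally, as in the QUESTION hunk below
                condition: { key: "otherField", value: "someValue" },
            },
        ],
    });
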
@@ -143,6 +147,7 @@ if (process.env.DISABLE_FEATURE_TRANSCRIPT_MANAGER !== "true") {
143
147
  nodes.push(AI_AGENT_JOB_CALL_MCP_TOOL);
144
148
  nodes.push(AI_AGENT_TOOL_ANSWER);
145
149
  nodes.push(AI_AGENT_HANDOVER);
150
+ nodes.push(LOAD_AI_AGENT);
146
151
  }
147
152
  if (process.env.FEATURE_USE_COGNIGY_LIVE_AGENT === "true") {
148
153
  nodes.push(ASSIST_INFO);
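
Note the gate: `DISABLE_FEATURE_TRANSCRIPT_MANAGER` is a disable flag, so the AI Agent family, now including `LOAD_AI_AGENT`, registers by default and is only dropped when the flag is explicitly set to "true". Condensed from the registration code above:

    if (process.env.DISABLE_FEATURE_TRANSCRIPT_MANAGER !== "true") {
        // ...existing AI Agent nodes...
        nodes.push(LOAD_AI_AGENT); // new in 2025.14.0
    }
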
@@ -582,10 +582,12 @@ export const SEARCH_EXTRACT_OUTPUT = createNodeDescriptor({
             // as it doesn't make sense to check for follow ups in the first execution
             if (input.execution > 1) {
                 // always remember the last thing the user said (needed later)
-                lastRoundTrip = (_b = cognigy.lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(1, 3).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
+                lastRoundTrip = (_b = cognigy
+                    .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
                 // if follow up detection is set to 2 or more, we use the conversation transcript
                 // as reference. Start at the second entry, because the first one is the current
-                const recentConversation = (_c = cognigy.lastConversationEntries) === null || _c === void 0 ? void 0 : _c.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
+                const recentConversation = (_c = cognigy
+                    .lastConversationEntries) === null || _c === void 0 ? void 0 : _c.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
                 prompt = `Below is the transcript of a conversation:
${recentConversation}
USER: ${searchString}
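
Note that `lastRoundTrip` now uses the same `followUpDetectionSteps` window instead of the fixed `slice(1, 3)`. The windowing itself reads: entry 0 of `lastConversationEntries` is the current input, so both strings slice from index 1, take `followUpDetectionSteps` prior entries, and reverse back into chronological order. A standalone sketch of that transformation (the entry shape is inferred from the mapping code):

    interface IConversationEntrySketch {
        source: "user" | "bot";
        text: string;
    }

    // Mirrors the slice/reverse/map chain in the hunk above.
    function buildRecentConversation(entries: IConversationEntrySketch[], steps: number): string {
        return entries
            .slice(1, steps + 1)   // skip the current input at index 0
            .reverse()             // back into chronological order
            .map(e => "- " + (e.source === "user" ? "USER: " : "BOT: ") + e.text)
            .join("\n");
    }
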
@@ -637,10 +639,12 @@ New: `;
                     message: (error === null || error === void 0 ? void 0 : error.message) || error,
                 };
                 api.logDebugError(JSON.stringify(compactError, undefined, 2), "Search Extract Output: Error");
-                api.emitToOpsCenter({
-                    subComponent: "KnowledgeAIQueries",
-                    title: error === null || error === void 0 ? void 0 : error.message
-                });
+                if (!(error instanceof InternalServerError)) {
+                    api.emitToOpsCenter({
+                        subComponent: "KnowledgeAIQueries",
+                        title: error === null || error === void 0 ? void 0 : error.message
+                    });
+                }
                 if ((_m = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _m === void 0 ? void 0 : _m.code) {
                     compactError["code"] = error.originalErrorDetails.code;
                 }
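
The same guard reappears in the GPT_PROMPT hunk further down (with subComponent "LargeLanguageModelCalls"): errors are still written to the debug log, but `InternalServerError` instances no longer reach the Ops Center. The shared pattern, isolated as an illustrative helper (the wrapper is not part of the package; `InternalServerError` and `emitToOpsCenter` are):

    // Illustrative helper only: escalate to the Ops Center unless the
    // failure is an internal server error.
    function emitUnlessInternal(
        api: { emitToOpsCenter(e: { subComponent: string; title?: string }): void },
        error: unknown,
        subComponent: string
    ): void {
        if (!(error instanceof InternalServerError)) {
            api.emitToOpsCenter({ subComponent, title: (error as any)?.message });
        }
    }

    emitUnlessInternal(api, error, "KnowledgeAIQueries");       // searchExtractOutput
    emitUnlessInternal(api, error, "LargeLanguageModelCalls");  // GPT_PROMPT
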
@@ -307,7 +307,7 @@ export const QUESTION = createNodeDescriptor({
         },
         {
             key: "repromptLLMPrompt",
-            type: "cognigyText",
+            type: "cognigyLLMText",
             label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_LLM_PROMPT__LABEL",
             description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_LLM_PROMPT__DESCRIPTION",
             defaultValue: `You are a chatbot that helps a user.
@@ -740,6 +740,17 @@ DO NOT talk about other topics. Do not offer general assistance.`,
             },
             defaultValue: true,
         },
+        {
+            key: "escalateAnswersRepeatHandoverMessage",
+            type: "toggle",
+            label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_REPEAT_HANDOVER_MESSAGE__LABEL",
+            description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_REPEAT_HANDOVER_MESSAGE__DESCRIPTION",
+            defaultValue: false,
+            condition: {
+                key: "escalateAnswersAction",
+                value: "handover"
+            }
+        },
         {
             key: "escalateAnswersHandoverCancelIntent",
             type: "cognigyText",
@@ -125,8 +125,9 @@ export const createLastUserInputString = (lastConversationEntries, turnLimit = 1
  * @param debugLogTokenCount whether to log the token count
  * @param debugLogRequestAndCompletion whether to log the request and completion
  * @param cognigy the cognigy object (input, api, etc)
+ * @param nodeType the type of the node (optional)
  */
-export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy) => __awaiter(void 0, void 0, void 0, function* () {
+export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, nodeType) => __awaiter(void 0, void 0, void 0, function* () {
     var _a, _b, _c, _d;
     const { api, input } = cognigy;
     if (input.endpointType !== "adminconsole" && !api.getMetadata().isFollowSessionActive) {
@@ -143,14 +144,21 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
         if (debugLogTokenCount) {
             if (prompt) {
                 const requestTokens = (_a = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _a === void 0 ? void 0 : _a.inputTokens;
-                requestTokenMessage = ` (${requestTokens} Tokens)`;
+                requestTokenMessage = ` (${nodeType === "llmPromptV2" ? "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__FULL_REQUEST: " : ""}${requestTokens} Tokens)`;
             }
             if (response) {
                 const completionTokens = (_b = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _b === void 0 ? void 0 : _b.outputTokens;
                 completionTokenMessage = ` (${completionTokens} Tokens)`;
             }
         }
-        api.logDebugMessage(`UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER");
+        let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
+        let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
+        if (nodeType === "llmPromptV2") {
+            inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__SYSTEM_PROMPT";
+            headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
+        }
+        ;
+        api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
         }
         catch (err) { }
     }
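
Both writeLLMDebugLogs hunks amount to one optional trailing parameter: `nodeType` only switches which localization keys label the debug output. A call-site sketch, assuming an async context; the argument values are hypothetical:

    // Existing call sites are unaffected, since nodeType is optional:
    await writeLLMDebugLogs(label, prompt, response, true, true, cognigy);

    // The new LLM Prompt v2 node can pass its type to switch the labels from
    // REQUEST / HEADER to SYSTEM_PROMPT / HEADER_WITH_SYSTEM_PROMPT:
    await writeLLMDebugLogs(label, prompt, response, true, true, cognigy, "llmPromptV2");
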
@@ -5,9 +5,10 @@ import { GO_TO } from "../logic";
5
5
  import { randomUUID } from 'crypto';
6
6
  import { createLastConverationString, createLastConversationChatObject, createLastUserInputString, writeLLMDebugLogs } from "../nlu/generativeSlotFiller/prompt";
7
7
  import { InternalServerError } from "../../../errors";
8
+ import { TranscriptEntryType, TranscriptRole } from "../../../interfaces/transcripts/transcripts";
8
9
  export const GPT_PROMPT = createNodeDescriptor({
9
10
  type: "completeText",
10
- defaultLabel: "LLM Prompt",
11
+ defaultLabel: "LLM Prompt (legacy)",
11
12
  summary: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__DESCRIPTION",
12
13
  fields: [
13
14
  {
@@ -584,6 +585,12 @@ export const GPT_PROMPT = createNodeDescriptor({
             const errorResponse = {
                 error: compactError,
             };
+            if (!(error instanceof InternalServerError)) {
+                api.emitToOpsCenter({
+                    subComponent: "LargeLanguageModelCalls",
+                    title: error === null || error === void 0 ? void 0 : error.message
+                });
+            }
             // add error to context or input
             switch (storeLocation) {
                 case "context":
@@ -643,6 +650,7 @@ export const GPT_PROMPT = createNodeDescriptor({
                 api.output(text, {
                     _cognigy: {
                         _messageId,
+                        _preventTranscript: true
                     }
                 });
             }
@@ -700,6 +708,18 @@ export const GPT_PROMPT = createNodeDescriptor({
             const resultToOutput = typeof ((response === null || response === void 0 ? void 0 : response.result) || response) === "object" ? JSON.stringify((response === null || response === void 0 ? void 0 : response.result) || response, undefined, 2) : (response === null || response === void 0 ? void 0 : response.result) || response;
             yield api.output(resultToOutput, null);
         }
+        else if (storeLocation === "stream") {
+            const transcriptContent = {
+                role: TranscriptRole.ASSISTANT,
+                type: TranscriptEntryType.OUTPUT,
+                source: "assistant",
+                payload: {
+                    text: ((response === null || response === void 0 ? void 0 : response.result) || response),
+                    data: {},
+                }
+            };
+            yield api.addTranscriptStep(transcriptContent);
+        }
         if (storeLocation === "stream" && responseToStore.finishReason) {
             // send the finishReason as last output for a stream
             (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, "", {