@cognigy/rest-api-client 0.15.0 → 0.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +11 -0
- package/build/GenericTusFn.js +64 -0
- package/build/RestAPIClient.js +3 -0
- package/build/apigroups/AdministrationAPIGroup_2_0.js +7 -2
- package/build/apigroups/ResourcesAPIGroup_2_0.js +28 -23
- package/build/{shared/interfaces/agentAssist/ISendTileUpdateReferenceParams.js → apigroups/TTusAPIOperation.js} +1 -1
- package/build/apigroups/index.js +2 -0
- package/build/connector/AxiosAdapter.js +5 -4
- package/build/shared/charts/descriptors/agentAssist/constants/constants.js +7 -0
- package/build/shared/charts/descriptors/agentAssist/helpers/sentiment.helper.js +41 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/identityAssistTemplate.js +54 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +187 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/nextActionWidgetTemplate.js +93 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage0.js +81 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage1.js +84 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage2.js +97 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stageError.js +85 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/sentimentAnalysisTemplate.js +41 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/stylesPartial.js +15 -0
- package/build/shared/charts/descriptors/agentAssist/htmlTemplates/transcriptAssistTemplate.js +53 -0
- package/build/shared/charts/descriptors/agentAssist/identityAssist.js +135 -0
- package/build/shared/charts/descriptors/agentAssist/index.js +15 -1
- package/build/shared/charts/descriptors/agentAssist/knowledgeAssist.js +625 -0
- package/build/shared/charts/descriptors/agentAssist/nextActionAssist.js +77 -0
- package/build/shared/charts/descriptors/agentAssist/sentimentAssist.js +86 -0
- package/build/shared/charts/descriptors/agentAssist/setAdaptiveCardTile.js +2 -2
- package/build/shared/charts/descriptors/agentAssist/setAgentAssistGrid.js +105 -0
- package/build/shared/charts/descriptors/agentAssist/setHtmlTile.js +2 -2
- package/build/shared/charts/descriptors/agentAssist/setIframeTile.js +2 -2
- package/build/shared/charts/descriptors/agentAssist/setSecureFormsTile.js +171 -0
- package/build/shared/charts/descriptors/agentAssist/transcriptAssist.js +72 -0
- package/build/shared/charts/descriptors/allFields.js +6 -0
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/TGenerativeAIConnectionFields.js +3 -0
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/alephAlphaProviderConnection.js +11 -0
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/anthropicProviderConnection.js +1 -1
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnectionV2.js +1 -1
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +6 -2
- package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/openAIProviderConnection.js +1 -1
- package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +4 -4
- package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/azureBlobStorageProviderConnection.js +3 -3
- package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/googleCloudStorageProviderConnection.js +3 -3
- package/build/shared/charts/descriptors/connectionNodes/mongoDB/mongoDBConnection.js +1 -1
- package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2Connection.js +8 -8
- package/build/shared/charts/descriptors/connectionNodes/smtp/serviceConnection.js +2 -2
- package/build/shared/charts/descriptors/connectionNodes/smtp/smtpConnection.js +5 -5
- package/build/shared/charts/descriptors/connectionNodes/speechProviders/awsSpeechProviderConnection.js +4 -4
- package/build/shared/charts/descriptors/connectionNodes/speechProviders/microsoftSpeechProviderConnection.js +2 -2
- package/build/shared/charts/descriptors/connectionNodes/sql/sqlConnection.js +5 -5
- package/build/shared/charts/descriptors/connectionNodes/translationProviders/deeplTranslationProviderConnection.js +15 -0
- package/build/shared/charts/descriptors/connectionNodes/translationProviders/googleTranslationProviderConnection.js +15 -0
- package/build/shared/charts/descriptors/connectionNodes/translationProviders/index.js +23 -0
- package/build/shared/charts/descriptors/connectionNodes/translationProviders/microsoftTranslationProviderConnection.js +21 -0
- package/build/shared/charts/descriptors/index.js +13 -2
- package/build/shared/charts/descriptors/knowledgeSearch/constants/constants.js +6 -0
- package/build/shared/charts/descriptors/knowledgeSearch/index.js +5 -1
- package/build/shared/charts/descriptors/knowledgeSearch/knowledgeSearch.js +7 -2
- package/build/shared/charts/descriptors/knowledgeSearch/knowledgeSearchV2.js +151 -0
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +985 -0
- package/build/shared/charts/descriptors/logic/goTo.js +3 -1
- package/build/shared/charts/descriptors/message/question/optionalQuestion.js +2 -2
- package/build/shared/charts/descriptors/message/question/question.js +215 -8
- package/build/shared/charts/descriptors/nlu/cleanText.js +1 -0
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +103 -5
- package/build/shared/charts/descriptors/service/GPTConversation.js +125 -23
- package/build/shared/charts/descriptors/service/GPTPrompt.js +148 -12
- package/build/shared/charts/descriptors/service/checkAgentAvailability.js +15 -1
- package/build/shared/charts/descriptors/service/handoverV2.js +45 -0
- package/build/shared/{interfaces/twilioInterface.js → charts/descriptors/voice/interface/IBandwidth.js} +1 -1
- package/build/shared/charts/descriptors/voice/mappers/base.mapper.js +14 -0
- package/build/shared/charts/descriptors/voice/mappers/hangup.mapper.js +26 -0
- package/build/shared/charts/descriptors/voice/mappers/muteSpeechInput.mapper.js +17 -2
- package/build/shared/charts/descriptors/voice/mappers/play.mapper.js +54 -0
- package/build/shared/charts/descriptors/voice/mappers/record.mapper.js +7 -0
- package/build/shared/charts/descriptors/voice/mappers/sendMetadata.mapper.js +15 -0
- package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +38 -8
- package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +31 -9
- package/build/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -1
- package/build/shared/charts/descriptors/voice/nodes/play.js +1 -1
- package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +118 -4
- package/build/shared/charts/descriptors/voice/nodes/transfer.js +11 -3
- package/build/shared/charts/descriptors/voice/utils/vgConstants.js +14 -0
- package/build/shared/charts/descriptors/voicegateway/index.js +14 -11
- package/build/shared/charts/descriptors/voicegateway/utils/paramUtils.js +1 -5
- package/build/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +12 -1
- package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +6 -0
- package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +158 -10
- package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +296 -43
- package/build/shared/charts/descriptors/voicegateway2/utils/helper.js +30 -4
- package/build/shared/charts/helpers/generativeAI/rephraseSentenceWithAi.js +1 -1
- package/build/shared/constants.js +5 -1
- package/build/shared/handoverClients/interfaces/THandoverEventType.js +3 -1
- package/build/shared/helper/BaseContext.js +2 -15
- package/build/shared/helper/nlu/textCleaner.js +25 -3
- package/build/shared/interfaces/IEndpointTranslationSettings.js +9 -0
- package/build/shared/interfaces/IOrganisation.js +2 -1
- package/build/shared/interfaces/agentAssist/ISendConfigUpdateParams.js +3 -0
- package/build/shared/interfaces/agentAssist/ISendUpdateReferenceParams.js +3 -0
- package/build/shared/interfaces/amqpInterface.js +18 -22
- package/build/shared/interfaces/channels/genesysBotConnector.js +21 -0
- package/build/shared/interfaces/channels/niceCXOne.js +8 -0
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +15 -7
- package/build/shared/interfaces/handover.js +57 -3
- package/build/shared/interfaces/journeys/IJourney.js +6 -0
- package/build/shared/interfaces/journeys/IJourneyTrackEvent.js +2 -0
- package/build/shared/interfaces/messageAPI/endpoints.js +91 -8
- package/build/shared/interfaces/resources/IAuditEvent.js +1 -0
- package/build/shared/interfaces/resources/IConnectionSchema.js +3 -0
- package/build/shared/interfaces/resources/IEndpoint.js +1 -0
- package/build/shared/interfaces/resources/IExtension.js +3 -0
- package/build/shared/interfaces/resources/ILargeLanguageModel.js +24 -5
- package/build/shared/interfaces/resources/INodeDescriptorSet.js +23 -1
- package/build/shared/interfaces/resources/TNLUConnectorType.js +0 -1
- package/build/shared/interfaces/resources/TRestChannelType.js +16 -11
- package/build/shared/interfaces/resources/TWebhookChannelType.js +6 -1
- package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +21 -3
- package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +13 -2
- package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeStore.js +1 -1
- package/build/shared/interfaces/resources/settings/IAgentSettings.js +3 -23
- package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -0
- package/build/shared/interfaces/resources/settings/ITranslationSettings.js +63 -0
- package/build/shared/interfaces/resources/settings/index.js +4 -1
- package/build/shared/interfaces/restAPI/administration/voiceGateway/v2.0/IReadVoiceGatewayAccountRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/administration/voiceGateway/v2.0/ISetupVoiceGatewayAccountRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/administration/voiceGateway/v2.0/index.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/knowledgeQueryCounter/v2.0/IGetKnowledgeQueryCounterOrganisationRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/knowledgeQueryCounter/v2.0/IGetKnowledgeQueryCounterRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/knowledgeQueryCounter/v2.0/IKnowlegeQueryCounterAggregatedValue_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/metrics/knowledgeQueryCounter/v2.0/index.js +3 -0
- package/build/shared/interfaces/restAPI/resources/nluconnector/v2.0/TNLUConnectorType_2_0.js +0 -2
- package/build/shared/interfaces/restAPI/resources/uploadResumable/v2.0/IUploadResumableRest_2_0.js +17 -0
- package/build/shared/interfaces/restAPI/resources/uploadResumable/v2.0/index.js +3 -0
- package/build/shared/interfaces/security/IKnowledgeQueryCounterAggregatedValue.js +3 -0
- package/build/shared/interfaces/security/IPermission.js +2 -0
- package/build/shared/interfaces/security/IRole.js +3 -1
- package/build/test.js +27 -0
- package/package.json +19 -17
- package/types/index.d.ts +581 -129

@@ -135,9 +135,11 @@ exports.GO_TO = (0, createNodeDescriptor_1.createNodeDescriptor)({
         if (!api.checkThink(thisNodeId)) {
             api.resetNextNodes();
             api.setThinkMarker(config.flowNode.flow);
+            // We always set the next node, not only if executionMode is "wait",
+            // otherwise we would not go to the new flow if a default reply is triggered without continue after setting
+            await api.goToNode(config.flowNode);
             // Check if execution is to wait at Node for Input
             if (executionMode === "wait") {
-                await api.goToNode(config.flowNode);
                 return;
             }
             if (injectedText) {

@@ -333,12 +333,12 @@ exports.OPTIONAL_QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
             generativeAI_temperature: config.generativeAI_temperature,
             generativeAI_amountOfLastUserInputs: config.generativeAI_amountOfLastUserInputs,
             generativeAI_customInputs: config.generativeAI_customInputs,
-
+            promptType: "question",
             questionType: config.type,
             question: undefined,
             answer: undefined,
         };
-        rephraseWithAIParams.
+        rephraseWithAIParams.promptType = "question";
         rephraseWithAIParams.questionType = config.type;
         await say_1.SAY.function({ cognigy, config: Object.assign({ handoverOutput, say }, rephraseWithAIParams), childConfigs: [], nodeId, organisationId });
         if (onQuestionChild) {

@@ -52,6 +52,7 @@ const getRephraseWithAIFields_1 = require("../../../helpers/generativeAI/getReph
 const cleanTextUtils_1 = require("./utils/cleanTextUtils");
 const textCleaner_1 = require("../../../../helper/nlu/textCleaner");
 const constants_1 = require("../../../../constants");
+const prompt_1 = require("../../nlu/generativeSlotFiller/prompt");
 var QuestionTypes;
 (function (QuestionTypes) {
     QuestionTypes[QuestionTypes["date"] = 0] = "date";

@@ -220,6 +221,25 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
             defaultValue: "Not sure I understood this correctly.",
             params: {
                 multiline: true
+            },
+            condition: {
+                and: [
+                    {
+                        key: "repromptType",
+                        value: "say",
+                        negate: true
+                    },
+                    {
+                        key: "repromptType",
+                        value: "llm",
+                        negate: true
+                    },
+                    {
+                        key: "repromptType",
+                        value: "execute",
+                        negate: true
+                    }
+                ]
             }
         },
         {

@@ -229,6 +249,133 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
             description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_CONDITION__DESCRIPTION",
             defaultValue: ""
         },
+        {
+            key: "repromptType",
+            type: "select",
+            label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_REPROMPT_TYPE__LABEL",
+            defaultValue: "text",
+            params: {
+                options: [
+                    {
+                        label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_REPROMPT_TYPE__OPTION_TEXT",
+                        value: "text"
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_REPROMPT_TYPE__OPTION_SAY",
+                        value: "say"
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_REPROMPT_TYPE__OPTION_LLM",
+                        value: "llm"
+                    },
+                    {
+                        label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_REPROMPT_TYPE__OPTION_EXECUTE",
+                        value: "execute"
+                    },
+                ]
+            }
+        },
+        {
+            key: "repromptSay",
+            type: "say",
+            label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_SAY__LABEL",
+            defaultValue: {
+                data: "{}",
+                type: "text",
+                linear: false,
+                loop: false,
+                text: ["Sorry, I didn't understand that correctly."],
+                _cognigy: {},
+            },
+            condition: {
+                key: "repromptType",
+                value: "say",
+            }
+        },
+        {
+            key: "repromptLLMProvider",
+            type: "llmSelect",
+            label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_LLM_SELECT__LABEL",
+            defaultValue: "default",
+            condition: {
+                key: "repromptType",
+                value: "llm",
+            }
+        },
+        {
+            key: "repromptLLMPrompt",
+            type: "cognigyText",
+            label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_LLM_PROMPT__LABEL",
+            description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_LLM_PROMPT__DESCRIPTION",
+            defaultValue: `You are a chatbot that helps a user.
+You asked the user for INSERT_ASK (e.g. "their customer number") and an incorrect answer was given.
+You now need to guide the user back in a polite way and ask for INSERT_ASK (e.g. "their customer number") again.
+DO NOT talk about other topics. Do not offer general assistance.`,
+            params: {
+                multiline: true
+            },
+            condition: {
+                key: "repromptType",
+                value: "llm",
+            }
+        },
+        {
+            key: "repromptLLMTurns",
+            label: "UI__NODE_EDITOR__SERVICE__QUESTION__QUESTION__FIELDS__TRANSCRIPT_STEPS__LABEL",
+            type: "slider",
+            description: "UI__NODE_EDITOR__SERVICE__QUESTION__QUESTION__FIELDS__TRANSCRIPT_STEPS__DESCRIPTION",
+            defaultValue: 3,
+            params: {
+                min: 1,
+                max: 10,
+                step: 1
+            },
+            condition: {
+                key: "repromptType",
+                value: "llm",
+            }
+        },
+        {
+            key: "repromptFlowNode",
+            type: "flowNode",
+            label: "UI__NODE_EDITOR__EXECUTE_FLOW__FLOW_NODE__LABEL",
+            condition: {
+                key: "repromptType",
+                value: "execute",
+            }
+        },
+        {
+            key: "repromptParseIntents",
+            type: "toggle",
+            label: "UI__NODE_EDITOR__EXECUTE_FLOW__PARSE_INTENTS__LABEL",
+            description: "UI__NODE_EDITOR__EXECUTE_FLOW__PARSE_INTENTS__DESCRIPTION",
+            defaultValue: true,
+            condition: {
+                key: "repromptType",
+                value: "execute",
+            }
+        },
+        {
+            key: "repromptParseKeyphrases",
+            type: "toggle",
+            label: "UI__NODE_EDITOR__EXECUTE_FLOW__PARSE_KEYPHRASES__LABEL",
+            description: "UI__NODE_EDITOR__EXECUTE_FLOW__PARSE_KEYPHRASES__DESCRIPTION",
+            defaultValue: true,
+            condition: {
+                key: "repromptType",
+                value: "execute",
+            }
+        },
+        {
+            key: "repromptAbsorbContext",
+            type: "toggle",
+            label: "UI__NODE_EDITOR__EXECUTE_FLOW__ABSORB_CONTEXT__LABEL",
+            description: "UI__NODE_EDITOR__EXECUTE_FLOW__ABSORB_CONTEXT__DESCRIPTION",
+            condition: {
+                key: "repromptType",
+                value: "execute",
+            }
+        },
         {
             key: "validationRepeat",
             type: "toggle",
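
Together with the `and`/`negate` condition added further up, which hides the plain validation message whenever a non-text reprompt type is selected, these fields let a Question node reprompt with a Say output, an LLM-generated message, or a separate flow execution. A minimal sketch of the values such a node might carry when the LLM variant is selected; the field keys come from the descriptor above, the concrete values are illustrative and not taken from the package:

```js
// Illustrative only: config values a Question node could hold with the new reprompt fields.
const repromptConfig = {
    repromptType: "llm",            // "text" (default) | "say" | "llm" | "execute"
    repromptLLMProvider: "default", // or a specific LLM reference id from the agent
    repromptLLMPrompt: "You asked the user for their customer number and an incorrect answer was given. Politely ask for it again.",
    repromptLLMTurns: 3,            // slider 1-10: how many recent conversation turns to include
    // read only when repromptType is "execute":
    repromptParseIntents: true,
    repromptParseKeyphrases: true,
    repromptAbsorbContext: false
};
```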

@@ -1356,6 +1503,15 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
             label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__SECTIONS__REPROMPT__LABEL",
             defaultCollapsed: true,
             fields: [
+                "repromptType",
+                "repromptLLMProvider",
+                "repromptLLMPrompt",
+                "repromptLLMTurns",
+                "repromptSay",
+                "repromptFlowNode",
+                "repromptParseIntents",
+                "repromptParseKeyphrases",
+                "repromptAbsorbContext",
                 "validationMessage",
                 "validationRepeat",
                 "repromptCondition",

@@ -1534,14 +1690,14 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
     //#endregion DescriptorFields
     function: async ({ cognigy, nodeId, organisationId, config, inputOptions }) => {
         var _a, _b, _c;
-        const { say, type, validationMessage, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
+        const { say, type, validationMessage, repromptLLMProvider, repromptType, repromptLLMPrompt, repromptLLMTurns, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
         const { input, context, profile, api } = cognigy;
         const rephraseWithAIParams = {
             generativeAI_rephraseOutputMode: config.generativeAI_rephraseOutputMode,
             generativeAI_temperature: config.generativeAI_temperature,
             generativeAI_amountOfLastUserInputs: config.generativeAI_amountOfLastUserInputs,
             generativeAI_customInputs: config.generativeAI_customInputs,
-
+            promptType: undefined,
             questionType: undefined,
             question: undefined,
             answer: undefined,

@@ -1560,6 +1716,7 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
         const options = {
             cleanDisallowedSymbols,
             contractSingleCharacters,
+            replaceSpecialPhrases: replaceSpecialWords,
             replaceSpecialWords,
             resolveSpelledOutNumbers,
             resolvePhoneticAlphabet,

@@ -1735,7 +1892,7 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
         // #region 2 FirstTimeNodeIsHit
         // if it's the first execution and you don't force evaluation on first execution, ask the question
         if (isFirstExecution && !parseResultOnEntry) {
-            rephraseWithAIParams.
+            rephraseWithAIParams.promptType = "question";
             rephraseWithAIParams.questionType = config.type;
             await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ say }, rephraseWithAIParams) });
             if (config.type === "date" && !config.datepicker_hidePicker) {

@@ -2110,17 +2267,67 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
         if (skipRepromptOnIntent && cognigy.input.intent) {
             sayReprompt = false;
         }
-        if
+        // We will only output a reprompt if the user is not in the first execution
+        // and no skip condition is true
+        if ((validationMessage || repromptType) && !isFirstExecution && sayReprompt) {
             activeQuestion.repromptCount++;
             /* Output Reprompt Message */
-            rephraseWithAIParams.
+            rephraseWithAIParams.promptType = "reprompt";
             rephraseWithAIParams.questionType = config.type;
             rephraseWithAIParams.question = say.text[0];
             rephraseWithAIParams.answer = input.text;
-
+            if (sayReprompt) {
+                switch (repromptType) {
+                    case "say":
+                        await say_1.SAY.function(Object.assign({ cognigy, childConfigs: [], nodeId, organisationId, config: { say: repromptSay } }, rephraseWithAIParams));
+                        break;
+                    case "execute":
+                        // if a question reprompt is set to execute flow and we have just executed
+                        // and come back here, we want to skip the reprompt
+                        if (api.getSystemContext(`lastReprompt-${nodeId}`) !== cognigy.input.execution) {
+                            // set myself as next Node on the stack, so we can return to me
+                            api.setNextNode(nodeId);
+                            // remember when this escalation happened
+                            api.setSystemContext(`lastReprompt-${nodeId}`, cognigy.input.execution);
+                            await executeFlow_1.EXECUTE_FLOW.function({
+                                cognigy,
+                                childConfigs: [],
+                                nodeId,
+                                organisationId,
+                                config: {
+                                    flowNode: repromptFlowNode,
+                                    absorbContext: repromptAbsorbContext,
+                                    parseIntents: repromptParseIntents,
+                                    parseKeyphrases: repromptParseKeyphrases
+                                }
+                            });
+                            return;
+                        }
+                        else {
+                            api.deleteSystemContext(`lastReprompt-${nodeId}`);
+                        }
+                        break;
+                    case "llm":
+                        const data = {
+                            prompt: "",
+                            chat: (0, prompt_1.createLastConversationChatObject)(cognigy.lastConversationEntries, repromptLLMPrompt, repromptLLMTurns),
+                            temperature: 0.7,
+                            timeoutInMs: 5000,
+                            useCase: "promptNode",
+                        };
+                        if (repromptLLMProvider && repromptLLMProvider !== "default") {
+                            data["llmProviderReferenceId"] = repromptLLMProvider;
+                        }
+                        const repromptMessage = await api.runGenerativeAIPrompt(data, "gptPromptNode");
+                        await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
+                        break;
+                    default: // this is also "text"
+                        await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [validationMessage] } }, rephraseWithAIParams) });
+                }
+            }
             /* If repeat toggle is on, also output question (and maybe datepicker) again */
             if (validationRepeat) {
-                rephraseWithAIParams.
+                rephraseWithAIParams.promptType = "question";
                 rephraseWithAIParams.questionType = config.type;
                 await say_1.SAY.function({
                     cognigy,
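
In the new "llm" branch above, the request handed to `api.runGenerativeAIPrompt` is built entirely from the reprompt fields. A condensed sketch of that call: the field names, the 0.7 temperature, the 5000 ms timeout, and the "promptNode"/"gptPromptNode" labels are taken from the diff, while the chat content shown is illustrative:

```js
// Condensed sketch of the "llm" reprompt branch (runs inside the node's async function).
const data = {
    prompt: "",
    chat: [
        { role: "system", content: repromptLLMPrompt },                  // the node's reprompt prompt
        { role: "assistant", content: "What is your customer number?" }, // from the recent transcript
        { role: "user", content: "I like trains." }
    ],
    temperature: 0.7,
    timeoutInMs: 5000,
    useCase: "promptNode"
};
if (repromptLLMProvider && repromptLLMProvider !== "default") {
    data.llmProviderReferenceId = repromptLLMProvider; // override the project default LLM
}
const repromptMessage = await api.runGenerativeAIPrompt(data, "gptPromptNode");
```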

@@ -2136,7 +2343,7 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
         }
         else {
             // output actual question
-            rephraseWithAIParams.
+            rephraseWithAIParams.promptType = "question";
             rephraseWithAIParams.questionType = config.type;
             await say_1.SAY.function({
                 cognigy,

@@ -63,6 +63,7 @@ exports.CLEAN_TEXT = (0, createNodeDescriptor_1.createNodeDescriptor)({
         cleanDisallowedSymbols,
         contractSingleCharacters,
         replaceSpecialWords,
+        replaceSpecialPhrases: replaceSpecialWords,
         resolveSpelledOutNumbers,
         resolvePhoneticAlphabet,
         resolvePhoneticCounters,

@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.createLastUserInputString = exports.createLastConverationString = exports.createInvalidAnswerPrompt = exports.createQuestionPrompt = exports.createExtractionPrompt = void 0;
+exports.convertChatToPrompt = exports.writeLLMDebugLogs = exports.createLastUserInputString = exports.createLastConversationChatObject = exports.createLastConverationString = exports.createInvalidAnswerPrompt = exports.createQuestionPrompt = exports.createExtractionPrompt = void 0;
 const createExtractionPrompt = (slots, lastConversationEntries) => {
     const userInput = lastConversationEntries.filter(entry => entry.source === "user").map(entry => "- " + entry.text).join("\n");
     const conversation = (0, exports.createLastConverationString)(lastConversationEntries);

@@ -59,18 +59,116 @@ exports.createInvalidAnswerPrompt = createInvalidAnswerPrompt;
 - BOT: Hello
 - USER: Hi
 */
-const createLastConverationString = (lastConversationEntries) => {
-    const conversation = lastConversationEntries === null || lastConversationEntries === void 0 ? void 0 : lastConversationEntries.map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).reverse().join("\n");
+const createLastConverationString = (lastConversationEntries, turnLimit = 100) => {
+    const conversation = lastConversationEntries === null || lastConversationEntries === void 0 ? void 0 : lastConversationEntries.map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).slice(0, turnLimit).reverse().join("\n");
     return conversation || "";
 };
 exports.createLastConverationString = createLastConverationString;
+/* returns recentConversation as an openai formatted chat prompt, e.g.
+[
+    {
+        "role": "system",
+        "content": "Imagine you're Michael and you're working in a agency for insurance sales for CNP Santander Insurance Europe. Your job is to call customers who have just gotten a loan from a bank. Your job is to sell them loan default insurance.\n\nIt's important you act tactfully and slowly approach the topic. Your main goal is to get the customer to confirm they want to the insurance, but don't be pushy.\n\nThe customer name is Vito Volpe. The loan amount for this customer is 10000 and the cost for the insurance is 0.89% of the loan amount per year, paid with the insurance payments. The customer can opt out at any time. \n\nOnce the customer is has confirmed they want the insurance, answer with \"SUCCESS\" only.\nIf the customer wants to reschedule the call, confirm the time and then answer with \"RESCHEDULE\" only.\n\nDon't immediately start with the offer. Make sure the customer is comfortable first.\n\nDon't discuss other insurance products. Don't negotiate on pricing.\n\nUse simple wording only. Speak in short sentences."
+    },
+    {
+        "role": "user",
+        "content": "Hello?"
+    },
+    {
+        "role": "assistant",
+        "content": "Good day, is this Mr. Vito Volpe I am speaking with?"
+    }
+]
+*/
+const createLastConversationChatObject = (lastConversationEntries, systemMessage, turnLimit = 100) => {
+    const conversation = [];
+    if (systemMessage) {
+        conversation.push({
+            role: "system",
+            content: systemMessage
+        });
+    }
+    lastConversationEntries === null || lastConversationEntries === void 0 ? void 0 : lastConversationEntries.slice(0, turnLimit).reverse().map(entry => {
+        conversation.push({
+            role: entry.source === "user" ? "user" : "assistant",
+            content: entry.text
+        });
+    });
+    return conversation;
+};
+exports.createLastConversationChatObject = createLastConversationChatObject;
 /* returns recentUserInput as a multiline string:
 - Hi
 - I want to order...
 */
-const createLastUserInputString = (lastConversationEntries) => {
-    const userInput = lastConversationEntries.filter(entry => entry.source === "user").map(entry => "- " + entry.text).reverse().join("\n");
+const createLastUserInputString = (lastConversationEntries, turnLimit = 100) => {
+    const userInput = lastConversationEntries.filter(entry => entry.source === "user").map(entry => "- " + entry.text).slice(0, turnLimit).reverse().join("\n");
     return userInput || "";
 };
 exports.createLastUserInputString = createLastUserInputString;
+/**
+ * Writes debug logs for LLM calls
+ * @param label the label of the log
+ * @param prompt the prompt used for the LLM call
+ * @param response the response of the LLM call
+ * @param debugLogTokenCount whether to log the token count
+ * @param debugLogRequestAndCompletion whether to log the request and completion
+ * @param cognigy the cognigy object (input, api, etc)
+ */
+const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy) => {
+    const { api } = cognigy;
+    // debug logs are only processed for the interaction panel
+    if (debugLogRequestAndCompletion) {
+        try {
+            let requestTokenMessage = "";
+            let completionTokenMessage = "";
+            if (debugLogTokenCount) {
+                if (prompt) {
+                    const requestTokens = await api.countGPTTokens(prompt);
+                    requestTokenMessage = ` (${requestTokens} Tokens)`;
+                }
+                if (response) {
+                    const completionTokens = await api.countGPTTokens(response);
+                    completionTokenMessage = ` (${completionTokens} Tokens)`;
+                }
+            }
+            api.log('debug', `${label} - Request${requestTokenMessage}: '${prompt}' - Completion${completionTokenMessage}: '${response}'`);
+        }
+        catch (err) { }
+    }
+    else if (debugLogTokenCount) {
+        try {
+            let requestTokens = 0;
+            let completionTokens = 0;
+            requestTokens = prompt && await api.countGPTTokens(prompt);
+            completionTokens = response && await api.countGPTTokens(response);
+            const completionMessage = response ? ` - Completion Tokens: ${completionTokens} - Total Tokens: ${requestTokens + completionTokens}` : "";
+            api.log('debug', `${label} - Request Tokens: ${requestTokens} ${completionMessage}`);
+        }
+        catch (err) { }
+    }
+};
+exports.writeLLMDebugLogs = writeLLMDebugLogs;
+/**
+ * Converts an OpenAI chat object to a prompt string
+ * @param chat The OpenAI Chat Object
+ * @returns The concatenated string
+ */
+const convertChatToPrompt = (chat) => {
+    let prompt = "";
+    for (const message of chat) {
+        if (message.role === "system") {
+            prompt += `${message.content}\n`;
+        }
+        else if (message.role === "user") {
+            prompt += `user: ${message.content}\n`;
+        }
+        else if (message.role === "assistant") {
+            prompt += `assistant: ${message.content}\n`;
+        }
+    }
+    prompt += "assistant: ";
+    return prompt;
+};
+exports.convertChatToPrompt = convertChatToPrompt;
 //# sourceMappingURL=prompt.js.map
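
The new `createLastConversationChatObject` mirrors the existing `createLastConverationString` but returns an OpenAI-style message array, and `convertChatToPrompt` flattens such an array back into a plain prompt string. A small usage sketch against the signatures above; the conversation entries and the require path are illustrative, and `lastConversationEntries` is assumed to be ordered newest-first, which is why the helper reverses it:

```js
// Illustrative usage of the new helpers; adjust the require path to the compiled module location.
const { createLastConversationChatObject, convertChatToPrompt } = require("./prompt");

// Entries shaped like { source, text }, newest first (assumption based on the .reverse() above).
const lastConversationEntries = [
    { source: "user", text: "I like trains." },
    { source: "bot", text: "What is your customer number?" }
];

const chat = createLastConversationChatObject(lastConversationEntries, "You are a helpful assistant.", 10);
// chat -> [
//   { role: "system", content: "You are a helpful assistant." },
//   { role: "assistant", content: "What is your customer number?" },
//   { role: "user", content: "I like trains." }
// ]

console.log(convertChatToPrompt(chat));
// You are a helpful assistant.
// assistant: What is your customer number?
// user: I like trains.
// assistant:
```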