@cognigy/rest-api-client 0.15.0 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. package/CHANGELOG.md +17 -0
  2. package/build/GenericTusFn.js +64 -0
  3. package/build/RestAPIClient.js +3 -0
  4. package/build/apigroups/AdministrationAPIGroup_2_0.js +7 -2
  5. package/build/apigroups/ResourcesAPIGroup_2_0.js +28 -23
  6. package/build/{shared/interfaces/agentAssist/ISendTileUpdateReferenceParams.js → apigroups/TTusAPIOperation.js} +1 -1
  7. package/build/apigroups/index.js +2 -0
  8. package/build/connector/AxiosAdapter.js +5 -4
  9. package/build/shared/charts/descriptors/agentAssist/constants/constants.js +7 -0
  10. package/build/shared/charts/descriptors/agentAssist/helpers/sentiment.helper.js +41 -0
  11. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/identityAssistTemplate.js +54 -0
  12. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +203 -0
  13. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/nextActionWidgetTemplate.js +93 -0
  14. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage0.js +81 -0
  15. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage1.js +84 -0
  16. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stage2.js +97 -0
  17. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/secureForms/stageError.js +85 -0
  18. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/sentimentAnalysisTemplate.js +41 -0
  19. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/stylesPartial.js +15 -0
  20. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/transcriptAssistTemplate.js +53 -0
  21. package/build/shared/charts/descriptors/agentAssist/identityAssist.js +135 -0
  22. package/build/shared/charts/descriptors/agentAssist/index.js +15 -1
  23. package/build/shared/charts/descriptors/agentAssist/knowledgeAssist.js +625 -0
  24. package/build/shared/charts/descriptors/agentAssist/nextActionAssist.js +77 -0
  25. package/build/shared/charts/descriptors/agentAssist/sentimentAssist.js +86 -0
  26. package/build/shared/charts/descriptors/agentAssist/setAdaptiveCardTile.js +2 -2
  27. package/build/shared/charts/descriptors/agentAssist/setAgentAssistGrid.js +120 -0
  28. package/build/shared/charts/descriptors/agentAssist/setHtmlTile.js +2 -2
  29. package/build/shared/charts/descriptors/agentAssist/setIframeTile.js +2 -2
  30. package/build/shared/charts/descriptors/agentAssist/setSecureFormsTile.js +171 -0
  31. package/build/shared/charts/descriptors/agentAssist/transcriptAssist.js +72 -0
  32. package/build/shared/charts/descriptors/allFields.js +12 -0
  33. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/TGenerativeAIConnectionFields.js +3 -0
  34. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/alephAlphaProviderConnection.js +11 -0
  35. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/anthropicProviderConnection.js +1 -1
  36. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnectionV2.js +1 -1
  37. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +6 -2
  38. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/openAIProviderConnection.js +1 -1
  39. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +4 -4
  40. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/azureBlobStorageProviderConnection.js +3 -3
  41. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/googleCloudStorageProviderConnection.js +3 -3
  42. package/build/shared/charts/descriptors/connectionNodes/mongoDB/mongoDBConnection.js +1 -1
  43. package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2Connection.js +8 -8
  44. package/build/shared/charts/descriptors/connectionNodes/smtp/serviceConnection.js +2 -2
  45. package/build/shared/charts/descriptors/connectionNodes/smtp/smtpConnection.js +5 -5
  46. package/build/shared/charts/descriptors/connectionNodes/speechProviders/awsSpeechProviderConnection.js +4 -4
  47. package/build/shared/charts/descriptors/connectionNodes/speechProviders/microsoftSpeechProviderConnection.js +2 -2
  48. package/build/shared/charts/descriptors/connectionNodes/sql/sqlConnection.js +5 -5
  49. package/build/shared/charts/descriptors/connectionNodes/translationProviders/deeplTranslationProviderConnection.js +15 -0
  50. package/build/shared/charts/descriptors/connectionNodes/translationProviders/googleTranslationProviderConnection.js +15 -0
  51. package/build/shared/charts/descriptors/connectionNodes/translationProviders/index.js +23 -0
  52. package/build/shared/charts/descriptors/connectionNodes/translationProviders/microsoftTranslationProviderConnection.js +21 -0
  53. package/build/shared/charts/descriptors/index.js +13 -2
  54. package/build/shared/charts/descriptors/knowledgeSearch/constants/constants.js +6 -0
  55. package/build/shared/charts/descriptors/knowledgeSearch/index.js +5 -1
  56. package/build/shared/charts/descriptors/knowledgeSearch/knowledgeSearch.js +7 -2
  57. package/build/shared/charts/descriptors/knowledgeSearch/knowledgeSearchV2.js +151 -0
  58. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +985 -0
  59. package/build/shared/charts/descriptors/logic/goTo.js +2 -0
  60. package/build/shared/charts/descriptors/message/question/optionalQuestion.js +2 -2
  61. package/build/shared/charts/descriptors/message/question/question.js +215 -8
  62. package/build/shared/charts/descriptors/nlu/cleanText.js +1 -0
  63. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +107 -5
  64. package/build/shared/charts/descriptors/service/GPTConversation.js +125 -23
  65. package/build/shared/charts/descriptors/service/GPTPrompt.js +148 -12
  66. package/build/shared/charts/descriptors/service/checkAgentAvailability.js +15 -1
  67. package/build/shared/charts/descriptors/service/handoverV2.js +45 -0
  68. package/build/shared/{interfaces/twilioInterface.js → charts/descriptors/voice/interface/IBandwidth.js} +1 -1
  69. package/build/shared/charts/descriptors/voice/mappers/base.mapper.js +14 -0
  70. package/build/shared/charts/descriptors/voice/mappers/hangup.mapper.js +26 -0
  71. package/build/shared/charts/descriptors/voice/mappers/muteSpeechInput.mapper.js +17 -2
  72. package/build/shared/charts/descriptors/voice/mappers/play.mapper.js +54 -0
  73. package/build/shared/charts/descriptors/voice/mappers/record.mapper.js +7 -0
  74. package/build/shared/charts/descriptors/voice/mappers/sendMetadata.mapper.js +15 -0
  75. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +56 -20
  76. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +39 -13
  77. package/build/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -1
  78. package/build/shared/charts/descriptors/voice/nodes/play.js +1 -1
  79. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +120 -29
  80. package/build/shared/charts/descriptors/voice/nodes/transfer.js +11 -3
  81. package/build/shared/charts/descriptors/voice/utils/vgConstants.js +14 -0
  82. package/build/shared/charts/descriptors/voicegateway/index.js +14 -11
  83. package/build/shared/charts/descriptors/voicegateway/utils/paramUtils.js +1 -5
  84. package/build/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +12 -1
  85. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +6 -0
  86. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +159 -36
  87. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +296 -43
  88. package/build/shared/charts/descriptors/voicegateway2/utils/helper.js +30 -4
  89. package/build/shared/charts/helpers/generativeAI/rephraseSentenceWithAi.js +1 -1
  90. package/build/shared/constants.js +5 -1
  91. package/build/shared/handoverClients/interfaces/THandoverEventType.js +3 -1
  92. package/build/shared/helper/BaseContext.js +33 -15
  93. package/build/shared/helper/nlu/textCleaner.js +25 -3
  94. package/build/shared/interfaces/IEndpointTranslationSettings.js +9 -0
  95. package/build/shared/interfaces/IOrganisation.js +2 -1
  96. package/build/shared/interfaces/agentAssist/ISendConfigUpdateParams.js +3 -0
  97. package/build/shared/interfaces/agentAssist/ISendUpdateReferenceParams.js +3 -0
  98. package/build/shared/interfaces/amqpInterface.js +18 -22
  99. package/build/shared/interfaces/channels/genesysBotConnector.js +21 -0
  100. package/build/shared/interfaces/channels/niceCXOne.js +8 -0
  101. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +17 -7
  102. package/build/shared/interfaces/handover.js +57 -3
  103. package/build/shared/interfaces/journeys/IJourney.js +6 -0
  104. package/build/shared/interfaces/journeys/IJourneyTrackEvent.js +2 -0
  105. package/build/shared/interfaces/messageAPI/endpoints.js +93 -8
  106. package/build/shared/interfaces/messageAPI/handover.js +4 -4
  107. package/build/shared/interfaces/resources/IAuditEvent.js +1 -0
  108. package/build/shared/interfaces/resources/IConnectionSchema.js +3 -0
  109. package/build/shared/interfaces/resources/IEndpoint.js +1 -0
  110. package/build/shared/interfaces/resources/IExtension.js +3 -0
  111. package/build/shared/interfaces/resources/ILargeLanguageModel.js +24 -5
  112. package/build/shared/interfaces/resources/INodeDescriptorSet.js +24 -1
  113. package/build/shared/interfaces/resources/TNLUConnectorType.js +0 -1
  114. package/build/shared/interfaces/resources/TRestChannelType.js +16 -11
  115. package/build/shared/interfaces/resources/TSocketChannelType.js +5 -1
  116. package/build/shared/interfaces/resources/TWebhookChannelType.js +6 -1
  117. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +21 -3
  118. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +13 -2
  119. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeStore.js +1 -1
  120. package/build/shared/interfaces/resources/settings/IAgentSettings.js +3 -23
  121. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -0
  122. package/build/shared/interfaces/resources/settings/ITranslationSettings.js +63 -0
  123. package/build/shared/interfaces/resources/settings/index.js +4 -1
  124. package/build/shared/interfaces/restAPI/administration/voiceGateway/v2.0/IReadVoiceGatewayAccountRest_2_0.js +3 -0
  125. package/build/shared/interfaces/restAPI/administration/voiceGateway/v2.0/ISetupVoiceGatewayAccountRest_2_0.js +3 -0
  126. package/build/shared/interfaces/restAPI/administration/voiceGateway/v2.0/index.js +3 -0
  127. package/build/shared/interfaces/restAPI/metrics/callCounter/v2.0/ICallCounterAggregatedValue_2_0.js +3 -0
  128. package/build/shared/interfaces/restAPI/metrics/callCounter/v2.0/IGetCallCounterOrganisationRest_2_0.js +3 -0
  129. package/build/shared/interfaces/restAPI/metrics/callCounter/v2.0/IGetCallCounterRest_2_0.js +3 -0
  130. package/build/shared/interfaces/restAPI/metrics/callCounter/v2.0/index.js +3 -0
  131. package/build/shared/interfaces/restAPI/metrics/knowledgeQueryCounter/v2.0/IGetKnowledgeQueryCounterOrganisationRest_2_0.js +3 -0
  132. package/build/shared/interfaces/restAPI/metrics/knowledgeQueryCounter/v2.0/IGetKnowledgeQueryCounterRest_2_0.js +3 -0
  133. package/build/shared/interfaces/restAPI/metrics/knowledgeQueryCounter/v2.0/IKnowlegeQueryCounterAggregatedValue_2_0.js +3 -0
  134. package/build/shared/interfaces/restAPI/metrics/knowledgeQueryCounter/v2.0/index.js +3 -0
  135. package/build/shared/interfaces/restAPI/resources/nluconnector/v2.0/TNLUConnectorType_2_0.js +0 -2
  136. package/build/shared/interfaces/restAPI/resources/uploadResumable/v2.0/IUploadResumableRest_2_0.js +17 -0
  137. package/build/shared/interfaces/restAPI/resources/uploadResumable/v2.0/index.js +3 -0
  138. package/build/shared/interfaces/security/ICallCounterAggregatedValue.js +3 -0
  139. package/build/shared/interfaces/security/IKnowledgeQueryCounterAggregatedValue.js +3 -0
  140. package/build/shared/interfaces/security/IPermission.js +3 -0
  141. package/build/shared/interfaces/security/IRole.js +9 -1
  142. package/build/shared/interfaces/security/index.js +2 -1
  143. package/build/shared/interfaces/trainer/ITrainerRecord.js +2 -0
  144. package/build/test.js +27 -0
  145. package/package.json +19 -17
  146. package/types/index.d.ts +823 -140
package/build/shared/charts/descriptors/service/GPTConversation.js
@@ -22,6 +22,25 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  text: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__CUSTOM_DESC__TEXT"
  }
  },
+ {
+ key: "handleOutputs",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__HANDLE_OUTPUT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__HANDLE_OUTPUT__DESCRIPTION",
+ defaultValue: "output",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__HANDLE_OUTPUT__OPTIONS__OUPTUT__LABEL",
+ value: "output"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__HANDLE_OUTPUT__OPTIONS__STORE__LABEL",
+ value: "store"
+ }
+ ]
+ }
+ },
  {
  key: "mode",
  type: "select",
@@ -153,6 +172,42 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  description: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__INJECT_SLOTS__DESCRIPTION",
  defaultValue: `{{input.slots.DATE[0].start.plain ? "Date: " + input.slots.DATE[0].start.plain : "" }}`
  },
+ {
+ key: "groundingMode",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__GROUNDING_MODE__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__GROUNDING_MODE__DESCRIPTION",
+ defaultValue: "custom",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__GROUNDING_MODE__OPTIONS__SEARCH__LABEL",
+ value: "search"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__GROUNDING_MODE__OPTIONS__CUSTOM__LABEL",
+ value: "custom"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__FIELDS__GROUNDING_MODE__OPTIONS__NONE__LABEL",
+ value: "none"
+ },
+ ]
+ }
+ },
+ {
+ key: "knowledgeStoreId",
+ type: "knowledgeStoreSelect",
+ label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__KNOWLEDGE_STORE_ID__LABEL",
+ description: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__KNOWLEDGE_STORE_ID__DESCRIPTION",
+ params: {
+ required: true
+ },
+ condition: {
+ key: "groundingMode",
+ value: "search"
+ }
+ },
  {
  key: "knowledge",
  type: "cognigyText",
@@ -162,6 +217,10 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  params: {
  multiline: true,
  rows: 5
+ },
+ condition: {
+ key: "groundingMode",
+ value: "custom"
  }
  },
  {
@@ -269,7 +328,8 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "presence_penalty",
  "frequency_penalty",
  "useStop",
- "stop"
+ "stop",
+ "handleOutputs"
  ]
  },
  {
@@ -294,6 +354,8 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  label: "UI__NODE_EDITOR__SERVICE__GPT_CONVERSATION__SECTIONS__GROUNDING__LABEL",
  defaultCollapsed: true,
  fields: [
+ "groundingMode",
+ "knowledgeStoreId",
  "knowledge",
  "injectedSlots"
  ]
@@ -384,7 +446,7 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  api.log("error", "Error in parsing response: " + error.message);
  }
  try {
- await handleCommands(context, input, api, debug, commands, config.stepLabels);
+ await handleCommands(context, input, api, debug, commands, config);
  }
  catch (error) {
  if (debug && input.channel === "adminconsole")
@@ -397,7 +459,7 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  });
  async function buildPrompt(context, input, api, config) {
- const { botName, character, companyName, companyDescription, strictness, furtherExamples, task, steps, injectedSlots, knowledge, mode, availableActions, slotsToCollect } = config;
+ const { botName, character, companyName, companyDescription, strictness, furtherExamples, task, steps, injectedSlots, mode, availableActions, slotsToCollect } = config;
  // clean the text to prevent injections
  const userText = input.text.replace(/bot:/gi, "").trim();
  let prompt;
@@ -445,15 +507,7 @@ async function buildPrompt(context, input, api, config) {
  strictnessLevel = "Do no deviate from the task steps and don't answer unrelated questions. Only answer question about the provided knowledge or information provided by the user. Pretend you don't know anything else about the world.";
  break;
  }
- // check if knowledge was injected for grouding
- let injectedKnowledge;
- if (knowledge) {
- injectedKnowledge = `You have the following contextual document from your internal knowledge base:
- "${knowledge}" `
- }
- else {
- injectedKnowledge = `You have not been given any contextual document from your internal knowledge base to answer the user's question. Don't make up an answer.`;
- }
+ const injectedKnowledge = await buildGroundingKnowledge(input, api, config);
  prompt = `You are a chatbot in customer service for ${companyName} (${companyDescription}). ${botName ? "Your name is " + botName : ""}

  You can generate example outputs in the following format:
@@ -505,15 +559,7 @@ Bot: [{`;
  strictnessLevel = "Only answer questions if you have been provided with the answer from the knowledge base. Pretend you don't know anything else about the world.";
  break;
  }
- // check if knowledge was injected for grouding
- let injectedKnowledge;
- if (knowledge) {
- injectedKnowledge = `You have the following contextual document from your internal knowledge base:
- "${knowledge}" `
- }
- else {
- injectedKnowledge = `You have not been given any contextual document from your internal knowledge base to answer the user's question. Don't make up an answer.`;
- }
+ const injectedKnowledge = await buildGroundingKnowledge(input, api, config);
  prompt = `You are a chatbot in customer service for ${companyName} (${companyDescription}). ${botName ? "Your name is " + botName : ""}

  Your task is to answer the users questions in a truthful and helpful way. Only base your answer on information provided above and not on the bot's previous answers.
@@ -638,14 +684,21 @@ async function parseResponse(api, input, debug, response) {
  * @param commands The commands to handle
  * @param config Node Config
  */
- async function handleCommands(context, input, api, debug, commands, stepLabels) {
+ async function handleCommands(context, input, api, debug, commands, config) {
  let outputs = false;
+ const { stepLabels, handleOutputs } = config;
  context.action = null;
+ const outputBuffer = [];
  for (let command of commands) {
  try {
  if (command.message) {
  context.lastOutput = command.message;
- api.output(command.message, null);
+ if (handleOutputs === "output") {
+ api.output(command.message, null);
+ }
+ else {
+ outputBuffer.push(command.message);
+ }
  outputs = true;
  }
  if (command.action) {
@@ -674,5 +727,54 @@ async function handleCommands(context, input, api, debug, commands, stepLabels)
  api.output("Sorry, I was absentminded for a second, could you say that again please?", null);
  api.log("error", JSON.stringify(commands));
  }
+ if (outputBuffer && outputBuffer.length > 0) {
+ input.promptResult = outputBuffer;
+ }
+ }
+ /**
+ * Build the knowledge to be injected into the GPT-3 request
+ * @param input The Cognigy input object
+ * @param api The Cognigy API object
+ * @param config The node config
+ * @returns
+ */
+ async function buildGroundingKnowledge(input, api, config) {
+ var _a, _b, _c;
+ const { groundingMode, knowledge } = config;
+ let injectedKnowledge = "";
+ switch (groundingMode) {
+ case "none":
+ injectedKnowledge = `You have not been given any contextual document from your internal knowledge base to answer the user's question. Don't make up an answer.`;
+ break;
+ case "search":
+ const knowledgeStoreId = (_a = config === null || config === void 0 ? void 0 : config.knowledgeStoreId) === null || _a === void 0 ? void 0 : _a.trim();
+ const { language, traceId, text } = input;
+ const data = {
+ language: language,
+ query: text,
+ topK: 3,
+ traceId: traceId,
+ disableSensitiveLogging: false
+ };
+ // Add knowledgeStoreIds to data
+ if (knowledgeStoreId) {
+ data.knowledgeStoreIds = [knowledgeStoreId];
+ }
+ // Perform knowledge search
+ const response = await api.knowledgeSearch(data);
+ let documents = (_c = (_b = response === null || response === void 0 ? void 0 : response.data) === null || _b === void 0 ? void 0 : _b.topK) === null || _c === void 0 ? void 0 : _c.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
+ if (documents) {
+ injectedKnowledge = `You have the following contextual document from your internal knowledge base: "${documents}" `;
+ }
+ else {
+ api.log("debug", "Tried to perform knowledge search, but no documents were found");
+ injectedKnowledge = `You have not been given any contextual document from your internal knowledge base to answer the user's question. Don't make up an answer.`;
+ }
+ break;
+ case "custom":
+ default:
+ injectedKnowledge = `You have the following contextual document from your internal knowledge base: "${knowledge}" `;
+ }
+ return injectedKnowledge;
  }
  //# sourceMappingURL=GPTConversation.js.map
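
To make the GPTConversation.js changes easier to follow: the node now decides where grounding knowledge comes from (`groundingMode`) and whether generated messages are sent immediately or buffered (`handleOutputs`). Below is a minimal sketch of the resulting configuration surface, not copied from the package, using only keys and values that appear in the hunks above (the store id is a placeholder):

```js
// Illustrative GPT Conversation node config combining the new options.
const gptConversationConfig = {
    groundingMode: "search",     // "search" | "custom" (default) | "none"
    knowledgeStoreId: "storeId", // only read when groundingMode === "search" (placeholder id)
    knowledge: "",               // only read when groundingMode === "custom"
    handleOutputs: "store"       // "output" sends messages directly; "store" buffers them
};

// With handleOutputs === "store", handleCommands collects the generated messages
// in a buffer and writes the array to input.promptResult instead of calling api.output().
```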
package/build/shared/charts/descriptors/service/GPTPrompt.js
@@ -9,17 +9,52 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  defaultLabel: "LLM Prompt",
  summary: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__DESCRIPTION",
  fields: [
+ {
+ key: "llmProviderReferenceId",
+ type: "llmSelect",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LLM_SELECT__LABEL",
+ defaultValue: "default",
+ params: {
+ required: true
+ }
+ },
  {
  key: "prompt",
- label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PROMPT__LABEL",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PROMPT_SYSTEM__LABEL",
  type: "cognigyText",
- description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PROMPT__DESCRIPTION",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PROMPT_SYSTEM__DESCRIPTION",
  params: {
- required: true,
- multiline: true
+ multiline: true,
+ rows: 5,
+ required: true
  },
  defaultValue: ""
  },
+ {
+ key: "chatTranscriptSteps",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__DESCRIPTION",
+ defaultValue: 3,
+ params: {
+ min: 0,
+ max: 10,
+ step: 1
+ },
+ condition: {
+ key: "useChatMode",
+ value: true,
+ }
+ },
+ {
+ key: "useChatMode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_CHAT__LABEL",
+ type: "toggle",
+ params: {
+ required: true
+ },
+ defaultValue: true
+ },
  {
  key: "samplingMethod",
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__LABEL",
@@ -134,6 +169,7 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  key: "storeLocation",
  type: "select",
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__DESCRIPTION",
  defaultValue: "input",
  params: {
  options: [
@@ -144,6 +180,10 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  {
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__CONTEXT__LABEL",
  value: "context"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__STREAM__LABEL",
+ value: "stream"
  }
  ],
  required: true
@@ -169,6 +209,51 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  value: "context",
  }
  },
+ {
+ key: "streamStopTokens",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__DESCRIPTION",
+ defaultValue: [".", "!", "?"],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "Please note streaming may not be supported by all LLM providers. If streaming is not supported, the result is written to the Input object."
+ },
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "debugLogTokenCount",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogRequestAndCompletion",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "For performance reasons, debug logging will only be active when testing from Interaction Panel."
+ }
+ },
  ],
  sections: [
  {
@@ -194,27 +279,59 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  defaultCollapsed: true,
  fields: [
  "storeLocation",
+ "streamDescription",
  "inputKey",
  "contextKey",
+ "streamStopTokens"
+ ]
+ },
+ {
+ key: "debugging",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__DEBUGGING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "debugDescription",
+ "debugLogTokenCount",
+ "debugLogRequestAndCompletion"
  ]
  }
  ],
  form: [
+ { type: "field", key: "llmProviderReferenceId" },
  { type: "field", key: "prompt" },
+ { type: "field", key: "useChatMode" },
+ { type: "field", key: "chatTranscriptSteps" },
  { type: "section", key: "advanced" },
  { type: "section", key: "storage" },
+ { type: "section", key: "debugging" }
  ],
  appearance: {},
- tags: ["service"],
+ tags: ["service", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
  function: async ({ cognigy, config }) => {
  var _a;
- const { api } = cognigy;
- const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, } = config;
+ const { api, input } = cognigy;
+ const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, debugLogTokenCount, debugLogRequestAndCompletion, llmProviderReferenceId, useChatMode, chatTranscriptSteps } = config;
  let prompt = config.prompt;
- const recentConversation = (0, prompt_1.createLastConverationString)(cognigy.lastConversationEntries) + "\n";
- const recentUserInputs = (0, prompt_1.createLastUserInputString)(cognigy.lastConversationEntries) + "\n";
- prompt = prompt.replace(/@cognigyRecentConversation/, recentConversation);
- prompt = prompt.replace(/@cognigyRecentUserInputs/, recentUserInputs);
+ // check if custom variables are used and if they have a length modifier
+ // works only for a single variable per prompt
+ if (prompt.includes("@cognigyRecentConversation")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentConversation:(\d+)/)) {
+ // @cognigyRecentConversation has a length modifier (e.g. @cognigyRecentConversation:5), so we just want to return the top 5 turns
+ turnLimit = Number(prompt.match(/@cognigyRecentConversation:(\d+)/)[1]);
+ }
+ const recentConversation = (0, prompt_1.createLastConverationString)(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentConversation(:\d+)?/, recentConversation);
+ }
+ if (prompt.includes("@cognigyRecentUserInputs")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentUserInputs:(\d+)/)) {
+ // @cognigyRecentUserInputs has a length modifier (e.g. @cognigyRecentUserInputs:5), so we just want to return the top 5 entries
+ turnLimit = Number(prompt.match(/@cognigyRecentUserInputs:(\d+)/)[1]);
+ }
+ const recentUserInputs = (0, prompt_1.createLastUserInputString)(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentUserInputs(:\d+)?/, recentUserInputs);
+ }
  try {
  const data = {
  prompt,
@@ -225,15 +342,34 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  frequencyPenalty,
  timeoutInMs: timeout,
  useCase: "promptNode",
+ stream: storeLocation === "stream",
+ streamOnDataHandler: (text) => {
+ text = text.trim();
+ if (text) {
+ api.output(text, null);
+ }
+ },
+ streamStopTokens
  };
  if (useStop) {
  data["stop"] = stop;
  }
+ if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
+ data["llmProviderReferenceId"] = llmProviderReferenceId;
+ }
+ let debugPrompt = prompt;
+ if (useChatMode && (prompt || chatTranscriptSteps)) {
+ data["chat"] = (0, prompt_1.createLastConversationChatObject)(cognigy.lastConversationEntries, prompt, chatTranscriptSteps);
+ data.prompt = "";
+ debugPrompt = JSON.stringify(data["chat"]);
+ }
  const response = await api.runGenerativeAIPrompt(data, "gptPromptNode");
+ // if we're in adminconsole, process debugging options
+ input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("LLM Prompt", debugPrompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
  if (storeLocation === "context") {
  api.addToContext(contextKey, response, "simple");
  }
- else {
+ else if (storeLocation === "input") {
  // @ts-ignore
  api.addToInput(inputKey, response);
  }
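
The GPTPrompt.js changes add an LLM provider selector, a chat mode with a transcript-length slider, a streaming store location with stop tokens, debug logging toggles, and an optional length modifier on the transcript variables. The sketch below condenses the length-modifier handling from the hunks above; it is illustrative only, and the `buildTranscript` parameter stands in for the package's `prompt_1` helpers:

```js
// Resolve "@cognigyRecentConversation" with an optional ":<n>" length modifier,
// e.g. "@cognigyRecentConversation:5" limits the injected transcript to 5 turns.
function injectRecentConversation(prompt, lastConversationEntries, buildTranscript) {
    if (!prompt.includes("@cognigyRecentConversation")) {
        return prompt;
    }
    let turnLimit;
    const match = prompt.match(/@cognigyRecentConversation:(\d+)/);
    if (match) {
        turnLimit = Number(match[1]);
    }
    const transcript = buildTranscript(lastConversationEntries, turnLimit) + "\n";
    return prompt.replace(/@cognigyRecentConversation(:\d+)?/, transcript);
}
```

The same pattern is applied to `@cognigyRecentUserInputs` in the node function.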
package/build/shared/charts/descriptors/service/checkAgentAvailability.js
@@ -31,6 +31,18 @@ exports.CHECK_AGENT_AVAILABILITY = (0, createNodeDescriptor_1.createNodeDescript
  label: "UI__NODE_EDITOR__SERVICE__CHECK_AGENT_AVAILABILITY__FIELDS__LIVEAGENT_INBOX_ID__LABEL",
  description: "UI__NODE_EDITOR__SERVICE__CHECK_AGENT_AVAILABILITY__FIELDS__LIVEAGENT_INBOX_ID__DESCRIPTION"
  },
+ {
+ key: constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_SKILLS,
+ type: "cognigyTextArray",
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_SKILLS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_SKILLS__DESCRIPTION",
+ },
+ {
+ key: constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_LANGUAGES,
+ type: "cognigyTextArray",
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_LANGUAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_LANGUAGES__DESCRIPTION",
+ },
  {
  key: "genesysCloudSkills",
  type: "textArray",
@@ -148,7 +160,9 @@ exports.CHECK_AGENT_AVAILABILITY = (0, createNodeDescriptor_1.createNodeDescript
  label: "UI__NODE_EDITOR__SERVICE__CHECK_AGENT_AVAILABILITY__SECTIONS__LIVEAGENT_SETTINGS__LABEL",
  defaultCollapsed: true,
  fields: [
- constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_INBOX_ID
+ constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_INBOX_ID,
+ constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_SKILLS,
+ constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_LANGUAGES,
  ]
  },
  {
package/build/shared/charts/descriptors/service/handoverV2.js
@@ -77,6 +77,48 @@ exports.HANDOVER_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_INBOXID__LABEL",
  description: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_INBOXID__DESCRIPTION"
  },
+ {
+ key: constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_SKILLS,
+ type: "cognigyTextArray",
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_SKILLS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_SKILLS__DESCRIPTION",
+ },
+ {
+ key: constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_LANGUAGES,
+ type: "cognigyTextArray",
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_LANGUAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_LANGUAGES__DESCRIPTION",
+ },
+ {
+ key: constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_PRIORITY,
+ type: "select",
+ defaultValue: 'none',
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_PRIORITY__LABEL",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_PRIORITY__OPTIONS__NONE__LABEL",
+ value: 'none'
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_PRIORITY__OPTIONS__URGENT__LABEL",
+ value: "urgent"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_PRIORITY__OPTIONS__HIGH__LABEL",
+ value: "high"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_PRIORITY__OPTIONS__MEDIUM__LABEL",
+ value: "medium"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__HANDOVER_TO_AGENT__FIELDS__LIVE_AGENT_PRIORITY__OPTIONS__LOW__LABEL",
+ value: "low"
+ },
+ ],
+ }
+ },
  {
  key: constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.AGENT_ASSIST_INIT_MESSAGE,
  type: "cognigyText",
@@ -219,6 +261,9 @@ exports.HANDOVER_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  defaultCollapsed: true,
  fields: [
  constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_INBOX_ID,
+ constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_SKILLS,
+ constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_LANGUAGES,
+ constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.LIVE_AGENT_PRIORITY,
  constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.AGENT_ASSIST_INIT_MESSAGE,
  constants_1.COGNIGY_LIVE_AGENT_DESCRIPTOR_FIELDS.ALLOW_AGENT_INJECT,
  ]
package/build/shared/{interfaces/twilioInterface.js → charts/descriptors/voice/interface/IBandwidth.js}
@@ -1,3 +1,3 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- //# sourceMappingURL=twilioInterface.js.map
+ //# sourceMappingURL=IBandwidth.js.map
package/build/shared/charts/descriptors/voice/mappers/base.mapper.js
@@ -34,6 +34,20 @@ class BaseMapper {
  }
  buildPayload(payload) {
  switch (this.endpointType) {
+ case "bandwidth":
+ {
+ if (!Array.isArray(payload)) {
+ payload = [payload];
+ }
+ return {
+ _bandwidth: {
+ json: {
+ activities: payload
+ }
+ }
+ };
+ }
+ ;
  case "audioCodes":
  if (!Array.isArray(payload)) {
  payload = [payload];
package/build/shared/charts/descriptors/voice/mappers/hangup.mapper.js
@@ -4,6 +4,8 @@ exports.hangUp = void 0;
  exports.hangUp = {
  handleInput(endpointType, hangUpReason) {
  switch (endpointType) {
+ case "bandwidth":
+ return this.handleBandwidthInput(hangUpReason);
  case "audioCodes":
  return this.handleAudioCodesInput(hangUpReason);
  case "voiceGateway2":
@@ -51,6 +53,30 @@ exports.hangUp = {
  };
  }
  return payload;
+ },
+ handleBandwidthInput(hangupReason) {
+ const payload = {
+ _bandwidth: {
+ json: {
+ activities: [{
+ type: "event",
+ name: "hangup",
+ activityParams: {}
+ }]
+ }
+ }
+ };
+ if (hangupReason) {
+ payload._bandwidth.json.activities[0].activityParams = {
+ hangupReason
+ };
+ }
+ else {
+ payload._bandwidth.json.activities[0].activityParams = {
+ hangupReason: "Hangup the call"
+ };
+ }
+ return payload;
  }
  };
  //# sourceMappingURL=hangup.mapper.js.map
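
For readers unfamiliar with the Bandwidth channel format: the new `handleBandwidthInput` above wraps the hangup event in a `_bandwidth.json.activities` envelope. The following sketch shows the value returned for a call such as `hangUp.handleInput("bandwidth", "Caller asked to end the call")`, derived directly from the hunk above:

```js
// Expected shape of the Bandwidth hangup payload; when no reason is passed,
// activityParams.hangupReason falls back to "Hangup the call".
const bandwidthHangupPayload = {
    _bandwidth: {
        json: {
            activities: [{
                type: "event",
                name: "hangup",
                activityParams: {
                    hangupReason: "Caller asked to end the call"
                }
            }]
        }
    }
};
```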
package/build/shared/charts/descriptors/voice/mappers/muteSpeechInput.mapper.js
@@ -7,6 +7,10 @@ class MuteSpeechInputMapper extends base_mapper_1.BaseMapper {
  handleInput(input, api, isGenericNode = false) {
  try {
  switch (this.endpointType) {
+ case "bandwidth": {
+ const output = this.handleBandwidthInput(input, api);
+ return this.buildPayload(output);
+ }
  case "audioCodes": {
  const output = this.handleAudioCodesInput(input, api);
  return this.buildPayload(output);
@@ -27,11 +31,12 @@ class MuteSpeechInputMapper extends base_mapper_1.BaseMapper {
  }
  }
  handleVGInput(input, api) {
- const { muteSpeechInput = false } = input;
+ const { muteSpeechInput = false, muteDtmfInput = false } = input;
  const output = {
  channelConfig: {
  mute: {
- muteSpeechInput
+ muteSpeechInput,
+ muteDtmfInput
  }
  }
  };
@@ -47,6 +52,16 @@ class MuteSpeechInputMapper extends base_mapper_1.BaseMapper {
  }
  };
  }
+ handleBandwidthInput(input, api) {
+ const { muteSpeechInput = false } = input;
+ return {
+ type: "event",
+ name: "config",
+ sessionParams: {
+ enableSpeechInput: !muteSpeechInput
+ }
+ };
+ }
  }
  exports.MuteSpeechInputMapper = MuteSpeechInputMapper;
  //# sourceMappingURL=muteSpeechInput.mapper.js.map
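
Muting speech input on a Bandwidth call uses the same envelope: `handleBandwidthInput` builds a config event and `BaseMapper.buildPayload` (see the base.mapper.js hunk above) wraps it. A sketch of the combined result for `{ muteSpeechInput: true }`:

```js
// Activity produced by MuteSpeechInputMapper.handleBandwidthInput for a muted call;
// the muteSpeechInput flag is inverted into enableSpeechInput.
const muteActivity = {
    type: "event",
    name: "config",
    sessionParams: {
        enableSpeechInput: false
    }
};

// Envelope after BaseMapper.buildPayload wraps the activity for the "bandwidth" endpoint.
const mutePayload = {
    _bandwidth: {
        json: {
            activities: [muteActivity]
        }
    }
};
```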