@tiledesk/tiledesk-tybot-connector 0.2.93-rc1 → 0.2.94

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -5,12 +5,14 @@
   available on:
   ▶️ https://www.npmjs.com/package/@tiledesk/tiledesk-tybot-connector
 
+ # v0.2.94
+ - Added support for chat history for AskGPTv2 action
 
- # v0.2.93-rc1
- - Set specific system_context for each AI smodels
+ # v0.2.93
+ - Added model contexts
 
   # v0.2.92
- - Improves GPTTask action: added support for history
+ - Improved GPTTask Action with support for history
 
   # v0.2.91
   - Added voice flow attributes: dnis, callId, ani
@@ -442,7 +442,7 @@ class TiledeskChatbotUtil {
     }
   }
 
-  static async transcriptJSON(transcript) {
+  static transcriptJSON(transcript) {
    const regexp = /(<.*>)/gm;
    const parts = transcript.split(regexp);
    // console.log("parts:", parts);
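Note on this change: transcriptJSON() is now synchronous, so callers can call it without await (the AskGPTv2 code below still awaits it, which is harmless on a non-promise value). A minimal call-site sketch follows; only the synchronous signature comes from this diff, while the sample transcript string is an assumed format (the parser splits on `<...>` markers), and the return value is assumed to be the array of { role, content } messages that transcriptToLLM() consumes further down.

```js
// Minimal call-site sketch. The transcript string below is an assumed format;
// only the sync signature of transcriptJSON() comes from this diff.
const { TiledeskChatbotUtil } = require("../../models/TiledeskChatbotUtil"); // path as required from DirAskGPTV2

const transcript_string = "<user>\nHello\n<assistant>\nHi! How can I help you?";
// No await needed anymore: transcriptJSON() returns its value directly.
const transcript = TiledeskChatbotUtil.transcriptJSON(transcript_string);
console.log(transcript); // expected: an array of { role, content } messages
```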
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@tiledesk/tiledesk-tybot-connector",
-  "version": "0.2.93-rc1",
+  "version": "0.2.94",
   "description": "Tiledesk Tybot connector",
   "main": "index.js",
   "scripts": {
@@ -3,6 +3,9 @@ const { TiledeskChatbot } = require('../../models/TiledeskChatbot');
 const { Filler } = require('../Filler');
 let https = require("https");
 const { DirIntent } = require("./DirIntent");
+const { TiledeskChatbotConst } = require("../../models/TiledeskChatbotConst");
+const { TiledeskChatbotUtil } = require("../../models/TiledeskChatbotUtil");
+const assert = require("assert");
 require('dotenv').config();
 
 class DirAskGPTV2 {
@@ -62,15 +65,16 @@ class DirAskGPTV2 {
   let temperature;
   let max_tokens;
   let top_k;
+  let transcript;
   //let default_context = "You are an helpful assistant for question-answering tasks.\nUse ONLY the following pieces of retrieved context to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf none of the retrieved context answer the question, add this word to the end <NOANS>\n\n{context}";
 
   let contexts = {
-    "gpt-3.5-turbo": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
+    "gpt-3.5-turbo": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say: \"I don't know<NOANS>\"\n\n####{context}####",
     "gpt-4": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
-    "gpt-4-turbo-preview": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf the context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n####{context}####",
+    "gpt-4-turbo-preview": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
     "gpt-4o": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf the context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n####{context}####",
     "gpt-4o-mini": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf the context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n####{context}####"
-  }
+  }
 
   let source = null;
 
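All of the per-model system contexts above ask the model to emit a `<NOANS>` sentinel when the retrieved context cannot answer the question (the updated gpt-3.5-turbo prompt now embeds it in a fixed "I don't know<NOANS>" reply). How that sentinel is consumed is outside this diff; a hypothetical consumer-side check might look like the sketch below, with invented function and variable names.

```js
// Hypothetical sketch, not part of this diff: detect and strip the <NOANS>
// sentinel that the system contexts above ask the model to append when the
// retrieved context is not sufficient to answer.
function splitNoAnswer(answer) {
  const noAnswer = answer.includes('<NOANS>');
  const text = answer.replace('<NOANS>', '').trim();
  return { text, noAnswer };
}

console.log(splitNoAnswer("I don't know<NOANS>"));
// { text: "I don't know", noAnswer: true }
```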
@@ -113,6 +117,18 @@ class DirAskGPTV2 {
   const filled_question = filler.fill(action.question, requestVariables);
   const filled_context = filler.fill(action.context, requestVariables)
 
+  if (action.history) {
+    let transcript_string = await TiledeskChatbot.getParameterStatic(
+      this.context.tdcache,
+      this.context.requestId,
+      TiledeskChatbotConst.REQ_TRANSCRIPT_KEY
+    )
+    if (this.log) { console.log("DirAskGPT transcript string: ", transcript_string) }
+
+    transcript = await TiledeskChatbotUtil.transcriptJSON(transcript_string);
+    if (this.log) { console.log("DirAskGPT transcript ", transcript) }
+  }
+
   const server_base_url = process.env.API_ENDPOINT || process.env.API_URL;
   const kb_endpoint = process.env.KB_ENDPOINT_QA
 
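The new branch runs only when the action has history enabled: the stored transcript is read from the request parameters (TiledeskChatbotConst.REQ_TRANSCRIPT_KEY) and parsed with the now-synchronous transcriptJSON(). A hedged sketch of the action fields this directive reads, based solely on the properties referenced in this diff (question, context, history); anything else a real AskGPTv2 action carries is omitted here.

```js
// Sketch of an AskGPTv2 action as consumed by DirAskGPTV2 in this diff.
// Only question, context and the new history flag appear in the hunks above;
// field values are invented for illustration.
const action = {
  question: "Where can I find my invoices?", // filled via Filler with requestVariables
  context: "",                               // optional system_context override
  history: true                              // new: fetch the stored transcript and send it as chat_history_dict
};
```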
@@ -181,8 +197,12 @@ class DirAskGPTV2 {
     json.system_context = filled_context;
   }
 
-  if (this.log) { console.log("DirAskGPT json:", json); }
+  if (transcript) {
+    json.chat_history_dict = await this.transcriptToLLM(transcript);
+  }
 
+  if (this.log) { console.log("DirAskGPT json:", json); }
+
   const HTTPREQUEST = {
     // url: server_base_url + "/" + this.context.projectId + "/kb/qa",
     url: kb_endpoint + "/qa",
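With history enabled, the body sent to kb_endpoint + "/qa" now carries a chat_history_dict built by transcriptToLLM() (added in the next hunk). An illustrative payload is sketched below; system_context and chat_history_dict come from this diff, while the question field and all values are assumptions for illustration only.

```js
// Illustrative request body for kb_endpoint + "/qa" after this change.
// Only system_context and chat_history_dict are visible in this hunk; the
// question field and every value here are invented for illustration.
const json = {
  question: "And how do I cancel it?",                                   // assumed field
  system_context: "You are an helpful assistant ... ####{context}####",  // set only when a context is provided
  chat_history_dict: {
    '0': { question: "How do I place an order?", answer: "Open the catalog and press Buy." }
  }
};
```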
@@ -478,6 +498,54 @@ class DirAskGPTV2 {
     })
   }
 
+  /**
+   * Transforms the transcirpt array in a dictionary like '0': { "question": "xxx", "answer":"xxx"}
+   * merging consecutive messages with the same role in a single question or answer.
+   * If the first message was sent from assistant, this will be deleted.
+   */
+  async transcriptToLLM(transcript) {
+
+    let objectTranscript = {};
+
+    if (transcript.length === 0) {
+      return objectTranscript;
+    }
+
+    let mergedTranscript = [];
+    let current = transcript[0];
+
+    for (let i = 1; i < transcript.length; i++) {
+      if (transcript[i].role === current.role) {
+        current.content += '\n' + transcript[i].content;
+      } else {
+        mergedTranscript.push(current);
+        current = transcript[i]
+      }
+    }
+    mergedTranscript.push(current);
+
+    if (mergedTranscript[0].role === 'assistant') {
+      mergedTranscript.splice(0, 1)
+    }
+
+    let counter = 0;
+    for (let i = 0; i < mergedTranscript.length - 1; i += 2) {
+      // Check if [i] is role user and [i+1] is role assistant??
+      assert(mergedTranscript[i].role === 'user');
+      assert(mergedTranscript[i+1].role === 'assistant');
+
+      if (!mergedTranscript[i].content.startsWith('/')) {
+        objectTranscript[counter] = {
+          question: mergedTranscript[i].content,
+          answer: mergedTranscript[i+1].content
+        }
+        counter++;
+      }
+    }
+
+    return objectTranscript;
+  }
+
 
 }
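A worked trace of the new transcriptToLLM() method with an invented transcript: consecutive same-role messages are merged, a leading assistant message is dropped, and user turns that start with '/' (slash commands) are skipped together with their answers; the assert calls throw if a merged pair is not a strict user/assistant alternation.

```js
// Worked example (invented messages) of what transcriptToLLM() above returns.
const transcript = [
  { role: 'assistant', content: 'Welcome!' },               // leading assistant turn: removed
  { role: 'user', content: '/start' },                      // slash command: pair skipped
  { role: 'assistant', content: 'Hi, how can I help?' },
  { role: 'user', content: 'Where is' },
  { role: 'user', content: 'my order?' },                   // merged with the previous user turn
  { role: 'assistant', content: 'Could you share the order id?' }
];

// Awaiting transcriptToLLM(transcript) on a DirAskGPTV2 instance resolves to:
// {
//   '0': { question: 'Where is\nmy order?', answer: 'Could you share the order id?' }
// }
```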