@tiledesk/tiledesk-tybot-connector 2.0.29 → 2.0.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@tiledesk/tiledesk-tybot-connector",
-   "version": "2.0.29",
+   "version": "2.0.30",
    "description": "Tiledesk Tybot connector",
    "main": "index.js",
    "scripts": {
@@ -57,6 +57,7 @@ const { DirReplaceBotV3 } = require('./directives/DirReplaceBotV3');
  const { DirAiTask, DirAiPrompt } = require('./directives/DirAiPrompt');
  const { DirWebResponse } = require('./directives/DirWebResponse');
  const { DirConnectBlock } = require('./directives/DirConnectBlock');
+ const { DirAiCondition } = require('./directives/DirAiCondition');

  const winston = require('../utils/winston');
  const { DirFlowLog } = require('./directives/DirFlowLog');
@@ -452,8 +453,6 @@ class DirectivesChatbotPlug {
  else if (directive_name === Directives.WAIT) {
    new DirWait(context).execute(directive, async () => {
      let next_dir = await this.nextDirective(this.directives);
-     const t3e = Date.now();
-     console.log(`[TIMER] Single wait executed in ${t3e - t3}ms`);
      this.process(next_dir);
    });
  }
@@ -595,6 +594,18 @@ class DirectivesChatbotPlug {
      }
    });
  }
+ else if (directive_name === Directives.AI_CONDITION) {
+   new DirAiCondition(context).execute(directive, async (stop) => {
+     if (stop == true) {
+       winston.debug("(DirectivesChatbotPlug) DirAiCondition Stopping Actions on: ", directive);
+       this.theend();
+     }
+     else {
+       let next_dir = await this.nextDirective(this.directives);
+       this.process(next_dir);
+     }
+   });
+ }
  else if (directive_name === Directives.WHATSAPP_ATTRIBUTE) {
    new DirWhatsappByAttribute(context).execute(directive, async (stop) => {
      let next_dir = await this.nextDirective(this.directives);
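Note: for orientation, below is a hypothetical "ai_condition" directive payload as the dispatcher above would receive it. The field names mirror what DirAiCondition.go() reads in the new file that follows (llm and model are the only parameters validated as mandatory); the intent values are taken from the sample commented inside that file, while the fallbackIntent, errorIntent and assignReplyTo values are placeholders, not excerpts from the package.

    // Hypothetical "ai_condition" directive payload (a sketch, not package code)
    const directive = {
      name: "ai_condition",
      action: {
        llm: "openai",            // "ollama" instead routes through the integration lookup
        model: "gpt-4o-mini",
        intents: [{
          label: "26efa629-686e-4a23-a2f8-38c8f5beb408",
          prompt: "user asking for medical information",
          conditionIntentId: "#9b1c29c1671847dba6db561f771a142e"
        }],
        fallbackIntent: "#noConditionMetBlockId", // placeholder: "no condition met" block
        errorIntent: "#onErrorBlockId",           // placeholder: "on error" block
        instructions: "",                         // optional extra instructions for the prompt
        assignReplyTo: "condition_label"          // placeholder attribute receiving the raw LLM answer
      }
    };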
@@ -0,0 +1,535 @@
+ const axios = require("axios").default;
+ const { TiledeskChatbot } = require("../../engine/TiledeskChatbot");
+ const { Filler } = require("../Filler");
+ let https = require("https");
+ const { DirIntent } = require("./DirIntent");
+ const { TiledeskChatbotConst } = require("../../engine/TiledeskChatbotConst");
+ const { TiledeskChatbotUtil } = require("../../utils/TiledeskChatbotUtil");
+ require('dotenv').config();
+ const winston = require('../../utils/winston');
+ const Utils = require("../../utils/HttpUtils");
+ const utils = require("../../utils/HttpUtils");
+ const httpUtils = require("../../utils/HttpUtils");
+ const integrationService = require("../../services/IntegrationService");
+ const { Logger } = require("../../Logger");
+ const { randomUUID } = require("crypto");
+ 
+ 
+ class DirAiCondition {
+ 
+   constructor(context) {
+     if (!context) {
+       throw new Error('context object is mandatory');
+     }
+     this.context = context;
+     this.chatbot = this.context.chatbot;
+     this.tdcache = this.context.tdcache;
+     this.requestId = this.context.requestId;
+     this.projectId = this.context.projectId;
+     this.token = this.context.token;
+     this.API_ENDPOINT = this.context.API_ENDPOINT;
+ 
+     this.intentDir = new DirIntent(context);
+     this.logger = new Logger({ request_id: this.requestId, dev: this.context.supportRequest?.draft, intent_id: this.context.reply?.attributes?.intent_info?.intent_id });
+   }
+ 
+   execute(directive, callback) {
+     winston.verbose("Execute AiCondition directive");
+     let action;
+     if (directive.action) {
+       action = directive.action;
+     }
+     else {
+       this.logger.error("Incorrect action for ", directive.name, directive)
+       winston.debug("DirAiCondition Incorrect directive: ", directive);
+       callback();
+       return;
+     }
+     this.go(action, (stop) => {
+       this.logger.native("[Ai Condition] Executed");
+       callback(stop);
+     })
+   }
+ 
+   async go(action, callback) {
+     winston.debug("DirAiCondition action:", action);
+     if (!this.tdcache) {
+       winston.error("Error: DirAiCondition tdcache is mandatory");
+       callback();
+       return;
+     }
+ 
+     let intents = action.intents;
+     // intents = [
+     //   {
+     //     "label": "26efa629-686e-4a23-a2f8-38c8f5beb408",
+     //     "prompt": "user asking for medical information",
+     //     "conditionIntentId": "#9b1c29c1671847dba6db561f771a142e"
+     //   }
+     // ]
+     let fallbackIntent = action.fallbackIntent; // "no condition met" block
+     let errorIntent = action.errorIntent; // "on error" block
+     await this.checkMandatoryParameters(action).catch( async (missing_param) => {
+       const error = "AiCondition Error: '" + missing_param + "' attribute is undefined"
+       this.logger.error(error);
+       await this.chatbot.addParameter("flowError", error);
+       if (errorIntent) {
+         await this.#executeIntent(errorIntent);
+         callback(true);
+         return Promise.reject();
+       }
+       callback();
+       return Promise.reject();
+     })
+ 
+     // fill attributes
+     let requestVariables = null;
+     requestVariables =
+       await TiledeskChatbot.allParametersStatic(
+         this.tdcache, this.requestId
+       )
+     const filler = new Filler();
+ 
+     let conditions = "";
+     intents.forEach( function(intent) {
+       let filled_prompt = filler.fill(intent.prompt, requestVariables);
+       conditions += `- label: ${intent.label} when: ${filled_prompt}\n`
+     });
+ 
+     let instructions = filler.fill(action.instructions, requestVariables);
+     let prompt_header = "Reply with the label satisfying the corresponding condition or with “fallback” if all conditions are false.\nIf more than one condition is true, answer with the first label corresponding to the true condition, following the order from top to bottom."
+     let condition_prompt = TiledeskChatbotUtil.AiConditionPromptBuilder(prompt_header, intents, instructions)
+ 
+     // let raw_condition_prompt = `Reply with the label satisfying the corresponding condition or with “fallback” if all conditions are false.
+     // If more than one condition is true, answer with the first label corresponding to the true condition, following the order from top to bottom.
+     // ${conditions}
+     // ${instructions}`
+ 
+     // const filled_question = condition_prompt; //filler.fill(action.question, requestVariables);
+     const filled_context = filler.fill(action.context, requestVariables);
+ 
+     // evaluate
+ 
+     let AI_endpoint = process.env.AI_ENDPOINT;
+     winston.verbose("DirAiCondition AI_endpoint " + AI_endpoint);
+ 
+     let headers = {
+       'Content-Type': 'application/json'
+     }
+ 
+     let answer = "";
+     let key;
+     let publicKey = false;
+     let ollama_integration;
+ 
+     if (action.llm === 'ollama') {
+       ollama_integration = await integrationService.getIntegration(this.projectId, action.llm, this.token).catch( async (err) => {
+         this.logger.error("[AI Condition] Error getting ollama integration.")
+         winston.error("DirAiCondition Error getting ollama integration: ", err);
+         await this.chatbot.addParameter("flowError", "Ollama integration not found");
+         if (errorIntent) {
+           await this.#executeIntent(errorIntent);
+           callback(true);
+           return;
+         }
+         callback();
+         return;
+       });
+ 
+     } else {
+       key = await integrationService.getKeyFromIntegrations(this.projectId, action.llm, this.token);
+ 
+       if (!key && action.llm === "openai") {
+         this.logger.native("[AI Condition] OpenAI key not found in Integration. Retrieve shared OpenAI key.")
+         key = process.env.GPTKEY;
+         publicKey = true;
+       }
+ 
+       if (!key) {
+         this.logger.error("[AI Condition] llm key not found");
+         winston.error("Error: DirAiCondition llm key not found");
+         await this.chatbot.addParameter("flowError", "AiCondition Error: missing key for llm " + action.llm);
+         if (errorIntent) {
+           await this.#executeIntent(errorIntent);
+           callback(true);
+           return;
+         }
+         callback();
+         return;
+       }
+     }
+ 
+     if (publicKey === true) {
+       try {
+         let keep_going = await this.checkQuoteAvailability(this.projectId, this.token)
+         if (keep_going === false) {
+           this.logger.warn("[AI Condition] OpenAI tokens quota exceeded");
+           await this.chatbot.addParameter("flowError", "GPT Error: tokens quota exceeded");
+           if (errorIntent) {
+             await this.#executeIntent(errorIntent);
+             callback();
+             return;
+           }
+           callback();
+           return;
+         }
+       } catch (err) {
+         this.logger.error("An error occurred on checking token quota availability");
+         await this.chatbot.addParameter("flowError", "An error occurred on checking token quota availability");
+         if (errorIntent) {
+           await this.#executeIntent(errorIntent);
+           callback();
+           return;
+         }
+         callback();
+         return;
+       }
+     }
+ 
+     let json = {
+       question: condition_prompt,
+       llm: action.llm,
+       model: action.model,
+       llm_key: key,
+       temperature: action.temperature,
+       max_tokens: action.max_tokens
+     }
+ 
+     if (action.context) {
+       json.system_context = filled_context;
+     }
+     // if (transcript) {
+     //   json.chat_history_dict = await this.transcriptToLLM(transcript);
+     // }
+ 
+     if (action.llm === 'ollama') {
+       json.llm_key = "";
+       json.model = {
+         name: action.model,
+         url: ollama_integration.value.url,
+         token: ollama_integration.value.token
+       }
+       json.stream = false
+ 
+     }
+ 
+     winston.debug("DirAiCondition json: ", json);
+ 
+     const HTTPREQUEST = {
+       url: AI_endpoint + "/ask",
+       headers: headers,
+       json: json,
+       method: 'POST'
+     }
+     winston.debug("DirAiCondition HttpRequest: ", HTTPREQUEST);
+ 
+     httpUtils.request(
+       HTTPREQUEST, async (err, resbody) => {
+         if (err) {
+           winston.error("DirAiCondition openai err: ", err);
+           await this.#assignAttributes(action, answer);
+           let error;
+           if (err.response?.data?.detail?.[0]) {
+             error = err.response.data.detail[0]?.msg;
+           } else if (err.response?.data?.detail?.answer) {
+             error = err.response.data.detail.answer;
+           } else {
+             error = JSON.stringify(err.response.data);
+           }
+           this.logger.error("[AI Condition] error executing action: ", error);
+           if (errorIntent) {
+             await this.chatbot.addParameter("flowError", "[AI Condition] error executing action: " + error);
+             await this.#executeIntent(errorIntent);
+             callback(true);
+             return;
+           }
+           callback();
+           return;
+         } else {
+ 
+           winston.debug("DirAiCondition resbody: ", resbody);
+           answer = resbody.answer;
+           this.logger.native("[AI Condition] answer: ", answer);
+ 
+           // if (publicKey === true) {
+           //   let tokens_usage = {
+           //     tokens: resbody.usage.total_token,
+           //     model: json.model
+           //   }
+           //   quotasService.updateQuote(this.projectId, this.token, tokens_usage);
+           // }
+ 
+           await this.#assignAttributes(action, answer);
+ 
+           if (answer === "fallback") {
+             if (fallbackIntent) {
+               await this.#executeIntent(fallbackIntent)
+               if (callback) {
+                 callback(true);
+                 return;
+               }
+             }
+           }
+           else {
+             let answer_found = null;
+             intents.forEach( i => {
+               if (i.label === answer) {
+                 answer_found = i;
+               }
+             });
+             if (answer_found) {
+               await this.#executeIntent(answer_found.conditionIntentId)
+               if (callback) {
+                 callback(true);
+                 return;
+               }
+             }
+             else { // if (answer === "fallback") {
+               if (fallbackIntent) {
+                 await this.#executeIntent(fallbackIntent)
+                 if (callback) {
+                   callback(true);
+                   return;
+                 }
+               }
+               else {
+                 this.logger.error("[AI Condition] Fallback connector not found");
+               }
+             }
+           }
+           this.logger.error("[AI Condition] error executing action: condition label not found in intents list");
+           callback();
+           return;
+         }
+       }
+     )
+   }
+ 
+   async checkMandatoryParameters(action) {
+     return new Promise((resolve, reject) => {
+       let params = ['llm', 'model']; // mandatory params
+       params.forEach((p) => {
+         if (!action[p]) {
+           reject(p)
+         }
+       })
+       resolve(true);
+     })
+   }
+ 
+   /**
+    * Transforms the transcript array in a dictionary like '0': { "question": "xxx", "answer": "xxx" },
+    * merging consecutive messages with the same role into a single question or answer.
+    * If the first message was sent by the assistant, it will be deleted.
+    */
+   // async transcriptToLLM(transcript) {
+ 
+   //   let objectTranscript = {};
+ 
+   //   if (transcript.length === 0) {
+   //     return objectTranscript;
+   //   }
+ 
+   //   let mergedTranscript = [];
+   //   let current = transcript[0];
+ 
+   //   for (let i = 1; i < transcript.length; i++) {
+   //     if (transcript[i].role === current.role) {
+   //       current.content += '\n' + transcript[i].content;
+   //     } else {
+   //       mergedTranscript.push(current);
+   //       current = transcript[i]
+   //     }
+   //   }
+   //   mergedTranscript.push(current);
+ 
+   //   if (mergedTranscript[0].role === 'assistant') {
+   //     mergedTranscript.splice(0, 1)
+   //   }
+ 
+   //   let counter = 0;
+   //   for (let i = 0; i < mergedTranscript.length - 1; i += 2) {
+   //     // Check if [i] is role user and [i+1] is role assistant??
+   //     assert(mergedTranscript[i].role === 'user');
+   //     assert(mergedTranscript[i+1].role === 'assistant');
+ 
+   //     if (!mergedTranscript[i].content.startsWith('/')) {
+   //       objectTranscript[counter] = {
+   //         question: mergedTranscript[i].content,
+   //         answer: mergedTranscript[i+1].content
+   //       }
+   //       counter++;
+   //     }
+   //   }
+ 
+   //   return objectTranscript;
+   // }
+ 
+   async #executeCondition(result, trueIntent, trueIntentAttributes, falseIntent, falseIntentAttributes, callback) {
+     let trueIntentDirective = null;
+     if (trueIntent) {
+       trueIntentDirective = DirIntent.intentDirectiveFor(trueIntent, trueIntentAttributes);
+     }
+     let falseIntentDirective = null;
+     if (falseIntent) {
+       falseIntentDirective = DirIntent.intentDirectiveFor(falseIntent, falseIntentAttributes);
+     }
+     if (result === true) {
+       if (trueIntentDirective) {
+         this.logger.native("[AI Condition] executing true condition");
+         this.intentDir.execute(trueIntentDirective, () => {
+           if (callback) {
+             callback();
+           }
+         })
+       }
+       else {
+         this.logger.native("[AI Condition] no block connected to true condition");
+         winston.debug("DirAiCondition No trueIntentDirective specified");
+         if (callback) {
+           callback();
+         }
+       }
+     }
+     else {
+       if (falseIntentDirective) {
+         this.logger.native("[AI Condition] executing false condition");
+         this.intentDir.execute(falseIntentDirective, () => {
+           if (callback) {
+             callback();
+           }
+         });
+       }
+       else {
+         this.logger.native("[AI Condition] no block connected to false condition");
+         winston.debug("DirAiCondition No falseIntentDirective specified");
+         if (callback) {
+           callback();
+         }
+       }
+     }
+   }
+ 
+   async #assignAttributes(action, answer) {
+     winston.debug("DirAiCondition assignAttributes action: ", action)
+     winston.debug("DirAiCondition assignAttributes answer: " + answer)
+ 
+     if (this.context.tdcache) {
+       if (action.assignReplyTo && answer) {
+         await TiledeskChatbot.addParameterStatic(this.context.tdcache, this.context.requestId, action.assignReplyTo, answer);
+       }
+     }
+   }
+ 
+   async #executeIntent(destinationIntentId, callback) {
+     let intentDirective = null;
+     if (destinationIntentId) {
+       intentDirective = DirIntent.intentDirectiveFor(destinationIntentId, null);
+     }
+     if (intentDirective) {
+       this.logger.native("[AI Condition] executing destinationIntentId");
+       this.intentDir.execute(intentDirective, () => {
+         if (callback) {
+           callback();
+         }
+       })
+     }
+     else {
+       this.logger.native("[AI Condition] no block connected to intentId:", destinationIntentId);
+       winston.debug("[AI Condition] no block connected to intentId:" + destinationIntentId);
+       if (callback) {
+         callback();
+       }
+     }
+   }
+ 
+   async getKeyFromKbSettings() {
+     return new Promise((resolve) => {
+ 
+       const KB_HTTPREQUEST = {
+         url: this.API_ENDPOINT + "/" + this.context.projectId + "/kbsettings",
+         headers: {
+           'Content-Type': 'application/json',
+           'Authorization': 'JWT ' + this.context.token
+         },
+         method: "GET"
+       }
+       winston.debug("DirAiCondition KB HttpRequest", KB_HTTPREQUEST);
+ 
+       httpUtils.request(
+         KB_HTTPREQUEST, async (err, resbody) => {
+           if (err) {
+             winston.error("(httprequest) DirAiCondition Get KnowledgeBase err: " + err.message);
+             resolve(null);
+           } else {
+             if (!resbody.gptkey) {
+               resolve(null);
+             } else {
+               resolve(resbody.gptkey);
+             }
+           }
+         }
+       )
+     })
+   }
+ 
+   async checkQuoteAvailability() {
+     return new Promise((resolve) => {
+ 
+       const HTTPREQUEST = {
+         url: this.API_ENDPOINT + "/" + this.context.projectId + "/quotes/tokens",
+         headers: {
+           'Content-Type': 'application/json',
+           'Authorization': 'JWT ' + this.context.token
+         },
+         method: "GET"
+       }
+       winston.debug("DirAiCondition check quote availability HttpRequest", HTTPREQUEST);
+ 
+       httpUtils.request(
+         HTTPREQUEST, async (err, resbody) => {
+           if (err) {
+             resolve(true)
+           } else {
+             if (resbody.isAvailable === true) {
+               resolve(true)
+             } else {
+               resolve(false)
+             }
+           }
+         }
+       )
+     })
+   }
+ 
+   async updateQuote(tokens_usage) {
+     return new Promise((resolve, reject) => {
+ 
+       const HTTPREQUEST = {
+         url: this.API_ENDPOINT + "/" + this.context.projectId + "/quotes/incr/tokens",
+         headers: {
+           'Content-Type': 'application/json',
+           'Authorization': 'JWT ' + this.context.token
+         },
+         json: tokens_usage,
+         method: "POST"
+       }
+       winston.debug("DirAiCondition update quote HttpRequest", HTTPREQUEST);
+ 
+       httpUtils.request(
+         HTTPREQUEST, async (err, resbody) => {
+           if (err) {
+             winston.error("(httprequest) DirAiCondition Increment tokens quote err: ", err);
+             reject(false)
+           } else {
+             winston.debug("(httprequest) DirAiCondition Increment token quote resbody: ", resbody);
+             resolve(true);
+           }
+         }
+       )
+     })
+   }
+ 
+ }
+ 
+ module.exports = { DirAiCondition }
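Note: a minimal usage sketch of the new class, assuming a context object carrying the fields the constructor dereferences above; every concrete value below is a placeholder, and the directive is the hypothetical payload shown earlier.

    const { DirAiCondition } = require('./directives/DirAiCondition');

    // context fields mirror what the constructor reads; values are placeholders
    const context = {
      chatbot,                          // TiledeskChatbot instance (provides addParameter)
      tdcache,                          // mandatory: go() aborts without it
      requestId: "<request_id>",
      projectId: "<project_id>",
      token: "<jwt_token>",
      API_ENDPOINT: process.env.API_ENDPOINT,
      supportRequest: { draft: false },
      reply: { attributes: { intent_info: { intent_id: "<intent_id>" } } }
    };

    new DirAiCondition(context).execute(directive, (stop) => {
      // stop === true means a connected block (condition, fallback or error) took over the flow
    });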
@@ -13,6 +13,7 @@ const httpUtils = require("../../utils/HttpUtils");
  const integrationService = require("../../services/IntegrationService");
  const { Logger } = require("../../Logger");
  const assert = require("assert");
+ const quotasService = require("../../services/QuotasService");
  
  
  class DirAiPrompt {
@@ -117,6 +118,7 @@ class DirAiPrompt {
  }
  
  let key;
+ let publicKey = false;
  let ollama_integration;
  
  if (action.llm === 'ollama') {
@@ -135,7 +137,13 @@ class DirAiPrompt {
  
  } else {
    key = await integrationService.getKeyFromIntegrations(this.projectId, action.llm, this.token);
- 
+ 
+   if (!key && action.llm === "openai") {
+     this.logger.native("[AI Prompt] OpenAI key not found in Integration. Retrieve shared OpenAI key.")
+     key = process.env.GPTKEY;
+     publicKey = true;
+   }
+ 
    if (!key) {
      this.logger.error("[AI Prompt] llm key not found in integrations");
      winston.error("Error: DirAiPrompt llm key not found in integrations");
@@ -150,6 +158,33 @@ class DirAiPrompt {
      }
    }
  
+   if (publicKey === true) {
+     try {
+       let keep_going = await quotasService.checkQuoteAvailability(this.projectId, this.token)
+       if (keep_going === false) {
+         this.logger.warn("[AI Prompt] OpenAI tokens quota exceeded");
+         await this.chatbot.addParameter("flowError", "GPT Error: tokens quota exceeded");
+         if (falseIntent) {
+           await this.#executeCondition(false, trueIntent, trueIntentAttributes, falseIntent, falseIntentAttributes);
+           callback();
+           return;
+         }
+         callback();
+         return;
+       }
+     } catch (err) {
+       this.logger.error("An error occurred on checking token quota availability");
+       await this.chatbot.addParameter("flowError", "An error occurred on checking token quota availability");
+       if (falseIntent) {
+         await this.#executeCondition(false, trueIntent, trueIntentAttributes, falseIntent, falseIntentAttributes);
+         callback();
+         return;
+       }
+       callback();
+       return;
+     }
+   }
+ 
    let json = {
      question: filled_question,
      llm: action.llm,
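Note: the quota gate above only runs when the shared GPTKEY is in use (publicKey === true). services/QuotasService itself is not part of this diff; the sketch below is a plausible reconstruction of its surface, inferred from the two call sites in this file and from the equivalent inline methods in DirAiCondition (GET {projectId}/quotes/tokens, POST {projectId}/quotes/incr/tokens, JWT auth). The real module may differ.

    // Hypothetical sketch of services/QuotasService.js, inferred from its call sites.
    const axios = require("axios").default;

    class QuotasService {
      constructor(apiEndpoint) {
        this.API_ENDPOINT = apiEndpoint;
      }

      // Resolves false only when the API reports the token quota as exhausted;
      // fails open on error, like DirAiCondition.checkQuoteAvailability.
      async checkQuoteAvailability(projectId, token) {
        try {
          const res = await axios.get(this.API_ENDPOINT + "/" + projectId + "/quotes/tokens", {
            headers: { 'Authorization': 'JWT ' + token }
          });
          return res.data.isAvailable === true;
        } catch (err) {
          return true;
        }
      }

      // Fire-and-forget increment of the consumed tokens ({ tokens, model }).
      async updateQuote(projectId, token, tokens_usage) {
        await axios.post(this.API_ENDPOINT + "/" + projectId + "/quotes/incr/tokens", tokens_usage, {
          headers: { 'Authorization': 'JWT ' + token }
        });
      }
    }

    module.exports = new QuotasService(process.env.API_ENDPOINT);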
@@ -214,6 +249,14 @@ class DirAiPrompt {
  winston.debug("DirAiPrompt resbody: ", resbody);
  answer = resbody.answer;
  this.logger.native("[AI Prompt] answer: ", answer);
+ 
+ if (publicKey === true) {
+   let tokens_usage = {
+     tokens: resbody.usage.total_token,
+     model: json.model
+   }
+   quotasService.updateQuote(this.projectId, this.token, tokens_usage);
+ }
  
  await this.#assignAttributes(action, answer);
  
@@ -97,7 +97,10 @@ class DirAskGPTV2 {
  "gpt-4o-mini": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
  "gpt-4.1": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
  "gpt-4.1-mini": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "gpt-4.1-nano": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end=="
+ "gpt-4.1-nano": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
+ "gpt-5": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
+ "gpt-5-mini": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
+ "gpt-5-nano": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end=="
  }
  
  let source = null;
@@ -37,6 +37,7 @@ class Directives {
  static ASK_GPT_V2 = "askgptv2";
  static GPT_TASK = "gpt_task";
  static AI_PROMPT = "ai_prompt";
+ static AI_CONDITION = "ai_condition";
  /**** INTEGRATIONS ****/
  static QAPLA = 'qapla';
  static MAKE = 'make';