strapi-plugin-faqchatbot 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,902 @@
1
+ import OpenAI from "openai";
2
const bootstrap = ({ strapi }) => {
  // Content-type UID of the FAQ entries this plugin manages.
  const UID = "plugin::faqchatbot.faqqa";

  /**
   * Compute an OpenAI embedding for an FAQ entry and stash it on the
   * payload before it is persisted. On partial updates, missing
   * question/answer values are filled in from the stored entry.
   */
  const updateEmbedding = async (params, existingEntry) => {
    const { data } = params;
    const question = data.question ?? existingEntry?.question;
    const answer = data.answer ?? existingEntry?.answer;
    if (!question || !answer) {
      return;
    }
    const textToEmbed = `Q: ${question}
A: ${answer}`;
    const embedService = strapi.plugin("faqchatbot").service("embed");
    const vector = await embedService.generateEmbedding(textToEmbed);
    if (vector) {
      data.embedding = vector;
    }
  };

  // Keep embeddings in sync with question/answer edits made through Strapi.
  strapi.db.lifecycles.subscribe({
    models: [UID],
    async beforeCreate(event) {
      await updateEmbedding(event.params);
    },
    async beforeUpdate(event) {
      const existingEntry = await strapi.db.query(UID).findOne({ where: event.params.where });
      await updateEmbedding(event.params, existingEntry);
    }
  });
};
28
// Lifecycle hook: the plugin holds no resources that need teardown.
const destroy = ({ strapi }) => {};
30
// Lifecycle hook: nothing to register before bootstrap runs.
const register = ({ strapi }) => {};
32
// Plugin configuration contract: no defaults and no validation needed.
const config$2 = {
  default: {},
  validator() {}
};
37
// Strapi content-type schema for the FAQ entries that back the chatbot.
const kind = "collectionType";
const collectionName = "chatbot_config_faqqas";
const info = {
  singularName: "faqqa",
  pluralName: "faqqas",
  displayName: "Chatbot-FAQ"
};
const options = {
  draftAndPublish: true
};
const attributes = {
  question: {
    type: "string",
    required: true
  },
  answer: {
    type: "richtext",
    required: true
  },
  // OpenAI embedding vector for Q+A, maintained by the bootstrap lifecycle hooks.
  embedding: {
    type: "json"
  }
};
const schema = {
  kind,
  collectionName,
  info,
  options,
  attributes
};
const faqqa = { schema };
// Registered under plugin::faqchatbot.faqqa (see UID in bootstrap).
const contentTypes = {
  faqqa
};
71
// Default content-api controller: returns the plugin welcome message.
const controller = ({ strapi }) => ({
  index(ctx) {
    const service = strapi.plugin("faqchatbot").service("service");
    ctx.body = service.getWelcomeMessage();
  }
});
76
// Admin controller for plugin settings and the collection picker.
const config$1 = ({ strapi }) => ({
  /**
   * GET: return stored settings plus every api:: content type with its
   * attribute names, so the admin UI can choose searchable fields.
   */
  async index(ctx) {
    const settings = await strapi.plugin("faqchatbot").service("config").getConfig();
    const contentTypes2 = [];
    for (const ct of Object.values(strapi.contentTypes)) {
      if (!ct.uid.startsWith("api::")) continue;
      contentTypes2.push({
        uid: ct.uid,
        displayName: ct.info.displayName,
        attributes: Object.keys(ct.attributes).map((name) => ({ name }))
      });
    }
    ctx.body = {
      settings,
      contentTypes: contentTypes2
    };
  },
  // POST: persist new settings (merged over existing ones by the service).
  async update(ctx) {
    const incoming = ctx.request.body;
    ctx.body = await strapi.plugin("faqchatbot").service("config").setConfig(incoming);
  }
});
97
// Shared OpenAI client for all chat-completion and embedding calls in the
// ask pipeline.
// NOTE(review): a second client (`openai`, further down) is created for the
// embed service with the same key — the two could share one instance.
const openai$1 = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});
100
/**
 * Read the configured support/contact link from the plugin store.
 * @param {object} strapi Strapi instance
 * @returns {Promise<string|null>} the link, or null when not configured.
 */
async function getContactLink(strapi) {
  const settings = await strapi
    .store({ environment: null, type: "plugin", name: "faqchatbot" })
    .get({ key: "settings" });
  return settings?.contactLink || null;
}
109
/**
 * Load the admin-authored prompt snippets from the plugin store.
 * @param {object} strapi Strapi instance
 * @returns {Promise<{system: string, response: string}>} instructions for
 * the planner (system) and the final answer (response); empty strings
 * when unset.
 */
async function getInstructions(strapi) {
  const settings = await strapi
    .store({ environment: null, type: "plugin", name: "faqchatbot" })
    .get({ key: "settings" });
  const system = settings?.systemInstructions || "";
  const response = settings?.responseInstructions || "";
  return { system, response };
}
121
// Collection names that must never be exposed to the planner.
const IGNORED_COLLECTION_NAMES = new Set(["faqitem", "item"]);
// Attribute types the chatbot can meaningfully search and render.
const SEARCHABLE_ATTRIBUTE_TYPES = new Set([
  "string",
  "text",
  "email",
  "uid",
  "richtext",
  "enumeration",
  "integer",
  "biginteger",
  "decimal",
  "float",
  "date",
  "datetime",
  "time",
  "relation"
]);

/**
 * Resolve the admin-selected collections into { name, fields } descriptors
 * usable by the query planner.
 *
 * Skips collections with no enabled fields, ignored names, and collections
 * whose api:: content type no longer exists. Field lists are restricted to
 * searchable attribute types.
 *
 * Improvement over the original: the ignored-name list and the 14-entry
 * allowed-type list were rebuilt (and linearly scanned) on every loop /
 * filter iteration; they are now module-level Sets.
 *
 * @param {object} strapi Strapi instance
 * @returns {Promise<Array<{name: string, fields: string[]}>>} empty array
 * on missing settings or any error.
 */
async function getActiveCollections(strapi) {
  try {
    const pluginStore = strapi.store({
      environment: null,
      type: "plugin",
      name: "faqchatbot"
    });
    const settings = await pluginStore.get({ key: "collections" });
    if (!settings) return [];
    const activeList = [];
    for (const item of settings) {
      const name = item.name.toLowerCase();
      const hasEnabledFields = item.fields?.some((f) => f.enabled);
      if (!hasEnabledFields || IGNORED_COLLECTION_NAMES.has(name)) {
        continue;
      }
      const uid = `api::${item.name}.${item.name}`;
      const contentType = strapi.contentTypes[uid];
      if (!contentType) {
        console.warn(` [WARNING] Content type not found for UID: ${uid}`);
        continue;
      }
      const fields = Object.keys(contentType.attributes).filter((key) =>
        SEARCHABLE_ATTRIBUTE_TYPES.has(contentType.attributes[key].type)
      );
      activeList.push({ name: item.name, fields });
    }
    return activeList;
  } catch (err) {
    console.error(" [ERROR] Error loading active collections:", err);
    return [];
  }
}
171
/**
 * Rewrite a follow-up question into a standalone search query using the
 * last 4 turns of chat history. Returns the original question unchanged
 * when there is no history, when the model reply looks like a refusal or
 * is implausibly long, or on any API error.
 * @param {Array<{role: string, content: string}>|null} history prior chat turns
 * @param {string} question the user's latest message
 * @returns {Promise<string>} a search-optimized question (best effort)
 */
async function rephraseQuestion(history, question) {
  // No context to resolve pronouns against — nothing to rewrite.
  if (!history || !Array.isArray(history) || history.length === 0) {
    return question;
  }
  try {
    const response = await openai$1.chat.completions.create({
      model: "gpt-4o-mini",
      temperature: 0,
      messages: [
        {
          role: "system",
          content: `You are a Search Query Optimizer.
Your task is to determine if the user's new message is a **Follow-up** or a **New Topic** and if a follow-up just rewrite the question .
Do NOT return any explanations, only the optimized search string.

### RULES
1. **Dependency Check (The "Pronoun" Rule):**
- ONLY combine with history if the new question contains **Pronouns** ("it", "that", "they") or is **Grammatically Incomplete** ("How much?", "Where do I buy?", "Is it refundable?").

2. **Independence Check (The "Specifics" Rule):**
- If the user asks a complete question containing a **New Specific Noun** or **Scenario** (e.g., "Group of 7 people", "Booking for pets"), treat it as a **Standalone Query**.
- **Do NOT** attach the previous topic to it.
- *Example:* History="Commuter Pass", Input="Can I book for a group of 7?" -> Output="Group booking for 7 people" (Correct).
- *Bad Output:* "Group booking for Commuter Pass" (Incorrect).

3. **Output:**
- Return ONLY the optimized search string.`
        },
        // Only the most recent turns are relevant for pronoun resolution.
        ...history.slice(-4),
        { role: "user", content: question }
      ]
    });
    const rewritten = response.choices[0].message.content?.trim();
    if (!rewritten) return question;
    // Heuristic guard: discard replies that look like refusals/explanations
    // rather than a query ("sorry", "I am...", "cannot"), or that ballooned
    // past a plausible search-string length.
    const lower = rewritten.toLowerCase();
    if (lower.includes("unavailable") || lower.includes("sorry") || lower.includes("i am") || lower.includes("cannot") || rewritten.length > 120) {
      return question;
    }
    return rewritten;
  } catch (err) {
    // Best-effort feature: fall back to the raw question on any failure.
    console.error("Error in rephraseQuestion:", err);
    return question;
  }
}
215
// Strapi v4 filter operator names; the planner LLM sometimes emits them
// without the required "$" prefix. Hoisted to module scope as a Set — the
// original rebuilt a 20-entry array and linearly scanned it on every
// recursive call.
const STRAPI_FILTER_OPERATORS = new Set([
  "eq",
  "ne",
  "lt",
  "gt",
  "lte",
  "gte",
  "in",
  "notIn",
  "contains",
  "notContains",
  "containsi",
  "notContainsi",
  "null",
  "notNull",
  "between",
  "startsWith",
  "endsWith",
  "or",
  "and",
  "not"
]);

/**
 * Recursively rewrite a planner-produced filter structure so every known
 * Strapi operator key carries its "$" prefix (e.g. eq -> $eq, or -> $or).
 * Field-name keys and scalar values pass through untouched; arrays are
 * mapped element-wise. Returns a new structure — the input is not mutated.
 * Uses Object.entries (own enumerable keys) instead of the original
 * for...in, which also walked inherited properties.
 * @param {*} filters planner output (object, array, or scalar)
 * @returns {*} sanitized copy with the same shape
 */
function sanitizeFilters(filters) {
  if (!filters || typeof filters !== "object") return filters;
  if (Array.isArray(filters)) {
    return filters.map(sanitizeFilters);
  }
  const newFilters = {};
  for (const [key, value] of Object.entries(filters)) {
    const newKey = STRAPI_FILTER_OPERATORS.has(key) && !key.startsWith("$") ? `$${key}` : key;
    newFilters[newKey] = sanitizeFilters(value);
  }
  return newFilters;
}
252
/**
 * Fold a new user question into the rolling conversation context.
 *
 * Bug fix: the original spread-copied only the top level and then called
 * push/shift on ctx.history — mutating the caller's history array in
 * place. The arrays are now copied before modification, so the returned
 * context is fully independent of prevContext.
 *
 * @param {{history?: string[], keywords?: string[]}|null} prevContext prior context (not mutated)
 * @param {string} question latest user question
 * @returns {object} new context with capped history, de-duplicated
 * keywords (words longer than 3 chars), and lastQuestion set.
 */
function updateJsonContext(prevContext, question) {
  const MAX_HISTORY = 10;
  const ctx = { ...(prevContext || {}) };
  // Copy before pushing so the caller's array is left untouched.
  ctx.history = Array.isArray(ctx.history) ? [...ctx.history] : [];
  ctx.history.push(question);
  if (ctx.history.length > MAX_HISTORY) ctx.history.shift();
  // Significant words (>3 chars) become retrieval hints; punctuation stripped.
  const words = question.toLowerCase().replace(/[^\w\s]/g, "").split(" ").filter((w) => w.length > 3);
  ctx.keywords = [...new Set([...(ctx.keywords || []), ...words])];
  ctx.lastQuestion = question;
  return ctx;
}
263
/**
 * Execute a planner query plan against the Strapi entity service.
 * Returns a count payload, a list payload trimmed to the admin-enabled
 * fields (max 10 rows), or null when the plan targets no usable
 * collection or the query fails.
 */
async function searchRealtime(strapi, plan, activeCollections) {
  if (!plan?.collection) return null;
  const collectionConfig = activeCollections.find((c) => c.name === plan.collection);
  if (!collectionConfig) return null;
  const sanitizedFilters = sanitizeFilters(plan.filters || {});
  const uid = `api::${plan.collection}.${plan.collection}`;
  try {
    if (plan.operation === "count") {
      const value = await strapi.entityService.count(uid, {
        filters: sanitizedFilters
      });
      return { type: "count", collection: plan.collection, value };
    }
    const rows = await strapi.entityService.findMany(uid, {
      filters: sanitizedFilters,
      sort: plan.sort,
      limit: 10
    });
    // Strip every row down to the fields the admin enabled for display.
    const items = rows.map((row) =>
      Object.fromEntries(collectionConfig.fields.map((f) => [f, row[f]]))
    );
    return {
      type: "list",
      collection: plan.collection,
      schema: collectionConfig.fields,
      items
    };
  } catch (err) {
    console.error("Realtime search error:", err);
    return null;
  }
}
305
/**
 * Cosine similarity of two equal-length numeric vectors.
 *
 * Bug fix: when either vector has zero magnitude the original divided by
 * zero and returned NaN, which then poisoned the similarity sort in
 * searchFAQ; such inputs now score 0.
 *
 * @param {number[]|null} a first vector
 * @param {number[]|null} b second vector
 * @returns {number} similarity in [-1, 1]; 0 for missing, length-mismatched,
 * or zero-magnitude input.
 */
function cosineSimilarity(a, b) {
  if (!a || !b || a.length !== b.length) return 0;
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  const denom = Math.sqrt(normA) * Math.sqrt(normB);
  return denom === 0 ? 0 : dot / denom;
}
317
/**
 * Semantic FAQ lookup: embed the question with OpenAI, score every
 * published FAQ row by cosine similarity against its stored embedding,
 * and return up to 3 answers — or none when the best match scores
 * below 0.4.
 */
async function searchFAQ(question, strapi) {
  const embeddingResponse = await openai$1.embeddings.create({
    model: "text-embedding-3-small",
    input: question
  });
  const queryVector = embeddingResponse.data[0].embedding;
  if (!queryVector || !queryVector.length) {
    return [];
  }

  // Raw knex query: only published rows that actually carry an embedding.
  const faqs = await strapi.db
    .connection("chatbot_config_faqqas")
    .select("answer", "embedding")
    .whereNotNull("embedding")
    .whereNotNull("published_at");
  if (!faqs.length) return [];

  const scored = faqs.map((faq) => {
    try {
      // Embeddings may come back as JSON strings depending on the DB driver.
      let vector = faq.embedding;
      if (typeof vector === "string") {
        vector = JSON.parse(vector);
      }
      vector = Array.isArray(vector) ? vector.map((n) => Number(n)) : [];
      if (vector.length !== queryVector.length) {
        return { answer: faq.answer, similarity: 0 };
      }
      return {
        answer: faq.answer,
        similarity: cosineSimilarity(queryVector, vector)
      };
    } catch (err) {
      // Malformed stored embedding: treat as a non-match rather than failing.
      return { answer: faq.answer, similarity: 0 };
    }
  });

  scored.sort((a, b) => b.similarity - a.similarity);
  if (!scored.length || scored[0].similarity < 0.4) {
    return [];
  }
  return scored.slice(0, 3).map((s) => s.answer);
}
352
/**
 * Ask the LLM to turn a user question into a Strapi query plan.
 * The prompt embeds the admin-enabled collections/fields and forces
 * JSON-only output at temperature 0.
 * @param {string} question (possibly rephrased) user question
 * @param {Array<{name: string, fields: string[]}>} activeCollections searchable collections
 * @param {{system: string}} instructions admin-authored prompt prefix
 * @returns {Promise<object|null>} the parsed plan ({ collection, operation,
 * filters, sort } or { collection: null }), or null when the model reply
 * is not valid JSON.
 */
async function simplePlanner(question, activeCollections, instructions) {
  const response = await openai$1.chat.completions.create({
    model: "gpt-4o-mini",
    temperature: 0,
    messages: [
      {
        role: "system",
        content: `
${instructions.system || ""}
You are a STRICT database query planner that converts user questions into Strapi query JSON.

--------------------------------
CORE TASK
--------------------------------
Return ONLY valid JSON. No text. No explanation.

--------------------------------
COLLECTION SELECTION
--------------------------------
- Choose the most relevant collection from the available list.
- Never invent collection names.

--------------------------------
FIELD RULES
--------------------------------
- Only use fields that exist in the selected collection schema.
- Never hallucinate fields.


--------------------------------
LOCATION NORMALIZATION (CRITICAL)
--------------------------------
The database stores locations in the format:
City Name (AIRPORT_CODE)

Before generating filters, you MUST normalize
all user-provided places into the nearest
major city or airport name.

RULES:

1. SMALL TOWNS / VILLAGES
- Convert to nearest major airport city.
Example:
"Kalveerampalayam" → "Coimbatore"
"Kollam" → "Trivandrum"
"Alappuzha" → "Kochi"

2. OLD OR LOCAL NAMES
- Convert to modern official city name.
Example:
"Madras" → "Chennai"
"Cochin" → "Kochi"
"Bombay" → "Mumbai"

3. SUBURBS / DISTRICTS
- Convert to main metro city.
Example:
"Brooklyn" → "New York"
"Noida" → "Delhi"

4. AIRPORT CODES
- If user provides code (COK, MAA, JFK),
search using containsi for that code.

Example:
User: "flight from COK"
Filter:
{ "origin": { "containsi": "COK" } }

5. ALWAYS MATCH DATABASE STRINGS
- Use containsi
- Never use raw spelling if DB format differs
- Prefer airport code if available

--------------------------------
TEXT FILTER RULES (VERY IMPORTANT)
--------------------------------
- For city names, titles, destinations, names → ALWAYS use "containsi"
- NEVER use "eq" for text
- NEVER use "in" for text arrays
- For multiple text values use "$or" with containsi

Example:
User: "flight to paris or amsterdam"
Filters:
{
"$or": [
{ "destination": { "containsi": "paris" } },
{ "destination": { "containsi": "amsterdam" } }
]
}

--------------------------------
NUMBER FILTER RULES
--------------------------------
- For price, fare, amount → use lt, lte, gt, gte, between
- "under" → lte
- "above" → gte
- "between" → between

--------------------------------
OPERATION RULES
--------------------------------
- "how many", "count" → operation = "count"
- otherwise → operation = "list"

--------------------------------
SORT RULES
--------------------------------
- "cheapest", "lowest" → sort ["fare:asc"]
- "highest", "expensive" → sort ["fare:desc"]
- Only add sort if user implies ranking

--------------------------------
INTENT CLASSIFICATION (CRITICAL)
--------------------------------
First decide intent:

INTENT = "realtime"
- User asks about availability, price, list, count, search, show items
- Mentions data stored in collections

INTENT = "faq"
- User asks "who is", "what is", "explain", "details about"
- General knowledge
- No clear database entity

If no clear database match → ALWAYS choose "faq"
NEVER force a collection.

OUTPUT FORMAT

Return ONLY JSON.

If no database match exists, return:

{
"collection": null
}

Otherwise return:

{
"collection": "name",
"operation": "list" | "count",
"filters": {},
"sort": []
}


--------------------------------
AVAILABLE COLLECTIONS
--------------------------------
${JSON.stringify(activeCollections, null, 2)}
`
      },
      {
        role: "user",
        content: question
      }
    ]
  });
  try {
    // Models often wrap JSON in ``` fences despite instructions; strip them
    // before parsing. An unparseable reply yields null (caller treats that
    // as "no realtime plan").
    const raw = response.choices[0].message.content || "{}";
    const cleaned = raw.replace(/```json/g, "").replace(/```/g, "").trim();
    const plan = JSON.parse(cleaned);
    return plan;
  } catch (err) {
    return null;
  }
}
524
/**
 * Turn a searchRealtime() result into a short natural-language summary
 * via the LLM.
 * @param {string} question the (rephrased) user question
 * @param {object|null} realtimeData payload from searchRealtime
 * @returns {Promise<string|null>} summary text, or null when there is no
 * realtime data to interpret.
 */
async function realtimeInterpreterAI(question, realtimeData) {
  if (!realtimeData) return null;
  const response = await openai$1.chat.completions.create({
    model: "gpt-4o-mini",
    temperature: 0.2,
    messages: [
      {
        role: "system",
        content: `
You are a realtime data interpreter.

Convert database JSON into a SHORT natural language summary.

Rules:
- Do NOT output JSON
- Do NOT hallucinate
- If count → say number
- If list → summarize important fields only
- Max 3–4 lines
`
      },
      {
        role: "user",
        content: `
QUESTION: ${question}

REALTIME DATA:
${JSON.stringify(realtimeData)}
`
      }
    ]
  });
  const text = response.choices[0].message.content;
  return text;
}
559
/**
 * Stream the final chatbot answer to the client as Server-Sent Events.
 *
 * Takes over the raw Node response (ctx.res), bypassing Koa's ctx.body:
 * emits one `data:` frame per streamed model token, then (for list
 * results) a `cards` event carrying the structured realtime payload, and
 * finally `data: [DONE]` before closing the connection.
 *
 * @param {object} ctx Koa context (response is written directly)
 * @param {string} question rephrased user question
 * @param {string[]} faq top FAQ answers from searchFAQ
 * @param {object|null} realtimeMeta structured result from searchRealtime
 * @param {string|null} realtimeText summary from realtimeInterpreterAI
 * @param {string|null} contactLink configured support link
 * @param {{response: string}} instructions admin-authored response prompt
 */
async function finalAggregator(ctx, question, faq, realtimeMeta, realtimeText, contactLink, instructions) {
  // SSE headers must go out before the first token is written.
  ctx.set("Content-Type", "text/event-stream");
  ctx.set("Cache-Control", "no-cache");
  ctx.set("Connection", "keep-alive");
  ctx.status = 200;
  ctx.res.flushHeaders?.();
  const stream = await openai$1.chat.completions.create({
    model: "gpt-4o-mini",
    temperature: 0.3,
    stream: true,
    messages: [
      {
        role: "system",
        content: `

${instructions.response || ""}
You are an intelligent AI Assistant for a website chatbot.

INPUTS:
- FAQ semantic answers
- REALTIME_META (structured database info)
- REALTIME_TEXT (human summary)
- User question

--------------------------------
RESPONSE LENGTH RULE
--------------------------------
Default → SHORT & PRECISE (2–3 lines max)

If the user's question contains:
"explain", "details", "more", "elaborate", "why", "how"
→ Provide LONGER detailed answer.

If FAQ answer is long:
→ Summarize unless user asked for detail.

--------------------------------
CORE RULE
--------------------------------
REALTIME_META decides logic.
REALTIME_TEXT decides wording.

--------------------------------
CONTACT INTENT RULE
--------------------------------
If user asks about contacting support, customer service, help, or similar:

AND contactLink is provided:
Return ONLY this link in a short sentence.

Example:
"You can contact us here: https://example.com/contact"

--------------------------------
ANSWER LOGIC
--------------------------------

CASE 1 — REALTIME_META.type = "count"
Return ONE sentence with the number.

CASE 2 — REALTIME_META.type = "list"
Use REALTIME_TEXT as main answer.

CASE 3 — REALTIME_META = null
Use FAQ.

CASE 4 — BOTH EXIST
Use REALTIME_TEXT as main + FAQ as support.

CASE 5 — NOTHING
Say information unavailable.

Never show JSON.
Never hallucinate.
Max 5 lines.
`
      },
      {
        role: "user",
        content: `
QUESTION: ${question}

CONTACT_LINK:
${contactLink || "NOT_AVAILABLE"}

FAQ:
${JSON.stringify(faq)}

REALTIME_META:
${JSON.stringify(realtimeMeta)}

REALTIME_TEXT:
${realtimeText}
`
      }
    ]
  });
  // Forward each streamed token as its own SSE data frame (the template
  // literal embeds the required blank line between frames).
  for await (const chunk of stream) {
    const token = chunk.choices?.[0]?.delta?.content;
    if (token) {
      ctx.res.write(`data: ${token}

`);
    }
  }
  // Structured list results ride along as a separate "cards" event so the
  // widget can render them independently of the prose answer.
  if (realtimeMeta && realtimeMeta.type === "list") {
    const cardsPayload = {
      title: realtimeMeta.collection,
      schema: realtimeMeta.schema,
      items: realtimeMeta.items
    };
    ctx.res.write(`event: cards
`);
    ctx.res.write(`data: ${JSON.stringify(cardsPayload)}

`);
  }
  // Sentinel frame the client uses to stop listening, then close.
  ctx.res.write("data: [DONE]\n\n");
  ctx.res.end();
}
679
/**
 * Probe whether an OpenAI API key is usable by issuing the cheapest
 * authenticated request (listing models).
 * @param {string} key candidate API key
 * @returns {Promise<boolean>} true when the key authenticates.
 */
async function validateOpenAIKey(key) {
  try {
    await new OpenAI({ apiKey: key }).models.list();
    return true;
  } catch {
    return false;
  }
}
688
/**
 * Chat controllers: API-key validation plus the main /ask pipeline
 * (context update -> rephrase -> FAQ search + query planning -> realtime
 * lookup -> streamed SSE aggregation).
 *
 * Improvements over the original: dead empty if/else branches removed;
 * the three independent lookups after rephrasing now run in parallel;
 * the error fallback no longer tries to set ctx.body after the SSE
 * stream has already flushed headers (which cannot be rewritten).
 */
const ask = ({ strapi }) => ({
  // POST /validate-key — probe OpenAI with the submitted key.
  async validateKey(ctx) {
    const { key } = ctx.request.body;
    const isValid = await validateOpenAIKey(key);
    ctx.body = { valid: isValid };
  },
  // POST /ask — answer a user question, streaming the reply as SSE.
  async ask(ctx) {
    const { question, history = [] } = ctx.request.body;
    const instructions = await getInstructions(strapi);
    let jsonContext = ctx.request.body.context || {};
    jsonContext = updateJsonContext(jsonContext, question);
    // Hand the rolling context back to the client for its next turn.
    ctx.set("X-User-Context", JSON.stringify(jsonContext));
    try {
      const activeCollections = await getActiveCollections(strapi);
      const rewritten = await rephraseQuestion(history, question);
      // These three only depend on `rewritten`; run them concurrently.
      const [contactLink, faqResults, plan] = await Promise.all([
        getContactLink(strapi),
        searchFAQ(rewritten, strapi),
        simplePlanner(rewritten, activeCollections, instructions)
      ]);
      let realtimeResults = null;
      let realtimeAIText = null;
      if (plan && plan.collection) {
        realtimeResults = await searchRealtime(strapi, plan, activeCollections);
        realtimeAIText = await realtimeInterpreterAI(rewritten, realtimeResults);
      }
      await finalAggregator(
        ctx,
        rewritten,
        faqResults,
        realtimeResults,
        realtimeAIText,
        contactLink,
        instructions
      );
    } catch (err) {
      console.error("[ERROR]", err);
      if (!ctx.res.headersSent) {
        // Stream never started: a normal JSON error body is still possible.
        ctx.body = { type: "text", content: "Error occurred." };
      } else {
        // Mid-stream failure: the response cannot be rewritten, so just
        // terminate the connection instead of leaving it hanging.
        ctx.res.end();
      }
    }
  }
});
731
// Content-api controller: expose the admin-configured suggested questions
// shown by the chat widget on open.
const suggestQuestions = ({ strapi }) => ({
  async getSuggested(ctx) {
    const settings = await strapi
      .store({ environment: null, type: "plugin", name: "faqchatbot" })
      .get({ key: "settings" });
    ctx.body = { suggestedQuestions: settings?.suggestedQuestions || [] };
  }
});
744
// Content-api controller: expose the per-collection card styling the admin
// configured for the widget's result cards.
const cardMapping = ({ strapi }) => ({
  async index(ctx) {
    const settings = await strapi
      .store({ environment: null, type: "plugin", name: "faqchatbot" })
      .get({ key: "settings" });
    ctx.body = { cardStyles: settings?.cardStyles || {} };
  }
});
757
// Controller registry; keys match the prefixes used in route handler
// strings (e.g. "ask.ask", "config.index").
const controllers = {
  controller,
  config: config$1,
  ask,
  suggestQuestions,
  cardMapping
};
// This plugin registers no custom middlewares or policies.
const middlewares = {};
const policies = {};
766
// Admin-panel routes for the settings UI.
// FIXME(security): both endpoints use `auth: false`, so the plugin
// configuration can be read AND overwritten without authentication.
// Confirm this is intentional; admin routes normally require admin auth.
const admin = {
  type: "admin",
  routes: [
    {
      method: "GET",
      path: "/collections",
      handler: "config.index",
      config: {
        auth: false
      }
    },
    {
      method: "POST",
      path: "/collections",
      handler: "config.update",
      config: {
        auth: false
      }
    }
  ]
};
787
// Public content-api routes consumed by the embeddable chat widget.
// All are deliberately unauthenticated so the widget works on any page.
// NOTE(review): /validate-key also accepts unauthenticated POSTs — verify
// that exposing an OpenAI key probe publicly is acceptable.
const contentApi = () => ({
  type: "content-api",
  routes: [
    {
      method: "GET",
      path: "/",
      handler: "controller.index",
      config: {
        auth: false
      }
    },
    {
      method: "POST",
      path: "/ask",
      handler: "ask.ask",
      config: {
        auth: false
      }
    },
    {
      method: "GET",
      path: "/suggested-questions",
      handler: "suggestQuestions.getSuggested",
      config: {
        auth: false
      }
    },
    {
      method: "GET",
      path: "/card-mapping",
      handler: "cardMapping.index",
      config: { auth: false }
    },
    {
      method: "POST",
      path: "/validate-key",
      handler: "ask.validateKey",
      config: {
        auth: false
      }
    }
  ]
});
830
// Route registry: Strapi expects the "admin" and "content-api" buckets.
const routes = {
  admin,
  "content-api": contentApi
};
// OpenAI client used by the embed service.
// NOTE(review): duplicates `openai$1` defined above with the same key —
// the two could share a single client instance.
const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});
837
// Embedding service: thin wrapper around OpenAI's embeddings endpoint.
const embed = ({ strapi }) => ({
  /**
   * Generate an embedding vector for `text`.
   * @param {string} text content to embed (e.g. "Q: ...\nA: ..." pairs)
   * @returns {Promise<number[]|null>} the vector, or null when the API key
   * is missing or the call fails — callers treat null as "skip embedding".
   */
  async generateEmbedding(text) {
    // Without a key every OpenAI call would throw; bail out early.
    if (!process.env.OPENAI_API_KEY) {
      return null;
    }
    try {
      const response = await openai.embeddings.create({
        model: "text-embedding-3-small",
        input: text
      });
      return response.data[0].embedding;
    } catch (error) {
      // Route the failure through Strapi's logger with the error attached
      // (the original split the message across strapi.log.error and a bare
      // console.error, losing the error from the structured log).
      strapi.log.error("Error generating embedding via OpenAI:", error);
      return null;
    }
  }
});
855
// Settings persistence backed by Strapi's plugin store (single "settings" key).
const config = ({ strapi }) => {
  const getStore = () =>
    strapi.store({ environment: null, type: "plugin", name: "faqchatbot" });
  // Anything non-object (null, corrupted value) is normalized to {}.
  const asObject = (value) => (value && typeof value === "object" ? value : {});
  return {
    /** Return the stored settings object, defaulting to {}. */
    async getConfig() {
      const stored = await getStore().get({ key: "settings" });
      return asObject(stored);
    },
    /** Shallow-merge new settings over the stored ones, persist, and return the result. */
    async setConfig(newSettings) {
      const store = getStore();
      const existing = asObject(await store.get({ key: "settings" }));
      const mergedSettings = { ...existing, ...newSettings };
      await store.set({ key: "settings", value: mergedSettings });
      return mergedSettings;
    }
  };
};
884
// Service registry, resolved via strapi.plugin("faqchatbot").service(name).
const services = {
  config,
  embed
};
// Strapi plugin entry point: wires lifecycles, configuration, HTTP surface,
// content types and services together.
const index = {
  register,
  bootstrap,
  destroy,
  config: config$2,
  controllers,
  routes,
  services,
  contentTypes,
  policies,
  middlewares
};
export {
  index as default
};