@intlayer/backend 5.4.1 → 5.5.0-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. package/dist/cjs/controllers/ai.controller.cjs +60 -52
  2. package/dist/cjs/controllers/ai.controller.cjs.map +1 -1
  3. package/dist/cjs/controllers/dictionary.controller.cjs +5 -0
  4. package/dist/cjs/controllers/dictionary.controller.cjs.map +1 -1
  5. package/dist/cjs/export.cjs +4 -2
  6. package/dist/cjs/export.cjs.map +1 -1
  7. package/dist/cjs/routes/ai.routes.cjs +6 -0
  8. package/dist/cjs/routes/ai.routes.cjs.map +1 -1
  9. package/dist/cjs/services/dictionary.service.cjs +6 -1
  10. package/dist/cjs/services/dictionary.service.cjs.map +1 -1
  11. package/dist/cjs/services/sessionAuth.service.cjs +7 -7
  12. package/dist/cjs/services/sessionAuth.service.cjs.map +1 -1
  13. package/dist/cjs/utils/AI/aiSdk.cjs +140 -0
  14. package/dist/cjs/utils/AI/aiSdk.cjs.map +1 -0
  15. package/dist/cjs/utils/AI/askDocQuestion/PROMPT.md +2 -1
  16. package/dist/cjs/utils/AI/askDocQuestion/askDocQuestion.cjs +32 -27
  17. package/dist/cjs/utils/AI/askDocQuestion/askDocQuestion.cjs.map +1 -1
  18. package/dist/cjs/utils/AI/askDocQuestion/embeddings.json +7374 -0
  19. package/dist/cjs/utils/AI/auditDictionary/PROMPT.md +4 -0
  20. package/dist/cjs/utils/AI/auditDictionary/index.cjs +36 -43
  21. package/dist/cjs/utils/AI/auditDictionary/index.cjs.map +1 -1
  22. package/dist/cjs/utils/AI/auditDictionaryField/PROMPT.md +4 -0
  23. package/dist/cjs/utils/AI/auditDictionaryField/index.cjs +34 -28
  24. package/dist/cjs/utils/AI/auditDictionaryField/index.cjs.map +1 -1
  25. package/dist/cjs/utils/AI/auditDictionaryMetadata/PROMPT.md +4 -0
  26. package/dist/cjs/utils/AI/auditDictionaryMetadata/index.cjs +23 -23
  27. package/dist/cjs/utils/AI/auditDictionaryMetadata/index.cjs.map +1 -1
  28. package/dist/cjs/utils/{auditTag → AI/auditTag}/PROMPT.md +4 -0
  29. package/dist/cjs/utils/{auditTag → AI/auditTag}/index.cjs +27 -27
  30. package/dist/cjs/utils/AI/auditTag/index.cjs.map +1 -0
  31. package/dist/cjs/utils/AI/autocomplete/PROMPT.md +4 -0
  32. package/dist/cjs/utils/AI/autocomplete/index.cjs +25 -22
  33. package/dist/cjs/utils/AI/autocomplete/index.cjs.map +1 -1
  34. package/dist/cjs/utils/AI/translateJSON/PROMPT.md +53 -0
  35. package/dist/cjs/utils/AI/translateJSON/index.cjs +106 -0
  36. package/dist/cjs/utils/AI/translateJSON/index.cjs.map +1 -0
  37. package/dist/cjs/utils/extractJSON.cjs +52 -0
  38. package/dist/cjs/utils/extractJSON.cjs.map +1 -0
  39. package/dist/esm/controllers/ai.controller.mjs +58 -51
  40. package/dist/esm/controllers/ai.controller.mjs.map +1 -1
  41. package/dist/esm/controllers/dictionary.controller.mjs +5 -0
  42. package/dist/esm/controllers/dictionary.controller.mjs.map +1 -1
  43. package/dist/esm/export.mjs +3 -2
  44. package/dist/esm/export.mjs.map +1 -1
  45. package/dist/esm/routes/ai.routes.mjs +8 -1
  46. package/dist/esm/routes/ai.routes.mjs.map +1 -1
  47. package/dist/esm/services/dictionary.service.mjs +6 -1
  48. package/dist/esm/services/dictionary.service.mjs.map +1 -1
  49. package/dist/esm/services/sessionAuth.service.mjs +2 -2
  50. package/dist/esm/services/sessionAuth.service.mjs.map +1 -1
  51. package/dist/esm/utils/AI/aiSdk.mjs +115 -0
  52. package/dist/esm/utils/AI/aiSdk.mjs.map +1 -0
  53. package/dist/esm/utils/AI/askDocQuestion/PROMPT.md +2 -1
  54. package/dist/esm/utils/AI/askDocQuestion/askDocQuestion.mjs +32 -27
  55. package/dist/esm/utils/AI/askDocQuestion/askDocQuestion.mjs.map +1 -1
  56. package/dist/esm/utils/AI/askDocQuestion/embeddings.json +7374 -0
  57. package/dist/esm/utils/AI/auditDictionary/PROMPT.md +4 -0
  58. package/dist/esm/utils/AI/auditDictionary/index.mjs +36 -43
  59. package/dist/esm/utils/AI/auditDictionary/index.mjs.map +1 -1
  60. package/dist/esm/utils/AI/auditDictionaryField/PROMPT.md +4 -0
  61. package/dist/esm/utils/AI/auditDictionaryField/index.mjs +34 -28
  62. package/dist/esm/utils/AI/auditDictionaryField/index.mjs.map +1 -1
  63. package/dist/esm/utils/AI/auditDictionaryMetadata/PROMPT.md +4 -0
  64. package/dist/esm/utils/AI/auditDictionaryMetadata/index.mjs +23 -23
  65. package/dist/esm/utils/AI/auditDictionaryMetadata/index.mjs.map +1 -1
  66. package/dist/esm/utils/{auditTag → AI/auditTag}/PROMPT.md +4 -0
  67. package/dist/esm/utils/AI/auditTag/index.mjs +49 -0
  68. package/dist/esm/utils/AI/auditTag/index.mjs.map +1 -0
  69. package/dist/esm/utils/AI/autocomplete/PROMPT.md +4 -0
  70. package/dist/esm/utils/AI/autocomplete/index.mjs +25 -22
  71. package/dist/esm/utils/AI/autocomplete/index.mjs.map +1 -1
  72. package/dist/esm/utils/AI/translateJSON/PROMPT.md +53 -0
  73. package/dist/esm/utils/AI/translateJSON/index.mjs +81 -0
  74. package/dist/esm/utils/AI/translateJSON/index.mjs.map +1 -0
  75. package/dist/esm/utils/extractJSON.mjs +28 -0
  76. package/dist/esm/utils/extractJSON.mjs.map +1 -0
  77. package/dist/types/controllers/ai.controller.d.ts +12 -21
  78. package/dist/types/controllers/ai.controller.d.ts.map +1 -1
  79. package/dist/types/controllers/dictionary.controller.d.ts.map +1 -1
  80. package/dist/types/export.d.ts +12 -11
  81. package/dist/types/export.d.ts.map +1 -1
  82. package/dist/types/routes/ai.routes.d.ts +5 -0
  83. package/dist/types/routes/ai.routes.d.ts.map +1 -1
  84. package/dist/types/services/dictionary.service.d.ts +2 -2
  85. package/dist/types/services/dictionary.service.d.ts.map +1 -1
  86. package/dist/types/services/sessionAuth.service.d.ts +2 -2
  87. package/dist/types/services/sessionAuth.service.d.ts.map +1 -1
  88. package/dist/types/utils/AI/aiSdk.d.ts +41 -0
  89. package/dist/types/utils/AI/aiSdk.d.ts.map +1 -0
  90. package/dist/types/utils/AI/askDocQuestion/askDocQuestion.d.ts +1 -1
  91. package/dist/types/utils/AI/askDocQuestion/askDocQuestion.d.ts.map +1 -1
  92. package/dist/types/utils/AI/auditDictionary/index.d.ts +10 -15
  93. package/dist/types/utils/AI/auditDictionary/index.d.ts.map +1 -1
  94. package/dist/types/utils/AI/auditDictionaryField/index.d.ts +9 -14
  95. package/dist/types/utils/AI/auditDictionaryField/index.d.ts.map +1 -1
  96. package/dist/types/utils/AI/auditDictionaryMetadata/index.d.ts +7 -13
  97. package/dist/types/utils/AI/auditDictionaryMetadata/index.d.ts.map +1 -1
  98. package/dist/types/utils/AI/auditTag/index.d.ts +18 -0
  99. package/dist/types/utils/AI/auditTag/index.d.ts.map +1 -0
  100. package/dist/types/utils/AI/autocomplete/index.d.ts +6 -12
  101. package/dist/types/utils/AI/autocomplete/index.d.ts.map +1 -1
  102. package/dist/types/utils/AI/translateJSON/index.d.ts +24 -0
  103. package/dist/types/utils/AI/translateJSON/index.d.ts.map +1 -0
  104. package/dist/types/utils/extractJSON.d.ts +6 -0
  105. package/dist/types/utils/extractJSON.d.ts.map +1 -0
  106. package/package.json +15 -11
  107. package/dist/cjs/utils/auditTag/index.cjs.map +0 -1
  108. package/dist/esm/utils/auditTag/index.mjs +0 -49
  109. package/dist/esm/utils/auditTag/index.mjs.map +0 -1
  110. package/dist/types/utils/auditTag/index.d.ts +0 -30
  111. package/dist/types/utils/auditTag/index.d.ts.map +0 -1
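
Taken together, the list shows one structural change: every AI utility (askDocQuestion, the audit helpers, autocomplete, and the new translateJSON) now resolves its model through a new shared utils/AI/aiSdk module, and auditTag moves under utils/AI/ to match. The hunks below only show aiSdk's call sites (AIProvider, getAIConfig, and streamText from the "ai" package), so the following TypeScript sketch is a hypothetical reconstruction of the module's surface inferred from those call sites; the enum value, the createOpenAI wiring, and the exact return shape are assumptions, not the published source.

// Hypothetical sketch of utils/AI/aiSdk.ts, inferred from its call sites below.
// The shipped module is larger (aiSdk.d.ts gains 41 lines); only the shape
// consumed in this diff is reconstructed here.
import { createOpenAI } from "@ai-sdk/openai";
import type { LanguageModel } from "ai";

export enum AIProvider {
  OPENAI = "openai", // assumed value; the diff only shows AIProvider.OPENAI
}

export type AIOptions = {
  provider: AIProvider;
  apiKey?: string;
  model?: string;
  temperature?: number;
};

export type AIConfig = {
  model: LanguageModel; // consumed directly by streamText({ model, ... })
  temperature?: number;
};

export const getAIConfig = async (
  options: AIOptions
): Promise<AIConfig | undefined> => {
  // Call sites check for an undefined result, so fail soft on a missing key.
  if (!options.apiKey) return undefined;

  switch (options.provider) {
    case AIProvider.OPENAI: {
      const openai = createOpenAI({ apiKey: options.apiKey });
      return {
        model: openai(options.model ?? "gpt-4o-2024-11-20"),
        temperature: options.temperature,
      };
    }
    default:
      return undefined;
  }
};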
@@ -13,10 +13,11 @@ If you don't have enough information to answer the question, not answer using ex
  If your have a doubt about something, you should ask more question to the user.
  If a user stuggles with a question, and you cant find an answer, you can :

+ - Mention that some specific bugs may not be documented yet. Encourage the user to share their solution in the chat. This helps our team analyze common issues and improve the bot's responses for future users.
  - suggest to the user to submit an issue on the Intlayer repository: https://github.com/aymericzip/intlayer/issues. Our team generally responds to issues within a day. Notice that your issue will also probably help other users.
  - the multiple example of implementations of Intlayer in the repository: https://github.com/aymericzip/intlayer/tree/main/examples

- You can also suggest when it's possible to the user to star the Intlayer repository: https://github.com/aymericzip/intlayer to support the project. It really help us.
+ You can also suggest (when it's appropriate), to star the Intlayer repository: https://github.com/aymericzip/intlayer to support the project. It really help us.

  Here some useful urls to know more about Intlayer:
  https://intlayer.org/docs
@@ -1,11 +1,13 @@
  import { getBlogs } from "@intlayer/blog";
  import { Locales } from "@intlayer/config";
  import { getDocs, getFequentQuestions } from "@intlayer/docs";
+ import { streamText } from "ai";
  import dotenv from "dotenv";
  import fs, { readFileSync } from "fs";
  import { OpenAI } from "openai";
  import { dirname, join } from "path";
  import { fileURLToPath } from "url";
+ import { AIProvider, getAIConfig } from "../aiSdk.mjs";
  import embeddingsList from "./embeddings.json" with { type: "json" };
  const vectorStore = [];
  const MODEL = "gpt-4o-2024-11-20";
@@ -41,10 +43,13 @@ const chunkText = (text) => {
  };
  const generateEmbedding = async (text) => {
    try {
-     const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
-     const response = await openai.embeddings.create({
+     await getAIConfig({
+       provider: AIProvider.OPENAI,
+       apiKey: process.env.OPENAI_API_KEY
+     });
+     const openaiClient = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
+     const response = await openaiClient.embeddings.create({
        model: EMBEDDING_MODEL,
-       // Specify the embedding model
        input: text
      });
      return response.data[0].embedding;
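
Note that embeddings still go through the raw openai client rather than the AI SDK; getAIConfig is only awaited beforehand, presumably to validate the provider and key. Downstream, the bundled source shows retrieval ranking chunks by cosine similarity, keeping at most MAX_RELEVANT_CHUNKS_NB (8) chunks scoring above MIN_RELEVANT_CHUNKS_SIMILARITY (0.25). A standalone TypeScript sketch of that ranking step, mirroring the cosineSimilarity helper visible in the bundle (ScoredChunk is an illustrative stand-in for the package's VectorStoreEl type):

// Ranking step behind searchChunkReference, as visible in the bundled source.
type ScoredChunk = { fileKey: string; content: string; embedding: number[] };

const MIN_RELEVANT_CHUNKS_SIMILARITY = 0.25;
const MAX_RELEVANT_CHUNKS_NB = 8;

const cosineSimilarity = (vecA: number[], vecB: number[]): number => {
  // Dot product divided by the product of the Euclidean norms
  const dotProduct = vecA.reduce((sum, a, idx) => sum + a * vecB[idx], 0);
  const magnitudeA = Math.sqrt(vecA.reduce((sum, a) => sum + a * a, 0));
  const magnitudeB = Math.sqrt(vecB.reduce((sum, b) => sum + b * b, 0));
  return dotProduct / (magnitudeA * magnitudeB);
};

const rankChunks = (queryEmbedding: number[], store: ScoredChunk[]) =>
  store
    .map((chunk) => ({
      ...chunk,
      similarity: cosineSimilarity(queryEmbedding, chunk.embedding),
    }))
    .filter((chunk) => chunk.similarity > MIN_RELEVANT_CHUNKS_SIMILARITY)
    .sort((a, b) => b.similarity - a.similarity)
    .slice(0, MAX_RELEVANT_CHUNKS_NB);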
@@ -126,38 +131,38 @@ const initPrompt = {
    content: CHAT_GPT_PROMPT
  };
  const askDocQuestion = async (messages, options) => {
-   const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
-   const userMessages = messages.filter((message) => message.role === "user");
-   const query = userMessages.map((message) => `- ${message.content}`).join("\n");
+   const query = messages.map((message) => `- ${message.content}`).join("\n");
    const relevantFilesReferences = await searchChunkReference(query);
-   const messagesList = [
-     {
-       ...initPrompt,
-       content: initPrompt.content.replace(
-         "{{relevantFilesReferences}}",
-         relevantFilesReferences.length === 0 ? "Not relevant file found related to the question." : relevantFilesReferences.map(
-           (doc, idx) => `[Chunk ${idx}] docKey = "${doc.fileKey}":
+   const systemPrompt = initPrompt.content.replace(
+     "{{relevantFilesReferences}}",
+     relevantFilesReferences.length === 0 ? "Not relevant file found related to the question." : relevantFilesReferences.map(
+       (doc, idx) => `[Chunk ${idx}] docKey = "${doc.fileKey}":
  ${doc.content}`
-         ).join("\n\n")
-         // Insert relevant docs into the prompt
-       )
-     },
+     ).join("\n\n")
+     // Insert relevant docs into the prompt
+   );
+   const aiMessages = [
+     { role: "system", content: systemPrompt },
      ...messages
-     // Include all user and assistant messages
    ];
-   const response = await openai.chat.completions.create({
+   const aiConfig = await getAIConfig({
+     provider: AIProvider.OPENAI,
      model: MODEL,
      temperature: MODEL_TEMPERATURE,
-     messages: messagesList,
-     stream: true
+     apiKey: process.env.OPENAI_API_KEY
    });
+   if (!aiConfig) {
+     throw new Error("Failed to initialize AI configuration");
+   }
    let fullResponse = "";
-   for await (const chunk of response) {
-     const content = chunk.choices[0]?.delta?.content || "";
-     if (content) {
-       fullResponse += content;
-       options?.onMessage?.(content);
-     }
+   const stream = streamText({
+     model: aiConfig.model,
+     temperature: aiConfig.temperature,
+     messages: aiMessages
+   });
+   for await (const chunk of stream.textStream) {
+     fullResponse += chunk;
+     options?.onMessage?.(chunk);
    }
    const relatedFiles = [
      ...new Set(relevantFilesReferences.map((doc) => doc.fileKey))
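
The streaming loop is the core of the change: instead of unwrapping chunk.choices[0]?.delta?.content from the OpenAI stream, textStream yields plain text deltas directly, and the new if (!aiConfig) guard fails loudly rather than silently. A minimal self-contained TypeScript sketch of the new call shape, building the model inline with @ai-sdk/openai for brevity where the package goes through getAIConfig; the prompt strings are illustrative:

// Minimal sketch of the new streaming flow (AI SDK). The model is built
// inline here for brevity; the package resolves it via getAIConfig instead.
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

const stream = streamText({
  model: openai("gpt-4o-2024-11-20"), // reads OPENAI_API_KEY from the environment
  temperature: 0.1,
  messages: [
    { role: "system" as const, content: "You are the Intlayer docs assistant." },
    { role: "user" as const, content: "How do I declare a content dictionary?" },
  ],
});

let fullResponse = "";
for await (const chunk of stream.textStream) {
  fullResponse += chunk; // each chunk is already a plain text delta
  process.stdout.write(chunk);
}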
@@ -1 +1 @@
- (minified one-line source map for src/utils/AI/askDocQuestion/askDocQuestion.ts; its embedded sourcesContent matches the pre-5.5.0 source shown above)
+ (minified one-line source map regenerated for the AI SDK rewrite; full JSON omitted here)