@intlayer/backend 5.4.1 → 5.5.0-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. package/dist/cjs/controllers/ai.controller.cjs +60 -52
  2. package/dist/cjs/controllers/ai.controller.cjs.map +1 -1
  3. package/dist/cjs/controllers/dictionary.controller.cjs +5 -0
  4. package/dist/cjs/controllers/dictionary.controller.cjs.map +1 -1
  5. package/dist/cjs/export.cjs +4 -2
  6. package/dist/cjs/export.cjs.map +1 -1
  7. package/dist/cjs/routes/ai.routes.cjs +6 -0
  8. package/dist/cjs/routes/ai.routes.cjs.map +1 -1
  9. package/dist/cjs/services/dictionary.service.cjs +6 -1
  10. package/dist/cjs/services/dictionary.service.cjs.map +1 -1
  11. package/dist/cjs/services/sessionAuth.service.cjs +7 -7
  12. package/dist/cjs/services/sessionAuth.service.cjs.map +1 -1
  13. package/dist/cjs/utils/AI/aiSdk.cjs +140 -0
  14. package/dist/cjs/utils/AI/aiSdk.cjs.map +1 -0
  15. package/dist/cjs/utils/AI/askDocQuestion/PROMPT.md +2 -1
  16. package/dist/cjs/utils/AI/askDocQuestion/askDocQuestion.cjs +32 -27
  17. package/dist/cjs/utils/AI/askDocQuestion/askDocQuestion.cjs.map +1 -1
  18. package/dist/cjs/utils/AI/askDocQuestion/embeddings.json +7374 -0
  19. package/dist/cjs/utils/AI/auditDictionary/PROMPT.md +4 -0
  20. package/dist/cjs/utils/AI/auditDictionary/index.cjs +36 -43
  21. package/dist/cjs/utils/AI/auditDictionary/index.cjs.map +1 -1
  22. package/dist/cjs/utils/AI/auditDictionaryField/PROMPT.md +4 -0
  23. package/dist/cjs/utils/AI/auditDictionaryField/index.cjs +34 -28
  24. package/dist/cjs/utils/AI/auditDictionaryField/index.cjs.map +1 -1
  25. package/dist/cjs/utils/AI/auditDictionaryMetadata/PROMPT.md +4 -0
  26. package/dist/cjs/utils/AI/auditDictionaryMetadata/index.cjs +23 -23
  27. package/dist/cjs/utils/AI/auditDictionaryMetadata/index.cjs.map +1 -1
  28. package/dist/cjs/utils/{auditTag → AI/auditTag}/PROMPT.md +4 -0
  29. package/dist/cjs/utils/{auditTag → AI/auditTag}/index.cjs +27 -27
  30. package/dist/cjs/utils/AI/auditTag/index.cjs.map +1 -0
  31. package/dist/cjs/utils/AI/autocomplete/PROMPT.md +4 -0
  32. package/dist/cjs/utils/AI/autocomplete/index.cjs +25 -22
  33. package/dist/cjs/utils/AI/autocomplete/index.cjs.map +1 -1
  34. package/dist/cjs/utils/AI/translateJSON/PROMPT.md +53 -0
  35. package/dist/cjs/utils/AI/translateJSON/index.cjs +106 -0
  36. package/dist/cjs/utils/AI/translateJSON/index.cjs.map +1 -0
  37. package/dist/cjs/utils/extractJSON.cjs +52 -0
  38. package/dist/cjs/utils/extractJSON.cjs.map +1 -0
  39. package/dist/esm/controllers/ai.controller.mjs +58 -51
  40. package/dist/esm/controllers/ai.controller.mjs.map +1 -1
  41. package/dist/esm/controllers/dictionary.controller.mjs +5 -0
  42. package/dist/esm/controllers/dictionary.controller.mjs.map +1 -1
  43. package/dist/esm/export.mjs +3 -2
  44. package/dist/esm/export.mjs.map +1 -1
  45. package/dist/esm/routes/ai.routes.mjs +8 -1
  46. package/dist/esm/routes/ai.routes.mjs.map +1 -1
  47. package/dist/esm/services/dictionary.service.mjs +6 -1
  48. package/dist/esm/services/dictionary.service.mjs.map +1 -1
  49. package/dist/esm/services/sessionAuth.service.mjs +2 -2
  50. package/dist/esm/services/sessionAuth.service.mjs.map +1 -1
  51. package/dist/esm/utils/AI/aiSdk.mjs +115 -0
  52. package/dist/esm/utils/AI/aiSdk.mjs.map +1 -0
  53. package/dist/esm/utils/AI/askDocQuestion/PROMPT.md +2 -1
  54. package/dist/esm/utils/AI/askDocQuestion/askDocQuestion.mjs +32 -27
  55. package/dist/esm/utils/AI/askDocQuestion/askDocQuestion.mjs.map +1 -1
  56. package/dist/esm/utils/AI/askDocQuestion/embeddings.json +7374 -0
  57. package/dist/esm/utils/AI/auditDictionary/PROMPT.md +4 -0
  58. package/dist/esm/utils/AI/auditDictionary/index.mjs +36 -43
  59. package/dist/esm/utils/AI/auditDictionary/index.mjs.map +1 -1
  60. package/dist/esm/utils/AI/auditDictionaryField/PROMPT.md +4 -0
  61. package/dist/esm/utils/AI/auditDictionaryField/index.mjs +34 -28
  62. package/dist/esm/utils/AI/auditDictionaryField/index.mjs.map +1 -1
  63. package/dist/esm/utils/AI/auditDictionaryMetadata/PROMPT.md +4 -0
  64. package/dist/esm/utils/AI/auditDictionaryMetadata/index.mjs +23 -23
  65. package/dist/esm/utils/AI/auditDictionaryMetadata/index.mjs.map +1 -1
  66. package/dist/esm/utils/{auditTag → AI/auditTag}/PROMPT.md +4 -0
  67. package/dist/esm/utils/AI/auditTag/index.mjs +49 -0
  68. package/dist/esm/utils/AI/auditTag/index.mjs.map +1 -0
  69. package/dist/esm/utils/AI/autocomplete/PROMPT.md +4 -0
  70. package/dist/esm/utils/AI/autocomplete/index.mjs +25 -22
  71. package/dist/esm/utils/AI/autocomplete/index.mjs.map +1 -1
  72. package/dist/esm/utils/AI/translateJSON/PROMPT.md +53 -0
  73. package/dist/esm/utils/AI/translateJSON/index.mjs +81 -0
  74. package/dist/esm/utils/AI/translateJSON/index.mjs.map +1 -0
  75. package/dist/esm/utils/extractJSON.mjs +28 -0
  76. package/dist/esm/utils/extractJSON.mjs.map +1 -0
  77. package/dist/types/controllers/ai.controller.d.ts +12 -21
  78. package/dist/types/controllers/ai.controller.d.ts.map +1 -1
  79. package/dist/types/controllers/dictionary.controller.d.ts.map +1 -1
  80. package/dist/types/export.d.ts +12 -11
  81. package/dist/types/export.d.ts.map +1 -1
  82. package/dist/types/routes/ai.routes.d.ts +5 -0
  83. package/dist/types/routes/ai.routes.d.ts.map +1 -1
  84. package/dist/types/services/dictionary.service.d.ts +2 -2
  85. package/dist/types/services/dictionary.service.d.ts.map +1 -1
  86. package/dist/types/services/sessionAuth.service.d.ts +2 -2
  87. package/dist/types/services/sessionAuth.service.d.ts.map +1 -1
  88. package/dist/types/utils/AI/aiSdk.d.ts +41 -0
  89. package/dist/types/utils/AI/aiSdk.d.ts.map +1 -0
  90. package/dist/types/utils/AI/askDocQuestion/askDocQuestion.d.ts +1 -1
  91. package/dist/types/utils/AI/askDocQuestion/askDocQuestion.d.ts.map +1 -1
  92. package/dist/types/utils/AI/auditDictionary/index.d.ts +10 -15
  93. package/dist/types/utils/AI/auditDictionary/index.d.ts.map +1 -1
  94. package/dist/types/utils/AI/auditDictionaryField/index.d.ts +9 -14
  95. package/dist/types/utils/AI/auditDictionaryField/index.d.ts.map +1 -1
  96. package/dist/types/utils/AI/auditDictionaryMetadata/index.d.ts +7 -13
  97. package/dist/types/utils/AI/auditDictionaryMetadata/index.d.ts.map +1 -1
  98. package/dist/types/utils/AI/auditTag/index.d.ts +18 -0
  99. package/dist/types/utils/AI/auditTag/index.d.ts.map +1 -0
  100. package/dist/types/utils/AI/autocomplete/index.d.ts +6 -12
  101. package/dist/types/utils/AI/autocomplete/index.d.ts.map +1 -1
  102. package/dist/types/utils/AI/translateJSON/index.d.ts +24 -0
  103. package/dist/types/utils/AI/translateJSON/index.d.ts.map +1 -0
  104. package/dist/types/utils/extractJSON.d.ts +6 -0
  105. package/dist/types/utils/extractJSON.d.ts.map +1 -0
  106. package/package.json +15 -11
  107. package/dist/cjs/utils/auditTag/index.cjs.map +0 -1
  108. package/dist/esm/utils/auditTag/index.mjs +0 -49
  109. package/dist/esm/utils/auditTag/index.mjs.map +0 -1
  110. package/dist/types/utils/auditTag/index.d.ts +0 -30
  111. package/dist/types/utils/auditTag/index.d.ts.map +0 -1
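Every AI helper in this diff drops its per-function `openai` client in favour of the Vercel AI SDK (`ai` package) plus a new shared `utils/AI/aiSdk` module. The source of that module is not visible in the hunks below, so the following TypeScript sketch only reconstructs the surface those hunks rely on — `AIProvider.OPENAI`, a `getAIConfig` that resolves to `{ model, temperature }` or `undefined`, and an `applicationContext` field on `AIOptions`. The `@ai-sdk/openai` wiring and the `0.1` temperature default are assumptions, not the shipped implementation.

```ts
// Hypothetical reconstruction of the new aiSdk surface (not the shipped aiSdk.cjs).
import { createOpenAI } from '@ai-sdk/openai';
import type { LanguageModel } from 'ai';

export enum AIProvider {
  OPENAI = 'openai',
}

export type AIOptions = {
  provider?: AIProvider;
  model?: string;
  apiKey?: string;
  temperature?: number;
  // Free-text project description injected into the new {{applicationContext}} placeholder.
  applicationContext?: string;
};

export type AIConfig = {
  model: LanguageModel;
  temperature?: number;
};

// Resolve caller-supplied options into a model instance consumable by generateText().
export const getAIConfig = async (
  options: AIOptions
): Promise<AIConfig | undefined> => {
  // Callers treat an undefined result as "Failed to configure AI model".
  if (!options.apiKey) return undefined;

  const openai = createOpenAI({ apiKey: options.apiKey });

  return {
    model: openai(options.model ?? 'gpt-4o-mini'),
    temperature: options.temperature ?? 0.1, // assumed default, mirrors the 5.4.x behaviour
  };
};
```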
@@ -138,6 +138,10 @@ You are an expert in internationalization, copy writing and content management.
  - The import of the `t` function was imported from `react-intlayer` instead of `intlayer`.
  - A type `Dictionary` was added to the file to strengthen the content declaration.
 
+ **Application Context**
+
+ {{applicationContext}}
+
  **Tags Instructions:**
 
  {{tagsInstructions}}
@@ -21,73 +21,66 @@ __export(auditDictionary_exports, {
  auditDictionary: () => auditDictionary
  });
  module.exports = __toCommonJS(auditDictionary_exports);
+ var import_core = require("@intlayer/core");
+ var import_logger = require('./../../../logger/index.cjs');
+ var import_ai = require("ai");
  var import_fs = require("fs");
  var import_path = require("path");
  var import_url = require("url");
- var import_core = require("@intlayer/core");
- var import_logger = require('./../../../logger/index.cjs');
- var import_openai = require("openai");
+ var import_aiSdk = require('../aiSdk.cjs');
  const import_meta = {};
  const __dirname = (0, import_path.dirname)((0, import_url.fileURLToPath)(import_meta.url));
- const getFileContent = (relativeFilePath) => {
- const absolutePath = (0, import_path.join)(__dirname, relativeFilePath);
- const fileContent = (0, import_fs.readFileSync)(absolutePath, "utf-8");
- return fileContent;
- };
- const FILE_TEMPLATE = {
- ts: getFileContent("./TS_FORMAT.md"),
- tsx: getFileContent("./TSX_FORMAT.md"),
- js: getFileContent("./MJS_FORMAT.md"),
- mjs: getFileContent("./MJS_FORMAT.md"),
- cjs: getFileContent("./CJS_FORMAT.md"),
- jsx: getFileContent("./JSX_FORMAT.md"),
- json: getFileContent("./JSON_FORMAT.md")
+ const getFileContent = (filePath) => {
+ return (0, import_fs.readFileSync)((0, import_path.join)(__dirname, filePath), { encoding: "utf-8" });
  };
  const CHAT_GPT_PROMPT = getFileContent("./PROMPT.md");
  const formatLocaleWithName = (locale) => {
- const localeName = (0, import_core.getLocaleName)(locale);
- return `${locale}: ${localeName}`;
+ return `${locale}: ${(0, import_core.getLocaleName)(locale)}`;
+ };
+ const formatTagInstructions = (tags) => {
+ if (!tags || tags.length === 0) {
+ return "";
+ }
+ return `Based on the dictionary content, identify specific tags from the list below that would be relevant:
+
+ ${tags.map(({ key, description }) => `- ${key}: ${description}`).join("\n\n")}`;
  };
- const formatTagInstructions = (tags = []) => tags.map((tag) => `- ${tag.key}: ${tag.instructions}`).join("\n\n");
  const auditDictionary = async ({
  fileContent,
  filePath,
- model,
- openAiApiKey,
- customPrompt,
- temperature,
+ aiOptions,
  locales,
  defaultLocale,
  tags
  }) => {
  try {
- const openai = new import_openai.OpenAI({
- apiKey: openAiApiKey ?? process.env.OPENAI_API_KEY
+ const otherLocales = locales.filter((locale) => locale !== defaultLocale);
+ const aiConfig = await (0, import_aiSdk.getAIConfig)({
+ provider: import_aiSdk.AIProvider.OPENAI,
+ model: "gpt-4o-mini",
+ apiKey: process.env.OPENAI_API_KEY,
+ ...aiOptions
  });
- const splitted = (filePath ?? ".json").split(".");
- const fileExtension = splitted[splitted.length - 1];
- const prompt = customPrompt ?? CHAT_GPT_PROMPT.replace("{{filePath}}", filePath ?? "Not provided").replace(
+ const prompt = CHAT_GPT_PROMPT.replace(
  "{{defaultLocale}}",
- `{${formatLocaleWithName(defaultLocale)}}`
+ formatLocaleWithName(defaultLocale)
  ).replace(
  "{{otherLocales}}",
- `{${locales.map(formatLocaleWithName).join(", ")}}`
- ).replace(
- "{{declarationsContentTemplate}}",
- FILE_TEMPLATE[fileExtension]
- ).replace("{{fileContent}}", fileContent).replace("{{tagsInstructions}}", formatTagInstructions(tags));
- const chatCompletion = await openai.chat.completions.create({
- model: openAiApiKey ? model ?? "gpt-4o-2024-11-20" : "gpt-4o-2024-11-20",
- temperature: openAiApiKey ? temperature ?? 0.1 : 0.1,
+ `{${otherLocales.map(formatLocaleWithName).join(", ")}}`
+ ).replace("{{filePath}}", filePath ?? "").replace("{{fileContent}}", fileContent).replace("{{applicationContext}}", aiOptions?.applicationContext ?? "").replace("{{tagsInstructions}}", formatTagInstructions(tags));
+ if (!aiConfig) {
+ import_logger.logger.error("Failed to configure AI model");
+ return void 0;
+ }
+ const { text: newContent, usage } = await (0, import_ai.generateText)({
+ model: aiConfig.model,
+ temperature: aiConfig.temperature,
  messages: [{ role: "system", content: prompt }]
  });
- const newContent = chatCompletion.choices[0].message?.content;
- import_logger.logger.info(
- `${chatCompletion.usage?.total_tokens} tokens used in the request`
- );
+ import_logger.logger.info(`${usage?.totalTokens ?? 0} tokens used in the request`);
  return {
- fileContent: newContent ?? "",
- tokenUsed: chatCompletion.usage?.total_tokens ?? 0
+ fileContent: newContent,
+ tokenUsed: usage?.totalTokens ?? 0
  };
  } catch (error) {
  console.error(error);
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../src/utils/AI/auditDictionary/index.ts"],"sourcesContent":["import { readFileSync } from 'fs';\nimport { dirname, join } from 'path';\nimport { fileURLToPath } from 'url';\nimport { getLocaleName } from '@intlayer/core';\nimport { logger } from '@logger';\nimport type { Locales } from 'intlayer';\nimport { OpenAI } from 'openai';\nimport type { Tag } from '@/types/tag.types';\n\nconst __dirname = dirname(fileURLToPath(import.meta.url));\n\nexport type AIOptions = {\n model?: string;\n temperature?: number;\n openAiApiKey?: string;\n};\n\nexport type AuditOptions = {\n locales: Locales[];\n defaultLocale: Locales;\n fileContent: string;\n filePath?: string;\n customPrompt?: string;\n tags?: Tag[];\n} & AIOptions;\nexport type AuditFileResultData = { fileContent: string; tokenUsed: number };\n\n/**\n * Reads the content of a file synchronously.\n *\n * @function\n * @param relativeFilePath - The relative or absolute path to the target file.\n * @returns The entire contents of the specified file as a UTF-8 encoded string.\n */\nconst getFileContent = (relativeFilePath: string): string => {\n const absolutePath = join(__dirname, relativeFilePath);\n const fileContent = readFileSync(absolutePath, 'utf-8');\n return fileContent;\n};\n\nconst FILE_TEMPLATE: Record<string, string> = {\n ts: getFileContent('./TS_FORMAT.md'),\n tsx: getFileContent('./TSX_FORMAT.md'),\n js: getFileContent('./MJS_FORMAT.md'),\n mjs: getFileContent('./MJS_FORMAT.md'),\n cjs: getFileContent('./CJS_FORMAT.md'),\n jsx: getFileContent('./JSX_FORMAT.md'),\n json: getFileContent('./JSON_FORMAT.md'),\n};\n\n// The prompt template to send to ChatGPT, requesting an audit of content declaration files.\nconst CHAT_GPT_PROMPT = getFileContent('./PROMPT.md');\n\n/**\n * Formats a locale with its full name and returns a string representation.\n *\n * @function\n * @param locale - A locale from the project's configuration (e.g., 'en-US', 'fr-FR').\n * @returns A formatted string combining the locale's name and code. Example: \"English (US): en-US\".\n */\nconst formatLocaleWithName = (locale: Locales): string => {\n // getLocaleName returns a human-readable name for the locale.\n const localeName = getLocaleName(locale);\n\n // Concatenate both the readable name and the locale code.\n return `${locale}: ${localeName}`;\n};\n\n/**\n * Formats an array of tags with their keys and instructions.\n *\n * @function\n * @param tags - An array of tags from the project's configuration.\n * @returns A string representation of the tags, with their keys and instructions.\n */\nconst formatTagInstructions = (tags: Tag[] = []) =>\n tags.map((tag) => `- ${tag.key}: ${tag.instructions}`).join('\\n\\n');\n\n/**\n * Audits a content declaration file by constructing a prompt for ChatGPT.\n * The prompt includes details about the project's locales, file paths of content declarations,\n * and requests for identifying issues or inconsistencies. 
It prints the prompt for each file,\n * and could be adapted to send requests to the ChatGPT model.\n */\nexport const auditDictionary = async ({\n fileContent,\n filePath,\n model,\n openAiApiKey,\n customPrompt,\n temperature,\n locales,\n defaultLocale,\n tags,\n}: AuditOptions): Promise<AuditFileResultData | undefined> => {\n try {\n // Optionally, you could initialize and configure the OpenAI client here, if you intend to make API calls.\n // Uncomment and configure the following lines if you have `openai` installed and want to call the API:\n\n const openai = new OpenAI({\n apiKey: openAiApiKey ?? process.env.OPENAI_API_KEY,\n });\n\n // Read the file's content.\n const splitted = (filePath ?? '.json').split('.');\n const fileExtension = splitted[splitted.length - 1];\n\n // Prepare the prompt for ChatGPT by replacing placeholders with actual values.\n const prompt =\n customPrompt ??\n CHAT_GPT_PROMPT.replace('{{filePath}}', filePath ?? 'Not provided')\n .replace(\n '{{defaultLocale}}',\n `{${formatLocaleWithName(defaultLocale)}}`\n )\n .replace(\n '{{otherLocales}}',\n `{${locales.map(formatLocaleWithName).join(', ')}}`\n )\n .replace(\n '{{declarationsContentTemplate}}',\n FILE_TEMPLATE[fileExtension]\n )\n .replace('{{fileContent}}', fileContent)\n .replace('{{tagsInstructions}}', formatTagInstructions(tags));\n\n // Example of how you might request a completion from ChatGPT:\n const chatCompletion = await openai.chat.completions.create({\n model: openAiApiKey\n ? (model ?? 'gpt-4o-2024-11-20')\n : 'gpt-4o-2024-11-20',\n temperature: openAiApiKey ? (temperature ?? 0.1) : 0.1,\n messages: [{ role: 'system', content: prompt }],\n });\n\n const newContent = chatCompletion.choices[0].message?.content;\n\n logger.info(\n `${chatCompletion.usage?.total_tokens} tokens used in the request`\n );\n\n return {\n fileContent: newContent ?? '',\n tokenUsed: chatCompletion.usage?.total_tokens ?? 
0,\n };\n } catch (error) {\n console.error(error);\n }\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAA6B;AAC7B,kBAA8B;AAC9B,iBAA8B;AAC9B,kBAA8B;AAC9B,oBAAuB;AAEvB,oBAAuB;AANvB;AASA,MAAM,gBAAY,yBAAQ,0BAAc,YAAY,GAAG,CAAC;AAyBxD,MAAM,iBAAiB,CAAC,qBAAqC;AAC3D,QAAM,mBAAe,kBAAK,WAAW,gBAAgB;AACrD,QAAM,kBAAc,wBAAa,cAAc,OAAO;AACtD,SAAO;AACT;AAEA,MAAM,gBAAwC;AAAA,EAC5C,IAAI,eAAe,gBAAgB;AAAA,EACnC,KAAK,eAAe,iBAAiB;AAAA,EACrC,IAAI,eAAe,iBAAiB;AAAA,EACpC,KAAK,eAAe,iBAAiB;AAAA,EACrC,KAAK,eAAe,iBAAiB;AAAA,EACrC,KAAK,eAAe,iBAAiB;AAAA,EACrC,MAAM,eAAe,kBAAkB;AACzC;AAGA,MAAM,kBAAkB,eAAe,aAAa;AASpD,MAAM,uBAAuB,CAAC,WAA4B;AAExD,QAAM,iBAAa,2BAAc,MAAM;AAGvC,SAAO,GAAG,MAAM,KAAK,UAAU;AACjC;AASA,MAAM,wBAAwB,CAAC,OAAc,CAAC,MAC5C,KAAK,IAAI,CAAC,QAAQ,KAAK,IAAI,GAAG,KAAK,IAAI,YAAY,EAAE,EAAE,KAAK,MAAM;AAQ7D,MAAM,kBAAkB,OAAO;AAAA,EACpC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,MAA8D;AAC5D,MAAI;AAIF,UAAM,SAAS,IAAI,qBAAO;AAAA,MACxB,QAAQ,gBAAgB,QAAQ,IAAI;AAAA,IACtC,CAAC;AAGD,UAAM,YAAY,YAAY,SAAS,MAAM,GAAG;AAChD,UAAM,gBAAgB,SAAS,SAAS,SAAS,CAAC;AAGlD,UAAM,SACJ,gBACA,gBAAgB,QAAQ,gBAAgB,YAAY,cAAc,EAC/D;AAAA,MACC;AAAA,MACA,IAAI,qBAAqB,aAAa,CAAC;AAAA,IACzC,EACC;AAAA,MACC;AAAA,MACA,IAAI,QAAQ,IAAI,oBAAoB,EAAE,KAAK,IAAI,CAAC;AAAA,IAClD,EACC;AAAA,MACC;AAAA,MACA,cAAc,aAAa;AAAA,IAC7B,EACC,QAAQ,mBAAmB,WAAW,EACtC,QAAQ,wBAAwB,sBAAsB,IAAI,CAAC;AAGhE,UAAM,iBAAiB,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,MAC1D,OAAO,eACF,SAAS,sBACV;AAAA,MACJ,aAAa,eAAgB,eAAe,MAAO;AAAA,MACnD,UAAU,CAAC,EAAE,MAAM,UAAU,SAAS,OAAO,CAAC;AAAA,IAChD,CAAC;AAED,UAAM,aAAa,eAAe,QAAQ,CAAC,EAAE,SAAS;AAEtD,yBAAO;AAAA,MACL,GAAG,eAAe,OAAO,YAAY;AAAA,IACvC;AAEA,WAAO;AAAA,MACL,aAAa,cAAc;AAAA,MAC3B,WAAW,eAAe,OAAO,gBAAgB;AAAA,IACnD;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,KAAK;AAAA,EACrB;AACF;","names":[]}
+ {"version":3,"sources":["../../../../../src/utils/AI/auditDictionary/index.ts"],"sourcesContent":["import type { Tag } from '@/types/tag.types';\nimport { getLocaleName } from '@intlayer/core';\nimport { logger } from '@logger';\nimport { generateText } from 'ai';\nimport { readFileSync } from 'fs';\nimport type { Locales } from 'intlayer';\nimport { dirname, join } from 'path';\nimport { fileURLToPath } from 'url';\nimport { AIOptions, AIProvider, getAIConfig } from '../aiSdk';\n\nconst __dirname = dirname(fileURLToPath(import.meta.url));\n\n// Get the content of a file at the specified path\nconst getFileContent = (filePath: string) => {\n return readFileSync(join(__dirname, filePath), { encoding: 'utf-8' });\n};\n\nexport type AuditOptions = {\n fileContent: string;\n filePath?: string;\n locales: Locales[];\n defaultLocale: Locales;\n tags: Tag[];\n aiOptions?: AIOptions;\n};\n\nexport type AuditFileResultData = {\n fileContent: string;\n tokenUsed: number;\n};\n\n// The prompt template to send to the AI model\nconst CHAT_GPT_PROMPT = getFileContent('./PROMPT.md');\n\n/**\n * Format a locale with its name.\n *\n * @param locale - The locale to format.\n * @returns A string in the format \"locale: name\", e.g. \"en: English\".\n */\nconst formatLocaleWithName = (locale: Locales): string => {\n return `${locale}: ${getLocaleName(locale)}`;\n};\n\n/**\n * Formats tag instructions for the AI prompt.\n * Creates a string with all available tags and their descriptions.\n *\n * @param tags - The list of tags to format.\n * @returns A formatted string with tag instructions.\n */\nconst formatTagInstructions = (tags: Tag[]): string => {\n if (!tags || tags.length === 0) {\n return '';\n }\n\n // Prepare the tag instructions.\n return `Based on the dictionary content, identify specific tags from the list below that would be relevant:\n \n${tags.map(({ key, description }) => `- ${key}: ${description}`).join('\\n\\n')}`;\n};\n\n/**\n * Audits a content declaration file by constructing a prompt for AI models.\n * The prompt includes details about the project's locales, file paths of content declarations,\n * and requests for identifying issues or inconsistencies.\n */\nexport const auditDictionary = async ({\n fileContent,\n filePath,\n aiOptions,\n locales,\n defaultLocale,\n tags,\n}: AuditOptions): Promise<AuditFileResultData | undefined> => {\n try {\n const otherLocales = locales.filter((locale) => locale !== defaultLocale);\n\n // Get the appropriate AI model configuration\n const aiConfig = await getAIConfig({\n provider: AIProvider.OPENAI,\n model: 'gpt-4o-mini',\n apiKey: process.env.OPENAI_API_KEY,\n ...aiOptions,\n });\n\n // Prepare the prompt for AI by replacing placeholders with actual values.\n const prompt = CHAT_GPT_PROMPT.replace(\n '{{defaultLocale}}',\n formatLocaleWithName(defaultLocale)\n )\n .replace(\n '{{otherLocales}}',\n `{${otherLocales.map(formatLocaleWithName).join(', ')}}`\n )\n .replace('{{filePath}}', filePath ?? '')\n .replace('{{fileContent}}', fileContent)\n .replace('{{applicationContext}}', aiOptions?.applicationContext ?? '')\n .replace('{{tagsInstructions}}', formatTagInstructions(tags));\n\n if (!aiConfig) {\n logger.error('Failed to configure AI model');\n return undefined;\n }\n\n // Use the AI SDK to generate the completion\n const { text: newContent, usage } = await generateText({\n model: aiConfig.model,\n temperature: aiConfig.temperature,\n messages: [{ role: 'system', content: prompt }],\n });\n\n logger.info(`${usage?.totalTokens ?? 
0} tokens used in the request`);\n\n return {\n fileContent: newContent,\n tokenUsed: usage?.totalTokens ?? 0,\n };\n } catch (error) {\n console.error(error);\n }\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,kBAA8B;AAC9B,oBAAuB;AACvB,gBAA6B;AAC7B,gBAA6B;AAE7B,kBAA8B;AAC9B,iBAA8B;AAC9B,mBAAmD;AARnD;AAUA,MAAM,gBAAY,yBAAQ,0BAAc,YAAY,GAAG,CAAC;AAGxD,MAAM,iBAAiB,CAAC,aAAqB;AAC3C,aAAO,4BAAa,kBAAK,WAAW,QAAQ,GAAG,EAAE,UAAU,QAAQ,CAAC;AACtE;AAiBA,MAAM,kBAAkB,eAAe,aAAa;AAQpD,MAAM,uBAAuB,CAAC,WAA4B;AACxD,SAAO,GAAG,MAAM,SAAK,2BAAc,MAAM,CAAC;AAC5C;AASA,MAAM,wBAAwB,CAAC,SAAwB;AACrD,MAAI,CAAC,QAAQ,KAAK,WAAW,GAAG;AAC9B,WAAO;AAAA,EACT;AAGA,SAAO;AAAA;AAAA,EAEP,KAAK,IAAI,CAAC,EAAE,KAAK,YAAY,MAAM,KAAK,GAAG,KAAK,WAAW,EAAE,EAAE,KAAK,MAAM,CAAC;AAC7E;AAOO,MAAM,kBAAkB,OAAO;AAAA,EACpC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,MAA8D;AAC5D,MAAI;AACF,UAAM,eAAe,QAAQ,OAAO,CAAC,WAAW,WAAW,aAAa;AAGxE,UAAM,WAAW,UAAM,0BAAY;AAAA,MACjC,UAAU,wBAAW;AAAA,MACrB,OAAO;AAAA,MACP,QAAQ,QAAQ,IAAI;AAAA,MACpB,GAAG;AAAA,IACL,CAAC;AAGD,UAAM,SAAS,gBAAgB;AAAA,MAC7B;AAAA,MACA,qBAAqB,aAAa;AAAA,IACpC,EACG;AAAA,MACC;AAAA,MACA,IAAI,aAAa,IAAI,oBAAoB,EAAE,KAAK,IAAI,CAAC;AAAA,IACvD,EACC,QAAQ,gBAAgB,YAAY,EAAE,EACtC,QAAQ,mBAAmB,WAAW,EACtC,QAAQ,0BAA0B,WAAW,sBAAsB,EAAE,EACrE,QAAQ,wBAAwB,sBAAsB,IAAI,CAAC;AAE9D,QAAI,CAAC,UAAU;AACb,2BAAO,MAAM,8BAA8B;AAC3C,aAAO;AAAA,IACT;AAGA,UAAM,EAAE,MAAM,YAAY,MAAM,IAAI,UAAM,wBAAa;AAAA,MACrD,OAAO,SAAS;AAAA,MAChB,aAAa,SAAS;AAAA,MACtB,UAAU,CAAC,EAAE,MAAM,UAAU,SAAS,OAAO,CAAC;AAAA,IAChD,CAAC;AAED,yBAAO,KAAK,GAAG,OAAO,eAAe,CAAC,6BAA6B;AAEnE,WAAO;AAAA,MACL,aAAa;AAAA,MACb,WAAW,OAAO,eAAe;AAAA,IACnC;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,KAAK;AAAA,EACrB;AACF;","names":[]}
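The embedded `sourcesContent` above carries the full new TypeScript source, so the 5.5.0 `AuditOptions` shape is visible: the flat `model` / `openAiApiKey` / `temperature` / `customPrompt` parameters are gone in favour of a single optional `aiOptions` object, and `tags` becomes required. A hypothetical call site, assuming `auditDictionary` stays re-exported from the package entry point:

```ts
import { Locales } from 'intlayer';
import { auditDictionary } from '@intlayer/backend';

// Hypothetical content declaration to audit.
const fileContent = `import { t, type Dictionary } from 'intlayer'; /* ... */`;

const result = await auditDictionary({
  fileContent,
  filePath: 'src/home.content.ts', // illustrative path
  locales: [Locales.ENGLISH, Locales.FRENCH],
  defaultLocale: Locales.ENGLISH,
  tags: [],
  aiOptions: {
    apiKey: process.env.OPENAI_API_KEY, // if omitted, the OPENAI_API_KEY env var is the server-side fallback
    model: 'gpt-4o-mini',
    applicationContext: 'Marketing site for a developer tool', // fills the new {{applicationContext}} placeholder
  },
});

console.log(result?.tokenUsed, result?.fileContent);
```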
@@ -108,6 +108,10 @@ You are an expert in internationalization, copy writing and content management.
 
  Développeurs, Responsables de contenu
 
+ **Application Context**
+
+ {{applicationContext}}
+
  **Tags Instructions:**
 
  {{tagsInstructions}}
@@ -21,55 +21,61 @@ __export(auditDictionaryField_exports, {
  auditDictionaryField: () => auditDictionaryField
  });
  module.exports = __toCommonJS(auditDictionaryField_exports);
+ var import_core = require("@intlayer/core");
+ var import_logger = require('./../../../logger/index.cjs');
+ var import_ai = require("ai");
  var import_fs = require("fs");
  var import_path = require("path");
  var import_url = require("url");
- var import_core = require("@intlayer/core");
- var import_logger = require('./../../../logger/index.cjs');
- var import_openai = require("openai");
+ var import_aiSdk = require('../aiSdk.cjs');
  const import_meta = {};
  const __dirname = (0, import_path.dirname)((0, import_url.fileURLToPath)(import_meta.url));
- const getFileContent = (relativeFilePath) => {
- const absolutePath = (0, import_path.join)(__dirname, relativeFilePath);
- const fileContent = (0, import_fs.readFileSync)(absolutePath, "utf-8");
- return fileContent;
+ const getFileContent = (filePath) => {
+ return (0, import_fs.readFileSync)((0, import_path.join)(__dirname, filePath), { encoding: "utf-8" });
  };
  const CHAT_GPT_PROMPT = getFileContent("./PROMPT.md");
  const formatLocaleWithName = (locale) => {
- const localeName = (0, import_core.getLocaleName)(locale);
- return `${locale}: ${localeName}`;
+ return `${locale}: ${(0, import_core.getLocaleName)(locale)}`;
+ };
+ const formatTagInstructions = (tags) => {
+ if (!tags || tags.length === 0) {
+ return "";
+ }
+ return `Based on the dictionary content, identify specific tags from the list below that would be relevant:
+
+ ${tags.map(({ key, description }) => `- ${key}: ${description}`).join("\n\n")}`;
  };
- const formatTagInstructions = (tags = []) => tags.map((tag) => `- ${tag.key}: ${tag.instructions}`).join("\n\n");
  const auditDictionaryField = async ({
  fileContent,
- model,
- openAiApiKey,
- temperature,
- customPrompt,
+ aiOptions,
  locales,
  keyPath,
  tags
  }) => {
  try {
- const openai = new import_openai.OpenAI({
- apiKey: openAiApiKey ?? process.env.OPENAI_API_KEY
- });
- const prompt = customPrompt ?? CHAT_GPT_PROMPT.replace(
+ const prompt = CHAT_GPT_PROMPT.replace(
  "{{otherLocales}}",
  `{${locales.map(formatLocaleWithName).join(", ")}}`
- ).replace("{{keyPath}}", JSON.stringify(keyPath)).replace("{{fileContent}}", fileContent).replace("{{tagsInstructions}}", formatTagInstructions(tags));
- const chatCompletion = await openai.chat.completions.create({
- model: openAiApiKey ? model ?? "gpt-4o-2024-11-20" : "gpt-4o-2024-11-20",
- temperature: openAiApiKey ? temperature ?? 0.1 : 0.1,
+ ).replace("{{keyPath}}", JSON.stringify(keyPath)).replace("{{fileContent}}", fileContent).replace("{{applicationContext}}", aiOptions?.applicationContext ?? "").replace("{{tagsInstructions}}", formatTagInstructions(tags));
+ const aiConfig = await (0, import_aiSdk.getAIConfig)({
+ provider: import_aiSdk.AIProvider.OPENAI,
+ model: "gpt-4o-mini",
+ apiKey: process.env.OPENAI_API_KEY,
+ ...aiOptions
+ });
+ if (!aiConfig) {
+ import_logger.logger.error("Failed to configure AI model");
+ return void 0;
+ }
+ const { text: newContent, usage } = await (0, import_ai.generateText)({
+ model: aiConfig.model,
+ temperature: aiConfig.temperature,
  messages: [{ role: "system", content: prompt }]
  });
- const newContent = chatCompletion.choices[0].message?.content;
- import_logger.logger.info(
- `${chatCompletion.usage?.total_tokens} tokens used in the request`
- );
+ import_logger.logger.info(`${usage?.totalTokens ?? 0} tokens used in the request`);
  return {
- fileContent: newContent ?? "",
- tokenUsed: chatCompletion.usage?.total_tokens ?? 0
+ fileContent: newContent,
+ tokenUsed: usage?.totalTokens ?? 0
  };
  } catch (error) {
  console.error(error);
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../src/utils/AI/auditDictionaryField/index.ts"],"sourcesContent":["import { readFileSync } from 'fs';\nimport { dirname, join } from 'path';\nimport { fileURLToPath } from 'url';\nimport { getLocaleName, type KeyPath } from '@intlayer/core';\nimport { logger } from '@logger';\nimport type { Locales } from 'intlayer';\nimport { OpenAI } from 'openai';\nimport type { Tag } from '@/types/tag.types';\n\nconst __dirname = dirname(fileURLToPath(import.meta.url));\n\nexport type AIOptions = {\n model?: string;\n temperature?: number;\n openAiApiKey?: string;\n};\n\nexport type AuditDictionaryFieldOptions = {\n locales: Locales[];\n fileContent: string;\n customPrompt?: string;\n keyPath: KeyPath[];\n tags?: Tag[];\n} & AIOptions;\nexport type AuditDictionaryFieldResultData = {\n fileContent: string;\n tokenUsed: number;\n};\n\n/**\n * Reads the content of a file synchronously.\n *\n * @function\n * @param relativeFilePath - The relative or absolute path to the target file.\n * @returns The entire contents of the specified file as a UTF-8 encoded string.\n */\nconst getFileContent = (relativeFilePath: string): string => {\n const absolutePath = join(__dirname, relativeFilePath);\n const fileContent = readFileSync(absolutePath, 'utf-8');\n return fileContent;\n};\n\n// The prompt template to send to ChatGPT, requesting an audit of content declaration files.\nconst CHAT_GPT_PROMPT = getFileContent('./PROMPT.md');\n\n/**\n * Formats a locale with its full name and returns a string representation.\n *\n * @function\n * @param locale - A locale from the project's configuration (e.g., 'en-US', 'fr-FR').\n * @returns A formatted string combining the locale's name and code. Example: \"English (US): en-US\".\n */\nconst formatLocaleWithName = (locale: Locales): string => {\n // getLocaleName returns a human-readable name for the locale.\n const localeName = getLocaleName(locale);\n\n // Concatenate both the readable name and the locale code.\n return `${locale}: ${localeName}`;\n};\n\n/**\n * Formats an array of tags with their keys and instructions.\n *\n * @function\n * @param tags - An array of tags from the project's configuration.\n * @returns A string representation of the tags, with their keys and instructions.\n */\nconst formatTagInstructions = (tags: Tag[] = []) =>\n tags.map((tag) => `- ${tag.key}: ${tag.instructions}`).join('\\n\\n');\n\n/**\n * Audits a content declaration file by constructing a prompt for ChatGPT.\n * The prompt includes details about the project's locales, file paths of content declarations,\n * and requests for identifying issues or inconsistencies. It prints the prompt for each file,\n * and could be adapted to send requests to the ChatGPT model.\n */\nexport const auditDictionaryField = async ({\n fileContent,\n model,\n openAiApiKey,\n temperature,\n customPrompt,\n locales,\n keyPath,\n tags,\n}: AuditDictionaryFieldOptions): Promise<\n AuditDictionaryFieldResultData | undefined\n> => {\n try {\n // Optionally, you could initialize and configure the OpenAI client here, if you intend to make API calls.\n // Uncomment and configure the following lines if you have `openai` installed and want to call the API:\n\n const openai = new OpenAI({\n apiKey: openAiApiKey ?? 
process.env.OPENAI_API_KEY,\n });\n\n // Prepare the prompt for ChatGPT by replacing placeholders with actual values.\n const prompt =\n customPrompt ??\n CHAT_GPT_PROMPT.replace(\n '{{otherLocales}}',\n `{${locales.map(formatLocaleWithName).join(', ')}}`\n )\n .replace('{{keyPath}}', JSON.stringify(keyPath))\n .replace('{{fileContent}}', fileContent)\n .replace('{{tagsInstructions}}', formatTagInstructions(tags));\n\n // Example of how you might request a completion from ChatGPT:\n const chatCompletion = await openai.chat.completions.create({\n model: openAiApiKey\n ? (model ?? 'gpt-4o-2024-11-20')\n : 'gpt-4o-2024-11-20',\n temperature: openAiApiKey ? (temperature ?? 0.1) : 0.1,\n messages: [{ role: 'system', content: prompt }],\n });\n\n const newContent = chatCompletion.choices[0].message?.content;\n\n logger.info(\n `${chatCompletion.usage?.total_tokens} tokens used in the request`\n );\n\n return {\n fileContent: newContent ?? '',\n tokenUsed: chatCompletion.usage?.total_tokens ?? 0,\n };\n } catch (error) {\n console.error(error);\n }\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAA6B;AAC7B,kBAA8B;AAC9B,iBAA8B;AAC9B,kBAA4C;AAC5C,oBAAuB;AAEvB,oBAAuB;AANvB;AASA,MAAM,gBAAY,yBAAQ,0BAAc,YAAY,GAAG,CAAC;AA2BxD,MAAM,iBAAiB,CAAC,qBAAqC;AAC3D,QAAM,mBAAe,kBAAK,WAAW,gBAAgB;AACrD,QAAM,kBAAc,wBAAa,cAAc,OAAO;AACtD,SAAO;AACT;AAGA,MAAM,kBAAkB,eAAe,aAAa;AASpD,MAAM,uBAAuB,CAAC,WAA4B;AAExD,QAAM,iBAAa,2BAAc,MAAM;AAGvC,SAAO,GAAG,MAAM,KAAK,UAAU;AACjC;AASA,MAAM,wBAAwB,CAAC,OAAc,CAAC,MAC5C,KAAK,IAAI,CAAC,QAAQ,KAAK,IAAI,GAAG,KAAK,IAAI,YAAY,EAAE,EAAE,KAAK,MAAM;AAQ7D,MAAM,uBAAuB,OAAO;AAAA,EACzC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,MAEK;AACH,MAAI;AAIF,UAAM,SAAS,IAAI,qBAAO;AAAA,MACxB,QAAQ,gBAAgB,QAAQ,IAAI;AAAA,IACtC,CAAC;AAGD,UAAM,SACJ,gBACA,gBAAgB;AAAA,MACd;AAAA,MACA,IAAI,QAAQ,IAAI,oBAAoB,EAAE,KAAK,IAAI,CAAC;AAAA,IAClD,EACG,QAAQ,eAAe,KAAK,UAAU,OAAO,CAAC,EAC9C,QAAQ,mBAAmB,WAAW,EACtC,QAAQ,wBAAwB,sBAAsB,IAAI,CAAC;AAGhE,UAAM,iBAAiB,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,MAC1D,OAAO,eACF,SAAS,sBACV;AAAA,MACJ,aAAa,eAAgB,eAAe,MAAO;AAAA,MACnD,UAAU,CAAC,EAAE,MAAM,UAAU,SAAS,OAAO,CAAC;AAAA,IAChD,CAAC;AAED,UAAM,aAAa,eAAe,QAAQ,CAAC,EAAE,SAAS;AAEtD,yBAAO;AAAA,MACL,GAAG,eAAe,OAAO,YAAY;AAAA,IACvC;AAEA,WAAO;AAAA,MACL,aAAa,cAAc;AAAA,MAC3B,WAAW,eAAe,OAAO,gBAAgB;AAAA,IACnD;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,KAAK;AAAA,EACrB;AACF;","names":[]}
+ {"version":3,"sources":["../../../../../src/utils/AI/auditDictionaryField/index.ts"],"sourcesContent":["import type { Tag } from '@/types/tag.types';\nimport { getLocaleName, type KeyPath } from '@intlayer/core';\nimport { logger } from '@logger';\nimport { generateText } from 'ai';\nimport { readFileSync } from 'fs';\nimport type { Locales } from 'intlayer';\nimport { dirname, join } from 'path';\nimport { fileURLToPath } from 'url';\nimport { AIOptions, AIProvider, getAIConfig } from '../aiSdk';\n\nconst __dirname = dirname(fileURLToPath(import.meta.url));\n\n// Get the content of a file at the specified path\nconst getFileContent = (filePath: string) => {\n return readFileSync(join(__dirname, filePath), { encoding: 'utf-8' });\n};\n\nexport type AuditDictionaryFieldOptions = {\n fileContent: string;\n locales: Locales[];\n keyPath: KeyPath[];\n tags: Tag[];\n aiOptions?: AIOptions;\n};\n\nexport type AuditDictionaryFieldResultData = {\n fileContent: string;\n tokenUsed: number;\n};\n\n// The prompt template to send to the AI model\nconst CHAT_GPT_PROMPT = getFileContent('./PROMPT.md');\n\n/**\n * Format a locale with its name.\n *\n * @param locale - The locale to format.\n * @returns A string in the format \"locale: name\", e.g. \"en: English\".\n */\nconst formatLocaleWithName = (locale: Locales): string => {\n return `${locale}: ${getLocaleName(locale)}`;\n};\n\n/**\n * Formats tag instructions for the AI prompt.\n *\n * @param tags - Array of tags to format\n * @returns A formatted string with tag instructions\n */\nconst formatTagInstructions = (tags: Tag[]): string => {\n if (!tags || tags.length === 0) {\n return '';\n }\n\n return `Based on the dictionary content, identify specific tags from the list below that would be relevant:\n \n${tags.map(({ key, description }) => `- ${key}: ${description}`).join('\\n\\n')}`;\n};\n\n/**\n * Audits a content declaration file by constructing a prompt for AI models.\n * The prompt includes details about the project's locales, file paths of content declarations,\n * and requests for identifying issues or inconsistencies.\n */\nexport const auditDictionaryField = async ({\n fileContent,\n aiOptions,\n locales,\n keyPath,\n tags,\n}: AuditDictionaryFieldOptions): Promise<\n AuditDictionaryFieldResultData | undefined\n> => {\n try {\n // Prepare the prompt for AI by replacing placeholders with actual values.\n const prompt = CHAT_GPT_PROMPT.replace(\n '{{otherLocales}}',\n `{${locales.map(formatLocaleWithName).join(', ')}}`\n )\n .replace('{{keyPath}}', JSON.stringify(keyPath))\n .replace('{{fileContent}}', fileContent)\n .replace('{{applicationContext}}', aiOptions?.applicationContext ?? '')\n .replace('{{tagsInstructions}}', formatTagInstructions(tags));\n\n // Get the appropriate AI model configuration\n const aiConfig = await getAIConfig({\n provider: AIProvider.OPENAI,\n model: 'gpt-4o-mini',\n apiKey: process.env.OPENAI_API_KEY,\n ...aiOptions,\n });\n\n if (!aiConfig) {\n logger.error('Failed to configure AI model');\n return undefined;\n }\n\n // Use the AI SDK to generate the completion\n const { text: newContent, usage } = await generateText({\n model: aiConfig.model,\n temperature: aiConfig.temperature,\n messages: [{ role: 'system', content: prompt }],\n });\n\n logger.info(`${usage?.totalTokens ?? 0} tokens used in the request`);\n\n return {\n fileContent: newContent,\n tokenUsed: usage?.totalTokens ?? 
0,\n };\n } catch (error) {\n console.error(error);\n }\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,kBAA4C;AAC5C,oBAAuB;AACvB,gBAA6B;AAC7B,gBAA6B;AAE7B,kBAA8B;AAC9B,iBAA8B;AAC9B,mBAAmD;AARnD;AAUA,MAAM,gBAAY,yBAAQ,0BAAc,YAAY,GAAG,CAAC;AAGxD,MAAM,iBAAiB,CAAC,aAAqB;AAC3C,aAAO,4BAAa,kBAAK,WAAW,QAAQ,GAAG,EAAE,UAAU,QAAQ,CAAC;AACtE;AAgBA,MAAM,kBAAkB,eAAe,aAAa;AAQpD,MAAM,uBAAuB,CAAC,WAA4B;AACxD,SAAO,GAAG,MAAM,SAAK,2BAAc,MAAM,CAAC;AAC5C;AAQA,MAAM,wBAAwB,CAAC,SAAwB;AACrD,MAAI,CAAC,QAAQ,KAAK,WAAW,GAAG;AAC9B,WAAO;AAAA,EACT;AAEA,SAAO;AAAA;AAAA,EAEP,KAAK,IAAI,CAAC,EAAE,KAAK,YAAY,MAAM,KAAK,GAAG,KAAK,WAAW,EAAE,EAAE,KAAK,MAAM,CAAC;AAC7E;AAOO,MAAM,uBAAuB,OAAO;AAAA,EACzC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,MAEK;AACH,MAAI;AAEF,UAAM,SAAS,gBAAgB;AAAA,MAC7B;AAAA,MACA,IAAI,QAAQ,IAAI,oBAAoB,EAAE,KAAK,IAAI,CAAC;AAAA,IAClD,EACG,QAAQ,eAAe,KAAK,UAAU,OAAO,CAAC,EAC9C,QAAQ,mBAAmB,WAAW,EACtC,QAAQ,0BAA0B,WAAW,sBAAsB,EAAE,EACrE,QAAQ,wBAAwB,sBAAsB,IAAI,CAAC;AAG9D,UAAM,WAAW,UAAM,0BAAY;AAAA,MACjC,UAAU,wBAAW;AAAA,MACrB,OAAO;AAAA,MACP,QAAQ,QAAQ,IAAI;AAAA,MACpB,GAAG;AAAA,IACL,CAAC;AAED,QAAI,CAAC,UAAU;AACb,2BAAO,MAAM,8BAA8B;AAC3C,aAAO;AAAA,IACT;AAGA,UAAM,EAAE,MAAM,YAAY,MAAM,IAAI,UAAM,wBAAa;AAAA,MACrD,OAAO,SAAS;AAAA,MAChB,aAAa,SAAS;AAAA,MACtB,UAAU,CAAC,EAAE,MAAM,UAAU,SAAS,OAAO,CAAC;AAAA,IAChD,CAAC;AAED,yBAAO,KAAK,GAAG,OAAO,eAAe,CAAC,6BAA6B;AAEnE,WAAO;AAAA,MACL,aAAa;AAAA,MACb,WAAW,OAAO,eAAe;AAAA,IACnC;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,KAAK;AAAA,EACrB;AACF;","names":[]}
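`auditDictionaryField` follows the same migration but scopes the audit to a single field through `keyPath` (a `KeyPath[]` from `@intlayer/core`). A minimal sketch under the same re-export assumption; the key path is left empty rather than guessing its element shape:

```ts
import type { KeyPath } from '@intlayer/core';
import { Locales } from 'intlayer';
import { auditDictionaryField } from '@intlayer/backend';

// Path to the field being audited; its exact element shape is defined by
// @intlayer/core, so it is left empty in this sketch.
const keyPath: KeyPath[] = [];

const result = await auditDictionaryField({
  fileContent: '/* content declaration source */',
  locales: [Locales.ENGLISH, Locales.SPANISH],
  keyPath,
  tags: [],
  aiOptions: { applicationContext: 'Checkout flow of an e-commerce app' },
});

console.log(result?.fileContent);
```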
@@ -65,6 +65,10 @@ export default metadataContent;
  }
  ```
 
+ **Application Context**
+
+ {{applicationContext}}
+
  **List of existing Tags:**
 
  Here the list of existing tags as a context to help you to pick related ones.
@@ -22,50 +22,50 @@ __export(auditDictionaryMetadata_exports, {
  });
  module.exports = __toCommonJS(auditDictionaryMetadata_exports);
  var import_logger = require('./../../../logger/index.cjs');
+ var import_ai = require("ai");
  var import_fs = require("fs");
- var import_openai = require("openai");
  var import_path = require("path");
  var import_url = require("url");
+ var import_aiSdk = require('../aiSdk.cjs');
  const import_meta = {};
  const __dirname = (0, import_path.dirname)((0, import_url.fileURLToPath)(import_meta.url));
- const getFileContent = (relativeFilePath) => {
- const absolutePath = (0, import_path.join)(__dirname, relativeFilePath);
- const fileContent = (0, import_fs.readFileSync)(absolutePath, "utf-8");
- return fileContent;
+ const getFileContent = (filePath) => {
+ return (0, import_fs.readFileSync)((0, import_path.join)(__dirname, filePath), { encoding: "utf-8" });
  };
  const CHAT_GPT_PROMPT = getFileContent("./PROMPT.md");
  const auditDictionaryMetadata = async ({
- model,
- openAiApiKey,
- temperature,
- customPrompt,
+ aiOptions,
  tags,
  fileContent
  }) => {
  try {
- const openai = new import_openai.OpenAI({
- apiKey: openAiApiKey ?? process.env.OPENAI_API_KEY
- });
- const prompt = customPrompt ?? CHAT_GPT_PROMPT.replace(
+ const prompt = CHAT_GPT_PROMPT.replace(
  "{{tags}}",
  `${JSON.stringify(
  tags.map(({ key, description }) => `- ${key}: ${description}`).join("\n\n"),
  null,
  2
  )}`
- ).replace("{{contentDeclaration}}", fileContent);
- const chatCompletion = await openai.chat.completions.create({
- model: openAiApiKey ? model ?? "gpt-4o-2024-11-20" : "gpt-4o-2024-11-20",
- temperature: openAiApiKey ? temperature ?? 0.1 : 0.1,
+ ).replace("{{contentDeclaration}}", fileContent).replace("{{applicationContext}}", aiOptions?.applicationContext ?? "");
+ const aiConfig = await (0, import_aiSdk.getAIConfig)({
+ provider: import_aiSdk.AIProvider.OPENAI,
+ model: "gpt-4o-mini",
+ apiKey: process.env.OPENAI_API_KEY,
+ ...aiOptions
+ });
+ if (!aiConfig) {
+ import_logger.logger.error("Failed to configure AI model");
+ return void 0;
+ }
+ const { text: newContent, usage } = await (0, import_ai.generateText)({
+ model: aiConfig.model,
+ temperature: aiConfig.temperature,
  messages: [{ role: "system", content: prompt }]
  });
- const newContent = chatCompletion.choices[0].message?.content;
- import_logger.logger.info(
- `${chatCompletion.usage?.total_tokens} tokens used in the request`
- );
+ import_logger.logger.info(`${usage?.totalTokens ?? 0} tokens used in the request`);
  return {
- fileContent: newContent ?? "",
- tokenUsed: chatCompletion.usage?.total_tokens ?? 0
+ fileContent: newContent,
+ tokenUsed: usage?.totalTokens ?? 0
  };
  } catch (error) {
  console.error(error);
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../src/utils/AI/auditDictionaryMetadata/index.ts"],"sourcesContent":["import type { Tag } from '@/types/tag.types';\nimport { logger } from '@logger';\nimport { readFileSync } from 'fs';\nimport { OpenAI } from 'openai';\nimport { dirname, join } from 'path';\nimport { fileURLToPath } from 'url';\n\nconst __dirname = dirname(fileURLToPath(import.meta.url));\n\nexport type AIOptions = {\n model?: string;\n temperature?: number;\n openAiApiKey?: string;\n};\n\nexport type AuditOptions = {\n tags: Tag[];\n fileContent: string;\n customPrompt?: string;\n} & AIOptions;\nexport type AuditFileResultData = { fileContent: string; tokenUsed: number };\n\n/**\n * Reads the content of a file synchronously.\n *\n * @function\n * @param relativeFilePath - The relative or absolute path to the target file.\n * @returns The entire contents of the specified file as a UTF-8 encoded string.\n */\nconst getFileContent = (relativeFilePath: string): string => {\n const absolutePath = join(__dirname, relativeFilePath);\n const fileContent = readFileSync(absolutePath, 'utf-8');\n return fileContent;\n};\n\n// The prompt template to send to ChatGPT, requesting an audit of content declaration files.\nconst CHAT_GPT_PROMPT = getFileContent('./PROMPT.md');\n\n/**\n * Audits a content declaration file by constructing a prompt for ChatGPT.\n * The prompt includes details about the project's locales, file paths of content declarations,\n * and requests for identifying issues or inconsistencies. It prints the prompt for each file,\n * and could be adapted to send requests to the ChatGPT model.\n *\n */\nexport const auditDictionaryMetadata = async ({\n model,\n openAiApiKey,\n temperature,\n customPrompt,\n tags,\n fileContent,\n}: AuditOptions): Promise<AuditFileResultData | undefined> => {\n try {\n // Optionally, you could initialize and configure the OpenAI client here, if you intend to make API calls.\n // Uncomment and configure the following lines if you have `openai` installed and want to call the API:\n\n const openai = new OpenAI({\n apiKey: openAiApiKey ?? process.env.OPENAI_API_KEY,\n });\n\n // Prepare the prompt for ChatGPT by replacing placeholders with actual values.\n const prompt =\n customPrompt ??\n CHAT_GPT_PROMPT.replace(\n '{{tags}}',\n `${JSON.stringify(\n tags\n .map(({ key, description }) => `- ${key}: ${description}`)\n .join('\\n\\n'),\n null,\n 2\n )}`\n ).replace('{{contentDeclaration}}', fileContent);\n\n // Example of how you might request a completion from ChatGPT:\n const chatCompletion = await openai.chat.completions.create({\n model: openAiApiKey\n ? (model ?? 'gpt-4o-2024-11-20')\n : 'gpt-4o-2024-11-20',\n temperature: openAiApiKey ? (temperature ?? 0.1) : 0.1,\n messages: [{ role: 'system', content: prompt }],\n });\n\n const newContent = chatCompletion.choices[0].message?.content;\n\n logger.info(\n `${chatCompletion.usage?.total_tokens} tokens used in the request`\n );\n\n return {\n fileContent: newContent ?? '',\n tokenUsed: chatCompletion.usage?.total_tokens ?? 
0,\n };\n } catch (error) {\n console.error(error);\n }\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,oBAAuB;AACvB,gBAA6B;AAC7B,oBAAuB;AACvB,kBAA8B;AAC9B,iBAA8B;AAL9B;AAOA,MAAM,gBAAY,yBAAQ,0BAAc,YAAY,GAAG,CAAC;AAsBxD,MAAM,iBAAiB,CAAC,qBAAqC;AAC3D,QAAM,mBAAe,kBAAK,WAAW,gBAAgB;AACrD,QAAM,kBAAc,wBAAa,cAAc,OAAO;AACtD,SAAO;AACT;AAGA,MAAM,kBAAkB,eAAe,aAAa;AAS7C,MAAM,0BAA0B,OAAO;AAAA,EAC5C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,MAA8D;AAC5D,MAAI;AAIF,UAAM,SAAS,IAAI,qBAAO;AAAA,MACxB,QAAQ,gBAAgB,QAAQ,IAAI;AAAA,IACtC,CAAC;AAGD,UAAM,SACJ,gBACA,gBAAgB;AAAA,MACd;AAAA,MACA,GAAG,KAAK;AAAA,QACN,KACG,IAAI,CAAC,EAAE,KAAK,YAAY,MAAM,KAAK,GAAG,KAAK,WAAW,EAAE,EACxD,KAAK,MAAM;AAAA,QACd;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH,EAAE,QAAQ,0BAA0B,WAAW;AAGjD,UAAM,iBAAiB,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,MAC1D,OAAO,eACF,SAAS,sBACV;AAAA,MACJ,aAAa,eAAgB,eAAe,MAAO;AAAA,MACnD,UAAU,CAAC,EAAE,MAAM,UAAU,SAAS,OAAO,CAAC;AAAA,IAChD,CAAC;AAED,UAAM,aAAa,eAAe,QAAQ,CAAC,EAAE,SAAS;AAEtD,yBAAO;AAAA,MACL,GAAG,eAAe,OAAO,YAAY;AAAA,IACvC;AAEA,WAAO;AAAA,MACL,aAAa,cAAc;AAAA,MAC3B,WAAW,eAAe,OAAO,gBAAgB;AAAA,IACnD;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,KAAK;AAAA,EACrB;AACF;","names":[]}
+ {"version":3,"sources":["../../../../../src/utils/AI/auditDictionaryMetadata/index.ts"],"sourcesContent":["import type { Tag } from '@/types/tag.types';\nimport { logger } from '@logger';\nimport { generateText } from 'ai';\nimport { readFileSync } from 'fs';\nimport { dirname, join } from 'path';\nimport { fileURLToPath } from 'url';\nimport { AIOptions, AIProvider, getAIConfig } from '../aiSdk';\n\nconst __dirname = dirname(fileURLToPath(import.meta.url));\n\n// Get the content of a file at the specified path\nconst getFileContent = (filePath: string) => {\n return readFileSync(join(__dirname, filePath), { encoding: 'utf-8' });\n};\n\nexport type AuditOptions = {\n fileContent: string;\n tags: Tag[];\n aiOptions?: AIOptions;\n};\n\nexport type AuditFileResultData = {\n fileContent: string;\n tokenUsed: number;\n};\n\n// The prompt template to send to AI models\nconst CHAT_GPT_PROMPT = getFileContent('./PROMPT.md');\n\n/**\n * Audits a content declaration file by constructing a prompt for AI models.\n * The prompt includes details about the project's locales, file paths of content declarations,\n * and requests for identifying issues or inconsistencies.\n */\nexport const auditDictionaryMetadata = async ({\n aiOptions,\n tags,\n fileContent,\n}: AuditOptions): Promise<AuditFileResultData | undefined> => {\n try {\n // Prepare the prompt for AI by replacing placeholders with actual values.\n const prompt = CHAT_GPT_PROMPT.replace(\n '{{tags}}',\n `${JSON.stringify(\n tags\n .map(({ key, description }) => `- ${key}: ${description}`)\n .join('\\n\\n'),\n null,\n 2\n )}`\n )\n .replace('{{contentDeclaration}}', fileContent)\n .replace('{{applicationContext}}', aiOptions?.applicationContext ?? '');\n\n // Get the appropriate AI model configuration\n const aiConfig = await getAIConfig({\n provider: AIProvider.OPENAI,\n model: 'gpt-4o-mini',\n apiKey: process.env.OPENAI_API_KEY,\n ...aiOptions,\n });\n\n if (!aiConfig) {\n logger.error('Failed to configure AI model');\n return undefined;\n }\n\n // Use the AI SDK to generate the completion\n const { text: newContent, usage } = await generateText({\n model: aiConfig.model,\n temperature: aiConfig.temperature,\n messages: [{ role: 'system', content: prompt }],\n });\n\n logger.info(`${usage?.totalTokens ?? 0} tokens used in the request`);\n\n return {\n fileContent: newContent,\n tokenUsed: usage?.totalTokens ?? 
0,\n };\n } catch (error) {\n console.error(error);\n }\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,oBAAuB;AACvB,gBAA6B;AAC7B,gBAA6B;AAC7B,kBAA8B;AAC9B,iBAA8B;AAC9B,mBAAmD;AANnD;AAQA,MAAM,gBAAY,yBAAQ,0BAAc,YAAY,GAAG,CAAC;AAGxD,MAAM,iBAAiB,CAAC,aAAqB;AAC3C,aAAO,4BAAa,kBAAK,WAAW,QAAQ,GAAG,EAAE,UAAU,QAAQ,CAAC;AACtE;AAcA,MAAM,kBAAkB,eAAe,aAAa;AAO7C,MAAM,0BAA0B,OAAO;AAAA,EAC5C;AAAA,EACA;AAAA,EACA;AACF,MAA8D;AAC5D,MAAI;AAEF,UAAM,SAAS,gBAAgB;AAAA,MAC7B;AAAA,MACA,GAAG,KAAK;AAAA,QACN,KACG,IAAI,CAAC,EAAE,KAAK,YAAY,MAAM,KAAK,GAAG,KAAK,WAAW,EAAE,EACxD,KAAK,MAAM;AAAA,QACd;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH,EACG,QAAQ,0BAA0B,WAAW,EAC7C,QAAQ,0BAA0B,WAAW,sBAAsB,EAAE;AAGxE,UAAM,WAAW,UAAM,0BAAY;AAAA,MACjC,UAAU,wBAAW;AAAA,MACrB,OAAO;AAAA,MACP,QAAQ,QAAQ,IAAI;AAAA,MACpB,GAAG;AAAA,IACL,CAAC;AAED,QAAI,CAAC,UAAU;AACb,2BAAO,MAAM,8BAA8B;AAC3C,aAAO;AAAA,IACT;AAGA,UAAM,EAAE,MAAM,YAAY,MAAM,IAAI,UAAM,wBAAa;AAAA,MACrD,OAAO,SAAS;AAAA,MAChB,aAAa,SAAS;AAAA,MACtB,UAAU,CAAC,EAAE,MAAM,UAAU,SAAS,OAAO,CAAC;AAAA,IAChD,CAAC;AAED,yBAAO,KAAK,GAAG,OAAO,eAAe,CAAC,6BAA6B;AAEnE,WAAO;AAAA,MACL,aAAa;AAAA,MACb,WAAW,OAAO,eAAe;AAAA,IACnC;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,KAAK;AAAA,EACrB;AACF;","names":[]}
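One detail worth noting in the metadata audit is how the `{{tags}}` placeholder is built: the tag list is first joined with blank lines, and the resulting string is then passed through `JSON.stringify`, so the prompt receives a single quoted, escape-encoded string. A small sketch of just that step (tag values are illustrative):

```ts
// Reproduces the {{tags}} placeholder construction from the compiled hunk above.
const tags = [
  { key: 'pricing', description: 'Plans and pricing pages' },
  { key: 'onboarding', description: 'First-run onboarding screens' },
];

const tagsPlaceholder = `${JSON.stringify(
  tags.map(({ key, description }) => `- ${key}: ${description}`).join('\n\n'),
  null,
  2
)}`;

console.log(tagsPlaceholder);
// -> "- pricing: Plans and pricing pages\n\n- onboarding: First-run onboarding screens"
// (quoted, with escaped newlines: JSON.stringify of a string ignores the indent argument)
```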
@@ -21,6 +21,10 @@ Your role is to review a tag. A tag is attached to a content declaration and is
  - **Do Not Alter Structure:** If the file structure is correct, do not modify it. Only add, update, or remove content declarations as necessary.
  - **Return Only Final File Content:** Provide the updated file content without any additional comments or explanations.
 
+ **Application Context**
+
+ {{applicationContext}}
+
  **Tags to Audit:**
 
  {{tag}}
@@ -21,47 +21,47 @@ __export(auditTag_exports, {
  auditTag: () => auditTag
  });
  module.exports = __toCommonJS(auditTag_exports);
+ var import_logger = require('./../../../logger/index.cjs');
+ var import_ai = require("ai");
  var import_fs = require("fs");
  var import_path = require("path");
  var import_url = require("url");
- var import_logger = require('./../../logger/index.cjs');
- var import_openai = require("openai");
+ var import_aiSdk = require('../aiSdk.cjs');
  const import_meta = {};
  const __dirname = (0, import_path.dirname)((0, import_url.fileURLToPath)(import_meta.url));
- const getFileContent = (relativeFilePath) => {
- const absolutePath = (0, import_path.join)(__dirname, relativeFilePath);
- const fileContent = (0, import_fs.readFileSync)(absolutePath, "utf-8");
- return fileContent;
+ const getFileContent = (filePath) => {
+ return (0, import_fs.readFileSync)((0, import_path.join)(__dirname, filePath), { encoding: "utf-8" });
  };
  const CHAT_GPT_PROMPT = getFileContent("./PROMPT.md");
  const auditTag = async ({
- model,
- openAiApiKey,
- customPrompt,
- temperature,
- tag,
- dictionaries
+ aiOptions,
+ dictionaries,
+ tag
  }) => {
  try {
- const openai = new import_openai.OpenAI({
- apiKey: openAiApiKey ?? process.env.OPENAI_API_KEY
+ const prompt = CHAT_GPT_PROMPT.replace(
+ "{{tag.description}}",
+ tag.description ?? ""
+ ).replace("{{tag.key}}", tag.key).replace("{{dictionaries}}", JSON.stringify(dictionaries, null, 2)).replace("{{applicationContext}}", aiOptions?.applicationContext ?? "");
+ const aiConfig = await (0, import_aiSdk.getAIConfig)({
+ provider: import_aiSdk.AIProvider.OPENAI,
+ model: "gpt-4o-mini",
+ apiKey: process.env.OPENAI_API_KEY,
+ ...aiOptions
  });
- const prompt = customPrompt ?? CHAT_GPT_PROMPT.replace("{{tag}}", `${JSON.stringify(tag)}`).replace(
- "{{contentDeclarations}}",
- dictionaries.map((dictionary) => `- ${JSON.stringify(dictionary)}`).join("\n\n")
- );
- const chatCompletion = await openai.chat.completions.create({
- model: openAiApiKey ? model ?? "gpt-4o-2024-11-20" : "gpt-4o-2024-11-20",
- temperature: openAiApiKey ? temperature ?? 0.1 : 0.1,
+ if (!aiConfig) {
+ import_logger.logger.error("Failed to configure AI model");
+ return void 0;
+ }
+ const { text: newContent, usage } = await (0, import_ai.generateText)({
+ model: aiConfig.model,
+ temperature: aiConfig.temperature,
  messages: [{ role: "system", content: prompt }]
  });
- const newContent = chatCompletion.choices[0].message?.content;
- import_logger.logger.info(
- `${chatCompletion.usage?.total_tokens} tokens used in the request`
- );
+ import_logger.logger.info(`${usage?.totalTokens ?? 0} tokens used in the request`);
  return {
- fileContent: newContent ?? "",
- tokenUsed: chatCompletion.usage?.total_tokens ?? 0
+ fileContent: newContent,
+ tokenUsed: usage?.totalTokens ?? 0
  };
  } catch (error) {
  console.error(error);
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../../../../src/utils/AI/auditTag/index.ts"],"sourcesContent":["import type { Dictionary } from '@/types/dictionary.types';\nimport type { Tag } from '@/types/tag.types';\nimport { logger } from '@logger';\nimport { generateText } from 'ai';\nimport { readFileSync } from 'fs';\nimport { dirname, join } from 'path';\nimport { fileURLToPath } from 'url';\nimport { AIOptions, AIProvider, getAIConfig } from '../aiSdk';\n\nconst __dirname = dirname(fileURLToPath(import.meta.url));\n\n// Get the content of a file at the specified path\nconst getFileContent = (filePath: string) => {\n return readFileSync(join(__dirname, filePath), { encoding: 'utf-8' });\n};\n\nexport type AuditOptions = {\n dictionaries: Dictionary[];\n tag: Tag;\n aiOptions?: AIOptions;\n};\n\nexport type TranslateJSONResultData = {\n fileContent: string;\n tokenUsed: number;\n};\n\n// The prompt template to send to AI models\nconst CHAT_GPT_PROMPT = getFileContent('./PROMPT.md');\n\n/**\n * Audits a tag by constructing a prompt for AI models.\n * The prompt includes details about the tag and related dictionaries.\n */\nexport const auditTag = async ({\n aiOptions,\n dictionaries,\n tag,\n}: AuditOptions): Promise<TranslateJSONResultData | undefined> => {\n try {\n // Prepare the prompt for AI by replacing placeholders with actual values.\n const prompt = CHAT_GPT_PROMPT.replace(\n '{{tag.description}}',\n tag.description ?? ''\n )\n .replace('{{tag.key}}', tag.key)\n .replace('{{dictionaries}}', JSON.stringify(dictionaries, null, 2))\n .replace('{{applicationContext}}', aiOptions?.applicationContext ?? '');\n\n // Get the appropriate AI model configuration\n const aiConfig = await getAIConfig({\n provider: AIProvider.OPENAI,\n model: 'gpt-4o-mini',\n apiKey: process.env.OPENAI_API_KEY,\n ...aiOptions,\n });\n\n if (!aiConfig) {\n logger.error('Failed to configure AI model');\n return undefined;\n }\n\n // Use the AI SDK to generate the completion\n const { text: newContent, usage } = await generateText({\n model: aiConfig.model,\n temperature: aiConfig.temperature,\n messages: [{ role: 'system', content: prompt }],\n });\n\n logger.info(`${usage?.totalTokens ?? 0} tokens used in the request`);\n\n return {\n fileContent: newContent,\n tokenUsed: usage?.totalTokens ?? 0,\n };\n } catch (error) {\n console.error(error);\n }\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,oBAAuB;AACvB,gBAA6B;AAC7B,gBAA6B;AAC7B,kBAA8B;AAC9B,iBAA8B;AAC9B,mBAAmD;AAPnD;AASA,MAAM,gBAAY,yBAAQ,0BAAc,YAAY,GAAG,CAAC;AAGxD,MAAM,iBAAiB,CAAC,aAAqB;AAC3C,aAAO,4BAAa,kBAAK,WAAW,QAAQ,GAAG,EAAE,UAAU,QAAQ,CAAC;AACtE;AAcA,MAAM,kBAAkB,eAAe,aAAa;AAM7C,MAAM,WAAW,OAAO;AAAA,EAC7B;AAAA,EACA;AAAA,EACA;AACF,MAAkE;AAChE,MAAI;AAEF,UAAM,SAAS,gBAAgB;AAAA,MAC7B;AAAA,MACA,IAAI,eAAe;AAAA,IACrB,EACG,QAAQ,eAAe,IAAI,GAAG,EAC9B,QAAQ,oBAAoB,KAAK,UAAU,cAAc,MAAM,CAAC,CAAC,EACjE,QAAQ,0BAA0B,WAAW,sBAAsB,EAAE;AAGxE,UAAM,WAAW,UAAM,0BAAY;AAAA,MACjC,UAAU,wBAAW;AAAA,MACrB,OAAO;AAAA,MACP,QAAQ,QAAQ,IAAI;AAAA,MACpB,GAAG;AAAA,IACL,CAAC;AAED,QAAI,CAAC,UAAU;AACb,2BAAO,MAAM,8BAA8B;AAC3C,aAAO;AAAA,IACT;AAGA,UAAM,EAAE,MAAM,YAAY,MAAM,IAAI,UAAM,wBAAa;AAAA,MACrD,OAAO,SAAS;AAAA,MAChB,aAAa,SAAS;AAAA,MACtB,UAAU,CAAC,EAAE,MAAM,UAAU,SAAS,OAAO,CAAC;AAAA,IAChD,CAAC;AAED,yBAAO,KAAK,GAAG,OAAO,eAAe,CAAC,6BAA6B;AAEnE,WAAO;AAAA,MACL,aAAa;AAAA,MACb,WAAW,OAAO,eAAe;AAAA,IACnC;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,KAAK;AAAA,EACrB;AACF;","names":[]}
@@ -11,3 +11,7 @@ Your completion should not exceed one sentence. Minimize the completion length i
  The user input will be provided in the next user message: { role: 'user', content: 'xxx' }
 
  You should return your autocompletion without any additional text or formatting.
+
+ **Application Context**
+
+ {{applicationContext}}
@@ -21,46 +21,49 @@ __export(autocomplete_exports, {
  autocomplete: () => autocomplete
  });
  module.exports = __toCommonJS(autocomplete_exports);
+ var import_logger = require('./../../../logger/index.cjs');
+ var import_ai = require("ai");
  var import_fs = require("fs");
  var import_path = require("path");
  var import_url = require("url");
- var import_logger = require('./../../../logger/index.cjs');
- var import_openai = require("openai");
+ var import_aiSdk = require('../aiSdk.cjs');
  const import_meta = {};
  const __dirname = (0, import_path.dirname)((0, import_url.fileURLToPath)(import_meta.url));
- const getFileContent = (relativeFilePath) => {
- const absolutePath = (0, import_path.join)(__dirname, relativeFilePath);
- const fileContent = (0, import_fs.readFileSync)(absolutePath, "utf-8");
- return fileContent;
+ const getFileContent = (filePath) => {
+ return (0, import_fs.readFileSync)((0, import_path.join)(__dirname, filePath), { encoding: "utf-8" });
  };
  const CHAT_GPT_PROMPT = getFileContent("./PROMPT.md");
  const autocomplete = async ({
  text,
- model,
- openAiApiKey,
- temperature,
- customPrompt
+ aiOptions
  }) => {
  try {
- const openai = new import_openai.OpenAI({
- apiKey: openAiApiKey ?? process.env.OPENAI_API_KEY
+ const prompt = CHAT_GPT_PROMPT.replace(
+ "{{applicationContext}}",
+ aiOptions?.applicationContext ?? ""
+ );
+ const aiConfig = await (0, import_aiSdk.getAIConfig)({
+ model: "gpt-4o-mini",
+ provider: import_aiSdk.AIProvider.OPENAI,
+ apiKey: process.env.OPENAI_API_KEY,
+ ...aiOptions
  });
- const prompt = customPrompt ?? CHAT_GPT_PROMPT;
- const chatCompletion = await openai.chat.completions.create({
- model: openAiApiKey ? model ?? "gpt-4o-mini" : "gpt-4o-mini",
- temperature: openAiApiKey ? temperature ?? 0.1 : 0.1,
+ if (!aiConfig) {
+ import_logger.logger.error("Failed to configure AI model");
+ return void 0;
+ }
+ const { text: newContent, usage } = await (0, import_ai.generateText)({
+ model: aiConfig.model,
+ temperature: aiConfig.temperature,
  messages: [
  { role: "system", content: prompt },
  { role: "user", content: text }
  ]
  });
- const newContent = chatCompletion.choices[0].message?.content;
- import_logger.logger.info(
- `${chatCompletion.usage?.total_tokens} tokens used in the request`
- );
+ import_logger.logger.info(`${usage?.totalTokens ?? 0} tokens used in the request`);
  return {
- autocompletion: newContent ?? "",
- tokenUsed: chatCompletion.usage?.total_tokens ?? 0
+ autocompletion: newContent,
+ tokenUsed: usage?.totalTokens ?? 0
  };
  } catch (error) {
  console.error(error);
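For completeness, a hypothetical call to the reworked `autocomplete`, whose options now reduce to the input `text` plus `aiOptions` (again assuming the function remains exported from the package entry point):

```ts
import { autocomplete } from '@intlayer/backend';

const result = await autocomplete({
  text: 'The pricing page lists three ', // text to complete
  aiOptions: { applicationContext: 'SaaS marketing site for a translation tool' },
});

console.log(result?.autocompletion); // single-sentence completion, per the prompt rules
console.log(result?.tokenUsed);
```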