@empiricalrun/test-gen 0.42.22 → 0.42.24
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/agent/browsing/index.d.ts.map +1 -1
- package/dist/agent/browsing/index.js +2 -2
- package/dist/agent/codegen/create-test-block.d.ts.map +1 -1
- package/dist/agent/codegen/create-test-block.js +1 -2
- package/dist/agent/codegen/fix-ts-errors.d.ts.map +1 -1
- package/dist/agent/codegen/fix-ts-errors.js +3 -2
- package/dist/agent/codegen/lexical-scoped-vars.d.ts.map +1 -1
- package/dist/agent/codegen/lexical-scoped-vars.js +1 -2
- package/dist/agent/codegen/skills-retriever.d.ts.map +1 -1
- package/dist/agent/codegen/skills-retriever.js +3 -2
- package/dist/agent/codegen/update-flow.d.ts.map +1 -1
- package/dist/agent/codegen/update-flow.js +5 -5
- package/dist/agent/codegen/use-skill.d.ts.map +1 -1
- package/dist/agent/codegen/use-skill.js +3 -2
- package/dist/agent/enrich-prompt/index.d.ts +3 -5
- package/dist/agent/enrich-prompt/index.d.ts.map +1 -1
- package/dist/agent/enrich-prompt/index.js +2 -13
- package/dist/agent/infer-agent/index.d.ts.map +1 -1
- package/dist/agent/infer-agent/index.js +1 -2
- package/dist/agent/master/element-annotation.d.ts.map +1 -1
- package/dist/agent/master/element-annotation.js +1 -2
- package/dist/agent/planner/run.d.ts.map +1 -1
- package/dist/agent/planner/run.js +1 -2
- package/dist/browser-injected-scripts/annotate-elements.js +9 -0
- package/package.json +6 -7
- package/vitest.config.ts +2 -3
- package/dist/prompts/lib/index.d.ts +0 -8
- package/dist/prompts/lib/index.d.ts.map +0 -1
- package/dist/prompts/lib/index.js +0 -118
- package/dist/prompts/lib/vitest-plugin.d.ts +0 -8
- package/dist/prompts/lib/vitest-plugin.d.ts.map +0 -1
- package/dist/prompts/lib/vitest-plugin.js +0 -20
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,21 @@
# @empiricalrun/test-gen

+## 0.42.24
+
+### Patch Changes
+
+- 005632b: fix: restore z-index after annotations
+- 4f8e042: chore: use handlebars compiler from llm package
+
+## 0.42.23
+
+### Patch Changes
+
+- b11f2cc: feat: move handlebars compiler to llm package
+- 853384f: chore: Replace getPrompt with compilePrompt for colocated prompts
+- Updated dependencies [b11f2cc]
+  - @empiricalrun/llm@0.9.33
+
## 0.42.22

### Patch Changes
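Note: the 0.42.23/0.42.24 entries describe a migration from the package's local `getPrompt` helper to `compilePrompt` exported by `@empiricalrun/llm`. A minimal sketch of the new call pattern, reconstructed from the compiled output further down in this diff (the template text and the `task` value are illustrative, not taken from the package):

```ts
import { compilePrompt } from "@empiricalrun/llm";

// Prompt templates are now colocated with the code as handlebars strings.
// {{#section "system"}} / {{#section "user"}} blocks become separate chat messages.
const promptTemplate = `{{#section "system"}}
You are a browser automation agent.
{{/section}}

{{#section "user"}}
Task:
{{task}}
{{/section}}`;

// Returns OpenAI-style chat messages: [{ role: "system", ... }, { role: "user", ... }]
const messages = compilePrompt(promptTemplate, { task: "Click the login button" });
```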

package/dist/agent/browsing/index.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/agent/browsing/index.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/agent/browsing/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,GAAG,EAAE,WAAW,EAAE,MAAM,mBAAmB,CAAC;AACpE,OAAO,KAAK,EAAE,oBAAoB,EAAE,MAAM,4BAA4B,CAAC;AACvE,OAAO,EAAE,IAAI,EAAE,MAAM,YAAY,CAAC;AAElC,OAAO,EAAE,iBAAiB,EAAE,MAAM,eAAe,CAAC;AAQlD,MAAM,MAAM,oBAAoB,GAAG,OAAO,CAAC,oBAAoB,CAAC,GAAG;IACjE,YAAY,CAAC,EAAE;QACb,iBAAiB,CAAC,EAAE,MAAM,EAAE,CAAC;KAC9B,CAAC;CACH,CAAC;AAEF,wBAAsB,6BAA6B,CAAC,EAClD,MAAM,EACN,IAAI,EACJ,OAAO,EACP,GAAG,EACH,OAAO,EACP,KAAK,GACN,EAAE;IACD,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,IAAI,CAAC;IACX,OAAO,EAAE,iBAAiB,CAAC;IAC3B,GAAG,EAAE,GAAG,CAAC;IACT,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,OAAO,EAAE,oBAAoB,CAAC;CAC/B,GAAG,OAAO,CAAC,MAAM,EAAE,GAAG,SAAS,CAAC,CAiEhC"}

package/dist/agent/browsing/index.js
CHANGED
@@ -1,9 +1,9 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.executeTaskUsingBrowsingAgent = void 0;
+const llm_1 = require("@empiricalrun/llm");
const constants_1 = require("../../constants");
const promptTemplate_0 = "{{#section \"system\"}}\nYou are a browser automation agent who is given a task to generate code for navigation and assertion. This task is your\ngoal and you must achieve it.\n\nYou will be provided with already executed actions and basis that you need to pick the next step to achieve the task.\nRemember that the goal must be achieved.\n\nYou will be provided with the web page snapshot in the form of Document Object Model. Based on the goal and available\ntool calls you need to pick the appropriate tool call.\n\nInstructions:\n- Take actions one at a time. Do not try to take multiple actions\n- You can respond with multiple assertions in one shot\n- Do not repeat the same actions again otherwise your response will be marked INVALID\n- Avoid repeating errors which we got while executing the last action\n- Stick to the task provided to you and mark the task done once the task is complete\n- Do not execute any action which is not mentioned in the task\n- Do not repeat actions which are already executed more than twice otherwise your response will be marked INVALID\n- Always refer to \"Executed actions\" before deciding your next action for completion of the task.\n- End the task done if all actions required for task are executed\n{{/section}}\n\n{{#section \"user\"}}\nTask:\n{{task}}\n\nCurrent page snapshot:\n{{pageSnapshot}}\n{{/section}}";
-const lib_1 = require("../../prompts/lib");
const reporter_1 = require("../../reporter");
const html_1 = require("../../utils/html");
const utils_1 = require("../utils");
@@ -29,7 +29,7 @@ async function executeTaskUsingBrowsingAgent({ action, page, actions, llm, optio
const pageSnapshot = (0, html_1.sanitizeHtml)(pageContent, options.htmlSanitize);
sanitizationSpan?.end({ output: { pageSnapshot } });
const promptSpan = browsingAgentSpan?.span({ name: "page-prompt" });
-const messages =
+const messages = (0, llm_1.compilePrompt)(promptTemplate_0, { pageSnapshot, task: action });
promptSpan?.end({ output: { messages } });
let completion;
completion = await (0, o1_completion_1.getO1Completion)({

package/dist/agent/codegen/create-test-block.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"create-test-block.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/create-test-block.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"create-test-block.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/create-test-block.ts"],"names":[],"mappings":"AAAA,OAAO,EAIL,WAAW,EACZ,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EACV,QAAQ,EACR,oBAAoB,EACrB,MAAM,4BAA4B,CAAC;AAcpC,wBAAsB,wBAAwB,CAAC,EAC7C,QAAQ,EACR,IAAI,EACJ,OAAO,EACP,KAAK,GACN,EAAE;IACD,QAAQ,EAAE,QAAQ,CAAC;IACnB,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,KAAK,CAAC,EAAE,WAAW,CAAC;CACrB,+BAqDA"}

package/dist/agent/codegen/create-test-block.js
CHANGED
@@ -7,7 +7,6 @@ const context_1 = require("../../bin/utils/context");
const web_1 = require("../../bin/utils/platform/web");
const constants_1 = require("../../constants");
const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software test engineer who is given a task to write an empty test block.\nBased on the inputs you need to create an empty playwright test block with correctly imported fixture.\n\nThe test will contain a test name which you will need to use to build the empty test case block.\n\nYou will be provided with current tests, fixtures and page object models for you to use and create test case block as\nper the task provided to you.\n\nBefore responding you need to ensure that the code change is minimal and the change is reusable across tests. You need\nto ensure the code follows DRY principle.\n\nHere is the list of current tests and fixtures:\n\n{{testFiles}}\n\nHere is the list of current page object models:\n\n{{pageFiles}}\n{{/section}}\n\n{{#section \"user\"}}\nFollowing is the test scenario for which you need to write the empty test case block:\ntest name:\n{{scenarioName}}\n\ntask:\ncreate an empty test case block for the following test steps:\n{{scenario}}\n\ntest file path: {{scenarioFile}}\n\n------\n\nYou also need to ensure that the empty test case block has a starting page to begin test.\n\nIn order to identify the right page with which the test should start, follow the steps:\n- based on the similarities with other test cases mentioned in the file, identify the right page fixture to be imported\n- Read the page fixture methods step by step. Identify whether the fixture handles navigating to a page.\n- Identify whether other tests using the page fixture had to add separate steps for navigation or not\n- Based on the above analysis there will be following cases and choose either for the given test scenario:\n-- Case 1: if the test case scenario provided inside the task mentions about page navigation, then use that page\nnavigation. skip other cases if this case is satisfied.\n-- Case 2: refer other test cases which import similar fixtures and infer the first page navigation of this test case.\nYou should prefer tests which are in the same file. Tests within same file have higher overlaps in first page\nnavigation.\n- Once the page fixture is decided, look for userContext fixture in files. If its available then add \"userContext\" to\nthe test case block\n\n\n\nFollow these instructions before responding with output:\n- Read the code line by line and achieve the task provided to you\n- Read the dependencies of the code block by scanning through file paths and file provided to you. refer the same file\npath while responding with update\n- Focus only on the test case provided and associated JS methods called from the test case.\n- Respond only with the new empty test case block to be created and nothing else.\n- DO NOT respond with any backticks or markdown syntax\n- If \"userContext\" fixture is available in fixtures file, ensure importing that fixture in the test case block.\n- Provide a reason based on the test steps provided to you on why you chose the fixture or page.goto statement. The\nreason should be one of the list steps provided to you and mention why the case was chosen\n{{/section}}";
-const lib_1 = require("../../prompts/lib");
const session_1 = require("../../session");
async function createEmptyTestCaseBlock({ testCase, file, options, trace, }) {
const logger = new logger_1.CustomLogger({ useReporter: false });
@@ -29,7 +28,7 @@ async function createEmptyTestCaseBlock({ testCase, file, options, trace, }) {
const promptSpan = trace?.span({
name: "build-create-empty-test-case-prompt",
});
-const prompt =
+const prompt = (0, llm_1.compilePrompt)(promptTemplate_0, {
testFiles: context.codePrompt,
pageFiles: context.pomPrompt,
scenarioName: testCase.name,

package/dist/agent/codegen/fix-ts-errors.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"fix-ts-errors.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/fix-ts-errors.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"fix-ts-errors.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/fix-ts-errors.ts"],"names":[],"mappings":"AAAA,OAAO,EAAsB,WAAW,EAAE,MAAM,mBAAmB,CAAC;AACpE,OAAO,KAAK,EACV,QAAQ,EACR,oBAAoB,EACrB,MAAM,4BAA4B,CAAC;AAGpC,OAAO,EAAE,YAAY,EAAE,MAAM,kBAAkB,CAAC;AAUhD,wBAAsB,8BAA8B,CAAC,EACnD,KAAK,EACL,MAA2B,EAC3B,IAAI,EACJ,OAAO,EACP,eAAe,EACf,QAAQ,EACR,OAAO,GACR,EAAE;IACD,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,MAAM,CAAC,EAAE,YAAY,CAAC;IACtB,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,CAAC;IAChB,eAAe,EAAE,MAAM,CAAC;IACxB,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,CAAC,EAAE,oBAAoB,CAAC;CAChC,iBAwEA"}

package/dist/agent/codegen/fix-ts-errors.js
CHANGED
@@ -9,6 +9,7 @@ const fs_extra_1 = __importDefault(require("fs-extra"));
const logger_1 = require("../../bin/logger");
const web_1 = require("../../bin/utils/platform/web");
const constants_1 = require("../../constants");
+
const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software engineer who is given a task to fix semantic and syntactical errors in a typescript file\n'{{scenarioFile}}' provided to you.\nYou will be provided with fixtures and page object models to use and fix errors.\n\nHere is the list of fixtures available:\n\n{{fixtureFiles}}\n\nHere is the list of current page object models available to you:\n\n{{pageFiles}}\n\nUse the above files to fix the errors.\n\nFollow these guidelines before responding with output\n- Ensure there are no type issues in the given {{scenarioFile}} file\n- For the given file respond with only the code\n- Do not respond with markdown syntax or backticks\n- Do not modify anything else apart from the code required to fix typescript error\n- Do not modify any other scenarios apart from the provided scenario name\n- Do not respond with any explanation. Respond only with the updated code.\n{{/section}}\n\n{{#section \"user\"}}\n'{{scenarioFile}}' with scenario name '{{scenarioName}}', has following typescript errors which you need to fix:\n{{errors}}\n\nHere is the content of the '{{scenarioFile}}':\n\n{{fileContent}}\n{{/section}}";
async function validateAndFixTypescriptErrors({ trace, logger = new logger_1.CustomLogger(), file, pomCode, nonSpecFileCode, testCase, options, }) {
const validateTypesSpan = trace?.span({ name: "detect-type-errors-in-file" });
logger.log("Validating types...");
@@ -34,14 +35,14 @@ async function validateAndFixTypescriptErrors({ trace, logger = new logger_1.Cus
logger.warn("Found few errors while validating types. Trying to fix errors...");
errors.forEach((e) => console.warn(e));
const promptSpan = trace?.span({ name: "fix-type-errors-prompt" });
-const instruction =
+const instruction = (0, llm_1.compilePrompt)(promptTemplate_0, {
pageFiles: pomCode || "",
fixtureFiles: nonSpecFileCode || "",
scenarioFile: file,
errors: errors,
fileContent: fileContent,
scenarioName: testCase.name,
-}
+});
promptSpan?.end({ output: { instruction } });
const llm = new llm_1.LLM({
trace,

package/dist/agent/codegen/lexical-scoped-vars.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"lexical-scoped-vars.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/lexical-scoped-vars.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"lexical-scoped-vars.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/lexical-scoped-vars.ts"],"names":[],"mappings":"AAAA,OAAO,EAAsB,WAAW,EAAE,MAAM,mBAAmB,CAAC;AACpE,OAAO,KAAK,EAAE,oBAAoB,EAAE,MAAM,4BAA4B,CAAC;AASvE,wBAAsB,oBAAoB,CAAC,EACzC,KAAK,EACL,IAAI,EACJ,cAAc,EACd,OAAO,GACR,EAAE;IACD,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,IAAI,EAAE,MAAM,CAAC;IACb,cAAc,EAAE,MAAM,CAAC;IACvB,OAAO,CAAC,EAAE,oBAAoB,CAAC;CAChC,qBAoDA"}

package/dist/agent/codegen/lexical-scoped-vars.js
CHANGED
@@ -4,12 +4,11 @@ exports.getLexicalScopedVars = void 0;
const llm_1 = require("@empiricalrun/llm");
const constants_1 = require("../../constants");
const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software engineer tasked with analysing Typescript code to identify all variables available in the lexical\nscope at a specific reference point within a file. You will be given a file that contains multiple Playwright tests or\npage object models, along with a reference point inside the file. Your goal is to evaluate the list of all variables\navailable in the lexical scope at that reference point.\n\nTo accomplish this, you need to evaluate the Abstract Syntax Tree (AST) and accumulate all variables that are in the\nlexical scope, which includes:\n1. Variables declared within the test before the reference point.\n2. Arguments of the function.\n3. Variables defined in the parent scope. Identify all variables available in the lexical scope at a specific execution\nreference point within a file, considering only those variables that have been declared and assigned prior to the\nexecution of this point in the code.\n4. Global variables defined in the file.\n\nBefore responding:\n- Ignore variables imported from the `\"./pages\"` path.\n- keep in mind temporal dead zone phenomenon before responding with variables\n{{/section}}\n\n{{#section \"user\"}}\nFile:\n{{testFile}}\n\nReference point:\n{{referencePoint}}\n{{/section}}";
-const lib_1 = require("../../prompts/lib");
async function getLexicalScopedVars({ trace, file, referencePoint, options, }) {
const fetchLexicalScopedVarsSpan = trace?.span({
name: "lexical-scoped-vars",
});
-const messages =
+const messages = (0, llm_1.compilePrompt)(promptTemplate_0, {
testFile: file || "",
referencePoint: referencePoint || "",
});

package/dist/agent/codegen/skills-retriever.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"skills-retriever.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/skills-retriever.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"skills-retriever.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/skills-retriever.ts"],"names":[],"mappings":"AAAA,OAAO,EAAsB,WAAW,EAAE,MAAM,mBAAmB,CAAC;AACpE,OAAO,KAAK,EACV,QAAQ,EACR,oBAAoB,EACrB,MAAM,4BAA4B,CAAC;AAmBpC,eAAO,MAAM,cAAc;cAMf,QAAQ;;;;;;;;;;IAqFnB,CAAC;AAEF,wBAAsB,oBAAoB,CAAC,EACzC,QAAQ,EACR,OAAO,EACP,KAAK,GACN,EAAE;IACD,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,KAAK,CAAC,EAAE,WAAW,CAAC;CACrB;;;;;;KA6BA"}

package/dist/agent/codegen/skills-retriever.js
CHANGED
@@ -10,6 +10,7 @@ const logger_1 = require("../../bin/logger");
const context_1 = require("../../bin/utils/context");
const fs_2 = require("../../bin/utils/fs");
const constants_1 = require("../../constants");
+
const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software test engineer who is given a goal to pick re-usable page object model methods for a given UI\nautomation test.\n\nYou will be provided with a test scenario as task and also the page object model methods available in the automation\ntest repository. The page object models acts as skills to execute a particular sub task of a given task.\n\nYou need to break down the task into sub tasks and identify which sub_task can be solved with the help of page object\nmodel methods. Once identified, you need to return with an usage API example for same. You need to pick methods only\nfrom the provided page object models.\n\nHere is the list of current page object models:\n\n{{pageFiles}}\n\n{{/section}}\n\n{{#section \"user\"}}\nFollowing is the test scenario for which you need to figure out the skills:\n\n**Task:**\n{{scenario}}\n\nBefore responding follow the instructions:\n- You need to break down the task into subtask and respond with the code of sub tasks for which there are methods\nexposed from page object models\n- Only use methods that are explicitly defined and exported in the provided page object models. Do not create, infer, or\nassume any methods or code that are not exported from the page object model files.\n- Do not respond with any methods or code that are not available in the list of page object models.\n- Ensure there are no type issues in the code generated.\n- Do not respond with markdown syntax or backticks.\n- Respond only with the code\n- Read steps one by one and generate the test code\n- Do not write any extra code than instructed in the steps\n- You need to respond with `<subtask></subtask>`, `<reason></reason>`, `<file_import_path></file_import_path>`, `\n<usage_example></usage_example>` and `<method_name></method_name>`\n- You should respond with methods exported from the page object models\n- Do not respond with any import statements.\n- Do not respond with sub_task for which there is no explicit method found\n- Always pick the page object model method matching the task provided. If there is no matching method, then ignore it.\nDo not attempt to generate or use any non existent methods for such instances.\n- Respond with the usage_example so that it can be directly copy pasted inside the test\n- Do not set the parameters for the method. Keep it as is with the interface parameters\n{{/section}}";
const utils_1 = require("./utils");
const fetchPomSkills = async ({ testCase, pomFiles, options, trace, }) => {
const fetchSkillsUsingPOMFilesSpan = trace?.span({
@@ -22,11 +23,11 @@ const fetchPomSkills = async ({ testCase, pomFiles, options, trace, }) => {
const promptSpan = fetchSkillsUsingPOMFilesSpan?.span({
name: "fetch-pom-skills-prompt",
});
-const prompt =
+const prompt = (0, llm_1.compilePrompt)(promptTemplate_0, {
pageFiles: pomFiles,
scenarioName: testCase.name,
scenario: testCase.steps.join("\n"),
-}
+});
promptSpan?.end({ output: { prompt } });
const llm = new llm_1.LLM({
trace: fetchSkillsUsingPOMFilesSpan,

package/dist/agent/codegen/update-flow.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"update-flow.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/update-flow.ts"],"names":[],"mappings":"AAAA,OAAO,EAKL,WAAW,EACZ,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EACV,QAAQ,EACR,oBAAoB,EACrB,MAAM,4BAA4B,CAAC;AAGpC,OAAO,EAAE,0BAA0B,EAAE,MAAM,4BAA4B,CAAC;
+
{"version":3,"file":"update-flow.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/update-flow.ts"],"names":[],"mappings":"AAAA,OAAO,EAKL,WAAW,EACZ,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EACV,QAAQ,EACR,oBAAoB,EACrB,MAAM,4BAA4B,CAAC;AAGpC,OAAO,EAAE,0BAA0B,EAAE,MAAM,4BAA4B,CAAC;AAexE,OAAO,EAAE,UAAU,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAQtD,wBAAsB,2BAA2B,CAAC,EAChD,QAAQ,EACR,eAAe,EACf,SAAS,EACT,SAAS,EACT,YAAY,EACZ,OAAO,EACP,KAAK,GACN,EAAE;IACD,QAAQ,EAAE,QAAQ,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,YAAY,EAAE,MAAM,CAAC;IACrB,eAAe,EAAE,MAAM,CAAC;IACxB,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,OAAO,CAAC,EAAE,oBAAoB,CAAC;CAChC,GAAG,OAAO,CAAC;IACV,MAAM,EAAE,0BAA0B,EAAE,CAAC;IACrC,aAAa,EAAE,MAAM,CAAC;IACtB,WAAW,EAAE,UAAU,EAAE,CAAC;CAC3B,CAAC,CAkDD;AAED,wBAAsB,UAAU,CAC9B,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,oBAAoB,GAAG,SAAS,EACzC,OAAO,GAAE,OAAc,EACvB,QAAQ,GAAE,OAAc,EACxB,KAAK,CAAC,EAAE,WAAW,GAClB,OAAO,CAAC,eAAe,EAAE,CAAC,CA+F5B;AAED,wBAAsB,kCAAkC,CAAC,EACvD,SAAS,EACT,SAAS,EACT,QAAQ,EACR,YAAY,EACZ,OAAO,EACP,KAAK,GACN,EAAE;IACD,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;IAClB,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,YAAY,EAAE,MAAM,CAAC;CACtB,mBAmGA;AAED,wBAAsB,qBAAqB,CAAC,EAC1C,QAAQ,EACR,IAAI,EACJ,OAAO,EACP,KAAK,EACL,aAAoB,GACrB,EAAE;IACD,QAAQ,EAAE,QAAQ,CAAC;IACnB,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,aAAa,CAAC,EAAE,OAAO,CAAC;CACzB,GAAG,OAAO,CAAC,eAAe,EAAE,CAAC,CAqD7B"}

package/dist/agent/codegen/update-flow.js
CHANGED
@@ -12,6 +12,8 @@ const context_1 = require("../../bin/utils/context");
const fs_1 = require("../../bin/utils/fs");
const web_1 = require("../../bin/utils/platform/web");
const constants_1 = require("../../constants");
+
const promptTemplate_0 = "{{#section \"system\"}}\n\nYou are a software test engineer who is given a task to update a test case. You will be provided with steps of a test\ncase scenario and you are given a snippet with interface await createTest(task, playwright_page_instance).\n\nYou need to analyze the request and place createTest snippet at the correct position and pass on the steps to this\n`createTest` method with the correct page instance.\n\nYou will be provided with current tests, fixtures and page object models for you to use and update code as per the\ntask provided to you.\nYou need to respond with file path and updated code block inside the file.\n\nHere is the list of current tests and fixtures:\n\n{{testFiles}}\n\n\nHere is the list of current page object models:\n\n{{pageFiles}}\n{{/section}}\n\n\n{{#section \"user\"}}\nFollowing is the test scenario for which you need to update the test:\ntest name:\n{{scenarioName}}\n\n\nTask:\n{{scenarioSteps}}\n\n\nFollow these steps to complete the task:\n\n1. Determine the task's intent:\n- The default intent is **\"add steps\"**.\n- If the task explicitly mentions (using words like \"replace\", \"remove\", \"delete\") that existing test steps should\nbe replaced or deleted, the intent is **\"update steps\"**. Identify the specific steps to be replaced or removed.\n- Note:\n- Do not assume that providing new or different steps implies an intent to update or replace existing steps. Unless\nthe task explicitly instructs to replace or remove existing code, interpret the intent as adding new steps to the\nexisting test.\n\n2. Identify the test block that requires updating.\n\n3. Place the `createTest` snippet:\n- Insert the `createTest` snippet at the location determined by the task, and pass the Playwright page instance.\n- If the intent is \"add\", do not alter the existing test code; simply append the `createTest` snippet based on any\nprovided location hints.\n- If the task includes location hints that don't match steps within the test, check dependent methods called from\nthe test for the update.\n- If no location hint is provided, place the `createTest` snippet at the end of the test block.\n- Even if the task includes steps that overlap with or are similar to existing steps, do not modify the existing\ncode. Unless the task explicitly instructs to replace or remove existing code, interpret the intent as adding new\nsteps to the existing test.\n\n4. Strip location hints from the task:\n- Remove any location hints (e.g., \"replace the current assertion and\") before passing the task to the `createTest`\nmethod.\n\nSubmission Guidelines:\n\n- Focus only on the provided test case and any related page object model methods used in the test.\n- Do not modify or add code within the `createTest` snippet.\n- Do not update or modify any other code apart from adding `createTest` code snippet.\n- Since the response will be used for search-and-replace operations, always provide the immediate parent AST node\nfor any code updates.\n- Include the full test block if any part of it is updated, preserving all unchanged code.\n- Do not use markdown syntax or backticks.\n- Respond using the following XML format:\n<reason_for_intent></reason_for_intent>\n<intent></intent>\n<location_of_update></location_of_update>\n<file_path></file_path>\n<old_code_block></old_code_block>\n<new_code_block></new_code_block>\n<change></change>\n\n- Each `<old_code_block>` and `<new_code_block>` should contain only one test block or page object model method\n definition. 
Provide separate blocks for multiple updates.\n - The `<change></change>` tag should also mention the file path being updated.\n - There should be only one `createTest` block in the `new_code_block`. The `createTest` method should be passed\n with entire task. Do not split the task while forwarding it to `createTest`.\n - `<new_code_block>` code snippet should be syntactically correct.\n - The code_block should not contain any import statements.\n {{/section}}";
+
const promptTemplate_1 = "{{#section \"system\"}}\nYou are a software test engineer who is given an objective to update test basis the task provided.\nYou will be provided with a test name, test and test file path.\n\nYou will be provided with current tests, fixtures and page object models for you to use and update code as per the task\nprovided to you. You need to respond with file path and updated code block inside the file.\n\nBefore responding you need to ensure that the code change is minimal and the change is reusable across tests. You need\nto ensure the code follows DRY principle.\n\nHere is the list of current tests and fixtures:\n\n{{testFiles}}\n\n\nHere is the list of current page object models:\n\n{{pageFiles}}\n\n{{/section}}\n\n{{#section \"user\"}}\nFollowing are the test details and the task to complete your objective:\nTest name:\n{{scenarioName}}\n\nTest case:\n{{currentScenarioCodeBlock}}\n\nTask:\n{{scenarioSteps}}\n\nIn order to execute the task:\n- Think step by step and first identify current test block which needs update and the methods which the current test\nblock depend on.\n- The task will demand changes in the current test case or the methods it depend on. Based on the task, identify list of\nfile paths which need change, the reason for change and the code change they need.\n- Next make changes to code blocks in each file paths.\n\n------\n\nFollow these instructions before responding with output:\n- Read the code line by line and ensure that achieve the task provided to you\n- Read the dependencies of the code block by scanning through file paths and file provided to you. refer the same file\npath while responding with output.\n- Focus only on the test case provided and associated JS methods called from the test case.\n- Since the response will be used to search and replace blocks, always respond with output which includes the full\nlexical scope surrounding the modified code.\n- If there are any updates inside test code block, ensure responding with full test block with unchanged code as well\n- Each code block should contain edits to only one code block in file path\n- DO NOT respond with any backticks or markdown syntax\n- Respond only with file path where the code block to be updated is present, old code block, new code block and a one\nliner reason for the change\n- Respond with <file_path></file_path>, <old_code_block></old_code_block>, <new_code_block></new_code_block> and\n<change></change> as xml tags\n- The reason for change should adhere to coding principles provided and review if the updated code is present in the\nfile path mentioned\n- The code change should belong to the right file path\n- The response must start with <file_path>\n {{/section}}";
const session_1 = require("../../session");
const test_update_feedback_1 = require("./test-update-feedback");
const utils_1 = require("./utils");
@@ -31,8 +33,7 @@ async function getUpdateTestCodeCompletion({ testCase, testFileContent, testFile
content: testFileContent,
suites: testCase?.suites || [],
});
-const
-const prompt = await (0, llm_1.getPrompt)(promptName, {
+const prompt = (0, llm_1.compilePrompt)(promptTemplate_1, {
testFiles: testFiles,
pageFiles: pageFiles,
scenarioName,
@@ -174,17 +175,16 @@ async function getAppendCreateTestBlockCompletion({ testFiles, pageFiles, testCa
testFilePath,
},
});
-const promptName = "append-create-test-block";
const promptSpan = trace?.span({
name: "append-create-test-block-prompt",
});
-const instruction =
+const instruction = (0, llm_1.compilePrompt)(promptTemplate_0, {
testFiles: testFiles,
pageFiles: pageFiles,
scenarioName: testCase.name,
scenarioSteps: testCase.steps.join("\n"),
scenarioFile: testFilePath,
-}
+});
promptSpan?.end({ output: { instruction } });
const [userInstruction] = instruction.filter((s) => s.role === "user");
const [systemInstruction] = instruction.filter((s) => s.role === "system");

package/dist/agent/codegen/use-skill.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"use-skill.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/use-skill.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"use-skill.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/use-skill.ts"],"names":[],"mappings":"AAAA,OAAO,EAAsB,WAAW,EAAE,MAAM,mBAAmB,CAAC;AAWpE,wBAAsB,sBAAsB,CAAC,EAC3C,IAAI,EACJ,iBAAiB,EACjB,oBAAoB,EACpB,gBAAgB,EAChB,qBAAqB,EACrB,KAAK,GACN,EAAE;IACD,IAAI,EAAE,MAAM,CAAC;IACb,iBAAiB,EAAE,MAAM,CAAC;IAC1B,oBAAoB,EAAE,MAAM,CAAC;IAC7B,gBAAgB,EAAE,MAAM,CAAC;IACzB,qBAAqB,EAAE,MAAM,CAAC;IAC9B,KAAK,CAAC,EAAE,WAAW,CAAC;CACrB,mBA6CA"}

package/dist/agent/codegen/use-skill.js
CHANGED
@@ -4,6 +4,7 @@ exports.generateSkillUsageCode = void 0;
const llm_1 = require("@empiricalrun/llm");
const logger_1 = require("../../bin/logger");
const constants_1 = require("../../constants");
+
const promptTemplate_0 = "{{#section \"system\"}}\nYou are given a sample method usage example, task and object variables available in the scope.\nBased on the task and object variables you need to generate the correct method call to be used in the scope.\n\nYou will also be provided with method definition and interface. Based on the definition you need to ensure creating\nvariables if the method has return value.\n\nFor page object use variable \"{{pageVariableName}}\"\n\n{{/section}}\n\n{{#section \"user\"}}\nTask:\n{{task}}\n\nMethod usage example:\n{{sampleUsageMethod}}\n------\n\nMethod definition:\n\n{{skillMethodDefinition}}\n\n------\nVariables object:\n{{scopeVariablesMapStr}}\n\nVariables object contain variable name and respective value available in scope.\n\nBefore responding ensure following the instructions:\n- Based on the task, write code using the method to achieve the task. If there are any return value of any methods,\ncreate `const` variables and assign the return values. The variable names should be inspired by the task assigned to you\n- Follow DRY principles and all good practices for typescript\n- Extract object properties and rename them if the return values is an object\n- Do not respond with any backticks or markdown syntax\n\n{{/section}}";
async function generateSkillUsageCode({ task, sampleUsageMethod, scopeVariablesMapStr, pageVariableName, skillMethodDefinition, trace, }) {
const logger = new logger_1.CustomLogger();
logger.log(`Generating code using skill usage example: ${sampleUsageMethod}`);
@@ -19,13 +20,13 @@ async function generateSkillUsageCode({ task, sampleUsageMethod, scopeVariablesM
const promptSpan = skillUsageSpan?.span({
name: "apply-skills-prompt",
});
-const prompt =
+const prompt = (0, llm_1.compilePrompt)(promptTemplate_0, {
task,
sampleUsageMethod,
scopeVariablesMapStr,
pageVariableName,
skillMethodDefinition,
-}
+});
promptSpan?.end({ output: prompt });
const llm = new llm_1.LLM({
trace: skillUsageSpan,

package/dist/agent/enrich-prompt/index.d.ts
CHANGED
@@ -1,14 +1,12 @@
import { TraceClient } from "@empiricalrun/llm";
-
+export declare const enrichPromptWithFailingLine: ({ trace, testBlock, testFilePath, suggestionForFix, }: {
testBlock: string;
testFilePath: string;
suggestionForFix: string;
-trace?: TraceClient;
-}
-export declare const enrichPromptWithFailingLine: ({ trace, testBlock, testFilePath, suggestionForFix, }: ArgsT) => Promise<{
+trace?: TraceClient | undefined;
+}) => Promise<{
output: string;
is_user_message_enriched: boolean;
reason_for_output: string;
}>;
-export {};
//# sourceMappingURL=index.d.ts.map

package/dist/agent/enrich-prompt/index.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/agent/enrich-prompt/index.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/agent/enrich-prompt/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAsB,WAAW,EAAE,MAAM,mBAAmB,CAAC;AAmCpE,eAAO,MAAM,2BAA2B;eAM3B,MAAM;kBACH,MAAM;sBACF,MAAM;;;YAGhB,MAAM;8BACY,OAAO;uBACd,MAAM;EA0D1B,CAAC"}

package/dist/agent/enrich-prompt/index.js
CHANGED
@@ -3,8 +3,8 @@ Object.defineProperty(exports, "__esModule", { value: true });
exports.enrichPromptWithFailingLine = void 0;
const llm_1 = require("@empiricalrun/llm");
const constants_1 = require("../../constants");
+
const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software QA engineer who is asked to update an automated browser test written that failed during a test run.\n\nYou will be provided with the details of the failure and then a user prompt that details what needs to be changed.\n\n# Details of the failure\n### Failing line:\n{{testBlock}}\n\n### Failing test file path:\n{{testFilePath}}\n\nYour goal is to enrich the user message with \"Failing line\" and \"Failing test file path\", if the user's message contains\nrequests like correcting or replacing failing lines or statements.\n\nTo fulfil your goal, follow these steps:\n1. Identify the user intent and check if the user says anything similar to:\n- \"Replace the failing line\"\n- \"Replace the error statement\"\n- \"Replace the failing statement\"\n- \"Replace the failed user action\"\n2. The user message intent can also be to modify things in or around the \"failing line\". Lookout for this scenario as\nwell to enrich the user message\n3. If such keywords are present, only then enrich the user message by inserting \"Failing line\" and \"Failing test file\npath\" at appropriate locations.\n\nBefore submitting your response, ensure the following:\n- Do not generate code for the user prompt. Your task is just to enrich the user message\n- Enrich the user message ONLY if any of these keywords are present - \"failing line\", \"error statement\", \"failing\nstatement\", \"failed user action\"\n- Do not include any markdown syntax or backticks\n- Respond with <reason_for_output></reason_for_output>, <is_user_message_enriched></is_user_message_enriched> and\n<output></output> as xml tags\n- is_user_message_enriched can be either true or false\n- output should follow the format: \"Replace the line <code> with <whatever is required to be done> in <file_path>\"\n{{/section}}\n\n{{#section \"user\"}}\nUser message:\n{{userMessage}}\n{{/section}}";
const utils_1 = require("./utils");
-const promptName = "generate-self-heal-requested-change";
const responseFormat = {
type: "json_schema",
json_schema: {
@@ -31,7 +31,6 @@ const responseFormat = {
},
},
};
-// TODO: fix the format
const enrichPromptWithFailingLine = async ({ trace, testBlock, testFilePath, suggestionForFix, }) => {
let output = {
output: suggestionForFix,
@@ -47,20 +46,10 @@ const enrichPromptWithFailingLine = async ({ trace, testBlock, testFilePath, sug
suggestionForFix,
},
});
-const instructions =
+const instructions = (0, llm_1.compilePrompt)(promptTemplate_0, {
testBlock,
testFilePath,
userMessage: suggestionForFix,
-}, 26);
-enrichedPromptSpan?.event({
-name: "get prompt",
-input: {
-promptName,
-testBlock,
-testFilePath,
-userMessage: suggestionForFix,
-},
-output: { instructions },
});
const llm = new llm_1.LLM({
trace,

package/dist/agent/infer-agent/index.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/agent/infer-agent/index.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/agent/infer-agent/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAIL,WAAW,EACZ,MAAM,mBAAmB,CAAC;AAC3B,OAAO,EAAE,KAAK,EAAE,oBAAoB,EAAE,MAAM,4BAA4B,CAAC;AAYzE,wBAAsB,mBAAmB,CAAC,EACxC,IAAI,EACJ,OAAO,EACP,KAAK,GACN,EAAE;IACD,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,KAAK,CAAC,EAAE,WAAW,CAAC;CACrB,GAAG,OAAO,CAAC;IAAE,QAAQ,EAAE,KAAK,CAAA;CAAE,CAAC,CAmE/B"}

package/dist/agent/infer-agent/index.js
CHANGED
@@ -4,7 +4,6 @@ exports.inferAgentBasedTask = void 0;
const llm_1 = require("@empiricalrun/llm");
const constants_1 = require("../../constants");
const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software test engineer specializing in Playwright end-to-end tests. You are given a task which is a part of an\nend-to-end test scenario. The task may involve updating an existing end-to-end test case or writing a new test case from\nscratch. Tests involve user interactions (e.g. click on element) or other actions supported by Playwright (e.g.\nintercept network requests)\n\nYour objective is to identify whether the task requires accessing a web browser or not.\n\nTo fulfill your objective, answer the following questions:\n\n1. Does it require you to interact with a UI element in the browser? Examples of interactions are click, fill, type, key\npress, assert visibility of the element. Actions that interact with network requests are not UI element interactions.\n\n2. Is the locator of this UI element given to you in the task? Locators look like `getByText(...)`, `getByTestId(...)`\nand other locator methods in Playwright\n\n3. Decide if you need a browser: if you need to interact with a UI element AND you are NOT given the locator for that\nelement, you WILL NEED a browser.\n\n4. If you NEED a browser, then respond with answer as \"master\", otherwise respond with \"code\"\n\n\n# Example 1\n## Input\nTask:\nin this test don't delete the agent and remove steps after that\n\n## Output\n- ui_interaction_to_be_performed: There is no interaction here\n- ui_element_to_interact_with: No element specified\n- has_locator_for_that_element: No element specified\n- reasoning_for_browser_required: No interaction hence browser is not required\n- answer: code\n\n# Example 2\n## Input\nTask:\nin the swapfast test, replace the selectTokenForSwap method. Instead we will do this\\nclick on token button - this will\nshow a drawer\\nEnter usd in the search field that shows up in the drawer\\nSelect USDC.axl for Cosmos Hub - very\nimportant to choose this instead of USDC.axl on Osmosis\n\n## Output\n- ui_interaction_to_be_performed: Click on token button\n- ui_element_to_interact_with: Token button\n- has_locator_for_that_element: false\n- reasoning_for_browser_required: Task requires interacting with a UI element and identifying its locator which needs a\nbrowser\n- answer: master\n{{/section}}\n\n{{#section \"user\"}}\nTask:\n{{task}}\n{{/section}}";
-const lib_1 = require("../../prompts/lib");
const session_1 = require("../../session");
const session = (0, session_1.getSessionDetails)();
async function inferAgentBasedTask({ task, options, trace, }) {
@@ -22,7 +21,7 @@ async function inferAgentBasedTask({ task, options, trace, }) {
options,
},
});
-const messages = (0,
+const messages = (0, llm_1.compilePrompt)(promptTemplate_0, { task });
const llm = new llm_1.LLM({
trace: inferAgentSpan,
provider: options?.modelProvider || constants_1.DEFAULT_MODEL_PROVIDER,

package/dist/agent/master/element-annotation.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"element-annotation.d.ts","sourceRoot":"","sources":["../../../src/agent/master/element-annotation.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"element-annotation.d.ts","sourceRoot":"","sources":["../../../src/agent/master/element-annotation.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,GAAG,EAAE,WAAW,EAAE,MAAM,mBAAmB,CAAC;AACpE,OAAO,EAAE,IAAI,EAAE,MAAM,YAAY,CAAC;AAQlC,OAAO,EAAE,oBAAoB,EAAE,MAAM,aAAa,CAAC;AAEnD,OAAO,EAAE,UAAU,EAAE,MAAM,qBAAqB,CAAC;AA0DjD,wBAAsB,oBAAoB,CAAC,EACzC,kBAAkB,EAClB,WAAW,EACX,mBAAmB,EACnB,KAAK,EACL,GAAG,EACH,OAAO,EACP,UAAU,GACX,EAAE;IACD,kBAAkB,EAAE,MAAM,CAAC;IAC3B,WAAW,EAAE,MAAM,CAAC;IACpB,mBAAmB,EAAE,MAAM,CAAC;IAC5B,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,GAAG,CAAC,EAAE,GAAG,CAAC;IACV,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,UAAU,EAAE,oBAAoB,CAAC;CAClC,GAAG,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,CA8C9B;AAED,MAAM,MAAM,oBAAoB,GAAG;IACjC,UAAU,EACN,KAAK,GACL,UAAU,CAAC,IAAI,GACf,UAAU,CAAC,WAAW,GACtB,UAAU,CAAC,MAAM,CAAC;IACtB,aAAa,CAAC,EAAE,MAAM,GAAG,SAAS,CAAC;CACpC,CAAC;AAEF,wBAAsB,iBAAiB,CAAC,EACtC,IAAI,EACJ,UAAU,EACV,OAAO,GACR,EAAE;IACD,IAAI,EAAE,IAAI,CAAC;IACX,UAAU,EAAE,oBAAoB,CAAC;IACjC,OAAO,EAAE,oBAAoB,CAAC;CAC/B,GAAG,OAAO,CAAC;IACV,cAAc,EAAE;QAAE,SAAS,EAAE,MAAM,CAAC;QAAC,IAAI,EAAE,MAAM,CAAA;KAAE,EAAE,CAAC;IACtD,gBAAgB,EAAE,MAAM,CAAC;IACzB,uBAAuB,EAAE,MAAM,CAAC;CACjC,CAAC,CAqDD"}

package/dist/agent/master/element-annotation.js
CHANGED
@@ -4,7 +4,6 @@ exports.getAnnotationKeys = exports.getElementAnnotation = void 0;
const llm_1 = require("@empiricalrun/llm");
const constants_1 = require("../../constants");
const promptTemplate_0 = "{{#section \"system\"}}\nYou are an expert in describing the images and it's content. You need to provide the descriptions of annotated elements\npresent in the image.\n\nYou will be provided with an annotated screenshot where interact-able / clickable elements are annotated. The annotation\nis done by drawing a red box around the element and a small yellow box on it which contains unique element id.\n\nYou are given a Annotations which contains list of unique element id and description of the element separated by \":\".\n\nYou are also given the description of the element on which the action needs to be taken. The description includes\ninformation about how the element looks, it's position etc.\n\nYour task is to provide the annotation of the element on which the action needs to be performed based on the element\ndescription.\n\nFollow steps to fulfil your task:\n- Using the list of all element Ids provided to you, map all the element Ids on the annotated screen and describe each\nelement.\n- For describing each element Id\n-- iterate over each element Id in annotation list\n-- check if the description is already present for the element Id in the Annotation provided to you. If present skip\ndescribing it and use it as is.\n-- if the description is NA, then identify the element in the annotated screenshot and describe it using the image or\nicon enclosed in the element.\n- Respond with the mapped element Ids as \"enriched_annotations\"\n- Based on the description provided to you and the enriched annotations, first identify the element Id whose description\nmatches the task provided\n\nNote:\n- Ensure providing the description of all the elements in the list.\n- Don't update the description if its already present in the given annotations\n- Replace all the \"NA\" with description of the element. Its position, how does it look like etc.\n- There should be no \"NA\" present in any of the element description\n{{/section}}\n\n{{#section \"user\"}}\nElement description:\n{{elementDescription}}\n\nAnnotations:\n{{annotations}}\n\n{{image annotatedScreenshot}}\n{{/section}}";
-const lib_1 = require("../../prompts/lib");
const utils_1 = require("../utils");
const annotationToolAction = {
name: "element_annotation",
@@ -67,7 +66,7 @@ async function getElementAnnotation({ elementDescription, annotations, annotated
preference,
},
});
-const messages = (0,
+const messages = (0, llm_1.compilePrompt)(promptTemplate_0, {
elementDescription,
annotations,
annotatedScreenshot,

package/dist/agent/planner/run.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"run.d.ts","sourceRoot":"","sources":["../../../src/agent/planner/run.ts"],"names":[],"mappings":"AAAA,OAAO,
+
{"version":3,"file":"run.d.ts","sourceRoot":"","sources":["../../../src/agent/planner/run.ts"],"names":[],"mappings":"AAAA,OAAO,EAAsB,WAAW,EAAE,MAAM,mBAAmB,CAAC;AA4BpE,wBAAsB,QAAQ,CAAC,EAC7B,IAAI,EACJ,QAAQ,EACR,KAAK,GACN,EAAE;IACD,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;IACjB,KAAK,CAAC,EAAE,WAAW,CAAC;CACrB,mBAyCA"}

package/dist/agent/planner/run.js
CHANGED
@@ -3,7 +3,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
exports.planTask = void 0;
const llm_1 = require("@empiricalrun/llm");
const context_1 = require("../../bin/utils/context");
-const lib_1 = require("../../prompts/lib");
const promptTemplate_0 = "{{#section \"system\"}}\nYou are an expert software engineer in test. You are given a task to provide a high level plan to create a test for a\ngiven scenario.\n\nYou will be provided with already added tests and page object object models which you can use to plan out how to write\nthe test.\n\nThe expected plan should be a list of bullet points and each bullet point is a step in the test.\nYou will be provided with app knowledge as well, which can help you groom the steps in the tests.\n{{/section}}\n\n{{#section \"user\"}}\nApp knowledge\n{{appKnowledge}}\n\n-----\n\nCurrent tests and page object model references\n\n{{fileContext}}\n\n------\n\nTask:\n\n{{task}}\n\n------\n\nFollow the steps to create a test plan:\n- create sub tasks\n- read the task step by step and create sub tasks from the given task\n- Ensure no new steps are added which are not mentioned in the task\n- enriched sub tasks\n- Read the app knowledge provided to you and enrich the verified sub tasks based on the provided information in app\nknowledge.\n- Fill in the missing information in the verified sub tasks based on the app knowledge.\n- based on the type of task, add sub tasks to the verified sub tasks based on the matching criteria\n- final plan:\n- Once all the subtasks are enriched, list all the sub tasks as bullet points\n- Each bullet point should be one of the following actions:\n- Open page, Click on, Fill in, Assert, hover on, press, extract textContent\n- Do not respond with points which do not start with above actions.\n\nFollow the steps before responding\n- The steps should only contain bullet points on list of steps for the test\n- Do not add any other assertion which is not mentioned in the task or app knowledge\n- Respond with <create_sub_tasks></create_sub_tasks>\n<enriched_sub_tasks></enriched_sub_tasks> and <final_plan></final_plan>\n- The final plan should not mention reference to the knowledge base used to generate it\n{{/section}}";
function extractTestPlan(input) {
const result = {
@@ -41,7 +40,7 @@ ${pomPrompt}
},
});
const appKnowledge = await (0, context_1.fetchAppKnowledge)();
-const messages = (0,
+const messages = (0, llm_1.compilePrompt)(promptTemplate_0, {
appKnowledge,
fileContext,
task,

package/dist/browser-injected-scripts/annotate-elements.js
CHANGED
@@ -22,6 +22,7 @@ function annotateElementsWithPreference({

const MAX_Z_INDEX_FOR_DOM = 10000;
const Z_INDEX_FOR_MARKERS = 99999;
+const originalZIndices = new Map(); // Store original z-indices

const document = window.document;
const annotationsMap = {};
@@ -568,6 +569,12 @@ function annotateElementsWithPreference({
annotation.node.style.boxShadow = "none";
});

+// Restore original z-indices
+originalZIndices.forEach((originalZ, element) => {
+element.style.zIndex = originalZ;
+});
+originalZIndices.clear();
+
annotationsContainer.parentNode.removeChild(annotationsContainer);
annotationsContainer = null;
}
@@ -580,6 +587,8 @@ function annotateElementsWithPreference({
const z = window.getComputedStyle(el).getPropertyValue("z-index");
if (z !== "auto" && !isNaN(parseInt(z, 10))) {
zIndexes.push(parseInt(z, 10));
+// Store original z-index
+originalZIndices.set(el, z);
}
});
if (zIndexes.length === 0) {
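Note: the three hunks above implement the `005632b` changeset ("restore z-index after annotations"). Condensed into a standalone TypeScript sketch with hypothetical function names (only the Map usage mirrors the diff):

```ts
// Remember each element's computed z-index before the annotation overlay bumps it,
// then replay the saved values and clear the map when annotations are removed.
const originalZIndices = new Map<HTMLElement, string>();

function rememberZIndex(el: HTMLElement) {
  const z = window.getComputedStyle(el).getPropertyValue("z-index");
  if (z !== "auto" && !isNaN(parseInt(z, 10))) {
    originalZIndices.set(el, z);
  }
}

function restoreZIndices() {
  originalZIndices.forEach((originalZ, element) => {
    element.style.zIndex = originalZ;
  });
  originalZIndices.clear();
}
```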
package/package.json
CHANGED
@@ -1,6 +1,6 @@
{
"name": "@empiricalrun/test-gen",
-"version": "0.42.
+"version": "0.42.24",
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
@@ -56,14 +56,13 @@
"fs-extra": "^11.2.0",
"google-auth-library": "^9.10.0",
"google-spreadsheet": "^4.1.2",
-"handlebars": "^4.7.8",
"ignore": "^5.3.1",
"lodash.isequal": "^4.5.0",
"md5": "^2.3.0",
"mime": "^4.0.4",
"minimatch": "^10.0.1",
"nanoid": "^5.0.7",
-"openai": "4.
+"openai": "4.67.0",
"picocolors": "^1.0.1",
"prettier": "^3.2.5",
"remove-markdown": "^0.5.5",
@@ -72,9 +71,9 @@
"ts-morph": "^23.0.0",
"tsx": "^4.16.2",
"typescript": "^5.3.3",
-"@empiricalrun/llm": "^0.9.
-"@empiricalrun/
-"@empiricalrun/
+"@empiricalrun/llm": "^0.9.33",
+"@empiricalrun/r2-uploader": "^0.3.8",
+"@empiricalrun/reporter": "^0.23.1"
},
"devDependencies": {
"@playwright/test": "1.47.1",
@@ -87,7 +86,7 @@
"js-levenshtein": "^1.1.6",
"playwright": "1.47.1",
"ts-patch": "^3.3.0",
-"@empiricalrun/shared-types": "0.0.
+"@empiricalrun/shared-types": "0.0.2"
},
"scripts": {
"dev": "tspc --build --watch",
package/vitest.config.ts
CHANGED
@@ -1,8 +1,7 @@
+import { handlebarsLoaderForVitest } from "@empiricalrun/llm";
import { config as dotenvConfig } from "dotenv";
import { defineConfig } from "vitest/config";

-import { handlebarsLoader } from "./src/prompts/lib/vitest-plugin";
-
dotenvConfig({ path: [".env.local", ".env"] });

export default defineConfig({
@@ -12,5 +11,5 @@ export default defineConfig({
globals: true,
},
assetsInclude: ["**/*.handlebars"],
-plugins: [
+plugins: [handlebarsLoaderForVitest],
});

package/dist/prompts/lib/index.d.ts
DELETED
@@ -1,8 +0,0 @@
-import { LLMProvider } from "@empiricalrun/llm";
-import OpenAI from "openai";
-type PromptOptions = {
-modelProvider?: LLMProvider;
-};
-export declare function compilePrompt<T extends object>(promptTemplate: string, params: T, options?: PromptOptions): OpenAI.Chat.Completions.ChatCompletionMessageParam[];
-export {};
-//# sourceMappingURL=index.d.ts.map

package/dist/prompts/lib/index.d.ts.map
DELETED
@@ -1 +0,0 @@
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/prompts/lib/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,MAAM,mBAAmB,CAAC;AAGhD,OAAO,MAAM,MAAM,QAAQ,CAAC;AAyF5B,KAAK,aAAa,GAAG;IACnB,aAAa,CAAC,EAAE,WAAW,CAAC;CAC7B,CAAC;AAEF,wBAAgB,aAAa,CAAC,CAAC,SAAS,MAAM,EAC5C,cAAc,EAAE,MAAM,EACtB,MAAM,EAAE,CAAC,EACT,OAAO,CAAC,EAAE,aAAa,GACtB,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,0BAA0B,EAAE,CAwCtD"}

package/dist/prompts/lib/index.js
DELETED
@@ -1,118 +0,0 @@
-"use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-return (mod && mod.__esModule) ? mod : { "default": mod };
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.compilePrompt = void 0;
-const vision_1 = require("@empiricalrun/llm/vision");
-const handlebars_1 = __importDefault(require("handlebars"));
-const constants_1 = require("../../constants");
-class SectionManager {
-sections = {};
-getSection(name) {
-return this.sections[name] || "";
-}
-setSection(name, content) {
-this.sections[name] = content;
-}
-getAllSections() {
-return this.sections;
-}
-}
-const IMAGE_TOKEN_PREFIX = "[[[HANDLEBARS_IMAGE:";
-const IMAGE_TOKEN_SUFFIX = "]]]";
-function createHandlebarsEnv() {
-const HandlebarsEnv = handlebars_1.default.create();
-const sectionManager = new SectionManager();
-HandlebarsEnv.registerHelper("section", function (name, options) {
-const content = options.fn(this);
-sectionManager.setSection(name, content);
-return ""; // don't output anything in place
-});
-HandlebarsEnv.registerHelper("image", function (imageParam) {
-const tokenPayload = JSON.stringify({ url: imageParam });
-// Use encodeURIComponent to avoid conflicts with special characters.
-const token = `${IMAGE_TOKEN_PREFIX}${encodeURIComponent(tokenPayload)}${IMAGE_TOKEN_SUFFIX}`;
-return token;
-});
-HandlebarsEnv.registerHelper("images", function (imagesParam) {
-if (!Array.isArray(imagesParam))
-return "";
-return imagesParam
-.map((url) => {
-const tokenPayload = JSON.stringify({ url });
-return `${IMAGE_TOKEN_PREFIX}${encodeURIComponent(tokenPayload)}${IMAGE_TOKEN_SUFFIX}`;
-})
-.join("");
-});
-return { HandlebarsEnv, sectionManager };
-}
-function processSectionContent(content) {
-if (!content.includes(IMAGE_TOKEN_PREFIX)) {
-return content.trim();
-}
-const segments = [];
-const regex = /\[\[\[HANDLEBARS_IMAGE:(.*?)\]\]\]/g;
-let lastIndex = 0;
-let match;
-while ((match = regex.exec(content)) !== null) {
-// Get the text before the token.
-const textPart = content.slice(lastIndex, match.index).trim();
-if (textPart) {
-segments.push({ type: "text", text: textPart });
-}
-// Decode the token payload.
-try {
-const payloadJson = decodeURIComponent(match[1]);
-const payload = JSON.parse(payloadJson);
-segments.push({ type: "image_url", image_url: { url: payload.url } });
-}
-catch (err) {
-// If decoding/parsing fails, treat the token as literal text.
-segments.push({ type: "text", text: match[0] });
-}
-lastIndex = match.index + match[0].length;
-}
-const remaining = content.slice(lastIndex).trim();
-if (remaining) {
-segments.push({ type: "text", text: remaining });
-}
-return segments;
-}
-function compilePrompt(promptTemplate, params, options) {
-const { HandlebarsEnv, sectionManager } = createHandlebarsEnv();
-const template = HandlebarsEnv.compile(promptTemplate, { noEscape: true });
-template(params);
-const sections = sectionManager.getAllSections();
-// TODO: system cannot have images, we can add validation for that
-const system = sections["system"];
-const user = sections["user"];
-if (!system || !user) {
-// TODO: support templates that have only one section
-throw new Error("Both system and user sections must be defined in the template");
-}
-const systemContent = processSectionContent(system);
-const userContent = processSectionContent(user);
-let userContentCorrectedForImageFormat = userContent;
-if (Array.isArray(userContent)) {
-const provider = options?.modelProvider || constants_1.DEFAULT_MODEL_PROVIDER;
-userContentCorrectedForImageFormat = userContent.map((c) => {
-if (c.type === "image_url") {
-return {
-...c,
-image_url: {
-url: (0, vision_1.imageFormatForProvider)(provider, c.image_url.url),
-},
-};
-}
-else {
-return c;
-}
-});
-}
-return [
-{ role: "system", content: systemContent },
-{ role: "user", content: userContentCorrectedForImageFormat },
-];
-}
-exports.compilePrompt = compilePrompt;
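Note: the removed module above was the package's local handlebars compiler: `{{#section}}` helpers collect the system/user blocks, and `{{image ...}}`/`{{images ...}}` expand into `image_url` content parts. Assuming the compiler moved into `@empiricalrun/llm` keeps this behavior (per the `b11f2cc` changeset), usage with an image looks roughly like the sketch below; the template, annotation text, and URL are illustrative:

```ts
import { compilePrompt } from "@empiricalrun/llm";

const template = `{{#section "system"}}
Describe the annotated elements in the screenshot.
{{/section}}

{{#section "user"}}
Annotations:
{{annotations}}

{{image annotatedScreenshot}}
{{/section}}`;

const messages = compilePrompt(template, {
  annotations: "1: Login button\n2: Search field",
  annotatedScreenshot: "https://example.com/annotated.png", // placeholder URL
});
// The user message content becomes an array of parts, e.g.
// [{ type: "text", text: "Annotations: ..." }, { type: "image_url", image_url: { url: "..." } }]
```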

package/dist/prompts/lib/vitest-plugin.d.ts.map
DELETED
@@ -1 +0,0 @@
-
{"version":3,"file":"vitest-plugin.d.ts","sourceRoot":"","sources":["../../../src/prompts/lib/vitest-plugin.ts"],"names":[],"mappings":"AAEA,eAAO,MAAM,gBAAgB;;iBAER,GAAG,MAAM,MAAM;;;;CAUnC,CAAC"}

package/dist/prompts/lib/vitest-plugin.js
DELETED
@@ -1,20 +0,0 @@
-"use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-return (mod && mod.__esModule) ? mod : { "default": mod };
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.handlebarsLoader = void 0;
-const fs_1 = __importDefault(require("fs"));
-exports.handlebarsLoader = {
-name: "handlebars-loader",
-async transform(_, id) {
-if (id.endsWith(".handlebars") || id.endsWith(".hbs")) {
-const content = fs_1.default.readFileSync(id, "utf-8");
-return {
-code: `export default ${JSON.stringify(content)};`,
-map: null,
-};
-}
-return null;
-},
-};