@empiricalrun/test-gen 0.42.22 → 0.42.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +9 -0
- package/dist/agent/browsing/index.js +1 -1
- package/dist/agent/codegen/create-test-block.js +1 -1
- package/dist/agent/codegen/fix-ts-errors.d.ts.map +1 -1
- package/dist/agent/codegen/fix-ts-errors.js +4 -2
- package/dist/agent/codegen/lexical-scoped-vars.js +1 -1
- package/dist/agent/codegen/skills-retriever.d.ts.map +1 -1
- package/dist/agent/codegen/skills-retriever.js +4 -2
- package/dist/agent/codegen/update-flow.d.ts.map +1 -1
- package/dist/agent/codegen/update-flow.js +6 -5
- package/dist/agent/codegen/use-skill.d.ts.map +1 -1
- package/dist/agent/codegen/use-skill.js +4 -2
- package/dist/agent/enrich-prompt/index.d.ts +3 -5
- package/dist/agent/enrich-prompt/index.d.ts.map +1 -1
- package/dist/agent/enrich-prompt/index.js +3 -13
- package/package.json +6 -6
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,14 @@
 # @empiricalrun/test-gen
 
+## 0.42.23
+
+### Patch Changes
+
+- b11f2cc: feat: move handlebars compiler to llm package
+- 853384f: chore: Replace getPrompt with compilePrompt for colocated prompts
+- Updated dependencies [b11f2cc]
+  - @empiricalrun/llm@0.9.33
+
 ## 0.42.22
 
 ### Patch Changes
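
The two changesets above are what the compiled output below reflects: prompts that were previously fetched by name at runtime with getPrompt are now colocated as handlebars template strings and compiled in place with compilePrompt from the package's prompts/lib module. A minimal before/after sketch in TypeScript, with hypothetical template text and variable values (not the package's actual prompts; the compiled diffs below show the real wiring):

// Hedged sketch of the migration described by the changelog entries above.
// Template text and sample values are illustrative only.

// Before (0.42.22): prompt looked up by name at runtime, e.g.
// const prompt = await getPrompt("append-create-test-block", { scenarioName });

// After (0.42.23): the handlebars template is colocated with the caller and
// compiled locally into messages.
import { compilePrompt } from "../../prompts/lib"; // path as seen in the dist output below

const promptTemplate =
  '{{#section "system"}}You are a software test engineer.{{/section}}\n' +
  '{{#section "user"}}Update the test named {{scenarioName}}.{{/section}}';

const prompt = compilePrompt(promptTemplate, { scenarioName: "checkout flow" });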

package/dist/agent/browsing/index.js
CHANGED
@@ -29,7 +29,7 @@ async function executeTaskUsingBrowsingAgent({ action, page, actions, llm, optio
 const pageSnapshot = (0, html_1.sanitizeHtml)(pageContent, options.htmlSanitize);
 sanitizationSpan?.end({ output: { pageSnapshot } });
 const promptSpan = browsingAgentSpan?.span({ name: "page-prompt" });
-const messages =
+const messages = (0, lib_1.compilePrompt)(promptTemplate_0, { pageSnapshot, task: action });
 promptSpan?.end({ output: { messages } });
 let completion;
 completion = await (0, o1_completion_1.getO1Completion)({

package/dist/agent/codegen/create-test-block.js
CHANGED
@@ -29,7 +29,7 @@ async function createEmptyTestCaseBlock({ testCase, file, options, trace, }) {
 const promptSpan = trace?.span({
 name: "build-create-empty-test-case-prompt",
 });
-const prompt =
+const prompt = (0, lib_1.compilePrompt)(promptTemplate_0, {
 testFiles: context.codePrompt,
 pageFiles: context.pomPrompt,
 scenarioName: testCase.name,

package/dist/agent/codegen/fix-ts-errors.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"fix-ts-errors.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/fix-ts-errors.ts"],"names":[],"mappings":"AAAA,OAAO,
+{"version":3,"file":"fix-ts-errors.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/fix-ts-errors.ts"],"names":[],"mappings":"AAAA,OAAO,EAAO,WAAW,EAAE,MAAM,mBAAmB,CAAC;AACrD,OAAO,KAAK,EACV,QAAQ,EACR,oBAAoB,EACrB,MAAM,4BAA4B,CAAC;AAGpC,OAAO,EAAE,YAAY,EAAE,MAAM,kBAAkB,CAAC;AAWhD,wBAAsB,8BAA8B,CAAC,EACnD,KAAK,EACL,MAA2B,EAC3B,IAAI,EACJ,OAAO,EACP,eAAe,EACf,QAAQ,EACR,OAAO,GACR,EAAE;IACD,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,MAAM,CAAC,EAAE,YAAY,CAAC;IACtB,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,CAAC;IAChB,eAAe,EAAE,MAAM,CAAC;IACxB,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,CAAC,EAAE,oBAAoB,CAAC;CAChC,iBAwEA"}

package/dist/agent/codegen/fix-ts-errors.js
CHANGED
@@ -9,6 +9,8 @@ const fs_extra_1 = __importDefault(require("fs-extra"));
 const logger_1 = require("../../bin/logger");
 const web_1 = require("../../bin/utils/platform/web");
 const constants_1 = require("../../constants");
+const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software engineer who is given a task to fix semantic and syntactical errors in a typescript file\n'{{scenarioFile}}' provided to you.\nYou will be provided with fixtures and page object models to use and fix errors.\n\nHere is the list of fixtures available:\n\n{{fixtureFiles}}\n\nHere is the list of current page object models available to you:\n\n{{pageFiles}}\n\nUse the above files to fix the errors.\n\nFollow these guidelines before responding with output\n- Ensure there are no type issues in the given {{scenarioFile}} file\n- For the given file respond with only the code\n- Do not respond with markdown syntax or backticks\n- Do not modify anything else apart from the code required to fix typescript error\n- Do not modify any other scenarios apart from the provided scenario name\n- Do not respond with any explanation. Respond only with the updated code.\n{{/section}}\n\n{{#section \"user\"}}\n'{{scenarioFile}}' with scenario name '{{scenarioName}}', has following typescript errors which you need to fix:\n{{errors}}\n\nHere is the content of the '{{scenarioFile}}':\n\n{{fileContent}}\n{{/section}}";
+const lib_1 = require("../../prompts/lib");
 async function validateAndFixTypescriptErrors({ trace, logger = new logger_1.CustomLogger(), file, pomCode, nonSpecFileCode, testCase, options, }) {
 const validateTypesSpan = trace?.span({ name: "detect-type-errors-in-file" });
 logger.log("Validating types...");
@@ -34,14 +36,14 @@ async function validateAndFixTypescriptErrors({ trace, logger = new logger_1.Cus
 logger.warn("Found few errors while validating types. Trying to fix errors...");
 errors.forEach((e) => console.warn(e));
 const promptSpan = trace?.span({ name: "fix-type-errors-prompt" });
-const instruction =
+const instruction = (0, lib_1.compilePrompt)(promptTemplate_0, {
 pageFiles: pomCode || "",
 fixtureFiles: nonSpecFileCode || "",
 scenarioFile: file,
 errors: errors,
 fileContent: fileContent,
 scenarioName: testCase.name,
-}
+});
 promptSpan?.end({ output: { instruction } });
 const llm = new llm_1.LLM({
 trace,
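
The inlined templates use {{#section "system"}} and {{#section "user"}} blocks, and later hunks in this diff (update-flow.js) filter the compiled result by role, which suggests compilePrompt expands one template into an array of role-tagged messages. A hedged sketch of that assumed return shape; the PromptMessage type and the declare stub are guesses inferred from this diff, not the library's published API:

// Assumed shape only, inferred from the role filtering shown further down in
// this diff; not taken from @empiricalrun/llm's type definitions.
type PromptMessage = { role: "system" | "user"; content: string };

declare function compilePrompt(
  template: string,
  variables: Record<string, unknown>,
): PromptMessage[];

const instruction = compilePrompt(
  '{{#section "system"}}Fix the TypeScript errors in {{scenarioFile}}.{{/section}}\n' +
    '{{#section "user"}}Errors:\n{{errors}}{{/section}}',
  { scenarioFile: "tests/login.spec.ts", errors: "TS2339: Property 'clickLogin' does not exist" },
);

// update-flow.js below splits the compiled messages the same way:
const systemMessage = instruction.find((m) => m.role === "system");
const userMessage = instruction.find((m) => m.role === "user");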

package/dist/agent/codegen/lexical-scoped-vars.js
CHANGED
@@ -9,7 +9,7 @@ async function getLexicalScopedVars({ trace, file, referencePoint, options, }) {
 const fetchLexicalScopedVarsSpan = trace?.span({
 name: "lexical-scoped-vars",
 });
-const messages =
+const messages = (0, lib_1.compilePrompt)(promptTemplate_0, {
 testFile: file || "",
 referencePoint: referencePoint || "",
 });

package/dist/agent/codegen/skills-retriever.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"skills-retriever.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/skills-retriever.ts"],"names":[],"mappings":"AAAA,OAAO,
+{"version":3,"file":"skills-retriever.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/skills-retriever.ts"],"names":[],"mappings":"AAAA,OAAO,EAAO,WAAW,EAAE,MAAM,mBAAmB,CAAC;AACrD,OAAO,KAAK,EACV,QAAQ,EACR,oBAAoB,EACrB,MAAM,4BAA4B,CAAC;AAoBpC,eAAO,MAAM,cAAc;cAMf,QAAQ;;;;;;;;;;IAqFnB,CAAC;AAEF,wBAAsB,oBAAoB,CAAC,EACzC,QAAQ,EACR,OAAO,EACP,KAAK,GACN,EAAE;IACD,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,KAAK,CAAC,EAAE,WAAW,CAAC;CACrB;;;;;;KA6BA"}

package/dist/agent/codegen/skills-retriever.js
CHANGED
@@ -10,6 +10,8 @@ const logger_1 = require("../../bin/logger");
 const context_1 = require("../../bin/utils/context");
 const fs_2 = require("../../bin/utils/fs");
 const constants_1 = require("../../constants");
+const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software test engineer who is given a goal to pick re-usable page object model methods for a given UI\nautomation test.\n\nYou will be provided with a test scenario as task and also the page object model methods available in the automation\ntest repository. The page object models acts as skills to execute a particular sub task of a given task.\n\nYou need to break down the task into sub tasks and identify which sub_task can be solved with the help of page object\nmodel methods. Once identified, you need to return with an usage API example for same. You need to pick methods only\nfrom the provided page object models.\n\nHere is the list of current page object models:\n\n{{pageFiles}}\n\n{{/section}}\n\n{{#section \"user\"}}\nFollowing is the test scenario for which you need to figure out the skills:\n\n**Task:**\n{{scenario}}\n\nBefore responding follow the instructions:\n- You need to break down the task into subtask and respond with the code of sub tasks for which there are methods\nexposed from page object models\n- Only use methods that are explicitly defined and exported in the provided page object models. Do not create, infer, or\nassume any methods or code that are not exported from the page object model files.\n- Do not respond with any methods or code that are not available in the list of page object models.\n- Ensure there are no type issues in the code generated.\n- Do not respond with markdown syntax or backticks.\n- Respond only with the code\n- Read steps one by one and generate the test code\n- Do not write any extra code than instructed in the steps\n- You need to respond with `<subtask></subtask>`, `<reason></reason>`, `<file_import_path></file_import_path>`, `\n<usage_example></usage_example>` and `<method_name></method_name>`\n- You should respond with methods exported from the page object models\n- Do not respond with any import statements.\n- Do not respond with sub_task for which there is no explicit method found\n- Always pick the page object model method matching the task provided. If there is no matching method, then ignore it.\nDo not attempt to generate or use any non existent methods for such instances.\n- Respond with the usage_example so that it can be directly copy pasted inside the test\n- Do not set the parameters for the method. Keep it as is with the interface parameters\n{{/section}}";
+const lib_1 = require("../../prompts/lib");
 const utils_1 = require("./utils");
 const fetchPomSkills = async ({ testCase, pomFiles, options, trace, }) => {
 const fetchSkillsUsingPOMFilesSpan = trace?.span({
@@ -22,11 +24,11 @@ const fetchPomSkills = async ({ testCase, pomFiles, options, trace, }) => {
 const promptSpan = fetchSkillsUsingPOMFilesSpan?.span({
 name: "fetch-pom-skills-prompt",
 });
-const prompt =
+const prompt = (0, lib_1.compilePrompt)(promptTemplate_0, {
 pageFiles: pomFiles,
 scenarioName: testCase.name,
 scenario: testCase.steps.join("\n"),
-}
+});
 promptSpan?.end({ output: { prompt } });
 const llm = new llm_1.LLM({
 trace: fetchSkillsUsingPOMFilesSpan,

package/dist/agent/codegen/update-flow.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"update-flow.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/update-flow.ts"],"names":[],"mappings":"AAAA,OAAO,
+{"version":3,"file":"update-flow.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/update-flow.ts"],"names":[],"mappings":"AAAA,OAAO,EAIL,WAAW,EACZ,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EACV,QAAQ,EACR,oBAAoB,EACrB,MAAM,4BAA4B,CAAC;AAGpC,OAAO,EAAE,0BAA0B,EAAE,MAAM,4BAA4B,CAAC;AAgBxE,OAAO,EAAE,UAAU,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAQtD,wBAAsB,2BAA2B,CAAC,EAChD,QAAQ,EACR,eAAe,EACf,SAAS,EACT,SAAS,EACT,YAAY,EACZ,OAAO,EACP,KAAK,GACN,EAAE;IACD,QAAQ,EAAE,QAAQ,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,YAAY,EAAE,MAAM,CAAC;IACrB,eAAe,EAAE,MAAM,CAAC;IACxB,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,OAAO,CAAC,EAAE,oBAAoB,CAAC;CAChC,GAAG,OAAO,CAAC;IACV,MAAM,EAAE,0BAA0B,EAAE,CAAC;IACrC,aAAa,EAAE,MAAM,CAAC;IACtB,WAAW,EAAE,UAAU,EAAE,CAAC;CAC3B,CAAC,CAkDD;AAED,wBAAsB,UAAU,CAC9B,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,oBAAoB,GAAG,SAAS,EACzC,OAAO,GAAE,OAAc,EACvB,QAAQ,GAAE,OAAc,EACxB,KAAK,CAAC,EAAE,WAAW,GAClB,OAAO,CAAC,eAAe,EAAE,CAAC,CA+F5B;AAED,wBAAsB,kCAAkC,CAAC,EACvD,SAAS,EACT,SAAS,EACT,QAAQ,EACR,YAAY,EACZ,OAAO,EACP,KAAK,GACN,EAAE;IACD,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;IAClB,QAAQ,EAAE,QAAQ,CAAC;IACnB,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,YAAY,EAAE,MAAM,CAAC;CACtB,mBAmGA;AAED,wBAAsB,qBAAqB,CAAC,EAC1C,QAAQ,EACR,IAAI,EACJ,OAAO,EACP,KAAK,EACL,aAAoB,GACrB,EAAE;IACD,QAAQ,EAAE,QAAQ,CAAC;IACnB,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,CAAC,EAAE,oBAAoB,CAAC;IAC/B,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,aAAa,CAAC,EAAE,OAAO,CAAC;CACzB,GAAG,OAAO,CAAC,eAAe,EAAE,CAAC,CAqD7B"}

package/dist/agent/codegen/update-flow.js
CHANGED
@@ -12,6 +12,9 @@ const context_1 = require("../../bin/utils/context");
 const fs_1 = require("../../bin/utils/fs");
 const web_1 = require("../../bin/utils/platform/web");
 const constants_1 = require("../../constants");
+const promptTemplate_0 = "{{#section \"system\"}}\n\nYou are a software test engineer who is given a task to update a test case. You will be provided with steps of a test\ncase scenario and you are given a snippet with interface await createTest(task, playwright_page_instance).\n\nYou need to analyze the request and place createTest snippet at the correct position and pass on the steps to this\n`createTest` method with the correct page instance.\n\nYou will be provided with current tests, fixtures and page object models for you to use and update code as per the\ntask provided to you.\nYou need to respond with file path and updated code block inside the file.\n\nHere is the list of current tests and fixtures:\n\n{{testFiles}}\n\n\nHere is the list of current page object models:\n\n{{pageFiles}}\n{{/section}}\n\n\n{{#section \"user\"}}\nFollowing is the test scenario for which you need to update the test:\ntest name:\n{{scenarioName}}\n\n\nTask:\n{{scenarioSteps}}\n\n\nFollow these steps to complete the task:\n\n1. Determine the task's intent:\n- The default intent is **\"add steps\"**.\n- If the task explicitly mentions (using words like \"replace\", \"remove\", \"delete\") that existing test steps should\nbe replaced or deleted, the intent is **\"update steps\"**. Identify the specific steps to be replaced or removed.\n- Note:\n- Do not assume that providing new or different steps implies an intent to update or replace existing steps. Unless\nthe task explicitly instructs to replace or remove existing code, interpret the intent as adding new steps to the\nexisting test.\n\n2. Identify the test block that requires updating.\n\n3. Place the `createTest` snippet:\n- Insert the `createTest` snippet at the location determined by the task, and pass the Playwright page instance.\n- If the intent is \"add\", do not alter the existing test code; simply append the `createTest` snippet based on any\nprovided location hints.\n- If the task includes location hints that don't match steps within the test, check dependent methods called from\nthe test for the update.\n- If no location hint is provided, place the `createTest` snippet at the end of the test block.\n- Even if the task includes steps that overlap with or are similar to existing steps, do not modify the existing\ncode. Unless the task explicitly instructs to replace or remove existing code, interpret the intent as adding new\nsteps to the existing test.\n\n4. Strip location hints from the task:\n- Remove any location hints (e.g., \"replace the current assertion and\") before passing the task to the `createTest`\nmethod.\n\nSubmission Guidelines:\n\n- Focus only on the provided test case and any related page object model methods used in the test.\n- Do not modify or add code within the `createTest` snippet.\n- Do not update or modify any other code apart from adding `createTest` code snippet.\n- Since the response will be used for search-and-replace operations, always provide the immediate parent AST node\nfor any code updates.\n- Include the full test block if any part of it is updated, preserving all unchanged code.\n- Do not use markdown syntax or backticks.\n- Respond using the following XML format:\n<reason_for_intent></reason_for_intent>\n<intent></intent>\n<location_of_update></location_of_update>\n<file_path></file_path>\n<old_code_block></old_code_block>\n<new_code_block></new_code_block>\n<change></change>\n\n- Each `<old_code_block>` and `<new_code_block>` should contain only one test block or page object model method\n definition. Provide separate blocks for multiple updates.\n - The `<change></change>` tag should also mention the file path being updated.\n - There should be only one `createTest` block in the `new_code_block`. The `createTest` method should be passed\n with entire task. Do not split the task while forwarding it to `createTest`.\n - `<new_code_block>` code snippet should be syntactically correct.\n - The code_block should not contain any import statements.\n {{/section}}";
+const lib_1 = require("../../prompts/lib");
+const promptTemplate_1 = "{{#section \"system\"}}\nYou are a software test engineer who is given an objective to update test basis the task provided.\nYou will be provided with a test name, test and test file path.\n\nYou will be provided with current tests, fixtures and page object models for you to use and update code as per the task\nprovided to you. You need to respond with file path and updated code block inside the file.\n\nBefore responding you need to ensure that the code change is minimal and the change is reusable across tests. You need\nto ensure the code follows DRY principle.\n\nHere is the list of current tests and fixtures:\n\n{{testFiles}}\n\n\nHere is the list of current page object models:\n\n{{pageFiles}}\n\n{{/section}}\n\n{{#section \"user\"}}\nFollowing are the test details and the task to complete your objective:\nTest name:\n{{scenarioName}}\n\nTest case:\n{{currentScenarioCodeBlock}}\n\nTask:\n{{scenarioSteps}}\n\nIn order to execute the task:\n- Think step by step and first identify current test block which needs update and the methods which the current test\nblock depend on.\n- The task will demand changes in the current test case or the methods it depend on. Based on the task, identify list of\nfile paths which need change, the reason for change and the code change they need.\n- Next make changes to code blocks in each file paths.\n\n------\n\nFollow these instructions before responding with output:\n- Read the code line by line and ensure that achieve the task provided to you\n- Read the dependencies of the code block by scanning through file paths and file provided to you. refer the same file\npath while responding with output.\n- Focus only on the test case provided and associated JS methods called from the test case.\n- Since the response will be used to search and replace blocks, always respond with output which includes the full\nlexical scope surrounding the modified code.\n- If there are any updates inside test code block, ensure responding with full test block with unchanged code as well\n- Each code block should contain edits to only one code block in file path\n- DO NOT respond with any backticks or markdown syntax\n- Respond only with file path where the code block to be updated is present, old code block, new code block and a one\nliner reason for the change\n- Respond with <file_path></file_path>, <old_code_block></old_code_block>, <new_code_block></new_code_block> and\n<change></change> as xml tags\n- The reason for change should adhere to coding principles provided and review if the updated code is present in the\nfile path mentioned\n- The code change should belong to the right file path\n- The response must start with <file_path>\n {{/section}}";
 const session_1 = require("../../session");
 const test_update_feedback_1 = require("./test-update-feedback");
 const utils_1 = require("./utils");
@@ -31,8 +34,7 @@ async function getUpdateTestCodeCompletion({ testCase, testFileContent, testFile
 content: testFileContent,
 suites: testCase?.suites || [],
 });
-const
-const prompt = await (0, llm_1.getPrompt)(promptName, {
+const prompt = (0, lib_1.compilePrompt)(promptTemplate_1, {
 testFiles: testFiles,
 pageFiles: pageFiles,
 scenarioName,
@@ -174,17 +176,16 @@ async function getAppendCreateTestBlockCompletion({ testFiles, pageFiles, testCa
 testFilePath,
 },
 });
-const promptName = "append-create-test-block";
 const promptSpan = trace?.span({
 name: "append-create-test-block-prompt",
 });
-const instruction =
+const instruction = (0, lib_1.compilePrompt)(promptTemplate_0, {
 testFiles: testFiles,
 pageFiles: pageFiles,
 scenarioName: testCase.name,
 scenarioSteps: testCase.steps.join("\n"),
 scenarioFile: testFilePath,
-}
+});
 promptSpan?.end({ output: { instruction } });
 const [userInstruction] = instruction.filter((s) => s.role === "user");
 const [systemInstruction] = instruction.filter((s) => s.role === "system");

package/dist/agent/codegen/use-skill.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"use-skill.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/use-skill.ts"],"names":[],"mappings":"AAAA,OAAO,
+{"version":3,"file":"use-skill.d.ts","sourceRoot":"","sources":["../../../src/agent/codegen/use-skill.ts"],"names":[],"mappings":"AAAA,OAAO,EAAO,WAAW,EAAE,MAAM,mBAAmB,CAAC;AAYrD,wBAAsB,sBAAsB,CAAC,EAC3C,IAAI,EACJ,iBAAiB,EACjB,oBAAoB,EACpB,gBAAgB,EAChB,qBAAqB,EACrB,KAAK,GACN,EAAE;IACD,IAAI,EAAE,MAAM,CAAC;IACb,iBAAiB,EAAE,MAAM,CAAC;IAC1B,oBAAoB,EAAE,MAAM,CAAC;IAC7B,gBAAgB,EAAE,MAAM,CAAC;IACzB,qBAAqB,EAAE,MAAM,CAAC;IAC9B,KAAK,CAAC,EAAE,WAAW,CAAC;CACrB,mBA6CA"}

package/dist/agent/codegen/use-skill.js
CHANGED
@@ -4,6 +4,8 @@ exports.generateSkillUsageCode = void 0;
 const llm_1 = require("@empiricalrun/llm");
 const logger_1 = require("../../bin/logger");
 const constants_1 = require("../../constants");
+const promptTemplate_0 = "{{#section \"system\"}}\nYou are given a sample method usage example, task and object variables available in the scope.\nBased on the task and object variables you need to generate the correct method call to be used in the scope.\n\nYou will also be provided with method definition and interface. Based on the definition you need to ensure creating\nvariables if the method has return value.\n\nFor page object use variable \"{{pageVariableName}}\"\n\n{{/section}}\n\n{{#section \"user\"}}\nTask:\n{{task}}\n\nMethod usage example:\n{{sampleUsageMethod}}\n------\n\nMethod definition:\n\n{{skillMethodDefinition}}\n\n------\nVariables object:\n{{scopeVariablesMapStr}}\n\nVariables object contain variable name and respective value available in scope.\n\nBefore responding ensure following the instructions:\n- Based on the task, write code using the method to achieve the task. If there are any return value of any methods,\ncreate `const` variables and assign the return values. The variable names should be inspired by the task assigned to you\n- Follow DRY principles and all good practices for typescript\n- Extract object properties and rename them if the return values is an object\n- Do not respond with any backticks or markdown syntax\n\n{{/section}}";
+const lib_1 = require("../../prompts/lib");
 async function generateSkillUsageCode({ task, sampleUsageMethod, scopeVariablesMapStr, pageVariableName, skillMethodDefinition, trace, }) {
 const logger = new logger_1.CustomLogger();
 logger.log(`Generating code using skill usage example: ${sampleUsageMethod}`);
@@ -19,13 +21,13 @@ async function generateSkillUsageCode({ task, sampleUsageMethod, scopeVariablesM
 const promptSpan = skillUsageSpan?.span({
 name: "apply-skills-prompt",
 });
-const prompt =
+const prompt = (0, lib_1.compilePrompt)(promptTemplate_0, {
 task,
 sampleUsageMethod,
 scopeVariablesMapStr,
 pageVariableName,
 skillMethodDefinition,
-}
+});
 promptSpan?.end({ output: prompt });
 const llm = new llm_1.LLM({
 trace: skillUsageSpan,

package/dist/agent/enrich-prompt/index.d.ts
CHANGED
@@ -1,14 +1,12 @@
 import { TraceClient } from "@empiricalrun/llm";
-
+export declare const enrichPromptWithFailingLine: ({ trace, testBlock, testFilePath, suggestionForFix, }: {
 testBlock: string;
 testFilePath: string;
 suggestionForFix: string;
-trace?: TraceClient;
-}
-export declare const enrichPromptWithFailingLine: ({ trace, testBlock, testFilePath, suggestionForFix, }: ArgsT) => Promise<{
+trace?: TraceClient | undefined;
+}) => Promise<{
 output: string;
 is_user_message_enriched: boolean;
 reason_for_output: string;
 }>;
-export {};
 //# sourceMappingURL=index.d.ts.map

package/dist/agent/enrich-prompt/index.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/agent/enrich-prompt/index.ts"],"names":[],"mappings":"AAAA,OAAO,
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/agent/enrich-prompt/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAO,WAAW,EAAE,MAAM,mBAAmB,CAAC;AAoCrD,eAAO,MAAM,2BAA2B;eAM3B,MAAM;kBACH,MAAM;sBACF,MAAM;;;YAGhB,MAAM;8BACY,OAAO;uBACd,MAAM;EA0D1B,CAAC"}

package/dist/agent/enrich-prompt/index.js
CHANGED
@@ -3,8 +3,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.enrichPromptWithFailingLine = void 0;
 const llm_1 = require("@empiricalrun/llm");
 const constants_1 = require("../../constants");
+const promptTemplate_0 = "{{#section \"system\"}}\nYou are a software QA engineer who is asked to update an automated browser test written that failed during a test run.\n\nYou will be provided with the details of the failure and then a user prompt that details what needs to be changed.\n\n# Details of the failure\n### Failing line:\n{{testBlock}}\n\n### Failing test file path:\n{{testFilePath}}\n\nYour goal is to enrich the user message with \"Failing line\" and \"Failing test file path\", if the user's message contains\nrequests like correcting or replacing failing lines or statements.\n\nTo fulfil your goal, follow these steps:\n1. Identify the user intent and check if the user says anything similar to:\n- \"Replace the failing line\"\n- \"Replace the error statement\"\n- \"Replace the failing statement\"\n- \"Replace the failed user action\"\n2. The user message intent can also be to modify things in or around the \"failing line\". Lookout for this scenario as\nwell to enrich the user message\n3. If such keywords are present, only then enrich the user message by inserting \"Failing line\" and \"Failing test file\npath\" at appropriate locations.\n\nBefore submitting your response, ensure the following:\n- Do not generate code for the user prompt. Your task is just to enrich the user message\n- Enrich the user message ONLY if any of these keywords are present - \"failing line\", \"error statement\", \"failing\nstatement\", \"failed user action\"\n- Do not include any markdown syntax or backticks\n- Respond with <reason_for_output></reason_for_output>, <is_user_message_enriched></is_user_message_enriched> and\n<output></output> as xml tags\n- is_user_message_enriched can be either true or false\n- output should follow the format: \"Replace the line <code> with <whatever is required to be done> in <file_path>\"\n{{/section}}\n\n{{#section \"user\"}}\nUser message:\n{{userMessage}}\n{{/section}}";
+const lib_1 = require("../../prompts/lib");
 const utils_1 = require("./utils");
-const promptName = "generate-self-heal-requested-change";
 const responseFormat = {
 type: "json_schema",
 json_schema: {
@@ -31,7 +32,6 @@ const responseFormat = {
 },
 },
 };
-// TODO: fix the format
 const enrichPromptWithFailingLine = async ({ trace, testBlock, testFilePath, suggestionForFix, }) => {
 let output = {
 output: suggestionForFix,
@@ -47,20 +47,10 @@ const enrichPromptWithFailingLine = async ({ trace, testBlock, testFilePath, sug
 suggestionForFix,
 },
 });
-const instructions =
+const instructions = (0, lib_1.compilePrompt)(promptTemplate_0, {
 testBlock,
 testFilePath,
 userMessage: suggestionForFix,
-}, 26);
-enrichedPromptSpan?.event({
-name: "get prompt",
-input: {
-promptName,
-testBlock,
-testFilePath,
-userMessage: suggestionForFix,
-},
-output: { instructions },
 });
 const llm = new llm_1.LLM({
 trace,
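
The enrich-prompt changes also simplify the public declaration: the separate ArgsT alias and the trailing export {} are gone, and the options object is typed inline with trace optional. A hedged usage sketch based only on the new index.d.ts shown above; the import path and the concrete test details are assumptions for illustration:

// Illustrative call shape derived from the new index.d.ts; the deep import
// path and the sample values are assumptions, not documented usage.
import { enrichPromptWithFailingLine } from "./dist/agent/enrich-prompt";

async function example() {
  const result = await enrichPromptWithFailingLine({
    testBlock: 'await page.getByRole("button", { name: "Pay" }).click();',
    testFilePath: "tests/checkout.spec.ts",
    suggestionForFix: "Replace the failing line with a click on the Checkout button",
    // trace?: TraceClient is optional per the declaration above
  });
  // result: { output, is_user_message_enriched, reason_for_output }
  return result;
}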
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@empiricalrun/test-gen",
-"version": "0.42.
+"version": "0.42.23",
 "publishConfig": {
 "registry": "https://registry.npmjs.org/",
 "access": "public"
@@ -63,7 +63,7 @@
 "mime": "^4.0.4",
 "minimatch": "^10.0.1",
 "nanoid": "^5.0.7",
-"openai": "4.
+"openai": "4.67.0",
 "picocolors": "^1.0.1",
 "prettier": "^3.2.5",
 "remove-markdown": "^0.5.5",
@@ -72,9 +72,9 @@
 "ts-morph": "^23.0.0",
 "tsx": "^4.16.2",
 "typescript": "^5.3.3",
-"@empiricalrun/llm": "^0.9.
-"@empiricalrun/
-"@empiricalrun/
+"@empiricalrun/llm": "^0.9.33",
+"@empiricalrun/r2-uploader": "^0.3.8",
+"@empiricalrun/reporter": "^0.23.1"
 },
 "devDependencies": {
 "@playwright/test": "1.47.1",
@@ -87,7 +87,7 @@
 "js-levenshtein": "^1.1.6",
 "playwright": "1.47.1",
 "ts-patch": "^3.3.0",
-"@empiricalrun/shared-types": "0.0.
+"@empiricalrun/shared-types": "0.0.2"
 },
 "scripts": {
 "dev": "tspc --build --watch",