@qa-gentic/agents 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +203 -0
- package/bin/postinstall.js +75 -0
- package/bin/qa-stlc.js +76 -0
- package/package.json +48 -0
- package/skills/qa-stlc/AGENT-BEHAVIOR.md +373 -0
- package/skills/qa-stlc/deduplication-protocol.md +303 -0
- package/skills/qa-stlc/generate-gherkin.md +550 -0
- package/skills/qa-stlc/generate-playwright-code.md +439 -0
- package/skills/qa-stlc/generate-test-cases.md +176 -0
- package/skills/qa-stlc/write-helix-files.md +349 -0
- package/src/cmd-init.js +84 -0
- package/src/cmd-mcp-config.js +177 -0
- package/src/cmd-skills.js +124 -0
- package/src/cmd-verify.js +129 -0
- package/src/qa_stlc_agents/__init__.py +0 -0
- package/src/qa_stlc_agents/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/__pycache__/server.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/server.py +502 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/tools/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/tools/__pycache__/ado_gherkin.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/tools/ado_gherkin.py +854 -0
- package/src/qa_stlc_agents/agent_helix_writer/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/__pycache__/server.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/server.py +529 -0
- package/src/qa_stlc_agents/agent_helix_writer/tools/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/tools/__pycache__/helix_write.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/tools/helix_write.py +622 -0
- package/src/qa_stlc_agents/agent_playwright_generator/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/__pycache__/server.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/server.py +2771 -0
- package/src/qa_stlc_agents/agent_playwright_generator/tools/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/tools/__pycache__/ado_attach.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/tools/ado_attach.py +62 -0
- package/src/qa_stlc_agents/agent_test_case_manager/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/__pycache__/server.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/server.py +483 -0
- package/src/qa_stlc_agents/agent_test_case_manager/tools/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/tools/__pycache__/ado_workitem.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/tools/ado_workitem.py +302 -0
- package/src/qa_stlc_agents/shared/__init__.py +0 -0
- package/src/qa_stlc_agents/shared/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/shared/__pycache__/auth.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/shared/auth.py +119 -0
|
@@ -0,0 +1,2771 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agent 3: QA Playwright Code Generator — MCP Server
|
|
3
|
+
|
|
4
|
+
Browser interaction at generation time is handled by the external Playwright MCP
|
|
5
|
+
server (github.com/microsoft/playwright-mcp). Start it before running workflows:
|
|
6
|
+
npx @playwright/mcp@latest --port 8931
|
|
7
|
+
|
|
8
|
+
The coding agent uses Playwright MCP tools (browser_navigate, browser_snapshot,
|
|
9
|
+
browser_fill, browser_click, browser_wait_for_url) to build a verified context_map
|
|
10
|
+
of locators from the live accessibility tree, then passes that map to
|
|
11
|
+
generate_playwright_code.
|
|
12
|
+
|
|
13
|
+
Three layers of self-healing at TEST RUNTIME:
|
|
14
|
+
1. Locator Healing — selector chain + AI Vision
|
|
15
|
+
2. Timing Healing — network trace drift detection → auto-adjusted timeouts
|
|
16
|
+
3. Visual Regression — element screenshot diff against approved baseline
|
|
17
|
+
|
|
18
|
+
DevToolsHealer (Layer 4) runs at test runtime via Playwright's built-in CDPSession —
|
|
19
|
+
no external process or port required.
|
|
20
|
+
|
|
21
|
+
CI/CD Telemetry:
|
|
22
|
+
HealingDashboard — HTTP server on :7890, engineers review/approve AI suggestions
|
|
23
|
+
before they are permanently committed to the repository.
|
|
24
|
+
"""
|
|
25
|
+
from __future__ import annotations
|
|
26
|
+
import asyncio, json, re, sys, os
|
|
27
|
+
from pathlib import Path
|
|
28
|
+
from dotenv import load_dotenv
|
|
29
|
+
from mcp.server import Server
|
|
30
|
+
from mcp.server.stdio import stdio_server
|
|
31
|
+
from mcp import types
|
|
32
|
+
from qa_stlc_agents.shared.auth import get_auth_headers, get_signed_in_user
|
|
33
|
+
from qa_stlc_agents.agent_playwright_generator.tools.ado_attach import attach_file_to_work_item as _attach_file
|
|
34
|
+
|
|
35
|
+
# Load environment variables (ADO credentials, AI provider keys) from a local
# .env file before any tool handler runs.
load_dotenv()

# MCP server instance; the tool handlers below register themselves on it via
# the @app.list_tools() / @app.call_tool() decorators.
app = Server("qa-playwright-generator")
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
# ---------------------------------------------------------------------------
|
|
40
|
+
# Pre-output validation helpers
|
|
41
|
+
# ---------------------------------------------------------------------------
|
|
42
|
+
|
|
43
|
+
def _validate_scaffold_output(result: dict) -> dict:
|
|
44
|
+
"""Validate scaffold_locator_repository output before returning to user.
|
|
45
|
+
|
|
46
|
+
Checks:
|
|
47
|
+
1. All expected infrastructure files are present.
|
|
48
|
+
2. File contents are non-empty.
|
|
49
|
+
3. Basic TS syntax (brace/paren balance) for each file.
|
|
50
|
+
|
|
51
|
+
Returns { valid: bool, errors: list[str], warnings: list[str] }.
|
|
52
|
+
"""
|
|
53
|
+
errors: list[str] = []
|
|
54
|
+
warnings: list[str] = []
|
|
55
|
+
|
|
56
|
+
files = result.get("files", {})
|
|
57
|
+
if not files:
|
|
58
|
+
errors.append("scaffold_locator_repository produced no files.")
|
|
59
|
+
return {"valid": False, "errors": errors, "warnings": warnings}
|
|
60
|
+
|
|
61
|
+
required_bases = {"LocatorHealer.ts", "LocatorRepository.ts"}
|
|
62
|
+
found_bases = {Path(k).name for k in files}
|
|
63
|
+
missing = required_bases - found_bases
|
|
64
|
+
if missing:
|
|
65
|
+
errors.append(f"Missing required infrastructure files: {sorted(missing)}")
|
|
66
|
+
|
|
67
|
+
for file_key, content in files.items():
|
|
68
|
+
if not content or not content.strip():
|
|
69
|
+
errors.append(f"{file_key}: file content is empty.")
|
|
70
|
+
continue
|
|
71
|
+
# Brace/paren balance
|
|
72
|
+
s = re.sub(r'`[^`]*`', '``', content)
|
|
73
|
+
s = re.sub(r'"[^"\n]*"', '""', s)
|
|
74
|
+
s = re.sub(r"'[^'\n]*'", "''", s)
|
|
75
|
+
s = re.sub(r'//[^\n]*', '', s)
|
|
76
|
+
s = re.sub(r'/\*.*?\*/', '', s, flags=re.DOTALL)
|
|
77
|
+
brace_delta = s.count('{') - s.count('}')
|
|
78
|
+
if brace_delta != 0:
|
|
79
|
+
errors.append(f"{file_key}: unbalanced braces (delta={brace_delta:+d})")
|
|
80
|
+
paren_delta = s.count('(') - s.count(')')
|
|
81
|
+
if paren_delta != 0:
|
|
82
|
+
errors.append(f"{file_key}: unbalanced parentheses (delta={paren_delta:+d})")
|
|
83
|
+
|
|
84
|
+
return {"valid": len(errors) == 0, "errors": errors, "warnings": warnings}
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def _validate_attach_inputs(files: list[dict]) -> dict:
|
|
88
|
+
"""Validate file payloads before attaching to ADO.
|
|
89
|
+
|
|
90
|
+
Checks:
|
|
91
|
+
1. Files list is non-empty.
|
|
92
|
+
2. Every file has a non-empty file_name and content.
|
|
93
|
+
3. No duplicate file names.
|
|
94
|
+
4. File names use safe characters.
|
|
95
|
+
|
|
96
|
+
Returns { valid: bool, errors: list[str], warnings: list[str] }.
|
|
97
|
+
"""
|
|
98
|
+
errors: list[str] = []
|
|
99
|
+
warnings: list[str] = []
|
|
100
|
+
|
|
101
|
+
if not files:
|
|
102
|
+
errors.append("No files to attach. Provide at least one file.")
|
|
103
|
+
return {"valid": False, "errors": errors, "warnings": warnings}
|
|
104
|
+
|
|
105
|
+
seen_names: set[str] = set()
|
|
106
|
+
for i, f in enumerate(files, start=1):
|
|
107
|
+
name = (f.get("file_name") or "").strip()
|
|
108
|
+
content = (f.get("content") or "").strip()
|
|
109
|
+
|
|
110
|
+
if not name:
|
|
111
|
+
errors.append(f"File #{i}: file_name is empty or missing.")
|
|
112
|
+
elif name in seen_names:
|
|
113
|
+
errors.append(f"File #{i}: duplicate file_name '{name}'.")
|
|
114
|
+
else:
|
|
115
|
+
seen_names.add(name)
|
|
116
|
+
|
|
117
|
+
if not content:
|
|
118
|
+
errors.append(f"File #{i} ('{name}'): content is empty.")
|
|
119
|
+
|
|
120
|
+
# Check for unsafe characters in filename
|
|
121
|
+
if name and re.search(r'[<>:"|?*\\]', name):
|
|
122
|
+
warnings.append(
|
|
123
|
+
f"File #{i} ('{name}'): file name contains characters that may be "
|
|
124
|
+
f"problematic on some file systems."
|
|
125
|
+
)
|
|
126
|
+
|
|
127
|
+
return {"valid": len(errors) == 0, "errors": errors, "warnings": warnings}
|
|
128
|
+
|
|
129
|
+
@app.list_tools()
async def list_tools() -> list[types.Tool]:
    """Advertise the five MCP tools exposed by this server.

    Tool names and JSON input schemas are the public contract consumed by MCP
    clients; the long descriptions double as usage guidance for the calling
    agent (which tool to call, in what order, and what to avoid).
    """
    return [
        # Main generator: Gherkin feature text → full Playwright TS suite.
        types.Tool(
            name="generate_playwright_code",
            description=(
                "Generate complete Playwright TypeScript code from a Gherkin .feature file. "
                "Produces: locators.ts (selector+intent+stability+visualIntent), "
                "{Page}Page.ts (LocatorHealer + TimingHealer + VisualIntentChecker), "
                "{feature}.steps.ts, {feature}.feature, cucumber-profile.js. "
                "All actions go through LocatorHealer (never raw page.click/fill). "
                "TimingHealer intercepts network traces per action. "
                "VisualIntentChecker screenshots elements at key assertions. "
                "Pass context_map from Playwright MCP browser_snapshot calls to use "
                "accessibility-tree-verified selectors instead of Gherkin inference. "
                "Pass helix_project_root to auto-merge with existing Helix-QA files "
                "(no overwrites, deduplication, conflict renaming)."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "gherkin_content": {"type": "string"},
                    "page_class_name": {"type": "string"},
                    "app_name": {"type": "string"},
                    "healing_strategy": {
                        "type": "string",
                        "enum": ["role-label-text-ai", "role-text-ai", "ai-first"],
                    },
                    "auth_hook": {
                        "type": "string",
                        "enum": ["microsoft-sso", "azure-keyvault-mfa", "none"],
                    },
                    "enable_visual_regression": {"type": "boolean"},
                    "enable_timing_healing": {"type": "boolean"},
                    "context_map": {
                        "type": "object",
                        "description": (
                            "Verified locator map built from Playwright MCP browser_snapshot calls. "
                            "Each key maps to { selector, intent, stability, visualIntent? }. "
                            "When provided, overrides Gherkin-inferred selectors. "
                            "Build by: browser_navigate → browser_snapshot → map AX nodes to entries "
                            "using stability rank: data-testid(100) > aria-role+name(90) > id(80) "
                            "> aria-label(70) > placeholder(60)."
                        ),
                    },
                    "helix_project_root": {
                        "type": "string",
                        "description": (
                            "Optional path to Helix-QA project root. When provided, generated code "
                            "is merged with existing files (append-only, deduplication, conflict renaming). "
                            "No overwrites ever occur. Locators with naming conflicts get unique names. "
                            "Pass this to enable seamless integration with your automation framework."
                        ),
                    },
                },
                "required": ["gherkin_content", "page_class_name"],
            },
        ),
        # One-time project setup: emits the self-healing runtime infrastructure.
        types.Tool(
            name="scaffold_locator_repository",
            description=(
                "Generate the complete self-healing infrastructure. Six files: "
                "LocatorHealer.ts, LocatorRepository.ts, "
                "TimingHealer.ts (network trace timing healing — observes API response times, "
                "auto-adjusts timeouts when endpoints drift beyond 20% of baseline), "
                "VisualIntentChecker.ts (screenshot-based visual regression at key assertions — "
                "detects elements that are found but look wrong), "
                "DevToolsHealer.ts (CDP healing via Playwright CDPSession at test runtime — "
                "AX tree name match + bounding box spatial search, no external process needed), "
                "HealingDashboard.ts (HTTP server on :7890 — shows all pending AI suggestions "
                "from all three layers; engineers approve/reject before changes are committed). "
                "Call once per project."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "output_dir": {"type": "string"},
                    "enable_ai_vision": {"type": "boolean"},
                    "repository_path": {"type": "string"},
                    "dashboard_port": {"type": "integer"},
                    "enable_timing_healing": {"type": "boolean"},
                    "enable_visual_regression": {"type": "boolean"},
                    "enable_devtools_healer": {"type": "boolean"},
                    "ai_healing_provider": {
                        "type": "string",
                        "enum": ["anthropic", "copilot", "claude-code"],
                        "description": (
                            "AI Vision provider for strategy 6 healing. "
                            "anthropic: direct Anthropic API (requires AI_HEALING_API_KEY env var). "
                            "copilot: GitHub Copilot chat completions (uses GITHUB_TOKEN from Copilot extension or gh auth login). "
                            "claude-code: Claude Code local proxy (uses ANTHROPIC_API_KEY set automatically by the claude CLI session). "
                            "Can also be overridden at runtime via the AI_HEALING_PROVIDER env var without regenerating files. "
                            "Default: anthropic."
                        ),
                    },
                },
                # All scaffold parameters have server-side defaults (see call_tool).
                "required": [],
            },
        ),
        # ADO integration: attach generated files to a Feature/PBI/Bug (never Epic).
        types.Tool(
            name="attach_code_to_work_item",
            description=(
                "Attach generated TypeScript files to an ADO work item (Feature, PBI, or Bug). "
                "NEVER pass an Epic ID — attachments on Epics are invisible in normal ADO workflow "
                "views. If you have an Epic ID, use qa-gherkin-generator:fetch_epic_hierarchy "
                "to walk its child Features and PBIs/Bugs, then call this tool once per Feature "
                "or PBI/Bug work item ID."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "organization_url": {"type": "string"},
                    "project_name": {"type": "string"},
                    "work_item_id": {"type": "integer"},
                    "files": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "file_name": {"type": "string"},
                                "content": {"type": "string"},
                            },
                            "required": ["file_name", "content"],
                        },
                    },
                },
                "required": ["organization_url", "project_name", "work_item_id", "files"],
            },
        ),
        # Utility: extract unique Gherkin steps grouped by keyword.
        types.Tool(
            name="validate_gherkin_steps",
            description="Parse a .feature file and return all unique step strings grouped by keyword.",
            inputSchema={
                "type": "object",
                "properties": {"gherkin_content": {"type": "string"}},
                "required": ["gherkin_content"],
            },
        ),
        # Pre-flight linter for generated *.steps.ts before write_helix_files.
        types.Tool(
            name="pre_validate_cucumber_steps",
            description=(
                "Validate Cucumber step syntax in TypeScript step-definition files BEFORE passing "
                "them to write_helix_files. Catches issues that cause the parenthesis-balance "
                "validator to fail and blocks write_helix_files submissions containing bad patterns.\n\n"
                "Checks performed:\n"
                " 1. Raw regex step patterns (/^…$/) with unescaped capture groups — these "
                "unbalance the paren counter. Prescribes Cucumber expression replacements.\n"
                " 2. Step patterns that mix Cucumber expressions and regex syntax.\n"
                " 3. Missing closing parenthesis or brace in step callback functions.\n"
                " 4. Duplicate step patterns within the same file.\n"
                " 5. Gherkin feature steps that have no matching step definition (unmatched steps).\n\n"
                "Returns { valid: bool, errors: list, warnings: list, suggestions: list } "
                "where suggestions provide ready-to-paste Cucumber expression replacements for "
                "every flagged raw-regex pattern. Fix all errors before calling write_helix_files."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "steps_ts_content": {
                        "type": "string",
                        "description": (
                            "Content of the generated *.steps.ts file to validate. "
                            "Pass the value from generate_playwright_code['files']['<feature>.steps.ts']."
                        ),
                    },
                    "gherkin_content": {
                        "type": "string",
                        "description": (
                            "Optional: content of the .feature file. "
                            "When provided, enables unmatched-step detection (check 5)."
                        ),
                    },
                },
                "required": ["steps_ts_content"],
            },
        ),
    ]
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
@app.call_tool()
async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
    """Dispatch an MCP tool invocation to its implementation.

    Synchronous workers run via asyncio.to_thread so the event loop is never
    blocked. Every branch serializes its result dict to a single JSON
    TextContent; any uncaught exception is converted to an
    {"error": ..., "tool": ...} payload rather than crashing the server.
    """
    try:
        if name == "generate_playwright_code":
            # _generate_playwright_code is defined later in this module;
            # argument order here must match its positional signature.
            result = await asyncio.to_thread(
                _generate_playwright_code,
                arguments["gherkin_content"],
                arguments["page_class_name"],
                arguments.get("app_name", "app"),
                arguments.get("healing_strategy", "role-label-text-ai"),
                arguments.get("auth_hook", "microsoft-sso"),
                arguments.get("enable_visual_regression", True),
                arguments.get("enable_timing_healing", True),
                arguments.get("context_map"),
                arguments.get("helix_project_root"),
            )
        elif name == "scaffold_locator_repository":
            result = await asyncio.to_thread(
                _scaffold_locator_repository,
                arguments.get("output_dir", "src/utils/locators"),
                arguments.get("enable_ai_vision", True),
                arguments.get("repository_path", "test-results/locator-repository.json"),
                arguments.get("dashboard_port", 7890),
                arguments.get("enable_timing_healing", True),
                arguments.get("enable_visual_regression", True),
                arguments.get("enable_devtools_healer", True),
                arguments.get("ai_healing_provider", "anthropic"),
            )
            # ── Pre-output validation ─────────────────────────────────────
            # Attach a validation report so the client can see missing files
            # or unbalanced TS syntax without re-parsing the output itself.
            result["_validation"] = _validate_scaffold_output(result)
        elif name == "attach_code_to_work_item":
            # ── Pre-attach input validation ────────────────────────────────
            # Reject the whole batch before any network call if the payload
            # is malformed (empty list, duplicate/blank names, empty content).
            input_validation = _validate_attach_inputs(arguments.get("files", []))
            if not input_validation["valid"]:
                result = {
                    "error": "input_validation_failed",
                    "_validation": input_validation,
                    "message": (
                        "File inputs failed validation. Fix the errors below "
                        "and retry. No files were attached to ADO."
                    ),
                }
                return [types.TextContent(type="text", text=json.dumps(result, indent=2, ensure_ascii=False))]

            # ── Epic guard — attachments on Epics are invisible in ADO workflow views ──
            # Best-effort peek at the work item type; local imports keep the
            # requests dependency off the hot path of the other tools.
            wi_id = arguments["work_item_id"]
            try:
                import requests as _requests
                from qa_stlc_agents.shared.auth import get_auth_headers as _gah
                _peek = await asyncio.to_thread(
                    lambda: _requests.get(
                        f"{arguments['organization_url'].rstrip('/')}/{arguments['project_name']}"
                        f"/_apis/wit/workitems/{wi_id}",
                        headers=_gah(),
                        params={"api-version": "7.1"},
                        timeout=15,
                    )
                )
                if _peek.ok:
                    _wi_type = _peek.json().get("fields", {}).get("System.WorkItemType", "")
                    if _wi_type == "Epic":
                        return [types.TextContent(type="text", text=json.dumps({
                            "error": "epic_not_supported",
                            "work_item_id": wi_id,
                            "work_item_type": "Epic",
                            "message": (
                                f"Work item {wi_id} is an Epic. Playwright TypeScript files "
                                "must NOT be attached directly to an Epic — attachments on Epics "
                                "are effectively invisible in normal ADO workflow views. "
                                "Use qa-gherkin-generator:fetch_epic_hierarchy to walk the child "
                                "Features and PBIs/Bugs, then call attach_code_to_work_item once "
                                "per Feature or PBI/Bug work item ID."
                            ),
                        }, indent=2))]
            except Exception:
                pass  # If the peek fails, let the attach proceed and surface any real error naturally

            # Attach files one by one so a single failure doesn't abort the
            # rest of the batch; per-file errors are collected in `failed`.
            attached, failed = [], []
            for f in arguments["files"]:
                try:
                    r = await asyncio.to_thread(
                        _attach_file,
                        arguments["organization_url"], arguments["project_name"],
                        arguments["work_item_id"], f["file_name"], f["content"],
                        "Generated by QA Playwright Code Generator",
                    )
                    attached.append(r)
                except Exception as e:
                    failed.append({"file_name": f["file_name"], "error": str(e)})
            result = {
                "summary": {"requested": len(arguments["files"]),
                            "attached": len(attached), "failed": len(failed)},
                "attached": attached, "failed": failed,
                "_validation": {
                    "valid": len(failed) == 0,
                    "errors": [f["error"] for f in failed] if failed else [],
                    # Carry forward advisory warnings from the pre-attach check.
                    "warnings": input_validation.get("warnings", []),
                    "post_attach_check": {
                        "all_attached": len(attached) == len(arguments["files"]),
                        "failed_count": len(failed),
                    },
                },
            }
        elif name == "validate_gherkin_steps":
            result = await asyncio.to_thread(_parse_gherkin_steps, arguments["gherkin_content"])
        elif name == "pre_validate_cucumber_steps":
            result = await asyncio.to_thread(
                _pre_validate_cucumber_steps,
                arguments["steps_ts_content"],
                arguments.get("gherkin_content"),
            )
        else:
            result = {"error": f"Unknown tool: {name}"}
        return [types.TextContent(type="text", text=json.dumps(result, indent=2, ensure_ascii=False))]
    except Exception as exc:
        # Top-level boundary: report the failure to the MCP client as data.
        return [types.TextContent(type="text", text=json.dumps({"error": str(exc), "tool": name}, indent=2))]
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
# ============================================================================
|
|
427
|
+
# Core helpers
|
|
428
|
+
# ============================================================================
|
|
429
|
+
|
|
430
|
+
def _to_kebab(s: str) -> str:
|
|
431
|
+
s = re.sub(r"([A-Z])", r"-\1", s).lower().strip("-")
|
|
432
|
+
return re.sub(r"-+", "-", s)
|
|
433
|
+
|
|
434
|
+
|
|
435
|
+
def _parse_gherkin_steps(content: str) -> dict:
|
|
436
|
+
keywords = ("Given", "When", "Then", "And", "But")
|
|
437
|
+
steps: dict = {k: [] for k in keywords}
|
|
438
|
+
for line in content.splitlines():
|
|
439
|
+
stripped = line.strip()
|
|
440
|
+
for kw in keywords:
|
|
441
|
+
if stripped.startswith(kw + " "):
|
|
442
|
+
text = stripped[len(kw):].strip()
|
|
443
|
+
if text not in steps[kw]:
|
|
444
|
+
steps[kw].append(text)
|
|
445
|
+
break
|
|
446
|
+
all_steps = [f"{kw} {s}" for kw in keywords for s in steps[kw]]
|
|
447
|
+
return {"by_keyword": steps, "all_steps": all_steps, "total_unique_steps": len(all_steps)}
|
|
448
|
+
|
|
449
|
+
|
|
450
|
+
# Cucumber expression parameter types — any of these in a step pattern means
|
|
451
|
+
# the step already uses Cucumber expressions correctly.
|
|
452
|
+
_CUCUMBER_EXPR_RE = re.compile(r"\{(?:string|int|float|word|bigdecimal|byte|short|long|double|[A-Za-z_]\w*)\}")
|
|
453
|
+
|
|
454
|
+
# Regex step pattern: Given(/^…$/, …) or When(/^…$/, …)
|
|
455
|
+
_RAW_REGEX_STEP_RE = re.compile(
|
|
456
|
+
r"^(Given|When|Then)\s*\(\s*/\^([^$]*)\$\s*/[gimusy]*\s*,",
|
|
457
|
+
re.MULTILINE,
|
|
458
|
+
)
|
|
459
|
+
|
|
460
|
+
# Capture groups inside a regex pattern that should become Cucumber expressions
|
|
461
|
+
_CAPTURE_GROUP_RE = re.compile(r"\((?!\?)") # ( not followed by ? (i.e. not (?:) or (?=) etc.)
|
|
462
|
+
|
|
463
|
+
# Mapping from common regex fragments to Cucumber expression equivalents
|
|
464
|
+
_REGEX_TO_CUCUMBER: list[tuple[re.Pattern, str]] = [
|
|
465
|
+
(re.compile(r'\(\.\+\)'), "{string}"),
|
|
466
|
+
(re.compile(r'\(\.\*\)'), "{string}"),
|
|
467
|
+
(re.compile(r'\(\\d\+\)'), "{int}"),
|
|
468
|
+
(re.compile(r'\(\\d\+\\.\\d\+\)'), "{float}"),
|
|
469
|
+
(re.compile(r'\(\\w\+\)'), "{word}"),
|
|
470
|
+
(re.compile(r'\([^)]+\)'), "{string}"), # fallback for any other group
|
|
471
|
+
]
|
|
472
|
+
|
|
473
|
+
|
|
474
|
+
def _regex_pattern_to_cucumber(raw_pattern: str) -> str:
|
|
475
|
+
"""Best-effort conversion of a raw regex step pattern to a Cucumber expression."""
|
|
476
|
+
result = raw_pattern
|
|
477
|
+
for rx, replacement in _REGEX_TO_CUCUMBER:
|
|
478
|
+
result = rx.sub(replacement, result, count=1)
|
|
479
|
+
if result != raw_pattern:
|
|
480
|
+
break
|
|
481
|
+
# Replace all remaining capture groups with {string}
|
|
482
|
+
result = _CAPTURE_GROUP_RE.sub("{string}", result)
|
|
483
|
+
return result
|
|
484
|
+
|
|
485
|
+
|
|
486
|
+
def _pre_validate_cucumber_steps(steps_ts_content: str, gherkin_content: str | None) -> dict:
    """Validate Cucumber step syntax in a generated *.steps.ts file.

    Five checks (see the pre_validate_cucumber_steps tool description):
    raw-regex step patterns with capture groups, mixed regex/Cucumber syntax,
    file-level brace/paren balance, duplicate step patterns, and — when
    gherkin_content is given — Gherkin steps with no matching definition.

    Args:
        steps_ts_content: content of the *.steps.ts file to lint.
        gherkin_content: optional .feature text enabling check 5.

    Returns { valid: bool, errors: list[str], warnings: list[str], suggestions: list[dict] }.
    """
    errors: list[str] = []
    warnings: list[str] = []
    suggestions: list[dict] = []

    # Empty input is an error in itself — nothing downstream can be checked.
    if not steps_ts_content or not steps_ts_content.strip():
        return {
            "valid": False,
            "errors": ["steps_ts_content is empty — nothing to validate."],
            "warnings": [],
            "suggestions": [],
        }

    # NOTE(review): 'lines' is never read below — kept for byte-compatibility;
    # candidate for removal in a behavioral change.
    lines = steps_ts_content.splitlines()

    # ── Check 1 & 2: raw regex patterns and mixed syntax ─────────────────
    seen_patterns: list[str] = []
    for match in _RAW_REGEX_STEP_RE.finditer(steps_ts_content):
        keyword, raw_pattern = match.group(1), match.group(2)
        # Count unescaped '(' that open capture groups; each one unbalances
        # the downstream paren counter after string-literal stripping.
        capture_count = len(_CAPTURE_GROUP_RE.findall(raw_pattern))
        if capture_count > 0:
            cucumber_expr = _regex_pattern_to_cucumber(raw_pattern)
            errors.append(
                f"{keyword}(/^{raw_pattern}$/) uses a raw regex with {capture_count} "
                f"capture group(s). "
                f"The regex preprocessor in write_helix_files strips string literals before "
                f"counting parentheses, so these unmatched '(' cause the paren-balance check "
                f"to fail with delta={capture_count:+d}. "
                f"Fix: replace the regex with a Cucumber expression."
            )
            # Ready-to-paste replacement so the caller can fix mechanically.
            suggestions.append({
                "original": f"/^{raw_pattern}$/",
                "replacement": f"'{cucumber_expr}'",
                "keyword": keyword,
                "note": (
                    "Replace the /^…$/ regex with the quoted Cucumber expression. "
                    "Update the callback parameter types: string for {string}, number for {int}/{float}."
                ),
            })
        elif _CUCUMBER_EXPR_RE.search(raw_pattern):
            # Regex delimiters around a pattern that already uses {param}
            # placeholders — Cucumber treats the two syntaxes differently.
            warnings.append(
                f"{keyword}(/^{raw_pattern}$/) mixes regex anchors with Cucumber expressions. "
                f"Use either a plain Cucumber expression string OR a pure regex — not both."
            )
        seen_patterns.append(raw_pattern)

    # ── Check 3: unbalanced braces / parens in step callbacks ────────────
    # Strip string literals and comments, then check balance per-block
    stripped = re.sub(r'`[^`]*`', '``', steps_ts_content)
    stripped = re.sub(r'"[^"\n]*"', '""', stripped)
    stripped = re.sub(r"'[^'\n]*'", "''", stripped)
    stripped = re.sub(r'//[^\n]*', '', stripped)
    stripped = re.sub(r'/\*.*?\*/', '', stripped, flags=re.DOTALL)
    # Skip regex literals (/^…$/) so they don't confuse the counter
    stripped = re.sub(r'/\^[^/]+\$/', '/REGEX/', stripped)

    brace_delta = stripped.count('{') - stripped.count('}')
    paren_delta = stripped.count('(') - stripped.count(')')
    if brace_delta != 0:
        errors.append(
            f"File-level unbalanced braces (delta={brace_delta:+d}) after stripping "
            f"string literals and comments. Check for unterminated arrow functions or class bodies."
        )
    if paren_delta != 0:
        # Only flag if no raw-regex errors already explain this
        if not any("capture group" in e for e in errors):
            errors.append(
                f"File-level unbalanced parentheses (delta={paren_delta:+d}) after stripping "
                f"string literals and comments. This will cause write_helix_files to reject the file. "
                f"Check for unclosed function calls or raw regex step patterns with capture groups."
            )

    # ── Check 4: duplicate step patterns ─────────────────────────────────
    # Matches single-quoted, double-quoted, and /^…$/ step patterns alike.
    all_patterns: list[str] = []
    for match in re.finditer(
        r"(?:Given|When|Then)\s*\(\s*(?:'([^']+)'|\"([^\"]+)\"|/\^([^$]+)\$/)",
        steps_ts_content,
    ):
        pattern = match.group(1) or match.group(2) or match.group(3) or ""
        if pattern in all_patterns:
            errors.append(f"Duplicate step pattern: '{pattern}' — defined more than once.")
        else:
            all_patterns.append(pattern)

    # ── Check 5: unmatched Gherkin steps (optional) ───────────────────────
    if gherkin_content:
        gherkin_parsed = _parse_gherkin_steps(gherkin_content)
        unmatched: list[str] = []
        for step_text in gherkin_parsed["all_steps"]:
            # Strip keyword prefix
            step_body = re.sub(r"^(?:Given|When|Then|And|But)\s+", "", step_text)
            matched = False
            for pat in all_patterns:
                try:
                    # Try Cucumber expression: replace {…} with .*
                    regex_pat = re.sub(r"\{[^}]+\}", ".*", pat)
                    if re.fullmatch(regex_pat, step_body, re.IGNORECASE):
                        matched = True
                        break
                except re.error:
                    # Pattern isn't a valid regex after substitution — skip it
                    # rather than fail the whole check.
                    pass
            if not matched:
                unmatched.append(step_body)
        if unmatched:
            # Unmatched steps are warnings, not errors: the definitions may
            # live in another step file we cannot see here.
            for step in unmatched:
                warnings.append(
                    f"Gherkin step has no matching step definition: \"{step}\""
                )

    return {
        "valid": len(errors) == 0,
        "errors": errors,
        "warnings": warnings,
        "suggestions": suggestions,
        "step_patterns_found": len(all_patterns),
    }
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
def _extract_element_hints(gherkin: str) -> list:
|
|
609
|
+
text = gherkin.lower()
|
|
610
|
+
hints = []
|
|
611
|
+
if "photo" in text or "image" in text or "crop" in text:
|
|
612
|
+
hints += [
|
|
613
|
+
("profilePhoto", "profile-photo", "user profile photo image"),
|
|
614
|
+
("imageEditorModal", "image-editor-modal", "image crop editor modal"),
|
|
615
|
+
("cropHandle", "crop-handle", "image crop handle"),
|
|
616
|
+
("zoomSlider", "zoom-slider", "image zoom slider"),
|
|
617
|
+
("initialsAvatar", "initials-avatar", "user initials avatar fallback"),
|
|
618
|
+
]
|
|
619
|
+
if "modal" in text or "dialog" in text:
|
|
620
|
+
hints += [
|
|
621
|
+
("modalContainer", "modal-container", "modal or dialog container"),
|
|
622
|
+
("modalCloseBtn", "modal-close-btn", "modal close button"),
|
|
623
|
+
]
|
|
624
|
+
if "filter" in text or "search" in text:
|
|
625
|
+
hints += [
|
|
626
|
+
("searchInput", "search-input", "search or filter input"),
|
|
627
|
+
("filterDropdown", "filter-dropdown", "filter dropdown"),
|
|
628
|
+
]
|
|
629
|
+
return hints
|
|
630
|
+
|
|
631
|
+
|
|
632
|
+
# ============================================================================
|
|
633
|
+
# Helix-QA Integration: Append-Only File Handling & Deduplication
|
|
634
|
+
# ============================================================================
|
|
635
|
+
|
|
636
|
+
def _file_exists(file_path: str) -> bool:
|
|
637
|
+
"""Check if file exists."""
|
|
638
|
+
return Path(file_path).exists()
|
|
639
|
+
|
|
640
|
+
|
|
641
|
+
def _read_file(file_path: str) -> str:
|
|
642
|
+
"""Read file content. Return empty string if file does not exist."""
|
|
643
|
+
if not _file_exists(file_path):
|
|
644
|
+
return ""
|
|
645
|
+
try:
|
|
646
|
+
return Path(file_path).read_text(encoding="utf-8")
|
|
647
|
+
except Exception:
|
|
648
|
+
return ""
|
|
649
|
+
|
|
650
|
+
|
|
651
|
+
def _parse_locators_from_file(ts_content: str) -> dict:
|
|
652
|
+
"""Extract existing locator keys and selectors from locators.ts file.
|
|
653
|
+
|
|
654
|
+
Returns dict: {
|
|
655
|
+
"keys": ["pageContainer", "submitBtn", ...],
|
|
656
|
+
"locators": {
|
|
657
|
+
"pageContainer": {"selector": "...", "intent": "...", "stability": ...},
|
|
658
|
+
...
|
|
659
|
+
}
|
|
660
|
+
}
|
|
661
|
+
"""
|
|
662
|
+
keys = []
|
|
663
|
+
locators_dict = {}
|
|
664
|
+
|
|
665
|
+
# Match pattern: keyName: { selector: "...", intent: '...', ... }
|
|
666
|
+
pattern = r'(\w+):\s*\{\s*selector:\s*"([^"]+)".*?intent:\s*[\'"]([^\'"]+)'
|
|
667
|
+
|
|
668
|
+
for match in re.finditer(pattern, ts_content):
|
|
669
|
+
key_name, selector, intent = match.groups()
|
|
670
|
+
keys.append(key_name)
|
|
671
|
+
locators_dict[key_name] = {
|
|
672
|
+
"selector": selector,
|
|
673
|
+
"intent": intent,
|
|
674
|
+
}
|
|
675
|
+
|
|
676
|
+
return {"keys": keys, "locators": locators_dict}
|
|
677
|
+
|
|
678
|
+
|
|
679
|
+
def _parse_page_methods_from_file(ts_content: str) -> dict:
|
|
680
|
+
"""Extract existing method names from page object .ts file.
|
|
681
|
+
|
|
682
|
+
Returns dict: {
|
|
683
|
+
"methods": ["navigate", "submitForm", "fillPrimaryInput", ...],
|
|
684
|
+
"pattern": "async methodName(...): Promise<...>"
|
|
685
|
+
}
|
|
686
|
+
"""
|
|
687
|
+
methods = []
|
|
688
|
+
|
|
689
|
+
# Match pattern: async methodName(...): Promise<...> {
|
|
690
|
+
pattern = r'async\s+(\w+)\s*\('
|
|
691
|
+
|
|
692
|
+
for match in re.finditer(pattern, ts_content):
|
|
693
|
+
method_name = match.group(1)
|
|
694
|
+
methods.append(method_name)
|
|
695
|
+
|
|
696
|
+
return {"methods": methods}
|
|
697
|
+
|
|
698
|
+
|
|
699
|
+
def _parse_steps_from_file(ts_content: str) -> dict:
|
|
700
|
+
"""Extract existing step definition patterns from steps .ts file.
|
|
701
|
+
|
|
702
|
+
Returns dict: {
|
|
703
|
+
"step_patterns": ["given step text", "when step text", ...],
|
|
704
|
+
"by_keyword": {"Given": [...], "When": [...], "Then": [...]}
|
|
705
|
+
}
|
|
706
|
+
"""
|
|
707
|
+
step_patterns = []
|
|
708
|
+
by_keyword = {"Given": [], "When": [], "Then": []}
|
|
709
|
+
|
|
710
|
+
# Match pattern: Given(/^step pattern$/,
|
|
711
|
+
pattern = r'(Given|When|Then)\s*\(\s*/\^([^$]+)\$/'
|
|
712
|
+
|
|
713
|
+
for match in re.finditer(pattern, ts_content):
|
|
714
|
+
keyword, step_text = match.groups()
|
|
715
|
+
# Unescape regex pattern
|
|
716
|
+
step_text = step_text.replace(r'\ ', ' ').replace(r'\.', '.').replace(r'\+', '+')
|
|
717
|
+
step_patterns.append(step_text)
|
|
718
|
+
by_keyword.setdefault(keyword, []).append(step_text)
|
|
719
|
+
|
|
720
|
+
return {"step_patterns": step_patterns, "by_keyword": by_keyword}
|
|
721
|
+
|
|
722
|
+
|
|
723
|
+
def _parse_scenarios_from_feature_file(feature_content: str) -> dict:
|
|
724
|
+
"""Extract existing scenario titles from feature file.
|
|
725
|
+
|
|
726
|
+
Returns dict: {
|
|
727
|
+
"scenario_titles": ["Scenario 1 Title", "Scenario 2 Title", ...],
|
|
728
|
+
"scenario_count": N
|
|
729
|
+
}
|
|
730
|
+
"""
|
|
731
|
+
scenario_titles = []
|
|
732
|
+
|
|
733
|
+
# Match pattern: Scenario: Scenario Title
|
|
734
|
+
pattern = r'^\s*Scenario:\s+(.+)$'
|
|
735
|
+
|
|
736
|
+
for line in feature_content.splitlines():
|
|
737
|
+
match = re.match(pattern, line)
|
|
738
|
+
if match:
|
|
739
|
+
scenario_titles.append(match.group(1).strip())
|
|
740
|
+
|
|
741
|
+
return {"scenario_titles": scenario_titles, "scenario_count": len(scenario_titles)}
|
|
742
|
+
|
|
743
|
+
|
|
744
|
+
def _parse_utils_locators_from_file(ts_content: str) -> dict:
|
|
745
|
+
"""Extract existing utility locator keys and selectors from src/utils/locators/*.ts files.
|
|
746
|
+
|
|
747
|
+
Returns dict: {
|
|
748
|
+
"keys": ["locator1", "locator2", ...],
|
|
749
|
+
"locators": {
|
|
750
|
+
"locator1": {"selector": "...", "intent": "..."},
|
|
751
|
+
...
|
|
752
|
+
}
|
|
753
|
+
}
|
|
754
|
+
"""
|
|
755
|
+
keys = []
|
|
756
|
+
locators_dict = {}
|
|
757
|
+
|
|
758
|
+
# Match pattern: export const locatorName = { selector: "...", intent: '...' }
|
|
759
|
+
pattern = r"export\s+const\s+(\w+)\s*=\s*\{[^}]*selector:\s*['\"]([^'\"]+)['\"][^}]*intent:\s*['\"]([^'\"]+)['\"]"
|
|
760
|
+
|
|
761
|
+
for match in re.finditer(pattern, ts_content, re.DOTALL):
|
|
762
|
+
key_name, selector, intent = match.groups()
|
|
763
|
+
keys.append(key_name)
|
|
764
|
+
locators_dict[key_name] = {
|
|
765
|
+
"selector": selector,
|
|
766
|
+
"intent": intent,
|
|
767
|
+
}
|
|
768
|
+
|
|
769
|
+
return {"keys": keys, "locators": locators_dict}
|
|
770
|
+
|
|
771
|
+
|
|
772
|
+
def _merge_utils_locators_ts(new_content: str, existing_content: str, file_name: str) -> str:
    """Append utility locators from *new_content* that are not yet present in
    *existing_content*, matched by exported const name.

    Args:
        new_content: Newly generated utility-locator file content.
        existing_content: Current file content ("" means the file is new).
        file_name: Name of the utility locator file (kept for interface
            compatibility; not used here).

    Returns:
        Merged file content; existing entries are never modified.
    """
    if not existing_content:
        return new_content

    already_present = set(_parse_utils_locators_from_file(existing_content)["keys"])
    incoming = _parse_utils_locators_from_file(new_content)["keys"]
    fresh_keys = [name for name in incoming if name not in already_present]
    if not fresh_keys:
        return existing_content

    # Copy each new `export const <name> = { ... }` block verbatim.
    additions = []
    for name in fresh_keys:
        block = re.search(
            rf"export\s+const\s+{name}\s*=\s*{{[^}}]*}}",
            new_content,
            re.DOTALL,
        )
        if block:
            additions.append(block.group(0))

    if not additions:
        return existing_content
    return existing_content + "\n\n" + "\n\n".join(additions)
|
|
813
|
+
|
|
814
|
+
|
|
815
|
+
def _generate_unique_locator_key(key: str, existing_keys: list) -> str:
|
|
816
|
+
"""Generate a unique locator key when a conflict occurs.
|
|
817
|
+
|
|
818
|
+
Strategy: append _1, _2, etc. until a unique name is found.
|
|
819
|
+
|
|
820
|
+
Example: If 'submitBtn' exists, generateunique 'submitBtn_1', 'submitBtn_2', etc.
|
|
821
|
+
Helix-QA Integration: On conflict, the new locator gets the unique name (existing one is never modified).
|
|
822
|
+
"""
|
|
823
|
+
candidate = key
|
|
824
|
+
counter = 1
|
|
825
|
+
while candidate in existing_keys:
|
|
826
|
+
candidate = f"{key}_{counter}"
|
|
827
|
+
counter += 1
|
|
828
|
+
return candidate
|
|
829
|
+
|
|
830
|
+
|
|
831
|
+
def _append_to_file(file_path: str, content: str) -> None:
|
|
832
|
+
"""Append content to file, creating it if it does not exist."""
|
|
833
|
+
path = Path(file_path)
|
|
834
|
+
path.parent.mkdir(parents=True, exist_ok=True)
|
|
835
|
+
|
|
836
|
+
if path.exists():
|
|
837
|
+
# Append with a newline separator
|
|
838
|
+
existing = path.read_text(encoding="utf-8")
|
|
839
|
+
path.write_text(existing + "\n" + content, encoding="utf-8")
|
|
840
|
+
else:
|
|
841
|
+
# Create new file
|
|
842
|
+
path.write_text(content, encoding="utf-8")
|
|
843
|
+
|
|
844
|
+
|
|
845
|
+
def _merge_locators_ts(new_locators_section: str, existing_content: str, page_class: str) -> str:
    """Merge new locators into an existing <kebab>.locators.ts, avoiding duplicates.

    Args:
        new_locators_section: Freshly generated locators file content.
        existing_content: Current on-disk file content ("" if the file is new).
        page_class: Page class name (e.g. "HomePage"); kept for interface
            compatibility, not used directly.

    Returns:
        Merged file content. Keys already present with the SAME selector are
        skipped; keys present with a DIFFERENT selector are appended under a
        renamed key (submitBtn -> submitBtn_1) so the existing entry is never
        modified. If the closing "} as const;" marker cannot be found, the
        existing content is returned untouched.
    """
    if not existing_content:
        return new_locators_section

    # Keys/selectors already in the on-disk file.
    parsed = _parse_locators_from_file(existing_content)
    existing_keys = parsed["keys"]

    # Pull key/selector/intent triples out of the newly generated section.
    new_pattern = r'(\w+):\s*\{\s*selector:\s*"([^"]+)".*?intent:\s*[\'"]([^\'"]+)[\'"].*?\}'
    new_locators_dict = {}
    for match in re.finditer(new_pattern, new_locators_section):
        key_name, selector, intent = match.groups()
        new_locators_dict[key_name] = {"selector": selector, "intent": intent}

    unique_locators = {}
    locator_conflicts = {}
    for key_name, entry in new_locators_dict.items():
        if key_name not in existing_keys:
            unique_locators[key_name] = entry
        elif parsed["locators"].get(key_name, {}).get("selector") != entry["selector"]:
            # Same key, different selector: keep the old entry, append the new
            # one under a fresh name.
            renamed = _generate_unique_locator_key(
                key_name, existing_keys + list(unique_locators.keys()))
            locator_conflicts[renamed] = entry

    if not unique_locators and not locator_conflicts:
        return existing_content

    # BUG FIX: the previous pattern was '}} as const;' (two literal braces —
    # an escaping artifact copied from an f-string template). Generated files
    # end with a single-brace '} as const;', so the marker never matched and
    # merges silently dropped every new locator.
    closing = re.search(r'\} as const;\s*$', existing_content, re.MULTILINE)
    if not closing:
        return existing_content
    insert_pos = closing.start()

    # Two-space indent matches the entries emitted by the generator.
    new_entries = []
    for key_name, entry in {**unique_locators, **locator_conflicts}.items():
        selector = entry["selector"]
        intent = entry["intent"]
        new_entries.append(f'  {key_name}: {{ selector: "{selector}", intent: \'{intent}\', stability: 0 }},')

    return (
        existing_content[:insert_pos]
        + "\n" + "\n".join(new_entries) + "\n"
        + existing_content[insert_pos:]
    )
|
|
913
|
+
|
|
914
|
+
|
|
915
|
+
def _merge_page_object_ts(new_methods_section: str, existing_content: str, page_class: str, kebab: str) -> str:
    """Merge new async methods into an existing page-object .ts file, skipping duplicates.

    Args:
        new_methods_section: Freshly generated page-object file content.
        existing_content: Current on-disk file content ("" if the file is new).
        page_class, kebab: Kept for interface compatibility; not used directly.

    Returns:
        The existing file with any not-yet-present `async name(...)` methods
        inserted just before the final class-closing brace; the existing file
        unchanged when there is nothing new to add.
    """
    if not existing_content:
        return new_methods_section

    existing_methods = _parse_page_methods_from_file(existing_content)["methods"]

    # Method names declared in the new section, in order.
    new_methods = re.findall(r'async\s+(\w+)\s*\(', new_methods_section)
    unique_methods = [m for m in new_methods if m not in existing_methods]
    if not unique_methods:
        return existing_content

    methods_to_add = []
    for method_name in unique_methods:
        # BUG FIX: the closing-brace pattern used to require exactly ONE
        # leading space ('\n \}') while generated methods close with a
        # two-space indent ('\n  }'), so no method ever matched and the merge
        # silently dropped every new method.
        pattern = rf'async\s+{re.escape(method_name)}\s*\(.*?\n  \}}\n'
        match = re.search(pattern, new_methods_section, re.DOTALL)
        if match:
            methods_to_add.append(match.group(0))

    if not methods_to_add:
        return existing_content

    # Insert before the file-final closing brace only (\Z), not after every
    # column-0 '}' line as the old MULTILINE '$' anchor allowed. A callable
    # replacement avoids re.sub interpreting backslashes in the TS bodies.
    return re.sub(
        r'(\n\})\s*\Z',
        lambda m: '\n\n' + '\n\n'.join(methods_to_add) + m.group(1),
        existing_content,
        count=1,
    )
|
|
955
|
+
|
|
956
|
+
|
|
957
|
+
def _merge_steps_ts(new_steps_section: str, existing_content: str, page_class: str, kebab: str) -> str:
    """Merge new Given/When/Then definitions into an existing steps .ts file.

    Dedupe key is the step regex between /^ and $/. Fixes over the previous
    line-based version:
      * whole multi-line step blocks (down to the closing '});') are copied,
        not just their first line, so the merged file stays syntactically valid;
      * new patterns are compared in the same unescaped form that
        _parse_steps_from_file returns, so escaped duplicates are detected;
      * the new blocks are appended ONCE at the end of the file — the old
        MULTILINE '(\\);)\\s*$' re.sub inserted them after every ');' line.

    Args:
        new_steps_section: Freshly generated step-definition file content.
        existing_content: Current on-disk file content ("" if the file is new).
        page_class, kebab: Kept for interface compatibility; not used directly.

    Returns:
        Merged file content.
    """
    if not existing_content:
        return new_steps_section

    existing_patterns = _parse_steps_from_file(existing_content)["step_patterns"]

    # Full step blocks: keyword( /^pattern$/ ... down to a column-0 '});'.
    # NOTE(review): assumes generated step bodies close with '});' at column 0
    # — matches the generator's output; confirm if templates change.
    block_re = re.compile(
        r'(?:Given|When|Then)\s*\(\s*/\^([^$]+)\$/.*?\n\}\);',
        re.DOTALL,
    )

    seen = set(existing_patterns)
    unique_steps = []
    for match in block_re.finditer(new_steps_section):
        raw = match.group(1)
        # Same unescaping _parse_steps_from_file applies, so comparisons line up.
        normalized = raw.replace('\\ ', ' ').replace('\\.', '.').replace('\\+', '+')
        if normalized not in seen and raw not in seen:
            unique_steps.append(match.group(0))
            seen.add(normalized)

    if not unique_steps:
        return existing_content

    return existing_content.rstrip() + "\n\n" + "\n\n".join(unique_steps) + "\n"
|
|
996
|
+
|
|
997
|
+
|
|
998
|
+
def _merge_feature_file(new_feature_content: str, existing_content: str) -> str:
    """Append scenarios from *new_feature_content* whose titles are not already
    present in *existing_content*.

    Scenario blocks are collected from the first `Scenario:` line onward;
    comment lines inside a block are dropped, blank lines are kept. If
    *existing_content* is empty the new content is returned unchanged.
    """
    if not existing_content:
        return new_feature_content

    existing_titles = _parse_scenarios_from_feature_file(existing_content)["scenario_titles"]

    # Split the incoming feature text into per-scenario blocks.
    blocks: list = []
    buffer: list = []
    collecting = False
    for raw_line in new_feature_content.splitlines():
        if re.match(r'^\s*Scenario:', raw_line):
            if buffer:
                blocks.append('\n'.join(buffer))
            buffer = [raw_line]
            collecting = True
        elif collecting:
            stripped = raw_line.strip()
            # Keep blanks and step lines; skip '#' comment lines.
            if not stripped or not stripped.startswith('#'):
                buffer.append(raw_line)
    if buffer:
        blocks.append('\n'.join(buffer))

    # Keep only scenarios whose title is new.
    fresh = []
    for block in blocks:
        title_match = re.search(r'Scenario:\s+(.+)$', block, re.MULTILINE)
        if title_match and title_match.group(1).strip() not in existing_titles:
            fresh.append(block)

    if not fresh:
        return existing_content
    return existing_content + "\n\n" + "\n\n".join(fresh)
|
|
1041
|
+
|
|
1042
|
+
|
|
1043
|
+
# ============================================================================
|
|
1044
|
+
# generate_playwright_code
|
|
1045
|
+
# ============================================================================
|
|
1046
|
+
|
|
1047
|
+
def _generate_playwright_code(
    gherkin, page_class, app_name, healing_strategy,
    auth_hook, enable_visual_regression, enable_timing_healing,
    context_map=None, helix_project_root=None,
):
    """Generate the full Playwright/Cucumber artifact set for one page.

    Produces locators, page object, step definitions, feature file and a
    cucumber profile snippet; optionally merges against an existing Helix-QA
    project on disk, then runs pre-output validation.

    Args:
        gherkin: Gherkin source text driving generation.
        page_class: PascalCase page name (e.g. "HomePage").
        app_name: Application name, forwarded to the cucumber profile.
        healing_strategy: Label forwarded into the page-object template.
        auth_hook: Forwarded to step-definition and cucumber-profile generation.
        enable_visual_regression: Adds the VisualIntentChecker healing layer.
        enable_timing_healing: Adds the TimingHealer healing layer.
        context_map: Optional verified locator data from a Playwright MCP
            snapshot; its presence switches locator_source to
            "playwright-mcp-verified".
        helix_project_root: Optional path to an existing Helix-QA project;
            when set, each generated file is merged with the on-disk version.

    Returns:
        Dict with the generated "files" map plus metadata (counts, healing
        layers, validation results, human-readable notes).
    """
    kebab = _to_kebab(page_class)
    # camelCase variant of the page class, used by the step-def generator.
    camel = page_class[0].lower() + page_class[1:]
    all_steps = _parse_gherkin_steps(gherkin)["all_steps"]
    # Feature title defaults to the page class; overridden by the first
    # "Feature:" line found in the Gherkin text.
    feature_title = page_class
    for line in gherkin.splitlines():
        if line.strip().startswith("Feature:"):
            feature_title = line.strip()[8:].strip()
            break

    locator_source = "playwright-mcp-verified" if context_map else "gherkin-inferred"

    # Generate new content
    new_locators = _gen_locators(page_class, kebab, gherkin, context_map)
    new_page_object = _gen_page_object(
        page_class, kebab, camel, gherkin, healing_strategy,
        enable_visual_regression, enable_timing_healing)
    new_step_defs = _gen_step_defs(
        page_class, kebab, camel, all_steps, auth_hook)
    new_feature = _gen_feature_file(gherkin, kebab, page_class)

    # Normalize feature file: auto-quote unquoted string parameters so they
    # match the generated step-definition regex patterns.
    new_feature, normalization_warnings = _normalize_feature_steps(new_feature, new_step_defs)

    # Handle Helix-QA project integration: read existing files and merge
    if helix_project_root:
        helix_root = Path(helix_project_root)

        # Locators merge
        locators_path = str(helix_root / "src" / "locators" / f"{kebab}.locators.ts")
        existing_locators = _read_file(locators_path)
        new_locators = _merge_locators_ts(new_locators, existing_locators, page_class)

        # Page object merge
        page_path = str(helix_root / "src" / "pages" / f"{kebab}.page.ts")
        existing_page = _read_file(page_path)
        new_page_object = _merge_page_object_ts(new_page_object, existing_page, page_class, kebab)

        # Steps merge
        steps_path = str(helix_root / "src" / "test" / "steps" / f"{kebab}.steps.ts")
        existing_steps = _read_file(steps_path)
        new_step_defs = _merge_steps_ts(new_step_defs, existing_steps, page_class, kebab)

        # Feature file merge
        feature_path = str(helix_root / "src" / "test" / "features" / f"{kebab}.feature")
        existing_feature = _read_file(feature_path)
        new_feature = _merge_feature_file(new_feature, existing_feature)

    # Map of relative project path -> file content returned to the caller.
    files = {
        f"src/locators/{kebab}.locators.ts": new_locators,
        f"src/pages/{kebab}.page.ts": new_page_object,
        f"src/test/steps/{kebab}.steps.ts": new_step_defs,
        f"src/test/features/{kebab}.feature": new_feature,
        "config/cucumber.js (add this profile)": _gen_cucumber_profile(kebab, app_name, auth_hook),
    }

    # Human-readable description of the enabled self-healing layers.
    layers = ["LocatorHealer (selector → role → label → text → AI Vision)"]
    if enable_timing_healing:
        layers.append("TimingHealer (network trace drift → auto-adjusted timeouts → HealingDashboard)")
    if enable_visual_regression:
        layers.append("VisualIntentChecker (element screenshot diff against baseline → HealingDashboard)")

    hint = (
        "context_map was provided — locators.ts uses accessibility-tree-verified selectors."
        if context_map else
        "No context_map provided — locators.ts uses Gherkin inference (stability=0). "
        "Run Playwright MCP (npx @playwright/mcp@latest --port 8931) and use "
        "browser_navigate + browser_snapshot to build a verified context_map."
    )

    merge_note = (
        f"\n✓ Helix-QA Integration: Files prepared for {helix_project_root}\n"
        " • Locators merged (no duplicates, conflicts renamed)\n"
        " • Page objects merged (new methods appended)\n"
        " • Step definitions merged (new steps appended)\n"
        " • Feature file merged (new scenarios appended)\n"
        " Ready to write to disk using write-helix-files or agent 4."
    ) if helix_project_root else ""

    # Run pre-output validation across all three checks
    validation = _run_pre_output_validation(
        files,
        f"src/test/features/{kebab}.feature",
        f"src/test/steps/{kebab}.steps.ts",
        kebab,
        page_class,
    )
    # Surface feature-normalization warnings ahead of the validator's own.
    if normalization_warnings:
        validation["warnings"] = normalization_warnings + validation.get("warnings", [])
        validation["normalization_warnings"] = normalization_warnings

    return {
        "files": files, "file_count": len(files),
        "page_class": page_class, "feature_title": feature_title,
        "step_count": len(all_steps), "healing_strategy": healing_strategy,
        "healing_layers": layers,
        "locator_source": locator_source,
        "validation": validation,
        "note": hint + merge_note,
        "scaffold_note": "Run scaffold_locator_repository once per project to generate all 6 infrastructure files.",
    }
|
|
1153
|
+
|
|
1154
|
+
|
|
1155
|
+
def _gen_locators(page_class, kebab, gherkin, context_map=None):
    """Render the locators.ts source for *page_class*.

    Selector provenance, best to worst:
      1. context_map with a "locators" list — parsed Playwright MCP snapshot.
      2. context_map as a flat {key: {selector, ...}} dict.
      3. Neither — placeholder data-testid selectors inferred from the Gherkin
         text, all marked stability 0.

    Args:
        page_class: PascalCase page name (used in the exported const name).
        kebab: kebab-case page name (used in placeholder data-testid values).
        gherkin: Gherkin text; only consulted for hint inference in case 3.
        context_map: Optional verified locator data (cases 1–2).

    Returns:
        Full TypeScript source for src/locators/<kebab>.locators.ts.
    """
    if context_map and context_map.get("locators"):
        # context_map from Playwright MCP snapshot: list of { key, selector, intent, stability, visualIntent? }
        cdp_entries = _gen_locators_from_context_map(context_map)
    elif context_map and isinstance(context_map, dict) and any(
        isinstance(v, dict) and "selector" in v for v in context_map.values()
    ):
        # context_map as flat dict: { key: { selector, intent, stability, visualIntent? } }
        cdp_entries = _gen_locators_from_flat_map(context_map)
    else:
        cdp_entries = None

    if cdp_entries is None:
        # Fallback path: extra entries inferred purely from Gherkin keywords.
        extras = ""
        for k, t, i in _extract_element_hints(gherkin):
            extras += f"  {k}: {{ selector: \"[data-testid='{t}']\", intent: '{i}', visualIntent: true }},\n"
        source_note = (
            " *\n"
            " * ⚠ GHERKIN-INFERRED: These selectors are placeholder data-testid values.\n"
            " * Run Playwright MCP (npx @playwright/mcp@latest --port 8931) and use\n"
            " * browser_navigate + browser_snapshot to replace them with real selectors."
        )
    else:
        extras = cdp_entries
        source_note = (
            " *\n"
            " * SOURCE: Playwright MCP accessibility tree snapshot.\n"
            " * Stability rank: data-testid(100) > aria-role+name(90) > id(80) > aria-label(70) > placeholder(60).\n"
            " * All selectors derived from real AX nodes — zero hallucinated values."
        )

    # NOTE: doubled braces below are f-string escapes for literal TS braces.
    return f'''/**
 * Locators: {page_class}Page
 * Pattern: {{ selector, intent, stability?, visualIntent? }}
 * selector: CSS selector derived from accessibility tree (or Gherkin inference)
 * intent: used by LocatorHealer (AI Vision fallback) and VisualIntentChecker
 * stability: 0–100 confidence score (100 = data-testid, 0 = gherkin-inferred)
 * visualIntent: when true, VisualIntentChecker screenshots this element at assertions{source_note}
 */
export const {page_class}Locators = {{
  pageContainer: {{ selector: "[data-testid='{kebab}-container']", intent: '{page_class} main page container', stability: 0 }},
  primaryActionBtn: {{ selector: "[data-testid='{kebab}-action-btn']", intent: 'primary action button', stability: 0, visualIntent: true }},
  submitBtn: {{ selector: "[data-testid='{kebab}-submit']", intent: 'submit form button', stability: 0, visualIntent: true }},
  confirmBtn: {{ selector: "[data-testid='{kebab}-confirm']", intent: 'confirm action button', stability: 0, visualIntent: true }},
  cancelBtn: {{ selector: "[data-testid='{kebab}-cancel']", intent: 'cancel or close button', stability: 0 }},
  saveBtn: {{ selector: "[data-testid='{kebab}-save']", intent: 'save changes button', stability: 0, visualIntent: true }},
  primaryInputField: {{ selector: "[data-testid='{kebab}-primary-input']", intent: 'primary text input', stability: 0 }},
  fileInput: {{ selector: "input[type='file']", intent: 'file upload input', stability: 70 }},
  successToast: {{ selector: "[data-testid='toast-success']", intent: 'success notification toast', stability: 0, visualIntent: true }},
  errorToast: {{ selector: "[data-testid='toast-error']", intent: 'error notification toast', stability: 0, visualIntent: true }},
  validationError: {{ selector: "[data-testid='validation-error']", intent: 'form validation error message', stability: 0, visualIntent: true }},
  successIndicator: {{ selector: "[data-testid='{kebab}-success']", intent: 'success state indicator', stability: 0, visualIntent: true }},
  updatedIndicator: {{ selector: "[data-testid='{kebab}-updated']", intent: 'updated state indicator', stability: 0 }},
{extras}}} as const;
export type LocatorKey = keyof typeof {page_class}Locators;
'''
|
|
1211
|
+
|
|
1212
|
+
|
|
1213
|
+
def _gen_locators_from_context_map(context_map: dict) -> str:
|
|
1214
|
+
"""Convert context_map with a 'locators' list (from Playwright MCP snapshot parsing)."""
|
|
1215
|
+
lines = []
|
|
1216
|
+
for loc in context_map.get("locators", []):
|
|
1217
|
+
key = loc["key"]
|
|
1218
|
+
selector = loc["selector"]
|
|
1219
|
+
intent = loc["intent"]
|
|
1220
|
+
stability = loc.get("stability", 0)
|
|
1221
|
+
vi = ", visualIntent: true" if loc.get("visualIntent") else ""
|
|
1222
|
+
lines.append(
|
|
1223
|
+
f" // stability={stability}\n"
|
|
1224
|
+
f" {key}: {{ selector: \"{selector}\", intent: '{intent}', stability: {stability}{vi} }},"
|
|
1225
|
+
)
|
|
1226
|
+
return "\n".join(lines) + "\n"
|
|
1227
|
+
|
|
1228
|
+
|
|
1229
|
+
def _gen_locators_from_flat_map(context_map: dict) -> str:
|
|
1230
|
+
"""Convert context_map as flat dict { key: { selector, intent, stability, visualIntent? } }."""
|
|
1231
|
+
lines = []
|
|
1232
|
+
for key, entry in context_map.items():
|
|
1233
|
+
if not isinstance(entry, dict) or "selector" not in entry:
|
|
1234
|
+
continue
|
|
1235
|
+
selector = entry["selector"]
|
|
1236
|
+
intent = entry.get("intent", key)
|
|
1237
|
+
stability = entry.get("stability", 0)
|
|
1238
|
+
vi = ", visualIntent: true" if entry.get("visualIntent") else ""
|
|
1239
|
+
lines.append(
|
|
1240
|
+
f" // stability={stability}\n"
|
|
1241
|
+
f" {key}: {{ selector: \"{selector}\", intent: '{intent}', stability: {stability}{vi} }},"
|
|
1242
|
+
)
|
|
1243
|
+
return "\n".join(lines) + "\n"
|
|
1244
|
+
|
|
1245
|
+
|
|
1246
|
+
def _gen_page_object(page_class, kebab, camel, gherkin, healing_strategy,
|
|
1247
|
+
enable_visual_regression, enable_timing_healing):
|
|
1248
|
+
steps = _parse_gherkin_steps(gherkin)["all_steps"]
|
|
1249
|
+
stubs = _build_method_stubs(camel, steps)
|
|
1250
|
+
vi_import = 'import { VisualIntentChecker } from "@utils/locators/VisualIntentChecker";' if enable_visual_regression else ""
|
|
1251
|
+
th_import = 'import { TimingHealer } from "@utils/locators/TimingHealer";' if enable_timing_healing else ""
|
|
1252
|
+
vi_field = "private readonly visual: VisualIntentChecker;" if enable_visual_regression else ""
|
|
1253
|
+
th_field = "private readonly timing: TimingHealer;" if enable_timing_healing else ""
|
|
1254
|
+
vi_init = "this.visual = new VisualIntentChecker(this.page, fixture().logger, this.repo);" if enable_visual_regression else ""
|
|
1255
|
+
th_init = "this.timing = new TimingHealer(this.page, fixture().logger, this.repo);" if enable_timing_healing else ""
|
|
1256
|
+
tn = ' await this.timing.waitForNetworkIdle("navigate");' if enable_timing_healing else ""
|
|
1257
|
+
ts_ = ' await this.timing.waitForNetworkIdle("submitForm");' if enable_timing_healing else ""
|
|
1258
|
+
vc = ' await this.visual.check("successToast", this.loc.successToast.selector, this.loc.successToast.intent);' if enable_visual_regression else ""
|
|
1259
|
+
|
|
1260
|
+
return f'''import {{ Page, expect }} from "@playwright/test";
|
|
1261
|
+
import {{ fixture }} from "@hooks/pageFixture";
|
|
1262
|
+
import {{ {page_class}Locators }} from "./locators";
|
|
1263
|
+
import {{ LocatorHealer }} from "@utils/locators/LocatorHealer";
|
|
1264
|
+
import {{ LocatorRepository }} from "@utils/locators/LocatorRepository";
|
|
1265
|
+
{vi_import}
|
|
1266
|
+
{th_import}
|
|
1267
|
+
import {{ EnvironmentManager }} from "@helper/environment/environmentManager.util";
|
|
1268
|
+
|
|
1269
|
+
/**
|
|
1270
|
+
* {page_class}Page — Three-Layer Self-Healing Page Object
|
|
1271
|
+
*
|
|
1272
|
+
* Layer 1 — Locator Healing ({healing_strategy}):
|
|
1273
|
+
* primary selector → role-based → label-based → text-based → playwright-cli + Claude AI Vision
|
|
1274
|
+
* → DevToolsHealer AX tree (CDPSession) → DevToolsHealer bounding box (CDPSession)
|
|
1275
|
+
* Healed selectors persisted in LocatorRepository — zero overhead on repeat runs.
|
|
1276
|
+
*
|
|
1277
|
+
{" * Layer 2 — Timing Healing (TimingHealer): intercepts network traces, auto-heals timeout drift." if enable_timing_healing else ""}
|
|
1278
|
+
{" *" if enable_timing_healing else ""}
|
|
1279
|
+
{" * Layer 3 — Visual Intent (VisualIntentChecker): element screenshot diff at assertions." if enable_visual_regression else ""}
|
|
1280
|
+
{" *" if enable_visual_regression else ""}
|
|
1281
|
+
{" * HealingDashboard: http://localhost:7890 — approve/reject all AI suggestions." if (enable_timing_healing or enable_visual_regression) else ""}
|
|
1282
|
+
*
|
|
1283
|
+
* RULE: Never call page.click / page.fill / page.locator directly.
|
|
1284
|
+
*/
|
|
1285
|
+
export default class {page_class}Page {{
|
|
1286
|
+
private readonly page: Page;
|
|
1287
|
+
private readonly healer: LocatorHealer;
|
|
1288
|
+
private readonly repo: LocatorRepository;
|
|
1289
|
+
private readonly loc = {page_class}Locators;
|
|
1290
|
+
private readonly env: EnvironmentManager;
|
|
1291
|
+
{vi_field}
|
|
1292
|
+
{th_field}
|
|
1293
|
+
|
|
1294
|
+
constructor(page?: Page) {{
|
|
1295
|
+
this.page = page ?? fixture().page;
|
|
1296
|
+
this.env = new EnvironmentManager();
|
|
1297
|
+
this.repo = fixture().locatorRepository ?? new LocatorRepository();
|
|
1298
|
+
this.healer = new LocatorHealer(this.page, fixture().logger, this.repo);
|
|
1299
|
+
{vi_init}
|
|
1300
|
+
{th_init}
|
|
1301
|
+
Object.entries(this.loc).forEach(([key, val]) =>
|
|
1302
|
+
this.repo.register(key, val.selector, val.intent));
|
|
1303
|
+
}}
|
|
1304
|
+
|
|
1305
|
+
async navigate(): Promise<void> {{
|
|
1306
|
+
const url = `${{this.env.getBaseUrl()}}/${{this.env.getPath("{kebab}")}}`;
|
|
1307
|
+
fixture().logger.info(`Navigating to ${{url}}`);
|
|
1308
|
+
await this.page.goto(url, {{ waitUntil: "networkidle" }});
|
|
1309
|
+
{tn}
|
|
1310
|
+
await this.healer.assertVisibleWithHealing(
|
|
1311
|
+
"pageContainer", this.loc.pageContainer.selector, this.loc.pageContainer.intent);
|
|
1312
|
+
}}
|
|
1313
|
+
|
|
1314
|
+
async waitForPageLoad(): Promise<void> {{
|
|
1315
|
+
await this.healer.assertVisibleWithHealing(
|
|
1316
|
+
"pageContainer", this.loc.pageContainer.selector, this.loc.pageContainer.intent);
|
|
1317
|
+
}}
|
|
1318
|
+
|
|
1319
|
+
async submitForm(): Promise<void> {{
|
|
1320
|
+
await this.healer.clickWithHealing(
|
|
1321
|
+
"submitBtn", this.loc.submitBtn.selector, this.loc.submitBtn.intent);
|
|
1322
|
+
{ts_}
|
|
1323
|
+
}}
|
|
1324
|
+
|
|
1325
|
+
async confirmAction(): Promise<void> {{
|
|
1326
|
+
await this.healer.clickWithHealing(
|
|
1327
|
+
"confirmBtn", this.loc.confirmBtn.selector, this.loc.confirmBtn.intent);
|
|
1328
|
+
}}
|
|
1329
|
+
|
|
1330
|
+
async clickCancel(): Promise<void> {{
|
|
1331
|
+
await this.healer.clickWithHealing(
|
|
1332
|
+
"cancelBtn", this.loc.cancelBtn.selector, this.loc.cancelBtn.intent);
|
|
1333
|
+
}}
|
|
1334
|
+
|
|
1335
|
+
async clickSave(): Promise<void> {{
|
|
1336
|
+
await this.healer.clickWithHealing(
|
|
1337
|
+
"saveBtn", this.loc.saveBtn.selector, this.loc.saveBtn.intent);
|
|
1338
|
+
}}
|
|
1339
|
+
|
|
1340
|
+
async fillPrimaryInput(value: string): Promise<void> {{
|
|
1341
|
+
await this.healer.fillWithHealing(
|
|
1342
|
+
"primaryInputField", this.loc.primaryInputField.selector, value, this.loc.primaryInputField.intent);
|
|
1343
|
+
}}
|
|
1344
|
+
|
|
1345
|
+
async uploadFile(buffer: Buffer, fileName: string, mimeType: string): Promise<void> {{
|
|
1346
|
+
await this.page.locator(this.loc.fileInput.selector).setInputFiles({{ name: fileName, mimeType, buffer }});
|
|
1347
|
+
}}
|
|
1348
|
+
|
|
1349
|
+
async verifySuccessToast(): Promise<void> {{
|
|
1350
|
+
await this.healer.assertVisibleWithHealing(
|
|
1351
|
+
"successToast", this.loc.successToast.selector, this.loc.successToast.intent);
|
|
1352
|
+
{vc}
|
|
1353
|
+
fixture().logger.info("✓ Success toast verified (locator + visual)");
|
|
1354
|
+
}}
|
|
1355
|
+
|
|
1356
|
+
async verifyErrorToast(): Promise<void> {{
|
|
1357
|
+
await this.healer.assertVisibleWithHealing(
|
|
1358
|
+
"errorToast", this.loc.errorToast.selector, this.loc.errorToast.intent);
|
|
1359
|
+
fixture().logger.info("✓ Error toast verified");
|
|
1360
|
+
}}
|
|
1361
|
+
|
|
1362
|
+
async verifyValidationErrors(): Promise<void> {{
|
|
1363
|
+
await this.healer.assertVisibleWithHealing(
|
|
1364
|
+
"validationError", this.loc.validationError.selector, this.loc.validationError.intent);
|
|
1365
|
+
const count = await this.page.locator(this.loc.validationError.selector).count();
|
|
1366
|
+
expect(count).toBeGreaterThan(0);
|
|
1367
|
+
}}
|
|
1368
|
+
|
|
1369
|
+
async verifySuccessState(): Promise<void> {{
|
|
1370
|
+
await this.healer.assertVisibleWithHealing(
|
|
1371
|
+
"successIndicator", this.loc.successIndicator.selector, this.loc.successIndicator.intent);
|
|
1372
|
+
}}
|
|
1373
|
+
|
|
1374
|
+
async verifyStatePersisted(): Promise<void> {{
|
|
1375
|
+
await this.healer.assertVisibleWithHealing(
|
|
1376
|
+
"updatedIndicator", this.loc.updatedIndicator.selector, this.loc.updatedIndicator.intent);
|
|
1377
|
+
}}
|
|
1378
|
+
|
|
1379
|
+
async reloadAndVerify(): Promise<void> {{
|
|
1380
|
+
await this.page.reload({{ waitUntil: "networkidle" }});
|
|
1381
|
+
await this.verifyStatePersisted();
|
|
1382
|
+
}}
|
|
1383
|
+
|
|
1384
|
+
{stubs}
|
|
1385
|
+
}}
|
|
1386
|
+
'''
|
|
1387
|
+
|
|
1388
|
+
|
|
1389
|
+
def _build_method_stubs(camel, steps):
|
|
1390
|
+
stubs, seen = [], set()
|
|
1391
|
+
for step in steps:
|
|
1392
|
+
lower = step.lower()
|
|
1393
|
+
if "refresh" in lower and "refresh" not in seen:
|
|
1394
|
+
seen.add("refresh")
|
|
1395
|
+
stubs.append(''' async refreshPage(): Promise<void> {
|
|
1396
|
+
await this.page.reload({ waitUntil: "networkidle" });
|
|
1397
|
+
await this.waitForPageLoad();
|
|
1398
|
+
}''')
|
|
1399
|
+
if "initials" in lower and "initials" not in seen:
|
|
1400
|
+
seen.add("initials")
|
|
1401
|
+
stubs.append(''' async verifyInitialsAvatar(): Promise<void> {
|
|
1402
|
+
const sel = (this.loc as any).initialsAvatar?.selector ?? "[data-testid='initials-avatar']";
|
|
1403
|
+
const el = this.page.locator(sel);
|
|
1404
|
+
await expect(el).toBeVisible({ timeout: 10_000 });
|
|
1405
|
+
const text = await el.textContent() ?? "";
|
|
1406
|
+
expect(text.trim()).toMatch(/^[A-Z]{1,2}$/);
|
|
1407
|
+
fixture().logger.info(`✓ Initials: "${text.trim()}"`);
|
|
1408
|
+
}''')
|
|
1409
|
+
if "persist" in lower and "persist" not in seen:
|
|
1410
|
+
seen.add("persist")
|
|
1411
|
+
stubs.append(''' async verifyDataPersistenceAfterRefresh(): Promise<void> {
|
|
1412
|
+
await this.page.reload({ waitUntil: "networkidle" });
|
|
1413
|
+
await this.verifyStatePersisted();
|
|
1414
|
+
fixture().logger.info("✓ State persisted after refresh");
|
|
1415
|
+
}''')
|
|
1416
|
+
return "\n\n".join(stubs)
|
|
1417
|
+
|
|
1418
|
+
|
|
1419
|
+
def _gen_step_defs(page_class, kebab, camel, all_steps, auth_hook):
    """Generate the TypeScript Cucumber step-definition file for one page.

    Args:
        page_class: PascalCase page class name (e.g. "UserProfile").
        kebab: kebab-case page identifier used in import paths.
        camel: camelCase variable name for the page-object instance.
        all_steps: raw Gherkin step lines ("Given …" / "When …" / …).
        auth_hook: when "microsoft-sso", two SSO background Given steps are
            prepended to the output.

    Returns:
        The complete .steps.ts file content as a string.  Steps that match
        no keyword mapping are silently skipped (they will be reported by
        the separate step-coverage validation).
    """
    # Optional SSO background steps — only emitted for the microsoft-sso hook.
    bg = ""
    if auth_hook == "microsoft-sso":
        bg = f'''
Given(
  /^user logs in with Microsoft SSO as "(.+)"$/,
  async function (userType: string): Promise<void> {{
    fixture().logger.info(`SSO login as ${{userType}}`);
  }},
);
Given(
  /^user of type "(.+)" is ready to login$/,
  async function (userType: string): Promise<void> {{
    fixture().logger.info(`Preparing login for ${{userType}}`);
  }},
);
'''
    # keyword substrings → page-object call; first mapping whose keyword
    # appears anywhere in the (lowercased) step wins.
    mappings = [
        (["navigates to", "is on the"], f"{camel}Page.navigate()"),
        (["submits the form", "clicks submit"], f"{camel}Page.submitForm()"),
        (["confirms the action"], f"{camel}Page.confirmAction()"),
        (["clicks cancel", "clicks close"], f"{camel}Page.clickCancel()"),
        (["clicks save"], f"{camel}Page.clickSave()"),
        (["refreshes the page"], f"{camel}Page.refreshPage()"),
        (["selects a valid file", "uploads a valid"], f"{camel}Page.uploadFile(Buffer.alloc(5*1024*1024),'test.jpg','image/jpeg')"),
        (["exceeds the maximum", "oversized"], f"{camel}Page.uploadFile(Buffer.alloc(5*1024*1024+1),'over.jpg','image/jpeg')"),
        (["success toast", "success notification"], f"{camel}Page.verifySuccessToast()"),
        (["error toast", "error notification"], f"{camel}Page.verifyErrorToast()"),
        (["validation error"], f"{camel}Page.verifyValidationErrors()"),
        (["operation completes", "updated state"], f"{camel}Page.verifySuccessState()"),
        (["still be visible", "persists"], f"{camel}Page.verifyStatePersisted()"),
        (["initials", "fallback avatar"], f"{camel}Page.verifyInitialsAvatar()"),
    ]
    lines, seen = [], set()
    for step in all_steps:
        # Split off the Gherkin keyword; non-step lines are skipped.
        m = re.match(r"^(Given|When|Then|And|But)\s+", step)
        if not m: continue
        kw, text = m.group(1), step[len(m.group(0)):]
        # Deduplicate by whitespace-normalized, lowercased step text so the
        # same step used under different keywords produces one definition.
        norm = re.sub(r"\s+", " ", text.lower())
        if norm in seen: continue
        seen.add(norm)
        lower = step.lower()
        method = next((mth for kws, mth in mappings if any(k in lower for k in kws)), None)
        if not method: continue
        # Build the cucumber regex: escape the literal text, then turn each
        # quoted parameter into a (.+) capture.  The trailing .replace undoes
        # space escaping for pre-3.7 re.escape behavior (no-op on 3.7+).
        pat = re.sub(r'"[^"]+"', '(.+)', re.escape(text)).replace(r"\ ", " ")
        lines.append(f'''{kw}(
  /^{pat}$/,
  async function (): Promise<void> {{
    await {method};
  }},
);
''')
    return f'''import {{ Given, When, Then }} from "@cucumber/cucumber";
import {{ expect }} from "@playwright/test";
import {{ fixture }} from "@hooks/pageFixture";
import {page_class}Page from "@pages/{kebab}/{kebab}.page";

/**
 * Step Definitions: {page_class}
 * HealingDashboard: http://localhost:7890 — review pending suggestions.
 */
let {camel}Page: {page_class}Page;
{bg}
{"".join(lines)}
'''
|
|
1484
|
+
|
|
1485
|
+
|
|
1486
|
+
def _gen_feature_file(gherkin: str, kebab: str, page_class: str) -> str:
|
|
1487
|
+
"""Generate feature file content. Returns the full feature file as-is from gherkin."""
|
|
1488
|
+
# The feature file is already in gherkin format, so we just return it
|
|
1489
|
+
# (This allows consume the gherkin directly without modification)
|
|
1490
|
+
return gherkin
|
|
1491
|
+
|
|
1492
|
+
|
|
1493
|
+
def _gen_cucumber_profile(kebab, app_name, auth_hook):
    """Generate the cucumber.js profile snippet for one feature.

    Args:
        kebab: kebab-case page identifier; dashes become underscores to form
            the profile name (cucumber profile keys must be identifiers).
        app_name: accepted for interface symmetry; not used in the snippet.
        auth_hook: when "microsoft-sso", the shared SSO steps file is added
            to the profile's require list.

    Returns:
        A JavaScript snippet (with usage comments) meant to be pasted into
        config/cucumber.js — not a standalone file.
    """
    profile = kebab.replace("-", "_")
    # Extra require entry only for SSO projects; empty string otherwise.
    sso = '"src/test/steps/microsoftSSO.steps.ts",' if auth_hook == "microsoft-sso" else ""
    return f'''// Add to config/cucumber.js
  {profile}: {{
    tags: process.env.tags || "@{profile}",
    paths: ["src/test/features/{kebab}.feature"],
    require: ["src/test/steps/{kebab}.steps.ts", {sso} "src/hooks/hooks.ts"],
    requireModule: ["ts-node/register", "tsconfig-paths/register"],
    format: ["progress", "html:test-results/cucumber-report-{kebab}.html",
      "json:test-results/cucumber-report-{kebab}.json", "rerun:@rerun.txt"],
    parallel: 2, retry: 2, retryTagFilter: "@flaky",
  }},
// Run: ENABLE_SELF_HEALING=true HEALING_DASHBOARD_PORT=7890 \\
//   cucumber-js --config=config/cucumber.js -p {profile}
'''
|
|
1509
|
+
|
|
1510
|
+
|
|
1511
|
+
# ============================================================================
|
|
1512
|
+
# Post-generation validation
|
|
1513
|
+
# ============================================================================
|
|
1514
|
+
|
|
1515
|
+
_STEP_STOP_WORDS = frozenset([
|
|
1516
|
+
'a', 'an', 'the', 'is', 'are', 'has', 'have', 'to', 'be', 'with',
|
|
1517
|
+
'in', 'on', 'at', 'for', 'of', 'by', 'as', 'it', 'its', 'from',
|
|
1518
|
+
'and', 'or', 'not', 'no', 'true', 'false', 'user', 'page',
|
|
1519
|
+
])
|
|
1520
|
+
|
|
1521
|
+
|
|
1522
|
+
def _normalize_feature_steps(feature_content: str, step_defs_content: str) -> tuple:
    """Auto-quote unquoted string parameters in feature file steps.

    Builds compiled regex patterns from the generated step definitions, then for
    each feature step that does NOT already match any pattern, iteratively wraps
    unquoted alphanumeric tokens in double-quotes until a match is found.

    Only a candidate that makes the whole step match a known pattern is kept,
    so quoting can never break a step that would otherwise have matched.

    Returns (normalized_content: str, warnings: list[str]).
    """
    warnings: list = []

    # Compile step-definition regex patterns.  /^...$/ literals are pulled
    # out of the generated .steps.ts source; "\ " → "\s+" loosens space
    # escaping (pre-3.7 re.escape output).  Invalid patterns are skipped.
    patterns: list = []
    for line in step_defs_content.splitlines():
        m = re.search(r'/\^(.+?)\$/', line)
        if m:
            pat_str = m.group(1).replace(r'\ ', r'\s+')
            try:
                patterns.append(re.compile(r'^' + pat_str + r'$', re.IGNORECASE))
            except re.error:
                pass

    # No patterns → nothing to normalize against; return input untouched.
    if not patterns:
        return feature_content, warnings

    kws = ('Given', 'When', 'Then', 'And', 'But')
    result_lines: list = []

    for line in feature_content.splitlines():
        stripped = line.strip()
        kw = next((k for k in kws if stripped.startswith(k + ' ')), None)
        # Non-step lines (Feature:, Scenario:, comments, blanks) pass through.
        if not kw:
            result_lines.append(line)
            continue

        # Preserve the original leading whitespace when re-emitting the step.
        indent = line[: len(line) - len(line.lstrip())]
        text = stripped[len(kw) + 1:]

        # Already matches a pattern — keep as-is
        if any(p.match(text) for p in patterns):
            result_lines.append(line)
            continue

        # Iteratively quote the first unquoted token that produces a match
        fixed = text
        for _ in range(5):  # at most 5 params per step
            replaced = False
            # Candidate tokens: 2+ chars, start with a letter, not already
            # inside quotes or adjacent to a word char.  NOTE(review):
            # single-character tokens can never be auto-quoted by this regex.
            for tm in re.finditer(r'(?<!["\'\w])([A-Za-z][A-Za-z0-9_\-]+)(?!["\'\w])', fixed):
                tok = tm.group(1)
                if tok.lower() in _STEP_STOP_WORDS:
                    continue
                candidate = fixed[: tm.start(1)] + f'"{tok}"' + fixed[tm.end(1):]
                if any(p.match(candidate) for p in patterns):
                    # Warning shows the ORIGINAL text vs. the latest candidate;
                    # for multi-token fixes intermediate states are not listed.
                    warnings.append(
                        f"Auto-quoted '{tok}' in step: \"{kw} {text}\" → \"{kw} {candidate}\""
                    )
                    fixed = candidate
                    replaced = True
                    break
            if not replaced:
                break

        result_lines.append(f"{indent}{kw} {fixed}")

    return '\n'.join(result_lines), warnings
|
|
1587
|
+
|
|
1588
|
+
|
|
1589
|
+
def _check_import_consistency(files: dict, kebab: str, page_class: str) -> list:
|
|
1590
|
+
"""Verify internal imports between generated .ts files resolve to known file keys.
|
|
1591
|
+
|
|
1592
|
+
Maps Helix-QA tsconfig path aliases and relative paths to expected keys in the
|
|
1593
|
+
files dict. Reports any import whose resolved target is absent.
|
|
1594
|
+
|
|
1595
|
+
Returns list of issue strings.
|
|
1596
|
+
"""
|
|
1597
|
+
issues: list = []
|
|
1598
|
+
|
|
1599
|
+
alias_map: dict = {
|
|
1600
|
+
f"@pages/{kebab}/{kebab}.page": f"src/pages/{kebab}.page.ts",
|
|
1601
|
+
f"@pages/{kebab}.page": f"src/pages/{kebab}.page.ts",
|
|
1602
|
+
"./locators": f"src/locators/{kebab}.locators.ts",
|
|
1603
|
+
f"../locators/{kebab}.locators": f"src/locators/{kebab}.locators.ts",
|
|
1604
|
+
f"@locators/{kebab}.locators": f"src/locators/{kebab}.locators.ts",
|
|
1605
|
+
}
|
|
1606
|
+
internal_prefixes = ('./', '../', '@pages/', '@locators/')
|
|
1607
|
+
|
|
1608
|
+
for file_key, content in files.items():
|
|
1609
|
+
if not file_key.endswith('.ts'):
|
|
1610
|
+
continue
|
|
1611
|
+
for m in re.finditer(r"""from\s+['"]([^'"]+)['"]""", content):
|
|
1612
|
+
path = m.group(1)
|
|
1613
|
+
if not any(path.startswith(p) for p in internal_prefixes):
|
|
1614
|
+
continue
|
|
1615
|
+
if not path.strip():
|
|
1616
|
+
issues.append(f"{file_key}: empty import path detected")
|
|
1617
|
+
continue
|
|
1618
|
+
if path in alias_map:
|
|
1619
|
+
target = alias_map[path]
|
|
1620
|
+
if target not in files:
|
|
1621
|
+
issues.append(
|
|
1622
|
+
f"{file_key}: import '{path}' → expected '{target}' not in generated files"
|
|
1623
|
+
)
|
|
1624
|
+
|
|
1625
|
+
return issues
|
|
1626
|
+
|
|
1627
|
+
|
|
1628
|
+
def _validate_ts_syntax(content: str, file_name: str) -> list:
|
|
1629
|
+
"""Basic TypeScript syntax sanity checks.
|
|
1630
|
+
|
|
1631
|
+
Strips string literals, template literals, and comments before counting
|
|
1632
|
+
brace/paren balance to avoid false positives from characters inside strings.
|
|
1633
|
+
|
|
1634
|
+
Returns list of error strings.
|
|
1635
|
+
"""
|
|
1636
|
+
errors: list = []
|
|
1637
|
+
|
|
1638
|
+
# Strip literals and comments before balance checks
|
|
1639
|
+
s = re.sub(r'`[^`]*`', '``', content)
|
|
1640
|
+
s = re.sub(r'"[^"\n]*"', '""', s)
|
|
1641
|
+
s = re.sub(r"'[^'\n]*'", "''", s)
|
|
1642
|
+
s = re.sub(r'//[^\n]*', '', s)
|
|
1643
|
+
s = re.sub(r'/\*.*?\*/', '', s, flags=re.DOTALL)
|
|
1644
|
+
|
|
1645
|
+
nb = s.count('{') - s.count('}')
|
|
1646
|
+
if nb != 0:
|
|
1647
|
+
errors.append(f"{file_name}: unbalanced braces (delta={nb:+d})")
|
|
1648
|
+
|
|
1649
|
+
np_ = s.count('(') - s.count(')')
|
|
1650
|
+
if np_ != 0:
|
|
1651
|
+
errors.append(f"{file_name}: unbalanced parentheses (delta={np_:+d})")
|
|
1652
|
+
|
|
1653
|
+
if re.search(r"""from\s+['"]{2}""", content):
|
|
1654
|
+
errors.append(f"{file_name}: empty import path (from \"\")")
|
|
1655
|
+
|
|
1656
|
+
if ';;' in content:
|
|
1657
|
+
errors.append(f"{file_name}: double semicolons found")
|
|
1658
|
+
|
|
1659
|
+
return errors
|
|
1660
|
+
|
|
1661
|
+
|
|
1662
|
+
def _validate_step_coverage(feature_content: str, step_defs_content: str) -> list:
|
|
1663
|
+
"""Return feature steps that have no matching step definition regex pattern.
|
|
1664
|
+
|
|
1665
|
+
Returns list of uncovered step strings (keyword included).
|
|
1666
|
+
"""
|
|
1667
|
+
patterns: list = []
|
|
1668
|
+
for line in step_defs_content.splitlines():
|
|
1669
|
+
m = re.search(r'/\^(.+?)\$/', line)
|
|
1670
|
+
if m:
|
|
1671
|
+
pat_str = m.group(1).replace(r'\ ', r'\s+')
|
|
1672
|
+
try:
|
|
1673
|
+
patterns.append(re.compile(r'^' + pat_str + r'$', re.IGNORECASE))
|
|
1674
|
+
except re.error:
|
|
1675
|
+
pass
|
|
1676
|
+
|
|
1677
|
+
if not patterns:
|
|
1678
|
+
return []
|
|
1679
|
+
|
|
1680
|
+
kws = ('Given', 'When', 'Then', 'And', 'But')
|
|
1681
|
+
uncovered: list = []
|
|
1682
|
+
seen: set = set()
|
|
1683
|
+
|
|
1684
|
+
for line in feature_content.splitlines():
|
|
1685
|
+
stripped = line.strip()
|
|
1686
|
+
kw = next((k for k in kws if stripped.startswith(k + ' ')), None)
|
|
1687
|
+
if not kw:
|
|
1688
|
+
continue
|
|
1689
|
+
text = stripped[len(kw) + 1:]
|
|
1690
|
+
key = text.strip().lower()
|
|
1691
|
+
if key in seen:
|
|
1692
|
+
continue
|
|
1693
|
+
seen.add(key)
|
|
1694
|
+
if not any(p.match(text) for p in patterns):
|
|
1695
|
+
uncovered.append(f"{kw} {text}")
|
|
1696
|
+
|
|
1697
|
+
return uncovered
|
|
1698
|
+
|
|
1699
|
+
|
|
1700
|
+
def _run_pre_output_validation(
    files: dict, feature_key: str, step_key: str,
    kebab: str, page_class: str,
) -> dict:
    """Run all post-generation validations and return a consolidated summary.

    Checks:
    1. Import consistency — internal @pages / ./ imports resolve to files dict keys.
    2. TypeScript syntax — brace/paren balance, empty imports, double semicolons.
    3. Step coverage — every unique feature step has a matching step definition.

    Import and syntax problems are errors (fail validation); uncovered steps
    are warnings only.

    Returns dict with keys: valid, errors, warnings, import_issues,
    syntax_issues, uncovered_steps.
    """
    errors: list = []
    warnings: list = []

    # 1 — Import consistency across the generated .ts files.
    import_issues = _check_import_consistency(files, kebab, page_class)
    errors.extend(import_issues)

    # 2 — Per-file TypeScript syntax sanity.
    syntax_issues: dict = {}
    for name, body in files.items():
        if not name.endswith('.ts'):
            continue
        file_errors = _validate_ts_syntax(body, name)
        if file_errors:
            syntax_issues[name] = file_errors
            errors.extend(file_errors)

    # 3 — Feature-step coverage against the step definitions (warnings only).
    uncovered = _validate_step_coverage(
        files.get(feature_key, ""), files.get(step_key, ""))
    warnings.extend(f"No step definition matches: '{step}'" for step in uncovered)

    return {
        "valid": not errors,
        "errors": errors,
        "warnings": warnings,
        "import_issues": import_issues,
        "syntax_issues": syntax_issues,
        "uncovered_steps": uncovered,
    }
|
|
1745
|
+
|
|
1746
|
+
|
|
1747
|
+
# ============================================================================
|
|
1748
|
+
# scaffold_locator_repository
|
|
1749
|
+
# ============================================================================
|
|
1750
|
+
|
|
1751
|
+
def _scaffold_locator_repository(
    output_dir, enable_ai_vision, repository_path,
    dashboard_port, enable_timing_healing, enable_visual_regression,
    enable_devtools_healer=True, ai_healing_provider="anthropic",
):
    """Scaffold the self-healing utility classes and describe the healing stack.

    Builds a map of TypeScript file paths → generated content (LocatorHealer
    and LocatorRepository always; TimingHealer / VisualIntentChecker /
    DevToolsHealer per flag; HealingDashboard always), plus a human-readable
    description of the active healing strategy chain, layer summary, env-var
    reference and dashboard metadata.

    Args:
        output_dir: Directory prefix for every generated .ts file path.
        enable_ai_vision: Emit AI Vision healing in LocatorHealer (strategy 6).
        repository_path: Path of the persisted healed-selector repository.
        dashboard_port: Local port for the HealingDashboard.
        enable_timing_healing: Also generate TimingHealer.ts.
        enable_visual_regression: Also generate VisualIntentChecker.ts.
        enable_devtools_healer: Also generate DevToolsHealer.ts (default True).
        ai_healing_provider: Provider label echoed in the result when AI
            vision is enabled; otherwise "disabled" is reported.

    Returns:
        Dict with keys: files, output_dir, repository_path, dashboard_port,
        dashboard_url, healing_strategies, healing_layers,
        ai_healing_provider, env_vars, note.
    """
    # Core files — always generated regardless of optional flags.
    files = {
        f"{output_dir}/LocatorHealer.ts": _gen_locator_healer_cls(enable_ai_vision, enable_devtools_healer),
        f"{output_dir}/LocatorRepository.ts": _gen_locator_repo_cls(repository_path),
    }
    # Optional healing layers, each gated by its own flag.
    if enable_timing_healing:
        files[f"{output_dir}/TimingHealer.ts"] = _gen_timing_healer_cls(repository_path, dashboard_port)
    if enable_visual_regression:
        files[f"{output_dir}/VisualIntentChecker.ts"] = _gen_visual_checker_cls(repository_path, dashboard_port)
    if enable_devtools_healer:
        files[f"{output_dir}/DevToolsHealer.ts"] = _gen_devtools_healer_cls()
    # Dashboard is always generated, even when optional layers are off.
    files[f"{output_dir}/HealingDashboard.ts"] = _gen_healing_dashboard_cls(repository_path, dashboard_port)

    # Human-readable strategy chain.  NOTE(review): strategy numbers 6–8 are
    # fixed identifiers referenced by the env_vars text below; they do not
    # renumber when AI vision is disabled — presumably intentional, confirm.
    strategies = [
        "1. Healed selector from LocatorRepository (zero overhead on repeat runs)",
        "2. Primary CSS/data-testid selector",
        "3. Role-based: page.getByRole() with intent keywords",
        "4. Label-based: page.getByLabel() from intent string",
        "5. Text-based: page.getByText() partial match",
    ]
    if enable_ai_vision:
        strategies.append(
            "6. playwright-cli snapshot → AI Vision (provider: anthropic | copilot | claude-code) "
            "→ healed selector persisted [skip with ENABLE_AI_HEALING=false]"
        )
    if enable_devtools_healer:
        strategies.append("7. DevToolsHealer: CDPSession AX tree name match (no AI cost, no external process)")
        strategies.append("8. DevToolsHealer: CDPSession bounding box spatial search at last known coordinates")
    if enable_timing_healing:
        strategies.append(f"+ TimingHealer: network trace drift detection → auto-adjusted timeouts → HealingDashboard :{dashboard_port}")
    if enable_visual_regression:
        strategies.append(f"+ VisualIntentChecker: element screenshot diff → baseline comparison → HealingDashboard :{dashboard_port}")
    strategies.append(f"+ HealingDashboard: http://localhost:{dashboard_port} — review/approve all AI suggestions before commit")

    return {
        "files": files,
        "output_dir": output_dir,
        "repository_path": repository_path,
        "dashboard_port": dashboard_port,
        "dashboard_url": f"http://localhost:{dashboard_port}",
        "healing_strategies": strategies,
        # Per-layer one-line summary, reflecting which flags were enabled.
        "healing_layers": {
            "locator": "LocatorHealer.ts — selector chain + AI Vision (anthropic|copilot|claude-code, ENABLE_AI_HEALING=false to skip)",
            "timing": f"TimingHealer.ts — network drift auto-healing {'enabled' if enable_timing_healing else 'disabled'}",
            "visual": f"VisualIntentChecker.ts — screenshot regression {'enabled' if enable_visual_regression else 'disabled'}",
            "devtools": f"DevToolsHealer.ts — CDPSession AX tree + bounding box healing {'enabled' if enable_devtools_healer else 'disabled'}",
            "dashboard": f"HealingDashboard.ts — http://localhost:{dashboard_port}",
        },
        "ai_healing_provider": ai_healing_provider if enable_ai_vision else "disabled",
        # Runtime environment variables honored by the generated TypeScript.
        "env_vars": {
            "ENABLE_AI_HEALING": "Set to 'false' to skip strategy 6 without disabling strategies 7 & 8.",
            "AI_HEALING_PROVIDER": "Override provider at runtime: anthropic | copilot | claude-code.",
            "AI_HEALING_API_KEY": "Anthropic secret key — required when AI_HEALING_PROVIDER=anthropic.",
            "GITHUB_TOKEN": "GitHub Copilot token — required when AI_HEALING_PROVIDER=copilot.",
            "ANTHROPIC_API_KEY": "Set automatically by the claude CLI session (AI_HEALING_PROVIDER=claude-code).",
        },
        "note": (
            "DevToolsHealer uses page.context().newCDPSession(page) at test runtime — "
            "no external Chrome process or Playwright MCP connection needed during test execution."
        ),
    }
|
|
1816
|
+
|
|
1817
|
+
|
|
1818
|
+
def _gen_locator_healer_cls(enable_ai_vision, enable_devtools_healer=True):
|
|
1819
|
+
ai_import = 'import { execSync } from "child_process";' if enable_ai_vision else ""
|
|
1820
|
+
cdp_import = 'import { DevToolsHealer } from "./DevToolsHealer";' if enable_devtools_healer else ""
|
|
1821
|
+
cdp_field = " private devtools!: DevToolsHealer;" if enable_devtools_healer else ""
|
|
1822
|
+
cdp_init = (" if ((this as any).devtools === undefined) "
|
|
1823
|
+
"(this as any).devtools = new DevToolsHealer(this.page, this.logger, this.repo);") if enable_devtools_healer else ""
|
|
1824
|
+
cdp_call = ("""
|
|
1825
|
+
// Strategy 7: DevToolsHealer — AX tree name match via CDPSession (no AI cost)
|
|
1826
|
+
const cdpAx = await this.devtools.findByAxName(key, intent, selector);
|
|
1827
|
+
if (cdpAx) return cdpAx;
|
|
1828
|
+
// Strategy 8: DevToolsHealer — bounding box spatial search via CDPSession
|
|
1829
|
+
const cdpBb = await this.devtools.findByBoundingBox(key, this.repo.getBBox(key), intent);
|
|
1830
|
+
if (cdpBb) return cdpBb;""") if enable_devtools_healer else ""
|
|
1831
|
+
|
|
1832
|
+
# ------------------------------------------------------------------
|
|
1833
|
+
# AI Vision method — emitted only when enable_ai_vision=True.
|
|
1834
|
+
#
|
|
1835
|
+
# Runtime guard: ENABLE_AI_HEALING=false skips strategy 6 entirely
|
|
1836
|
+
# so the full healing chain remains active in air-gapped / strict-CI
|
|
1837
|
+
# environments without disabling strategies 7 & 8.
|
|
1838
|
+
#
|
|
1839
|
+
# Provider routing via AI_HEALING_PROVIDER:
|
|
1840
|
+
# "anthropic" — direct Anthropic API (requires AI_HEALING_API_KEY)
|
|
1841
|
+
# "copilot" — GitHub Copilot chat completions endpoint
|
|
1842
|
+
# (uses GITHUB_TOKEN from the Copilot extension / CLI)
|
|
1843
|
+
# "claude-code" — Claude Code local proxy (process.env.ANTHROPIC_API_KEY
|
|
1844
|
+
# set automatically by `claude` CLI session)
|
|
1845
|
+
#
|
|
1846
|
+
# If AI_HEALING_PROVIDER is unset, "anthropic" is used and
|
|
1847
|
+
# AI_HEALING_API_KEY must be present. In Copilot / Claude Code mode
|
|
1848
|
+
# no separate key is needed — the coding agent's session token is reused.
|
|
1849
|
+
# ------------------------------------------------------------------
|
|
1850
|
+
ai_method = """
|
|
1851
|
+
// ── AI Vision provider routing ─────────────────────────────────────────
|
|
1852
|
+
//
|
|
1853
|
+
// Controlled by env vars — no code change needed to switch providers:
|
|
1854
|
+
//
|
|
1855
|
+
// ENABLE_AI_HEALING=false → skip strategy 6; fall through to 7 & 8
|
|
1856
|
+
// AI_HEALING_PROVIDER=anthropic → direct Anthropic API (default)
|
|
1857
|
+
// AI_HEALING_PROVIDER=copilot → GitHub Copilot chat completions
|
|
1858
|
+
// AI_HEALING_PROVIDER=claude-code → Claude Code local proxy
|
|
1859
|
+
//
|
|
1860
|
+
// Provider auth:
|
|
1861
|
+
// anthropic → AI_HEALING_API_KEY (Anthropic secret key)
|
|
1862
|
+
// copilot → GITHUB_TOKEN (set by Copilot extension / gh auth login)
|
|
1863
|
+
// claude-code → ANTHROPIC_API_KEY (set automatically by `claude` CLI session)
|
|
1864
|
+
//
|
|
1865
|
+
private _aiVisionEndpoint(): { url: string; authHeader: string; bodyMapper: (prompt: string) => object } {
|
|
1866
|
+
const provider = (process.env.AI_HEALING_PROVIDER ?? "anthropic").toLowerCase();
|
|
1867
|
+
if (provider === "copilot") {
|
|
1868
|
+
// GitHub Copilot chat completions — reuses the coding agent's session token.
|
|
1869
|
+
// Token is set by the Copilot VS Code extension or `gh auth login --scopes copilot`.
|
|
1870
|
+
const token = process.env.GITHUB_TOKEN ?? "";
|
|
1871
|
+
return {
|
|
1872
|
+
url: "https://api.githubcopilot.com/chat/completions",
|
|
1873
|
+
authHeader: `Bearer ${token}`,
|
|
1874
|
+
bodyMapper: (prompt: string) => ({
|
|
1875
|
+
model: "gpt-4o",
|
|
1876
|
+
messages: [{ role: "user", content: prompt }],
|
|
1877
|
+
max_tokens: 500,
|
|
1878
|
+
}),
|
|
1879
|
+
};
|
|
1880
|
+
}
|
|
1881
|
+
if (provider === "claude-code") {
|
|
1882
|
+
// Claude Code sets ANTHROPIC_API_KEY automatically in its spawned shell.
|
|
1883
|
+
// Uses the same Anthropic messages endpoint but authenticates via the
|
|
1884
|
+
// coding agent's licence — no separate key needed at the project level.
|
|
1885
|
+
const key = process.env.ANTHROPIC_API_KEY ?? "";
|
|
1886
|
+
return {
|
|
1887
|
+
url: "https://api.anthropic.com/v1/messages",
|
|
1888
|
+
authHeader: `x-api-key ${key}`,
|
|
1889
|
+
bodyMapper: (prompt: string) => ({
|
|
1890
|
+
model: "claude-sonnet-4-20250514", max_tokens: 1000,
|
|
1891
|
+
messages: [{ role: "user", content: prompt }],
|
|
1892
|
+
}),
|
|
1893
|
+
};
|
|
1894
|
+
}
|
|
1895
|
+
// Default: direct Anthropic API with explicit project key.
|
|
1896
|
+
const key = process.env.AI_HEALING_API_KEY ?? process.env.ANTHROPIC_API_KEY ?? "";
|
|
1897
|
+
return {
|
|
1898
|
+
url: "https://api.anthropic.com/v1/messages",
|
|
1899
|
+
authHeader: `x-api-key ${key}`,
|
|
1900
|
+
bodyMapper: (prompt: string) => ({
|
|
1901
|
+
model: "claude-sonnet-4-20250514", max_tokens: 1000,
|
|
1902
|
+
messages: [{ role: "user", content: prompt }],
|
|
1903
|
+
}),
|
|
1904
|
+
};
|
|
1905
|
+
}
|
|
1906
|
+
|
|
1907
|
+
private _extractSelector(provider: string, data: any): string {
|
|
1908
|
+
// GitHub Copilot returns OpenAI-compatible response shape.
|
|
1909
|
+
if (provider === "copilot") {
|
|
1910
|
+
return (data.choices?.[0]?.message?.content ?? "").trim();
|
|
1911
|
+
}
|
|
1912
|
+
// Anthropic (direct or Claude Code proxy) returns content blocks.
|
|
1913
|
+
return (data.content?.[0]?.text ?? "").trim();
|
|
1914
|
+
}
|
|
1915
|
+
|
|
1916
|
+
private async healViaAiVision(key: string, intent: string): Promise<Locator> {
|
|
1917
|
+
// ── Runtime guard — skip strategy 6 without disabling the full chain ──
|
|
1918
|
+
if (process.env.ENABLE_AI_HEALING === "false") {
|
|
1919
|
+
this.logger.info("⚕ AI Vision skipped (ENABLE_AI_HEALING=false)");
|
|
1920
|
+
throw new Error(`LocatorHealer: strategy 6 disabled via ENABLE_AI_HEALING=false`);
|
|
1921
|
+
}
|
|
1922
|
+
|
|
1923
|
+
const provider = (process.env.AI_HEALING_PROVIDER ?? "anthropic").toLowerCase();
|
|
1924
|
+
this.logger.warn(`⚕ AI Vision [${provider}]: key="${key}" intent="${intent}"`);
|
|
1925
|
+
try {
|
|
1926
|
+
const snapshot = execSync("playwright-cli snapshot", { encoding: "utf8", timeout: 10_000 });
|
|
1927
|
+
const { url, authHeader, bodyMapper } = this._aiVisionEndpoint();
|
|
1928
|
+
const prompt = `DOM snapshot:\\n${snapshot.slice(0, 3500)}\\nFind: "${intent}". Return ONLY a CSS selector, no explanation.`;
|
|
1929
|
+
|
|
1930
|
+
const headers: Record<string, string> = { "Content-Type": "application/json" };
|
|
1931
|
+
// authHeader format is "<scheme> <token>" for Bearer, or "<header-name> <value>" for x-api-key
|
|
1932
|
+
if (authHeader.startsWith("Bearer ")) {
|
|
1933
|
+
headers["Authorization"] = authHeader;
|
|
1934
|
+
} else if (authHeader.startsWith("x-api-key ")) {
|
|
1935
|
+
headers["x-api-key"] = authHeader.slice("x-api-key ".length);
|
|
1936
|
+
headers["anthropic-version"] = "2023-06-01";
|
|
1937
|
+
}
|
|
1938
|
+
|
|
1939
|
+
const resp = await fetch(url, {
|
|
1940
|
+
method: "POST",
|
|
1941
|
+
headers,
|
|
1942
|
+
body: JSON.stringify(bodyMapper(prompt)),
|
|
1943
|
+
});
|
|
1944
|
+
if (!resp.ok) {
|
|
1945
|
+
const body = await resp.text();
|
|
1946
|
+
throw new Error(`AI Vision HTTP ${resp.status}: ${body.slice(0, 200)}`);
|
|
1947
|
+
}
|
|
1948
|
+
const data = await resp.json();
|
|
1949
|
+
const sel = this._extractSelector(provider, data);
|
|
1950
|
+
if (sel) {
|
|
1951
|
+
const loc = this.page.locator(sel);
|
|
1952
|
+
await loc.waitFor({ state: "attached", timeout: 5_000 });
|
|
1953
|
+
this.repo.updateHealed(key, sel, intent, `ai-vision-${provider}`);
|
|
1954
|
+
this.logger.info(`⚕ AI Vision healed [${provider}]: ${key} → ${sel}`);
|
|
1955
|
+
return loc;
|
|
1956
|
+
}
|
|
1957
|
+
} catch (err) { this.logger.error(`⚕ AI Vision failed [${provider}]: ${err}`); }
|
|
1958
|
+
throw new Error(`LocatorHealer: all strategies exhausted for key="${key}" intent="${intent}"`);
|
|
1959
|
+
}
|
|
1960
|
+
""" if enable_ai_vision else """
|
|
1961
|
+
private async healViaAiVision(key: string, intent: string): Promise<Locator> {
|
|
1962
|
+
throw new Error(`LocatorHealer: all strategies exhausted for key="${key}" intent="${intent}"`);
|
|
1963
|
+
}
|
|
1964
|
+
"""
|
|
1965
|
+
return f'''import {{ Page, Locator }} from "@playwright/test";
|
|
1966
|
+
import {{ Logger }} from "winston";
|
|
1967
|
+
{ai_import}
|
|
1968
|
+
{cdp_import}
|
|
1969
|
+
import {{ LocatorRepository }} from "./LocatorRepository";
|
|
1970
|
+
|
|
1971
|
+
/**
|
|
1972
|
+
* LocatorHealer — Multi-Strategy Locator Healing
|
|
1973
|
+
*
|
|
1974
|
+
* Chain: cached-healed → primary → role → label → text →
|
|
1975
|
+
* AI Vision (6) → DevToolsHealer AX-name (7) → DevToolsHealer bbox (8)
|
|
1976
|
+
*
|
|
1977
|
+
* Strategies 7 & 8 use page.context().newCDPSession(page) — no external process.
|
|
1978
|
+
*
|
|
1979
|
+
* All healed selectors persisted in LocatorRepository (instant on repeat runs).
|
|
1980
|
+
* Bounding boxes captured after every successful click for Strategy 8.
|
|
1981
|
+
*/
|
|
1982
|
+
export class LocatorHealer {{
|
|
1983
|
+
private static readonly TIMEOUT = 8_000;
|
|
1984
|
+
private static readonly HEAL_TO = 15_000;
|
|
1985
|
+
private static readonly ATTACH_TO = 3_000;
|
|
1986
|
+
{cdp_field}
|
|
1987
|
+
|
|
1988
|
+
constructor(
|
|
1989
|
+
private readonly page: Page,
|
|
1990
|
+
private readonly logger: Logger,
|
|
1991
|
+
private readonly repo: LocatorRepository,
|
|
1992
|
+
) {{
|
|
1993
|
+
{cdp_init}
|
|
1994
|
+
}}
|
|
1995
|
+
|
|
1996
|
+
async clickWithHealing(key: string, selector: string, intent: string): Promise<void> {{
|
|
1997
|
+
const loc = await this.resolve(key, selector, intent);
|
|
1998
|
+
await loc.click({{ timeout: LocatorHealer.TIMEOUT }});
|
|
1999
|
+
this.repo.incrementSuccess(key);
|
|
2000
|
+
this.logger.info(`✓ click: ${{key}}`);
|
|
2001
|
+
if ((this as any).devtools) await (this as any).devtools.captureBoundingBox(key, selector).catch(()=>{{}});
|
|
2002
|
+
}}
|
|
2003
|
+
|
|
2004
|
+
async fillWithHealing(key: string, selector: string, value: string, intent: string): Promise<void> {{
|
|
2005
|
+
const loc = await this.resolve(key, selector, intent);
|
|
2006
|
+
await loc.fill(value, {{ timeout: LocatorHealer.TIMEOUT }});
|
|
2007
|
+
this.repo.incrementSuccess(key);
|
|
2008
|
+
}}
|
|
2009
|
+
|
|
2010
|
+
async assertVisibleWithHealing(key: string, selector: string, intent: string): Promise<void> {{
|
|
2011
|
+
const loc = await this.resolve(key, selector, intent);
|
|
2012
|
+
await loc.waitFor({{ state: "visible", timeout: LocatorHealer.HEAL_TO }});
|
|
2013
|
+
this.logger.info(`✓ visible: ${{key}}`);
|
|
2014
|
+
}}
|
|
2015
|
+
|
|
2016
|
+
async getTextWithHealing(key: string, selector: string, intent: string): Promise<string> {{
|
|
2017
|
+
return (await (await this.resolve(key, selector, intent)).textContent()) ?? "";
|
|
2018
|
+
}}
|
|
2019
|
+
|
|
2020
|
+
private async resolve(key: string, selector: string, intent: string): Promise<Locator> {{
|
|
2021
|
+
const healed = this.repo.getHealed(key);
|
|
2022
|
+
if (healed && healed !== selector) {{
|
|
2023
|
+
try {{
|
|
2024
|
+
const l = this.page.locator(healed);
|
|
2025
|
+
await l.waitFor({{ state: "attached", timeout: LocatorHealer.ATTACH_TO }});
|
|
2026
|
+
return l;
|
|
2027
|
+
}} catch {{}}
|
|
2028
|
+
}}
|
|
2029
|
+
try {{
|
|
2030
|
+
const l = this.page.locator(selector);
|
|
2031
|
+
await l.waitFor({{ state: "attached", timeout: 5_000 }});
|
|
2032
|
+
return l;
|
|
2033
|
+
}} catch {{
|
|
2034
|
+
this.logger.warn(`⚕ Primary failed: ${{key}}`);
|
|
2035
|
+
this.repo.incrementFailure(key);
|
|
2036
|
+
}}
|
|
2037
|
+
const words = intent.split(" ").slice(0, 3).join("|");
|
|
2038
|
+
for (const role of ["button", "link", "textbox", "combobox"] as const) {{
|
|
2039
|
+
try {{
|
|
2040
|
+
const l = this.page.getByRole(role, {{ name: new RegExp(words, "i") }});
|
|
2041
|
+
await l.waitFor({{ state: "attached", timeout: LocatorHealer.ATTACH_TO }});
|
|
2042
|
+
this.repo.updateHealed(key, `[role:${{role}}]`, intent, "role");
|
|
2043
|
+
this.logger.warn(`⚕ Role-healed: ${{key}}`);
|
|
2044
|
+
return l;
|
|
2045
|
+
}} catch {{}}
|
|
2046
|
+
}}
|
|
2047
|
+
try {{
|
|
2048
|
+
const l = this.page.getByLabel(intent, {{ exact: false }});
|
|
2049
|
+
await l.waitFor({{ state: "attached", timeout: LocatorHealer.ATTACH_TO }});
|
|
2050
|
+
this.repo.updateHealed(key, "[label-healed]", intent, "label");
|
|
2051
|
+
return l;
|
|
2052
|
+
}} catch {{}}
|
|
2053
|
+
try {{
|
|
2054
|
+
const w = intent.split(" ").find(w => w.length > 3) ?? intent;
|
|
2055
|
+
const l = this.page.getByText(w, {{ exact: false }});
|
|
2056
|
+
await l.waitFor({{ state: "attached", timeout: LocatorHealer.ATTACH_TO }});
|
|
2057
|
+
this.repo.updateHealed(key, "[text-healed]", intent, "text");
|
|
2058
|
+
return l;
|
|
2059
|
+
}} catch {{}}
|
|
2060
|
+
{cdp_call}
|
|
2061
|
+
return this.healViaAiVision(key, intent);
|
|
2062
|
+
}}
|
|
2063
|
+
{ai_method}
|
|
2064
|
+
}}
|
|
2065
|
+
'''
|
|
2066
|
+
|
|
2067
|
+
|
|
2068
|
+
def _gen_locator_repo_cls(repository_path: str) -> str:
    """Generate the TypeScript source for the ``LocatorRepository`` class.

    The emitted class is the shared persistence layer used by every healing
    component visible in this module (LocatorHealer, TimingHealer,
    VisualIntentChecker, HealingDashboard).  It keeps ``LocatorEntry``
    records in an in-memory ``Map``, persists them as pretty-printed JSON to
    ``repository_path``, and implements the queue/approve/reject suggestion
    workflow that the HealingDashboard drives.

    Args:
        repository_path: File path baked into the generated code as the
            default JSON persistence location (used both in the class
            doc-comment and as the constructor default).

    Returns:
        A complete TypeScript module as a string.
    """
    # NOTE: inside the f-string, "{{" / "}}" emit literal TS braces and
    # "${{...}}" emits a TS template-literal interpolation "${...}".
    return f'''import * as fs from "fs";
import * as path from "path";

export interface BoundingBox {{ x:number; y:number; width:number; height:number; }}

export interface LocatorEntry {{
  selector: string; intent: string; stability?: number; healedSelector?: string;
  healingHistory: Array<{{ from:string; to:string; timestamp:string; strategy:string }}>;
  lastHealed?: string; successCount: number; failureCount: number;
  /** Bounding box from last successful action — used by DevToolsHealer.findByBoundingBox */
  lastBoundingBox?: BoundingBox;
  /** Pending AI suggestion awaiting HealingDashboard approval before commit */
  pendingSuggestion?: {{ selector:string; strategy:string; suggestedAt:string }};
}}

/**
 * LocatorRepository — Shared state store for all healing layers.
 * Persists to "{repository_path}".
 * queueSuggestion / approveSuggestion / rejectSuggestion power the HealingDashboard.
 */
export class LocatorRepository {{
  private readonly store = new Map<string, LocatorEntry>();
  constructor(private readonly filePath = "{repository_path}") {{ this.load(); }}

  register(key: string, selector: string, intent: string, stability = 0): void {{
    if (!this.store.has(key))
      this.store.set(key, {{ selector, intent, stability, healingHistory: [], successCount: 0, failureCount: 0 }});
    else {{ this.store.get(key)!.intent = intent; this.store.get(key)!.stability = stability; }}
  }}

  getHealed(key: string): string | undefined {{ return this.store.get(key)?.healedSelector; }}

  updateHealed(key: string, sel: string, intent: string, strategy = "unknown"): void {{
    const e = this.store.get(key); if (!e) return;
    e.healingHistory.push({{ from: e.healedSelector ?? e.selector, to: sel,
      timestamp: new Date().toISOString(), strategy }});
    e.healedSelector = sel; e.lastHealed = new Date().toISOString();
    delete e.pendingSuggestion; this.persist();
  }}

  queueSuggestion(key: string, suggestedSelector: string, strategy: string): void {{
    const e = this.store.get(key); if (!e) return;
    e.pendingSuggestion = {{ selector: suggestedSelector, strategy, suggestedAt: new Date().toISOString() }};
    this.persist();
  }}

  approveSuggestion(key: string): boolean {{
    const e = this.store.get(key);
    if (!e?.pendingSuggestion) return false;
    this.updateHealed(key, e.pendingSuggestion.selector, e.intent,
      e.pendingSuggestion.strategy + ":approved");
    return true;
  }}

  rejectSuggestion(key: string): boolean {{
    const e = this.store.get(key);
    if (!e?.pendingSuggestion) return false;
    delete e.pendingSuggestion; this.persist(); return true;
  }}

  updateBoundingBox(key: string, bbox: BoundingBox): void {{
    const e = this.store.get(key);
    if (e) {{ e.lastBoundingBox = bbox; this.persist(); }}
  }}
  getBBox(key: string): BoundingBox | null {{
    return this.store.get(key)?.lastBoundingBox ?? null;
  }}

  getPendingSuggestions() {{
    return [...this.store.entries()]
      .filter(([, v]) => v.pendingSuggestion)
      .map(([k, v]) => ({{ key: k, ...v }}));
  }}

  incrementSuccess(key: string) {{ const e=this.store.get(key); if(e) {{ e.successCount++; this.persist(); }} }}
  incrementFailure(key: string) {{ const e=this.store.get(key); if(e) {{ e.failureCount++; this.persist(); }} }}

  getAnalytics() {{
    const entries = [...this.store.entries()];
    return {{
      totalLocators: entries.length,
      healedLocators: entries.filter(([, v]) => v.healedSelector).length,
      pendingApprovals: entries.filter(([, v]) => v.pendingSuggestion).length,
      mostHealedKeys: entries
        .sort(([, a], [, b]) => b.healingHistory.length - a.healingHistory.length)
        .slice(0, 10)
        .map(([k, v]) => ({{ key: k, healCount: v.healingHistory.length, intent: v.intent }})),
    }};
  }}

  exportJson(): string {{
    const o: Record<string, LocatorEntry> = {{}};
    this.store.forEach((v, k) => {{ o[k] = v; }});
    return JSON.stringify(o, null, 2);
  }}

  private persist(): void {{
    try {{
      fs.mkdirSync(path.dirname(this.filePath), {{ recursive: true }});
      fs.writeFileSync(this.filePath, this.exportJson(), "utf8");
    }} catch {{}}
  }}

  private load(): void {{
    try {{
      if (fs.existsSync(this.filePath)) {{
        const raw = JSON.parse(fs.readFileSync(this.filePath, "utf8"));
        Object.entries(raw).forEach(([k, v]) => this.store.set(k, v as LocatorEntry));
      }}
    }} catch {{}}
  }}
}}
'''
|
|
2182
|
+
|
|
2183
|
+
|
|
2184
|
+
def _gen_timing_healer_cls(repository_path: str, dashboard_port: int) -> str:
    """Generate the TypeScript source for the ``TimingHealer`` class.

    The emitted class tracks observed durations per action key (last 20
    observations), adjusts future waits to ``last * 1.5`` capped at 60s,
    and — when an observation drifts more than 20% over its baseline —
    queues a ``timing:<key>`` suggestion on the LocatorRepository for
    dashboard review.

    Args:
        repository_path: Accepted for signature parity with the sibling
            generators; it is not interpolated into this template.
        dashboard_port: Port number embedded in the generated log messages
            and class doc-comment pointing engineers at the dashboard.

    Returns:
        A complete TypeScript module as a string.
    """
    # NOTE: "{{" / "}}" in the f-string emit literal TS braces;
    # "${{...}}" emits a TS template-literal interpolation "${...}".
    return f'''import {{ Page }} from "@playwright/test";
import {{ Logger }} from "winston";
import {{ LocatorRepository }} from "./LocatorRepository";

/**
 * TimingHealer — Layer 2: Network Trace Timing Healing
 *
 * When an endpoint drifts > 20% from baseline, auto-adjusts timeout for the
 * current run and queues a suggestion to HealingDashboard :{dashboard_port}.
 */
export class TimingHealer {{
  private static readonly HEADROOM = 1.5;
  private static readonly DRIFT_THRESHOLD = 0.2;
  private static readonly MAX_TIMEOUT = 60_000;
  private readonly observations = new Map<string, number[]>();

  constructor(
    private readonly page: Page,
    private readonly logger: Logger,
    private readonly repo: LocatorRepository,
  ) {{}}

  async waitForNetworkIdle(actionKey: string, baselineMs = 10_000): Promise<void> {{
    const adjusted = this._adjusted(actionKey, baselineMs);
    const start = Date.now();
    try {{
      await this.page.waitForLoadState("networkidle", {{ timeout: adjusted }});
    }} catch {{
      this.logger.warn(`⏱ TimingHealer: networkidle timeout for "${{actionKey}}" (${{adjusted}}ms)`);
    }}
    this._record(actionKey, Date.now() - start, baselineMs);
  }}

  async waitForApiResponse(
    actionKey: string, urlPattern: string | RegExp, baselineMs = 15_000,
  ): Promise<void> {{
    const adjusted = this._adjusted(actionKey, baselineMs);
    const start = Date.now();
    try {{
      await this.page.waitForResponse(
        r => typeof urlPattern === "string"
          ? r.url().includes(urlPattern)
          : urlPattern.test(r.url()),
        {{ timeout: adjusted }},
      );
    }} catch {{
      this.logger.warn(`⏱ TimingHealer: API timeout for "${{actionKey}}"`);
    }}
    this._record(actionKey, Date.now() - start, baselineMs);
  }}

  private _adjusted(actionKey: string, defaultMs: number): number {{
    const obs = this.observations.get(actionKey);
    if (!obs?.length) return defaultMs;
    const last = obs[obs.length - 1];
    return Math.min(Math.ceil(last * TimingHealer.HEADROOM), TimingHealer.MAX_TIMEOUT);
  }}

  private _record(actionKey: string, observedMs: number, baselineMs: number): void {{
    const obs = this.observations.get(actionKey) ?? [];
    obs.push(observedMs);
    this.observations.set(actionKey, obs.slice(-20));
    const drift = (observedMs - baselineMs) / baselineMs;
    if (drift > TimingHealer.DRIFT_THRESHOLD) {{
      const adjusted = Math.min(Math.ceil(observedMs * TimingHealer.HEADROOM), TimingHealer.MAX_TIMEOUT);
      this.logger.warn(
        `⏱ TimingHealer: "${{actionKey}}" drifted ${{Math.round(drift * 100)}}% ` +
        `(${{baselineMs}}ms → ${{observedMs}}ms). Auto-adjusted to ${{adjusted}}ms. ` +
        `Queued for engineer review at http://localhost:{dashboard_port}`,
      );
      this.repo.queueSuggestion(`timing:${{actionKey}}`, `timeout:${{adjusted}}`, "timing-drift");
    }}
  }}
}}
'''
|
|
2260
|
+
|
|
2261
|
+
|
|
2262
|
+
def _gen_visual_checker_cls(repository_path: str, dashboard_port: int) -> str:
    """Generate the TypeScript source for the ``VisualIntentChecker`` class.

    The emitted class screenshots an element under ``test-results/
    visual-actuals``, auto-saves a first-run baseline under ``test-results/
    visual-baselines``, and otherwise runs Playwright's
    ``toHaveScreenshot`` comparison.  On any failure in that flow it queues
    a ``visual:<key>`` suggestion on the LocatorRepository for dashboard
    review instead of failing the test.

    Args:
        repository_path: Accepted for signature parity with the sibling
            generators; it is not interpolated into this template.
        dashboard_port: Port number embedded in the generated log messages
            and class doc-comment pointing engineers at the dashboard.

    Returns:
        A complete TypeScript module as a string.
    """
    # NOTE: "{{" / "}}" in the f-string emit literal TS braces;
    # "${{...}}" emits a TS template-literal interpolation "${...}".
    return f'''import {{ Page, expect }} from "@playwright/test";
import {{ Logger }} from "winston";
import * as fs from "fs";
import * as path from "path";
import {{ LocatorRepository }} from "./LocatorRepository";

/**
 * VisualIntentChecker — Layer 3: Visual Regression alongside Locator Healing
 *
 * Captures element screenshots at assertions and diffs against approved baselines.
 * Pixel diff > 0.5% → queued to HealingDashboard :{dashboard_port}.
 */
export class VisualIntentChecker {{
  private static readonly DIFF_THRESHOLD = 0.5;
  private readonly baselineDir: string;
  private readonly actualDir: string;

  constructor(
    private readonly page: Page,
    private readonly logger: Logger,
    private readonly repo?: LocatorRepository,
  ) {{
    this.baselineDir = path.join("test-results", "visual-baselines");
    this.actualDir = path.join("test-results", "visual-actuals");
    fs.mkdirSync(this.baselineDir, {{ recursive: true }});
    fs.mkdirSync(this.actualDir, {{ recursive: true }});
  }}

  async check(key: string, selector: string, intent: string): Promise<void> {{
    const safe = key.replace(/[^a-z0-9_-]/gi, "_");
    const baseline = path.join(this.baselineDir, `${{safe}}.png`);
    const actual = path.join(this.actualDir, `${{safe}}.png`);
    try {{
      const loc = this.page.locator(selector).first();
      if ((await loc.count()) === 0) {{
        this.logger.warn(`📸 VisualIntent: element not found for "${{key}}" — skipping`);
        return;
      }}
      const screenshot = await loc.screenshot({{ timeout: 5_000 }});
      fs.writeFileSync(actual, screenshot);
      if (!fs.existsSync(baseline)) {{
        fs.copyFileSync(actual, baseline);
        this.logger.info(`📸 VisualIntent: new baseline saved for "${{key}}"`);
        return;
      }}
      await expect(loc).toHaveScreenshot(`${{safe}}.png`, {{
        maxDiffPixelRatio: VisualIntentChecker.DIFF_THRESHOLD / 100,
        threshold: 0.2,
      }});
      this.logger.info(`📸 VisualIntent: ${{key}} — passed`);
    }} catch {{
      this.logger.warn(
        `📸 VisualIntent: "${{key}}" visual regression detected! ` +
        `Queued for review at http://localhost:{dashboard_port}`,
      );
      this.repo?.queueSuggestion(`visual:${{key}}`, actual, `visual-regression:${{intent}}`);
    }}
  }}

  approveBaseline(key: string): void {{
    const safe = key.replace(/[^a-z0-9_-]/gi, "_");
    const baseline = path.join(this.baselineDir, `${{safe}}.png`);
    const actual = path.join(this.actualDir, `${{safe}}.png`);
    if (fs.existsSync(actual)) {{
      fs.copyFileSync(actual, baseline);
      this.logger.info(`📸 Baseline updated for "${{key}}"`);
    }}
  }}
}}
'''
|
|
2333
|
+
|
|
2334
|
+
|
|
2335
|
+
def _gen_healing_dashboard_cls(repository_path, dashboard_port):
|
|
2336
|
+
return f'''import * as fs from "fs";
|
|
2337
|
+
import * as path from "path";
|
|
2338
|
+
import * as http from "http";
|
|
2339
|
+
import * as url from "url";
|
|
2340
|
+
|
|
2341
|
+
/**
|
|
2342
|
+
* HealingDashboard — CI/CD Telemetry for AI-Suggested Changes
|
|
2343
|
+
*
|
|
2344
|
+
* Zero-dependency HTTP server displaying all pending suggestions from:
|
|
2345
|
+
* • LocatorHealer → AI Vision selector suggestions
|
|
2346
|
+
* • TimingHealer → auto-adjusted timeout suggestions
|
|
2347
|
+
* • VisualIntentChecker → visual baseline update suggestions
|
|
2348
|
+
*
|
|
2349
|
+
* Endpoints:
|
|
2350
|
+
* GET / → HTML dashboard (auto-refreshes every 10s)
|
|
2351
|
+
* GET /api/pending → JSON pending suggestions
|
|
2352
|
+
* GET /api/analytics → JSON healing analytics
|
|
2353
|
+
* POST /api/approve/:key → Approve → commit to {repository_path}
|
|
2354
|
+
* POST /api/reject/:key → Reject → discard
|
|
2355
|
+
* GET /api/visual/:key → Serve actual PNG for visual diff review
|
|
2356
|
+
*
|
|
2357
|
+
* Set HEALING_DASHBOARD_PORT=0 to disable the HTTP server in CI pipelines.
|
|
2358
|
+
* Suggestions are still written to {repository_path} for async review.
|
|
2359
|
+
*/
|
|
2360
|
+
export class HealingDashboard {{
|
|
2361
|
+
private server: http.Server | null = null;
|
|
2362
|
+
private readonly port: number;
|
|
2363
|
+
|
|
2364
|
+
constructor(
|
|
2365
|
+
private readonly repoPath = "{repository_path}",
|
|
2366
|
+
port = {dashboard_port},
|
|
2367
|
+
) {{
|
|
2368
|
+
this.port = Number(process.env.HEALING_DASHBOARD_PORT ?? port);
|
|
2369
|
+
}}
|
|
2370
|
+
|
|
2371
|
+
start(): void {{
|
|
2372
|
+
if (this.port === 0) return;
|
|
2373
|
+
this.server = http.createServer((req, res) => this._route(req, res));
|
|
2374
|
+
this.server.listen(this.port, () => {{
|
|
2375
|
+
console.log(`\\n🩺 HealingDashboard running at http://localhost:${{this.port}}`);
|
|
2376
|
+
console.log(` Approve or reject AI-suggested changes before committing.\\n`);
|
|
2377
|
+
}});
|
|
2378
|
+
}}
|
|
2379
|
+
|
|
2380
|
+
stop(): void {{ this.server?.close(); }}
|
|
2381
|
+
|
|
2382
|
+
private _route(req: http.IncomingMessage, res: http.ServerResponse): void {{
|
|
2383
|
+
const p = url.parse(req.url ?? "/").pathname ?? "/";
|
|
2384
|
+
const m = req.method ?? "GET";
|
|
2385
|
+
res.setHeader("Access-Control-Allow-Origin", "*");
|
|
2386
|
+
if (m === "OPTIONS") {{ res.writeHead(204); res.end(); return; }}
|
|
2387
|
+
|
|
2388
|
+
if (m === "GET" && p === "/") {{ res.writeHead(200, {{"Content-Type":"text/html"}}); res.end(this._html()); return; }}
|
|
2389
|
+
if (m === "GET" && p === "/api/pending") {{ res.writeHead(200, {{"Content-Type":"application/json"}}); res.end(JSON.stringify(this._pending(), null, 2)); return; }}
|
|
2390
|
+
if (m === "GET" && p === "/api/analytics") {{ res.writeHead(200, {{"Content-Type":"application/json"}}); res.end(JSON.stringify(this._analytics(), null, 2)); return; }}
|
|
2391
|
+
|
|
2392
|
+
const am = p.match(/^[/]api[/](approve|reject)[/](.+)$/);
|
|
2393
|
+
if (m === "POST" && am) {{
|
|
2394
|
+
const ok = am[1] === "approve"
|
|
2395
|
+
? this._approve(decodeURIComponent(am[2]))
|
|
2396
|
+
: this._reject(decodeURIComponent(am[2]));
|
|
2397
|
+
res.writeHead(ok ? 200 : 404, {{"Content-Type":"application/json"}});
|
|
2398
|
+
res.end(JSON.stringify({{ success: ok }}));
|
|
2399
|
+
return;
|
|
2400
|
+
}}
|
|
2401
|
+
|
|
2402
|
+
const vm = p.match(/^[/]api[/]visual[/](.+)$/);
|
|
2403
|
+
if (m === "GET" && vm) {{
|
|
2404
|
+
const key = decodeURIComponent(vm[1]).replace("visual:", "");
|
|
2405
|
+
const img = path.join("test-results", "visual-actuals", `${{key}}.png`);
|
|
2406
|
+
if (fs.existsSync(img)) {{ res.writeHead(200, {{"Content-Type":"image/png"}}); res.end(fs.readFileSync(img)); }}
|
|
2407
|
+
else {{ res.writeHead(404); res.end("Not found"); }}
|
|
2408
|
+
return;
|
|
2409
|
+
}}
|
|
2410
|
+
res.writeHead(404); res.end("Not found");
|
|
2411
|
+
}}
|
|
2412
|
+
|
|
2413
|
+
private _load(): Record<string, any> {{
|
|
2414
|
+
try {{ return JSON.parse(fs.readFileSync(this.repoPath, "utf8")); }} catch {{ return {{}}; }}
|
|
2415
|
+
}}
|
|
2416
|
+
private _save(d: Record<string, any>): void {{
|
|
2417
|
+
fs.mkdirSync(path.dirname(this.repoPath), {{ recursive: true }});
|
|
2418
|
+
fs.writeFileSync(this.repoPath, JSON.stringify(d, null, 2), "utf8");
|
|
2419
|
+
}}
|
|
2420
|
+
|
|
2421
|
+
private _pending(): any[] {{
|
|
2422
|
+
return Object.entries(this._load())
|
|
2423
|
+
.filter(([, v]) => (v as any).pendingSuggestion)
|
|
2424
|
+
.map(([k, v]) => {{
|
|
2425
|
+
const vv = v as any;
|
|
2426
|
+
return {{
|
|
2427
|
+
key: k, intent: vv.intent,
|
|
2428
|
+
current: vv.healedSelector ?? vv.selector,
|
|
2429
|
+
suggestion: vv.pendingSuggestion,
|
|
2430
|
+
failureCount: vv.failureCount,
|
|
2431
|
+
healCount: vv.healingHistory.length,
|
|
2432
|
+
}};
|
|
2433
|
+
}});
|
|
2434
|
+
}}
|
|
2435
|
+
|
|
2436
|
+
private _analytics(): any {{
|
|
2437
|
+
const e = Object.entries(this._load());
|
|
2438
|
+
return {{
|
|
2439
|
+
totalLocators: e.length,
|
|
2440
|
+
healedLocators: e.filter(([, v]) => (v as any).healedSelector).length,
|
|
2441
|
+
pendingApprovals: e.filter(([, v]) => (v as any).pendingSuggestion).length,
|
|
2442
|
+
timingSuggestions: e.filter(([k]) => k.startsWith("timing:")).length,
|
|
2443
|
+
visualSuggestions: e.filter(([k]) => k.startsWith("visual:")).length,
|
|
2444
|
+
mostHealedKeys: e
|
|
2445
|
+
.sort(([, a], [, b]) => (b as any).healingHistory.length - (a as any).healingHistory.length)
|
|
2446
|
+
.slice(0, 10)
|
|
2447
|
+
.map(([k, v]) => ({{ key: k, healCount: (v as any).healingHistory.length, intent: (v as any).intent }})),
|
|
2448
|
+
}};
|
|
2449
|
+
}}
|
|
2450
|
+
|
|
2451
|
+
private _approve(key: string): boolean {{
|
|
2452
|
+
const d = this._load(); const e = d[key];
|
|
2453
|
+
if (!e?.pendingSuggestion) return false;
|
|
2454
|
+
e.healingHistory.push({{
|
|
2455
|
+
from: e.healedSelector ?? e.selector, to: e.pendingSuggestion.selector,
|
|
2456
|
+
timestamp: new Date().toISOString(), strategy: e.pendingSuggestion.strategy + ":dashboard-approved",
|
|
2457
|
+
}});
|
|
2458
|
+
e.healedSelector = e.pendingSuggestion.selector;
|
|
2459
|
+
delete e.pendingSuggestion;
|
|
2460
|
+
this._save(d);
|
|
2461
|
+
console.log(`✅ HealingDashboard: approved "${{key}}"`);
|
|
2462
|
+
return true;
|
|
2463
|
+
}}
|
|
2464
|
+
|
|
2465
|
+
private _reject(key: string): boolean {{
|
|
2466
|
+
const d = this._load(); const e = d[key];
|
|
2467
|
+
if (!e?.pendingSuggestion) return false;
|
|
2468
|
+
console.log(`❌ HealingDashboard: rejected "${{key}}"`);
|
|
2469
|
+
delete e.pendingSuggestion;
|
|
2470
|
+
this._save(d);
|
|
2471
|
+
return true;
|
|
2472
|
+
}}
|
|
2473
|
+
|
|
2474
|
+
private _html(): string {{
|
|
2475
|
+
const pending = this._pending();
|
|
2476
|
+
const analytics = this._analytics();
|
|
2477
|
+
const rows = pending.map(p => `
|
|
2478
|
+
<tr>
|
|
2479
|
+
<td><code>${{p.key}}</code></td>
|
|
2480
|
+
<td>${{p.intent}}</td>
|
|
2481
|
+
<td><code style="color:#888">${{p.current}}</code></td>
|
|
2482
|
+
<td><code style="color:#f59e0b">${{p.suggestion.selector}}</code></td>
|
|
2483
|
+
<td>${{p.suggestion.strategy}}</td>
|
|
2484
|
+
<td><small>${{new Date(p.suggestion.suggestedAt).toLocaleString()}}</small></td>
|
|
2485
|
+
<td>
|
|
2486
|
+
<button onclick="act('approve','${{enc(p.key)}}')" style="background:#22c55e;color:#000;border:none;padding:4px 10px;border-radius:3px;cursor:pointer">Approve</button>
|
|
2487
|
+
<button onclick="act('reject','${{enc(p.key)}}')" style="background:#ef4444;color:#fff;border:none;padding:4px 10px;border-radius:3px;cursor:pointer;margin-left:4px">Reject</button>
|
|
2488
|
+
${{p.key.startsWith("visual:") ? `<a href="/api/visual/${{enc(p.key)}}" target="_blank" style="color:#818cf8;margin-left:6px">View diff</a>` : ""}}
|
|
2489
|
+
</td>
|
|
2490
|
+
</tr>`).join("");
|
|
2491
|
+
return `<!DOCTYPE html>
|
|
2492
|
+
<html lang="en">
|
|
2493
|
+
<head>
|
|
2494
|
+
<meta charset="UTF-8"><title>🩺 Healing Dashboard</title>
|
|
2495
|
+
<meta http-equiv="refresh" content="10">
|
|
2496
|
+
<style>
|
|
2497
|
+
body {{ font-family: monospace; background:#0f172a; color:#e2e8f0; padding:24px }}
|
|
2498
|
+
h1 {{ color:#38bdf8; margin-bottom:4px }}
|
|
2499
|
+
.sub {{ color:#64748b; font-size:12px; display:block; margin-bottom:20px }}
|
|
2500
|
+
.stats {{ display:flex; gap:12px; margin-bottom:20px }}
|
|
2501
|
+
.stat {{ background:#1e293b; border:1px solid #334155; border-radius:6px; padding:10px 16px }}
|
|
2502
|
+
.sv {{ font-size:26px; font-weight:700; color:#38bdf8 }}
|
|
2503
|
+
.sl {{ font-size:9px; color:#64748b; letter-spacing:1px }}
|
|
2504
|
+
table{{ width:100%; border-collapse:collapse; background:#1e293b; border-radius:6px; overflow:hidden }}
|
|
2505
|
+
th {{ background:#0f172a; padding:9px 12px; font-size:9px; letter-spacing:1px; color:#64748b; text-align:left }}
|
|
2506
|
+
td {{ padding:9px 12px; border-bottom:1px solid #334155; font-size:11px }}
|
|
2507
|
+
</style>
|
|
2508
|
+
</head>
|
|
2509
|
+
<body>
|
|
2510
|
+
<h1>🩺 Healing Dashboard</h1>
|
|
2511
|
+
<span class="sub">
|
|
2512
|
+
Auto-refreshes every 10s ·
|
|
2513
|
+
<a href="/api/analytics" style="color:#818cf8">Analytics JSON</a> ·
|
|
2514
|
+
Approve or reject AI suggestions before they are committed to the repository
|
|
2515
|
+
</span>
|
|
2516
|
+
<div class="stats">
|
|
2517
|
+
<div class="stat"><div class="sv">${{analytics.pendingApprovals}}</div><div class="sl">PENDING</div></div>
|
|
2518
|
+
<div class="stat"><div class="sv">${{analytics.healedLocators}}</div><div class="sl">HEALED</div></div>
|
|
2519
|
+
<div class="stat"><div class="sv">${{analytics.timingSuggestions}}</div><div class="sl">TIMING</div></div>
|
|
2520
|
+
<div class="stat"><div class="sv">${{analytics.visualSuggestions}}</div><div class="sl">VISUAL</div></div>
|
|
2521
|
+
</div>
|
|
2522
|
+
<table>
|
|
2523
|
+
<thead>
|
|
2524
|
+
<tr><th>Key</th><th>Intent</th><th>Current</th><th>Suggested</th><th>Strategy</th><th>Time</th><th>Action</th></tr>
|
|
2525
|
+
</thead>
|
|
2526
|
+
<tbody>
|
|
2527
|
+
${{rows || '<tr><td colspan="7" style="padding:24px;text-align:center;color:#64748b">✓ No pending suggestions — all AI changes approved or no healing events yet</td></tr>'}}
|
|
2528
|
+
</tbody>
|
|
2529
|
+
</table>
|
|
2530
|
+
<script>
|
|
2531
|
+
function enc(s) {{ return encodeURIComponent(s); }}
|
|
2532
|
+
async function act(action, key) {{
|
|
2533
|
+
await fetch("/api/" + action + "/" + key, {{ method: "POST" }});
|
|
2534
|
+
location.reload();
|
|
2535
|
+
}}
|
|
2536
|
+
</script>
|
|
2537
|
+
</body>
|
|
2538
|
+
</html>`;
|
|
2539
|
+
}}
|
|
2540
|
+
}}
|
|
2541
|
+
'''
|
|
2542
|
+
|
|
2543
|
+
|
|
2544
|
+
def _gen_devtools_healer_cls() -> str:
|
|
2545
|
+
return r'''import { Page, Locator } from "@playwright/test";
|
|
2546
|
+
import { Logger } from "winston";
|
|
2547
|
+
import { LocatorRepository, BoundingBox } from "./LocatorRepository";
|
|
2548
|
+
|
|
2549
|
+
interface AXNode {
|
|
2550
|
+
role?: { value: string };
|
|
2551
|
+
name?: { value: string };
|
|
2552
|
+
backendDOMNodeId?: number;
|
|
2553
|
+
}
|
|
2554
|
+
|
|
2555
|
+
/**
|
|
2556
|
+
* DevToolsHealer — CDP-Powered Precision Healing (Layer 4, runtime only)
|
|
2557
|
+
*
|
|
2558
|
+
* Uses page.context().newCDPSession(page) — no external process, no Playwright MCP
|
|
2559
|
+
* connection, no Chrome debugging port. Works purely within the Playwright test runner
|
|
2560
|
+
* on Chromium-based browsers.
|
|
2561
|
+
*
|
|
2562
|
+
* Three modes:
|
|
2563
|
+
*
|
|
2564
|
+
* Mode A — findByAxName: walks Accessibility.getFullAXTree for a node whose
|
|
2565
|
+
* accessible name matches the intent string (≥60% word overlap). Resolves the
|
|
2566
|
+
* best stable selector and validates with DOM.querySelectorAll.
|
|
2567
|
+
*
|
|
2568
|
+
* Mode B — findByBoundingBox: finds the element now at the last known screen
|
|
2569
|
+
* coordinates (DOM.getNodeForLocation). Handles re-keyed elements whose layout
|
|
2570
|
+
* position is unchanged.
|
|
2571
|
+
*
|
|
2572
|
+
* Mode C — validateSelector: used by generate_playwright_code at generation time
|
|
2573
|
+
* to confirm a proposed selector matches exactly 1 element. Not called at runtime.
|
|
2574
|
+
* (Note: generation-time validation now uses Playwright MCP browser_snapshot
|
|
2575
|
+
* to derive selectors from the AX tree directly — this method is a runtime-only
|
|
2576
|
+
* fallback validator.)
|
|
2577
|
+
*
|
|
2578
|
+
* Selector stability ranking (_bestSelectorForNode):
|
|
2579
|
+
* data-testid (100) > aria-role+aria-label (90) > id (80) > aria-label (70) > placeholder (60)
|
|
2580
|
+
*/
|
|
2581
|
+
export class DevToolsHealer {
|
|
2582
|
+
private cdpSession: any | null = null;
|
|
2583
|
+
|
|
2584
|
+
constructor(
|
|
2585
|
+
private readonly page: Page,
|
|
2586
|
+
private readonly logger: Logger,
|
|
2587
|
+
private readonly repo: LocatorRepository,
|
|
2588
|
+
) {}
|
|
2589
|
+
|
|
2590
|
+
async findByAxName(key: string, intent: string, _primarySelector: string): Promise<Locator | null> {
|
|
2591
|
+
const cdp = await this._session();
|
|
2592
|
+
if (!cdp) return null;
|
|
2593
|
+
|
|
2594
|
+
try {
|
|
2595
|
+
const result = await cdp.send("Accessibility.getFullAXTree", {}) as { nodes: AXNode[] };
|
|
2596
|
+
const intentWords = intent.toLowerCase().split(" ").filter(w => w.length > 2);
|
|
2597
|
+
|
|
2598
|
+
for (const node of result.nodes ?? []) {
|
|
2599
|
+
const nodeName = (node.name?.value ?? "").toLowerCase();
|
|
2600
|
+
const nodeRole = (node.role?.value ?? "").toLowerCase();
|
|
2601
|
+
const matches = intentWords.filter(w => nodeName.includes(w)).length;
|
|
2602
|
+
if (matches < Math.ceil(intentWords.length * 0.6)) continue;
|
|
2603
|
+
|
|
2604
|
+
const selector = await this._bestSelectorForNode(cdp, node.backendDOMNodeId, nodeRole, node.name?.value ?? "");
|
|
2605
|
+
if (!selector) continue;
|
|
2606
|
+
|
|
2607
|
+
const validated = await this.validateSelector(selector);
|
|
2608
|
+
if (!validated) continue;
|
|
2609
|
+
|
|
2610
|
+
const loc = this.page.locator(selector);
|
|
2611
|
+
this.repo.updateHealed(key, selector, intent, "cdp-ax-name");
|
|
2612
|
+
this.logger.info(`⚕ DevToolsHealer (AX): ${key} → ${selector}`);
|
|
2613
|
+
return loc;
|
|
2614
|
+
}
|
|
2615
|
+
} catch (err) {
|
|
2616
|
+
this.logger.warn(`⚕ DevToolsHealer AX failed: ${err}`);
|
|
2617
|
+
}
|
|
2618
|
+
return null;
|
|
2619
|
+
}
|
|
2620
|
+
|
|
2621
|
+
async findByBoundingBox(key: string, bbox: BoundingBox | null, intent: string): Promise<Locator | null> {
|
|
2622
|
+
if (!bbox) return null;
|
|
2623
|
+
const cdp = await this._session();
|
|
2624
|
+
if (!cdp) return null;
|
|
2625
|
+
|
|
2626
|
+
try {
|
|
2627
|
+
const cx = Math.round(bbox.x + bbox.width / 2);
|
|
2628
|
+
const cy = Math.round(bbox.y + bbox.height / 2);
|
|
2629
|
+
|
|
2630
|
+
const result = await cdp.send("DOM.getNodeForLocation", {
|
|
2631
|
+
x: cx, y: cy, includeUserAgentShadowDOM: false,
|
|
2632
|
+
}) as { backendNodeId?: number };
|
|
2633
|
+
|
|
2634
|
+
if (!result.backendNodeId) return null;
|
|
2635
|
+
|
|
2636
|
+
const selector = await this._bestSelectorForNode(cdp, result.backendNodeId, "", intent);
|
|
2637
|
+
if (!selector) return null;
|
|
2638
|
+
|
|
2639
|
+
const validated = await this.validateSelector(selector);
|
|
2640
|
+
if (!validated) return null;
|
|
2641
|
+
|
|
2642
|
+
const loc = this.page.locator(selector);
|
|
2643
|
+
this.repo.updateHealed(key, selector, intent, "cdp-bounding-box");
|
|
2644
|
+
this.logger.info(`⚕ DevToolsHealer (BBox): ${key} → ${selector} at (${cx},${cy})`);
|
|
2645
|
+
return loc;
|
|
2646
|
+
} catch (err) {
|
|
2647
|
+
this.logger.warn(`⚕ DevToolsHealer BBox failed: ${err}`);
|
|
2648
|
+
}
|
|
2649
|
+
return null;
|
|
2650
|
+
}
|
|
2651
|
+
|
|
2652
|
+
async validateSelector(selector: string): Promise<boolean> {
|
|
2653
|
+
const cdp = await this._session();
|
|
2654
|
+
if (!cdp) return true;
|
|
2655
|
+
|
|
2656
|
+
try {
|
|
2657
|
+
const result = await cdp.send("Runtime.evaluate", {
|
|
2658
|
+
expression: `document.querySelectorAll(${JSON.stringify(selector)}).length`,
|
|
2659
|
+
returnByValue: true,
|
|
2660
|
+
}) as { result: { value: number } };
|
|
2661
|
+
const count = result?.result?.value ?? 0;
|
|
2662
|
+
if (count !== 1) {
|
|
2663
|
+
this.logger.warn(`⚕ DevToolsHealer: selector "${selector}" matched ${count} elements (expected 1)`);
|
|
2664
|
+
}
|
|
2665
|
+
return count === 1;
|
|
2666
|
+
} catch {
|
|
2667
|
+
return true;
|
|
2668
|
+
}
|
|
2669
|
+
}
|
|
2670
|
+
|
|
2671
|
+
async captureBoundingBox(key: string, selector: string): Promise<void> {
|
|
2672
|
+
const cdp = await this._session();
|
|
2673
|
+
if (!cdp) return;
|
|
2674
|
+
|
|
2675
|
+
try {
|
|
2676
|
+
const nodeResult = await cdp.send("DOM.querySelector", {
|
|
2677
|
+
nodeId: 1,
|
|
2678
|
+
selector,
|
|
2679
|
+
}) as { nodeId: number };
|
|
2680
|
+
|
|
2681
|
+
if (!nodeResult.nodeId) return;
|
|
2682
|
+
|
|
2683
|
+
const boxResult = await cdp.send("DOM.getBoxModel", {
|
|
2684
|
+
nodeId: nodeResult.nodeId,
|
|
2685
|
+
}) as { model?: { content: number[] } };
|
|
2686
|
+
|
|
2687
|
+
const content = boxResult?.model?.content ?? [];
|
|
2688
|
+
if (content.length >= 6) {
|
|
2689
|
+
const bbox: BoundingBox = {
|
|
2690
|
+
x: content[0], y: content[1],
|
|
2691
|
+
width: content[4] - content[0],
|
|
2692
|
+
height: content[5] - content[1],
|
|
2693
|
+
};
|
|
2694
|
+
this.repo.updateBoundingBox(key, bbox);
|
|
2695
|
+
}
|
|
2696
|
+
} catch { /* non-fatal */ }
|
|
2697
|
+
}
|
|
2698
|
+
|
|
2699
|
+
private async _session(): Promise<any | null> {
|
|
2700
|
+
if (this.cdpSession) return this.cdpSession;
|
|
2701
|
+
try {
|
|
2702
|
+
this.cdpSession = await (this.page.context() as any).newCDPSession(this.page);
|
|
2703
|
+
return this.cdpSession;
|
|
2704
|
+
} catch {
|
|
2705
|
+
return null; // non-Chromium browser
|
|
2706
|
+
}
|
|
2707
|
+
}
|
|
2708
|
+
|
|
2709
|
+
private async _bestSelectorForNode(
|
|
2710
|
+
cdp: any, backendNodeId: number | undefined,
|
|
2711
|
+
role: string, name: string,
|
|
2712
|
+
): Promise<string | null> {
|
|
2713
|
+
if (!backendNodeId) return null;
|
|
2714
|
+
|
|
2715
|
+
try {
|
|
2716
|
+
const resolve = await cdp.send("DOM.resolveNode", { backendNodeId }) as { object?: { objectId: string } };
|
|
2717
|
+
const objId = resolve?.object?.objectId;
|
|
2718
|
+
if (!objId) return null;
|
|
2719
|
+
|
|
2720
|
+
const props = await cdp.send("Runtime.callFunctionOn", {
|
|
2721
|
+
objectId: objId,
|
|
2722
|
+
functionDeclaration: `function() {
|
|
2723
|
+
const e = this;
|
|
2724
|
+
return JSON.stringify({
|
|
2725
|
+
testid: e.getAttribute('data-testid') || e.getAttribute('data-test-id') || e.getAttribute('data-cy'),
|
|
2726
|
+
ariaLabel: e.getAttribute('aria-label'),
|
|
2727
|
+
id: e.id,
|
|
2728
|
+
placeholder:e.getAttribute('placeholder'),
|
|
2729
|
+
});
|
|
2730
|
+
}`,
|
|
2731
|
+
returnByValue: true,
|
|
2732
|
+
}) as { result?: { value: string } };
|
|
2733
|
+
|
|
2734
|
+
const attrs = JSON.parse(props?.result?.value ?? "{}");
|
|
2735
|
+
|
|
2736
|
+
if (attrs.testid) return `[data-testid='${attrs.testid}']`;
|
|
2737
|
+
if (attrs.ariaLabel) return `[aria-label='${attrs.ariaLabel}']`;
|
|
2738
|
+
if (attrs.id && !/^\d/.test(attrs.id)) return `#${attrs.id}`;
|
|
2739
|
+
if (attrs.placeholder)return `[placeholder='${attrs.placeholder}']`;
|
|
2740
|
+
if (role && name) return `[role='${role}'][aria-label='${name}']`;
|
|
2741
|
+
} catch { /* fall through */ }
|
|
2742
|
+
|
|
2743
|
+
return null;
|
|
2744
|
+
}
|
|
2745
|
+
}
|
|
2746
|
+
'''
|
|
2747
|
+
|
|
2748
|
+
|
|
2749
|
+
# ============================================================================
|
|
2750
|
+
# Entry point
|
|
2751
|
+
# ============================================================================
|
|
2752
|
+
|
|
2753
|
+
async def _run():
    """Serve the MCP app over stdio until the transport closes."""
    async with stdio_server() as (read_stream, write_stream):
        init_options = app.create_initialization_options()
        await app.run(read_stream, write_stream, init_options)
|
|
2756
|
+
|
|
2757
|
+
|
|
2758
|
+
def main():
    """CLI entry point: verify auth, report the signed-in user, start the server.

    Exits with status 1 (after logging to stderr) when credentials cannot
    be obtained; otherwise blocks in the asyncio event loop serving stdio.
    """
    try:
        get_auth_headers()
    except Exception as e:
        # Top-level boundary: log the failure and exit non-zero.
        print(f"[qa-playwright-generator] Auth error: {e}", file=sys.stderr)
        sys.exit(1)

    signed_in = get_signed_in_user()
    if signed_in:
        print(f"[qa-playwright-generator] Authenticated as: {signed_in}", file=sys.stderr)

    asyncio.run(_run())
|
|
2768
|
+
|
|
2769
|
+
|
|
2770
|
+
# Allow launching this MCP server directly as a script.
if __name__ == "__main__":
    main()
|