@qa-gentic/stlc-agents 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +310 -0
- package/bin/postinstall.js +78 -0
- package/bin/qa-stlc.js +89 -0
- package/package.json +48 -0
- package/skills/qa-stlc/AGENT-BEHAVIOR.md +383 -0
- package/skills/qa-stlc/deduplication-protocol.md +303 -0
- package/skills/qa-stlc/generate-gherkin.md +550 -0
- package/skills/qa-stlc/generate-playwright-code.md +464 -0
- package/skills/qa-stlc/generate-test-cases.md +176 -0
- package/skills/qa-stlc/write-helix-files.md +374 -0
- package/src/boilerplate-bundle.js +66 -0
- package/src/cmd-init.js +92 -0
- package/src/cmd-mcp-config.js +177 -0
- package/src/cmd-scaffold.js +130 -0
- package/src/cmd-skills.js +124 -0
- package/src/cmd-verify.js +129 -0
- package/src/stlc_agents/__init__.py +0 -0
- package/src/stlc_agents/agent_gherkin_generator/__init__.py +0 -0
- package/src/stlc_agents/agent_gherkin_generator/server.py +502 -0
- package/src/stlc_agents/agent_gherkin_generator/tools/__init__.py +0 -0
- package/src/stlc_agents/agent_gherkin_generator/tools/ado_gherkin.py +854 -0
- package/src/stlc_agents/agent_helix_writer/__init__.py +0 -0
- package/src/stlc_agents/agent_helix_writer/server.py +529 -0
- package/src/stlc_agents/agent_helix_writer/tools/__init__.py +0 -0
- package/src/stlc_agents/agent_helix_writer/tools/boilerplate.py +70 -0
- package/src/stlc_agents/agent_helix_writer/tools/helix_write.py +796 -0
- package/src/stlc_agents/agent_playwright_generator/__init__.py +0 -0
- package/src/stlc_agents/agent_playwright_generator/server.py +2610 -0
- package/src/stlc_agents/agent_playwright_generator/tools/__init__.py +0 -0
- package/src/stlc_agents/agent_playwright_generator/tools/ado_attach.py +62 -0
- package/src/stlc_agents/agent_test_case_manager/__init__.py +0 -0
- package/src/stlc_agents/agent_test_case_manager/server.py +483 -0
- package/src/stlc_agents/agent_test_case_manager/tools/__init__.py +0 -0
- package/src/stlc_agents/agent_test_case_manager/tools/ado_workitem.py +302 -0
- package/src/stlc_agents/shared/__init__.py +0 -0
- package/src/stlc_agents/shared/auth.py +119 -0
|
@@ -0,0 +1,796 @@
|
|
|
1
|
+
"""
|
|
2
|
+
helix_write.py — File-system write tool for the Helix QA framework.
|
|
3
|
+
|
|
4
|
+
Public API:
|
|
5
|
+
inspect_helix_project(helix_root) -> dict
|
|
6
|
+
write_files_to_helix(helix_root, files, mode) -> dict
|
|
7
|
+
read_helix_file(helix_root, relative_path) -> dict
|
|
8
|
+
list_helix_tree(helix_root) -> dict
|
|
9
|
+
|
|
10
|
+
Framework existence detection
|
|
11
|
+
inspect_helix_project() returns framework_state: "absent"|"partial"|"present"
|
|
12
|
+
and a recommendation of "scaffold_and_tests" or "tests_only".
|
|
13
|
+
write_files_to_helix() accepts that recommendation as its `mode` argument.
|
|
14
|
+
|
|
15
|
+
Boilerplate sourcing (scaffold_and_tests mode)
|
|
16
|
+
Missing infra files (all 14 in utils/locators/) are copied from embedded content
|
|
17
|
+
in boilerplate.py — NOT from generated strings and NOT from the filesystem.
|
|
18
|
+
This keeps LocatorHealer.ts (including its inline AX tree healing strategy) in sync
|
|
19
|
+
with the scaffold template. Files already on disk are never touched unless
|
|
20
|
+
force_scaffold=True is passed.
|
|
21
|
+
|
|
22
|
+
Infrastructure file protection
|
|
23
|
+
In mode="tests_only" all 14 utils/locators/*.ts files are always skipped.
|
|
24
|
+
In mode="scaffold_and_tests" missing infra files are sourced from boilerplate;
|
|
25
|
+
existing ones are skipped unless force_scaffold=True is passed.
|
|
26
|
+
|
|
27
|
+
Within-file deduplication
|
|
28
|
+
For locators.ts : new const-object entries are merged; duplicate keys skipped.
|
|
29
|
+
For *.steps.ts : new step blocks are appended; duplicate regex patterns skipped.
|
|
30
|
+
For *.page.ts : new async methods are appended; duplicate method names skipped.
|
|
31
|
+
For *.feature : new Scenario blocks are appended; duplicate titles skipped.
|
|
32
|
+
When a title matches, the existing scenario (and its original step
|
|
33
|
+
wording) is kept — the generated version is dropped entirely.
|
|
34
|
+
|
|
35
|
+
Interface adapter
|
|
36
|
+
The generator emits repo.updateHealed / repo.incrementSuccess / repo.getBBox etc.
|
|
37
|
+
The existing Helix LocatorRepository only has setHealed / getBestSelector / getHealed.
|
|
38
|
+
_adapt_to_helix_interface() rewrites generated content before writing so it
|
|
39
|
+
compiles cleanly against the Helix interface without manual edits.
|
|
40
|
+
"""
|
|
41
|
+
from __future__ import annotations
|
|
42
|
+
|
|
43
|
+
import re
|
|
44
|
+
from pathlib import Path
|
|
45
|
+
from typing import Any
|
|
46
|
+
|
|
47
|
+
from stlc_agents.agent_helix_writer.tools.boilerplate import BOILERPLATE, INFRA_FILES
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _get_boilerplate_content(file_name: str) -> str | None:
    """Look up the embedded boilerplate for *file_name*; None when not bundled."""
    try:
        return INFRA_FILES[file_name]
    except KeyError:
        return None
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
# ── Infrastructure file names (sourced from embedded INFRA_FILES) ─────────────

# All infra file names shipped in the embedded boilerplate bundle (boilerplate.py).
_INFRA_FILES: set[str] = set(INFRA_FILES.keys())
# The two files whose presence defines an installed framework — see
# inspect_helix_project().
_REQUIRED_INFRA = {"LocatorHealer.ts", "LocatorRepository.ts"}
# Everything else in the bundle is nice-to-have scaffolding.
_OPTIONAL_INFRA = _INFRA_FILES - _REQUIRED_INFRA

# Matches any path ending in one of the known infra file names.
# NOTE(review): the "index" alternative makes this match ANY path ending in
# index.ts — confirm generated test code never emits a non-infra index.ts.
_INFRA_RE = re.compile(
    r"(ElementContextHelper|HealApplicator|HealingDashboard|LocatorHealer|LocatorManager"
    r"|LocatorRepository|LocatorRules|LocatorStrategy|PlaywrightHealerLogger|TimingHealer"
    r"|VisualIntentChecker|healix-ci-apply|index|review-server)\.ts$"
)
# File-kind classifiers used by _resolve_destination() and write_files_to_helix().
_LOCATOR_RE = re.compile(r"locators\.ts$", re.IGNORECASE)
_PAGE_RE = re.compile(r"page\.ts$", re.IGNORECASE)
_STEPS_RE = re.compile(r"steps\.ts$", re.IGNORECASE)
_FEATURE_RE = re.compile(r"\.feature$", re.IGNORECASE)
# "cucumber" anywhere in the key routes to src/config/cucumber.config.ts.
_CUCUMBER_RE = re.compile(r"cucumber", re.IGNORECASE)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
# ── Interface adapter ─────────────────────────────────────────────────────
|
|
74
|
+
|
|
75
|
+
def _adapt_to_helix_interface(content: str) -> str:
|
|
76
|
+
"""
|
|
77
|
+
Rewrite generated TypeScript so it compiles against the Helix-QA
|
|
78
|
+
LocatorRepository / LocatorHealer interface.
|
|
79
|
+
|
|
80
|
+
Generator emits → Helix expects
|
|
81
|
+
─────────────────────────────────────────────────────────────────
|
|
82
|
+
repo.updateHealed(k,s,…) → repo.setHealed(k, s)
|
|
83
|
+
repo.getBBox(key) → null
|
|
84
|
+
repo.incrementSuccess(k) → (removed)
|
|
85
|
+
repo.incrementFailure(k) → (removed)
|
|
86
|
+
repo.queueSuggestion(…) → (removed)
|
|
87
|
+
repo.updateBoundingBox(…) → (removed)
|
|
88
|
+
this.devtools.captureBoundingBox(…) → (removed)
|
|
89
|
+
fixture().logger → this.logger
|
|
90
|
+
fixture().locatorRepository → this.repo
|
|
91
|
+
fixture().page → this.page
|
|
92
|
+
import { Logger } from "winston" → import { HealerLogger }
|
|
93
|
+
import EnvironmentManager → import { environment } from @config/environment
|
|
94
|
+
new EnvironmentManager() → environment
|
|
95
|
+
this.env.getBaseUrl() → environment.getConfig().baseUrl
|
|
96
|
+
this.env.getPath('x') → "x"
|
|
97
|
+
"""
|
|
98
|
+
# repo.updateHealed(key, sel, ...) → repo.setHealed(key, sel)
|
|
99
|
+
content = re.sub(
|
|
100
|
+
r"this\.repo\.updateHealed\(\s*([^,)]+),\s*([^,)]+)(?:,[^)]+)?\)",
|
|
101
|
+
r"this.repo.setHealed(\1, \2)",
|
|
102
|
+
content,
|
|
103
|
+
)
|
|
104
|
+
# repo.getBBox(key) → null
|
|
105
|
+
content = re.sub(r"this\.repo\.getBBox\([^)]*\)", "null", content)
|
|
106
|
+
|
|
107
|
+
# Remove single-statement method calls that have no Helix equivalent
|
|
108
|
+
for method in ("incrementSuccess", "incrementFailure", "queueSuggestion",
|
|
109
|
+
"updateBoundingBox", "captureBoundingBox"):
|
|
110
|
+
content = re.sub(
|
|
111
|
+
rf"^\s*(?:this\.repo\.|(?:this\.\w+\.)?){method}\([^)]*\)(?:\.catch\([^)]*\))?;?\s*\n",
|
|
112
|
+
"",
|
|
113
|
+
content,
|
|
114
|
+
flags=re.MULTILINE,
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
# fixture() references
|
|
118
|
+
content = re.sub(r"fixture\(\)\.logger\b", "this.logger", content)
|
|
119
|
+
content = re.sub(r"fixture\(\)\.locatorRepository\b", "this.repo", content)
|
|
120
|
+
content = re.sub(r"fixture\(\)\.page\b", "this.page", content)
|
|
121
|
+
|
|
122
|
+
# Winston Logger → Helix HealerLogger
|
|
123
|
+
content = content.replace(
|
|
124
|
+
'import { Logger } from "winston";',
|
|
125
|
+
'import { HealerLogger } from "./LocatorHealer";',
|
|
126
|
+
)
|
|
127
|
+
content = re.sub(r"\bLogger\b(?!\s*=)", "HealerLogger", content)
|
|
128
|
+
|
|
129
|
+
# EnvironmentManager → Helix environment singleton
|
|
130
|
+
content = content.replace(
|
|
131
|
+
'import { EnvironmentManager } from "@helper/environment/environmentManager.util";',
|
|
132
|
+
'import { environment } from "@config/environment";',
|
|
133
|
+
)
|
|
134
|
+
content = re.sub(r"\s*this\.env\s*=\s*new EnvironmentManager\(\);?\s*\n", "\n", content)
|
|
135
|
+
content = content.replace("new EnvironmentManager()", "environment")
|
|
136
|
+
content = content.replace("this.env.getBaseUrl()", "environment.getConfig().baseUrl")
|
|
137
|
+
content = re.sub(r"this\.env\.getPath\(['\"]([^'\"]+)['\"]\)", r'"\1"', content)
|
|
138
|
+
|
|
139
|
+
return content
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
# ── Path resolution ───────────────────────────────────────────────────────
|
|
143
|
+
|
|
144
|
+
def _resolve_destination(helix_root: Path, file_key: str) -> Path:
    """Map a generated file key onto its canonical location under *helix_root*.

    Classification order matters: cucumber config, infra file, feature, steps,
    locators, page object, then a generic fallback under src/.
    """
    key = file_key.strip()
    name = Path(key).name

    if _CUCUMBER_RE.search(key):
        return helix_root / "src" / "config" / "cucumber.config.ts"
    if _INFRA_RE.search(key):
        return helix_root / "src" / "utils" / "locators" / name
    if _FEATURE_RE.search(key):
        return helix_root / "src" / "test" / "features" / name
    if _STEPS_RE.search(key):
        return helix_root / "src" / "test" / "steps" / name
    if _LOCATOR_RE.search(key):
        # First path component that is not a well-known directory and not a
        # .ts file names the page; default to "page" when none is found.
        stem = "page"
        for part in Path(key).parts:
            if part not in ("src", "pages", "locators", "utils") and not part.endswith(".ts"):
                stem = part
                break
        return helix_root / "src" / "locators" / f"{stem}.locators.ts"
    if _PAGE_RE.search(key):
        return helix_root / "src" / "pages" / name

    # Fallback: keep the relative path, rooted under src/.
    rel = key.lstrip("/")
    if not rel.startswith("src/"):
        rel = f"src/{rel}"
    return helix_root / rel
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
# ── Within-file merge helpers ─────────────────────────────────────────────
|
|
170
|
+
|
|
171
|
+
def _merge_locators(existing: str, generated: str) -> tuple[str, list[str], list[str]]:
|
|
172
|
+
"""Append locator entries whose keys are not already in existing."""
|
|
173
|
+
existing_keys = set(re.findall(r"^\s{2}(\w+)\s*:", existing, re.MULTILINE))
|
|
174
|
+
|
|
175
|
+
new_entries: list[tuple[str, str]] = []
|
|
176
|
+
current_key: str | None = None
|
|
177
|
+
current_lines: list[str] = []
|
|
178
|
+
|
|
179
|
+
for line in generated.splitlines():
|
|
180
|
+
key_match = re.match(r"^ (\w+)\s*:\s*\{", line)
|
|
181
|
+
if key_match:
|
|
182
|
+
if current_key:
|
|
183
|
+
new_entries.append((current_key, "\n".join(current_lines)))
|
|
184
|
+
current_key = key_match.group(1)
|
|
185
|
+
current_lines = [line]
|
|
186
|
+
elif current_key:
|
|
187
|
+
current_lines.append(line)
|
|
188
|
+
if re.match(r"^\s*\},?\s*$", line):
|
|
189
|
+
new_entries.append((current_key, "\n".join(current_lines)))
|
|
190
|
+
current_key = None
|
|
191
|
+
current_lines = []
|
|
192
|
+
|
|
193
|
+
added: list[str] = []
|
|
194
|
+
skipped: list[str] = []
|
|
195
|
+
append_lines: list[str] = []
|
|
196
|
+
|
|
197
|
+
for key, block in new_entries:
|
|
198
|
+
if key in existing_keys:
|
|
199
|
+
skipped.append(key)
|
|
200
|
+
else:
|
|
201
|
+
append_lines.append(block)
|
|
202
|
+
added.append(key)
|
|
203
|
+
|
|
204
|
+
if not append_lines:
|
|
205
|
+
return existing, added, skipped
|
|
206
|
+
|
|
207
|
+
insertion = "\n" + "\n".join(append_lines)
|
|
208
|
+
merged = re.sub(r"(\n\}\s*as\s+const\s*;)", insertion + r"\1", existing, count=1)
|
|
209
|
+
if merged == existing:
|
|
210
|
+
merged = existing.rstrip() + "\n" + insertion + "\n"
|
|
211
|
+
return merged, added, skipped
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def _merge_steps(existing: str, generated: str) -> tuple[str, list[str], list[str]]:
|
|
215
|
+
"""Append step blocks whose regex pattern is not already in existing."""
|
|
216
|
+
existing_patterns = set(re.findall(r"/\^([^/]+)\$/", existing))
|
|
217
|
+
|
|
218
|
+
step_block_re = re.compile(r"^(Given|When|Then)\(", re.MULTILINE)
|
|
219
|
+
parts = step_block_re.split(generated)
|
|
220
|
+
|
|
221
|
+
blocks: list[tuple[str, str]] = []
|
|
222
|
+
i = 1
|
|
223
|
+
while i + 1 < len(parts):
|
|
224
|
+
keyword = parts[i]
|
|
225
|
+
body = parts[i + 1]
|
|
226
|
+
blocks.append((keyword, keyword + "(" + body))
|
|
227
|
+
i += 2
|
|
228
|
+
|
|
229
|
+
added: list[str] = []
|
|
230
|
+
skipped: list[str] = []
|
|
231
|
+
new_blocks: list[str] = []
|
|
232
|
+
|
|
233
|
+
for _kw, block in blocks:
|
|
234
|
+
pat_match = re.search(r"/\^([^/]+)\$/", block)
|
|
235
|
+
pattern = pat_match.group(1) if pat_match else block[:40]
|
|
236
|
+
if pattern in existing_patterns:
|
|
237
|
+
skipped.append(pattern)
|
|
238
|
+
else:
|
|
239
|
+
new_blocks.append(block)
|
|
240
|
+
added.append(pattern)
|
|
241
|
+
|
|
242
|
+
merged = existing.rstrip() + ("\n\n" + "\n".join(new_blocks) if new_blocks else "") + "\n"
|
|
243
|
+
return merged, added, skipped
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def _parse_feature_blocks(content: str) -> tuple[str, list[str]]:
|
|
247
|
+
"""
|
|
248
|
+
Parse a Gherkin feature file into a header string and a list of scenario blocks.
|
|
249
|
+
|
|
250
|
+
Each scenario block includes that scenario's own @tag lines so they are
|
|
251
|
+
preserved when the block is appended to an existing file. The header
|
|
252
|
+
contains everything before the first scenario (Feature:, description, Background:,
|
|
253
|
+
and any feature-level @tags).
|
|
254
|
+
|
|
255
|
+
The split is line-by-line so that @tag lines are always attributed to the
|
|
256
|
+
scenario that *follows* them, not the scenario above — which is what a
|
|
257
|
+
regex lookahead over Scenario: would break.
|
|
258
|
+
"""
|
|
259
|
+
_is_tag = re.compile(r"^[ \t]*@")
|
|
260
|
+
_is_scenario = re.compile(r"^[ \t]*Scenario(?:[ \t]+Outline)?[ \t]*:", re.IGNORECASE)
|
|
261
|
+
|
|
262
|
+
lines = content.splitlines(keepends=True)
|
|
263
|
+
header: list[str] = []
|
|
264
|
+
blocks: list[str] = []
|
|
265
|
+
current: list[str] = []
|
|
266
|
+
pending_tags: list[str] = []
|
|
267
|
+
in_scenario = False
|
|
268
|
+
|
|
269
|
+
for line in lines:
|
|
270
|
+
if _is_tag.match(line):
|
|
271
|
+
if in_scenario:
|
|
272
|
+
# This @tag line belongs to the NEXT scenario — flush the current one
|
|
273
|
+
blocks.append("".join(current))
|
|
274
|
+
current = []
|
|
275
|
+
in_scenario = False
|
|
276
|
+
pending_tags.append(line)
|
|
277
|
+
elif _is_scenario.match(line):
|
|
278
|
+
if in_scenario:
|
|
279
|
+
# Back-to-back scenarios with no @tag between them (unusual but valid)
|
|
280
|
+
blocks.append("".join(current))
|
|
281
|
+
current = []
|
|
282
|
+
# Start a new scenario block; claim the pending @tags as its own
|
|
283
|
+
current = pending_tags + [line]
|
|
284
|
+
pending_tags = []
|
|
285
|
+
in_scenario = True
|
|
286
|
+
else:
|
|
287
|
+
if in_scenario:
|
|
288
|
+
current.append(line)
|
|
289
|
+
else:
|
|
290
|
+
# Line belongs to the header (description, Background, blank lines…)
|
|
291
|
+
header.extend(pending_tags)
|
|
292
|
+
pending_tags = []
|
|
293
|
+
header.append(line)
|
|
294
|
+
|
|
295
|
+
# Flush the last scenario
|
|
296
|
+
if current:
|
|
297
|
+
blocks.append("".join(current))
|
|
298
|
+
# Trailing tags (e.g. a file that ends with a @tag but no Scenario) go to header
|
|
299
|
+
if pending_tags:
|
|
300
|
+
header.extend(pending_tags)
|
|
301
|
+
|
|
302
|
+
return "".join(header), blocks
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
def _merge_feature_scenarios(existing: str, generated: str) -> tuple[str, list[str], list[str]]:
    """Append generated Scenario / Scenario Outline blocks with new titles.

    Title comparison is case-insensitive and exact. On a collision the
    scenario already on disk — including its original step wording — wins and
    the generated block is discarded. The generated Feature: header is never
    used; only scenario blocks (with their own @tags, via
    _parse_feature_blocks) are appended.

    Returns (merged_content, added_titles, skipped_titles).
    """
    title_re = re.compile(
        r"^\s*Scenario(?:\s+Outline)?\s*:\s*(.+)$", re.MULTILINE | re.IGNORECASE
    )

    seen = {m.group(1).strip().lower() for m in title_re.finditer(existing)}

    _, candidate_blocks = _parse_feature_blocks(generated)

    added: list[str] = []
    skipped: list[str] = []
    accepted: list[str] = []

    for candidate in candidate_blocks:
        found = title_re.search(candidate)
        if found is None:
            # Block without a title line — nothing to key on, drop it.
            continue
        title = found.group(1).strip()
        if title.lower() in seen:
            skipped.append(title)
        else:
            accepted.append(candidate.rstrip())
            added.append(title)

    if not accepted:
        return existing, added, skipped

    merged = existing.rstrip() + "\n\n" + "\n\n".join(accepted) + "\n"
    return merged, added, skipped
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
def _merge_page_methods(existing: str, generated: str) -> tuple[str, list[str], list[str]]:
|
|
351
|
+
"""Append async methods whose names are not already in existing."""
|
|
352
|
+
existing_methods = set(re.findall(r"async\s+(\w+)\s*\(", existing))
|
|
353
|
+
|
|
354
|
+
method_re = re.compile(r"( async\s+\w+\s*\([^)]*\)[^{]*\{)", re.MULTILINE)
|
|
355
|
+
raw_parts = method_re.split(generated)
|
|
356
|
+
|
|
357
|
+
method_blocks: list[tuple[str, str]] = []
|
|
358
|
+
i = 1
|
|
359
|
+
while i + 1 < len(raw_parts):
|
|
360
|
+
sig = raw_parts[i]
|
|
361
|
+
body_and_rest = raw_parts[i + 1]
|
|
362
|
+
depth, end = 1, 0
|
|
363
|
+
for ch in body_and_rest:
|
|
364
|
+
if ch == "{":
|
|
365
|
+
depth += 1
|
|
366
|
+
elif ch == "}":
|
|
367
|
+
depth -= 1
|
|
368
|
+
if depth == 0:
|
|
369
|
+
break
|
|
370
|
+
end += 1
|
|
371
|
+
method_blocks.append((sig, sig + body_and_rest[: end + 1]))
|
|
372
|
+
i += 2
|
|
373
|
+
|
|
374
|
+
added: list[str] = []
|
|
375
|
+
skipped: list[str] = []
|
|
376
|
+
new_methods: list[str] = []
|
|
377
|
+
|
|
378
|
+
for sig, block in method_blocks:
|
|
379
|
+
name_match = re.search(r"async\s+(\w+)\s*\(", sig)
|
|
380
|
+
name = name_match.group(1) if name_match else sig[:30]
|
|
381
|
+
if name in existing_methods:
|
|
382
|
+
skipped.append(name)
|
|
383
|
+
else:
|
|
384
|
+
new_methods.append(block)
|
|
385
|
+
added.append(name)
|
|
386
|
+
|
|
387
|
+
if not new_methods:
|
|
388
|
+
return existing, added, skipped
|
|
389
|
+
|
|
390
|
+
insertion = "\n" + "\n\n".join(new_methods) + "\n"
|
|
391
|
+
merged = re.sub(r"\n\}\s*\n?$", insertion + "\n}\n", existing, count=1)
|
|
392
|
+
if merged == existing:
|
|
393
|
+
merged = existing.rstrip() + insertion + "}\n"
|
|
394
|
+
return merged, added, skipped
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
# ── Framework state inspection ────────────────────────────────────────────
|
|
398
|
+
|
|
399
|
+
def inspect_helix_project(helix_root: str) -> dict[str, Any]:
    """Inspect *helix_root* and report how much of the framework is installed.

    Returns a dict containing:
        framework_state  "absent" | "partial" | "present"
        recommendation   "scaffold_and_tests" | "tests_only"
        missing_infra / existing_infra / has_src / message

    "absent"  — helix_root or src/ missing.
    "partial" — src/ exists but a required infra file is missing.
    "present" — both LocatorHealer.ts and LocatorRepository.ts exist on disk.
    """
    root = Path(helix_root).expanduser().resolve()

    # Root missing entirely: everything must be scaffolded.
    if not root.exists():
        return {
            "framework_state": "absent",
            "missing_infra": sorted(_INFRA_FILES),
            "existing_infra": [],
            "has_src": False,
            "recommendation": "scaffold_and_tests",
            "message": (
                f"helix_root '{root}' does not exist. "
                "Create the directory and call write_helix_files with mode='scaffold_and_tests' — "
                "all 14 infra files will be copied from the boilerplate automatically."
            ),
        }

    infra_dir = root / "src" / "utils" / "locators"
    has_src = (root / "src").exists()

    on_disk = {f for f in _INFRA_FILES if (infra_dir / f).exists()}
    existing_infra = sorted(on_disk)
    missing_infra = sorted(_INFRA_FILES - on_disk)
    missing_required = sorted(_REQUIRED_INFRA - on_disk)

    # Fully installed: src/ present and both required infra files on disk.
    if has_src and not missing_required:
        return {
            "framework_state": "present",
            "missing_infra": missing_infra,
            "existing_infra": existing_infra,
            "has_src": True,
            "recommendation": "tests_only",
            "message": (
                "Helix-QA framework is present. "
                "Infrastructure files will not be touched. "
                "Only test files will be written or merged."
            ),
        }

    state = "partial" if has_src else "absent"
    return {
        "framework_state": state,
        "missing_infra": missing_infra,
        "existing_infra": existing_infra,
        "has_src": has_src,
        "recommendation": "scaffold_and_tests",
        "message": (
            f"Helix-QA framework is {state}. "
            f"Missing files: {missing_infra}. "
            "Call write_helix_files with mode='scaffold_and_tests' — "
            "missing infra files will be copied from the boilerplate automatically."
        ),
    }
|
|
461
|
+
|
|
462
|
+
|
|
463
|
+
# ── Main write function ────────────────────────────────────────────────────
|
|
464
|
+
|
|
465
|
+
def write_files_to_helix(
    helix_root: str,
    files: dict[str, str],
    mode: str = "scaffold_and_tests",
    force_scaffold: bool = False,
) -> dict[str, Any]:
    """
    Write generated files into the Helix-QA project with full deduplication.

    mode="tests_only"          write locators, pages, steps, features — merge
                               into existing files, skip infra files always.
    mode="scaffold_and_tests"  also write infra files that do not yet exist.
    force_scaffold=True        overwrite existing infra files (use deliberately).

    Parameters:
        helix_root: project root directory; must already exist.
        files: mapping of generated file key → file content; keys are
            classified by _resolve_destination().
        mode / force_scaffold: see above.

    Returns a report dict with keys: success, helix_root, written, skipped,
    deduplication (per-destination merge details) and summary counts.
    """
    root = Path(helix_root).expanduser().resolve()
    if not root.exists():
        # Unlike inspect_helix_project(), this function never creates the root.
        return {
            "success": False,
            "error": f"helix_root does not exist: {root}",
            "written": [], "skipped": [],
        }

    written: list[dict] = []
    skipped: list[dict] = []
    deduplication: dict[str, Any] = {}

    # ── Auto-scaffold missing infra files from boilerplate ────────────────
    # Runs before the files dict loop. Boilerplate is the source of truth for
    # all 14 utils/locators/ files — generated strings (scaffold_locator_repository)
    # are intentionally NOT used for infra writes when boilerplate content exists.
    _written_from_boilerplate: set[str] = set()
    if mode == "scaffold_and_tests":
        infra_dir = root / "src" / "utils" / "locators"
        for infra_file in sorted(_INFRA_FILES):
            dest_bp = infra_dir / infra_file
            # Existing infra is left untouched unless explicitly forced.
            if dest_bp.exists() and not force_scaffold:
                continue
            bp_content = _get_boilerplate_content(infra_file)
            if bp_content is None:
                # Not bundled; a matching key in `files` may still supply it below.
                continue
            try:
                action = "overwritten" if dest_bp.exists() else "created"
                dest_bp.parent.mkdir(parents=True, exist_ok=True)
                dest_bp.write_text(bp_content, encoding="utf-8")
                dest_rel_bp = str(dest_bp.relative_to(root))
                # Remember it so the files-dict loop skips the passed content.
                _written_from_boilerplate.add(dest_rel_bp)
                written.append({
                    "file_key": f"src/utils/locators/{infra_file}",
                    "dest": dest_rel_bp,
                    "bytes": len(bp_content.encode()),
                    "action": action,
                    "source": "boilerplate",
                })
            except OSError as exc:
                skipped.append({
                    "file_key": f"src/utils/locators/{infra_file}",
                    "dest": str(infra_dir / infra_file),
                    "reason": str(exc),
                })

    for file_key, raw_content in files.items():
        if not raw_content or not raw_content.strip():
            skipped.append({"file_key": file_key, "reason": "empty content"})
            continue

        # Apply interface adapter to all TypeScript content
        content = _adapt_to_helix_interface(raw_content)

        dest = _resolve_destination(root, file_key)
        # NOTE(review): parent directories are created even when the file is
        # later skipped (e.g. infra files in tests_only mode) — confirm this
        # side effect is intended.
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest_rel = str(dest.relative_to(root))

        # ── Infrastructure files ───────────────────────────────────────────
        if _INFRA_RE.search(file_key):
            if mode == "tests_only":
                skipped.append({
                    "file_key": file_key, "dest": dest_rel,
                    "reason": "infrastructure file — skipped in tests_only mode",
                })
                continue
            # Already written from boilerplate in the pre-loop above
            if dest_rel in _written_from_boilerplate:
                skipped.append({
                    "file_key": file_key, "dest": dest_rel,
                    "reason": "sourced from boilerplate (passed content ignored)",
                })
                continue
            if dest.exists() and not force_scaffold:
                skipped.append({
                    "file_key": file_key, "dest": dest_rel,
                    "reason": "infrastructure file already exists (pass force_scaffold=True to overwrite)",
                })
                continue
            # Reached only when the boilerplate has no copy of this file:
            # fall back to the passed (adapted) content.
            try:
                action = "overwritten" if dest.exists() else "created"
                dest.write_text(content, encoding="utf-8")
                written.append({"file_key": file_key, "dest": dest_rel,
                                "bytes": len(content.encode()), "action": action})
            except OSError as exc:
                skipped.append({"file_key": file_key, "dest": dest_rel, "reason": str(exc)})
            continue

        # ── Cucumber config: append profile, skip duplicate ────────────────
        if _CUCUMBER_RE.search(file_key):
            try:
                if dest.exists():
                    existing_text = dest.read_text(encoding="utf-8")
                    # Dedup key is the first "name:" token of the new profile;
                    # a plain substring check against the whole file decides.
                    profile_match = re.match(r"\s*(\w+)\s*:", content.strip())
                    profile_name = profile_match.group(1) if profile_match else None
                    if profile_name and profile_name in existing_text:
                        skipped.append({
                            "file_key": file_key, "dest": dest_rel,
                            "reason": f"profile '{profile_name}' already exists in cucumber.config.ts",
                        })
                        continue
                    dest.write_text(
                        existing_text.rstrip() + "\n\n// --- generated profile ---\n" + content,
                        encoding="utf-8",
                    )
                    written.append({"file_key": file_key, "dest": dest_rel,
                                    "bytes": len(content.encode()), "action": "appended"})
                else:
                    dest.write_text(content, encoding="utf-8")
                    written.append({"file_key": file_key, "dest": dest_rel,
                                    "bytes": len(content.encode()), "action": "created"})
            except OSError as exc:
                skipped.append({"file_key": file_key, "dest": dest_rel, "reason": str(exc)})
            continue

        # ── Feature files: merge scenarios, deduplicate by title ─────────────
        if _FEATURE_RE.search(file_key):
            try:
                if dest.exists():
                    existing_text = dest.read_text(encoding="utf-8")
                    merged, added, dup = _merge_feature_scenarios(existing_text, content)
                    deduplication[dest_rel] = {
                        "type": "feature",
                        "added_scenarios": added,
                        "skipped_scenarios": dup,
                    }
                    dest.write_text(merged, encoding="utf-8")
                    written.append({"file_key": file_key, "dest": dest_rel,
                                    "bytes": len(merged.encode()), "action": "merged"})
                else:
                    dest.write_text(content, encoding="utf-8")
                    written.append({"file_key": file_key, "dest": dest_rel,
                                    "bytes": len(content.encode()), "action": "created"})
            except OSError as exc:
                skipped.append({"file_key": file_key, "dest": dest_rel, "reason": str(exc)})
            continue

        # ── Merge-aware write for locators / page / steps ─────────────────
        try:
            if not dest.exists():
                dest.write_text(content, encoding="utf-8")
                written.append({"file_key": file_key, "dest": dest_rel,
                                "bytes": len(content.encode()), "action": "created"})
                continue

            existing_text = dest.read_text(encoding="utf-8")

            if _LOCATOR_RE.search(file_key):
                merged, added, dup = _merge_locators(existing_text, content)
                deduplication[dest_rel] = {
                    "type": "locators", "added_keys": added, "skipped_keys": dup,
                }
            elif _STEPS_RE.search(file_key):
                merged, added, dup = _merge_steps(existing_text, content)
                deduplication[dest_rel] = {
                    "type": "steps", "added_patterns": added, "skipped_patterns": dup,
                }
            elif _PAGE_RE.search(file_key):
                merged, added, dup = _merge_page_methods(existing_text, content)
                deduplication[dest_rel] = {
                    "type": "page", "added_methods": added, "skipped_methods": dup,
                }
            else:
                # Unclassified file kind: last-writer-wins overwrite.
                merged = content
                deduplication[dest_rel] = {"type": "unknown", "action": "overwritten"}

            dest.write_text(merged, encoding="utf-8")
            written.append({"file_key": file_key, "dest": dest_rel,
                            "bytes": len(merged.encode()), "action": "merged"})

        except OSError as exc:
            skipped.append({"file_key": file_key, "dest": dest_rel, "reason": str(exc)})

    # success is True when anything was written, or when nothing was skipped
    # (i.e. an empty request is not a failure).
    return {
        "success": len(written) > 0 or len(skipped) == 0,
        "helix_root": str(root),
        "written": written,
        "skipped": skipped,
        "deduplication": deduplication,
        "summary": {
            "requested": len(files),
            "written": len(written),
            "skipped": len(skipped),
            "scaffolded_from_boilerplate": len(_written_from_boilerplate),
        },
    }
|
|
665
|
+
|
|
666
|
+
|
|
667
|
+
# ── Read / list helpers ───────────────────────────────────────────────────
|
|
668
|
+
|
|
669
|
+
def read_helix_file(helix_root: str, relative_path: str) -> dict[str, Any]:
    """Read one UTF-8 text file from inside the Helix-QA project tree.

    Rejects any *relative_path* that resolves outside *helix_root* (path
    traversal), and reports a missing file as a non-error result carrying
    ``exists: False`` so callers can distinguish "absent" from "failed".
    """
    base = Path(helix_root).expanduser().resolve()
    candidate = (base / relative_path.lstrip("/")).resolve()

    # Path-traversal guard: the resolved target must stay under the root.
    try:
        candidate.relative_to(base)
    except ValueError:
        return {"success": False, "error": "Path escapes helix_root"}

    if not candidate.exists():
        return {"success": False, "exists": False, "path": relative_path}

    try:
        text = candidate.read_text(encoding="utf-8")
    except OSError as exc:
        return {"success": False, "error": str(exc)}
    return {
        "success": True,
        "exists": True,
        "path": relative_path,
        "content": text,
        "bytes": len(text.encode()),
    }
|
|
684
|
+
|
|
685
|
+
|
|
686
|
+
def update_helix_file(
    helix_root: str,
    relative_path: str,
    content: str,
    force_overwrite: bool = False,
) -> dict[str, Any]:
    """
    Write or merge a single file in the Helix-QA project.

    Applies the same interface adapter and merge logic as write_files_to_helix
    but targets one file directly, avoiding a full-dict write call.

    force_overwrite=True — replace the entire file without merging.
    """
    base = Path(helix_root).expanduser().resolve()
    dest = (base / relative_path.lstrip("/")).resolve()

    # Prevent path traversal
    try:
        dest.relative_to(base)
    except ValueError:
        return {"success": False, "error": "relative_path escapes helix_root — path traversal rejected"}

    if not content or not content.strip():
        return {"success": False, "error": "content is empty — nothing to write"}

    new_text = _adapt_to_helix_interface(content)
    dest.parent.mkdir(parents=True, exist_ok=True)
    rel = str(dest.relative_to(base))

    try:
        pre_existing = dest.exists()
        if force_overwrite or not pre_existing:
            # Fresh file, or the caller explicitly asked to clobber: no merge.
            dest.write_text(new_text, encoding="utf-8")
            return {
                "success": True,
                "path": rel,
                "action": "overwritten" if pre_existing else "created",
                "bytes": len(new_text.encode()),
                "deduplication": None,
            }

        current = dest.read_text(encoding="utf-8")
        key = relative_path

        # Pick the merge strategy from the file's role; the regex order
        # mirrors write_files_to_helix so both entry points agree.
        if _LOCATOR_RE.search(key):
            combined, fresh, repeats = _merge_locators(current, new_text)
            dedup_info = {"type": "locators", "added_keys": fresh, "skipped_keys": repeats}
        elif _STEPS_RE.search(key):
            combined, fresh, repeats = _merge_steps(current, new_text)
            dedup_info = {"type": "steps", "added_patterns": fresh, "skipped_patterns": repeats}
        elif _PAGE_RE.search(key):
            combined, fresh, repeats = _merge_page_methods(current, new_text)
            dedup_info = {"type": "page", "added_methods": fresh, "skipped_methods": repeats}
        elif _FEATURE_RE.search(key):
            # Feature files are the Gherkin source of truth — always overwrite
            dest.write_text(new_text, encoding="utf-8")
            return {
                "success": True,
                "path": rel,
                "action": "overwritten",
                "bytes": len(new_text.encode()),
                "deduplication": None,
            }
        else:
            combined = new_text
            dedup_info = {"type": "unknown", "action": "overwritten"}

        dest.write_text(combined, encoding="utf-8")
        return {
            "success": True,
            "path": rel,
            "action": "merged",
            "bytes": len(combined.encode()),
            "deduplication": dedup_info,
        }

    except OSError as exc:
        return {"success": False, "error": str(exc), "path": rel}
|
|
764
|
+
|
|
765
|
+
|
|
766
|
+
def list_helix_tree(helix_root: str) -> dict[str, Any]:
    """Classify the source files of a Helix-QA project into role buckets.

    Walks *helix_root* recursively and sorts every ``.ts``/``.js``/``.feature``
    file into one of: ``features``, ``steps``, ``pages``, ``locators``,
    ``utils_locators``, ``other``. Build/VCS directories (``node_modules``,
    ``dist``, ``test-results``, ``.git``) are skipped entirely.

    Returns ``{"success": True, "helix_root": ..., "tree": {...}}`` with
    POSIX-style (forward-slash) relative paths, or ``{"success": False, ...}``
    when the root does not exist.
    """
    root = Path(helix_root).expanduser().resolve()
    if not root.exists():
        return {"success": False, "error": f"helix_root does not exist: {root}"}

    tree: dict[str, list[str]] = {
        "features": [], "steps": [], "pages": [],
        "locators": [], "utils_locators": [], "other": [],
    }
    skip_dirs = ("node_modules", "dist", "test-results", ".git")
    for path in sorted(root.rglob("*")):
        if not path.is_file():
            continue
        if path.suffix not in (".ts", ".feature", ".js"):
            continue
        if any(p in skip_dirs for p in path.parts):
            continue
        # BUGFIX: use POSIX separators for the substring checks below.
        # str(relative_to(...)) yields backslash-separated paths on Windows,
        # so "test/features" etc. would never match and everything landed
        # in "other". as_posix() normalizes on all platforms.
        rel = path.relative_to(root).as_posix()
        if "test/features" in rel or rel.endswith(".feature"):
            tree["features"].append(rel)
        elif "test/steps" in rel and rel.endswith(".ts"):
            tree["steps"].append(rel)
        elif "src/pages" in rel and rel.endswith(".ts"):
            tree["pages"].append(rel)
        elif "src/locators" in rel and rel.endswith(".ts"):
            tree["locators"].append(rel)
        elif "utils/locators" in rel and rel.endswith(".ts"):
            tree["utils_locators"].append(rel)
        else:
            tree["other"].append(rel)

    return {"success": True, "helix_root": str(root), "tree": tree}
|