@qa-gentic/agents 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +203 -0
- package/bin/postinstall.js +75 -0
- package/bin/qa-stlc.js +76 -0
- package/package.json +48 -0
- package/skills/qa-stlc/AGENT-BEHAVIOR.md +373 -0
- package/skills/qa-stlc/deduplication-protocol.md +303 -0
- package/skills/qa-stlc/generate-gherkin.md +550 -0
- package/skills/qa-stlc/generate-playwright-code.md +439 -0
- package/skills/qa-stlc/generate-test-cases.md +176 -0
- package/skills/qa-stlc/write-helix-files.md +349 -0
- package/src/cmd-init.js +84 -0
- package/src/cmd-mcp-config.js +177 -0
- package/src/cmd-skills.js +124 -0
- package/src/cmd-verify.js +129 -0
- package/src/qa_stlc_agents/__init__.py +0 -0
- package/src/qa_stlc_agents/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/__pycache__/server.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/server.py +502 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/tools/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/tools/__pycache__/ado_gherkin.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_gherkin_generator/tools/ado_gherkin.py +854 -0
- package/src/qa_stlc_agents/agent_helix_writer/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/__pycache__/server.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/server.py +529 -0
- package/src/qa_stlc_agents/agent_helix_writer/tools/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/tools/__pycache__/helix_write.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_helix_writer/tools/helix_write.py +622 -0
- package/src/qa_stlc_agents/agent_playwright_generator/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/__pycache__/server.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/server.py +2771 -0
- package/src/qa_stlc_agents/agent_playwright_generator/tools/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/tools/__pycache__/ado_attach.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_playwright_generator/tools/ado_attach.py +62 -0
- package/src/qa_stlc_agents/agent_test_case_manager/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/__pycache__/server.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/server.py +483 -0
- package/src/qa_stlc_agents/agent_test_case_manager/tools/__init__.py +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/tools/__pycache__/ado_workitem.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/agent_test_case_manager/tools/ado_workitem.py +302 -0
- package/src/qa_stlc_agents/shared/__init__.py +0 -0
- package/src/qa_stlc_agents/shared/__pycache__/__init__.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/shared/__pycache__/auth.cpython-310.pyc +0 -0
- package/src/qa_stlc_agents/shared/auth.py +119 -0
|
@@ -0,0 +1,854 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ado_gherkin.py — Azure DevOps REST API calls for the Gherkin Generator agent.
|
|
3
|
+
|
|
4
|
+
Public API:
|
|
5
|
+
fetch_feature_hierarchy(org_url, project, feature_id) -> dict
|
|
6
|
+
fetch_work_item_for_gherkin(org_url, project, work_item_id) -> dict ← NEW
|
|
7
|
+
attach_feature_file(org_url, project, feature_id, title, content) -> dict
|
|
8
|
+
attach_work_item_file(org_url, project, work_item_id, title, content) -> dict ← NEW
|
|
9
|
+
validate_gherkin_content(gherkin_content) -> dict ← NEW
|
|
10
|
+
"""
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import json
|
|
14
|
+
import re
|
|
15
|
+
import xml.etree.ElementTree as ET
|
|
16
|
+
from typing import List
|
|
17
|
+
|
|
18
|
+
import requests
|
|
19
|
+
|
|
20
|
+
from qa_stlc_agents.shared.auth import get_auth_headers
|
|
21
|
+
|
|
22
|
+
_API = "7.1"
|
|
23
|
+
|
|
24
|
+
# Work item types that can parent a Gherkin file directly
|
|
25
|
+
_PBI_TYPES = {"Product Backlog Item", "Bug", "User Story", "Task"}
|
|
26
|
+
|
|
27
|
+
# Scenario count bounds per scope
|
|
28
|
+
_SCENARIO_MIN_FEATURE = 5
|
|
29
|
+
_SCENARIO_MAX_FEATURE = 10
|
|
30
|
+
_SCENARIO_MIN_WI = 3
|
|
31
|
+
_SCENARIO_MAX_WI = 9
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# ---------------------------------------------------------------------------
|
|
35
|
+
# validate_gherkin_content (NEW — structural enforcement gate)
|
|
36
|
+
# ---------------------------------------------------------------------------
|
|
37
|
+
|
|
38
|
+
def validate_gherkin_content(
    gherkin_content: str,
    scope: str = "feature",
) -> dict:
    """
    Structurally validate a .feature file before it is attached to ADO.

    Checks enforced:
        1. At least one @smoke tag is present.
        2. At least one @regression tag is present.
        3. Scenario count is within the allowed range for the scope.
               scope="feature"   → 5–10 scenarios
               scope="work_item" → 3–9 scenarios
           (any scope value other than "feature" uses the work-item bounds)
        4. Every scenario has at least one When step.
        5. No duplicate scenario titles.

    Fix over previous version: a title duplicated N times is now reported
    once (single error, single ``duplicate_titles`` entry) instead of N-1
    times.

    Returns:
        {
          "valid": bool,
          "errors": [str],        # empty list when valid
          "warnings": [str],      # non-blocking observations
          "stats": {
              "scenario_count": int,
              "has_smoke": bool,
              "has_regression": bool,
              "duplicate_titles": [str],
              "scenarios_missing_when": [str],
          }
        }
    """
    errors: list[str] = []
    warnings: list[str] = []

    lines = gherkin_content.splitlines()

    # ── Collect scenario blocks ──────────────────────────────────────────────
    scenario_titles: list[str] = []
    scenarios_missing_when: list[str] = []
    current_title: str | None = None
    current_has_when = False

    for line in lines:
        stripped = line.strip()

        # Tag lines (@smoke, @regression, …) are not steps; skip them so a
        # tag is never mistaken for a missing When step.
        if stripped.startswith("@"):
            continue

        scenario_match = re.match(
            r"^\s*(Scenario Outline|Scenario):\s*(.+)$", line
        )
        if scenario_match:
            # Flush previous scenario before starting a new one.
            if current_title is not None and not current_has_when:
                scenarios_missing_when.append(current_title)

            current_title = scenario_match.group(2).strip()
            current_has_when = False
            scenario_titles.append(current_title)
            continue

        if stripped.startswith("When "):
            current_has_when = True

    # Flush last scenario (the loop only flushes on the NEXT scenario header).
    if current_title is not None and not current_has_when:
        scenarios_missing_when.append(current_title)

    scenario_count = len(scenario_titles)

    # ── Tag checks ───────────────────────────────────────────────────────────
    # Search the full content: tags may appear on their own line or inline.
    has_smoke = bool(re.search(r"@smoke\b", gherkin_content))
    has_regression = bool(re.search(r"@regression\b", gherkin_content))

    if not has_smoke:
        errors.append(
            "Missing @smoke tag: every feature file must contain at least one "
            "@smoke scenario marking the primary happy path."
        )

    if not has_regression:
        errors.append(
            "Missing @regression tag: every feature file must contain at least one "
            "@regression scenario. Use @smoke only for the single primary happy path."
        )

    # ── Scenario count ───────────────────────────────────────────────────────
    if scope == "feature":
        min_s, max_s = _SCENARIO_MIN_FEATURE, _SCENARIO_MAX_FEATURE
        scope_label = "Feature-scoped"
    else:
        min_s, max_s = _SCENARIO_MIN_WI, _SCENARIO_MAX_WI
        scope_label = "Work-item-scoped"

    if scenario_count < min_s:
        errors.append(
            f"Too few scenarios: {scenario_count} found, minimum is {min_s} "
            f"for a {scope_label} feature file. Add more coverage for error "
            f"cases, boundary conditions, or cancel flows."
        )
    elif scenario_count > max_s:
        errors.append(
            f"Too many scenarios: {scenario_count} found, maximum is {max_s} "
            f"for a {scope_label} feature file. Split into multiple feature "
            f"files or merge related scenarios into Scenario Outlines."
        )

    # ── When steps ───────────────────────────────────────────────────────────
    for title in scenarios_missing_when:
        errors.append(
            f"Scenario '{title}' has no When step. Every scenario must contain "
            f"at least one When step describing the user action."
        )

    # ── Duplicate titles ─────────────────────────────────────────────────────
    # Report each duplicated title exactly once, in first-duplicate order.
    seen: set[str] = set()
    duplicate_titles: list[str] = []
    for title in scenario_titles:
        if title in seen and title not in duplicate_titles:
            duplicate_titles.append(title)
        seen.add(title)

    for title in duplicate_titles:
        errors.append(
            f"Duplicate scenario title: '{title}'. Every scenario title must be unique."
        )

    # ── Warnings (non-blocking) ───────────────────────────────────────────────
    # Heuristic: a @smoke tag "belongs" to a scenario when the very next line
    # is a Scenario header. Inline or multi-line tag groups may be undercounted.
    smoke_count = sum(
        1 for i, line in enumerate(lines)
        if "@smoke" in line
        and i + 1 < len(lines)
        and re.match(r"^\s*Scenario", lines[i + 1])
    )
    if smoke_count > 1:
        warnings.append(
            f"Multiple @smoke scenarios found ({smoke_count}). Convention is "
            f"one @smoke per feature file — the single primary happy path."
        )

    if not re.search(r"^\s*Background:", gherkin_content, re.MULTILINE):
        warnings.append(
            "No Background block present. If 2 or more scenarios share the "
            "same Given preconditions, consider extracting them into a Background."
        )

    return {
        "valid": len(errors) == 0,
        "errors": errors,
        "warnings": warnings,
        "stats": {
            "scenario_count": scenario_count,
            "has_smoke": has_smoke,
            "has_regression": has_regression,
            "duplicate_titles": duplicate_titles,
            "scenarios_missing_when": scenarios_missing_when,
        },
    }
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
# ---------------------------------------------------------------------------
|
|
202
|
+
# fetch_work_item_for_gherkin (NEW — PBI/Bug-scoped fetch)
|
|
203
|
+
# ---------------------------------------------------------------------------
|
|
204
|
+
|
|
205
|
+
def fetch_work_item_for_gherkin(
    org_url: str,
    project: str,
    work_item_id: int,
) -> dict:
    """
    Fetch a single PBI or Bug work item with all fields needed to generate a
    scoped Gherkin feature file. Unlike fetch_feature_hierarchy (which requires
    a Feature ID), this accepts any PBI or Bug ID directly.

    Returns the work item's own acceptance criteria, its linked test cases with
    full steps, its parent Feature metadata, and the Gherkin best-practice
    reference example — everything the LLM needs to write 3–9 scoped scenarios.

    Raises:
        requests.HTTPError: if the initial work-item GET fails
            (raise_for_status). Parent-feature and test-case fetches are
            best-effort and never raise.
    """
    org_url = org_url.rstrip("/")
    headers = get_auth_headers()

    # ── Fetch target work item ───────────────────────────────────────────────
    # $expand=relations is required so TestedBy and parent links come back.
    resp = requests.get(
        f"{org_url}/{project}/_apis/wit/workitems/{work_item_id}",
        headers=headers,
        params={"$expand": "relations", "api-version": _API},
        timeout=30,
    )
    resp.raise_for_status()
    data = resp.json()
    fields = data.get("fields", {})

    # Reject unsupported work-item types with an explanatory payload instead
    # of raising, so the calling agent can self-correct.
    wi_type = fields.get("System.WorkItemType", "")
    if wi_type not in _PBI_TYPES:
        return {
            "error": (
                f"Work item {work_item_id} is a '{wi_type}'. "
                f"fetch_work_item_for_gherkin only accepts: "
                f"{', '.join(sorted(_PBI_TYPES))}. "
                f"For Feature work items use fetch_feature_hierarchy instead."
            ),
            "work_item_type": wi_type,
            "work_item_id": work_item_id,
        }

    # Flatten the ADO field reference names into a plain dict; all rich-text
    # fields are stripped to plain text for the LLM.
    work_item = {
        "id": data["id"],
        "type": wi_type,
        "title": fields.get("System.Title", ""),
        "description": _strip_html(fields.get("System.Description", "") or ""),
        "acceptance_criteria": _strip_html(
            fields.get("Microsoft.VSTS.Common.AcceptanceCriteria", "") or ""
        ),
        # Repro steps are only populated for Bug work items.
        "repro_steps": _strip_html(
            fields.get("Microsoft.VSTS.TCM.ReproSteps", "") or ""
        ),
        "state": fields.get("System.State", ""),
        "priority": fields.get("Microsoft.VSTS.Common.Priority", ""),
        "story_points": fields.get("Microsoft.VSTS.Scheduling.StoryPoints", ""),
        "tags": fields.get("System.Tags", ""),
        "area_path": fields.get("System.AreaPath", ""),
        "iteration_path": fields.get("System.IterationPath", ""),
    }

    # ── Collect linked TC IDs ────────────────────────────────────────────────
    # Relation URLs end in the linked work item's numeric id.
    tc_ids = [
        int(r["url"].split("/")[-1])
        for r in data.get("relations", [])
        if r.get("rel") == "Microsoft.VSTS.Common.TestedBy-Forward"
    ]
    test_cases = _fetch_test_cases(org_url, project, tc_ids)

    # ── Fetch parent Feature (if present) ───────────────────────────────────
    # Hierarchy-Reverse points at the parent; a work item has at most one,
    # hence the unconditional break after the first match.
    parent_feature: dict | None = None
    for rel in data.get("relations", []):
        if rel.get("rel") == "System.LinkTypes.Hierarchy-Reverse":
            try:
                pr = requests.get(
                    rel["url"] + f"?api-version={_API}",
                    headers=get_auth_headers(),
                    timeout=30,
                )
                if pr.ok:
                    # NOTE(review): pr.json() is parsed twice below —
                    # harmless, but could be cached in a local.
                    pf = pr.json().get("fields", {})
                    parent_feature = {
                        "id": pr.json().get("id"),
                        "type": pf.get("System.WorkItemType", ""),
                        "title": pf.get("System.Title", ""),
                        "description": _strip_html(
                            pf.get("System.Description", "") or ""
                        ),
                        "acceptance_criteria": _strip_html(
                            pf.get("Microsoft.VSTS.Common.AcceptanceCriteria", "") or ""
                        ),
                    }
            except Exception:
                # Best-effort: a missing/inaccessible parent is not fatal.
                pass
            break

    # ── Derive suggested file name ───────────────────────────────────────────
    # Kebab-case the title: non-alphanumerics → "-", collapse runs, lowercase.
    safe_title = re.sub(r"[^a-zA-Z0-9\-]", "-", work_item["title"])
    safe_title = re.sub(r"-+", "-", safe_title).strip("-").lower()
    suggested_file_name = f"{work_item_id}_{safe_title}.feature"

    return {
        "work_item": work_item,
        "parent_feature": parent_feature,
        "existing_test_cases": test_cases,
        "existing_test_cases_count": len(test_cases),
        "suggested_file_name": suggested_file_name,
        "scope": "work_item",
        "scenario_count_rule": f"{_SCENARIO_MIN_WI}–{_SCENARIO_MAX_WI} scenarios for a single PBI/Bug",
        "best_practice_example": _GHERKIN_EXAMPLE,
    }
|
|
315
|
+
|
|
316
|
+
|
|
317
|
+
# ---------------------------------------------------------------------------
|
|
318
|
+
# attach_work_item_file (NEW — attach .feature directly to PBI/Bug)
|
|
319
|
+
# ---------------------------------------------------------------------------
|
|
320
|
+
|
|
321
|
+
def attach_work_item_file(
    org_url: str,
    project: str,
    work_item_id: int,
    work_item_title: str,
    gherkin_content: str,
) -> dict:
    """
    Validate, upload, and attach a scoped .feature file to a PBI or Bug
    work item in ADO.

    The structural validation gate runs before any network call: when the
    content fails the checks, nothing is uploaded and the error list is
    returned so the LLM can correct the file and retry.

    The attachment is named: {work_item_id}_{title_kebab}.feature
    """
    # Gate: never upload a structurally invalid .feature file.
    report = validate_gherkin_content(gherkin_content, scope="work_item")
    if not report["valid"]:
        return {
            "success": False,
            "reason": "validation_failed",
            "validation": report,
            "message": (
                "The .feature file was NOT uploaded to ADO. Fix the errors "
                "below and call attach_work_item_file again with corrected content."
            ),
        }

    base_url = org_url.rstrip("/")

    # Kebab-case the title so the attachment name is filesystem-friendly.
    kebab = re.sub(r"-+", "-", re.sub(r"[^a-zA-Z0-9\-]", "-", work_item_title))
    kebab = kebab.strip("-").lower()
    attachment_name = f"{work_item_id}_{kebab}.feature"

    # Step 1: push the raw UTF-8 bytes into the ADO attachment store.
    upload = requests.post(
        f"{base_url}/{project}/_apis/wit/attachments",
        headers=get_auth_headers("application/octet-stream"),
        params={"fileName": attachment_name, "api-version": _API},
        data=gherkin_content.encode("utf-8"),
        timeout=30,
    )
    upload.raise_for_status()
    stored_url = upload.json()["url"]

    # Step 2: link the stored blob to the work item as an AttachedFile relation.
    patch_ops = [
        {
            "op": "add",
            "path": "/relations/-",
            "value": {
                "rel": "AttachedFile",
                "url": stored_url,
                "attributes": {
                    "comment": (
                        "BDD regression feature file (work-item scope) — "
                        "generated by QA Gherkin Generator"
                    )
                },
            },
        }
    ]
    link = requests.patch(
        f"{base_url}/{project}/_apis/wit/workitems/{work_item_id}",
        headers=get_auth_headers("application/json-patch+json"),
        params={"api-version": _API},
        json=patch_ops,
        timeout=30,
    )
    link.raise_for_status()

    return {
        "success": True,
        "work_item_id": work_item_id,
        "file_name": attachment_name,
        "attachment_url": stored_url,
        "validation": report,
    }
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
401
|
+
# INTERNAL HELPER — shared by fetch_feature_hierarchy and fetch_epic_hierarchy
|
|
402
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
403
|
+
|
|
404
|
+
def _fetch_feature_data(org_url: str, project: str, feature_id: int) -> dict:
    """
    Core logic: fetch one Feature + its child PBIs/Bugs + their test cases.
    Returns the same shape as the old fetch_feature_hierarchy body.
    Called by both fetch_feature_hierarchy (single feature) and
    fetch_epic_hierarchy (once per child feature of an epic).

    Raises:
        requests.HTTPError: if the Feature GET itself fails; all child and
            test-case fetches are best-effort and simply skipped on error.
    """
    org_url = org_url.rstrip("/")
    headers = get_auth_headers()

    # ── Fetch the Feature work item ──────────────────────────────────────────
    # $expand=relations brings back the Hierarchy-Forward links to children.
    resp = requests.get(
        f"{org_url}/{project}/_apis/wit/workitems/{feature_id}",
        headers=headers,
        params={"$expand": "relations", "api-version": _API},
        timeout=30,
    )
    resp.raise_for_status()
    data = resp.json()
    fields = data.get("fields", {})

    # Flatten ADO field reference names; rich-text fields stripped to plain text.
    feature = {
        "id": data["id"],
        "type": fields.get("System.WorkItemType", ""),
        "title": fields.get("System.Title", ""),
        "description": _strip_html(fields.get("System.Description", "") or ""),
        "acceptance_criteria": _strip_html(
            fields.get("Microsoft.VSTS.Common.AcceptanceCriteria", "") or ""
        ),
        "state": fields.get("System.State", ""),
        "priority": fields.get("Microsoft.VSTS.Common.Priority", ""),
        "area_path": fields.get("System.AreaPath", ""),
        "iteration_path": fields.get("System.IterationPath", ""),
        "tags": fields.get("System.Tags", ""),
    }

    # ── Fetch child PBIs / Bugs ──────────────────────────────────────────────
    children: list[dict] = []
    # Maps child id → list of linked test-case ids (TestedBy-Forward).
    child_tc_map: dict[int, list[int]] = {}

    for rel in data.get("relations", []):
        if rel.get("rel") != "System.LinkTypes.Hierarchy-Forward":
            continue
        child_url = rel["url"]
        # Relation URLs end in the linked work item's numeric id.
        child_id = int(child_url.split("/")[-1])
        try:
            cr = requests.get(
                f"{child_url}?$expand=relations&api-version={_API}",
                headers=get_auth_headers(),
                timeout=30,
            )
            if not cr.ok:
                continue
            cr_data = cr.json()
            cf = cr_data.get("fields", {})
            child_type = cf.get("System.WorkItemType", "")
            # Only PBI/Bug/Story/Task children are relevant for Gherkin scope.
            if child_type not in _PBI_TYPES:
                continue

            children.append({
                "id": child_id,
                "type": child_type,
                "title": cf.get("System.Title", ""),
                "description": _strip_html(cf.get("System.Description", "") or ""),
                "acceptance_criteria": _strip_html(
                    cf.get("Microsoft.VSTS.Common.AcceptanceCriteria", "") or ""
                ),
                "state": cf.get("System.State", ""),
                "priority": cf.get("Microsoft.VSTS.Common.Priority", ""),
                "story_points": cf.get("Microsoft.VSTS.Scheduling.StoryPoints", ""),
            })

            child_tc_map[child_id] = [
                int(r["url"].split("/")[-1])
                for r in cr_data.get("relations", [])
                if r.get("rel") == "Microsoft.VSTS.Common.TestedBy-Forward"
            ]
        except Exception:
            # Best-effort: an unreachable/malformed child is skipped, not fatal.
            continue

    # ── Fetch test cases with full steps ─────────────────────────────────────
    # Set-dedup across children; NOTE(review): this makes test-case ordering
    # nondeterministic — confirm downstream consumers don't rely on order.
    all_tc_ids = list({tc_id for ids in child_tc_map.values() for tc_id in ids})
    test_cases = _fetch_test_cases(org_url, project, all_tc_ids)

    return {
        "feature": feature,
        "child_work_items": children,
        "child_work_items_count": len(children),
        "existing_test_cases": test_cases,
        "existing_test_cases_count": len(test_cases),
    }
|
|
495
|
+
|
|
496
|
+
|
|
497
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
498
|
+
# PUBLIC TOOL — Feature entry point (unchanged contract)
|
|
499
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
500
|
+
|
|
501
|
+
def fetch_feature_hierarchy(org_url: str, project: str, feature_id: int) -> dict:
    """
    Fetch a Feature work item together with all child PBIs/Bugs and their
    linked test cases (full steps included). The payload also carries the
    Gherkin best-practice reference example and the feature-scope scenario
    count rule for the generating LLM.
    """
    # Delegate the heavy lifting, then decorate the result with scope metadata.
    payload = dict(_fetch_feature_data(org_url, project, feature_id))
    payload["scope"] = "feature"
    payload["scenario_count_rule"] = (
        f"{_SCENARIO_MIN_FEATURE}–{_SCENARIO_MAX_FEATURE} scenarios for a Feature"
    )
    payload["best_practice_example"] = _GHERKIN_EXAMPLE
    return payload
|
|
516
|
+
|
|
517
|
+
|
|
518
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
519
|
+
# PUBLIC TOOL — Epic entry point (new)
|
|
520
|
+
# ─────────────────────────────────────────────────────────────────────────────
|
|
521
|
+
|
|
522
|
+
def fetch_epic_hierarchy(org_url: str, project: str, epic_id: int) -> dict:
    """
    Fetch an Epic work item with ALL child Features, their child PBIs/Bugs,
    and every linked test case (including full test steps). Use this as
    Step 1 when the user provides an Epic ID.

    Hierarchy resolved:
        Epic
        └── Feature  (one _fetch_feature_data call per feature)
            └── PBI / Bug
                └── Test Cases

    Raises:
        requests.HTTPError: if the Epic GET itself fails; every per-feature
            fetch is best-effort and skipped on error.
    """
    org_url = org_url.rstrip("/")
    headers = get_auth_headers()

    # ── Fetch the Epic ───────────────────────────────────────────────────────
    resp = requests.get(
        f"{org_url}/{project}/_apis/wit/workitems/{epic_id}",
        headers=headers,
        params={"$expand": "relations", "api-version": _API},
        timeout=30,
    )
    resp.raise_for_status()
    data = resp.json()
    fields = data.get("fields", {})

    # Flatten ADO field reference names; rich-text stripped to plain text.
    epic = {
        "id": data["id"],
        "type": fields.get("System.WorkItemType", ""),
        "title": fields.get("System.Title", ""),
        "description": _strip_html(fields.get("System.Description", "") or ""),
        "acceptance_criteria": _strip_html(
            fields.get("Microsoft.VSTS.Common.AcceptanceCriteria", "") or ""
        ),
        "state": fields.get("System.State", ""),
        "priority": fields.get("Microsoft.VSTS.Common.Priority", ""),
        "area_path": fields.get("System.AreaPath", ""),
        "iteration_path": fields.get("System.IterationPath", ""),
        "tags": fields.get("System.Tags", ""),
    }

    # ── Walk child Features and delegate to the shared helper ────────────────
    features: list[dict] = []
    total_children = 0       # sum of child PBIs/Bugs across all features
    total_test_cases = 0     # sum of linked test cases across all features

    for rel in data.get("relations", []):
        if rel.get("rel") != "System.LinkTypes.Hierarchy-Forward":
            continue
        child_url = rel["url"]
        # Relation URLs end in the linked work item's numeric id.
        child_id = int(child_url.split("/")[-1])

        # Peek at work item type before doing the full fetch — avoids the
        # expensive deep fetch for non-Feature children.
        try:
            peek = requests.get(
                f"{child_url}?api-version={_API}",
                headers=get_auth_headers(),
                timeout=30,
            )
            if not peek.ok:
                continue
            child_type = peek.json().get("fields", {}).get("System.WorkItemType", "")
            if child_type != "Feature":
                continue  # skip non-Feature children (e.g. orphan PBIs directly under Epic)
        except Exception:
            # Best-effort: unreachable child → skip, not fatal.
            continue

        # Full deep fetch via shared helper
        try:
            feature_data = _fetch_feature_data(org_url, project, child_id)
        except Exception:
            # A single failing feature does not abort the whole epic walk.
            continue

        features.append(feature_data)
        total_children += feature_data["child_work_items_count"]
        total_test_cases += feature_data["existing_test_cases_count"]

    return {
        "epic": epic,
        "features": features,
        "features_count": len(features),
        "total_child_work_items_count": total_children,
        "total_existing_test_cases_count": total_test_cases,
        "scope": "epic",
        "scenario_count_rule": (
            f"{_SCENARIO_MIN_FEATURE}–{_SCENARIO_MAX_FEATURE} scenarios per Feature "
            f"when generating at Epic scope"
        ),
        "best_practice_example": _GHERKIN_EXAMPLE,
    }
|
|
611
|
+
|
|
612
|
+
|
|
613
|
+
# ---------------------------------------------------------------------------
|
|
614
|
+
# attach_feature_file (extended with validation gate)
|
|
615
|
+
# ---------------------------------------------------------------------------
|
|
616
|
+
|
|
617
|
+
def attach_feature_file(
    org_url: str,
    project: str,
    feature_id: int,
    feature_title: str,
    gherkin_content: str,
) -> dict:
    """
    Validate, upload, and attach a .feature file to a Feature work item.

    The structural validation gate runs before any network call: when the
    content fails the checks, nothing is uploaded and the error list is
    returned so the LLM can correct the file and retry.
    """
    # Gate: never upload a structurally invalid .feature file.
    report = validate_gherkin_content(gherkin_content, scope="feature")
    if not report["valid"]:
        return {
            "success": False,
            "reason": "validation_failed",
            "validation": report,
            "message": (
                "The .feature file was NOT uploaded to ADO. Fix the errors "
                "below and call attach_feature_file again with corrected content."
            ),
        }

    base_url = org_url.rstrip("/")

    # Sanitize the title: non [a-zA-Z0-9_-] → "_", collapse runs, trim edges.
    # (Unlike the work-item variant, case is preserved here.)
    sanitized = re.sub(r"_+", "_", re.sub(r"[^a-zA-Z0-9_\-]", "_", feature_title))
    sanitized = sanitized.strip("_")
    attachment_name = f"{feature_id}_{sanitized}_regression.feature"

    # Step 1: push the raw UTF-8 bytes into the ADO attachment store.
    upload = requests.post(
        f"{base_url}/{project}/_apis/wit/attachments",
        headers=get_auth_headers("application/octet-stream"),
        params={"fileName": attachment_name, "api-version": _API},
        data=gherkin_content.encode("utf-8"),
        timeout=30,
    )
    upload.raise_for_status()
    stored_url = upload.json()["url"]

    # Step 2: link the stored blob to the Feature as an AttachedFile relation.
    patch_ops = [
        {
            "op": "add",
            "path": "/relations/-",
            "value": {
                "rel": "AttachedFile",
                "url": stored_url,
                "attributes": {
                    "comment": (
                        "BDD regression feature file — generated by QA Gherkin Generator"
                    )
                },
            },
        }
    ]
    link = requests.patch(
        f"{base_url}/{project}/_apis/wit/workitems/{feature_id}",
        headers=get_auth_headers("application/json-patch+json"),
        params={"api-version": _API},
        json=patch_ops,
        timeout=30,
    )
    link.raise_for_status()

    return {
        "success": True,
        "feature_id": feature_id,
        "file_name": attachment_name,
        "attachment_url": stored_url,
        "validation": report,
    }
|
|
690
|
+
|
|
691
|
+
|
|
692
|
+
# ---------------------------------------------------------------------------
|
|
693
|
+
# Internal helpers
|
|
694
|
+
# ---------------------------------------------------------------------------
|
|
695
|
+
|
|
696
|
+
def _fetch_test_cases(org_url: str, project: str, tc_ids: List[int]) -> list:
    """Fetch Test Case work items from Azure DevOps, one GET per id.

    For each id the work item is retrieved and its TCM steps XML is parsed
    into structured step dicts. Any id that fails — network error, non-OK
    HTTP status, or a malformed payload — is skipped silently so a single
    broken item never aborts the whole batch (best-effort semantics).

    Returns a list of dicts with id/title/state/priority/steps/steps_count.
    """
    fetched = []
    for work_item_id in tc_ids:
        try:
            response = requests.get(
                f"{org_url}/{project}/_apis/wit/workitems/{work_item_id}",
                headers=get_auth_headers(),
                params={"api-version": _API},
                timeout=30,
            )
            if not response.ok:
                continue  # e.g. 404 for a deleted item — skip, don't raise
            fields = response.json().get("fields", {})
            parsed_steps = _parse_steps_xml(
                fields.get("Microsoft.VSTS.TCM.Steps", "")
            )
            fetched.append({
                "id": work_item_id,
                "title": fields.get("System.Title", ""),
                "state": fields.get("System.State", ""),
                "priority": fields.get("Microsoft.VSTS.Common.Priority", ""),
                "steps": parsed_steps,
                "steps_count": len(parsed_steps),
            })
        except Exception:
            # Deliberate best-effort: one bad item must not fail the batch.
            continue
    return fetched
|
|
721
|
+
|
|
722
|
+
|
|
723
|
+
def _parse_steps_xml(xml_str: str) -> list:
|
|
724
|
+
if not xml_str:
|
|
725
|
+
return []
|
|
726
|
+
steps = []
|
|
727
|
+
try:
|
|
728
|
+
root = ET.fromstring(xml_str)
|
|
729
|
+
for step in root.findall(".//step"):
|
|
730
|
+
params = step.findall("parameterizedString")
|
|
731
|
+
action = params[0].text or "" if params else ""
|
|
732
|
+
expected = params[1].text or "" if len(params) > 1 else ""
|
|
733
|
+
steps.append({"action": action, "expected_result": expected})
|
|
734
|
+
except Exception:
|
|
735
|
+
pass
|
|
736
|
+
return steps
|
|
737
|
+
|
|
738
|
+
|
|
739
|
+
def _strip_html(html: str) -> str:
|
|
740
|
+
if not html:
|
|
741
|
+
return ""
|
|
742
|
+
text = re.sub(r"<br\s*/?>", "\n", html, flags=re.IGNORECASE)
|
|
743
|
+
text = re.sub(r"</p>|</li>", "\n", text, flags=re.IGNORECASE)
|
|
744
|
+
text = re.sub(r"<li[^>]*>", "• ", text, flags=re.IGNORECASE)
|
|
745
|
+
text = re.sub(r"<[^>]+>", "", text)
|
|
746
|
+
for old, new in [
|
|
747
|
+
(" ", " "), ("&", "&"), ("<", "<"),
|
|
748
|
+
(">", ">"), (""", '"'),
|
|
749
|
+
]:
|
|
750
|
+
text = text.replace(old, new)
|
|
751
|
+
return re.sub(r"\n{3,}", "\n\n", text).strip()
|
|
752
|
+
|
|
753
|
+
|
|
754
|
+
# ---------------------------------------------------------------------------
# Gherkin best-practice reference (embedded — no file dependency)
# ---------------------------------------------------------------------------

# Few-shot exemplar: this string is runtime data (presumably injected
# verbatim into generation prompts — verify against the caller), so its
# content must not be edited casually. The leading "# ..." lines are part
# of the string, not Python comments.
_GHERKIN_EXAMPLE = """\
# Best-practice Gherkin reference — follow this style exactly.
#
# Rules:
# • Feature title matches the ADO Feature work item title
# • Tags: @smoke for critical happy paths, @regression for all others
# • Background only when 2+ scenarios share the exact same preconditions
# • Scenario titles: verb phrase describing the user action and outcome
# • Steps: Given = state, When = action, Then = observable outcome
# • No "And" at the start of a scenario — use Given/When/Then
# • One Then assertion per scenario where possible
# • Scenario Outline + Examples for data-driven boundary tests
#
# Scenario count:
# • Feature-scoped file: 5–10 scenarios
# • Work-item-scoped file (PBI or Bug): 3–9 scenarios

@user_profile @regression
Feature: User Profile Photo Management
  As a registered user
  I want to manage my profile photo
  So that my account accurately reflects my identity

  Background:
    Given the user is authenticated with their Microsoft account
    And the user is on the My Profile slide-out

  @smoke
  Scenario: User uploads a valid profile photo and sees confirmation
    When the user clicks "Update Photo" and selects a valid JPG under 5 MB
    And the user adjusts the crop area and clicks Save
    Then the profile photo is updated and a success toast is displayed
    And the new photo is visible in the application header

  @regression
  Scenario: Upload is rejected when file exceeds the maximum size
    When the user attempts to upload a file of 5.1 MB
    Then an error message "File size exceeds 5 MB limit" is displayed
    And no upload request is sent to the server

  @regression
  Scenario Outline: Upload is rejected for unsupported file formats
    When the user attempts to upload a "<format>" file
    Then an error message indicating the unsupported format is displayed

    Examples:
      | format |
      | BMP |
      | TIFF |
      | EXE |

  @regression
  Scenario: Profile photo persists after page refresh
    Given the user has successfully updated their profile photo
    When the user refreshes the page
    Then the updated profile photo is still displayed

  @regression
  Scenario: Initials are shown as fallback when no photo is uploaded
    Given the user has not uploaded a profile photo
    When the user views any page showing their avatar
    Then their initials are displayed in place of a photo

  @regression
  Scenario: Cancelling the crop editor discards changes
    Given the user has opened the photo crop editor and made adjustments
    When the user clicks Cancel
    Then no changes are saved and the previous photo is displayed
"""
|
|
827
|
+
|
|
828
|
+
# ---------------------------------------------------------------------------
|
|
829
|
+
# Public aliases — satisfy the test contract and the skill routing rule.
|
|
830
|
+
#
|
|
831
|
+
# The MCP server imports attach_work_item_file (the canonical name).
|
|
832
|
+
# Test suite and skill documentation reference the more descriptive names below.
|
|
833
|
+
# Both names point to the same implementation — no duplication of logic.
|
|
834
|
+
# ---------------------------------------------------------------------------
|
|
835
|
+
|
|
836
|
+
def _gherkin_filename_for_work_item(work_item_id: int, title: str) -> str:
|
|
837
|
+
"""
|
|
838
|
+
Build a safe, kebab-case filename for a PBI/Bug-scoped feature file.
|
|
839
|
+
|
|
840
|
+
Convention: {work_item_id}_{title_kebab}.feature
|
|
841
|
+
|
|
842
|
+
Examples:
|
|
843
|
+
42, "User can Upload a Profile Photo"
|
|
844
|
+
-> "42_user-can-upload-a-profile-photo.feature"
|
|
845
|
+
99, "Fix: Bug #123 — crash on login!"
|
|
846
|
+
-> "99_fix-bug-123-crash-on-login.feature"
|
|
847
|
+
"""
|
|
848
|
+
kebab = re.sub(r"[^a-z0-9]+", "-", title.lower()).strip("-")
|
|
849
|
+
return f"{work_item_id}_{kebab}.feature"
|
|
850
|
+
|
|
851
|
+
|
|
852
|
+
#: Descriptive alias for attach_work_item_file.
#: Imported by tests and referenced in the generate-gherkin.md skill routing rule.
#: Binds the very same function object — callers may use either name
#: interchangeably; there is no wrapper and no behavioral difference.
attach_gherkin_to_work_item = attach_work_item_file
|