@qa-gentic/stlc-agents 1.0.1 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@qa-gentic/stlc-agents",
3
- "version": "1.0.1",
3
+ "version": "1.0.2",
4
4
  "description": "QA STLC Agents — MCP servers + skills for AI-powered test case, Gherkin, and Playwright generation against Azure DevOps. Works with Claude Code, GitHub Copilot, Cursor, Windsurf.",
5
5
  "keywords": [
6
6
  "playwright",
@@ -27,10 +27,19 @@ Infrastructure file protection
27
27
  Within-file deduplication
28
28
  For locators.ts : new const-object entries are merged; duplicate keys skipped.
29
29
  For *.steps.ts : new step blocks are appended; duplicate regex patterns skipped.
30
+ Cross-file deduplication is also applied: before writing any
31
+ *.steps.ts, ALL other *.steps.ts files in src/test/steps/ are
32
+ scanned for existing patterns. A pattern already defined in
33
+ any sibling file is treated as a duplicate — the block is
34
+ dropped from the incoming content. This prevents the
35
+ "Multiple step definitions match" Cucumber error.
30
36
  For *.page.ts : new async methods are appended; duplicate method names skipped.
31
37
  For *.feature : new Scenario blocks are appended; duplicate titles skipped.
32
- When a title matches, the existing scenario (and its original step
33
- wording) is kept the generated version is dropped entirely.
38
+ Cross-file deduplication is also applied: before writing any
39
+ *.feature file, ALL other *.feature files in
40
+ src/test/features/ are scanned for existing scenario titles.
41
+ A title already present in any sibling file is treated as a
42
+ duplicate and dropped from the incoming content.
34
43
 
35
44
  Interface adapter
36
45
  The generator emits repo.updateHealed / repo.incrementSuccess / repo.getBBox etc.
@@ -211,9 +220,53 @@ def _merge_locators(existing: str, generated: str) -> tuple[str, list[str], list
211
220
  return merged, added, skipped
212
221
 
213
222
 
214
- def _merge_steps(existing: str, generated: str) -> tuple[str, list[str], list[str]]:
215
- """Append step blocks whose regex pattern is not already in existing."""
223
+ def _collect_all_scenario_titles(features_dir: Path, exclude_file: Path | None = None) -> set[str]:
224
+ """Return every scenario title (lower-cased) defined in all *.feature files
225
+ in features_dir, optionally excluding one file (the one currently being written)."""
226
+ _title_re = re.compile(
227
+ r"^\s*Scenario(?:\s+Outline)?\s*:\s*(.+)$", re.MULTILINE | re.IGNORECASE
228
+ )
229
+ titles: set[str] = set()
230
+ if not features_dir.is_dir():
231
+ return titles
232
+ for f in features_dir.glob("*.feature"):
233
+ if exclude_file and f.resolve() == exclude_file.resolve():
234
+ continue
235
+ try:
236
+ titles.update(
237
+ m.group(1).strip().lower()
238
+ for m in _title_re.finditer(f.read_text(encoding="utf-8"))
239
+ )
240
+ except OSError:
241
+ pass
242
+ return titles
243
+
244
+
245
+ def _collect_all_step_patterns(steps_dir: Path, exclude_file: Path | None = None) -> set[str]:
246
+ """Return every /^pattern$/ defined in all *.steps.ts files in steps_dir,
247
+ optionally excluding one file (the one currently being written)."""
248
+ patterns: set[str] = set()
249
+ if not steps_dir.is_dir():
250
+ return patterns
251
+ for f in steps_dir.glob("*.steps.ts"):
252
+ if exclude_file and f.resolve() == exclude_file.resolve():
253
+ continue
254
+ try:
255
+ patterns.update(re.findall(r"/\^([^/]+)\$/", f.read_text(encoding="utf-8")))
256
+ except OSError:
257
+ pass
258
+ return patterns
259
+
260
+
261
+ def _merge_steps(
262
+ existing: str,
263
+ generated: str,
264
+ cross_file_patterns: set[str] | None = None,
265
+ ) -> tuple[str, list[str], list[str]]:
266
+ """Append step blocks whose regex pattern is not already in existing or in
267
+ any sibling step file (cross_file_patterns)."""
216
268
  existing_patterns = set(re.findall(r"/\^([^/]+)\$/", existing))
269
+ forbidden = existing_patterns | (cross_file_patterns or set())
217
270
 
218
271
  step_block_re = re.compile(r"^(Given|When|Then)\(", re.MULTILINE)
219
272
  parts = step_block_re.split(generated)
@@ -233,11 +286,14 @@ def _merge_steps(existing: str, generated: str) -> tuple[str, list[str], list[st
233
286
  for _kw, block in blocks:
234
287
  pat_match = re.search(r"/\^([^/]+)\$/", block)
235
288
  pattern = pat_match.group(1) if pat_match else block[:40]
236
- if pattern in existing_patterns:
289
+ if pattern in forbidden:
237
290
  skipped.append(pattern)
238
291
  else:
239
292
  new_blocks.append(block)
240
293
  added.append(pattern)
294
+ # Track newly added so subsequent blocks in the same batch
295
+ # don't get written twice either
296
+ forbidden.add(pattern)
241
297
 
242
298
  merged = existing.rstrip() + ("\n\n" + "\n".join(new_blocks) if new_blocks else "") + "\n"
243
299
  return merged, added, skipped
@@ -302,9 +358,14 @@ def _parse_feature_blocks(content: str) -> tuple[str, list[str]]:
302
358
  return "".join(header), blocks
303
359
 
304
360
 
305
- def _merge_feature_scenarios(existing: str, generated: str) -> tuple[str, list[str], list[str]]:
361
+ def _merge_feature_scenarios(
362
+ existing: str,
363
+ generated: str,
364
+ cross_file_titles: set[str] | None = None,
365
+ ) -> tuple[str, list[str], list[str]]:
306
366
  """
307
- Append Scenario / Scenario Outline blocks whose titles are not already present.
367
+ Append Scenario / Scenario Outline blocks whose titles are not already present
368
+ in this file or in any sibling feature file (cross_file_titles).
308
369
 
309
370
  Deduplication is by title (case-insensitive exact match). When a collision is
310
371
  found the existing scenario — including its original step wording — is kept and
@@ -322,6 +383,7 @@ def _merge_feature_scenarios(existing: str, generated: str) -> tuple[str, list[s
322
383
  existing_titles = {
323
384
  m.group(1).strip().lower() for m in _scenario_title_re.finditer(existing)
324
385
  }
386
+ forbidden = existing_titles | (cross_file_titles or set())
325
387
 
326
388
  _, gen_blocks = _parse_feature_blocks(generated)
327
389
 
@@ -334,11 +396,12 @@ def _merge_feature_scenarios(existing: str, generated: str) -> tuple[str, list[s
334
396
  if not title_match:
335
397
  continue
336
398
  title = title_match.group(1).strip()
337
- if title.lower() in existing_titles:
399
+ if title.lower() in forbidden:
338
400
  skipped.append(title)
339
401
  else:
340
402
  new_blocks.append(block.rstrip())
341
403
  added.append(title)
404
+ forbidden.add(title.lower())
342
405
 
343
406
  if not new_blocks:
344
407
  return existing, added, skipped
@@ -596,7 +659,9 @@ def write_files_to_helix(
596
659
  try:
597
660
  if dest.exists():
598
661
  existing_text = dest.read_text(encoding="utf-8")
599
- merged, added, dup = _merge_feature_scenarios(existing_text, content)
662
+ features_dir = root / "src" / "test" / "features"
663
+ cross = _collect_all_scenario_titles(features_dir, exclude_file=dest)
664
+ merged, added, dup = _merge_feature_scenarios(existing_text, content, cross)
600
665
  deduplication[dest_rel] = {
601
666
  "type": "feature",
602
667
  "added_scenarios": added,
@@ -629,7 +694,9 @@ def write_files_to_helix(
629
694
  "type": "locators", "added_keys": added, "skipped_keys": dup,
630
695
  }
631
696
  elif _STEPS_RE.search(file_key):
632
- merged, added, dup = _merge_steps(existing_text, content)
697
+ steps_dir = root / "src" / "test" / "steps"
698
+ cross = _collect_all_step_patterns(steps_dir, exclude_file=dest)
699
+ merged, added, dup = _merge_steps(existing_text, content, cross)
633
700
  deduplication[dest_rel] = {
634
701
  "type": "steps", "added_patterns": added, "skipped_patterns": dup,
635
702
  }
@@ -731,21 +798,28 @@ def update_helix_file(
731
798
  merged, added, dup = _merge_locators(existing_text, adapted)
732
799
  dedup = {"type": "locators", "added_keys": added, "skipped_keys": dup}
733
800
  elif _STEPS_RE.search(file_key):
734
- merged, added, dup = _merge_steps(existing_text, adapted)
801
+ steps_dir = root / "src" / "test" / "steps"
802
+ cross = _collect_all_step_patterns(steps_dir, exclude_file=target)
803
+ merged, added, dup = _merge_steps(existing_text, adapted, cross)
735
804
  dedup = {"type": "steps", "added_patterns": added, "skipped_patterns": dup}
736
805
  elif _PAGE_RE.search(file_key):
737
806
  merged, added, dup = _merge_page_methods(existing_text, adapted)
738
807
  dedup = {"type": "page", "added_methods": added, "skipped_methods": dup}
739
808
  elif _FEATURE_RE.search(file_key):
740
- # Feature files are the Gherkin source of truth — always overwrite
741
- target.write_text(adapted, encoding="utf-8")
742
- return {
743
- "success": True,
744
- "path": dest_rel,
745
- "action": "overwritten",
746
- "bytes": len(adapted.encode()),
747
- "deduplication": None,
748
- }
809
+ features_dir = root / "src" / "test" / "features"
810
+ cross = _collect_all_scenario_titles(features_dir, exclude_file=target)
811
+ if not target.exists():
812
+ target.write_text(adapted, encoding="utf-8")
813
+ return {
814
+ "success": True,
815
+ "path": dest_rel,
816
+ "action": "created",
817
+ "bytes": len(adapted.encode()),
818
+ "deduplication": None,
819
+ }
820
+ existing_text = target.read_text(encoding="utf-8")
821
+ merged, added, dup = _merge_feature_scenarios(existing_text, adapted, cross)
822
+ dedup = {"type": "feature", "added_scenarios": added, "skipped_scenarios": dup}
749
823
  else:
750
824
  merged = adapted
751
825
  dedup = {"type": "unknown", "action": "overwritten"}