robotframework-testselection 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. TestSelection/__init__.py +3 -0
  2. TestSelection/cli.py +256 -0
  3. TestSelection/embedding/__init__.py +1 -0
  4. TestSelection/embedding/embedder.py +43 -0
  5. TestSelection/embedding/models.py +198 -0
  6. TestSelection/embedding/ports.py +24 -0
  7. TestSelection/execution/__init__.py +1 -0
  8. TestSelection/execution/listener.py +44 -0
  9. TestSelection/execution/prerun_modifier.py +43 -0
  10. TestSelection/execution/runner.py +75 -0
  11. TestSelection/parsing/__init__.py +1 -0
  12. TestSelection/parsing/datadriver_reader.py +54 -0
  13. TestSelection/parsing/keyword_resolver.py +51 -0
  14. TestSelection/parsing/suite_collector.py +85 -0
  15. TestSelection/parsing/text_builder.py +79 -0
  16. TestSelection/pipeline/__init__.py +1 -0
  17. TestSelection/pipeline/artifacts.py +110 -0
  18. TestSelection/pipeline/cache.py +74 -0
  19. TestSelection/pipeline/errors.py +18 -0
  20. TestSelection/pipeline/execute.py +52 -0
  21. TestSelection/pipeline/select.py +183 -0
  22. TestSelection/pipeline/vectorize.py +190 -0
  23. TestSelection/py.typed +0 -0
  24. TestSelection/selection/__init__.py +25 -0
  25. TestSelection/selection/dpp.py +31 -0
  26. TestSelection/selection/facility.py +25 -0
  27. TestSelection/selection/filtering.py +21 -0
  28. TestSelection/selection/fps.py +67 -0
  29. TestSelection/selection/kmedoids.py +32 -0
  30. TestSelection/selection/registry.py +70 -0
  31. TestSelection/selection/strategy.py +142 -0
  32. TestSelection/shared/__init__.py +1 -0
  33. TestSelection/shared/config.py +31 -0
  34. TestSelection/shared/types.py +117 -0
  35. robotframework_testselection-0.1.0.dist-info/METADATA +408 -0
  36. robotframework_testselection-0.1.0.dist-info/RECORD +39 -0
  37. robotframework_testselection-0.1.0.dist-info/WHEEL +4 -0
  38. robotframework_testselection-0.1.0.dist-info/entry_points.txt +2 -0
  39. robotframework_testselection-0.1.0.dist-info/licenses/LICENSE +191 -0
@@ -0,0 +1,3 @@
1
"""Vector-based diverse test case selection for Robot Framework."""

# Package version string (PEP 440); keep in sync with the release metadata.
__version__ = "0.1.0"
TestSelection/cli.py ADDED
@@ -0,0 +1,256 @@
1
+ """CLI entry points for the diverse test selection pipeline."""
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import logging
6
+ import os
7
+ import sys
8
+ from pathlib import Path
9
+
10
+ logger = logging.getLogger("TestSelection")
11
+
12
+
13
+ def _setup_logging(verbose: bool = False) -> None:
14
+ level = logging.DEBUG if verbose else logging.INFO
15
+ logging.basicConfig(
16
+ level=level,
17
+ format="%(message)s",
18
+ )
19
+
20
+
21
def _add_vectorize_parser(subparsers: argparse._SubParsersAction) -> None:
    """Register the 'vectorize' subcommand (stage 1) on *subparsers*."""
    sub = subparsers.add_parser("vectorize", help="Stage 1: vectorize test suite")
    sub.add_argument("--suite", required=True, type=Path, help="Path to .robot suite")
    sub.add_argument("--output", required=True, type=Path, help="Artifact output dir")
    sub.add_argument("--model", default="all-MiniLM-L6-v2", help="Embedding model name")
    sub.add_argument("--resolve-depth", type=int, default=0, help="Keyword resolve depth")
    sub.add_argument("--force", action="store_true", help="Force re-indexing")
    sub.add_argument("--datadriver-csv", nargs="*", type=Path, help="DataDriver CSV files")
    sub.set_defaults(func=_cmd_vectorize)
33
+
34
+
35
def _add_select_parser(subparsers: argparse._SubParsersAction) -> None:
    """Register the 'select' subcommand (stage 2); defaults honor DIVERSE_* env vars."""
    env = os.environ
    sub = subparsers.add_parser("select", help="Stage 2: select diverse subset")
    sub.add_argument("--artifacts", required=True, type=Path, help="Artifact directory")
    sub.add_argument(
        "--k",
        type=int,
        default=int(env.get("DIVERSE_K", "50")),
        help="Number of tests to select",
    )
    sub.add_argument(
        "--strategy",
        default=env.get("DIVERSE_STRATEGY", "fps"),
        help="Selection strategy",
    )
    out = env.get("DIVERSE_OUTPUT", "")
    sub.add_argument(
        "--output",
        type=Path,
        default=Path(out) if out else None,
        help="Output selection file",
    )
    sub.add_argument(
        "--include-tags", nargs="*",
        help="Include only tests with these tags",
    )
    sub.add_argument("--exclude-tags", nargs="*", help="Exclude tests with these tags")
    sub.add_argument(
        "--seed",
        type=int,
        default=int(env.get("DIVERSE_SEED", "42")),
        help="Random seed",
    )
    sub.add_argument("--no-datadriver", action="store_true", help="Exclude DataDriver tests")
    sub.set_defaults(func=_cmd_select)
63
+
64
+
65
def _add_execute_parser(subparsers: argparse._SubParsersAction) -> None:
    """Register the 'execute' subcommand (stage 3) on *subparsers*."""
    sub = subparsers.add_parser("execute", help="Stage 3: execute selected tests")
    sub.add_argument("--suite", required=True, type=Path, help="Path to .robot suite")
    sub.add_argument("--selection", required=True, type=Path, help="Selection JSON file")
    sub.add_argument("--output-dir", type=Path, default=Path("./results"), help="Output dir")
    sub.add_argument("--robot-args", nargs="*", help="Extra robot arguments")
    sub.set_defaults(func=_cmd_execute)
77
+
78
+
79
def _add_run_parser(subparsers: argparse._SubParsersAction) -> None:
    """Register the 'run' subcommand (full pipeline); defaults honor DIVERSE_* env vars."""
    env = os.environ
    sub = subparsers.add_parser("run", help="Full pipeline: vectorize + select + execute")
    sub.add_argument("--suite", required=True, type=Path, help="Path to .robot suite")
    sub.add_argument(
        "--k",
        type=int,
        default=int(env.get("DIVERSE_K", "50")),
        help="Number of tests to select",
    )
    sub.add_argument(
        "--strategy",
        default=env.get("DIVERSE_STRATEGY", "fps"),
        help="Selection strategy",
    )
    sub.add_argument("--output-dir", type=Path, default=Path("./results"), help="Output dir")
    sub.add_argument("--model", default="all-MiniLM-L6-v2", help="Embedding model name")
    sub.add_argument("--resolve-depth", type=int, default=0, help="Keyword resolve depth")
    sub.add_argument("--force", action="store_true", help="Force re-indexing")
    sub.add_argument(
        "--seed",
        type=int,
        default=int(env.get("DIVERSE_SEED", "42")),
        help="Random seed",
    )
    sub.add_argument("--robot-args", nargs="*", help="Extra robot arguments")
    sub.set_defaults(func=_cmd_run)
101
+
102
+
103
def _cmd_vectorize(args: argparse.Namespace) -> int:
    """Handler for 'vectorize': returns 0 on success, 2 on failure."""
    from TestSelection.pipeline.vectorize import run_vectorize

    try:
        did_index = run_vectorize(
            suite_path=args.suite,
            artifact_dir=args.output,
            model_name=args.model,
            resolve_depth=args.resolve_depth,
            force=args.force,
            datadriver_csvs=args.datadriver_csv,
        )
        outcome = (
            "[DIVERSE-SELECT] Vectorization complete"
            if did_index
            else "[DIVERSE-SELECT] Vectorization skipped (no changes)"
        )
        logger.info(outcome)
        return 0
    except Exception as exc:
        logger.error("[DIVERSE-SELECT] Vectorization failed: %s", exc)
        return 2
123
+
124
+
125
def _cmd_select(args: argparse.Namespace) -> int:
    """Handler for 'select': returns 0 on success, 2 on failure."""
    from TestSelection.pipeline.select import run_select

    try:
        outcome = run_select(
            artifact_dir=args.artifacts,
            k=args.k,
            strategy=args.strategy,
            output_file=args.output,
            include_tags=args.include_tags,
            exclude_tags=args.exclude_tags,
            seed=args.seed,
            include_datadriver=not args.no_datadriver,
        )
        logger.info(
            "[DIVERSE-SELECT] Selected %d tests (strategy=%s)",
            len(outcome.selected),
            outcome.strategy,
        )
        return 0
    except Exception as exc:
        logger.error("[DIVERSE-SELECT] Selection failed: %s", exc)
        return 2
148
+
149
+
150
def _cmd_execute(args: argparse.Namespace) -> int:
    """Handler for 'execute': thin wrapper returning robot's exit code."""
    from TestSelection.pipeline.execute import run_execute

    return run_execute(
        suite_path=args.suite,
        selection_file=args.selection,
        output_dir=str(args.output_dir),
        extra_robot_args=args.robot_args,
    )
159
+
160
+
161
def _cmd_run(args: argparse.Namespace) -> int:
    """Handler for 'run': vectorize, select, then execute.

    A failure in stage 1 or 2 degrades gracefully to executing the
    full suite without any selection.
    """
    from TestSelection.pipeline.execute import run_execute
    from TestSelection.pipeline.select import run_select
    from TestSelection.pipeline.vectorize import run_vectorize

    artifact_dir = args.output_dir / ".artifacts"

    # Stage 1: build embedding artifacts for the suite.
    try:
        run_vectorize(
            suite_path=args.suite,
            artifact_dir=artifact_dir,
            model_name=args.model,
            resolve_depth=args.resolve_depth,
            force=args.force,
        )
    except Exception as exc:
        logger.warning(
            "[DIVERSE-SELECT] Vectorization failed, falling back to all tests: %s",
            exc,
        )
        return _fallback_execute(args)

    # Stage 2: pick the diverse subset and persist it.
    selection_path = artifact_dir / "selected_tests.json"
    try:
        run_select(
            artifact_dir=artifact_dir,
            k=args.k,
            strategy=args.strategy,
            output_file=selection_path,
            seed=args.seed,
        )
    except Exception as exc:
        logger.warning(
            "[DIVERSE-SELECT] Selection failed, falling back to all tests: %s",
            exc,
        )
        return _fallback_execute(args)

    # Stage 3: run only the selected tests.
    return run_execute(
        suite_path=args.suite,
        selection_file=selection_path,
        output_dir=str(args.output_dir),
        extra_robot_args=args.robot_args,
    )
208
+
209
+
210
def _fallback_execute(args: argparse.Namespace) -> int:
    """Execute all tests without selection (graceful degradation)."""
    logger.info("[DIVERSE-SELECT] Running all tests (no selection)")
    try:
        import robot

        cli_args = ["--outputdir", str(args.output_dir), str(args.suite)]
        return robot.run_cli(cli_args, exit=False)  # type: ignore[attr-defined]
    except Exception as exc:
        logger.error("[DIVERSE-SELECT] Fallback execution failed: %s", exc)
        return 2
225
+
226
+
227
def build_parser() -> argparse.ArgumentParser:
    """Build the top-level argument parser with all subcommands registered."""
    parser = argparse.ArgumentParser(
        prog="testcase-select",
        description="Vector-based diverse test case selection for Robot Framework",
    )
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose logging")
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    # Registration order determines the order in --help output.
    for register in (
        _add_vectorize_parser,
        _add_select_parser,
        _add_execute_parser,
        _add_run_parser,
    ):
        register(subparsers)
    return parser
240
+
241
+
242
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: parse *argv* and dispatch to the chosen subcommand."""
    parser = build_parser()
    namespace = parser.parse_args(argv)
    _setup_logging(getattr(namespace, "verbose", False))

    # Without a subcommand there is nothing to dispatch; show usage instead.
    if not namespace.command:
        parser.print_help()
        return 0
    return namespace.func(namespace)
253
+
254
+
255
if __name__ == "__main__":
    # Direct script execution; SystemExit carries the CLI exit code.
    raise SystemExit(main())
@@ -0,0 +1 @@
1
+ """Embedding bounded context: vector embedding of test case text representations."""
@@ -0,0 +1,43 @@
1
+ """SentenceTransformer adapter -- ACL wrapping sentence-transformers."""
2
+ from __future__ import annotations
3
+
4
+ import numpy as np
5
+ from numpy.typing import NDArray
6
+
7
+
8
class SentenceTransformerAdapter:
    """ACL: Wraps sentence-transformers behind the EmbeddingModel protocol.

    Default model: all-MiniLM-L6-v2 (22M params, 384 dims, CPU-friendly).
    Alternative: all-mpnet-base-v2 (768 dims) for higher quality.
    """

    def __init__(
        self,
        model_name: str = "all-MiniLM-L6-v2",
        *,
        show_progress_bar: bool = False,
    ) -> None:
        # Imported lazily so the package can be used without the heavy
        # sentence-transformers dependency unless embedding is needed.
        from sentence_transformers import SentenceTransformer

        self._model = SentenceTransformer(model_name)
        self._model_name = model_name
        self._show_progress_bar = show_progress_bar

    @property
    def model_name(self) -> str:
        """Name the model was loaded under."""
        return self._model_name

    @property
    def embedding_dim(self) -> int:
        """Dimensionality of the vectors produced by :meth:`encode`.

        Raises:
            TypeError: when the backend does not report an integer dimension.
                (Previously an ``assert``, which is stripped under ``python -O``
                and would have let a non-int propagate silently.)
        """
        dim = self._model.get_sentence_embedding_dimension()
        if not isinstance(dim, int):
            raise TypeError(
                f"Model did not report an integer embedding dimension: {dim!r}"
            )
        return dim

    def encode(self, texts: list[str]) -> NDArray[np.float32]:
        """Embed *texts* into L2-normalized float32 vectors."""
        result: NDArray[np.float32] = self._model.encode(
            texts,
            show_progress_bar=self._show_progress_bar,
            normalize_embeddings=True,
        )
        return result
@@ -0,0 +1,198 @@
1
+ """Core domain objects for the Embedding bounded context."""
2
+ from __future__ import annotations
3
+
4
+ import json
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ import numpy as np
9
+ from numpy.typing import NDArray
10
+
11
+
12
@dataclass(frozen=True)
class Embedding:
    """Value object wrapping one test case's embedding vector."""

    # Raw embedding vector; expected to be L2-normalized by the producer.
    vector: NDArray[np.float32]

    @property
    def dimensionality(self) -> int:
        """Size of the vector's first axis."""
        return int(self.vector.shape[0])

    @property
    def is_normalized(self) -> bool:
        """True when the L2 norm lies within 1e-6 of 1.0."""
        deviation = float(np.linalg.norm(self.vector)) - 1.0
        return -1e-6 < deviation < 1e-6
26
+
27
+
28
@dataclass(frozen=True)
class ManifestEntry:
    """Metadata for a single test in the artifact manifest.

    Mirrors one entry of the "tests" array written to test_manifest.json.
    """

    # Test identifier; presumably matches an entry of the 'ids' array in
    # embeddings.npz — TODO confirm against the vectorize pipeline.
    id: str
    # Human-readable test name.
    name: str
    # Robot Framework tags on the test (may be empty).
    tags: tuple[str, ...]
    # Owning suite identifier (serialized under the "suite" key).
    suite: str
    # Display name of the owning suite.
    suite_name: str
    # True when the test was generated by DataDriver.
    is_datadriver: bool
38
+
39
+
40
@dataclass(frozen=True)
class ArtifactManifest:
    """Describes the contents of an EmbeddingArtifact.

    Serialized to / loaded from test_manifest.json.
    """

    # Name of the embedding model that produced the vectors.
    model: str
    # Dimensionality of each embedding vector.
    embedding_dim: int
    # Number of tests covered by this artifact.
    test_count: int
    # Keyword resolution depth used when building test texts.
    resolve_depth: int
    # Per-test metadata entries.
    tests: tuple[ManifestEntry, ...]
49
+
50
+
51
@dataclass(frozen=True)
class EmbeddingArtifact:
    """Portable artifact produced by the Embedding Context.

    Consists of two files:
    - embeddings.npz: numpy archive with 'vectors' (N x dim) and 'ids' (N,)
    - test_manifest.json: human-readable metadata
    """

    # Filesystem location of embeddings.npz.
    embeddings_path: Path
    # Filesystem location of test_manifest.json.
    manifest_path: Path
    # Embedding model that produced the vectors.
    model_name: str
    # Dimensionality of each stored vector.
    embedding_dim: int
    # Number of embedded tests.
    test_count: int
65
+
66
+
67
class EmbeddingMatrix:
    """Aggregate root: holds all embeddings for a test suite.

    Invariants:
    - vectors is a 2-D array
    - vectors.shape[0] == len(test_ids) (one vector per test)
    - vectors.shape[1] == embedding_dim (consistent dimensionality)
    - All vectors are L2-normalized for cosine distance computation
      (not re-checked here; the embedding adapter is responsible)
    """

    def __init__(
        self,
        model_name: str,
        embedding_dim: int,
        vectors: NDArray[np.float32],
        test_ids: tuple[str, ...],
    ) -> None:
        self.model_name = model_name
        self.embedding_dim = embedding_dim
        self.vectors = vectors
        self.test_ids = test_ids
        # Fail fast on construction rather than deep inside a selection run.
        self.validate_dimensions()

    @property
    def test_count(self) -> int:
        """Number of tests (== number of embedding vectors)."""
        return len(self.test_ids)

    def validate_dimensions(self) -> None:
        """Raise ValueError when vectors do not match test_ids/embedding_dim.

        The ndim check comes first so malformed (non-2-D) input produces a
        clear ValueError instead of an IndexError from ``shape[1]``.
        """
        if self.vectors.ndim != 2:
            raise ValueError(
                f"Expected 2-D vector matrix, got ndim={self.vectors.ndim}"
            )
        if self.vectors.shape[0] != len(self.test_ids):
            raise ValueError(
                f"Vector count {self.vectors.shape[0]} != "
                f"test ID count {len(self.test_ids)}"
            )
        if self.vectors.shape[1] != self.embedding_dim:
            raise ValueError(
                f"Vector dim {self.vectors.shape[1]} != "
                f"expected dim {self.embedding_dim}"
            )

    def to_artifact(
        self,
        output_dir: Path,
        manifest_entries: tuple[ManifestEntry, ...],
        resolve_depth: int = 0,
    ) -> EmbeddingArtifact:
        """Serialize to portable artifact files.

        Writes embeddings.npz (vectors + ids) and test_manifest.json into
        *output_dir*, creating the directory if needed.
        """
        output_dir.mkdir(parents=True, exist_ok=True)
        emb_path = output_dir / "embeddings.npz"
        manifest_path = output_dir / "test_manifest.json"

        np.savez_compressed(
            emb_path,
            vectors=self.vectors,
            ids=np.array(list(self.test_ids)),
        )

        manifest = ArtifactManifest(
            model=self.model_name,
            embedding_dim=self.embedding_dim,
            test_count=self.test_count,
            resolve_depth=resolve_depth,
            tests=manifest_entries,
        )
        manifest_path.write_text(
            json.dumps(
                {
                    "model": manifest.model,
                    "embedding_dim": manifest.embedding_dim,
                    "test_count": manifest.test_count,
                    "resolve_depth": manifest.resolve_depth,
                    "tests": [
                        {
                            "id": e.id,
                            "name": e.name,
                            "tags": list(e.tags),
                            "suite": e.suite,
                            "suite_name": e.suite_name,
                            "is_datadriver": e.is_datadriver,
                        }
                        for e in manifest.tests
                    ],
                },
                indent=2,
            )
        )

        return EmbeddingArtifact(
            embeddings_path=emb_path,
            manifest_path=manifest_path,
            model_name=self.model_name,
            embedding_dim=self.embedding_dim,
            test_count=self.test_count,
        )

    @classmethod
    def from_artifact(
        cls, artifact_dir: Path
    ) -> tuple[EmbeddingMatrix, ArtifactManifest]:
        """Load an EmbeddingMatrix and ArtifactManifest from artifact files.

        Raises:
            FileNotFoundError: when either artifact file is missing.
            ValueError: when the loaded shapes are inconsistent
                (via ``validate_dimensions`` in the constructor).
        """
        emb_path = artifact_dir / "embeddings.npz"
        manifest_path = artifact_dir / "test_manifest.json"

        # allow_pickle=False: to_artifact only writes numeric and unicode
        # arrays, and refusing pickled objects avoids executing untrusted
        # data from a tampered artifact.
        data = np.load(emb_path, allow_pickle=False)
        vectors: NDArray[np.float32] = data["vectors"]
        ids: tuple[str, ...] = tuple(str(i) for i in data["ids"])

        raw = json.loads(manifest_path.read_text())
        manifest = ArtifactManifest(
            model=raw["model"],
            embedding_dim=raw["embedding_dim"],
            test_count=raw["test_count"],
            resolve_depth=raw.get("resolve_depth", 0),
            tests=tuple(
                ManifestEntry(
                    id=t["id"],
                    name=t["name"],
                    tags=tuple(t.get("tags", [])),
                    suite=t.get("suite", ""),
                    suite_name=t.get("suite_name", ""),
                    is_datadriver=t.get("is_datadriver", False),
                )
                for t in raw["tests"]
            ),
        )

        matrix = cls(
            model_name=raw["model"],
            embedding_dim=raw["embedding_dim"],
            vectors=vectors,
            test_ids=ids,
        )
        return matrix, manifest
@@ -0,0 +1,24 @@
1
+ """Embedding model protocol -- the port for embedding adapters."""
2
+ from __future__ import annotations
3
+
4
+ from typing import Protocol, runtime_checkable
5
+
6
+ import numpy as np
7
+ from numpy.typing import NDArray
8
+
9
+
10
@runtime_checkable
class EmbeddingModel(Protocol):
    """Protocol for embedding models.

    Decouples the domain from sentence-transformers or any other
    embedding backend. Implementations must be L2-normalizing.
    """

    @property
    def model_name(self) -> str:
        """Identifier of the underlying embedding model."""
        ...

    @property
    def embedding_dim(self) -> int:
        """Dimensionality of the vectors returned by encode()."""
        ...

    def encode(self, texts: list[str]) -> NDArray[np.float32]:
        """Embed *texts*; expected to yield one vector per input text."""
        ...
@@ -0,0 +1 @@
1
+ """Execution context: Robot Framework PreRunModifier and Listener v3."""
@@ -0,0 +1,44 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from pathlib import Path
5
+
6
+
7
class DiverseDataDriverListener:
    """Listener v3 that filters DataDriver-generated tests.

    Must run AFTER DataDriver. Lower priority = runs later.

    Usage::

        robot --listener module.DiverseDataDriverListener:file tests/
    """

    ROBOT_LISTENER_API_VERSION = 3
    ROBOT_LISTENER_PRIORITY = 50  # lower than default -> runs after DataDriver

    def __init__(self, selection_file: str) -> None:
        payload = json.loads(Path(selection_file).read_text())
        # Names of selected DataDriver-generated tests only.
        self._selected_dd_names: set[str] = {
            entry["name"]
            for entry in payload["selected"]
            if entry.get("is_datadriver", False)
        }
        self._has_dd_tests = len(self._selected_dd_names) > 0
        self._stats = {"suites_processed": 0, "tests_filtered": 0}

    def start_suite(self, data, result) -> None:  # noqa: ARG002
        # Single-test suites are treated as non-DataDriver and left alone.
        if not self._has_dd_tests or len(data.tests) <= 1:
            return
        before = len(data.tests)
        kept = [t for t in data.tests if t.name in self._selected_dd_names]
        if kept:
            data.tests = kept
            self._stats["suites_processed"] += 1
            self._stats["tests_filtered"] += before - len(kept)

    @property
    def stats(self) -> dict[str, int]:
        """Copy of the filtering counters accumulated so far."""
        return dict(self._stats)
@@ -0,0 +1,43 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from pathlib import Path
5
+
6
+ from robot.api import SuiteVisitor
7
+
8
+
9
class DiversePreRunModifier(SuiteVisitor):
    """PreRunModifier that filters standard (non-DataDriver) tests.

    Usage CLI::

        robot --prerunmodifier module.DiversePreRunModifier:file tests/

    Usage programmatic:
        suite.visit(DiversePreRunModifier('selected_tests.json'))
    """

    def __init__(self, selection_file: str) -> None:
        payload = json.loads(Path(selection_file).read_text())
        # Names of selected non-DataDriver tests only.
        self._selected_names: set[str] = {
            entry["name"]
            for entry in payload["selected"]
            if not entry.get("is_datadriver", False)
        }
        self._stats = {"kept": 0, "removed": 0}

    def start_suite(self, suite) -> None:  # type: ignore[override]
        before = len(suite.tests)
        kept = [t for t in suite.tests if t.name in self._selected_names]
        suite.tests = kept
        self._stats["kept"] += len(suite.tests)
        self._stats["removed"] += before - len(suite.tests)

    def end_suite(self, suite) -> None:  # type: ignore[override]
        # Drop child suites that the filtering above emptied entirely.
        suite.suites = [s for s in suite.suites if s.test_count > 0]

    def visit_test(self, test) -> None:  # type: ignore[override]
        # Intentional no-op: avoids descending into test bodies for speed.
        pass

    @property
    def stats(self) -> dict[str, int]:
        """Copy of the kept/removed counters accumulated so far."""
        return dict(self._stats)
@@ -0,0 +1,75 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from pathlib import Path
5
+
6
+
7
class ExecutionRunner:
    """Orchestrates Robot Framework execution with test selection.

    Builds the appropriate robot CLI arguments based on whether
    the selection includes DataDriver tests.
    """

    def __init__(
        self,
        suite_path: str | Path,
        selection_file: str | Path,
        output_dir: str | Path = "./results",
    ) -> None:
        self._suite_path = Path(suite_path)
        self._selection_file = Path(selection_file)
        self._output_dir = Path(output_dir)
        self._selection_data = json.loads(self._selection_file.read_text())
        selected = self._selection_data["selected"]
        self._has_datadriver = any(t.get("is_datadriver", False) for t in selected)

    def build_robot_args(self) -> list[str]:
        """Build robot CLI arguments with --prerunmodifier and --listener as needed."""
        prerun = (
            "TestSelection.execution.prerun_modifier.DiversePreRunModifier:"
            f"{self._selection_file}"
        )
        cli = ["--outputdir", str(self._output_dir), "--prerunmodifier", prerun]
        if self._has_datadriver:
            listener = (
                "TestSelection.execution.listener.DiverseDataDriverListener:"
                f"{self._selection_file}"
            )
            cli += ["--listener", listener]
        return cli

    def execute(self, extra_args: list[str] | None = None) -> int:
        """Run robot.run_cli with built arguments. Returns exit code."""
        import robot

        cli = self.build_robot_args()
        cli.extend(extra_args or [])
        cli.append(str(self._suite_path))
        return robot.run_cli(cli, exit=False)  # type: ignore[attr-defined]

    def generate_report(self, return_code: int) -> dict:
        """Generate selection_report.json with execution metadata."""
        selected = self._selection_data["selected"]
        dd_count = sum(1 for t in selected if t.get("is_datadriver", False))
        report = {
            "return_code": return_code,
            "suite_path": str(self._suite_path),
            "selection_file": str(self._selection_file),
            "selected_tests": len(selected),
            "datadriver_tests": dd_count,
            "standard_tests": len(selected) - dd_count,
            "strategy": self._selection_data.get("strategy", "unknown"),
            "status": "pass" if return_code == 0 else "fail",
        }
        self._output_dir.mkdir(parents=True, exist_ok=True)
        (self._output_dir / "selection_report.json").write_text(
            json.dumps(report, indent=2)
        )
        return report
+ return report