result-companion 0.0.1 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. result_companion/__init__.py +8 -0
  2. result_companion/core/analizers/__init__.py +0 -0
  3. result_companion/core/analizers/common.py +58 -0
  4. result_companion/core/analizers/factory_common.py +104 -0
  5. result_companion/core/analizers/local/__init__.py +0 -0
  6. result_companion/core/analizers/local/ollama_exceptions.py +10 -0
  7. result_companion/core/analizers/local/ollama_install.py +279 -0
  8. result_companion/core/analizers/local/ollama_runner.py +124 -0
  9. result_companion/core/analizers/local/ollama_server_manager.py +185 -0
  10. result_companion/core/analizers/models.py +17 -0
  11. result_companion/core/analizers/remote/__init__.py +0 -0
  12. result_companion/core/analizers/remote/custom_endpoint.py +0 -0
  13. result_companion/core/analizers/remote/openai.py +0 -0
  14. result_companion/core/chunking/chunking.py +113 -0
  15. result_companion/core/chunking/utils.py +114 -0
  16. result_companion/core/configs/default_config.yaml +85 -0
  17. result_companion/core/html/__init__.py +0 -0
  18. result_companion/core/html/html_creator.py +179 -0
  19. result_companion/core/html/llm_injector.py +20 -0
  20. result_companion/core/parsers/__init__.py +0 -0
  21. result_companion/core/parsers/config.py +256 -0
  22. result_companion/core/parsers/result_parser.py +101 -0
  23. result_companion/core/results/__init__.py +0 -0
  24. result_companion/core/results/visitors.py +34 -0
  25. result_companion/core/utils/__init__.py +0 -0
  26. result_companion/core/utils/log_levels.py +23 -0
  27. result_companion/core/utils/logging_config.py +115 -0
  28. result_companion/core/utils/progress.py +61 -0
  29. result_companion/entrypoints/__init__.py +0 -0
  30. result_companion/entrypoints/cli/__init__.py +0 -0
  31. result_companion/entrypoints/cli/cli_app.py +266 -0
  32. result_companion/entrypoints/run_rc.py +171 -0
  33. result_companion-0.0.1.dist-info/METADATA +216 -0
  34. result_companion-0.0.1.dist-info/RECORD +37 -0
  35. result_companion-0.0.1.dist-info/WHEEL +4 -0
  36. result_companion-0.0.1.dist-info/entry_points.txt +3 -0
  37. result_companion-0.0.1.dist-info/licenses/LICENSE +201 -0
result_companion/core/html/html_creator.py
@@ -0,0 +1,179 @@
+ from pathlib import Path
+ from typing import Dict
+
+ from robot.api import ExecutionResult
+ from robot.reporting.resultwriter import ResultWriter
+
+ from result_companion.core.html.llm_injector import LLMDataInjector
+ from result_companion.core.results.visitors import UniqueNameResultVisitor
+
+
+ def create_llm_html_log(
+     input_result_path: Path | str,
+     llm_output_path: Path | str,
+     llm_results: Dict[str, str],
+     model_info: Dict[str, str] | None = None,
+ ) -> None:
+     """Create HTML log with LLM data embedded in JS model.
+
+     Args:
+         input_result_path: Path to Robot Framework output.xml.
+         llm_output_path: Path for generated HTML report.
+         llm_results: Mapping of test names to LLM analysis.
+         model_info: Optional model information.
+     """
+     results = ExecutionResult(str(input_result_path))
+
+     results.visit(UniqueNameResultVisitor())
+     results.visit(LLMDataInjector(llm_results, model_info))
+
+     writer = ResultWriter(results)
+     writer.write_results(report=None, log=str(llm_output_path))
+
+     _inject_llm_ui(Path(llm_output_path))
+
+
+ def _inject_llm_ui(html_path: Path) -> None:
+     """Add JavaScript to display LLM results per test."""
+     script = """
+     <style>
+     .llm-section { margin: 12px 0; border: 1px solid var(--secondary-color); border-radius: 6px; overflow: hidden; }
+     .llm-header { padding: 10px 16px; background: var(--primary-color); color: var(--text-color); cursor: pointer; display: flex; justify-content: space-between; align-items: center; user-select: none; }
+     .llm-header:hover { background: var(--secondary-color); }
+     .llm-chevron { transition: transform 0.2s; font-size: 12px; }
+     .llm-chevron.collapsed { transform: rotate(-90deg); }
+     .llm-content { padding: 16px; background: var(--background-color); border-top: 1px solid var(--secondary-color); max-height: 500px; overflow-y: auto; display: none; position: relative; }
+     .llm-content h2 { color: var(--link-color); font-size: 14px; margin-top: 12px; margin-bottom: 8px; }
+     .llm-model { font-size: 11px; opacity: 0.7; margin-left: 8px; }
+     .llm-copy { position: absolute; top: 8px; right: 8px; background: var(--secondary-color); color: var(--text-color); border: 1px solid var(--primary-color); padding: 4px 8px; border-radius: 4px; cursor: pointer; font-size: 12px; }
+     .llm-copy:hover { background: var(--primary-color); }
+     .llm-copy.copied { background: var(--pass-color); color: white; }
+     .test.fail .llm-header { border-left: 4px solid var(--fail-color); }
+     .test.pass .llm-header { border-left: 4px solid var(--pass-color); }
+     </style>
+     <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
+     <script>
+     $(function() {
+         var llmData = null;
+         var modelInfo = null;
+         var processed = new Set();
+
+         // Hide metadata row immediately
+         $('th').filter(function() { return $(this).text().indexOf('__llm_results') !== -1; }).parent().hide();
+
+         // Get LLM data
+         try {
+             var meta = window.testdata.suite().metadata;
+             for (var i in meta) {
+                 if (meta[i][0] === '__llm_results') {
+                     var div = document.createElement('div');
+                     div.innerHTML = meta[i][1];
+                     var decoded = div.textContent || div.innerText || '';
+                     var data = JSON.parse(decoded);
+
+                     if (data.results) {
+                         llmData = data.results;
+                         modelInfo = data.model;
+                     } else {
+                         llmData = data;
+                     }
+                     break;
+                 }
+             }
+         } catch(e) {
+             console.error('Failed to load LLM data:', e);
+         }
+
+         // Process test element
+         function process(test) {
+             var id = test.attr('id');
+             if (!id || processed.has(id) || !llmData) return;
+
+             var name = test.find('.element-header .name').first().text().trim();
+
+             if (llmData[name]) {
+                 processed.add(id);
+                 var modelBadge = modelInfo ? '<span class="llm-model">' + modelInfo.model + '</span>' : '';
+                 var html = '<div class="llm-section">' +
+                     '<div class="llm-header">' +
+                     '<div>🤖 AI Analysis ' + modelBadge + '</div>' +
+                     '<span class="llm-chevron collapsed">▼</span>' +
+                     '</div>' +
+                     '<div class="llm-content">' +
+                     '<button class="llm-copy">Copy</button>' +
+                     marked.parse(llmData[name]) +
+                     '</div></div>';
+                 test.find('.children').first().append(html);
+
+                 // Toggle with animation
+                 test.find('.llm-header').click(function() {
+                     var content = $(this).next();
+                     var chevron = $(this).find('.llm-chevron');
+                     content.slideToggle(200);
+                     chevron.toggleClass('collapsed');
+                 });
+
+                 // Copy button
+                 test.find('.llm-copy').click(function(e) {
+                     e.stopPropagation();
+                     var btn = $(this);
+                     navigator.clipboard.writeText(llmData[name]);
+                     btn.text('✓ Copied').addClass('copied');
+                     setTimeout(function() {
+                         btn.text('Copy').removeClass('copied');
+                     }, 2000);
+                 });
+             }
+         }
+
+         // Process all tests periodically
+         setInterval(function() {
+             $('.test').each(function() { process($(this)); });
+         }, 1000);
+     });
+     </script>
+     """
+     html = html_path.read_text()
+     html_path.write_text(html.replace("</body>", f"{script}\n</body>"))
+
+
+ if __name__ == "__main__":
+     # TODO: remove this test code
+     REPO_ROOT = Path(__file__).resolve().parent.parent.parent
+     input_result_path = REPO_ROOT / ".." / "examples" / "run_test" / "output.xml"
+     multiline_another = """**General Idea Behind Test Case**
+ This test case is designed to execute a SQL query on a database and validate the results.
+
+ **Flow**
+
+ * The test connects to the database using a provided connection string.
+ * It logs a message indicating that the query is being executed.
+ * The test executes the SQL query.
+ * If the query fails, it logs an error message and raises an exception.
+ * Finally, the test checks if the result of the query matches an expected result.
+
+ **Failure Root Cause**
+ The root cause of the failure is that the database connection string is invalid, causing a "Connection Timeout" error. This prevents the test from successfully executing the SQL query and comparing its results to the expected result.
+
+ **Potential Fixes**
+
+ * Verify that the provided connection string is correct and properly formatted.
+ * Use a valid and existing database connection string for testing purposes.
+ * Consider using environment variables or configuration files to store sensitive information like database credentials, making it easier to manage and rotate them.
+
+ ```python
+ import os
+ os.environ["DB_CONNECTION_STRING"] = "valid_connection_string"
+ ```
+ """
+
+     multiline_html_response = """<div> something here </div>
+ <div> deeper something here </div>"""  # .replace("\n", " \\ \n")
+     create_llm_html_log(
+         input_result_path=input_result_path,
+         llm_results={
+             "Test Neasted Test Case": multiline_html_response,
+             "Ollama Local Model Run Should Succede": multiline_another,
+         },
+         llm_output_path="test_llm_full_log.html",
+     )
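
Reviewer note: `create_llm_html_log` re-renders an existing output.xml into a log.html with an "AI Analysis" panel per test. A minimal usage sketch, assuming placeholder paths and test names (they are not part of the package):

```python
# Hypothetical usage sketch for create_llm_html_log; paths and the test name are placeholders.
from result_companion.core.html.html_creator import create_llm_html_log

llm_results = {
    # Keys must match test case names exactly as they appear in output.xml.
    "Login Should Fail With Invalid Password": "**Failure Root Cause**\nThe credentials fixture was empty.",
}

create_llm_html_log(
    input_result_path="output.xml",   # Robot Framework results to re-render
    llm_output_path="llm_log.html",   # generated log with the AI Analysis section
    llm_results=llm_results,
    model_info={"model": "llama3"},   # optional badge shown next to the panel title
)
```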
result_companion/core/html/llm_injector.py
@@ -0,0 +1,20 @@
+ import json
+ from typing import Dict
+
+ from robot.result.visitor import ResultVisitor
+
+
+ class LLMDataInjector(ResultVisitor):
+     """Injects LLM results directly into test data."""
+
+     def __init__(self, llm_results: Dict[str, str], model_info: Dict[str, str] | None = None):
+         self.llm_results = llm_results
+         self.model_info = model_info
+
+     def end_result(self, result):
+         """Store LLM data as global metadata."""
+         if result.suite and self.llm_results:
+             data = {"results": self.llm_results}
+             if self.model_info:
+                 data["model"] = self.model_info
+             result.suite.metadata["__llm_results"] = json.dumps(data)
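
Reviewer note: `LLMDataInjector` serializes the LLM output into the top-level suite metadata under the `__llm_results` key, which the injected JavaScript later reads back from `window.testdata`. A standalone sketch of the same visitor flow, with a placeholder results file and test name:

```python
# Sketch: applying LLMDataInjector outside create_llm_html_log (file and test names are placeholders).
from robot.api import ExecutionResult

from result_companion.core.html.llm_injector import LLMDataInjector

result = ExecutionResult("output.xml")
result.visit(LLMDataInjector({"Some Test": "analysis text"}, model_info={"model": "llama3"}))

# The data now lives in the suite metadata and survives ResultWriter serialization.
print(result.suite.metadata["__llm_results"])
```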
result_companion/core/parsers/__init__.py
File without changes
result_companion/core/parsers/config.py
@@ -0,0 +1,256 @@
+ import os
+ import re
+ from enum import Enum
+ from pathlib import Path
+
+ import yaml
+ from pydantic import BaseModel, Field, SecretStr, ValidationError, model_serializer
+
+ from result_companion.core.utils.logging_config import logger
+
+
+ class ModelType(str, Enum):
+     LOCAL = "local"
+     REMOTE = "remote"
+
+
+ class TokenizerTypes(str, Enum):
+     AZURE_OPENAI = "azure_openai_tokenizer"
+     OLLAMA = "ollama_tokenizer"
+     BEDROCK = "bedrock_tokenizer"
+     GOOGLE = "google_tokenizer"
+     OPENAI = "openai_tokenizer"
+     ANTHROPIC = "anthropic_tokenizer"
+
+
+ class CustomEndpointModel(BaseModel):
+     azure_deployment: str = Field(min_length=5, description="Azure deployment URL.")
+     azure_endpoint: str
+     openai_api_version: str = Field(
+         min_length=5, description="OpenAI API version.", default="2023-03-15-preview"
+     )
+     openai_api_type: str = Field(
+         min_length=5, description="OpenAI API type.", default="azure"
+     )
+     openai_api_key: SecretStr = Field(min_length=5, description="OpenAI API key.")
+
+
+ class ChunkingPromptsModel(BaseModel):
+     chunk_analysis_prompt: str = Field(
+         min_length=5, description="Prompt for analyzing individual chunks."
+     )
+     final_synthesis_prompt: str = Field(
+         min_length=5, description="Prompt for synthesizing chunk summaries."
+     )
+
+
+ class LLMConfigModel(BaseModel):
+     question_prompt: str = Field(min_length=5, description="User prompt.")
+     prompt_template: str = Field(
+         min_length=5, description="Template for LLM ChatPromptTemplate."
+     )
+     chunking: ChunkingPromptsModel
+     model_type: ModelType = Field(
+         default=ModelType.LOCAL,
+         description=f"Which type of llm model runners to use {[el.name for el in ModelType]}",
+     )
+
+
+ class LLMInitStrategyModel(BaseModel):
+     parameters: dict = Field(default={}, description="Strategy parameters.")
+
+
+ class LLMFactoryModel(BaseModel):
+     model_type: str = Field(min_length=5, description="Model type.")
+     parameters: dict = Field(default={}, description="Model parameters.")
+     strategy: LLMInitStrategyModel = Field(
+         description="Strategy to run on init.", default_factory=LLMInitStrategyModel
+     )
+
+     def _get_masked_params(self) -> dict:
+         """Returns parameters dict with sensitive values masked."""
+         sensitive_keys = {"api_key", "token", "password", "secret", "auth"}
+         masked = {}
+         for key, value in self.parameters.items():
+             if any(s in key.lower() for s in sensitive_keys):
+                 masked[key] = "***REDACTED***"
+             else:
+                 masked[key] = value
+         return masked
+
+     def __repr__(self) -> str:
+         """Returns string representation with masked sensitive fields."""
+         return (
+             f"LLMFactoryModel(model_type={self.model_type!r}, "
+             f"parameters={self._get_masked_params()!r}, "
+             f"strategy={self.strategy!r})"
+         )
+
+     @model_serializer
+     def _mask_sensitive_fields(self) -> dict:
+         """Masks sensitive fields in parameters dict for serialization."""
+         return {
+             "model_type": self.model_type,
+             "parameters": self._get_masked_params(),
+             "strategy": self.strategy,
+         }
+
+
+ class TokenizerModel(BaseModel):
+     tokenizer: TokenizerTypes
+     max_content_tokens: int = Field(ge=0, description="Max content tokens.")
+
+
+ class ConcurrencyModel(BaseModel):
+     test_case: int = Field(
+         default=1, ge=1, description="Test cases processed in parallel."
+     )
+     chunk: int = Field(
+         default=1, ge=1, description="Chunks processed in parallel per test case."
+     )
+
+
+ class TestFilterModel(BaseModel):
+     """Test filtering config - passed to RF's native result.configure()."""
+
+     include_tags: list[str] = Field(default=[], description="RF --include patterns.")
+     exclude_tags: list[str] = Field(default=[], description="RF --exclude patterns.")
+     include_passing: bool = Field(default=False, description="Include PASS tests.")
+
+
+ class DefaultConfigModel(BaseModel):
+     version: float
+     llm_config: LLMConfigModel
+     llm_factory: LLMFactoryModel
+     tokenizer: TokenizerModel
+     concurrency: ConcurrencyModel = Field(default_factory=ConcurrencyModel)
+     test_filter: TestFilterModel = Field(default_factory=TestFilterModel)
+
+
+ class CustomModelEndpointConfig(DefaultConfigModel):
+     custom_endpoint: CustomEndpointModel
+
+
+ class ConfigLoader:
+     def __init__(
+         self,
+         default_config_file: Path | None = None,
+     ):
+         self.default_config_file = default_config_file
+
+     @staticmethod
+     def _read_yaml_file(file_path: Path) -> dict:
+         with open(file_path, "r") as file:
+             return yaml.safe_load(file)
+
+     @staticmethod
+     def _expand_env_vars(value):
+         """Expand environment variables in a string using ${VAR} syntax."""
+         if isinstance(value, str) and "${" in value and "}" in value:
+             pattern = re.compile(r"\${([^}^{]+)}")
+             matches = pattern.findall(value)
+             for match in matches:
+                 env_var = os.environ.get(match)
+                 if env_var:
+                     value = value.replace(f"${{{match}}}", env_var)
+                 else:
+                     logger.warning(f"Environment variable '{match}' not found")
+             return value
+         return value
+
+     def _process_env_vars(self, data):
+         """Recursively process environment variables in the configuration data."""
+         if isinstance(data, dict):
+             return {k: self._process_env_vars(v) for k, v in data.items()}
+         elif isinstance(data, list):
+             return [self._process_env_vars(item) for item in data]
+         else:
+             return self._expand_env_vars(data)
+
+     def load_config(self, user_config_file: Path | None = None) -> DefaultConfigModel:
+         """Load and validate the YAML configuration file, with defaults."""
+         default_config = self._read_yaml_file(self.default_config_file)
+         # Process environment variables in default config
+         default_config = self._process_env_vars(default_config)
+
+         if user_config_file:
+             user_config = self._read_yaml_file(user_config_file)
+             # Process environment variables in user config
+             user_config = self._process_env_vars(user_config)
+         else:
+             logger.info(
+                 "User configuration not found or not provided. Using default configuration!"
+             )
+             logger.debug(f"Default config file: {self.default_config_file}")
+             user_config = {}
+
+         # TODO: improve unpacking
+         config_data = (
+             {
+                 "version": default_config.get("version"),
+                 "llm_config": {
+                     **default_config.get("llm_config", {}),
+                     **user_config.get("llm_config", {}),
+                 },
+                 "llm_factory": {
+                     **default_config.get("llm_factory", {}),
+                     **user_config.get("llm_factory", {}),
+                 },
+                 "tokenizer": {
+                     **default_config.get("tokenizer", {}),
+                     **user_config.get("tokenizer", {}),
+                 },
+                 "concurrency": {
+                     **default_config.get("concurrency", {}),
+                     **user_config.get("concurrency", {}),
+                 },
+                 "test_filter": {
+                     **default_config.get("test_filter", {}),
+                     **user_config.get("test_filter", {}),
+                 },
+             }
+             if user_config_file
+             else default_config
+         )
+         try:
+             validated_config = DefaultConfigModel(**config_data)
+         except ValidationError as e:
+             logger.error(f"Configuration validation failed:\n{e}")
+             raise
+         return validated_config
+
+
+ def load_config(config_path: Path | None = None) -> DefaultConfigModel:
+     current_dir = os.path.dirname(os.path.abspath(__file__))
+     config_file_path = os.path.join(current_dir, "..", "configs", "default_config.yaml")
+
+     config_loader = ConfigLoader(default_config_file=config_file_path)
+     config = config_loader.load_config(user_config_file=config_path)
+     logger.debug(f"{config=}")
+     return config
+
+
+ # TODO: remove this code
+ # Example usage in a CLI application
+ if __name__ == "__main__":
+     import argparse
+     from pathlib import Path
+
+     # Default config shipped with the package (relative to this module)
+     PACKAGE_DIR = Path(__file__).resolve().parent
+     DEFAULT_CONFIG_PATH = PACKAGE_DIR / ".." / "configs" / "default_config.yaml"
+
+     parser = argparse.ArgumentParser(description="CLI Application with Config")
+     parser.add_argument(
+         "--config", type=str, help="Path to the YAML configuration file (optional)."
+     )
+     args = parser.parse_args()
+
+     config_loader = ConfigLoader(default_config_file=str(DEFAULT_CONFIG_PATH))
+     try:
+         config = config_loader.load_config(user_config_file=args.config)
+         print(f"Config loaded successfully: {config}")
+         # Use the validated config in your CLI application logic
+         print(f"Prompt: {config.llm_config.question_prompt}")
+     except Exception as e:
+         print(f"Failed to load configuration: {e}")
result_companion/core/parsers/result_parser.py
@@ -0,0 +1,101 @@
+ from pathlib import Path
+
+ from robot.api import ExecutionResult
+
+ from result_companion.core.results.visitors import UniqueNameResultVisitor
+ from result_companion.core.utils.log_levels import LogLevels
+ from result_companion.core.utils.logging_config import logger
+
+
+ def search_for_test_cases(
+     tests: dict | list, accumulated: list | None = None
+ ) -> list[dict]:
+     """Recursively extracts test cases from nested suite structure."""
+     if accumulated is None:
+         accumulated = []
+     if isinstance(tests, list):
+         for el in tests:
+             search_for_test_cases(el, accumulated)
+     elif isinstance(tests, dict):
+         if tests.get("tests"):
+             accumulated.extend(tests["tests"])
+         elif tests.get("suites"):
+             search_for_test_cases(tests["suites"], accumulated)
+     return accumulated
+
+
+ def remove_redundant_fields(data: list[dict]) -> list[dict]:
+     fields_to_remove = [
+         "elapsed_time",
+         "lineno",
+         "owner",
+         "start_time",
+         "html",
+         "type",
+         "assign",
+         "level",
+         "timestamp",
+     ]
+
+     if isinstance(data, dict):
+         # Remove fields from the current dictionary
+         for field in fields_to_remove:
+             data.pop(field, None)
+
+         # Recursively process child dictionaries
+         for key, value in data.items():
+             if isinstance(value, dict):
+                 data[key] = remove_redundant_fields(value)
+             elif isinstance(value, list):
+                 data[key] = [
+                     remove_redundant_fields(item) if isinstance(item, dict) else item
+                     for item in value
+                 ]
+
+     elif isinstance(data, list):
+         # Recursively process child dictionaries in the list
+         return [
+             remove_redundant_fields(item) if isinstance(item, dict) else item
+             for item in data
+         ]
+
+     return data
+
+
+ def get_robot_results_from_file_as_dict(
+     file_path: Path,
+     log_level: LogLevels,
+     include_tags: list[str] | None = None,
+     exclude_tags: list[str] | None = None,
+ ) -> list[dict]:
+     """Parses RF output.xml and returns test cases as dicts.
+
+     Uses RF's native filtering via result.configure() - same as rebot.
+
+     Args:
+         file_path: Path to output.xml.
+         log_level: Log level for parsing.
+         include_tags: RF tag patterns to include (e.g., ['smoke*', 'critical']).
+         exclude_tags: RF tag patterns to exclude (e.g., ['wip', 'bug-*']).
+
+     Returns:
+         List of test case dictionaries.
+     """
+     logger.debug(f"Getting robot results from {file_path}")
+     result = ExecutionResult(file_path)
+
+     # Use RF's native filtering (same as rebot --include/--exclude)
+     suite_config = {}
+     if include_tags:
+         suite_config["include_tags"] = include_tags
+     if exclude_tags:
+         suite_config["exclude_tags"] = exclude_tags
+     if suite_config:
+         result.configure(suite_config=suite_config)
+         logger.debug(f"Applied RF native filtering: {suite_config}")
+
+     result.visit(UniqueNameResultVisitor())
+     all_results = result.suite.to_dict()
+     all_results = search_for_test_cases(all_results)
+     all_results = remove_redundant_fields(all_results)
+     return all_results
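
Reviewer note: taken together, the parser loads output.xml, optionally narrows it with RF's native tag filtering, de-duplicates test names, flattens nested suites into a list of test dicts, and strips timing noise. A usage sketch, assuming a placeholder results file and tags, and assuming the dict keys follow RF's `TestCase.to_dict()` output:

```python
# Sketch: extracting test dicts from a results file; the path and tag patterns are placeholders.
from pathlib import Path

from result_companion.core.parsers.result_parser import get_robot_results_from_file_as_dict
from result_companion.core.utils.log_levels import LogLevels

test_cases = get_robot_results_from_file_as_dict(
    file_path=Path("output.xml"),
    log_level=LogLevels.INFO,
    include_tags=["smoke*"],   # Robot Framework tag patterns, same as rebot --include
    exclude_tags=["wip"],
)
for case in test_cases:
    # "name" and "status" are assumed keys from RF's to_dict() serialization.
    print(case["name"], case["status"])
```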
result_companion/core/results/__init__.py
File without changes
result_companion/core/results/visitors.py
@@ -0,0 +1,34 @@
+ from robot.api import ResultVisitor
+
+ from result_companion.core.utils.logging_config import logger
+
+
+ # TODO: workaround to fix a potential problem with exposing LLM results to the wrong test cases in log.html
+ class UniqueNameResultVisitor(ResultVisitor):
+     """Custom visitor that ensures unique test case names by appending IDs to duplicates."""
+
+     def __init__(self):
+         super().__init__()
+         self.test_names = {}
+
+     def start_test(self, test):
+         """Called when a test is encountered during traversal."""
+         if test.name in self.test_names:
+             self.test_names[test.name] += 1
+         else:
+             self.test_names[test.name] = 1
+
+     def end_suite(self, suite):
+         """Called when suite processing is complete."""
+         for test in suite.tests:
+             if test.name in self.test_names and self.test_names[test.name] > 1:
+                 logger.debug(f"Renaming test '{test.name}' to '{test.name} {test.id}'")
+                 test.name = f"{test.name} {test.id}"
+
+         for child_suite in suite.suites:
+             for test in child_suite.tests:
+                 if test.name in self.test_names and self.test_names[test.name] > 1:
+                     logger.debug(
+                         f"Renaming test '{test.name}' to '{test.name} {test.id}'"
+                     )
+                     test.name = f"{test.name} {test.id}"
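
Reviewer note: `UniqueNameResultVisitor` counts names in `start_test` and then rewrites duplicates as `"<name> <id>"` in `end_suite`, so the HTML injector's name-keyed lookup cannot attach an analysis to the wrong test. A quick sketch of the effect, with a placeholder results file and a hypothetical helper to walk the suite tree:

```python
# Sketch: duplicate test names gain their RF id suffix after visiting; the file name is a placeholder.
from robot.api import ExecutionResult

from result_companion.core.results.visitors import UniqueNameResultVisitor

result = ExecutionResult("output.xml")
result.visit(UniqueNameResultVisitor())


def iter_test_names(suite):
    # Hypothetical helper: walk the suite tree and yield every test name.
    for test in suite.tests:
        yield test.name
    for child in suite.suites:
        yield from iter_test_names(child)


print(list(iter_test_names(result.suite)))  # duplicates now look like "Retry Flaky Step s1-s2-t3"
```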
result_companion/core/utils/__init__.py
File without changes
result_companion/core/utils/log_levels.py
@@ -0,0 +1,23 @@
+ from enum import Enum
+ from typing import Any
+
+
+ class LogLevels(str, Enum):
+     DEBUG = "DEBUG"
+     INFO = "INFO"
+     WARNING = "WARNING"
+     ERROR = "ERROR"
+     CRITICAL = "CRITICAL"
+
+     def __str__(self) -> str:
+         return self.name
+
+     @classmethod
+     def from_str(cls, value) -> Any:
+         try:
+             return cls[value.upper()]
+         except KeyError as err:
+             _msg = (
+                 f"Values available: {[e.name for e in cls]}, while provided: {value!r}"
+             )
+             raise ValueError(_msg) from err
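
Reviewer note: `LogLevels.from_str` gives CLI-friendly, case-insensitive parsing with an error message listing the valid names. A small sketch:

```python
# Sketch: case-insensitive parsing of log levels.
from result_companion.core.utils.log_levels import LogLevels

print(LogLevels.from_str("debug"))  # -> DEBUG (str() returns the enum name)
print(LogLevels.from_str("info"))   # -> INFO

try:
    LogLevels.from_str("verbose")
except ValueError as err:
    print(err)  # lists the available names and the rejected value
```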