result-companion 0.0.1 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. result_companion/__init__.py +8 -0
  2. result_companion/core/analizers/__init__.py +0 -0
  3. result_companion/core/analizers/common.py +58 -0
  4. result_companion/core/analizers/factory_common.py +104 -0
  5. result_companion/core/analizers/local/__init__.py +0 -0
  6. result_companion/core/analizers/local/ollama_exceptions.py +10 -0
  7. result_companion/core/analizers/local/ollama_install.py +279 -0
  8. result_companion/core/analizers/local/ollama_runner.py +124 -0
  9. result_companion/core/analizers/local/ollama_server_manager.py +185 -0
  10. result_companion/core/analizers/models.py +17 -0
  11. result_companion/core/analizers/remote/__init__.py +0 -0
  12. result_companion/core/analizers/remote/custom_endpoint.py +0 -0
  13. result_companion/core/analizers/remote/openai.py +0 -0
  14. result_companion/core/chunking/chunking.py +113 -0
  15. result_companion/core/chunking/utils.py +114 -0
  16. result_companion/core/configs/default_config.yaml +85 -0
  17. result_companion/core/html/__init__.py +0 -0
  18. result_companion/core/html/html_creator.py +179 -0
  19. result_companion/core/html/llm_injector.py +20 -0
  20. result_companion/core/parsers/__init__.py +0 -0
  21. result_companion/core/parsers/config.py +256 -0
  22. result_companion/core/parsers/result_parser.py +101 -0
  23. result_companion/core/results/__init__.py +0 -0
  24. result_companion/core/results/visitors.py +34 -0
  25. result_companion/core/utils/__init__.py +0 -0
  26. result_companion/core/utils/log_levels.py +23 -0
  27. result_companion/core/utils/logging_config.py +115 -0
  28. result_companion/core/utils/progress.py +61 -0
  29. result_companion/entrypoints/__init__.py +0 -0
  30. result_companion/entrypoints/cli/__init__.py +0 -0
  31. result_companion/entrypoints/cli/cli_app.py +266 -0
  32. result_companion/entrypoints/run_rc.py +171 -0
  33. result_companion-0.0.1.dist-info/METADATA +216 -0
  34. result_companion-0.0.1.dist-info/RECORD +37 -0
  35. result_companion-0.0.1.dist-info/WHEEL +4 -0
  36. result_companion-0.0.1.dist-info/entry_points.txt +3 -0
  37. result_companion-0.0.1.dist-info/licenses/LICENSE +201 -0
result_companion/core/utils/logging_config.py
@@ -0,0 +1,115 @@
+ import json
+ import logging
+ import os
+ import tempfile
+ from logging.handlers import RotatingFileHandler
+ from typing import Dict
+
+ from tqdm import tqdm
+
+
+ class JsonFormatter(logging.Formatter):
+     """Formats log records as JSON."""
+
+     def format(self, record: logging.LogRecord) -> str:
+         """Formats a log record as JSON."""
+         log_data = {
+             "timestamp": self.formatTime(record),
+             "logger": record.name,
+             "level": record.levelname,
+             "message": record.getMessage(),
+         }
+
+         if record.exc_info:
+             log_data["exception"] = self.formatException(record.exc_info)
+
+         return json.dumps(log_data)
+
+
+ class TqdmLoggingHandler(logging.Handler):
+     """Logging handler that displays logs above tqdm progress bars."""
+
+     def __init__(self, level: int = logging.NOTSET):
+         super().__init__(level)
+         self.setFormatter(
+             logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+         )
+
+     def emit(self, record: logging.LogRecord) -> None:
+         try:
+             tqdm.write(self.format(record))
+             self.flush()
+         except Exception:
+             self.handleError(record)
+
+
+ class LoggerRegistry:
+     """Registry for managing loggers with tqdm-compatible output."""
+
+     def __init__(self, default_log_level: int = logging.INFO):
+         self.loggers: Dict[str, logging.Logger] = {}
+         self.default_log_level: int = default_log_level
+         self._tqdm_handler = TqdmLoggingHandler(level=default_log_level)
+
+     def get_logger(self, name: str, use_tqdm: bool = True) -> logging.Logger:
+         """Get or create a logger by name."""
+         if name in self.loggers:
+             return self.loggers[name]
+
+         logger = _add_file_handler(name)
+
+         if use_tqdm and not any(
+             isinstance(h, TqdmLoggingHandler) for h in logger.handlers
+         ):
+             logger.addHandler(self._tqdm_handler)
+
+         self.loggers[name] = logger
+         return logger
+
+     def set_log_level(self, level: str | int) -> None:
+         """Set log level for console output only. File logging always captures DEBUG."""
+         if isinstance(level, str):
+             level = getattr(logging, level.upper(), logging.INFO)
+
+         self.default_log_level = level
+         self._tqdm_handler.setLevel(level)
+
+
+ def _add_file_handler(name: str) -> logging.Logger:
+     """Adds JSON file handler to logger. Returns logger set to DEBUG level."""
+     logger = logging.getLogger(name)
+     logger.setLevel(logging.DEBUG)
+
+     if logger.hasHandlers():
+         return logger
+
+     log_file_path = os.path.join(tempfile.gettempdir(), "result_companion.log")
+     try:
+         file_handler = RotatingFileHandler(
+             log_file_path, maxBytes=5 * 1024 * 1024, backupCount=3
+         )
+         file_handler.setLevel(logging.DEBUG)
+         file_handler.setFormatter(JsonFormatter())
+         logger.addHandler(file_handler)
+     except (OSError, IOError) as e:
+         logger.warning(f"Failed to write to log file {log_file_path}: {e}")
+
+     return logger
+
+
+ # Module-level singleton and helpers
+ logger_registry = LoggerRegistry()
+
+
+ def set_global_log_level(log_level: str | int) -> None:
+     """Set log level for all loggers."""
+     logger_registry.set_log_level(log_level)
+
+
+ def get_progress_logger(name: str = "RC") -> logging.Logger:
+     """Get a logger that works with progress bars."""
+     return logger_registry.get_logger(name)
+
+
+ # Default logger
+ logger = get_progress_logger("RC")
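Taken together, this module exposes a shared registry, a default logger, and two helpers. A minimal usage sketch (the logger name and messages below are illustrative, not from the package):

import tempfile

from result_companion.core.utils.logging_config import (
    get_progress_logger,
    set_global_log_level,
)

set_global_log_level("DEBUG")  # raises/lowers the console (tqdm) handler only
log = get_progress_logger("RC.example")  # hypothetical child logger name
log.info("Printed above any active tqdm progress bars")
log.debug(f"Also written as JSON to {tempfile.gettempdir()}/result_companion.log")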
result_companion/core/utils/progress.py
@@ -0,0 +1,61 @@
+ import asyncio
+ from typing import Any, List, TypeVar
+
+ from tqdm import tqdm
+
+ T = TypeVar("T")
+
+
+ async def run_tasks_with_progress(
+     coroutines: List,
+     semaphore: asyncio.Semaphore = None,
+     desc: str = "Processing tasks",
+ ) -> List[Any]:
+     """Run coroutines with progress bar tracking.
+
+     Args:
+         coroutines: List of coroutines to run.
+         semaphore: Optional semaphore to limit concurrency.
+         desc: Description to display in the progress bar.
+
+     Returns:
+         List of results from the coroutines.
+     """
+     if not coroutines:
+         return []
+
+     if semaphore is None:
+         semaphore = asyncio.Semaphore(1)
+
+     active_count = 0
+     lock = asyncio.Lock()
+
+     async def run_with_semaphore(coro):
+         nonlocal active_count
+         async with semaphore:
+             async with lock:
+                 active_count += 1
+             try:
+                 return await coro
+             finally:
+                 async with lock:
+                     active_count -= 1
+
+     tasks = [asyncio.create_task(run_with_semaphore(coro)) for coro in coroutines]
+     results = [None] * len(tasks)
+     task_to_index = {task: i for i, task in enumerate(tasks)}
+     pending = set(tasks)
+
+     with tqdm(
+         total=len(tasks), desc=desc, position=0, leave=True, dynamic_ncols=True
+     ) as pbar:
+         while pending:
+             done, pending = await asyncio.wait(
+                 pending, return_when=asyncio.FIRST_COMPLETED
+             )
+             for task in done:
+                 results[task_to_index[task]] = task.result()
+                 pbar.update(1)
+             pbar.set_description(f"{desc} ({active_count} active)")
+
+     return results
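A sketch of how this helper could be driven; the coroutine, concurrency limit, and description here are illustrative, not taken from the package:

import asyncio

from result_companion.core.utils.progress import run_tasks_with_progress


async def fake_task(i: int) -> int:
    # Stand-in for a slow call (e.g. an LLM request).
    await asyncio.sleep(0.1)
    return i * 2


async def main() -> None:
    coros = [fake_task(i) for i in range(10)]
    # Semaphore(3) caps concurrency at three coroutines; results keep input order.
    results = await run_tasks_with_progress(
        coros, semaphore=asyncio.Semaphore(3), desc="Demo"
    )
    print(results)


asyncio.run(main())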
result_companion/entrypoints/__init__.py: File without changes
result_companion/entrypoints/cli/__init__.py: File without changes
result_companion/entrypoints/cli/cli_app.py
@@ -0,0 +1,266 @@
+ import subprocess
+ from pathlib import Path
+ from typing import List, Optional
+
+ import typer
+ from click import get_current_context
+
+ from result_companion.core.analizers.local.ollama_install import (
+     auto_install_model,
+     auto_install_ollama,
+ )
+ from result_companion.core.analizers.local.ollama_runner import check_ollama_installed
+ from result_companion.core.analizers.local.ollama_server_manager import (
+     OllamaServerManager,
+     resolve_server_manager,
+ )
+ from result_companion.core.utils.log_levels import LogLevels
+ from result_companion.core.utils.logging_config import logger
+
+ app = typer.Typer()
+ setup_app = typer.Typer(help="Manage Ollama installation and models")
+ app.add_typer(setup_app, name="setup")
+
+
+ try:
+     from importlib.metadata import version as get_version
+ except ImportError:
+     from importlib_metadata import version as get_version
+
+ VERSION = get_version("result-companion")
+
+
+ def version_callback(value: bool):
+     if value:
+         typer.echo(f"result-companion version: {VERSION}")
+         raise typer.Exit()
+
+
+ @app.callback(invoke_without_command=True)
+ def main(
+     version: Optional[bool] = typer.Option(
+         None,
+         "--version",
+         "-v",
+         callback=version_callback,
+         is_eager=True,
+         help="Show the version and exit",
+     ),
+ ):
+     """
+     Result Companion CLI - Analyze Robot Framework results with LLM assistance.
+     """
+     # If no subcommand is provided, show help.
+     ctx = get_current_context()
+     if ctx.invoked_subcommand is None:
+         typer.echo(ctx.get_help())
+
+
+ @app.command()
+ def analyze(
+     output: Path = typer.Option(
+         ...,
+         "-o",
+         "--output",
+         exists=True,
+         file_okay=True,
+         dir_okay=False,
+         readable=True,
+         help="Output.xml file path",
+     ),
+     log_level: LogLevels = typer.Option(
+         LogLevels.INFO,
+         "-l",
+         "--log-level",
+         help="Log level verbosity",
+         case_sensitive=True,
+     ),
+     config: Optional[Path] = typer.Option(
+         None,
+         "-c",
+         "--config",
+         exists=True,
+         file_okay=True,
+         dir_okay=False,
+         readable=True,
+         help="YAML Config file path",
+     ),
+     report: Optional[str] = typer.Option(
+         None, "-r", "--report", help="Write LLM Report to HTML file"
+     ),
+     include_passing: bool = typer.Option(
+         False, "-i", "--include-passing", help="Include PASS test cases"
+     ),
+     include_tags: Optional[str] = typer.Option(
+         None,
+         "-I",
+         "--include",
+         help="Include tests by tags (comma-separated: 'smoke,critical*')",
+     ),
+     exclude_tags: Optional[str] = typer.Option(
+         None,
+         "-E",
+         "--exclude",
+         help="Exclude tests by tags (comma-separated: 'wip,bug*')",
+     ),
+     test_case_concurrency: Optional[int] = typer.Option(
+         None,
+         "--test-concurrency",
+         help="Test cases processed in parallel (overrides config)",
+     ),
+     chunk_concurrency: Optional[int] = typer.Option(
+         None,
+         "--chunk-concurrency",
+         help="Chunks per test case in parallel (overrides config)",
+     ),
+ ):
+     """Analyze Robot Framework test results with LLM assistance."""
+     typer.echo(f"Output: {output}")
+     typer.echo(f"Log Level: {log_level}")
+     typer.echo(f"Config: {config}")
+     typer.echo(f"Report: {report}")
+     typer.echo(f"Include Passing: {include_passing}")
+
+     # Parse CLI tag options
+     include_tag_list = (
+         [t.strip() for t in include_tags.split(",")] if include_tags else None
+     )
+     exclude_tag_list = (
+         [t.strip() for t in exclude_tags.split(",")] if exclude_tags else None
+     )
+
+     # Allow test injection via context, otherwise lazy import
+     ctx = get_current_context()
+     run = ctx.obj.get("analyze") if ctx.obj else None
+     if not run:
+         from result_companion.entrypoints.run_rc import run_rc
+
+         run = run_rc
+
+     run(
+         output,
+         log_level,
+         config,
+         report,
+         include_passing,
+         test_case_concurrency,
+         chunk_concurrency,
+         include_tag_list,
+         exclude_tag_list,
+     )
+
+
+ # Setup commands using the original functions
+ @setup_app.command("ollama")
+ def setup_ollama(
+     force: bool = typer.Option(
+         False,
+         "--force",
+         help="Force reinstallation even if already installed",
+     ),
+ ):
+     """Install Ollama on the local system."""
+     try:
+         typer.echo("Installing Ollama...")
+         auto_install_ollama()
+         typer.echo("Ollama installed successfully!")
+     except Exception as e:
+         typer.echo(f"Error during Ollama installation: {e}")
+         raise typer.Exit(code=1)
+
+
+ def install_ollama_model(
+     model_name: str,
+     server_manager=OllamaServerManager,
+     installation_cmd: List[str] = ["ollama", "pull"],
+     ollama_list_cmd: List[str] = ["ollama", "list"],
+ ) -> bool:
+     """
+     Install a specific Ollama model, ensuring the server is running.
+
+     Args:
+         model_name: Name of the model to install
+         server_manager: OllamaServerManager class or instance
+         installation_cmd: Command to install models
+         ollama_list_cmd: Command to list installed models
+
+     Returns:
+         bool: True if installation is successful
+
+     Raises:
+         Exception: If installation fails or server cannot start
+     """
+     check_ollama_installed()
+
+     with resolve_server_manager(server_manager):
+         logger.info(f"Installing model '{model_name}'...")
+
+         success = auto_install_model(
+             model_name=model_name,
+         )
+
+         if success:
+             logger.info(f"Model '{model_name}' installed successfully")
+             return success
+         logger.error(f"Failed to install model '{model_name}'")
+         raise Exception(f"Failed to install model '{model_name}'")
+
+
+ @setup_app.command("model")
+ def setup_model(
+     model_name: str = typer.Argument(..., help="Name of the model to install"),
+ ):
+     """Install a specific model into Ollama."""
+     try:
+         typer.echo(f"Installing model '{model_name}'...")
+
+         install_ollama_model(model_name)
+
+         typer.echo(f"Model '{model_name}' installed successfully!")
+     except Exception as e:
+         typer.echo(f"Error installing model '{model_name}': {e}")
+         logger.error(f"Model installation failed: {e}", exc_info=True)
+         raise typer.Exit(code=1)
+
+
+ def get_installed_models(
+     server_manager=OllamaServerManager, command_runner=subprocess.run
+ ):
+     """
+     Get a list of installed Ollama models.
+
+     Args:
+         server_manager: OllamaServerManager class or instance
+         command_runner: Function to run commands
+
+     Returns:
+         str: Output showing installed models
+
+     Raises:
+         subprocess.SubprocessError: If the command fails
+     """
+     with resolve_server_manager(server_manager):
+         result = command_runner(
+             ["ollama", "list"], capture_output=True, text=True, check=True
+         )
+         return result.stdout
+
+
+ @setup_app.command("list-models")
+ def list_models():
+     """List all installed Ollama models."""
+     try:
+         output = get_installed_models()
+         typer.echo("Installed models:")
+         typer.echo(output)
+         logger.debug(f"Installed models: \n{output}")
+     except subprocess.SubprocessError:
+         typer.echo("Error: Failed to list models. Is Ollama installed?")
+         raise typer.Exit(code=1)
+     except Exception as e:
+         typer.echo(f"Error: {e}")
+         raise typer.Exit(code=1)
+
+
+ if __name__ == "__main__":
+     app()
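The Typer app above can be exercised in-process; a minimal sketch using typer's test runner (the output.xml and report paths are illustrative, and the analyze call assumes a real Robot Framework results file exists at that path):

from typer.testing import CliRunner

from result_companion.entrypoints.cli.cli_app import app

runner = CliRunner()

# Top-level help and the eager --version flag defined in the callback above.
print(runner.invoke(app, ["--help"]).stdout)
print(runner.invoke(app, ["--version"]).stdout)

# 'analyze' requires an existing output.xml; the path here is a placeholder.
result = runner.invoke(app, ["analyze", "-o", "output.xml", "-r", "rc_log.html"])
print(result.exit_code)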
result_companion/entrypoints/run_rc.py
@@ -0,0 +1,171 @@
+ import asyncio
+ import time
+ from pathlib import Path
+ from typing import Optional
+
+ from langchain_anthropic import ChatAnthropic
+ from langchain_aws import BedrockLLM
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain_ollama.llms import OllamaLLM
+ from langchain_openai import AzureChatOpenAI, ChatOpenAI
+ from pydantic import ValidationError
+
+ from result_companion.core.analizers.factory_common import execute_llm_and_get_results
+ from result_companion.core.analizers.local.ollama_runner import ollama_on_init_strategy
+ from result_companion.core.analizers.models import MODELS
+ from result_companion.core.html.html_creator import create_llm_html_log
+ from result_companion.core.parsers.config import LLMFactoryModel, load_config
+ from result_companion.core.parsers.result_parser import (
+     get_robot_results_from_file_as_dict,
+ )
+ from result_companion.core.utils.log_levels import LogLevels
+ from result_companion.core.utils.logging_config import logger, set_global_log_level
+
+
+ def init_llm_with_strategy_factory(
+     config: LLMFactoryModel,
+ ) -> MODELS:
+     model_type = config.model_type
+     parameters = config.parameters
+
+     model_classes = {
+         "OllamaLLM": (OllamaLLM, ollama_on_init_strategy),
+         "AzureChatOpenAI": (AzureChatOpenAI, None),
+         "BedrockLLM": (BedrockLLM, None),
+         "ChatGoogleGenerativeAI": (ChatGoogleGenerativeAI, None),
+         "ChatOpenAI": (ChatOpenAI, None),
+         "ChatAnthropic": (ChatAnthropic, None),
+     }
+
+     if model_type not in model_classes:
+         raise ValueError(
+             f"Unsupported model type: {model_type} not in {model_classes.keys()}"
+         )
+
+     model_class, strategy = model_classes[model_type]
+     try:
+         return model_class(**parameters), strategy
+     except (TypeError, ValidationError) as e:
+         raise ValueError(
+             f"Invalid parameters for {model_type}: {parameters}, while available parameters are: {model_class.__init__.__annotations__}"
+         ) from e
+
+
+ async def _main(
+     output: Path,
+     log_level: LogLevels,
+     config: Optional[Path],
+     report: Optional[str],
+     include_passing: bool,
+     test_case_concurrency: Optional[int] = None,
+     chunk_concurrency: Optional[int] = None,
+     include_tags: Optional[list[str]] = None,
+     exclude_tags: Optional[list[str]] = None,
+ ) -> bool:
+     set_global_log_level(str(log_level))
+
+     logger.info("Starting Result Companion!")
+     start = time.time()
+     # TODO: move to testable method
+     parsed_config = load_config(config)
+
+     if test_case_concurrency is not None:
+         parsed_config.concurrency.test_case = test_case_concurrency
+     if chunk_concurrency is not None:
+         parsed_config.concurrency.chunk = chunk_concurrency
+
+     # Merge CLI tags with config (CLI takes precedence)
+     final_include = include_tags or parsed_config.test_filter.include_tags or None
+     final_exclude = exclude_tags or parsed_config.test_filter.exclude_tags or None
+
+     # Use RF's native filtering (same as rebot --include/--exclude)
+     # TODO: set output log level from config or cli
+     test_cases = get_robot_results_from_file_as_dict(
+         file_path=output,
+         log_level=LogLevels.DEBUG,
+         include_tags=final_include,
+         exclude_tags=final_exclude,
+     )
+
+     # Filter passing tests (RF doesn't have this natively)
+     should_include_passing = (
+         include_passing or parsed_config.test_filter.include_passing
+     )
+     if not should_include_passing:
+         test_cases = [t for t in test_cases if t.get("status") != "PASS"]
+
+     logger.info(f"Filtered to {len(test_cases)} test cases")
+
+     question_from_config_file = parsed_config.llm_config.question_prompt
+     template = parsed_config.llm_config.prompt_template
+     model, model_init_strategy = init_llm_with_strategy_factory(
+         parsed_config.llm_factory
+     )
+
+     if model_init_strategy:
+         logger.debug(
+             f"Using init strategy: {model_init_strategy} with parameters: {parsed_config.llm_factory.strategy.parameters}"
+         )
+         model_init_strategy(**parsed_config.llm_factory.strategy.parameters)
+
+     logger.debug(f"Prompt template: {template}")
+     logger.debug(f"Question loaded {question_from_config_file=}")
+     prompt_template = ChatPromptTemplate.from_template(template)
+
+     llm_results = await execute_llm_and_get_results(
+         test_cases,
+         parsed_config,
+         prompt_template,
+         model,
+     )
+
+     report_path = report if report else "rc_log.html"
+     if llm_results:
+         model_info = {
+             "model": parsed_config.llm_factory.parameters.get(
+                 "model", parsed_config.llm_factory.model_type
+             )
+         }
+         create_llm_html_log(
+             input_result_path=output,
+             llm_output_path=report_path,
+             llm_results=llm_results,
+             model_info=model_info,
+         )
+         logger.info(f"Report created: {Path(report_path).resolve()}")
+
+     stop = time.time()
+     logger.debug(f"Execution time: {stop - start}")
+     return True
+
+
+ def run_rc(
+     output: Path,
+     log_level: LogLevels,
+     config: Optional[Path],
+     report: Optional[str],
+     include_passing: bool,
+     test_case_concurrency: Optional[int] = None,
+     chunk_concurrency: Optional[int] = None,
+     include_tags: Optional[list[str]] = None,
+     exclude_tags: Optional[list[str]] = None,
+ ) -> bool:
+     try:
+         return asyncio.run(
+             _main(
+                 output=output,
+                 log_level=log_level,
+                 config=config,
+                 report=report,
+                 include_passing=include_passing,
+                 test_case_concurrency=test_case_concurrency,
+                 chunk_concurrency=chunk_concurrency,
+                 include_tags=include_tags,
+                 exclude_tags=exclude_tags,
+             )
+         )
+     except Exception:
+         # logging unhandled exceptions to file from asyncio.run
+         logger.critical("Unhandled exception", exc_info=True)
+         raise
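run_rc is the same entry point the CLI falls back to, so it can also be called programmatically. A minimal sketch (the output.xml and report paths are illustrative, config=None presumably falls back to the bundled default_config.yaml, and a reachable LLM backend from that config is assumed):

from pathlib import Path

from result_companion.core.utils.log_levels import LogLevels
from result_companion.entrypoints.run_rc import run_rc

ok = run_rc(
    output=Path("output.xml"),  # Robot Framework results file (placeholder path)
    log_level=LogLevels.INFO,
    config=None,                # no explicit YAML config passed
    report="rc_log.html",
    include_passing=False,
)
print("report generated:", ok)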