result_companion-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. result_companion/__init__.py +8 -0
  2. result_companion/core/analizers/__init__.py +0 -0
  3. result_companion/core/analizers/common.py +58 -0
  4. result_companion/core/analizers/factory_common.py +104 -0
  5. result_companion/core/analizers/local/__init__.py +0 -0
  6. result_companion/core/analizers/local/ollama_exceptions.py +10 -0
  7. result_companion/core/analizers/local/ollama_install.py +279 -0
  8. result_companion/core/analizers/local/ollama_runner.py +124 -0
  9. result_companion/core/analizers/local/ollama_server_manager.py +185 -0
  10. result_companion/core/analizers/models.py +17 -0
  11. result_companion/core/analizers/remote/__init__.py +0 -0
  12. result_companion/core/analizers/remote/custom_endpoint.py +0 -0
  13. result_companion/core/analizers/remote/openai.py +0 -0
  14. result_companion/core/chunking/chunking.py +113 -0
  15. result_companion/core/chunking/utils.py +114 -0
  16. result_companion/core/configs/default_config.yaml +85 -0
  17. result_companion/core/html/__init__.py +0 -0
  18. result_companion/core/html/html_creator.py +179 -0
  19. result_companion/core/html/llm_injector.py +20 -0
  20. result_companion/core/parsers/__init__.py +0 -0
  21. result_companion/core/parsers/config.py +256 -0
  22. result_companion/core/parsers/result_parser.py +101 -0
  23. result_companion/core/results/__init__.py +0 -0
  24. result_companion/core/results/visitors.py +34 -0
  25. result_companion/core/utils/__init__.py +0 -0
  26. result_companion/core/utils/log_levels.py +23 -0
  27. result_companion/core/utils/logging_config.py +115 -0
  28. result_companion/core/utils/progress.py +61 -0
  29. result_companion/entrypoints/__init__.py +0 -0
  30. result_companion/entrypoints/cli/__init__.py +0 -0
  31. result_companion/entrypoints/cli/cli_app.py +266 -0
  32. result_companion/entrypoints/run_rc.py +171 -0
  33. result_companion-0.0.1.dist-info/METADATA +216 -0
  34. result_companion-0.0.1.dist-info/RECORD +37 -0
  35. result_companion-0.0.1.dist-info/WHEEL +4 -0
  36. result_companion-0.0.1.dist-info/entry_points.txt +3 -0
  37. result_companion-0.0.1.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,8 @@
+ """Result Companion - AI-powered Robot Framework test analysis."""
+
+ try:
+     from importlib.metadata import version
+
+     __version__ = version("result-companion")
+ except Exception:  # pragma: no cover
+     __version__ = "0.0.0"
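
A minimal usage sketch (illustrative, not part of the wheel): importing the package and reading the version resolved above, which falls back to "0.0.0" when the "result-companion" distribution metadata is unavailable.

import result_companion

# Prints the installed distribution version, or the "0.0.0" fallback.
print(result_companion.__version__)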
File without changes
@@ -0,0 +1,58 @@
+ import asyncio
+ from typing import Any, Tuple
+
+ from langchain_core.runnables import RunnableSerializable
+
+
+ async def accumulate_llm_results(
+     test_case: dict, question_from_config_file: str, chain: RunnableSerializable
+ ) -> Tuple[str, str]:
+     print(
+         f"\n### Test Case: {test_case['name']}, content length: {len(str(test_case))}"
+     )
+     result = []
+     async for chunk in chain.astream(
+         {"context": test_case, "question": question_from_config_file}, verbose=True
+     ):
+         result.append(chunk)
+         print(chunk, end="", flush=True)
+     return "".join(result), test_case["name"]
+
+
+ async def run_llm_based_analysis_and_stream_results(
+     test_cases: list, question_from_config_file: str, chain: RunnableSerializable
+ ) -> dict:
+
+     llm_results = dict()
+     for test_case in test_cases:
+         llm_results[test_case["name"]], _ = await accumulate_llm_results(
+             test_case, question_from_config_file, chain
+         )
+
+     return llm_results
+
+
+ async def run_with_semaphore(semaphore: asyncio.Semaphore, coroutine: Any) -> Any:
+     async with semaphore:
+         return await coroutine
+
+
+ async def run_llm_api_calls_based_analysis_and_stream_results(
+     test_cases: list, question_from_config_file: str, chain: RunnableSerializable
+ ) -> dict:
+     llm_results = dict()
+     coroutines = []
+
+     for test_case in test_cases:
+         coroutines.append(
+             accumulate_llm_results(test_case, question_from_config_file, chain)
+         )
+
+     semaphore = asyncio.Semaphore(1)  # Limit concurrency
+
+     tasks = [run_with_semaphore(semaphore, coroutine) for coroutine in coroutines]
+
+     for result, name in await asyncio.gather(*tasks):
+         llm_results[name] = result
+
+     return llm_results
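
The functions above fan out one coroutine per test case and throttle them with a semaphore before asyncio.gather collects (result, name) pairs. A standalone sketch of that pattern, using a stand-in coroutine instead of a LangChain chain (fake_llm_call and the test-case names are illustrative, not from the package):

import asyncio


async def fake_llm_call(name: str) -> tuple[str, str]:
    await asyncio.sleep(0.1)  # stand-in for a model round trip
    return f"analysis of {name}", name


async def run_with_semaphore(semaphore: asyncio.Semaphore, coroutine):
    async with semaphore:
        return await coroutine


async def main() -> None:
    semaphore = asyncio.Semaphore(1)  # limit concurrency, as in common.py
    tasks = [run_with_semaphore(semaphore, fake_llm_call(n)) for n in ("TC 1", "TC 2")]
    results = {name: result for result, name in await asyncio.gather(*tasks)}
    print(results)


asyncio.run(main())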
@@ -0,0 +1,104 @@
+ import asyncio
+ from typing import Callable, Tuple
+
+ from langchain_anthropic import ChatAnthropic
+ from langchain_aws import BedrockLLM
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain_ollama.llms import OllamaLLM
+ from langchain_openai import AzureChatOpenAI, ChatOpenAI
+
+ from result_companion.core.chunking.chunking import (
+     accumulate_llm_results_for_summarizaton_chain,
+ )
+ from result_companion.core.chunking.utils import calculate_chunk_size
+ from result_companion.core.parsers.config import DefaultConfigModel
+ from result_companion.core.utils.logging_config import get_progress_logger
+ from result_companion.core.utils.progress import run_tasks_with_progress
+
+ logger = get_progress_logger("Analyzer")
+
+ MODELS = Tuple[
+     OllamaLLM
+     | AzureChatOpenAI
+     | BedrockLLM
+     | ChatGoogleGenerativeAI
+     | ChatOpenAI
+     | ChatAnthropic,
+     Callable,
+ ]
+
+
+ async def accumulate_llm_results_without_streaming(
+     test_case: dict,
+     question_from_config_file: str,
+     prompt: ChatPromptTemplate,
+     model: MODELS,
+ ) -> Tuple[str, str, list]:
+     logger.info(
+         f"### Test Case: {test_case['name']}, content length: {len(str(test_case))}"
+     )
+     chain = prompt | model | StrOutputParser()
+     return (
+         await chain.ainvoke(
+             {"context": test_case, "question": question_from_config_file}, verbose=True
+         ),
+         test_case["name"],
+         [],
+     )
+
+
+ async def execute_llm_and_get_results(
+     test_cases: list,
+     config: DefaultConfigModel,
+     prompt: ChatPromptTemplate,
+     model: MODELS,
+ ) -> dict:
+     question_from_config_file = config.llm_config.question_prompt
+     tokenizer = config.tokenizer
+     test_case_concurrency = config.concurrency.test_case
+     chunk_concurrency = config.concurrency.chunk
+     chunk_analysis_prompt = config.llm_config.chunking.chunk_analysis_prompt
+     final_synthesis_prompt = config.llm_config.chunking.final_synthesis_prompt
+
+     llm_results = dict()
+     coroutines = []
+     logger.info(
+         f"Executing chain, {len(test_cases)=}, {test_case_concurrency=}, {chunk_concurrency=}"
+     )
+
+     for test_case in test_cases:
+         raw_test_case_text = str(test_case)
+         chunk = calculate_chunk_size(
+             raw_test_case_text, question_from_config_file, tokenizer
+         )
+
+         # TODO: zero chunk size seems magical
+         if chunk.chunk_size == 0:
+             coroutines.append(
+                 accumulate_llm_results_without_streaming(
+                     test_case, question_from_config_file, prompt, model
+                 )
+             )
+         else:
+             coroutines.append(
+                 accumulate_llm_results_for_summarizaton_chain(
+                     test_case=test_case,
+                     chunk_analysis_prompt=chunk_analysis_prompt,
+                     final_synthesis_prompt=final_synthesis_prompt,
+                     chunking_strategy=chunk,
+                     llm=model,
+                     chunk_concurrency=chunk_concurrency,
+                 )
+             )
+
+     semaphore = asyncio.Semaphore(test_case_concurrency)
+
+     desc = f"Analyzing {len(test_cases)} test cases"
+     results = await run_tasks_with_progress(coroutines, semaphore=semaphore, desc=desc)
+
+     for result, name, chunks in results:
+         llm_results[name] = result
+
+     return llm_results
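
For the non-chunked path, the chain is composed as prompt | model | StrOutputParser(). A hedged sketch of that composition in isolation, assuming a locally pulled Ollama model (the model name and prompt template are illustrative, not from the package config):

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama.llms import OllamaLLM

prompt = ChatPromptTemplate.from_template(
    "Context: {context}\n\nQuestion: {question}"
)
model = OllamaLLM(model="llama3.2")  # assumption: any local Ollama model works here
chain = prompt | model | StrOutputParser()

answer = chain.invoke(
    {"context": "FAIL: element not found", "question": "Why did this test fail?"}
)
print(answer)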
File without changes
@@ -0,0 +1,10 @@
+ class OllamaServerNotRunning(Exception):
+     """Exception raised when the Ollama server is not running."""
+
+
+ class OllamaNotInstalled(Exception):
+     """Exception raised when Ollama is not installed."""
+
+
+ class OllamaModelNotAvailable(Exception):
+     """Exception raised when the required Ollama model is not available."""
@@ -0,0 +1,279 @@
+ import platform
+ import shutil
+ import subprocess
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from enum import Enum, auto
+ from typing import List
+
+ from result_companion.core.utils.logging_config import logger
+
+
+ class PlatformType(Enum):
+     MACOS = auto()
+     LINUX_DEBIAN = auto()
+     LINUX_RHEL = auto()
+     LINUX_ARCH = auto()
+     WINDOWS = auto()
+     UNSUPPORTED = auto()
+
+
+ class OllamaInstallationError(Exception):
+     pass
+
+
+ class ModelInstallationError(Exception):
+     pass
+
+
+ @dataclass
+ class InstallConfig:
+     """Installation configuration for a platform."""
+
+     commands: List[List[str]]
+     prerequisite_check: str  # Command to check if prerequisite exists
+
+
+ class BaseInstaller(ABC):
+     """Simplified base installer."""
+
+     def __init__(self):
+         self.config = self.get_config()
+
+     @abstractmethod
+     def get_config(self) -> InstallConfig:
+         """Get installation configuration."""
+         pass
+
+     def validate_prerequisites(self) -> bool:
+         """Check if prerequisites are available."""
+         return shutil.which(self.config.prerequisite_check) is not None
+
+     def install(self) -> None:
+         """Install Ollama."""
+         if not self.validate_prerequisites():
+             raise OllamaInstallationError(
+                 f"Missing prerequisite: {self.config.prerequisite_check}"
+             )
+
+         for cmd in self.config.commands:
+             logger.info(f"Executing: {' '.join(cmd)}")
+             subprocess.run(cmd, check=True, capture_output=True, text=True)
+
+
+ class MacOSInstaller(BaseInstaller):
+     def get_config(self) -> InstallConfig:
+         return InstallConfig(
+             commands=[["brew", "install", "ollama"]], prerequisite_check="brew"
+         )
+
+
+ class DebianInstaller(BaseInstaller):
+     def get_config(self) -> InstallConfig:
+         return InstallConfig(
+             commands=[
+                 ["curl", "-fsSL", "https://ollama.com/install.sh"],
+                 ["sh", "-c", "curl -fsSL https://ollama.com/install.sh | sh"],
+             ],
+             prerequisite_check="curl",
+         )
+
+
+ class RHELInstaller(BaseInstaller):
+     def get_config(self) -> InstallConfig:
+         return InstallConfig(
+             commands=[
+                 ["curl", "-fsSL", "https://ollama.com/install.sh"],
+                 ["sh", "-c", "curl -fsSL https://ollama.com/install.sh | sh"],
+             ],
+             prerequisite_check="curl",
+         )
+
+
+ class ArchInstaller(BaseInstaller):
+     def get_config(self) -> InstallConfig:
+         return InstallConfig(
+             commands=[["sudo", "pacman", "-Sy", "--noconfirm", "ollama"]],
+             prerequisite_check="pacman",
+         )
+
+
+ class WindowsInstaller(BaseInstaller):
+     def get_config(self) -> InstallConfig:
+         return InstallConfig(
+             commands=[
+                 [
+                     "powershell",
+                     "-Command",
+                     "Invoke-WebRequest -Uri https://ollama.com/download/windows -OutFile $env:TEMP\\ollama-installer.exe",
+                 ],
+                 [
+                     "powershell",
+                     "-Command",
+                     "Start-Process -FilePath $env:TEMP\\ollama-installer.exe -ArgumentList '/S' -Wait",
+                 ],
+             ],
+             prerequisite_check="powershell",
+         )
+
+
+ class OllamaManager:
+     """Simplified Ollama installation and model manager."""
+
+     _INSTALLERS = {
+         PlatformType.MACOS: MacOSInstaller,
+         PlatformType.LINUX_DEBIAN: DebianInstaller,
+         PlatformType.LINUX_RHEL: RHELInstaller,
+         PlatformType.LINUX_ARCH: ArchInstaller,
+         PlatformType.WINDOWS: WindowsInstaller,
+     }
+
+     def __init__(self):
+         self.platform = self._detect_platform()
+
+     def _detect_platform(self) -> PlatformType:
+         """Detect current platform."""
+         system = platform.system().lower()
+
+         if system == "darwin":
+             return PlatformType.MACOS
+         elif system == "windows":
+             return PlatformType.WINDOWS
+         elif system == "linux":
+             return self._detect_linux_distro()
+         else:
+             return PlatformType.UNSUPPORTED
+
+     def _detect_linux_distro(self) -> PlatformType:
+         """Detect Linux distribution."""
+         try:
+             with open("/etc/os-release", "r") as f:
+                 content = f.read().lower()
+                 if any(x in content for x in ["ubuntu", "debian"]):
+                     return PlatformType.LINUX_DEBIAN
+                 elif any(x in content for x in ["rhel", "centos", "fedora"]):
+                     return PlatformType.LINUX_RHEL
+                 elif "arch" in content:
+                     return PlatformType.LINUX_ARCH
+         except FileNotFoundError:
+             pass
+
+         # Fallback to package manager detection
+         if shutil.which("apt"):
+             return PlatformType.LINUX_DEBIAN
+         if shutil.which("yum") or shutil.which("dnf"):
+             return PlatformType.LINUX_RHEL
+         if shutil.which("pacman"):
+             return PlatformType.LINUX_ARCH
+
+         return PlatformType.LINUX_DEBIAN  # Default
+
+     def _run_command(
+         self, cmd: List[str], stream_output: bool = False
+     ) -> subprocess.CompletedProcess:
+         """Run command with optional streaming."""
+         if stream_output:
+             return self._run_with_streaming(cmd)
+         # TODO: remove this since streaming is always used
+         return subprocess.run(cmd, capture_output=True, text=True, check=True)
+
+     def _run_with_streaming(self, cmd: List[str]) -> subprocess.CompletedProcess:
+         """Run command with real-time output streaming."""
+         logger.info(f"Running: {' '.join(cmd)}")
+
+         process = subprocess.Popen(
+             cmd,
+             stdout=subprocess.PIPE,
+             stderr=subprocess.STDOUT,
+             text=True,
+             bufsize=1,
+             universal_newlines=True,
+         )
+
+         output_lines = []
+         for line in iter(process.stdout.readline, ""):
+             line = line.rstrip()
+             if line:
+                 logger.info(f"Ollama: {line}")
+                 output_lines.append(line)
+
+         return_code = process.wait()
+         result = subprocess.CompletedProcess(
+             cmd, return_code, "\n".join(output_lines), ""
+         )
+
+         if return_code != 0:
+             raise subprocess.CalledProcessError(
+                 return_code, cmd, "\n".join(output_lines)
+             )
+
+         return result
+
+     def is_ollama_installed(self) -> bool:
+         """Check if Ollama is installed."""
+         try:
+             self._run_command(["ollama", "--version"])
+             return True
+         except (subprocess.SubprocessError, FileNotFoundError):
+             return False
+
+     def is_model_installed(self, model_name: str) -> bool:
+         """Check if model is installed."""
+         try:
+             result = self._run_command(["ollama", "list"])
+             return model_name in result.stdout
+         except (subprocess.SubprocessError, FileNotFoundError):
+             return False
+
+     def install_ollama(self) -> bool:
+         """Install Ollama if not already installed."""
+         if self.is_ollama_installed():
+             logger.info("Ollama already installed.")
+             return True
+
+         if self.platform == PlatformType.UNSUPPORTED:
+             raise OllamaInstallationError(f"Unsupported platform: {platform.system()}")
+
+         logger.info("Installing Ollama...")
+
+         try:
+             installer_class = self._INSTALLERS[self.platform]
+             installer = installer_class()
+             installer.install()
+         except Exception as e:
+             raise OllamaInstallationError(f"Installation failed: {str(e)}") from e
+
+         if not self.is_ollama_installed():
+             raise OllamaInstallationError("Installation verification failed")
+
+         logger.info("Ollama installed successfully!")
+         return True
+
+     def install_model(self, model_name: str) -> bool:
+         """Install model if not already installed."""
+         if self.is_model_installed(model_name):
+             logger.info(f"Model '{model_name}' already installed.")
+             return True
+
+         logger.info(f"Installing model '{model_name}'...")
+
+         try:
+             self._run_command(["ollama", "pull", model_name], stream_output=True)
+         except Exception as e:
+             raise ModelInstallationError(f"Model installation failed: {str(e)}") from e
+
+         if not self.is_model_installed(model_name):
+             raise ModelInstallationError("Model installation verification failed")
+
+         logger.info(f"Model '{model_name}' installed successfully!")
+         return True
+
+
+ def auto_install_ollama() -> bool:
+     """Auto-install Ollama."""
+     return OllamaManager().install_ollama()
+
+
+ def auto_install_model(model_name: str) -> bool:
+     """Auto-install model."""
+     return OllamaManager().install_model(model_name)
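
A hedged usage sketch for the manager above (the model name is an assumption; both calls are effectively no-ops when the tool or model is already present):

from result_companion.core.analizers.local.ollama_install import (
    ModelInstallationError,
    OllamaInstallationError,
    OllamaManager,
)

manager = OllamaManager()
try:
    manager.install_ollama()           # installs via the detected platform installer
    manager.install_model("llama3.2")  # streams `ollama pull` output to the logger
except (OllamaInstallationError, ModelInstallationError) as err:
    print(f"Ollama setup failed: {err}")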
@@ -0,0 +1,124 @@
+ import subprocess
+ from typing import Optional, Type, Union
+
+ from result_companion.core.analizers.local.ollama_exceptions import (
+     OllamaModelNotAvailable,
+     OllamaNotInstalled,
+     OllamaServerNotRunning,
+ )
+ from result_companion.core.analizers.local.ollama_server_manager import (
+     OllamaServerManager,
+     resolve_server_manager,
+ )
+ from result_companion.core.utils.logging_config import logger
+
+
+ # TODO: move to different location > ollama_install
+ def check_ollama_installed(ollama_version_cmd: list = ["ollama", "--version"]) -> None:
+     logger.debug("Checking if Ollama is installed...")
+     try:
+         result = subprocess.run(
+             ollama_version_cmd, capture_output=True, text=True, check=True
+         )
+         logger.debug(f"Ollama installed: {result.stdout.strip()}")
+     except FileNotFoundError:
+         raise OllamaNotInstalled(
+             "Ollama command not found. Ensure it is installed and in your PATH."
+         )
+     except subprocess.CalledProcessError as exc:
+         raise OllamaNotInstalled(f"Ollama command failed: {exc}.")
+     except Exception as exc:
+         raise OllamaNotInstalled(f"Failed to check Ollama installation: {exc}") from exc
+     logger.debug("Ollama installation check passed.")
+
+
+ def check_model_installed(
+     model_name: str, ollama_list_cmd: list = ["ollama", "list"]
+ ) -> None:
+     logger.debug(f"Checking if model '{model_name}' is installed...")
+     try:
+         result = subprocess.run(
+             ollama_list_cmd, capture_output=True, text=True, check=True
+         )
+     except subprocess.CalledProcessError as exc:
+         raise OllamaServerNotRunning(
+             f"'ollama list' command failed with error code {exc.returncode}: {exc.stderr}"
+         ) from exc
+     if not any(
+         line.startswith(f"{model_name}:") or line.startswith(f"{model_name} ")
+         for line in result.stdout.splitlines()
+     ):
+         raise OllamaModelNotAvailable(
+             f"Model '{model_name}' is not installed in Ollama. Run `ollama pull {model_name}`."
+         )
+     logger.debug(f"Model '{model_name}' is installed.")
+
+
+ def ollama_on_init_strategy(
+     model_name: str,
+     server_url: str = "http://localhost:11434",
+     start_timeout: int = 30,
+     server_manager: Union[
+         Optional["OllamaServerManager"], Type["OllamaServerManager"]
+     ] = OllamaServerManager,
+ ) -> "OllamaServerManager":
+     """
+     Initialize Ollama by ensuring it is installed, the server is running,
+     and the specified model is available.
+
+     Parameters:
+         model_name (str): Name of the model to check.
+         server_url (str): URL where the server is expected.
+         start_timeout (int): Timeout for starting the server.
+         server_manager (Union[OllamaServerManager, Type[OllamaServerManager]]):
+             Either an instance of OllamaServerManager to use directly,
+             or the OllamaServerManager class (or subclass) to instantiate.
+             Defaults to the OllamaServerManager class.
+
+     Returns:
+         OllamaServerManager: The server manager instance that was used.
+
+     Raises:
+         OllamaNotInstalled: If Ollama is not installed.
+         OllamaServerNotRunning: If the server fails to start.
+         OllamaModelNotAvailable: If the specified model is not installed.
+     """
+     check_ollama_installed()
+
+     server_manager_instance = resolve_server_manager(
+         server_manager, server_url=server_url, start_timeout=start_timeout
+     )
+
+     if not server_manager_instance.is_running():
+         server_manager_instance.start()
+     else:
+         logger.debug("Ollama server is already running.")
+     logger.debug(f"Ollama server is confirmed running at {server_url}")
+     check_model_installed(model_name)
+     return server_manager_instance
+
+
+ if __name__ == "__main__":
+     # TODO: Transfer to integration tests
+     import logging
+
+     from langchain_ollama.llms import OllamaLLM
+
+     logging.basicConfig(level=logging.INFO)
+     test_model = "deepseek-r1"  # Change to a model you may or may not have installed
+     try:
+         server_manager = ollama_on_init_strategy(test_model)
+         print(f"Successfully verified Ollama setup for model: {test_model}")
+     except (OllamaNotInstalled, OllamaServerNotRunning, OllamaModelNotAvailable) as e:
+         print(f"Error: {e}")
+     except Exception as e:
+         print(f"An unexpected error occurred: {e}")
+
+     # Initialize the model with your local server endpoint
+     model = OllamaLLM(
+         model="deepseek-r1:1.5b",
+     )
+
+     result = model.invoke("Come up with a concise, interesting fact")
+     server_manager.cleanup()
+     print(result)