result-companion 0.0.2__tar.gz → 0.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {result_companion-0.0.2 → result_companion-0.0.3}/PKG-INFO +22 -3
- {result_companion-0.0.2 → result_companion-0.0.3}/README.md +20 -2
- {result_companion-0.0.2 → result_companion-0.0.3}/pyproject.toml +8 -1
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/factory_common.py +33 -4
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/models.py +4 -1
- result_companion-0.0.3/result_companion/core/analizers/remote/copilot.py +162 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/chunking/chunking.py +6 -1
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/chunking/utils.py +5 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/utils/logging_config.py +7 -1
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/entrypoints/cli/cli_app.py +6 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/entrypoints/run_rc.py +21 -2
- {result_companion-0.0.2 → result_companion-0.0.3}/LICENSE +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/common.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/local/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/local/ollama_exceptions.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/local/ollama_install.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/local/ollama_runner.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/local/ollama_server_manager.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/remote/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/remote/custom_endpoint.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/remote/openai.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/configs/default_config.yaml +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/html/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/html/html_creator.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/html/llm_injector.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/parsers/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/parsers/config.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/parsers/result_parser.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/results/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/results/visitors.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/utils/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/utils/log_levels.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/utils/progress.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/entrypoints/__init__.py +0 -0
- {result_companion-0.0.2 → result_companion-0.0.3}/result_companion/entrypoints/cli/__init__.py +0 -0
{result_companion-0.0.2 → result_companion-0.0.3}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: result-companion
-Version: 0.0.2
+Version: 0.0.3
 Summary: AI-powered analysis of Robot Framework test failures - instant insights from output.xml
 License: Apache-2.0
 License-File: LICENSE
@@ -19,6 +19,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Software Development :: Testing
+Requires-Dist: github-copilot-sdk (>=0.1.0,<0.2.0)
 Requires-Dist: langchain-anthropic (>=0.3.7,<0.4.0)
 Requires-Dist: langchain-aws (>=0.2.10,<0.3.0)
 Requires-Dist: langchain-community (>=0.3.9,<0.4.0)
@@ -72,7 +73,24 @@ Your enhanced `log.html` now includes:

 ## Quick Start

-### Option 1:
+### Option 1: GitHub Copilot (Easiest for Users With Copilot)
+
+Already have GitHub Copilot? Use it directly—no API keys needed.
+
+```bash
+pip install result-companion
+
+# One-time setup
+brew install copilot-cli  # or: npm install -g @github/copilot
+copilot /login  # Login when prompted, then /exit
+
+# Analyze your tests
+result-companion analyze -o output.xml -c examples/copilot_config.yaml
+```
+
+See [Copilot setup guide](https://github.com/miltroj/result-companion/blob/main/examples/EXAMPLES.md#github-copilot).
+
+### Option 2: Local AI (Free, Private)

 ```bash
 pip install result-companion
@@ -85,7 +103,7 @@ result-companion setup model deepseek-r1:1.5b
 result-companion analyze -o output.xml
 ```

-### Option
+### Option 3: Cloud AI ([OpenAI](https://github.com/miltroj/result-companion/blob/main/examples/EXAMPLES.md#openai), Azure, Google)

 ```bash
 pip install result-companion
@@ -148,6 +166,7 @@ See [examples/EXAMPLES.md](https://github.com/miltroj/result-companion/blob/main
 ## Configuration Examples

 Check [`examples/`](https://github.com/miltroj/result-companion/tree/main/examples) for ready-to-use configs:
+- **GitHub Copilot** (easiest for users with copilot)
 - Local Ollama setup (default)
 - OpenAI, Azure, Google Cloud
 - Custom endpoints (Databricks, self-hosted)

{result_companion-0.0.2 → result_companion-0.0.3}/README.md
RENAMED

@@ -33,7 +33,24 @@ Your enhanced `log.html` now includes:

 ## Quick Start

-### Option 1:
+### Option 1: GitHub Copilot (Easiest for Users With Copilot)
+
+Already have GitHub Copilot? Use it directly—no API keys needed.
+
+```bash
+pip install result-companion
+
+# One-time setup
+brew install copilot-cli  # or: npm install -g @github/copilot
+copilot /login  # Login when prompted, then /exit
+
+# Analyze your tests
+result-companion analyze -o output.xml -c examples/copilot_config.yaml
+```
+
+See [Copilot setup guide](https://github.com/miltroj/result-companion/blob/main/examples/EXAMPLES.md#github-copilot).
+
+### Option 2: Local AI (Free, Private)

 ```bash
 pip install result-companion
@@ -46,7 +63,7 @@ result-companion setup model deepseek-r1:1.5b
 result-companion analyze -o output.xml
 ```

-### Option
+### Option 3: Cloud AI ([OpenAI](https://github.com/miltroj/result-companion/blob/main/examples/EXAMPLES.md#openai), Azure, Google)

 ```bash
 pip install result-companion
@@ -109,6 +126,7 @@ See [examples/EXAMPLES.md](https://github.com/miltroj/result-companion/blob/main
 ## Configuration Examples

 Check [`examples/`](https://github.com/miltroj/result-companion/tree/main/examples) for ready-to-use configs:
+- **GitHub Copilot** (easiest for users with copilot)
 - Local Ollama setup (default)
 - OpenAI, Azure, Google Cloud
 - Custom endpoints (Databricks, self-hosted)

{result_companion-0.0.2 → result_companion-0.0.3}/pyproject.toml
RENAMED

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "result-companion"
-version = "0.0.2"
+version = "0.0.3"
 description = "AI-powered analysis of Robot Framework test failures - instant insights from output.xml"
 authors = ["Milosz Trojanowski <mil.troj@gmail.com>"]
 license = "Apache-2.0"
@@ -37,6 +37,7 @@ langchain-text-splitters = "^0.3.4"
 langchain-google-genai = "^2.1.6"
 langchain-anthropic = "^0.3.7"
 tqdm = "^4.67.1"
+github-copilot-sdk = "^0.1.0"

 [tool.poetry.scripts]
 result-companion = "result_companion.entrypoints.cli.cli_app:app"
@@ -53,6 +54,12 @@ pytest-cov = "^6.0.0"
 pytest-asyncio = "^0.24.0"
 pre-commit = "^4.1.0"

+[tool.pytest.ini_options]
+asyncio_mode = "auto"
+markers = [
+    "e2e: end-to-end tests requiring real Copilot CLI connection",
+]
+
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
{result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/factory_common.py
RENAMED
@@ -12,7 +12,7 @@ from langchain_openai import AzureChatOpenAI, ChatOpenAI
 from result_companion.core.chunking.chunking import (
     accumulate_llm_results_for_summarizaton_chain,
 )
-from result_companion.core.chunking.utils import calculate_chunk_size
+from result_companion.core.chunking.utils import Chunking, calculate_chunk_size
 from result_companion.core.parsers.config import DefaultConfigModel
 from result_companion.core.utils.logging_config import get_progress_logger
 from result_companion.core.utils.progress import run_tasks_with_progress
@@ -30,6 +30,29 @@ MODELS = Tuple[
 ]


+def _stats_header(
+    status: str, chunk: Chunking, dryrun: bool = False, name: str = ""
+) -> str:
+    """Returns markdown stats line for analysis."""
+    chunks = chunk.number_of_chunks if chunk.requires_chunking else 0
+    prefix = "**[DRYRUN]** " if dryrun else ""
+    return f"""## {prefix} {name}
+
+#### Status: {status} · Chunks: {chunks} · Tokens: ~{chunk.tokens_from_raw_text} · Raw length: {chunk.raw_text_len}
+
+---
+
+"""
+
+
+async def _dryrun_result(test_case: dict) -> Tuple[str, str, list]:
+    """Returns placeholder without calling LLM."""
+    logger.info(
+        f"### Test Case: {test_case['name']}, content length: {len(str(test_case))}"
+    )
+    return ("*No LLM analysis in dryrun mode.*", test_case["name"], [])
+
+
 async def accumulate_llm_results_without_streaming(
     test_case: dict,
     question_from_config_file: str,
@@ -54,6 +77,7 @@ async def execute_llm_and_get_results(
     config: DefaultConfigModel,
     prompt: ChatPromptTemplate,
     model: MODELS,
+    dryrun: bool = False,
 ) -> dict:
     question_from_config_file = config.llm_config.question_prompt
     tokenizer = config.tokenizer
@@ -64,6 +88,7 @@ async def execute_llm_and_get_results(

     llm_results = dict()
     corutines = []
+    test_case_stats = {}  # name -> (chunk, status) for adding headers later
     logger.info(
         f"Executing chain, {len(test_cases)=}, {test_case_concurrency=}, {chunk_concurrency=}"
     )
@@ -73,9 +98,11 @@
         chunk = calculate_chunk_size(
             raw_test_case_text, question_from_config_file, tokenizer
         )
+        test_case_stats[test_case["name"]] = (chunk, test_case.get("status", "N/A"))

-
-
+        if dryrun:
+            corutines.append(_dryrun_result(test_case))
+        elif not chunk.requires_chunking:
             corutines.append(
                 accumulate_llm_results_without_streaming(
                     test_case, question_from_config_file, prompt, model
@@ -99,6 +126,8 @@
     results = await run_tasks_with_progress(corutines, semaphore=semaphore, desc=desc)

     for result, name, chunks in results:
-
+        chunk, status = test_case_stats[name]
+        header = _stats_header(status, chunk, dryrun, name)
+        llm_results[name] = header + result

     return llm_results
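To make the new per-test header concrete, here is a small sketch of what `_stats_header` emits for a hypothetical failed test. A `SimpleNamespace` stands in for the real `Chunking` object, so every field value below is invented:

```python
from types import SimpleNamespace

from result_companion.core.analizers.factory_common import _stats_header

# Stand-in for chunking.utils.Chunking exposing only the fields the header reads
fake_chunk = SimpleNamespace(
    requires_chunking=True,
    number_of_chunks=3,
    tokens_from_raw_text=5421,
    raw_text_len=21600,
)

print(_stats_header(status="FAIL", chunk=fake_chunk, dryrun=True, name="Login Should Work"))
# ## **[DRYRUN]**  Login Should Work
#
# #### Status: FAIL · Chunks: 3 · Tokens: ~5421 · Raw length: 21600
# ...
```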
{result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/analizers/models.py
RENAMED

@@ -6,12 +6,15 @@ from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_ollama.llms import OllamaLLM
 from langchain_openai import AzureChatOpenAI, ChatOpenAI

+from result_companion.core.analizers.remote.copilot import ChatCopilot
+
 MODELS = Tuple[
     OllamaLLM
     | AzureChatOpenAI
     | BedrockLLM
     | ChatGoogleGenerativeAI
     | ChatOpenAI
-    | ChatAnthropic,
+    | ChatAnthropic
+    | ChatCopilot,
     Callable,
 ]
result_companion-0.0.3/result_companion/core/analizers/remote/copilot.py
ADDED

@@ -0,0 +1,162 @@
+"""LangChain adapter for GitHub Copilot SDK with session pooling."""
+
+import asyncio
+from contextlib import asynccontextmanager
+from typing import Any, AsyncIterator, Optional
+
+from copilot import CopilotClient
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
+from langchain_core.outputs import ChatGeneration, ChatResult
+from pydantic import ConfigDict
+
+
+def messages_to_prompt(messages: list[BaseMessage]) -> str:
+    """Converts LangChain messages to a single prompt string."""
+    prefixes = {
+        SystemMessage: "[System]",
+        HumanMessage: "[User]",
+        AIMessage: "[Assistant]",
+    }
+    parts = [f"{prefixes.get(type(m), '')}: {m.content}" for m in messages]
+    return "\n\n".join(parts)
+
+
+class SessionPool:
+    """Pool of Copilot sessions for concurrent requests."""
+
+    def __init__(self, client: Any, model: str, pool_size: int = 5):
+        self._client = client
+        self._model = model
+        self._pool_size = pool_size
+        self._available: asyncio.Queue = asyncio.Queue()
+        self._created = 0
+        self._lock = asyncio.Lock()
+
+    async def _create_session(self) -> Any:
+        """Creates a new session."""
+        return await self._client.create_session({"model": self._model})
+
+    @asynccontextmanager
+    async def acquire(self) -> AsyncIterator[Any]:
+        """Acquires a session from pool, creates if needed."""
+        session = None
+
+        # Try to get existing session or create new one
+        async with self._lock:
+            if not self._available.empty():
+                session = self._available.get_nowait()
+            elif self._created < self._pool_size:
+                session = await self._create_session()
+                self._created += 1
+
+        # If pool exhausted, wait for available session
+        if session is None:
+            session = await self._available.get()
+
+        try:
+            yield session
+        finally:
+            await self._available.put(session)
+
+    async def close(self) -> None:
+        """Destroys all sessions in the pool."""
+        while not self._available.empty():
+            session = self._available.get_nowait()
+            try:
+                await session.destroy()
+            except Exception:
+                pass
+        self._created = 0
+
+
+class ChatCopilot(BaseChatModel):
+    """LangChain adapter for GitHub Copilot SDK.
+
+    Uses session pool for concurrent requests - each request gets its own session.
+    """
+
+    model: str = "gpt-4.1"
+    cli_path: Optional[str] = None
+    cli_url: Optional[str] = None
+    timeout: int = 300
+    pool_size: int = 5
+
+    _client: Any = None
+    _pool: Optional[SessionPool] = None
+    _started: bool = False
+    _init_lock: Optional[asyncio.Lock] = None
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    @property
+    def _llm_type(self) -> str:
+        return "copilot"
+
+    def _get_init_lock(self) -> asyncio.Lock:
+        """Returns init lock, creating lazily in current event loop."""
+        if self._init_lock is None:
+            self._init_lock = asyncio.Lock()
+        return self._init_lock
+
+    async def _ensure_started(self) -> None:
+        """Ensures client and pool are initialized. Thread-safe."""
+        if self._started:
+            return
+
+        async with self._get_init_lock():
+            if self._started:
+                return
+
+            opts = {}
+            if self.cli_path:
+                opts["cli_path"] = self.cli_path
+            if self.cli_url:
+                opts["cli_url"] = self.cli_url
+
+            self._client = CopilotClient(opts) if opts else CopilotClient()
+            await self._client.start()
+            self._pool = SessionPool(self._client, self.model, self.pool_size)
+            self._started = True
+
+    async def _agenerate(
+        self,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        """Async generation using session from pool."""
+        await self._ensure_started()
+
+        prompt = messages_to_prompt(messages)
+        async with self._pool.acquire() as session:
+            response = await session.send_and_wait(
+                {"prompt": prompt}, timeout=self.timeout
+            )
+
+        content = response.data.content if response and response.data else ""
+        return ChatResult(
+            generations=[ChatGeneration(message=AIMessage(content=content))]
+        )
+
+    def _generate(
+        self,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        """Synchronous generation - wraps async implementation."""
+        return asyncio.run(self._agenerate(messages, stop, run_manager, **kwargs))
+
+    async def aclose(self) -> None:
+        """Cleanup pool and client resources."""
+        if self._pool:
+            await self._pool.close()
+            self._pool = None
+        if self._client:
+            await self._client.stop()
+            self._client = None
+        self._started = False
{result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/chunking/chunking.py
RENAMED
@@ -90,7 +90,12 @@ async def summarize_test_case(
         logger.debug(
             f"[{test_name}] Processing chunk {chunk_idx + 1}/{total_chunks}, length {len(chunk)}"
         )
-
+        result = await summarization_chain.ainvoke({"text": chunk})
+        logger.debug(
+            f"[{test_name}] Chunk {chunk_idx + 1}/{total_chunks} completed",
+            extra={"summary": result},
+        )
+        return result

     chunk_tasks = [process_with_limit(chunk, i) for i, chunk in enumerate(chunks)]
     summaries = await asyncio.gather(*chunk_tasks)
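For context, `summarization_chain` here is a LangChain runnable invoked with a `{"text": ...}` payload. A rough sketch of building such a chain; the prompt wording and model choice are illustrative only, not taken from the package:

```python
import asyncio

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama.llms import OllamaLLM

# Any runnable that accepts {"text": ...} fits where summarization_chain is awaited above
prompt = ChatPromptTemplate.from_template("Summarize this Robot Framework log chunk:\n\n{text}")
chain = prompt | OllamaLLM(model="deepseek-r1:1.5b") | StrOutputParser()

summary = asyncio.run(chain.ainvoke({"text": "KeywordError: element not visible after 20s ..."}))
print(summary)
```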
{result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/chunking/utils.py
RENAMED

@@ -15,6 +15,11 @@ class Chunking:
     tokens_from_raw_text: int
     tokenized_chunks: int

+    @property
+    def requires_chunking(self) -> bool:
+        """Returns True if text needs to be split into chunks."""
+        return self.chunk_size > 0
+

 def azure_openai_tokenizer(text: str) -> int:
     """Tokenizer for Azure OpenAI models using tiktoken."""
{result_companion-0.0.2 → result_companion-0.0.3}/result_companion/core/utils/logging_config.py
RENAMED
@@ -11,8 +11,10 @@ from tqdm import tqdm
 class JsonFormatter(logging.Formatter):
     """Formats log records as JSON."""

+    STANDARD_ATTRS = frozenset(logging.LogRecord("", 0, "", 0, "", (), None).__dict__)
+
     def format(self, record: logging.LogRecord) -> str:
-        """Formats a log record as JSON."""
+        """Formats a log record as JSON, including extra fields."""
         log_data = {
             "timestamp": self.formatTime(record),
             "logger": record.name,
@@ -20,6 +22,10 @@ class JsonFormatter(logging.Formatter):
             "message": record.getMessage(),
         }

+        for key, value in record.__dict__.items():
+            if key not in self.STANDARD_ATTRS:
+                log_data[key] = value
+
         if record.exc_info:
             log_data["exception"] = self.formatException(record.exc_info)

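With this change, fields passed via `extra={...}` (such as the `summary` attached in chunking.py above) now survive into the JSON output. A small sketch; the exact key set depends on the other fields `JsonFormatter` emits:

```python
import logging

from result_companion.core.utils.logging_config import JsonFormatter

handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
log = logging.getLogger("rc.example")
log.addHandler(handler)
log.setLevel(logging.DEBUG)

# "summary" is not a standard LogRecord attribute, so it is copied into the JSON payload
log.debug("Chunk 1/3 completed", extra={"summary": "Keyword timed out waiting for the login form"})
# -> {"timestamp": "...", "logger": "rc.example", ..., "message": "Chunk 1/3 completed", "summary": "Keyword timed out ..."}
```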
{result_companion-0.0.2 → result_companion-0.0.3}/result_companion/entrypoints/cli/cli_app.py
RENAMED
@@ -113,6 +113,11 @@ def analyze(
         "--chunk-concurrency",
         help="Chunks per test case in parallel (overrides config)",
     ),
+    dryrun: bool = typer.Option(
+        False,
+        "--dryrun",
+        help="Skip LLM calls, generate HTML with debug metadata",
+    ),
 ):
     """Analyze Robot Framework test results with LLM assistance."""
     typer.echo(f"Output: {output}")
@@ -147,6 +152,7 @@ def analyze(
         chunk_concurrency,
         include_tag_list,
         exclude_tag_list,
+        dryrun,
     )

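The new flag can be exercised end to end without any LLM backend; a sketch using Typer's test runner (the `--dryrun` spelling is taken from this diff, the `-o` short option from the README above):

```python
from typer.testing import CliRunner

from result_companion.entrypoints.cli.cli_app import app

runner = CliRunner()
result = runner.invoke(app, ["analyze", "-o", "output.xml", "--dryrun"])
print(result.exit_code)
print(result.output)
```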
{result_companion-0.0.2 → result_companion-0.0.3}/result_companion/entrypoints/run_rc.py
RENAMED

@@ -14,6 +14,7 @@ from pydantic import ValidationError
 from result_companion.core.analizers.factory_common import execute_llm_and_get_results
 from result_companion.core.analizers.local.ollama_runner import ollama_on_init_strategy
 from result_companion.core.analizers.models import MODELS
+from result_companion.core.analizers.remote.copilot import ChatCopilot
 from result_companion.core.html.html_creator import create_llm_html_log
 from result_companion.core.parsers.config import LLMFactoryModel, load_config
 from result_companion.core.parsers.result_parser import (
@@ -25,9 +26,12 @@ from result_companion.core.utils.logging_config import logger, set_global_log_level

 def init_llm_with_strategy_factory(
     config: LLMFactoryModel,
+    test_case_concurrency: int = 1,
+    chunk_concurrency: int = 1,
 ) -> MODELS:
+    """Creates LLM model with optional init strategy."""
     model_type = config.model_type
-    parameters = config.parameters
+    parameters = dict(config.parameters)

     model_classes = {
         "OllamaLLM": (OllamaLLM, ollama_on_init_strategy),
@@ -36,6 +40,12 @@ def init_llm_with_strategy_factory(
         "ChatGoogleGenerativeAI": (ChatGoogleGenerativeAI, None),
         "ChatOpenAI": (ChatOpenAI, None),
         "ChatAnthropic": (ChatAnthropic, None),
+        "ChatCopilot": (ChatCopilot, None),
+    }
+
+    # Runtime overrides: ChatCopilot needs pool_size = max concurrent requests
+    runtime_overrides = {
+        "ChatCopilot": {"pool_size": test_case_concurrency * chunk_concurrency},
     }

     if model_type not in model_classes:
@@ -43,6 +53,9 @@ def init_llm_with_strategy_factory(
             f"Unsupported model type: {model_type} not in {model_classes.keys()}"
         )

+    if model_type in runtime_overrides:
+        parameters.update(runtime_overrides[model_type])
+
     model_class, strategy = model_classes[model_type]
     try:
         return model_class(**parameters), strategy
@@ -62,6 +75,7 @@ async def _main(
     chunk_concurrency: Optional[int] = None,
     include_tags: Optional[list[str]] = None,
     exclude_tags: Optional[list[str]] = None,
+    dryrun: bool = False,
 ) -> bool:
     set_global_log_level(str(log_level))

@@ -100,7 +114,9 @@ async def _main(
     question_from_config_file = parsed_config.llm_config.question_prompt
     template = parsed_config.llm_config.prompt_template
     model, model_init_strategy = init_llm_with_strategy_factory(
-        parsed_config.llm_factory
+        parsed_config.llm_factory,
+        test_case_concurrency=parsed_config.concurrency.test_case,
+        chunk_concurrency=parsed_config.concurrency.chunk,
     )

     if model_init_strategy:
@@ -118,6 +134,7 @@ async def _main(
         parsed_config,
         prompt_template,
         model,
+        dryrun=dryrun,
     )

     report_path = report if report else "rc_log.html"
@@ -150,6 +167,7 @@ def run_rc(
     chunk_concurrency: Optional[int] = None,
     include_tags: Optional[list[str]] = None,
     exclude_tags: Optional[list[str]] = None,
+    dryrun: bool = False,
 ) -> bool:
     try:
         return asyncio.run(
@@ -163,6 +181,7 @@ def run_rc(
                 chunk_concurrency=chunk_concurrency,
                 include_tags=include_tags,
                 exclude_tags=exclude_tags,
+                dryrun=dryrun,
             )
         )
     except Exception: