result-companion 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,7 +12,7 @@ from langchain_openai import AzureChatOpenAI, ChatOpenAI
 from result_companion.core.chunking.chunking import (
     accumulate_llm_results_for_summarizaton_chain,
 )
-from result_companion.core.chunking.utils import calculate_chunk_size
+from result_companion.core.chunking.utils import Chunking, calculate_chunk_size
 from result_companion.core.parsers.config import DefaultConfigModel
 from result_companion.core.utils.logging_config import get_progress_logger
 from result_companion.core.utils.progress import run_tasks_with_progress
@@ -30,6 +30,29 @@ MODELS = Tuple[
 ]


+def _stats_header(
+    status: str, chunk: Chunking, dryrun: bool = False, name: str = ""
+) -> str:
+    """Returns markdown stats line for analysis."""
+    chunks = chunk.number_of_chunks if chunk.requires_chunking else 0
+    prefix = "**[DRYRUN]** " if dryrun else ""
+    return f"""## {prefix} {name}
+
+#### Status: {status} · Chunks: {chunks} · Tokens: ~{chunk.tokens_from_raw_text} · Raw length: {chunk.raw_text_len}
+
+---
+
+"""
+
+
+async def _dryrun_result(test_case: dict) -> Tuple[str, str, list]:
+    """Returns placeholder without calling LLM."""
+    logger.info(
+        f"### Test Case: {test_case['name']}, content length: {len(str(test_case))}"
+    )
+    return ("*No LLM analysis in dryrun mode.*", test_case["name"], [])
+
+
 async def accumulate_llm_results_without_streaming(
     test_case: dict,
     question_from_config_file: str,
@@ -54,6 +77,7 @@ async def execute_llm_and_get_results(
     config: DefaultConfigModel,
     prompt: ChatPromptTemplate,
     model: MODELS,
+    dryrun: bool = False,
 ) -> dict:
     question_from_config_file = config.llm_config.question_prompt
     tokenizer = config.tokenizer
@@ -64,6 +88,7 @@ async def execute_llm_and_get_results(

     llm_results = dict()
     corutines = []
+    test_case_stats = {}  # name -> (chunk, status) for adding headers later
     logger.info(
         f"Executing chain, {len(test_cases)=}, {test_case_concurrency=}, {chunk_concurrency=}"
     )
@@ -73,9 +98,11 @@ async def execute_llm_and_get_results(
         chunk = calculate_chunk_size(
             raw_test_case_text, question_from_config_file, tokenizer
         )
+        test_case_stats[test_case["name"]] = (chunk, test_case.get("status", "N/A"))

-        # TODO: zero chunk size seems magical
-        if chunk.chunk_size == 0:
+        if dryrun:
+            corutines.append(_dryrun_result(test_case))
+        elif not chunk.requires_chunking:
             corutines.append(
                 accumulate_llm_results_without_streaming(
                     test_case, question_from_config_file, prompt, model
@@ -99,6 +126,8 @@ async def execute_llm_and_get_results(
     results = await run_tasks_with_progress(corutines, semaphore=semaphore, desc=desc)

     for result, name, chunks in results:
-        llm_results[name] = result
+        chunk, status = test_case_stats[name]
+        header = _stats_header(status, chunk, dryrun, name)
+        llm_results[name] = header + result

     return llm_results
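
The `_stats_header` helper added above only reads four attributes of the `Chunking` result before prepending the header to each test case's LLM output. A rough, hypothetical illustration of the markdown it emits (the module path is inferred from the `factory_common.py` checksum change in the RECORD below; the `SimpleNamespace` stand-in and all values are invented for the example):

```python
from types import SimpleNamespace

from result_companion.core.analizers.factory_common import _stats_header

# Stand-in for a Chunking result; only the attributes _stats_header reads are set.
chunk = SimpleNamespace(
    requires_chunking=True,
    number_of_chunks=3,
    tokens_from_raw_text=5120,
    raw_text_len=18000,
)

print(_stats_header(status="FAIL", chunk=chunk, dryrun=True, name="Login Test"))
# ## **[DRYRUN]**  Login Test
#
# #### Status: FAIL · Chunks: 3 · Tokens: ~5120 · Raw length: 18000
#
# ---
```
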
@@ -6,12 +6,15 @@ from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_ollama.llms import OllamaLLM
 from langchain_openai import AzureChatOpenAI, ChatOpenAI

+from result_companion.core.analizers.remote.copilot import ChatCopilot
+
 MODELS = Tuple[
     OllamaLLM
     | AzureChatOpenAI
     | BedrockLLM
     | ChatGoogleGenerativeAI
     | ChatOpenAI
-    | ChatAnthropic,
+    | ChatAnthropic
+    | ChatCopilot,
     Callable,
 ]
@@ -0,0 +1,162 @@
+"""LangChain adapter for GitHub Copilot SDK with session pooling."""
+
+import asyncio
+from contextlib import asynccontextmanager
+from typing import Any, AsyncIterator, Optional
+
+from copilot import CopilotClient
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
+from langchain_core.outputs import ChatGeneration, ChatResult
+from pydantic import ConfigDict
+
+
+def messages_to_prompt(messages: list[BaseMessage]) -> str:
+    """Converts LangChain messages to a single prompt string."""
+    prefixes = {
+        SystemMessage: "[System]",
+        HumanMessage: "[User]",
+        AIMessage: "[Assistant]",
+    }
+    parts = [f"{prefixes.get(type(m), '')}: {m.content}" for m in messages]
+    return "\n\n".join(parts)
+
+
+class SessionPool:
+    """Pool of Copilot sessions for concurrent requests."""
+
+    def __init__(self, client: Any, model: str, pool_size: int = 5):
+        self._client = client
+        self._model = model
+        self._pool_size = pool_size
+        self._available: asyncio.Queue = asyncio.Queue()
+        self._created = 0
+        self._lock = asyncio.Lock()
+
+    async def _create_session(self) -> Any:
+        """Creates a new session."""
+        return await self._client.create_session({"model": self._model})
+
+    @asynccontextmanager
+    async def acquire(self) -> AsyncIterator[Any]:
+        """Acquires a session from pool, creates if needed."""
+        session = None
+
+        # Try to get existing session or create new one
+        async with self._lock:
+            if not self._available.empty():
+                session = self._available.get_nowait()
+            elif self._created < self._pool_size:
+                session = await self._create_session()
+                self._created += 1
+
+        # If pool exhausted, wait for available session
+        if session is None:
+            session = await self._available.get()
+
+        try:
+            yield session
+        finally:
+            await self._available.put(session)
+
+    async def close(self) -> None:
+        """Destroys all sessions in the pool."""
+        while not self._available.empty():
+            session = self._available.get_nowait()
+            try:
+                await session.destroy()
+            except Exception:
+                pass
+        self._created = 0
+
+
+class ChatCopilot(BaseChatModel):
+    """LangChain adapter for GitHub Copilot SDK.
+
+    Uses session pool for concurrent requests - each request gets its own session.
+    """
+
+    model: str = "gpt-4.1"
+    cli_path: Optional[str] = None
+    cli_url: Optional[str] = None
+    timeout: int = 300
+    pool_size: int = 5
+
+    _client: Any = None
+    _pool: Optional[SessionPool] = None
+    _started: bool = False
+    _init_lock: Optional[asyncio.Lock] = None
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    @property
+    def _llm_type(self) -> str:
+        return "copilot"
+
+    def _get_init_lock(self) -> asyncio.Lock:
+        """Returns init lock, creating lazily in current event loop."""
+        if self._init_lock is None:
+            self._init_lock = asyncio.Lock()
+        return self._init_lock
+
+    async def _ensure_started(self) -> None:
+        """Ensures client and pool are initialized. Thread-safe."""
+        if self._started:
+            return
+
+        async with self._get_init_lock():
+            if self._started:
+                return
+
+            opts = {}
+            if self.cli_path:
+                opts["cli_path"] = self.cli_path
+            if self.cli_url:
+                opts["cli_url"] = self.cli_url
+
+            self._client = CopilotClient(opts) if opts else CopilotClient()
+            await self._client.start()
+            self._pool = SessionPool(self._client, self.model, self.pool_size)
+            self._started = True
+
+    async def _agenerate(
+        self,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        """Async generation using session from pool."""
+        await self._ensure_started()
+
+        prompt = messages_to_prompt(messages)
+        async with self._pool.acquire() as session:
+            response = await session.send_and_wait(
+                {"prompt": prompt}, timeout=self.timeout
+            )
+
+        content = response.data.content if response and response.data else ""
+        return ChatResult(
+            generations=[ChatGeneration(message=AIMessage(content=content))]
+        )
+
+    def _generate(
+        self,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        """Synchronous generation - wraps async implementation."""
+        return asyncio.run(self._agenerate(messages, stop, run_manager, **kwargs))
+
+    async def aclose(self) -> None:
+        """Cleanup pool and client resources."""
+        if self._pool:
+            await self._pool.close()
+            self._pool = None
+        if self._client:
+            await self._client.stop()
+            self._client = None
+        self._started = False
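
A minimal usage sketch of the new adapter, not part of the diff: it assumes the GitHub Copilot CLI is installed and logged in, and that the module lands at `result_companion/core/analizers/remote/copilot.py` as the RECORD entry below indicates. `ainvoke` is inherited from LangChain's `BaseChatModel`; `aclose` is the adapter's own cleanup hook.

```python
import asyncio

from langchain_core.messages import HumanMessage

from result_companion.core.analizers.remote.copilot import ChatCopilot


async def main() -> None:
    # pool_size caps how many Copilot sessions are created for concurrent calls.
    llm = ChatCopilot(model="gpt-4.1", pool_size=2)
    try:
        reply = await llm.ainvoke(
            [HumanMessage(content="Why might this Robot Framework keyword time out?")]
        )
        print(reply.content)
    finally:
        await llm.aclose()  # destroy pooled sessions and stop the Copilot client


asyncio.run(main())
```

Because the init lock and session pool are created lazily inside the running event loop, a single `ChatCopilot` instance can be shared across the concurrent analysis tasks shown earlier without extra setup.
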
@@ -90,7 +90,12 @@ async def summarize_test_case(
         logger.debug(
             f"[{test_name}] Processing chunk {chunk_idx + 1}/{total_chunks}, length {len(chunk)}"
         )
-        return await summarization_chain.ainvoke({"text": chunk})
+        result = await summarization_chain.ainvoke({"text": chunk})
+        logger.debug(
+            f"[{test_name}] Chunk {chunk_idx + 1}/{total_chunks} completed",
+            extra={"summary": result},
+        )
+        return result

     chunk_tasks = [process_with_limit(chunk, i) for i, chunk in enumerate(chunks)]
     summaries = await asyncio.gather(*chunk_tasks)
@@ -15,6 +15,11 @@ class Chunking:
     tokens_from_raw_text: int
     tokenized_chunks: int

+    @property
+    def requires_chunking(self) -> bool:
+        """Returns True if text needs to be split into chunks."""
+        return self.chunk_size > 0
+

 def azure_openai_tokenizer(text: str) -> int:
     """Tokenizer for Azure OpenAI models using tiktoken."""
@@ -11,8 +11,10 @@ from tqdm import tqdm
 class JsonFormatter(logging.Formatter):
     """Formats log records as JSON."""

+    STANDARD_ATTRS = frozenset(logging.LogRecord("", 0, "", 0, "", (), None).__dict__)
+
     def format(self, record: logging.LogRecord) -> str:
-        """Formats a log record as JSON."""
+        """Formats a log record as JSON, including extra fields."""
         log_data = {
             "timestamp": self.formatTime(record),
             "logger": record.name,
@@ -20,6 +22,10 @@ class JsonFormatter(logging.Formatter):
             "message": record.getMessage(),
         }

+        for key, value in record.__dict__.items():
+            if key not in self.STANDARD_ATTRS:
+                log_data[key] = value
+
         if record.exc_info:
             log_data["exception"] = self.formatException(record.exc_info)

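
The `extra={"summary": result}` passed by the chunk-summary logging above is exactly what this formatter change surfaces: any record attribute not present in `STANDARD_ATTRS` is copied into the JSON payload. A small, hypothetical wiring example (the import path assumes the formatter lives in `logging_config.py`, whose checksum changes in the RECORD below; logger names are invented):

```python
import logging
import sys

from result_companion.core.utils.logging_config import JsonFormatter

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(JsonFormatter())

log = logging.getLogger("chunking-demo")
log.addHandler(handler)
log.setLevel(logging.DEBUG)

# "summary" is not a default LogRecord attribute, so it is absent from
# STANDARD_ATTRS and shows up as an extra key in the emitted JSON object.
log.debug("[Login Test] Chunk 1/3 completed", extra={"summary": "step timed out"})
```
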
@@ -113,6 +113,11 @@ def analyze(
         "--chunk-concurrency",
         help="Chunks per test case in parallel (overrides config)",
     ),
+    dryrun: bool = typer.Option(
+        False,
+        "--dryrun",
+        help="Skip LLM calls, generate HTML with debug metadata",
+    ),
 ):
     """Analyze Robot Framework test results with LLM assistance."""
     typer.echo(f"Output: {output}")
@@ -147,6 +152,7 @@ def analyze(
         chunk_concurrency,
         include_tag_list,
         exclude_tag_list,
+        dryrun,
     )


@@ -14,6 +14,7 @@ from pydantic import ValidationError
 from result_companion.core.analizers.factory_common import execute_llm_and_get_results
 from result_companion.core.analizers.local.ollama_runner import ollama_on_init_strategy
 from result_companion.core.analizers.models import MODELS
+from result_companion.core.analizers.remote.copilot import ChatCopilot
 from result_companion.core.html.html_creator import create_llm_html_log
 from result_companion.core.parsers.config import LLMFactoryModel, load_config
 from result_companion.core.parsers.result_parser import (
@@ -25,9 +26,12 @@ from result_companion.core.utils.logging_config import logger, set_global_log_le

 def init_llm_with_strategy_factory(
     config: LLMFactoryModel,
+    test_case_concurrency: int = 1,
+    chunk_concurrency: int = 1,
 ) -> MODELS:
+    """Creates LLM model with optional init strategy."""
     model_type = config.model_type
-    parameters = config.parameters
+    parameters = dict(config.parameters)

     model_classes = {
         "OllamaLLM": (OllamaLLM, ollama_on_init_strategy),
@@ -36,6 +40,12 @@ def init_llm_with_strategy_factory(
         "ChatGoogleGenerativeAI": (ChatGoogleGenerativeAI, None),
         "ChatOpenAI": (ChatOpenAI, None),
         "ChatAnthropic": (ChatAnthropic, None),
+        "ChatCopilot": (ChatCopilot, None),
+    }
+
+    # Runtime overrides: ChatCopilot needs pool_size = max concurrent requests
+    runtime_overrides = {
+        "ChatCopilot": {"pool_size": test_case_concurrency * chunk_concurrency},
     }

     if model_type not in model_classes:
@@ -43,6 +53,9 @@ def init_llm_with_strategy_factory(
             f"Unsupported model type: {model_type} not in {model_classes.keys()}"
         )

+    if model_type in runtime_overrides:
+        parameters.update(runtime_overrides[model_type])
+
     model_class, strategy = model_classes[model_type]
     try:
         return model_class(**parameters), strategy
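
A hedged sketch of how the runtime override plays out (the `LLMFactoryModel` construction is illustrative; only the `model_type` and `parameters` fields are visible in this diff, so the call may need adjusting against the real config model):

```python
from result_companion.core.parsers.config import LLMFactoryModel
from result_companion.entrypoints.run_rc import init_llm_with_strategy_factory

# Illustrative values: with test_case_concurrency=4 and chunk_concurrency=2,
# the ChatCopilot override injects pool_size = 4 * 2 = 8 before instantiation.
config = LLMFactoryModel(model_type="ChatCopilot", parameters={"model": "gpt-4.1"})
model, strategy = init_llm_with_strategy_factory(
    config, test_case_concurrency=4, chunk_concurrency=2
)
assert model.pool_size == 8
assert strategy is None  # ChatCopilot has no on-init strategy in model_classes
```

Copying `parameters` with `dict(...)` before applying the override also keeps the parsed config object itself unmodified.
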
@@ -62,6 +75,7 @@ async def _main(
     chunk_concurrency: Optional[int] = None,
     include_tags: Optional[list[str]] = None,
     exclude_tags: Optional[list[str]] = None,
+    dryrun: bool = False,
 ) -> bool:
     set_global_log_level(str(log_level))

@@ -100,7 +114,9 @@ async def _main(
     question_from_config_file = parsed_config.llm_config.question_prompt
     template = parsed_config.llm_config.prompt_template
     model, model_init_strategy = init_llm_with_strategy_factory(
-        parsed_config.llm_factory
+        parsed_config.llm_factory,
+        test_case_concurrency=parsed_config.concurrency.test_case,
+        chunk_concurrency=parsed_config.concurrency.chunk,
     )

     if model_init_strategy:
@@ -118,6 +134,7 @@ async def _main(
         parsed_config,
         prompt_template,
         model,
+        dryrun=dryrun,
     )

     report_path = report if report else "rc_log.html"
@@ -150,6 +167,7 @@ def run_rc(
     chunk_concurrency: Optional[int] = None,
     include_tags: Optional[list[str]] = None,
     exclude_tags: Optional[list[str]] = None,
+    dryrun: bool = False,
 ) -> bool:
     try:
         return asyncio.run(
@@ -163,6 +181,7 @@ def run_rc(
                chunk_concurrency=chunk_concurrency,
                include_tags=include_tags,
                exclude_tags=exclude_tags,
+               dryrun=dryrun,
            )
        )
    except Exception:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: result-companion
-Version: 0.0.2
+Version: 0.0.3
 Summary: AI-powered analysis of Robot Framework test failures - instant insights from output.xml
 License: Apache-2.0
 License-File: LICENSE
@@ -19,6 +19,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Software Development :: Testing
+Requires-Dist: github-copilot-sdk (>=0.1.0,<0.2.0)
 Requires-Dist: langchain-anthropic (>=0.3.7,<0.4.0)
 Requires-Dist: langchain-aws (>=0.2.10,<0.3.0)
 Requires-Dist: langchain-community (>=0.3.9,<0.4.0)
@@ -72,7 +73,24 @@ Your enhanced `log.html` now includes:

 ## Quick Start

-### Option 1: Local AI (Free, Private)
+### Option 1: GitHub Copilot (Easiest for Users With Copilot)
+
+Already have GitHub Copilot? Use it directly—no API keys needed.
+
+```bash
+pip install result-companion
+
+# One-time setup
+brew install copilot-cli # or: npm install -g @github/copilot
+copilot /login # Login when prompted, then /exit
+
+# Analyze your tests
+result-companion analyze -o output.xml -c examples/copilot_config.yaml
+```
+
+See [Copilot setup guide](https://github.com/miltroj/result-companion/blob/main/examples/EXAMPLES.md#github-copilot).
+
+### Option 2: Local AI (Free, Private)

 ```bash
 pip install result-companion
@@ -85,7 +103,7 @@ result-companion setup model deepseek-r1:1.5b
 result-companion analyze -o output.xml
 ```

-### Option 2: Cloud AI ([OpenAI](https://github.com/miltroj/result-companion/blob/main/examples/EXAMPLES.md#openai), Azure, Google)
+### Option 3: Cloud AI ([OpenAI](https://github.com/miltroj/result-companion/blob/main/examples/EXAMPLES.md#openai), Azure, Google)

 ```bash
 pip install result-companion
@@ -148,6 +166,7 @@ See [examples/EXAMPLES.md](https://github.com/miltroj/result-companion/blob/main
 ## Configuration Examples

 Check [`examples/`](https://github.com/miltroj/result-companion/tree/main/examples) for ready-to-use configs:
+- **GitHub Copilot** (easiest for users with copilot)
 - Local Ollama setup (default)
 - OpenAI, Azure, Google Cloud
 - Custom endpoints (Databricks, self-hosted)
@@ -1,18 +1,19 @@
 result_companion/__init__.py,sha256=RcAx4Ybuw0kE54PzDu_P8znTVXYsHBRzkcSX06dQSYE,227
 result_companion/core/analizers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 result_companion/core/analizers/common.py,sha256=gD21KHsFet-4DdrguZsAYQy9zn5dF5M5E_ziEaLWM80,1724
-result_companion/core/analizers/factory_common.py,sha256=CRXYzClKTij7vQ9NxcPqc3FoXCV7bRx13HbkQf6BRUo,3419
+result_companion/core/analizers/factory_common.py,sha256=4cxFde48UB84857D1PAeuu_FOf0PXTuXpcMTq4Ni6ho,4505
 result_companion/core/analizers/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 result_companion/core/analizers/local/ollama_exceptions.py,sha256=fsoBpVSfcxawmhAqNV81RGsgi5neW4GmEUtuVerTvyE,319
 result_companion/core/analizers/local/ollama_install.py,sha256=29pu3wJwOQdm7H-DEJlJDOSrlOoAeUqwvaZ7YEhNsKo,8866
 result_companion/core/analizers/local/ollama_runner.py,sha256=d2AfzuHPStLJMdTMmgk0pyKZRNtlJW7hxUMEXXAdiX4,4566
 result_companion/core/analizers/local/ollama_server_manager.py,sha256=L9k6kTDhGVb9hS48tzz2gOV7SKpBE20upuQi1QEJeFc,6572
-result_companion/core/analizers/models.py,sha256=eo5zVSNctRMrQlnYtuAc6rK-u3gak4gbXwyuneFbF3I,431
+result_companion/core/analizers/models.py,sha256=0gPCxjBnW5N6w0v8eatmA5AxR_WCvFJPgkUrtUJscYE,521
 result_companion/core/analizers/remote/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+result_companion/core/analizers/remote/copilot.py,sha256=5TNuOJy9yG2W9WeCiy3ZmFGML245Vs6bzBGqZFWnrEU,5333
 result_companion/core/analizers/remote/custom_endpoint.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 result_companion/core/analizers/remote/openai.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-result_companion/core/chunking/chunking.py,sha256=mq35yCy-917hBYvOn54P1wAcIDYHB9K9BULgVKkef5E,3623
-result_companion/core/chunking/utils.py,sha256=nogJbPtwBPNomRwJ9P8ucU8YNeovmPS_mnzW8dYsSDM,3834
+result_companion/core/chunking/chunking.py,sha256=hLB17tuT4OogIa7TLAl8JAX-dzo7MW7y_kJcghSTqx8,3815
+result_companion/core/chunking/utils.py,sha256=gaTDi2511b9mK8z26zmz2YpmkjijTvdSRhN6fuJZ8DM,3991
 result_companion/core/configs/default_config.yaml,sha256=UD9wRPf3nSbKdHNTilVISlz40R97ZmtAf7cLb6PS4gc,2396
 result_companion/core/html/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 result_companion/core/html/html_creator.py,sha256=Jmxw6jy-T-DKHF5oAWXc9bK2lUa4OnBA-ksWFSEFilY,7333
@@ -24,14 +25,14 @@ result_companion/core/results/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5N
 result_companion/core/results/visitors.py,sha256=rZ0jHt5m2gXF5X2TzYm-XtEyXFxvowpAH5ap3Paw-VM,1373
 result_companion/core/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 result_companion/core/utils/log_levels.py,sha256=64rq3mrRQs2yKBjD4mojSHh0l8ok4V60ZM8LU_foJ58,544
-result_companion/core/utils/logging_config.py,sha256=zBdjTpTz3OMC_J_yona083oObyUKo1qlnQiXbVSzza4,3495
+result_companion/core/utils/logging_config.py,sha256=hJN6SET1wsAJ61rX54bmAyI6CJjg7hDx5S2fW4YXIB0,3744
 result_companion/core/utils/progress.py,sha256=YmO1XvJg74DvYHGGeU5upQmrWol1Ote0pB4dYmcHQzw,1675
 result_companion/entrypoints/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 result_companion/entrypoints/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-result_companion/entrypoints/cli/cli_app.py,sha256=xTjE3b_SEIOc67NhK81vBtnEcID7wqmsX_qrgxNjIi8,7503
-result_companion/entrypoints/run_rc.py,sha256=76fNOElhhbWOxxNUh9x_CazSWVchsCz0h8xpvQX3qj8,6022
-result_companion-0.0.2.dist-info/METADATA,sha256=Zyq3uTFPZfLp-v_0xqWxe2VZ6sLH5DdxQKSpQW_acXw,7147
-result_companion-0.0.2.dist-info/WHEEL,sha256=3ny-bZhpXrU6vSQ1UPG34FoxZBp3lVcvK0LkgUz6VLk,88
-result_companion-0.0.2.dist-info/entry_points.txt,sha256=oK1iuYUNOtyMAIcc1GLVWntCaZqpb0Ioj_LpnJNq-qc,81
-result_companion-0.0.2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-result_companion-0.0.2.dist-info/RECORD,,
+result_companion/entrypoints/cli/cli_app.py,sha256=khSyQ73RXXRceMdEhkSx1J_YCR29u4g2OwgQfxm74nE,7660
+result_companion/entrypoints/run_rc.py,sha256=_5v1iCe8rBzqmSLTcDmV85olt2cenOl0GL2iNo4IM9E,6792
+result_companion-0.0.3.dist-info/METADATA,sha256=3gipY-xesUmEQrNslTd-9um7iNz9ufKpQNSZ2F2K-rc,7780
+result_companion-0.0.3.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+result_companion-0.0.3.dist-info/entry_points.txt,sha256=oK1iuYUNOtyMAIcc1GLVWntCaZqpb0Ioj_LpnJNq-qc,81
+result_companion-0.0.3.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+result_companion-0.0.3.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.3.0
+Generator: poetry-core 2.3.1
 Root-Is-Purelib: true
 Tag: py3-none-any