uipath 2.1.73__py3-none-any.whl → 2.1.75__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uipath/_cli/_evals/_console_progress_reporter.py +241 -0
- uipath/_cli/_evals/_models/_output.py +2 -0
- uipath/_cli/_evals/_progress_reporter.py +11 -5
- uipath/_cli/_evals/_runtime.py +117 -50
- uipath/_cli/_runtime/_contracts.py +4 -2
- uipath/_cli/_runtime/_logging.py +83 -26
- uipath/_cli/cli_eval.py +5 -5
- uipath/_cli/middlewares.py +3 -3
- uipath/_events/_events.py +2 -0
- {uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/METADATA +1 -1
- {uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/RECORD +14 -13
- {uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/WHEEL +0 -0
- {uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/entry_points.txt +0 -0
- {uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/licenses/LICENSE +0 -0
uipath/_cli/_evals/_console_progress_reporter.py
ADDED
@@ -0,0 +1,241 @@
+"""Console progress reporter for evaluation runs with line-by-line output."""
+
+import logging
+from typing import Any, Dict
+
+from rich.console import Console
+from rich.rule import Rule
+from rich.table import Table
+
+from uipath._events._event_bus import EventBus
+from uipath._events._events import (
+    EvalRunCreatedEvent,
+    EvalRunUpdatedEvent,
+    EvalSetRunCreatedEvent,
+    EvalSetRunUpdatedEvent,
+    EvaluationEvents,
+)
+from uipath.eval.evaluators import BaseEvaluator
+from uipath.eval.models import ScoreType
+
+logger = logging.getLogger(__name__)
+
+
+class ConsoleProgressReporter:
+    """Handles displaying evaluation progress to the console with line-by-line output."""
+
+    def __init__(self):
+        self.console = Console()
+        self.evaluators: Dict[str, BaseEvaluator[Any]] = {}
+        self.display_started = False
+        self.eval_results_by_name: Dict[str, list[Any]] = {}
+
+    def _convert_score_to_numeric(self, eval_result) -> float:
+        """Convert evaluation result score to numeric value."""
+        score_value = eval_result.result.score
+        if eval_result.result.score_type == ScoreType.BOOLEAN:
+            score_value = 100 if score_value else 0
+        return score_value
+
+    def _get_evaluator_name(self, evaluator_id: str) -> str:
+        """Get evaluator name from ID, with fallback."""
+        return self.evaluators.get(
+            evaluator_id,
+            type(
+                "obj",
+                (object,),
+                {"name": f"Evaluator {evaluator_id[:8]}"},
+            )(),
+        ).name
+
+    def _display_successful_evaluation(self, eval_name: str, eval_results) -> None:
+        """Display results for a successful evaluation."""
+        from rich.text import Text
+
+        if eval_results:
+            result = Text()
+            result.append("▌", style="bold green")
+            result.append(" ", style="")
+            result.append(eval_name, style="bold green")
+            self.console.print(result)
+            table = Table(show_header=False, box=None, padding=(0, 2, 0, 2))
+
+            for eval_result in eval_results:
+                evaluator_name = self._get_evaluator_name(eval_result.evaluator_id)
+                score_value = self._convert_score_to_numeric(eval_result)
+                table.add_row(
+                    f"{evaluator_name}", f"[bold cyan]{score_value:.1f}[/bold cyan]"
+                )
+
+            self.console.print(table)
+        else:
+            result = Text()
+            result.append(" ✓ ", style="bold green")
+            result.append(eval_name, style="bold white")
+            result.append(" - No evaluators", style="dim")
+            self.console.print(result)
+
+    def _extract_error_message(self, eval_item_payload) -> str:
+        """Extract clean error message from evaluation item."""
+        if hasattr(eval_item_payload, "_error_message"):
+            error_message = getattr(eval_item_payload, "_error_message", None)
+            if error_message:
+                return str(error_message) or "Execution failed"
+        return "Execution failed"
+
+    def _display_failed_evaluation(self, eval_name: str, error_msg: str) -> None:
+        """Display results for a failed evaluation."""
+        from rich.text import Text
+
+        result = Text()
+        result.append(" ✗ ", style="bold red")
+        result.append(eval_name, style="bold white")
+        self.console.print(result)
+
+        error_text = Text()
+        error_text.append(" ", style="")
+        error_text.append(error_msg, style="red")
+        self.console.print(error_text)
+
+    def start_display(self):
+        """Start the display."""
+        if not self.display_started:
+            self.console.print()
+            self.console.print("→ [bold]Running Evaluations[/bold]")
+            self.console.print()
+            self.display_started = True
+
+    async def handle_create_eval_set_run(self, payload: EvalSetRunCreatedEvent) -> None:
+        """Handle evaluation set run creation."""
+        try:
+            self.evaluators = {eval.id: eval for eval in payload.evaluators}
+        except Exception as e:
+            logger.error(f"Failed to handle create eval set run event: {e}")
+
+    async def handle_create_eval_run(self, payload: EvalRunCreatedEvent) -> None:
+        """Handle individual evaluation run creation."""
+        try:
+            if not self.display_started:
+                self.start_display()
+
+            self.console.print(f" ○ [dim]{payload.eval_item.name}[/dim] - Running...")
+        except Exception as e:
+            logger.error(f"Failed to handle create eval run event: {e}")
+
+    async def handle_update_eval_run(self, payload: EvalRunUpdatedEvent) -> None:
+        """Handle evaluation run updates."""
+        try:
+            if payload.success:
+                # Store results for final display
+                self.eval_results_by_name[payload.eval_item.name] = payload.eval_results
+                self._display_successful_evaluation(
+                    payload.eval_item.name, payload.eval_results
+                )
+            else:
+                error_msg = self._extract_error_message(payload.eval_item)
+                self._display_failed_evaluation(payload.eval_item.name, error_msg)
+
+            logs = payload.logs
+
+            self.console.print(
+                Rule(
+                    f"[dim italic]Execution Logs: {payload.eval_item.name}[/dim italic]",
+                    style="dim",
+                    align="center",
+                )
+            )
+
+            if len(logs) > 0:
+                for record in logs:
+                    log_line = f" [dim]{record.getMessage()}[/dim]"
+                    self.console.print(log_line)
+            else:
+                self.console.print(" [dim italic]No execution logs[/dim italic]")
+
+            self.console.print(Rule(style="dim"))
+        except Exception as e:
+            logger.error(f"Console reporter error: {e}")
+
+    async def handle_update_eval_set_run(self, payload: EvalSetRunUpdatedEvent) -> None:
+        """Handle evaluation set run completion."""
+        try:
+            self.final_results = payload.evaluator_scores
+            self.display_final_results()
+        except Exception as e:
+            logger.error(f"Console reporter error: {e}")
+
+    def display_final_results(self):
+        """Display final results summary."""
+        self.console.print()
+
+        if hasattr(self, "final_results") and self.final_results:
+            from rich.table import Table
+
+            # Group evaluators by ID to organize display
+            evaluator_ids = list(self.final_results.keys())
+
+            # Print title
+            self.console.print("[bold]Evaluation Results[/bold]")
+            self.console.print()
+
+            # Create single summary table
+            summary_table = Table(show_header=True, padding=(0, 2))
+            summary_table.add_column("Evaluation", style="cyan")
+
+            # Add column for each evaluator
+            for evaluator_id in evaluator_ids:
+                evaluator_name = self._get_evaluator_name(evaluator_id)
+                summary_table.add_column(evaluator_name, justify="right")
+
+            # Add row for each evaluation
+            for eval_name, eval_results in self.eval_results_by_name.items():
+                row_values = [eval_name]
+
+                # Get score for each evaluator
+                for evaluator_id in evaluator_ids:
+                    score_found = False
+                    for eval_result in eval_results:
+                        if eval_result.evaluator_id == evaluator_id:
+                            score_value = self._convert_score_to_numeric(eval_result)
+                            row_values.append(f"{score_value:.1f}")
+                            score_found = True
+                            break
+
+                    if not score_found:
+                        row_values.append("-")
+
+                summary_table.add_row(*row_values)
+
+            # Add separator row before average
+            summary_table.add_section()
+
+            # Add average row
+            avg_row_values = ["[bold]Average[/bold]"]
+            for evaluator_id in evaluator_ids:
+                avg_score = self.final_results[evaluator_id]
+                avg_row_values.append(f"[bold]{avg_score:.1f}[/bold]")
+
+            summary_table.add_row(*avg_row_values)
+
+            self.console.print(summary_table)
+            self.console.print()
+        else:
+            self.console.print(
+                "→ [bold green]All evaluations completed successfully![/bold green]"
+            )
+            self.console.print()
+
+    async def subscribe_to_eval_runtime_events(self, event_bus: EventBus) -> None:
+        """Subscribe to evaluation runtime events."""
+        event_bus.subscribe(
+            EvaluationEvents.CREATE_EVAL_SET_RUN, self.handle_create_eval_set_run
+        )
+        event_bus.subscribe(
+            EvaluationEvents.CREATE_EVAL_RUN, self.handle_create_eval_run
+        )
+        event_bus.subscribe(
+            EvaluationEvents.UPDATE_EVAL_RUN, self.handle_update_eval_run
+        )
+        event_bus.subscribe(
+            EvaluationEvents.UPDATE_EVAL_SET_RUN, self.handle_update_eval_set_run
+        )
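The new reporter is a plain event-bus subscriber, so wiring it up takes two calls. A minimal usage sketch, assuming uipath 2.1.75 is installed and that `EventBus()` constructs without arguments (the constructor is not shown in this diff):

```python
import asyncio

from uipath._cli._evals._console_progress_reporter import ConsoleProgressReporter
from uipath._events._event_bus import EventBus


async def main() -> None:
    event_bus = EventBus()  # assumed no-arg constructor; not shown in this diff
    reporter = ConsoleProgressReporter()
    # Registers handlers for CREATE_EVAL_SET_RUN, CREATE_EVAL_RUN,
    # UPDATE_EVAL_RUN and UPDATE_EVAL_SET_RUN on the shared bus.
    await reporter.subscribe_to_eval_runtime_events(event_bus)
    # From here, every EvaluationEvents payload published on the bus
    # (normally by UiPathEvalRuntime) is rendered line by line.


asyncio.run(main())
```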
uipath/_cli/_evals/_models/_output.py
CHANGED
@@ -1,3 +1,4 @@
+import logging
 from typing import List, Optional
 
 from opentelemetry.sdk.trace import ReadableSpan
@@ -15,6 +16,7 @@ class UiPathEvalRunExecutionOutput(BaseModel):
 
     execution_time: float
     spans: list[ReadableSpan]
+    logs: list[logging.LogRecord]
     result: UiPathRuntimeResult
 
 
uipath/_cli/_evals/_progress_reporter.py
CHANGED
@@ -7,6 +7,7 @@ import os
 from typing import Any, Dict, List
 
 from opentelemetry import trace
+from rich.console import Console
 
 from uipath import UiPath
 from uipath._cli._evals._models._evaluation_set import EvaluationItem, EvaluationStatus
@@ -68,6 +69,7 @@ class StudioWebProgressReporter:
 
         self._client = uipath.api_client
         self._console = console_logger
+        self._rich_console = Console()
         self._project_id = os.getenv("UIPATH_PROJECT_ID", None)
         if not self._project_id:
             logger.warning(
@@ -79,6 +81,10 @@ class StudioWebProgressReporter:
         self.evaluator_scores: Dict[str, List[float]] = {}
         self.eval_run_ids: Dict[str, str] = {}
 
+    def _format_error_message(self, error: Exception, context: str) -> None:
+        """Helper method to format and display error messages consistently."""
+        self._rich_console.print(f" • \u26a0 [dim]{context}: {error}[/dim]")
+
     @gracefully_handle_errors
     async def create_eval_set_run(
         self,
@@ -182,7 +188,7 @@ class StudioWebProgressReporter:
             logger.debug(f"Created eval set run with ID: {eval_set_run_id}")
 
         except Exception as e:
-
+            self._format_error_message(e, "StudioWeb create eval set run error")
 
     async def handle_create_eval_run(self, payload: EvalRunCreatedEvent) -> None:
         try:
@@ -197,7 +203,7 @@ class StudioWebProgressReporter:
             logger.warning("Cannot create eval run: eval_set_run_id not available")
 
         except Exception as e:
-
+            self._format_error_message(e, "StudioWeb create eval run error")
 
     async def handle_update_eval_run(self, payload: EvalRunUpdatedEvent) -> None:
         try:
@@ -238,7 +244,7 @@ class StudioWebProgressReporter:
             logger.debug(f"Updated eval run with ID: {eval_run_id}")
 
         except Exception as e:
-
+            self._format_error_message(e, "StudioWeb reporting error")
 
     async def handle_update_eval_set_run(self, payload: EvalSetRunUpdatedEvent) -> None:
         try:
@@ -254,7 +260,7 @@ class StudioWebProgressReporter:
             )
 
         except Exception as e:
-
+            self._format_error_message(e, "StudioWeb update eval set run error")
 
     async def subscribe_to_eval_runtime_events(self, event_bus: EventBus) -> None:
         event_bus.subscribe(
@@ -270,7 +276,7 @@ class StudioWebProgressReporter:
             EvaluationEvents.UPDATE_EVAL_SET_RUN, self.handle_update_eval_set_run
         )
 
-        logger.
+        logger.debug("StudioWeb progress reporter subscribed to evaluation events")
 
     def _extract_agent_snapshot(self, entrypoint: str) -> StudioWebAgentSnapshot:
         try:
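The effect of the new `_format_error_message` helper can be previewed in isolation. A sketch using only `rich` (the markup string is copied from the diff; the error and context values here are hypothetical):

```python
from rich.console import Console

console = Console()
error = ValueError("401 Unauthorized")   # hypothetical failure
context = "StudioWeb reporting error"
# Same markup the helper uses: bullet prefix, \u26a0 warning sign, dim text.
console.print(f" • \u26a0 [dim]{context}: {error}[/dim]")
```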
uipath/_cli/_evals/_runtime.py
CHANGED
@@ -1,4 +1,6 @@
 import json
+import logging
+import uuid
 from collections import defaultdict
 from pathlib import Path
 from time import time
@@ -25,6 +27,7 @@ from .._runtime._contracts import (
     UiPathRuntimeResult,
     UiPathRuntimeStatus,
 )
+from .._runtime._logging import ExecutionLogHandler
 from .._utils._eval_set import EvalHelpers
 from ._evaluator_factory import EvaluatorFactory
 from ._models._evaluation_set import EvaluationItem, EvaluationSet
@@ -72,6 +75,33 @@ class ExecutionSpanExporter(SpanExporter):
         self.clear()
 
 
+class ExecutionLogsExporter:
+    """Custom exporter that stores multiple execution log handlers."""
+
+    def __init__(self):
+        self._log_handlers: dict[str, ExecutionLogHandler] = {}
+
+    def register(self, execution_id: str, handler: ExecutionLogHandler) -> None:
+        self._log_handlers[execution_id] = handler
+
+    def get_logs(self, execution_id: str) -> list[logging.LogRecord]:
+        """Clear stored spans for one or all executions."""
+        log_handler = self._log_handlers.get(execution_id)
+        return log_handler.buffer if log_handler else []
+
+    def clear(self, execution_id: Optional[str] = None) -> None:
+        """Clear stored spans for one or all executions."""
+        if execution_id:
+            self._log_handlers.pop(execution_id, None)
+        else:
+            self._log_handlers.clear()
+
+    def flush_logs(self, execution_id: str, target_handler: logging.Handler) -> None:
+        log_handler = self._log_handlers.get(execution_id)
+        if log_handler:
+            log_handler.flush_execution_logs(target_handler)
+
+
 class UiPathEvalContext(UiPathRuntimeContext):
     """Context used for evaluation runs."""
 
@@ -96,6 +126,8 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         self.event_bus: EventBus = event_bus
         self.span_exporter: ExecutionSpanExporter = ExecutionSpanExporter()
         self.factory.add_span_exporter(self.span_exporter)
+        self.logs_exporter: ExecutionLogsExporter = ExecutionLogsExporter()
+        self.execution_id = str(uuid.uuid4())
 
     @classmethod
     def from_eval_context(
@@ -110,9 +142,6 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         if self.context.eval_set is None:
             raise ValueError("eval_set must be provided for evaluation runs")
 
-        if not self.context.execution_id:
-            raise ValueError("execution_id must be provided for evaluation runs")
-
         event_bus = self.event_bus
 
         evaluation_set = EvalHelpers.load_eval_set(
@@ -126,7 +155,7 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         await event_bus.publish(
             EvaluationEvents.CREATE_EVAL_SET_RUN,
             EvalSetRunCreatedEvent(
-                execution_id=self.
+                execution_id=self.execution_id,
                 entrypoint=self.context.entrypoint or "",
                 eval_set_id=evaluation_set.id,
                 no_of_evals=len(evaluation_set.evaluations),
@@ -142,7 +171,7 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
             await event_bus.publish(
                 EvaluationEvents.CREATE_EVAL_RUN,
                 EvalRunCreatedEvent(
-                    execution_id=self.
+                    execution_id=self.execution_id,
                     eval_item=eval_item,
                 ),
            )
@@ -153,60 +182,87 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
 
         results.evaluation_set_results.append(evaluation_run_results)
 
-
-
-
-
-
-
-
-
-
+        try:
+            agent_execution_output = await self.execute_runtime(eval_item)
+            evaluation_item_results: list[EvalItemResult] = []
+
+            for evaluator in evaluators:
+                evaluation_result = await self.run_evaluator(
+                    evaluator=evaluator,
+                    execution_output=agent_execution_output,
+                    eval_item=eval_item,
+                )
 
-
-
-                )
-                evaluator_counts[evaluator.id] += 1
-                count = evaluator_counts[evaluator.id]
-                evaluator_averages[evaluator.id] += (
-                    dto_result.score - evaluator_averages[evaluator.id]
-                ) / count
-
-                evaluation_run_results.evaluation_run_results.append(
-                    EvaluationRunResultDto(
-                        evaluator_name=evaluator.name,
-                        result=dto_result,
+                dto_result = EvaluationResultDto.from_evaluation_result(
+                    evaluation_result
                 )
-
-
-
-
-
+                evaluator_counts[evaluator.id] += 1
+                count = evaluator_counts[evaluator.id]
+                evaluator_averages[evaluator.id] += (
+                    dto_result.score - evaluator_averages[evaluator.id]
+                ) / count
+
+                evaluation_run_results.evaluation_run_results.append(
+                    EvaluationRunResultDto(
+                        evaluator_name=evaluator.name,
+                        result=dto_result,
+                    )
+                )
+                evaluation_item_results.append(
+                    EvalItemResult(
+                        evaluator_id=evaluator.id,
+                        result=evaluation_result,
+                    )
                 )
-                )
-
-        evaluation_run_results.compute_average_score()
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+            evaluation_run_results.compute_average_score()
+
+            await event_bus.publish(
+                EvaluationEvents.UPDATE_EVAL_RUN,
+                EvalRunUpdatedEvent(
+                    execution_id=self.execution_id,
+                    eval_item=eval_item,
+                    eval_results=evaluation_item_results,
+                    success=not agent_execution_output.result.error,
+                    agent_output=agent_execution_output.result.output,
+                    agent_execution_time=agent_execution_output.execution_time,
+                    spans=agent_execution_output.spans,
+                    logs=agent_execution_output.logs,
+                ),
+                wait_for_completion=False,
+            )
+        except Exception as e:
+            error_msg = str(e)
+            eval_item._error_message = error_msg  # type: ignore[attr-defined]
+
+            for evaluator in evaluators:
+                evaluator_counts[evaluator.id] += 1
+                count = evaluator_counts[evaluator.id]
+                evaluator_averages[evaluator.id] += (
+                    0.0 - evaluator_averages[evaluator.id]
+                ) / count
+
+            await event_bus.publish(
+                EvaluationEvents.UPDATE_EVAL_RUN,
+                EvalRunUpdatedEvent(
+                    execution_id=self.execution_id,
+                    eval_item=eval_item,
+                    eval_results=[],
+                    success=False,
+                    agent_output={},
+                    agent_execution_time=0.0,
+                    spans=[],
+                    logs=[],
+                ),
+                wait_for_completion=False,
+            )
 
         results.compute_average_score()
 
         await event_bus.publish(
             EvaluationEvents.UPDATE_EVAL_SET_RUN,
             EvalSetRunUpdatedEvent(
-                execution_id=self.
+                execution_id=self.execution_id,
                 evaluator_scores=evaluator_averages,
             ),
             wait_for_completion=False,
@@ -221,10 +277,12 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
     async def execute_runtime(
         self, eval_item: EvaluationItem
     ) -> UiPathEvalRunExecutionOutput:
+        eval_item_id = eval_item.id
         runtime_context: C = self.factory.new_context(
-            execution_id=
+            execution_id=eval_item_id,
             input_json=eval_item.inputs,
             is_eval_run=True,
+            log_handler=self._setup_execution_logging(eval_item_id),
         )
         attributes = {
             "evalId": eval_item.id,
@@ -247,15 +305,24 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         spans = self.span_exporter.get_spans(runtime_context.execution_id)
         self.span_exporter.clear(runtime_context.execution_id)
 
+        logs = self.logs_exporter.get_logs(runtime_context.execution_id)
+        self.logs_exporter.clear(runtime_context.execution_id)
+
         if result is None:
             raise ValueError("Execution result cannot be None for eval runs")
 
         return UiPathEvalRunExecutionOutput(
             execution_time=end_time - start_time,
             spans=spans,
+            logs=logs,
            result=result,
        )
 
+    def _setup_execution_logging(self, eval_item_id: str) -> ExecutionLogHandler:
+        execution_log_handler = ExecutionLogHandler(eval_item_id)
+        self.logs_exporter.register(eval_item_id, execution_log_handler)
+        return execution_log_handler
+
     async def run_evaluator(
         self,
         evaluator: BaseEvaluator[Any],
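The `ExecutionLogsExporter` above is essentially a registry of buffering handlers keyed by execution id. A stdlib-only sketch of the same register/collect/clear lifecycle (names mirror the diff, but this is an illustration, not the package's code):

```python
import logging


class BufferingHandler(logging.Handler):
    """Buffer records in memory, like ExecutionLogHandler in the diff."""

    def __init__(self) -> None:
        super().__init__()
        self.buffer: list[logging.LogRecord] = []

    def emit(self, record: logging.LogRecord) -> None:
        self.buffer.append(record)


# Registry keyed by execution id, like ExecutionLogsExporter.
handlers: dict[str, BufferingHandler] = {}

handler = BufferingHandler()
handlers["eval-item-1"] = handler               # register()

logger = logging.getLogger("demo")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info("hello from eval-item-1")
logger.removeHandler(handler)

records = handlers.pop("eval-item-1").buffer    # get_logs() + clear()
print([r.getMessage() for r in records])        # ['hello from eval-item-1']
```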
uipath/_cli/_runtime/_contracts.py
CHANGED
@@ -519,7 +519,8 @@ class UiPathBaseRuntime(ABC):
 
         await self.validate()
 
-        # Intercept all stdout/stderr/logs
+        # Intercept all stdout/stderr/logs
+        # write to file (runtime) or stdout (debug)
         self.logs_interceptor = LogsInterceptor(
             min_level=self.context.logs_min_level,
             dir=self.context.runtime_dir,
@@ -649,7 +650,7 @@
             raise
         finally:
             # Restore original logging
-            if self
+            if hasattr(self, "logs_interceptor"):
                 self.logs_interceptor.teardown()
 
             await self.cleanup()
@@ -698,6 +699,7 @@ class UiPathRuntimeFactory(Generic[T, C]):
         self.context_generator = context_generator
         self.tracer_provider: TracerProvider = TracerProvider()
         self.tracer_span_processors: List[SpanProcessor] = []
+        self.logs_exporter: Optional[Any] = None
        trace.set_tracer_provider(self.tracer_provider)
 
     def add_span_exporter(
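The switch to `hasattr(self, "logs_interceptor")` guards the `finally` block against setup failures: if execution aborts before the interceptor is assigned, an unguarded teardown would raise `AttributeError` and mask the original exception. A minimal illustration (not the package's code):

```python
class Interceptor:
    def teardown(self) -> None:
        print("teardown")


class Runtime:
    def execute(self, fail_early: bool) -> None:
        try:
            if fail_early:
                raise ValueError("validation failed")  # before assignment
            self.logs_interceptor = Interceptor()
        finally:
            # The guard added in this release: skip teardown if setup never ran.
            if hasattr(self, "logs_interceptor"):
                self.logs_interceptor.teardown()


try:
    Runtime().execute(fail_early=True)
except ValueError as e:
    print(f"original error surfaced: {e}")  # not masked by AttributeError
```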
uipath/_cli/_runtime/_logging.py
CHANGED
@@ -10,6 +10,35 @@ current_execution_id: ContextVar[Optional[str]] = ContextVar(
 )
 
 
+class ExecutionLogHandler(logging.Handler):
+    """Handler for an execution unit."""
+
+    def __init__(self, execution_id: str):
+        """Initialize the buffered handler."""
+        super().__init__()
+        self.execution_id: str = execution_id
+        self.buffer: list[logging.LogRecord] = []
+        self.setFormatter(logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s"))
+
+    def emit(self, record: logging.LogRecord):
+        """Store log record in buffer grouped by execution_id."""
+        self.buffer.append(record)
+
+    def flush_execution_logs(self, target_handler: logging.Handler) -> None:
+        """Flush buffered logs to a target handler.
+
+        Args:
+            target_handler: The handler to write the logs to
+        """
+        for record in self.buffer:
+            target_handler.handle(record)
+        target_handler.flush()
+
+    def clear_execution(self) -> None:
+        """Clear buffered logs without writing them."""
+        self.buffer.clear()
+
+
 class PersistentLogsHandler(logging.FileHandler):
     """A simple log handler that always writes to a single file without rotation."""
 
@@ -50,6 +79,16 @@ class ExecutionContextFilter(logging.Filter):
         return False
 
 
+class MasterExecutionFilter(logging.Filter):
+    """Filter for master handler that blocks logs from any child execution."""
+
+    def filter(self, record: logging.LogRecord) -> bool:
+        """Block logs that belong to a child execution context."""
+        ctx_execution_id = current_execution_id.get()
+        # Block if there's an active child execution context
+        return ctx_execution_id is None
+
+
 class LogsInterceptor:
     """Intercepts all logging and stdout/stderr, routing to either persistent log files or stdout based on whether it's running as a job or not."""
 
@@ -93,7 +132,9 @@ class LogsInterceptor:
         self.original_stderr = cast(TextIO, sys.stderr)
 
         self.log_handler: Union[
-            PersistentLogsHandler,
+            PersistentLogsHandler,
+            logging.StreamHandler[TextIO],
+            logging.Handler,
         ]
 
         if log_handler:
@@ -116,9 +157,14 @@ class LogsInterceptor:
         self.log_handler.setLevel(self.numeric_min_level)
 
         # Add execution context filter if execution_id provided
+        self.execution_filter: Optional[logging.Filter] = None
         if execution_id:
             self.execution_filter = ExecutionContextFilter(execution_id)
             self.log_handler.addFilter(self.execution_filter)
+        else:
+            # Master execution: filter out child execution logs
+            self.execution_filter = MasterExecutionFilter()
+            self.log_handler.addFilter(self.execution_filter)
 
         self.logger = logging.getLogger("runtime")
         self.patched_loggers: set[str] = set()
@@ -146,17 +192,23 @@ class LogsInterceptor:
         self.root_logger.setLevel(self.numeric_min_level)
 
         if self.execution_id:
-            #
+            # Child execution mode: add our handler without removing others
             if self.log_handler not in self.root_logger.handlers:
                 self.root_logger.addHandler(self.log_handler)
 
-            #
+            # Keep propagation enabled so logs flow through filters
+            # Our ExecutionContextFilter will ensure only our logs get through our handler
             for logger_name in logging.root.manager.loggerDict:
                 logger = logging.getLogger(logger_name)
-                # Keep propagation enabled
+                # Keep propagation enabled for filtering to work
+                # logger.propagate remains True (default)
                 self.patched_loggers.add(logger_name)
+
+            # Child executions should redirect stdout/stderr to their own handler
+            # This ensures print statements are captured per execution
+            self._redirect_stdout_stderr()
         else:
-            #
+            # Master execution mode: remove all handlers and add only ours
             self._clean_all_handlers(self.root_logger)
 
             # Set up propagation for all existing loggers
@@ -166,8 +218,8 @@ class LogsInterceptor:
             self._clean_all_handlers(logger)
             self.patched_loggers.add(logger_name)
 
-
-
+            # Master redirects stdout/stderr
+            self._redirect_stdout_stderr()
 
     def _redirect_stdout_stderr(self) -> None:
         """Redirect stdout and stderr to the logging system."""
@@ -218,15 +270,20 @@ class LogsInterceptor:
         stdout_logger = logging.getLogger("stdout")
         stderr_logger = logging.getLogger("stderr")
 
-        stdout_logger.propagate = False
-        stderr_logger.propagate = False
-
         if self.execution_id:
+            # Child execution: add our handler to stdout/stderr loggers
+            stdout_logger.propagate = False
+            stderr_logger.propagate = False
+
             if self.log_handler not in stdout_logger.handlers:
                 stdout_logger.addHandler(self.log_handler)
             if self.log_handler not in stderr_logger.handlers:
                 stderr_logger.addHandler(self.log_handler)
         else:
+            # Master execution: clean and set up handlers
+            stdout_logger.propagate = False
+            stderr_logger.propagate = False
+
             self._clean_all_handlers(stdout_logger)
             self._clean_all_handlers(stderr_logger)
 
@@ -249,23 +306,22 @@ class LogsInterceptor:
         logging.disable(self.original_disable_level)
 
         # Remove our handler and filter
-        if self.
-
-            self.log_handler.removeFilter(self.execution_filter)
-            if self.log_handler in self.root_logger.handlers:
-                self.root_logger.removeHandler(self.log_handler)
-
-            # Remove from stdout/stderr loggers too
-            stdout_logger = logging.getLogger("stdout")
-            stderr_logger = logging.getLogger("stderr")
-            if self.log_handler in stdout_logger.handlers:
-                stdout_logger.removeHandler(self.log_handler)
-            if self.log_handler in stderr_logger.handlers:
-                stderr_logger.removeHandler(self.log_handler)
-        else:
-            if self.log_handler in self.root_logger.handlers:
-                self.root_logger.removeHandler(self.log_handler)
+        if self.execution_filter:
+            self.log_handler.removeFilter(self.execution_filter)
 
+        if self.log_handler in self.root_logger.handlers:
+            self.root_logger.removeHandler(self.log_handler)
+
+        # Remove from stdout/stderr loggers
+        stdout_logger = logging.getLogger("stdout")
+        stderr_logger = logging.getLogger("stderr")
+        if self.log_handler in stdout_logger.handlers:
+            stdout_logger.removeHandler(self.log_handler)
+        if self.log_handler in stderr_logger.handlers:
+            stderr_logger.removeHandler(self.log_handler)
+
+        if not self.execution_id:
+            # Master execution: restore everything
             for logger_name in self.patched_loggers:
                 logger = logging.getLogger(logger_name)
                 if self.log_handler in logger.handlers:
@@ -278,6 +334,7 @@ class LogsInterceptor:
 
         self.log_handler.close()
 
+        # Only restore streams if we redirected them
         if self.original_stdout and self.original_stderr:
             sys.stdout = self.original_stdout
             sys.stderr = self.original_stderr
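The master/child split hinges on the `current_execution_id` ContextVar: a child's `ExecutionContextFilter` passes only records tagged with its own execution id, while the new `MasterExecutionFilter` drops anything emitted while a child context is active. A stdlib sketch of the master side (simplified from the diff):

```python
import logging
from contextvars import ContextVar
from typing import Optional

current_execution_id: ContextVar[Optional[str]] = ContextVar(
    "current_execution_id", default=None
)


class MasterExecutionFilter(logging.Filter):
    """Pass records only when no child execution context is active."""

    def filter(self, record: logging.LogRecord) -> bool:
        return current_execution_id.get() is None


handler = logging.StreamHandler()
handler.addFilter(MasterExecutionFilter())
logger = logging.getLogger("demo")
logger.setLevel(logging.INFO)
logger.addHandler(handler)

logger.info("master log")                 # emitted: no child context active

token = current_execution_id.set("eval-item-1")
logger.info("child log")                  # suppressed by the master filter
current_execution_id.reset(token)

logger.info("master log again")           # emitted again
```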
uipath/_cli/cli_eval.py
CHANGED
@@ -2,11 +2,11 @@
 import ast
 import asyncio
 import os
-import uuid
 from typing import List, Optional
 
 import click
 
+from uipath._cli._evals._console_progress_reporter import ConsoleProgressReporter
 from uipath._cli._evals._progress_reporter import StudioWebProgressReporter
 from uipath._cli._evals._runtime import (
     UiPathEvalContext,
@@ -114,7 +114,6 @@ def eval(
     eval_context = UiPathEvalContext.with_defaults(
         execution_output_file=output_file,
         entrypoint=runtime_entrypoint,
-        execution_id=str(uuid.uuid4()),
     )
 
     eval_context.no_report = no_report
@@ -122,6 +121,9 @@ def eval(
     eval_context.eval_set = eval_set or EvalHelpers.auto_discover_eval_set()
     eval_context.eval_ids = eval_ids
 
+    console_reporter = ConsoleProgressReporter()
+    asyncio.run(console_reporter.subscribe_to_eval_runtime_events(event_bus))
+
     try:
         runtime_factory = UiPathRuntimeFactory(
             UiPathScriptRuntime,
@@ -143,11 +145,9 @@ def eval(
         asyncio.run(execute())
     except Exception as e:
         console.error(
-            f"Error:
+            f"Error occurred: {e or 'Execution failed'}", include_traceback=True
         )
 
-    console.success("Evaluation completed successfully")
-
 
 if __name__ == "__main__":
     eval()
uipath/_cli/middlewares.py
CHANGED
@@ -121,20 +121,20 @@ class Middlewares:
             ]
 
             if middlewares:
-                logger.
+                logger.debug(f"Found {len(middlewares)} middleware plugins")
 
                 for entry_point in middlewares:
                     try:
                         register_func = entry_point.load()
                         register_func()
-                        logger.
+                        logger.debug(f"Loaded middleware plugin: {entry_point.name}")
                     except Exception as e:
                         console.error(
                             f"Failed to load middleware plugin {entry_point.name}: {str(e)}",
                             include_traceback=True,
                         )
             else:
-                logger.
+                logger.debug("No middleware plugins found")
 
         except Exception as e:
             logger.error(f"No middleware plugins loaded: {str(e)}")
uipath/_events/_events.py
CHANGED
@@ -1,4 +1,5 @@
 import enum
+import logging
 from typing import Any, List, Union
 
 from opentelemetry.sdk.trace import ReadableSpan
@@ -38,6 +39,7 @@ class EvalRunUpdatedEvent(BaseModel):
     agent_output: Any
     agent_execution_time: float
     spans: List[ReadableSpan]
+    logs: List[logging.LogRecord]
 
 
 class EvalSetRunUpdatedEvent(BaseModel):
{uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath
-Version: 2.1.73
+Version: 2.1.75
 Summary: Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools.
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-python
{uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/RECORD
CHANGED
@@ -9,7 +9,7 @@ uipath/_cli/__init__.py,sha256=2RUgXYd8uJaYjA67xWb0w4IZuBmZoY8G1ccNmEQk9oM,2343
 uipath/_cli/cli_auth.py,sha256=ZEA0Fwoo77Ez9ctpRAIq7sbAwj8F4OouAbMp1g1OvjM,2601
 uipath/_cli/cli_deploy.py,sha256=KPCmQ0c_NYD5JofSDao5r6QYxHshVCRxlWDVnQvlp5w,645
 uipath/_cli/cli_dev.py,sha256=nEfpjw1PZ72O6jmufYWVrueVwihFxDPOeJakdvNHdOA,2146
-uipath/_cli/cli_eval.py,sha256=
+uipath/_cli/cli_eval.py,sha256=yMXx56sk7IqVcpR3MIjjGGYcFaciKaZ4iIi8SY-Oixo,4918
 uipath/_cli/cli_init.py,sha256=Ac3-9tIH3rpikIX1ehWTo7InW5tjVNoz_w6fjvgLK4w,7052
 uipath/_cli/cli_invoke.py,sha256=m-te-EjhDpk_fhFDkt-yQFzmjEHGo5lQDGEQWxSXisQ,4395
 uipath/_cli/cli_new.py,sha256=9378NYUBc9j-qKVXV7oja-jahfJhXBg8zKVyaon7ctY,2102
@@ -18,7 +18,7 @@ uipath/_cli/cli_publish.py,sha256=DgyfcZjvfV05Ldy0Pk5y_Le_nT9JduEE_x-VpIc_Kq0,64
 uipath/_cli/cli_pull.py,sha256=PZ2hkfsfN-ElNa3FHjNetTux8XH03tDY5kWWqydQ2OY,6832
 uipath/_cli/cli_push.py,sha256=-j-gDIbT8GyU2SybLQqFl5L8KI9nu3CDijVtltDgX20,3132
 uipath/_cli/cli_run.py,sha256=1FKv20EjxrrP1I5rNSnL_HzbWtOAIMjB3M--4RPA_Yo,3709
-uipath/_cli/middlewares.py,sha256=
+uipath/_cli/middlewares.py,sha256=0D9a-wphyetnH9T97F08o7-1OKWF1lMweFHHAR0xiOw,4979
 uipath/_cli/spinner.py,sha256=bS-U_HA5yne11ejUERu7CQoXmWdabUD2bm62EfEdV8M,1107
 uipath/_cli/_auth/_auth_server.py,sha256=22km0F1NFNXgyLbvtAx3ssiQlVGHroLdtDCWEqiCiMg,7106
 uipath/_cli/_auth/_auth_service.py,sha256=Thtp2wXZQAHqossPPXuP6sAEe4Px9xThhZutMECRrdU,6386
@@ -44,13 +44,14 @@ uipath/_cli/_dev/_terminal/_styles/terminal.tcss,sha256=ktVpKwXIXw2VZp8KIZD6fO9i
 uipath/_cli/_dev/_terminal/_utils/_chat.py,sha256=YUZxYVdmEManwHDuZsczJT1dWIYE1dVBgABlurwMFcE,8493
 uipath/_cli/_dev/_terminal/_utils/_exporter.py,sha256=oI6D_eMwrh_2aqDYUh4GrJg8VLGrLYhDahR-_o0uJns,4144
 uipath/_cli/_dev/_terminal/_utils/_logger.py,sha256=_ipTl_oAiMF9I7keGt2AAFAMz40DNLVMVkoiq-07UAU,2943
+uipath/_cli/_evals/_console_progress_reporter.py,sha256=lpQvppoejRKD4Xli15Q9dJIJJz9AckhVzuOv7rcPkcM,9230
 uipath/_cli/_evals/_evaluator_factory.py,sha256=Gycv94VtGOpMir_Gba-UoiAyrSRfbSfe8_pTfjzcA9Q,3875
-uipath/_cli/_evals/_progress_reporter.py,sha256=
-uipath/_cli/_evals/_runtime.py,sha256=
+uipath/_cli/_evals/_progress_reporter.py,sha256=kX7rNSa-QCLXIzK-vb9Jjf-XLEtucdeiQPgPlSkpp2U,16778
+uipath/_cli/_evals/_runtime.py,sha256=z4wJceJcGT5psBOB87tLDCLkhHafSmtE1q0EU65wnaw,14407
 uipath/_cli/_evals/_models/_evaluation_set.py,sha256=XgPNLWciE4FgCYzZXV2kRYHzdtbc33FWSQmZQqVSdMk,4747
 uipath/_cli/_evals/_models/_evaluator.py,sha256=fuC3UOYwPD4d_wdynHeLSCzbu82golNAnnPnxC8Y4rk,3315
 uipath/_cli/_evals/_models/_evaluator_base_params.py,sha256=lTYKOV66tcjW85KHTyOdtF1p1VDaBNemrMAvH8bFIFc,382
-uipath/_cli/_evals/_models/_output.py,sha256=
+uipath/_cli/_evals/_models/_output.py,sha256=DmwFXh1YdLiMXyXmyoZr_4hgrrv3oiHbrrtIWMqGfsg,3145
 uipath/_cli/_evals/_models/_sw_reporting.py,sha256=tSBLQFAdOIun8eP0vsqt56K6bmCZz_uMaWI3hskg_24,536
 uipath/_cli/_evals/_models/_trajectory_span.py,sha256=8ukM8sB9rvzBMHfC_gnexAC3xlp4uMDevKZrRzcgrm4,3637
 uipath/_cli/_evals/mocks/__init__.py,sha256=2WXwAy_oZw5bKp6L0HB13QygCJeftOB_Bget0AI6Gik,32
@@ -60,10 +61,10 @@ uipath/_cli/_evals/mocks/mocker_factory.py,sha256=V5QKSTtQxztTo4-fK1TyAaXw2Z3mHf
 uipath/_cli/_evals/mocks/mockito_mocker.py,sha256=LtYT6lJM9vc3qtbSZJcUeCzDn4zarkBVj7In_EX7kYY,2087
 uipath/_cli/_evals/mocks/mocks.py,sha256=WqjWtHqKQXAsO1Wwom3Zcr1T09GQygwBWVp-EsxdW8o,4443
 uipath/_cli/_push/sw_file_handler.py,sha256=iE8Sk1Z-9hxmLFFj3j-k4kTK6TzNFP6hUCmxTudG6JQ,18251
-uipath/_cli/_runtime/_contracts.py,sha256=
+uipath/_cli/_runtime/_contracts.py,sha256=E8Is7EQfAu7_hCbeZI68gmTxSxo4X7_U4vcSl7D3Syg,28988
 uipath/_cli/_runtime/_escalation.py,sha256=x3vI98qsfRA-fL_tNkRVTFXioM5Gv2w0GFcXJJ5eQtg,7981
 uipath/_cli/_runtime/_hitl.py,sha256=VKbM021nVg1HEDnTfucSLJ0LsDn83CKyUtVzofS2qTU,11369
-uipath/_cli/_runtime/_logging.py,sha256=
+uipath/_cli/_runtime/_logging.py,sha256=jwBfsy0Hi4zkfPH-v9dQ7m5dcJeuE0j_OxdpI-DhHaw,13854
 uipath/_cli/_runtime/_runtime.py,sha256=gby9-avNNlEATEfSXtY8FfJ8nREsSCGA4wMgDlSXTDE,2297
 uipath/_cli/_runtime/_script_executor.py,sha256=PjbmEbyCMofGH2F85b8RFsxdV3Tqw0kVqdWOOk2ZLlI,9687
 uipath/_cli/_templates/.psmdcp.template,sha256=C7pBJPt98ovEljcBvGtEUGoWjjQhu9jls1bpYjeLOKA,611
@@ -86,7 +87,7 @@ uipath/_cli/_utils/_tracing.py,sha256=2igb03j3EHjF_A406UhtCKkPfudVfFPjUq5tXUEG4o
 uipath/_cli/_utils/_uv_helpers.py,sha256=6SvoLnZPoKIxW0sjMvD1-ENV_HOXDYzH34GjBqwT138,3450
 uipath/_events/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 uipath/_events/_event_bus.py,sha256=4-VzstyX69cr7wT1EY7ywp-Ndyz2CyemD3Wk_-QmRpo,5496
-uipath/_events/_events.py,sha256=
+uipath/_events/_events.py,sha256=rZAXze6F4M9MkoFv-Zcwmatfzq5JapyJoxKeeWuI_DY,1301
 uipath/_resources/AGENTS.md,sha256=YWhWuX9XIbyVhVT3PnPc4Of3_q6bsNJcuzYu3N8f_Ug,25850
 uipath/_services/__init__.py,sha256=eYZElMfYDQTQU6MMjIke5J-GGT9pzLD5QfbwLiTQkEE,1037
 uipath/_services/_base_service.py,sha256=x9-9jhPzn9Z16KRdFHhJNvV-FZHvTniMsDfxlS4Cutk,5782
@@ -165,8 +166,8 @@ uipath/tracing/_traced.py,sha256=yBIY05PCCrYyx50EIHZnwJaKNdHPNx-YTR1sHQl0a98,199
 uipath/tracing/_utils.py,sha256=qd7N56tg6VXQ9pREh61esBgUWLNA0ssKsE0QlwrRWFM,11974
 uipath/utils/__init__.py,sha256=VD-KXFpF_oWexFg6zyiWMkxl2HM4hYJMIUDZ1UEtGx0,105
 uipath/utils/_endpoints_manager.py,sha256=iRTl5Q0XAm_YgcnMcJOXtj-8052sr6jpWuPNz6CgT0Q,8408
-uipath-2.1.
-uipath-2.1.
-uipath-2.1.
-uipath-2.1.
-uipath-2.1.
+uipath-2.1.75.dist-info/METADATA,sha256=lxr5-VGeCGyTy7OCmrvbR5NqTL0nsu3T7U5W1BxLEXI,6593
+uipath-2.1.75.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath-2.1.75.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
+uipath-2.1.75.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
+uipath-2.1.75.dist-info/RECORD,,
{uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/WHEEL
File without changes

{uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/entry_points.txt
File without changes

{uipath-2.1.73.dist-info → uipath-2.1.75.dist-info}/licenses/LICENSE
File without changes