uipath 2.1.8__py3-none-any.whl → 2.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,89 @@
+ from datetime import datetime
+ from enum import IntEnum
+ from typing import Any, Dict, List, Optional
+
+ from pydantic import BaseModel, Field
+
+
+ class LLMResponse(BaseModel):
+     score: float
+     justification: str
+
+
+ class EvaluatorCategory(IntEnum):
+     """Types of evaluators."""
+
+     Deterministic = 0
+     LlmAsAJudge = 1
+     AgentScorer = 2
+     Trajectory = 3
+
+     @classmethod
+     def from_int(cls, value):
+         """Construct EvaluatorCategory from an int value."""
+         if value in cls._value2member_map_:
+             return cls(value)
+         else:
+             raise ValueError(f"{value} is not a valid EvaluatorCategory value")
+
+
+ class EvaluatorType(IntEnum):
+     """Subtypes of evaluators."""
+
+     Unknown = 0
+     Equals = 1
+     Contains = 2
+     Regex = 3
+     Factuality = 4
+     Custom = 5
+     JsonSimilarity = 6
+     Trajectory = 7
+     ContextPrecision = 8
+     Faithfulness = 9
+
+     @classmethod
+     def from_int(cls, value):
+         """Construct EvaluatorType from an int value."""
+         if value in cls._value2member_map_:
+             return cls(value)
+         else:
+             raise ValueError(f"{value} is not a valid EvaluatorType value")
+
+
+ class EvaluationResult(BaseModel):
+     """Result of a single evaluation."""
+
+     evaluation_id: str
+     evaluation_name: str
+     evaluator_id: str
+     evaluator_name: str
+     score: float
+     # mark this as optional, as it is populated inside the 'measure_execution_time' decorator
+     evaluation_time: Optional[float] = None
+     input: Dict[str, Any]
+     expected_output: Dict[str, Any]
+     actual_output: Dict[str, Any]
+     timestamp: datetime = Field(default_factory=datetime.utcnow)
+     details: Optional[str] = None
+
+
+ class EvaluationSetResult(BaseModel):
+     """Result of a complete evaluation set."""
+
+     eval_set_id: str
+     eval_set_name: str
+     results: List[EvaluationResult]
+     average_score: float
+
+
+ class ScoreType(IntEnum):
+     BOOLEAN = 0
+     NUMERICAL = 1
+     ERROR = 2
+
+
+ class EvalItemResult(BaseModel):
+     """Result of a single evaluation item."""
+
+     evaluator_id: str
+     result: EvaluationResult
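For orientation, a minimal sketch of how the models above compose. The import path is an assumption made for illustration only; the diff does not show where this module lives inside the package.

# Illustrative usage sketch; the import path below is assumed, not taken from the diff.
from uipath._cli._evals._models._evaluators import (
    EvaluationResult,
    EvaluationSetResult,
    EvaluatorCategory,
)

# One evaluator's verdict for one evaluation item.
result = EvaluationResult(
    evaluation_id="eval-1",
    evaluation_name="greeting check",
    evaluator_id="equals-1",
    evaluator_name="Equals",
    score=1.0,
    input={"message": "hi"},
    expected_output={"reply": "hello"},
    actual_output={"reply": "hello"},
)

# Aggregate container that the evaluation service serializes to disk.
set_result = EvaluationSetResult(
    eval_set_id="set-1",
    eval_set_name="smoke",
    results=[result],
    average_score=result.score,
)

print(set_result.model_dump_json(indent=2))
assert EvaluatorCategory.from_int(1) is EvaluatorCategory.LlmAsAJudge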
@@ -0,0 +1,583 @@
+ """Evaluation service for running and managing evaluation sets."""
+
+ import asyncio
+ import json
+ import os
+ import tempfile
+ import warnings
+ from datetime import datetime, timezone
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+ import click
+
+ from uipath._cli._utils._console import ConsoleLogger, EvaluationProgressManager
+
+ from ..cli_run import run_core  # type: ignore
+ from ._evaluators._evaluator_base import EvaluatorBase
+ from ._evaluators._evaluator_factory import EvaluatorFactory
+ from ._models import (
+     EvaluationSet,
+     EvaluationSetResult,
+ )
+ from ._models._evaluators import EvalItemResult
+ from .progress_reporter import ProgressReporter
+
+ console = ConsoleLogger()
+
+
+ class EvaluationService:
+     """Service for running evaluations."""
+
+     def __init__(
+         self,
+         entrypoint: Optional[str] = None,
+         eval_set_path: Optional[str | Path] = None,
+         workers: int = 8,
+         report_progress: bool = True,
+     ):
+         """Initialize the evaluation service.
+
+         Args:
+             entrypoint: Path to the agent script to evaluate (optional, will auto-discover if not provided)
+             eval_set_path: Path to the evaluation set file (optional, will auto-discover if not provided)
+             workers: Number of parallel workers for running evaluations
+             report_progress: Whether to report progress to StudioWeb
+         """
+         self.entrypoint, self.eval_set_path = self._resolve_paths(
+             entrypoint, eval_set_path
+         )
+         self.eval_set = self._load_eval_set()
+         self._evaluators = self._load_evaluators()
+         self.num_workers = workers
+         self.results_lock = asyncio.Lock()
+         self._progress_manager: Optional[EvaluationProgressManager] = None
+         self._report_progress = report_progress
+         self._progress_reporter: Optional[ProgressReporter] = None
+         self._initialize_results()
+
+     def _resolve_paths(
+         self, entrypoint: Optional[str], eval_set_path: Optional[str | Path]
+     ) -> tuple[str, Path]:
+         """Resolve entrypoint and eval_set_path, auto-discovering if not provided.
+
+         Args:
+             entrypoint: Optional entrypoint path
+             eval_set_path: Optional eval set path
+
+         Returns:
+             Tuple of (resolved_entrypoint, resolved_eval_set_path)
+
+         Raises:
+             ValueError: If paths cannot be resolved or multiple options exist
+         """
+         resolved_entrypoint = entrypoint
+         resolved_eval_set_path = eval_set_path
+
+         if resolved_entrypoint is None:
+             resolved_entrypoint = self._auto_discover_entrypoint()
+
+         if resolved_eval_set_path is None:
+             resolved_eval_set_path = self._auto_discover_eval_set()
+
+         eval_set_path_obj = Path(resolved_eval_set_path)
+         if not eval_set_path_obj.is_file() or eval_set_path_obj.suffix != ".json":
+             raise ValueError("Evaluation set must be a JSON file")
+
+         return resolved_entrypoint, eval_set_path_obj
+
+     def _auto_discover_entrypoint(self) -> str:
+         """Auto-discover entrypoint from config file.
+
+         Returns:
+             Path to the entrypoint
+
+         Raises:
+             ValueError: If no entrypoint found or multiple entrypoints exist
+         """
+         config_file = "uipath.json"
+         if not os.path.isfile(config_file):
+             raise ValueError(
+                 f"File '{config_file}' not found. Please run 'uipath init'."
+             )
+
+         with open(config_file, "r", encoding="utf-8") as f:
+             uipath_config = json.loads(f.read())
+
+         entrypoints = uipath_config.get("entryPoints", [])
+
+         if not entrypoints:
+             raise ValueError(
+                 "No entrypoints found in uipath.json. Please run 'uipath init'."
+             )
+
+         if len(entrypoints) > 1:
+             entrypoint_paths = [ep.get("filePath") for ep in entrypoints]
+             raise ValueError(
+                 f"Multiple entrypoints found: {entrypoint_paths}. "
+                 f"Please specify which entrypoint to use: uipath eval <entrypoint> [eval_set]"
+             )
+
+         entrypoint_path = entrypoints[0].get("filePath")
+
+         console.info(
+             f"Auto-discovered entrypoint: {click.style(entrypoint_path, fg='cyan')}"
+         )
+         return entrypoint_path
+
+     def _auto_discover_eval_set(self) -> str:
+         """Auto-discover evaluation set from evals/eval-sets directory.
+
+         Returns:
+             Path to the evaluation set file
+
+         Raises:
+             ValueError: If no eval set found or multiple eval sets exist
+         """
+         eval_sets_dir = Path("evals/eval-sets")
+
+         if not eval_sets_dir.exists():
+             raise ValueError(
+                 "No 'evals/eval-sets' directory found. "
+                 "Please set 'UIPATH_PROJECT_ID' env var and run 'uipath pull'."
+             )
+
+         eval_set_files = list(eval_sets_dir.glob("*.json"))
+
+         if not eval_set_files:
+             raise ValueError(
+                 "No evaluation set files found in 'evals/eval-sets' directory. "
+             )
+
+         if len(eval_set_files) > 1:
+             file_names = [f.name for f in eval_set_files]
+             raise ValueError(
+                 f"Multiple evaluation sets found: {file_names}. "
+                 f"Please specify which evaluation set to use: 'uipath eval [entrypoint] <eval_set_path>'"
+             )
+
+         eval_set_path = str(eval_set_files[0])
+         console.info(
+             f"Auto-discovered evaluation set: {click.style(eval_set_path, fg='cyan')}"
+         )
+         return eval_set_path
+
+     def _initialize_results(self) -> None:
+         """Initialize the results file and directory."""
+         self._create_and_initialize_results_file()
+         # Initialize progress reporter if needed
+         if self._report_progress:
+             agent_snapshot = self._extract_agent_snapshot()
+             self._progress_reporter = ProgressReporter(
+                 eval_set_id=self.eval_set.id,
+                 agent_snapshot=agent_snapshot,
+                 no_of_evals=len(self.eval_set.evaluations),
+                 evaluators=self._evaluators,
+             )
+
+     def _extract_agent_snapshot(self) -> str:
+         """Extract agent snapshot from uipath.json file.
+
+         Returns:
+             JSON string containing the agent snapshot with input and output schemas
+         """
+         config_file = "uipath.json"
+         if not os.path.isfile(config_file):
+             console.error(f"File '{config_file}' not found. Please run 'uipath init'")
+
+         with open(config_file, "r", encoding="utf-8") as f:
+             file_content = f.read()
+             uipath_config = json.loads(file_content)
+
+         entry_point = None
+         for ep in uipath_config.get("entryPoints", []):
+             if ep.get("filePath") == self.entrypoint:
+                 entry_point = ep
+                 break
+
+         if not entry_point:
+             console.error(
+                 f"No entry point found with filePath '{self.entrypoint}' in uipath.json"
+             )
+
+         input_schema = entry_point.get("input", {})  # type: ignore
+         output_schema = entry_point.get("output", {})  # type: ignore
+
+         # Format as agent snapshot
+         agent_snapshot = {"inputSchema": input_schema, "outputSchema": output_schema}
+
+         return json.dumps(agent_snapshot)
+
+     def _create_and_initialize_results_file(self):
+         # Create results directory if it doesn't exist
+         results_dir = self.eval_set_path.parent.parent / "results"
+         results_dir.mkdir(exist_ok=True)
+
+         # Create results file
+         timestamp = datetime.now(timezone.utc).strftime("%M-%H-%d-%m-%Y")
+         eval_set_name = self.eval_set.name
+         self.result_file = results_dir / f"eval-{eval_set_name}-{timestamp}.json"
+
+         initial_results = EvaluationSetResult(
+             eval_set_id=self.eval_set.id,
+             eval_set_name=self.eval_set.name,
+             results=[],
+             average_score=0.0,
+         )
+
+         with open(self.result_file, "w", encoding="utf-8") as f:
+             f.write(initial_results.model_dump_json(indent=2))
+
+     def _load_eval_set(self) -> EvaluationSet:
+         """Load the evaluation set from file.
+
+         Returns:
+             The loaded evaluation set as EvaluationSet model
+         """
+         with open(self.eval_set_path, "r", encoding="utf-8") as f:
+             data = json.load(f)
+             return EvaluationSet(**data)
+
+     def _load_evaluators(self) -> List[EvaluatorBase]:
+         """Load evaluators referenced by the evaluation set."""
+         evaluators = []
+         evaluators_dir = self.eval_set_path.parent.parent / "evaluators"
+         evaluator_refs = set(self.eval_set.evaluatorRefs)
+         found_evaluator_ids = set()
+
+         # Load evaluators from JSON files
+         for file in evaluators_dir.glob("*.json"):
+             with open(file, "r", encoding="utf-8") as f:
+                 data = json.load(f)
+                 evaluator_id = data.get("id")
+
+                 if evaluator_id in evaluator_refs:
+                     try:
+                         evaluator = EvaluatorFactory.create_evaluator(data)
+                         evaluators.append(evaluator)
+                         found_evaluator_ids.add(evaluator_id)
+                     except Exception as e:
+                         console.warning(
+                             f"Failed to create evaluator {evaluator_id}: {str(e)}"
+                         )
+
+         # Check if all referenced evaluators were found
+         missing_evaluators = evaluator_refs - found_evaluator_ids
+         if missing_evaluators:
+             raise ValueError(
+                 f"Could not find evaluators with IDs: {missing_evaluators}"
+             )
+
+         return evaluators
+
+     async def _write_results(self, results: List[Any]) -> None:
+         """Write evaluation results to file with async lock.
+
+         Args:
+             results: List of evaluation results to write
+         """
+         async with self.results_lock:
+             # Read current results
+             with open(self.result_file, "r", encoding="utf-8") as f:
+                 current_results = EvaluationSetResult.model_validate_json(f.read())
+
+             # Add new results
+             current_results.results.extend(results)
+
+             if current_results.results:
+                 current_results.average_score = sum(
+                     r.score for r in current_results.results
+                 ) / len(current_results.results)
+
+             # Write updated results
+             with open(self.result_file, "w", encoding="utf-8") as f:
+                 f.write(current_results.model_dump_json(indent=2))
+
+     async def _results_queue_consumer(self, results_queue: asyncio.Queue[Any]) -> None:
+         """Consumer task for the results queue that writes to local file.
+
+         Args:
+             results_queue: Queue containing evaluation results to write to file
+         """
+         while True:
+             results: list[EvalItemResult] = await results_queue.get()
+             if results is None:
+                 # Sentinel value - consumer should stop
+                 results_queue.task_done()
+                 return
+
+             try:
+                 await self._write_results([eval_item.result for eval_item in results])
+                 results_queue.task_done()
+             except Exception as e:
+                 console.warning(f"Error writing results to file: {str(e)}")
+                 results_queue.task_done()
+
+     async def _sw_progress_reporter_queue_consumer(
+         self, sw_progress_reporter_queue: asyncio.Queue[Any]
+     ) -> None:
+         """Consumer task for the SW progress reporter.
+
+         Args:
+             sw_progress_reporter_queue: Queue containing evaluation results to report to StudioWeb
+         """
+         while True:
+             queue_item = await sw_progress_reporter_queue.get()
+             if queue_item is None:
+                 # Sentinel value - consumer should stop
+                 sw_progress_reporter_queue.task_done()
+                 return
+             eval_run_id: str
+             eval_results: list[EvalItemResult]
+             success: bool
+             execution_time: float
+
+             eval_run_id, eval_results, success, execution_time = queue_item
+
+             try:
+                 if self._progress_reporter:
+                     await self._progress_reporter.update_eval_run(
+                         eval_results, eval_run_id, success, execution_time
+                     )
+                 sw_progress_reporter_queue.task_done()
+             except Exception as e:
+                 console.warning(f"Error reporting progress to StudioWeb: {str(e)}")
+                 sw_progress_reporter_queue.task_done()
+
+     def _run_agent(self, input_json: str) -> tuple[Dict[str, Any], bool, float]:
+         """Run the agent with the given input.
+
+         Args:
+             input_json: JSON string containing input data
+
+         Returns:
+             Tuple of (agent output dictionary, success flag, execution time in seconds)
+         """
+         with tempfile.TemporaryDirectory() as tmpdir:
+             try:
+                 import time
+
+                 execution_time = 0.0  # fallback in case run_core raises before timing is recorded
+                 output_file = Path(tmpdir) / "output.json"
+                 logs_file = Path(tmpdir) / "execution.log"
+
+                 # Suppress LangChain deprecation warnings during agent execution
+                 with warnings.catch_warnings():
+                     warnings.filterwarnings(
+                         "ignore", category=UserWarning, module="langchain"
+                     )
+                     # Note: Progress reporting is handled outside this method since it's async
+                     start_time = time.time()
+                     success, error_message, info_message = run_core(
+                         entrypoint=self.entrypoint,
+                         input=input_json,
+                         resume=False,
+                         input_file=None,
+                         execution_output_file=output_file,
+                         logs_file=logs_file,
+                         runtime_dir=tmpdir,
+                         is_eval_run=True,
+                     )
+                     execution_time = time.time() - start_time
+                     if not success:
+                         console.warning(error_message)
+                         return {}, False, execution_time
+                     else:
+                         # Read the output file
+                         with open(output_file, "r", encoding="utf-8") as f:
+                             result = json.load(f)
+
+                         # uncomment the following lines to have access to the execution.logs (needed for some types of evals)
+                         # with open(logs_file, "r", encoding="utf-8") as f:
+                         #     logs = f.read()
+                         if isinstance(result, str):
+                             try:
+                                 return json.loads(result), True, execution_time
+                             except json.JSONDecodeError as e:
+                                 raise Exception(f"Error parsing output: {e}") from e
+                         return result, True, execution_time
+
+             except Exception as e:
+                 console.warning(f"Error running agent: {str(e)}")
+                 return {"error": str(e)}, False, execution_time
+
+     async def _process_evaluation(
+         self,
+         eval_item: Dict[str, Any],
+         results_queue: asyncio.Queue[Any],
+         sw_progress_reporter_queue: asyncio.Queue[Any],
+     ) -> None:
+         """Process a single evaluation item.
+
+         Args:
+             eval_item: The evaluation item to process
+             results_queue: Queue for local file results
+             sw_progress_reporter_queue: Queue for StudioWeb progress reporting
+         """
+         eval_id = eval_item["id"]
+         eval_run_id: Optional[str] = None
+
+         try:
+             input_json = json.dumps(eval_item["inputs"])
+
+             if self._report_progress and self._progress_reporter:
+                 eval_run_id = await self._progress_reporter.create_eval_run(eval_item)
+
+             loop = asyncio.get_running_loop()
+             actual_output, success, execution_time = await loop.run_in_executor(
+                 None,
+                 self._run_agent,
+                 input_json,
+             )
+
+             if success:
+                 # Run each evaluator
+                 eval_results: list[EvalItemResult] = []
+                 for evaluator in self._evaluators:
+                     result = await evaluator.evaluate(
+                         evaluation_id=eval_item["id"],
+                         evaluation_name=eval_item["name"],
+                         input_data=eval_item["inputs"],
+                         expected_output=eval_item["expectedOutput"],
+                         actual_output=actual_output,
+                     )
+                     eval_results.append(
+                         EvalItemResult(evaluator_id=evaluator.id, result=result)
+                     )
+
+                 await results_queue.put(eval_results)
+                 if self._report_progress:
+                     # TODO: modify this, here we are only reporting for success
+                     await sw_progress_reporter_queue.put(
+                         (eval_run_id, eval_results, success, execution_time)
+                     )
+
+                 # Update progress to completed
+                 if self._progress_manager:
+                     self._progress_manager.complete_evaluation(eval_id)
+             else:
+                 # Mark as failed if agent execution failed
+                 if self._progress_manager:
+                     self._progress_manager.fail_evaluation(
+                         eval_id, "Agent execution failed"
+                     )
+
+         except Exception as e:
+             # Mark as failed with error message
+             if self._progress_manager:
+                 self._progress_manager.fail_evaluation(eval_id, str(e))
+             raise
+
+     async def _producer_task(self, task_queue: asyncio.Queue[Any]) -> None:
+         """Producer task that adds all evaluations to the queue.
+
+         Args:
+             task_queue: The asyncio queue to add tasks to
+         """
+         for eval_item in self.eval_set.evaluations:
+             await task_queue.put(eval_item.model_dump())
+
+         # Add sentinel values to signal workers to stop
+         for _ in range(self.num_workers):
+             await task_queue.put(None)
+
+     async def _consumer_task(
+         self,
+         task_queue: asyncio.Queue[Any],
+         worker_id: int,
+         results_queue: asyncio.Queue[Any],
+         sw_progress_reporter_queue: asyncio.Queue[Any],
+     ) -> None:
+         """Consumer task that processes evaluations from the queue.
+
+         Args:
+             task_queue: The asyncio queue to get tasks from
+             worker_id: ID of this worker for logging
+             results_queue: Queue for local file results
+             sw_progress_reporter_queue: Queue for StudioWeb progress reporting
+         """
+         while True:
+             eval_item = await task_queue.get()
+             if eval_item is None:
+                 # Sentinel value - worker should stop
+                 task_queue.task_done()
+                 return
+
+             try:
+                 await self._process_evaluation(
+                     eval_item, results_queue, sw_progress_reporter_queue
+                 )
+                 task_queue.task_done()
+             except Exception as e:
+                 # Log error and continue to next item
+                 task_queue.task_done()
+                 console.warning(
+                     f"Evaluation {eval_item.get('name', 'Unknown')} failed: {str(e)}"
+                 )
+
+     async def run_evaluation(self) -> None:
+         """Run the evaluation set using multiple worker tasks."""
+         console.info(
+             f"Starting evaluation of the {click.style(self.eval_set.name, fg='cyan')} evaluation set..."
+         )
+
+         if self._report_progress and self._progress_reporter:
+             await self._progress_reporter.create_eval_set_run()
+
+         # Prepare items for progress tracker
+         progress_items = [
+             {"id": eval_item.id, "name": eval_item.name}
+             for eval_item in self.eval_set.evaluations
+         ]
+
+         with console.evaluation_progress(progress_items) as progress_manager:
+             self._progress_manager = progress_manager
+
+             task_queue: asyncio.Queue[Any] = asyncio.Queue()
+             results_queue: asyncio.Queue[Any] = asyncio.Queue()
+             sw_progress_reporter_queue: asyncio.Queue[Any] = asyncio.Queue()
+
+             producer = asyncio.create_task(self._producer_task(task_queue))
+
+             consumers = []
+             for worker_id in range(self.num_workers):
+                 consumer = asyncio.create_task(
+                     self._consumer_task(
+                         task_queue, worker_id, results_queue, sw_progress_reporter_queue
+                     )
+                 )
+                 consumers.append(consumer)
+
+             # Create results queue consumer
+             results_consumer = asyncio.create_task(
+                 self._results_queue_consumer(results_queue)
+             )
+
+             # Create SW progress reporter queue consumer
+             sw_progress_consumer = None
+             if self._report_progress:
+                 sw_progress_consumer = asyncio.create_task(
+                     self._sw_progress_reporter_queue_consumer(
+                         sw_progress_reporter_queue
+                     )
+                 )
+
+             # Wait for producer to finish
+             await producer
+             await task_queue.join()
+
+             # Wait for all consumers to finish
+             await asyncio.gather(*consumers)
+
+             # Signal queue consumers to stop by sending sentinel values
+             await results_queue.put(None)
+             if self._report_progress:
+                 await sw_progress_reporter_queue.put(None)
+
+             await results_consumer
+             if sw_progress_consumer:
+                 await sw_progress_consumer
+
+             if self._progress_reporter:
+                 await self._progress_reporter.update_eval_set_run()
+
+         console.info(f"Results saved to {click.style(self.result_file, fg='cyan')}")
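Taken together, the service is driven by a single async entry point: construct EvaluationService and await run_evaluation(). A minimal sketch under stated assumptions: the import path is a guess (the diff does not show the module's location), and the working directory is assumed to contain uipath.json plus an evals/eval-sets/ folder so auto-discovery can resolve the paths.

import asyncio

# Assumed import path for illustration; the diff does not show where this module lives.
from uipath._cli._evals._evaluation_service import EvaluationService


async def main() -> None:
    # With no arguments, the service auto-discovers the entrypoint from uipath.json
    # and the eval set from evals/eval-sets/ (see _resolve_paths above).
    service = EvaluationService(workers=4, report_progress=False)
    await service.run_evaluation()


if __name__ == "__main__":
    asyncio.run(main())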