uipath 2.1.113__py3-none-any.whl → 2.1.115__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of uipath might be problematic. See the registry's release page for more details.

@@ -18,6 +18,7 @@ from uipath._cli._runtime._contracts import (
18
18
  UiPathRuntimeResult,
19
19
  UiPathRuntimeStatus,
20
20
  )
21
+ from uipath._cli._utils._common import serialize_object
21
22
  from uipath._events._events import UiPathAgentStateEvent
22
23
 
23
24
  logger = logging.getLogger(__name__)
@@ -622,10 +623,12 @@ class SignalRDebugBridge(UiPathDebugBridge):
622
623
  try:
623
624
  # Wrap the event in SendCommand protocol
624
625
  # Server expects: SendCommand(event_name, json_string_of_data)
625
- data_json = json.dumps(data)
626
+ # Use serialize_object to recursively handle Pydantic models and nested objects
627
+ serialized_data = serialize_object(data)
628
+ data_json = json.dumps(serialized_data)
626
629
  arguments: list[Any] = [event_name, data_json]
627
630
  await self._client.send(method="SendCommand", arguments=arguments)
628
- logger.debug(f"Sent command: {event_name} with data: {data}")
631
+ logger.debug(f"Sent command: {event_name}")
629
632
  except Exception as e:
630
633
  logger.error(f"Error sending command {event_name} to SignalR hub: {e}")
631
634
 
@@ -633,31 +636,31 @@ class SignalRDebugBridge(UiPathDebugBridge):
633
636
  """Handle Start command from SignalR server.
634
637
 
635
638
  Args:
636
- args: List containing command arguments, typically [dict_with_args]
639
+ args: List containing command arguments as JSON string
637
640
  """
638
- logger.info(f"Start command received with args: {args}")
639
641
  if not args or len(args) == 0:
640
642
  logger.warning("Start command received with empty args.")
641
643
  return
642
644
 
643
- command_args = args[0] if isinstance(args[0], dict) else {}
645
+ command_args = json.loads(args[0])
644
646
  self.state.breakpoints = set(command_args.get("breakpoints", []))
645
647
  step_mode = command_args.get("enableStepMode", False)
646
648
  self.state.step_mode = step_mode
649
+ logger.info(
650
+ f"Debug started: breakpoints={self.state.breakpoints}, step_mode={step_mode}"
651
+ )
647
652
 
648
653
  async def _handle_resume(self, args: list[Any]) -> None:
649
654
  """Handle Resume command from SignalR server.
650
655
 
651
656
  Args:
652
- args: List containing command arguments
657
+ args: List containing command arguments as JSON string
653
658
  """
654
- logger.info(f"Resume command received with args: {args}")
655
- command_args = args[0] if args and len(args) > 0 else {}
659
+ command_args = json.loads(args[0]) if args and len(args) > 0 else {}
656
660
 
657
661
  if self._resume_event:
658
662
  self._resume_data = command_args
659
663
  self._resume_event.set()
660
- logger.info("Resume event set")
661
664
  else:
662
665
  logger.warning("Resume command received but no resume event is waiting")
663
666
 
@@ -665,24 +668,23 @@ class SignalRDebugBridge(UiPathDebugBridge):
665
668
  """Handle Step command from SignalR server.
666
669
 
667
670
  Args:
668
- args: List containing command arguments
671
+ args: List containing command arguments as JSON string
669
672
  """
670
- logger.info(f"Step command received with args: {args}")
671
- self.state.step_mode = True
672
- logger.info("Step mode enabled")
673
+ command_args = json.loads(args[0]) if args and len(args) > 0 else {}
674
+ step_mode = command_args.get("enableStepMode", True)
675
+ self.state.step_mode = step_mode
673
676
 
674
677
  async def _handle_add_breakpoints(self, args: list[Any]) -> None:
675
678
  """Handle AddBreakpoints command from SignalR server.
676
679
 
677
680
  Args:
678
- args: List containing command arguments with breakpoints list
681
+ args: List containing command arguments as JSON string with breakpoints list
679
682
  """
680
- logger.info(f"AddBreakpoints command received with args: {args}")
681
683
  if not args or len(args) == 0:
682
684
  logger.warning("AddBreakpoints command received with empty args.")
683
685
  return
684
686
 
685
- command_args = args[0] if isinstance(args[0], dict) else {}
687
+ command_args = json.loads(args[0])
686
688
  break_points = command_args.get("breakpoints", [])
687
689
 
688
690
  for bp in break_points:
@@ -693,23 +695,22 @@ class SignalRDebugBridge(UiPathDebugBridge):
693
695
  )
694
696
  if node_name:
695
697
  self.state.add_breakpoint(node_name)
696
- logger.info(f"Breakpoint set at: {node_name}")
698
+ logger.info(f"Breakpoint added: {node_name}")
697
699
  else:
698
- logger.warning(f"Breakpoint command received without node name: {bp}")
700
+ logger.warning(f"Breakpoint without node name: {bp}")
699
701
 
700
702
  async def _handle_remove_breakpoints(self, args: list[Any]) -> None:
701
703
  """Handle RemoveBreakpoints command from SignalR server.
702
704
 
703
705
  Args:
704
- args: List containing command arguments with breakpoints list
706
+ args: List containing command arguments as JSON string with breakpoints list
705
707
  """
706
- logger.info(f"RemoveBreakpoints command received with args: {args}")
707
708
  if not args or len(args) == 0:
708
709
  self.state.clear_all_breakpoints()
709
710
  logger.info("All breakpoints cleared")
710
711
  return
711
712
 
712
- command_args = args[0] if isinstance(args[0], dict) else {}
713
+ command_args = json.loads(args[0])
713
714
  break_points = command_args.get("breakpoints", [])
714
715
 
715
716
  if not break_points:
@@ -726,16 +727,9 @@ class SignalRDebugBridge(UiPathDebugBridge):
726
727
  self.state.remove_breakpoint(node_name)
727
728
  logger.info(f"Breakpoint removed: {node_name}")
728
729
 
729
- async def _handle_quit(self, args: list[Any]) -> None:
730
- """Handle Quit command from SignalR server.
731
-
732
- Args:
733
- args: List containing command arguments
734
- """
735
- if args:
736
- logger.info(f"Quit command received from server with args: {args}")
737
- else:
738
- logger.info("Quit command received from server")
730
+ async def _handle_quit(self, _args: list[Any]) -> None:
731
+ """Handle Quit command from SignalR server."""
732
+ logger.info("Quit command received")
739
733
  raise DebuggerQuitException("Quit command received from server")
740
734
 
741
735
  async def _handle_open(self) -> None:
@@ -1,5 +1,6 @@
1
1
  class EvaluationRuntimeException(Exception):
2
- def __init__(self, spans, logs, root_exception):
2
+ def __init__(self, spans, logs, root_exception, execution_time):
3
3
  self.spans = spans
4
4
  self.logs = logs
5
5
  self.root_exception = root_exception
6
+ self.execution_time = execution_time
@@ -4,6 +4,7 @@ import functools
4
4
  import json
5
5
  import logging
6
6
  import os
7
+ import uuid
7
8
  from typing import Any, Dict, List
8
9
  from urllib.parse import urlparse
9
10
 
@@ -215,9 +216,12 @@ class StudioWebProgressReporter:
215
216
  agent_snapshot: StudioWebAgentSnapshot,
216
217
  no_of_evals: int,
217
218
  evaluators: List[LegacyBaseEvaluator[Any]],
219
+ is_coded: bool = False,
218
220
  ) -> str:
219
221
  """Create a new evaluation set run in StudioWeb."""
220
- spec = self._create_eval_set_run_spec(eval_set_id, agent_snapshot, no_of_evals)
222
+ spec = self._create_eval_set_run_spec(
223
+ eval_set_id, agent_snapshot, no_of_evals, is_coded
224
+ )
221
225
  response = await self._client.request_async(
222
226
  method=spec.method,
223
227
  url=spec.endpoint,
@@ -231,18 +235,19 @@ class StudioWebProgressReporter:
231
235
 
232
236
  @gracefully_handle_errors
233
237
  async def create_eval_run(
234
- self, eval_item: AnyEvaluationItem, eval_set_run_id: str
238
+ self, eval_item: AnyEvaluationItem, eval_set_run_id: str, is_coded: bool = False
235
239
  ) -> str:
236
240
  """Create a new evaluation run in StudioWeb.
237
241
 
238
242
  Args:
239
243
  eval_item: Dictionary containing evaluation data
240
244
  eval_set_run_id: The ID of the evaluation set run
245
+ is_coded: Whether this is a coded evaluation (vs legacy)
241
246
 
242
247
  Returns:
243
248
  The ID of the created evaluation run
244
249
  """
245
- spec = self._create_eval_run_spec(eval_item, eval_set_run_id)
250
+ spec = self._create_eval_run_spec(eval_item, eval_set_run_id, is_coded)
246
251
  response = await self._client.request_async(
247
252
  method=spec.method,
248
253
  url=spec.endpoint,
@@ -289,13 +294,25 @@ class StudioWebProgressReporter:
289
294
  evaluator_runs.extend(runs)
290
295
  evaluator_scores.extend(scores)
291
296
 
292
- spec = self._update_eval_run_spec(
293
- assertion_runs=evaluator_runs,
294
- evaluator_scores=evaluator_scores,
295
- eval_run_id=sw_progress_item.eval_run_id,
296
- execution_time=sw_progress_item.agent_execution_time,
297
- actual_output=sw_progress_item.agent_output,
298
- )
297
+ # Use the appropriate spec method based on evaluation type
298
+ if is_coded:
299
+ spec = self._update_coded_eval_run_spec(
300
+ evaluator_runs=evaluator_runs,
301
+ evaluator_scores=evaluator_scores,
302
+ eval_run_id=sw_progress_item.eval_run_id,
303
+ execution_time=sw_progress_item.agent_execution_time,
304
+ actual_output=sw_progress_item.agent_output,
305
+ is_coded=is_coded,
306
+ )
307
+ else:
308
+ spec = self._update_eval_run_spec(
309
+ assertion_runs=evaluator_runs,
310
+ evaluator_scores=evaluator_scores,
311
+ eval_run_id=sw_progress_item.eval_run_id,
312
+ execution_time=sw_progress_item.agent_execution_time,
313
+ actual_output=sw_progress_item.agent_output,
314
+ is_coded=is_coded,
315
+ )
299
316
 
300
317
  await self._client.request_async(
301
318
  method=spec.method,
@@ -311,9 +328,12 @@ class StudioWebProgressReporter:
311
328
  self,
312
329
  eval_set_run_id: str,
313
330
  evaluator_scores: dict[str, float],
331
+ is_coded: bool = False,
314
332
  ):
315
333
  """Update the evaluation set run status to complete."""
316
- spec = self._update_eval_set_run_spec(eval_set_run_id, evaluator_scores)
334
+ spec = self._update_eval_set_run_spec(
335
+ eval_set_run_id, evaluator_scores, is_coded
336
+ )
317
337
  await self._client.request_async(
318
338
  method=spec.method,
319
339
  url=spec.endpoint,
@@ -337,6 +357,7 @@ class StudioWebProgressReporter:
337
357
  agent_snapshot=self._extract_agent_snapshot(payload.entrypoint),
338
358
  no_of_evals=payload.no_of_evals,
339
359
  evaluators=payload.evaluators,
360
+ is_coded=is_coded,
340
361
  )
341
362
  self.eval_set_run_ids[payload.execution_id] = eval_set_run_id
342
363
  current_span = trace.get_current_span()
@@ -353,12 +374,16 @@ class StudioWebProgressReporter:
353
374
  async def handle_create_eval_run(self, payload: EvalRunCreatedEvent) -> None:
354
375
  try:
355
376
  if eval_set_run_id := self.eval_set_run_ids.get(payload.execution_id):
377
+ # Get the is_coded flag for this execution
378
+ is_coded = self.is_coded_eval.get(payload.execution_id, False)
356
379
  eval_run_id = await self.create_eval_run(
357
- payload.eval_item, eval_set_run_id
380
+ payload.eval_item, eval_set_run_id, is_coded
358
381
  )
359
382
  if eval_run_id:
360
383
  self.eval_run_ids[payload.execution_id] = eval_run_id
361
- logger.debug(f"Created eval run with ID: {eval_run_id}")
384
+ logger.debug(
385
+ f"Created eval run with ID: {eval_run_id} (coded={is_coded})"
386
+ )
362
387
  else:
363
388
  logger.warning("Cannot create eval run: eval_set_run_id not available")
364
389
 
@@ -419,11 +444,16 @@ class StudioWebProgressReporter:
419
444
  async def handle_update_eval_set_run(self, payload: EvalSetRunUpdatedEvent) -> None:
420
445
  try:
421
446
  if eval_set_run_id := self.eval_set_run_ids.get(payload.execution_id):
447
+ # Get the is_coded flag for this execution
448
+ is_coded = self.is_coded_eval.get(payload.execution_id, False)
422
449
  await self.update_eval_set_run(
423
450
  eval_set_run_id,
424
451
  payload.evaluator_scores,
452
+ is_coded=is_coded,
453
+ )
454
+ logger.debug(
455
+ f"Updated eval set run with ID: {eval_set_run_id} (coded={is_coded})"
425
456
  )
426
- logger.debug(f"Updated eval set run with ID: {eval_set_run_id}")
427
457
  else:
428
458
  logger.warning(
429
459
  "Cannot update eval set run: eval_set_run_id not available"
@@ -486,18 +516,33 @@ class StudioWebProgressReporter:
486
516
  usage_metrics = self._extract_usage_from_spans(spans)
487
517
 
488
518
  for eval_result in eval_results:
519
+ # Skip results for evaluators not in the provided dict
520
+ # (happens when processing mixed coded/legacy eval sets)
521
+ if eval_result.evaluator_id not in evaluators:
522
+ continue
523
+
524
+ # Legacy API expects evaluatorId as GUID, convert string to GUID
525
+ try:
526
+ uuid.UUID(eval_result.evaluator_id)
527
+ evaluator_id_value = eval_result.evaluator_id
528
+ except ValueError:
529
+ # Generate deterministic UUID5 from string
530
+ evaluator_id_value = str(
531
+ uuid.uuid5(uuid.NAMESPACE_DNS, eval_result.evaluator_id)
532
+ )
533
+
489
534
  evaluator_scores_list.append(
490
535
  {
491
536
  "type": eval_result.result.score_type.value,
492
537
  "value": eval_result.result.score,
493
538
  "justification": eval_result.result.details,
494
- "evaluatorId": eval_result.evaluator_id,
539
+ "evaluatorId": evaluator_id_value,
495
540
  }
496
541
  )
497
542
  assertion_runs.append(
498
543
  {
499
544
  "status": EvaluationStatus.COMPLETED.value,
500
- "evaluatorId": eval_result.evaluator_id,
545
+ "evaluatorId": evaluator_id_value,
501
546
  "completionMetrics": {
502
547
  "duration": int(eval_result.result.evaluation_time)
503
548
  if eval_result.result.evaluation_time
@@ -536,6 +581,11 @@ class StudioWebProgressReporter:
536
581
  usage_metrics = self._extract_usage_from_spans(spans)
537
582
 
538
583
  for eval_result in eval_results:
584
+ # Skip results for evaluators not in the provided dict
585
+ # (happens when processing mixed coded/legacy eval sets)
586
+ if eval_result.evaluator_id not in evaluators:
587
+ continue
588
+
539
589
  evaluator_scores_list.append(
540
590
  {
541
591
  "type": eval_result.result.score_type.value,
@@ -575,17 +625,20 @@ class StudioWebProgressReporter:
575
625
  eval_run_id: str,
576
626
  actual_output: dict[str, Any],
577
627
  execution_time: float,
628
+ is_coded: bool = False,
578
629
  ) -> RequestSpec:
630
+ # For legacy evaluations, endpoint is without /coded
631
+ endpoint_suffix = "coded/" if is_coded else ""
579
632
  return RequestSpec(
580
633
  method="PUT",
581
634
  endpoint=Endpoint(
582
- f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/coded/evalRun"
635
+ f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/{endpoint_suffix}evalRun"
583
636
  ),
584
637
  json={
585
638
  "evalRunId": eval_run_id,
586
639
  "status": EvaluationStatus.COMPLETED.value,
587
640
  "result": {
588
- "output": {"content": {**actual_output}},
641
+ "output": {**actual_output},
589
642
  "evaluatorScores": evaluator_scores,
590
643
  },
591
644
  "completionMetrics": {"duration": int(execution_time)},
@@ -601,18 +654,21 @@ class StudioWebProgressReporter:
601
654
  eval_run_id: str,
602
655
  actual_output: dict[str, Any],
603
656
  execution_time: float,
657
+ is_coded: bool = False,
604
658
  ) -> RequestSpec:
605
659
  """Create update spec for coded evaluators."""
660
+ # For coded evaluations, endpoint has /coded
661
+ endpoint_suffix = "coded/" if is_coded else ""
606
662
  return RequestSpec(
607
663
  method="PUT",
608
664
  endpoint=Endpoint(
609
- f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/coded/evalRun"
665
+ f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/{endpoint_suffix}evalRun"
610
666
  ),
611
667
  json={
612
668
  "evalRunId": eval_run_id,
613
669
  "status": EvaluationStatus.COMPLETED.value,
614
670
  "result": {
615
- "output": {"content": {**actual_output}},
671
+ "output": {**actual_output},
616
672
  "scores": evaluator_scores,
617
673
  },
618
674
  "completionMetrics": {"duration": int(execution_time)},
@@ -622,11 +678,24 @@ class StudioWebProgressReporter:
622
678
  )
623
679
 
624
680
  def _create_eval_run_spec(
625
- self, eval_item: AnyEvaluationItem, eval_set_run_id: str
681
+ self, eval_item: AnyEvaluationItem, eval_set_run_id: str, is_coded: bool = False
626
682
  ) -> RequestSpec:
683
+ # Legacy API expects eval IDs as GUIDs, coded accepts strings
684
+ # Convert string IDs to deterministic GUIDs for legacy
685
+ if is_coded:
686
+ eval_item_id = eval_item.id
687
+ else:
688
+ # Try to parse as GUID, if it fails, generate deterministic GUID from string
689
+ try:
690
+ uuid.UUID(eval_item.id)
691
+ eval_item_id = eval_item.id
692
+ except ValueError:
693
+ # Generate deterministic UUID5 from string
694
+ eval_item_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, eval_item.id))
695
+
627
696
  # Build eval snapshot based on evaluation item type
628
697
  eval_snapshot = {
629
- "id": eval_item.id,
698
+ "id": eval_item_id,
630
699
  "name": eval_item.name,
631
700
  "inputs": eval_item.inputs,
632
701
  }
@@ -638,10 +707,12 @@ class StudioWebProgressReporter:
638
707
  else:
639
708
  eval_snapshot["expectedOutput"] = eval_item.expected_output
640
709
 
710
+ # For legacy evaluations, endpoint is without /coded
711
+ endpoint_suffix = "coded/" if is_coded else ""
641
712
  return RequestSpec(
642
713
  method="POST",
643
714
  endpoint=Endpoint(
644
- f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/coded/evalRun"
715
+ f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/{endpoint_suffix}evalRun"
645
716
  ),
646
717
  json={
647
718
  "evalSetRunId": eval_set_run_id,
@@ -656,19 +727,42 @@ class StudioWebProgressReporter:
656
727
  eval_set_id: str,
657
728
  agent_snapshot: StudioWebAgentSnapshot,
658
729
  no_of_evals: int,
730
+ is_coded: bool = False,
659
731
  ) -> RequestSpec:
732
+ # For legacy evaluations, endpoint is without /coded
733
+ endpoint_suffix = "coded/" if is_coded else ""
734
+
735
+ # Legacy API expects evalSetId as GUID, coded accepts string
736
+ # Convert string IDs to deterministic GUIDs for legacy
737
+ if is_coded:
738
+ eval_set_id_value = eval_set_id
739
+ else:
740
+ # Try to parse as GUID, if it fails, generate deterministic GUID from string
741
+ try:
742
+ uuid.UUID(eval_set_id)
743
+ eval_set_id_value = eval_set_id
744
+ except ValueError:
745
+ # Generate deterministic UUID5 from string
746
+ eval_set_id_value = str(uuid.uuid5(uuid.NAMESPACE_DNS, eval_set_id))
747
+
748
+ payload = {
749
+ "agentId": self._project_id,
750
+ "evalSetId": eval_set_id_value,
751
+ "agentSnapshot": agent_snapshot.model_dump(by_alias=True),
752
+ "status": EvaluationStatus.IN_PROGRESS.value,
753
+ "numberOfEvalsExecuted": no_of_evals,
754
+ }
755
+
756
+ # Add version field for coded evaluations
757
+ if is_coded:
758
+ payload["version"] = "1.0"
759
+
660
760
  return RequestSpec(
661
761
  method="POST",
662
762
  endpoint=Endpoint(
663
- f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/coded/evalSetRun"
763
+ f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/{endpoint_suffix}evalSetRun"
664
764
  ),
665
- json={
666
- "agentId": self._project_id,
667
- "evalSetId": eval_set_id,
668
- "agentSnapshot": agent_snapshot.model_dump(by_alias=True),
669
- "status": EvaluationStatus.IN_PROGRESS.value,
670
- "numberOfEvalsExecuted": no_of_evals,
671
- },
765
+ json=payload,
672
766
  headers=self._tenant_header(),
673
767
  )
674
768
 
@@ -676,16 +770,34 @@ class StudioWebProgressReporter:
676
770
  self,
677
771
  eval_set_run_id: str,
678
772
  evaluator_scores: dict[str, float],
773
+ is_coded: bool = False,
679
774
  ) -> RequestSpec:
680
- evaluator_scores_list = [
681
- {"value": avg_score, "evaluatorId": evaluator_id}
682
- for evaluator_id, avg_score in evaluator_scores.items()
683
- ]
775
+ # Legacy API expects evaluatorId as GUID, coded accepts string
776
+ evaluator_scores_list = []
777
+ for evaluator_id, avg_score in evaluator_scores.items():
778
+ if is_coded:
779
+ evaluator_id_value = evaluator_id
780
+ else:
781
+ # Convert string to GUID for legacy
782
+ try:
783
+ uuid.UUID(evaluator_id)
784
+ evaluator_id_value = evaluator_id
785
+ except ValueError:
786
+ # Generate deterministic UUID5 from string
787
+ evaluator_id_value = str(
788
+ uuid.uuid5(uuid.NAMESPACE_DNS, evaluator_id)
789
+ )
790
+
791
+ evaluator_scores_list.append(
792
+ {"value": avg_score, "evaluatorId": evaluator_id_value}
793
+ )
684
794
 
795
+ # For legacy evaluations, endpoint is without /coded
796
+ endpoint_suffix = "coded/" if is_coded else ""
685
797
  return RequestSpec(
686
798
  method="PUT",
687
799
  endpoint=Endpoint(
688
- f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/coded/evalSetRun"
800
+ f"{self._get_endpoint_prefix()}execution/agents/{self._project_id}/{endpoint_suffix}evalSetRun"
689
801
  ),
690
802
  json={
691
803
  "evalSetRunId": eval_set_run_id,
@@ -30,6 +30,8 @@ from ...eval.models import EvaluationResult
30
30
  from ...eval.models.models import AgentExecution, EvalItemResult
31
31
  from .._runtime._contracts import (
32
32
  UiPathBaseRuntime,
33
+ UiPathErrorCategory,
34
+ UiPathErrorContract,
33
35
  UiPathExecutionBatchTraceProcessor,
34
36
  UiPathRuntimeContext,
35
37
  UiPathRuntimeFactory,
@@ -364,7 +366,43 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
364
366
  )
365
367
 
366
368
  try:
367
- agent_execution_output = await self.execute_runtime(eval_item, execution_id)
369
+ try:
370
+ agent_execution_output = await self.execute_runtime(
371
+ eval_item, execution_id
372
+ )
373
+ except Exception as e:
374
+ if self.context.verbose:
375
+ error_info = UiPathErrorContract(
376
+ code="RUNTIME_SHUTDOWN_ERROR",
377
+ title="Runtime shutdown failed",
378
+ detail=f"Error: {str(e)}",
379
+ category=UiPathErrorCategory.UNKNOWN,
380
+ )
381
+ error_result = UiPathRuntimeResult(
382
+ status=UiPathRuntimeStatus.FAULTED,
383
+ error=error_info,
384
+ )
385
+ if isinstance(e, EvaluationRuntimeException):
386
+ spans = e.spans
387
+ logs = e.logs
388
+ execution_time = e.execution_time
389
+ else:
390
+ spans = []
391
+ logs = []
392
+ execution_time = 0
393
+
394
+ evaluation_run_results.agent_execution_output = (
395
+ convert_eval_execution_output_to_serializable(
396
+ UiPathEvalRunExecutionOutput(
397
+ execution_time=execution_time,
398
+ result=error_result,
399
+ spans=spans,
400
+ logs=logs,
401
+ )
402
+ )
403
+ )
404
+ raise
405
+
368
406
  if self.context.verbose:
369
407
  evaluation_run_results.agent_execution_output = (
370
408
  convert_eval_execution_output_to_serializable(
@@ -530,6 +568,7 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
530
568
  runtime_context, root_span=eval_item.name, attributes=attributes
531
569
  )
532
570
  except Exception as e:
571
+ end_time = time()
533
572
  spans, logs = self._get_and_clear_execution_data(
534
573
  runtime_context.execution_id
535
574
  )
@@ -537,6 +576,7 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
537
576
  spans=spans,
538
577
  logs=logs,
539
578
  root_exception=e,
579
+ execution_time=end_time - start_time,
540
580
  ) from e
541
581
 
542
582
  end_time = time()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: uipath
3
- Version: 2.1.113
3
+ Version: 2.1.115
4
4
  Summary: Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools.
5
5
  Project-URL: Homepage, https://uipath.com
6
6
  Project-URL: Repository, https://github.com/UiPath/uipath-python
@@ -34,7 +34,7 @@ uipath/_cli/_auth/auth_config.json,sha256=o8J5BBFwiEtjZLHpJ_64lvnTeYeRIHaJ-Bhg0Q
34
34
  uipath/_cli/_auth/index.html,sha256=uGK0CDTP8Rys_p4O_Pbd2x4tz0frKNVcumjrXnal5Nc,22814
35
35
  uipath/_cli/_auth/localhost.crt,sha256=oGl9oLLOiouHubAt39B4zEfylFvKEtbtr_43SIliXJc,1226
36
36
  uipath/_cli/_auth/localhost.key,sha256=X31VYXD8scZtmGA837dGX5l6G-LXHLo5ItWJhZXaz3c,1679
37
- uipath/_cli/_debug/_bridge.py,sha256=iF3lI73TRyVvQDf7Zb-HUxrVrW5oAnWOPT7JIFkFL_I,28433
37
+ uipath/_cli/_debug/_bridge.py,sha256=6PeeLUyLmOnLsytKIG2xgLXQlsBV8tZ8Yg-qm9RsrI0,28224
38
38
  uipath/_cli/_debug/_runtime.py,sha256=cGWoyQwHaKG5EQhYqTQfIuCX--n4PBijZQ9iXiOpRIc,5748
39
39
  uipath/_cli/_dev/_terminal/__init__.py,sha256=di_RiN9Mcp9wqyKRRqXag28vbSw8_78mCnQZNn9H-Ss,14027
40
40
  uipath/_cli/_dev/_terminal/_components/_chat.py,sha256=NLRoy49QScHiI-q0FGykkaU8ajv1d23fx7issSALcFA,4119
@@ -53,13 +53,13 @@ uipath/_cli/_evals/_console_progress_reporter.py,sha256=RlfhtyEHq2QjyXRevyeAhtGT
53
53
  uipath/_cli/_evals/_evaluate.py,sha256=yRVhZ6uV58EV5Fv5X_K6425ZGsseQslnLe6FpIKy-u8,833
54
54
  uipath/_cli/_evals/_evaluator_factory.py,sha256=gPF9fRMZBOUPnJSM1fzQyXGHMGYQw_0VmHv-JOGbZf4,14348
55
55
  uipath/_cli/_evals/_helpers.py,sha256=dYHgkWxy2fOuqqZDtOKWKsZ1Ri4dn8qMnuB6DE-1MUk,6661
56
- uipath/_cli/_evals/_progress_reporter.py,sha256=CinS0S7vqHDyEp7cU87eARebEvQWwkX7H7fanSqIHxo,27385
57
- uipath/_cli/_evals/_runtime.py,sha256=dU2LXZ-T55-wsqRUCP0Kc4AU1oh1MiF4NRKIyPWJytw,23895
56
+ uipath/_cli/_evals/_progress_reporter.py,sha256=QnkDAS_EJcM3Dzfd9h7n7Sqv9aOHhLmiF3O6XQ2UGKg,32179
57
+ uipath/_cli/_evals/_runtime.py,sha256=beT9cJIj0bcEtn4azTNXgTEz3_tO9Z8SvZDNno04O7I,25490
58
58
  uipath/_cli/_evals/_span_collection.py,sha256=RoKoeDFG2XODdlgI27ionCjU7LLD_C0LJJ3gu0wab10,779
59
59
  uipath/_cli/_evals/_models/_evaluation_set.py,sha256=7P6zIkgerGKHXL6rD1YHXFFWpyxCUpNu7AX71bAaNoE,7270
60
60
  uipath/_cli/_evals/_models/_evaluator.py,sha256=UXrN103gHJFw3MtVWlGwViQWAo2cICRR-n357zL6wTA,9369
61
61
  uipath/_cli/_evals/_models/_evaluator_base_params.py,sha256=8i7Ir70IjaNOINTHMTXVXsKB4koYf3BCR8Vh2cyrBQI,406
62
- uipath/_cli/_evals/_models/_exceptions.py,sha256=-oXLTDa4ab9Boa34ZxuUrCezf8ajIGrIEUVwZnmBASE,195
62
+ uipath/_cli/_evals/_models/_exceptions.py,sha256=yjrXoWwpvqt-Vfa-F-T1h4oM0J7mMIqbNae0CXH_Dmw,256
63
63
  uipath/_cli/_evals/_models/_mocks.py,sha256=mlD9qvdZNniuKxzY_ttJtwLVFvKGvvIukYvy0FTa12k,241
64
64
  uipath/_cli/_evals/_models/_output.py,sha256=ZQiRCqFZWUsPrJ96E_xQlup6xUlz0lmbJQdsy9WUqoU,7450
65
65
  uipath/_cli/_evals/_models/_sw_reporting.py,sha256=tSBLQFAdOIun8eP0vsqt56K6bmCZz_uMaWI3hskg_24,536
@@ -224,8 +224,8 @@ uipath/tracing/_utils.py,sha256=zMjiKjNpSN3YQNEU4-u5AAvPtUsi8QuEqNLya89jfAU,1446
224
224
  uipath/utils/__init__.py,sha256=VD-KXFpF_oWexFg6zyiWMkxl2HM4hYJMIUDZ1UEtGx0,105
225
225
  uipath/utils/_endpoints_manager.py,sha256=tnF_FiCx8qI2XaJDQgYkMN_gl9V0VqNR1uX7iawuLp8,8230
226
226
  uipath/utils/dynamic_schema.py,sha256=w0u_54MoeIAB-mf3GmwX1A_X8_HDrRy6p998PvX9evY,3839
227
- uipath-2.1.113.dist-info/METADATA,sha256=pKYTMkeNgq_jI893OyAeJWlYH_hKHfnh2kQcF3layKo,6626
228
- uipath-2.1.113.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
229
- uipath-2.1.113.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
230
- uipath-2.1.113.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
231
- uipath-2.1.113.dist-info/RECORD,,
227
+ uipath-2.1.115.dist-info/METADATA,sha256=_NKudz2j8Jkj-TVI5YEskzBhu3Iagq7NzJLjXceCKao,6626
228
+ uipath-2.1.115.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
229
+ uipath-2.1.115.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
230
+ uipath-2.1.115.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
231
+ uipath-2.1.115.dist-info/RECORD,,