uipath 2.1.76__py3-none-any.whl → 2.1.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,39 +19,31 @@ class BaseMockingStrategy(BaseModel):
     pass
 
 
+class ModelSettings(BaseModel):
+    """Model Generation Parameters."""
+
+    model: str = Field(..., alias="model")
+    temperature: Optional[float] = Field(default=None, alias="temperature")
+    top_p: Optional[float] = Field(default=None, alias="topP")
+    top_k: Optional[int] = Field(default=None, alias="topK")
+    frequency_penalty: Optional[float] = Field(default=None, alias="frequencyPenalty")
+    presence_penalty: Optional[float] = Field(default=None, alias="presencePenalty")
+    max_tokens: Optional[int] = Field(default=None, alias="maxTokens")
+
+
 class LLMMockingStrategy(BaseMockingStrategy):
     type: Literal[MockingStrategyType.LLM] = MockingStrategyType.LLM
     prompt: str = Field(..., alias="prompt")
     tools_to_simulate: list[EvaluationSimulationTool] = Field(
         ..., alias="toolsToSimulate"
     )
+    model: Optional[ModelSettings] = Field(None, alias="model")
 
     model_config = ConfigDict(
         validate_by_name=True, validate_by_alias=True, extra="allow"
     )
 
 
-"""
-{
-    "function": "postprocess",
-    "arguments": {
-        "args": [],
-        "kwargs": {"x": 3}
-    },
-    "then": [
-        {
-            "return": 3
-        },
-        {
-            "raise": {
-                "__target__": "NotImplementedError"
-            }
-        }
-    ]
-}
-"""
-
-
 class MockingArgument(BaseModel):
     args: List[Any] = Field(default_factory=lambda: [], alias="args")
     kwargs: Dict[str, Any] = Field(default_factory=lambda: {}, alias="kwargs")
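The new `ModelSettings` above lets an evaluation pin generation parameters on an LLM mocking strategy. A minimal sketch of how the two models compose (field shapes are taken from this hunk; the values and the empty tool list are invented for illustration):

```python
from uipath._cli._evals._models._evaluation_set import (
    LLMMockingStrategy,
    ModelSettings,
)

# Camelcase aliases like "maxTokens" validate directly; the strategy's
# model_config additionally allows snake_case field names.
strategy = LLMMockingStrategy(
    prompt="Simulate the weather lookup tool with plausible data.",
    toolsToSimulate=[],
    model=ModelSettings(model="gpt-4o-mini", temperature=0.2, maxTokens=512),
)
```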
@@ -99,18 +91,6 @@ class UnknownMockingStrategy(BaseMockingStrategy):
 MockingStrategy = Union[KnownMockingStrategy, UnknownMockingStrategy]
 
 
-def migrate_mocking_strategy(data) -> MockingStrategy:
-    if data.get("simulate_tools") and "tools_to_simulate" in data:
-        return LLMMockingStrategy(
-            **{
-                "prompt": data["simulation_instructions"],
-                "toolsToSimulate": data["tools_to_simulate"],
-            }
-        )
-    else:
-        return UnknownMockingStrategy(type=MockingStrategyType.UNKNOWN)
-
-
 class EvaluationItem(BaseModel):
     """Individual evaluation item within an evaluation set."""
 
@@ -0,0 +1,9 @@
+from pydantic import BaseModel, Field
+
+
+class ExampleCall(BaseModel):
+    """Example invocation for a resource."""
+
+    id: str = Field(..., alias="id")
+    input: str = Field(..., alias="input")
+    output: str = Field(..., alias="output")
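`ExampleCall` is a plain record of one prior invocation, with the input and output each stored as a JSON string. A hypothetical instance (values invented):

```python
call = ExampleCall(
    id="call-001",
    input='{"city": "Berlin"}',
    output='{"temperature_c": 21}',
)
```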
@@ -6,11 +6,18 @@ from typing import Any, Callable
 
 from pydantic import BaseModel
 
-from uipath._cli._evals._models._evaluation_set import (
+from .._models._evaluation_set import (
     EvaluationItem,
     LLMMockingStrategy,
 )
-from uipath._cli._evals.mocks.mocker import Mocker, R, T, UiPathMockingNoMatcherError
+from .._models._mocks import ExampleCall
+from .mocker import (
+    Mocker,
+    R,
+    T,
+    UiPathMockResponseGenerationError,
+    UiPathNoMockFoundError,
+)
 
 PROMPT = """You are simulating a tool call for automated testing purposes of an Agent.
 Your task is to provide a realistic response for the tool based on its schema, examples, and context.
@@ -102,8 +109,15 @@ class LLMMocker(Mocker):
                 },
             }
             try:
+                # Safely pull examples from params.
+                example_calls = params.get("example_calls", [])
+                if not isinstance(example_calls, list):
+                    example_calls = []
+                example_calls = [
+                    call for call in example_calls if isinstance(call, ExampleCall)
+                ]
                 prompt_input: dict[str, Any] = {
-                    "toolRunExamples": [],  # Taken from history. Contains id, input json, output json
+                    "toolRunExamples": example_calls,
                     "testRunHistory": [],  # This should contain ordered spans.
                     "toolInfo": {
                         "name": function_name,
@@ -132,6 +146,12 @@ class LLMMocker(Mocker):
                     k: json.dumps(pydantic_to_dict_safe(v))
                     for k, v in prompt_input.items()
                 }
+                model_parameters = self.evaluation_item.mocking_strategy.model
+                completion_kwargs = (
+                    model_parameters.model_dump(by_alias=False, exclude_none=True)
+                    if model_parameters
+                    else {}
+                )
                 response = await llm.chat_completions(
                     [
                         {
@@ -140,14 +160,13 @@ class LLMMocker(Mocker):
                         },
                     ],
                     response_format=response_format,
+                    **completion_kwargs,
                 )
                 mocked_response = OutputSchema(
                     **json.loads(response.choices[0].message.content)
                 )
-                return mocked_response.response
-            except Exception:
-                raise
+                return mocked_response.model_dump(mode="json")["response"]
+            except Exception as e:
+                raise UiPathMockResponseGenerationError() from e
         else:
-            raise UiPathMockingNoMatcherError(
-                f"Method '{function_name}' is not simulated."
-            )
+            raise UiPathNoMockFoundError(f"Method '{function_name}' is not simulated.")
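The `completion_kwargs` plumbing works because the `ModelSettings` field names mirror the `chat_completions` keyword arguments, so a snake_case dump can be splatted straight into the call. A small sketch of what the dump yields (assuming the `ModelSettings` model from the first hunk of this diff):

```python
settings = ModelSettings(model="gpt-4o-mini", temperature=0.1, topK=40)
settings.model_dump(by_alias=False, exclude_none=True)
# -> {'model': 'gpt-4o-mini', 'temperature': 0.1, 'top_k': 40}
```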
@@ -23,7 +23,13 @@ class Mocker(ABC):
         raise NotImplementedError()
 
 
-class UiPathMockingNoMatcherError(Exception):
-    """Exception when a mocker is unable to find a match with the invocation."""
+class UiPathNoMockFoundError(Exception):
+    """Exception when a mocker is unable to find a match with the invocation. This is a signal to invoke the real function."""
+
+    pass
+
+
+class UiPathMockResponseGenerationError(Exception):
+    """Exception when a mocker is configured but unable to generate a response."""
 
     pass
@@ -13,7 +13,13 @@ from uipath._cli._evals._models._evaluation_set import (
     MockingAnswerType,
     MockitoMockingStrategy,
 )
-from uipath._cli._evals.mocks.mocker import Mocker, R, T
+from uipath._cli._evals.mocks.mocker import (
+    Mocker,
+    R,
+    T,
+    UiPathMockResponseGenerationError,
+    UiPathNoMockFoundError,
+)
 
 
 class Stub:
@@ -59,4 +65,16 @@ class MockitoMocker(Mocker):
         self, func: Callable[[T], R], params: dict[str, Any], *args: T, **kwargs
     ) -> R:
         """Respond with mocked response."""
-        return getattr(self.stub, params["name"])(*args, **kwargs)
+        if not isinstance(
+            self.evaluation_item.mocking_strategy, MockitoMockingStrategy
+        ):
+            raise UiPathMockResponseGenerationError("Mocking strategy misconfigured.")
+        if not any(
+            behavior.function == params["name"]
+            for behavior in self.evaluation_item.mocking_strategy.behaviors
+        ):
+            raise UiPathNoMockFoundError()
+        try:
+            return getattr(self.stub, params["name"])(*args, **kwargs)
+        except Exception as e:
+            raise UiPathMockResponseGenerationError() from e
@@ -1,18 +1,11 @@
 """Mocking interface."""
 
-import asyncio
-import functools
-import inspect
 import logging
-import threading
 from contextvars import ContextVar
 from typing import Any, Callable, Optional
 
-from pydantic import TypeAdapter
-from pydantic_function_models import ValidatedFunction  # type: ignore[import-untyped]
-
 from uipath._cli._evals._models._evaluation_set import EvaluationItem
-from uipath._cli._evals.mocks.mocker import Mocker
+from uipath._cli._evals.mocks.mocker import Mocker, UiPathNoMockFoundError
 from uipath._cli._evals.mocks.mocker_factory import MockerFactory
 
 evaluation_context: ContextVar[Optional[EvaluationItem]] = ContextVar(
@@ -21,12 +14,17 @@ evaluation_context: ContextVar[Optional[EvaluationItem]] = ContextVar(
 
 mocker_context: ContextVar[Optional[Mocker]] = ContextVar("mocker", default=None)
 
+logger = logging.getLogger(__name__)
+
 
 def set_evaluation_item(item: EvaluationItem) -> None:
     """Set an evaluation item within an evaluation set."""
     evaluation_context.set(item)
     try:
-        mocker_context.set(MockerFactory.create(item))
+        if item.mocking_strategy:
+            mocker_context.set(MockerFactory.create(item))
+        else:
+            mocker_context.set(None)
     except Exception:
         logger.warning(f"Failed to create mocker for evaluation {item.name}")
         mocker_context.set(None)
@@ -37,100 +35,7 @@ async def get_mocked_response(
 ) -> Any:
     """Get a mocked response."""
     mocker = mocker_context.get()
-    evaluation_item = evaluation_context.get()
-    if mocker is None or evaluation_item is None:
-        # TODO raise a new UiPath exception type
-        raise RuntimeError(f"Evaluation item {func.__name__} has not been evaluated")
+    if mocker is None:
+        raise UiPathNoMockFoundError()
     else:
         return await mocker.response(func, params, *args, **kwargs)
-
-
-_event_loop = None
-logger = logging.getLogger(__name__)
-
-
-def run_coroutine(coro):
-    """Run a coroutine synchronously."""
-    global _event_loop
-    if not _event_loop or not _event_loop.is_running():
-        _event_loop = asyncio.new_event_loop()
-        threading.Thread(target=_event_loop.run_forever, daemon=True).start()
-    future = asyncio.run_coroutine_threadsafe(coro, _event_loop)
-    return future.result()
-
-
-def mocked_response_decorator(func, params: dict[str, Any]):
-    """Mocked response decorator."""
-
-    async def mock_response_generator(*args, **kwargs):
-        mocked_response = await get_mocked_response(func, params, *args, **kwargs)
-        return_type: Any = func.__annotations__.get("return", None)
-
-        if return_type is not None:
-            mocked_response = TypeAdapter(return_type).validate_python(mocked_response)
-        return mocked_response
-
-    is_async = inspect.iscoroutinefunction(func)
-    if is_async:
-
-        @functools.wraps(func)
-        async def decorated_func(*args, **kwargs):
-            try:
-                return await mock_response_generator(*args, **kwargs)
-            except Exception:
-                logger.warning(
-                    f"Failed to mock response for {func.__name__}. Falling back to func."
-                )
-                return await func(*args, **kwargs)
-    else:
-
-        @functools.wraps(func)
-        def decorated_func(*args, **kwargs):
-            try:
-                return run_coroutine(mock_response_generator(*args, **kwargs))
-            except Exception:
-                logger.warning(
-                    f"Failed to mock response for {func.__name__}. Falling back to func."
-                )
-                return func(*args, **kwargs)
-
-    return decorated_func
-
-
-def mockable(
-    name: Optional[str] = None,
-    description: Optional[str] = None,
-    **kwargs,
-):
-    """Decorate a function to be a mockable."""
-
-    def decorator(func):
-        params = {
-            "name": name or func.__name__,
-            "description": description or func.__doc__,
-            "input_schema": get_input_schema(func),
-            "output_schema": get_output_schema(func),
-            **kwargs,
-        }
-        return mocked_response_decorator(func, params)
-
-    return decorator
-
-
-def get_output_schema(func):
-    """Retrieves the JSON schema for a function's return type hint."""
-    try:
-        adapter = TypeAdapter(inspect.signature(func).return_annotation)
-        return adapter.json_schema()
-    except Exception:
-        logger.warning(f"Unable to extract output schema for function {func.__name__}")
-        return {}
-
-
-def get_input_schema(func):
-    """Retrieves the JSON schema for a function's input type."""
-    try:
-        return ValidatedFunction(func).model.model_json_schema()
-    except Exception:
-        logger.warning(f"Unable to extract input schema for function {func.__name__}")
-        return {}
@@ -351,7 +351,8 @@ class UiPathLlmChatService(BaseService):
         n: int = 1,
         frequency_penalty: float = 0,
         presence_penalty: float = 0,
-        top_p: float = 1,
+        top_p: Optional[float] = 1,
+        top_k: Optional[int] = None,
         tools: Optional[List[ToolDefinition]] = None,
         tool_choice: Optional[ToolChoice] = None,
         response_format: Optional[Union[Dict[str, Any], type[BaseModel]]] = None,
@@ -385,6 +386,8 @@ class UiPathLlmChatService(BaseService):
                 Positive values encourage discussion of new topics. Defaults to 0.
             top_p (float, optional): Nucleus sampling parameter between 0 and 1.
                 Controls diversity by considering only the top p probability mass. Defaults to 1.
+            top_k (int, optional): Top-k sampling parameter.
+                Controls diversity by considering only the top k most probable tokens. Defaults to None.
             tools (Optional[List[ToolDefinition]], optional): List of tool definitions that the
                 model can call. Tools enable the model to perform actions or retrieve information
                 beyond text generation. Defaults to None.
@@ -486,6 +489,8 @@ class UiPathLlmChatService(BaseService):
             "presence_penalty": presence_penalty,
             "top_p": top_p,
         }
+        if top_k is not None:
+            request_body["top_k"] = top_k
 
         # Handle response_format - convert BaseModel to schema if needed
         if response_format:
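Callers can now opt into top-k sampling alongside nucleus sampling, and the parameter is only forwarded when explicitly set, so request bodies are unchanged for existing callers. A hedged usage sketch (client construction omitted; only parameters visible in this diff are used):

```python
response = await llm.chat_completions(
    [{"role": "user", "content": "Summarize the incident report."}],
    top_p=0.9,
    top_k=40,
)
```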
@@ -7,6 +7,7 @@ from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag
 
 from uipath._cli._evals._models._evaluation_set import EvaluationSet
 from uipath._cli._evals._models._evaluator import Evaluator
+from uipath._cli._evals._models._mocks import ExampleCall
 from uipath.models import Connection
 
 
@@ -70,7 +71,13 @@ class AgentToolSettings(BaseModel):
     )
 
 
-class AgentProcessToolProperties(BaseModel):
+class BaseResourceProperties(BaseModel):
+    """Base resource properties."""
+
+    example_calls: Optional[List[ExampleCall]] = Field(None, alias="exampleCalls")
+
+
+class AgentProcessToolProperties(BaseResourceProperties):
     """Properties specific to tool configuration."""
 
     folder_path: Optional[str] = Field(None, alias="folderPath")
@@ -125,7 +132,7 @@ class AgentIntegrationToolParameter(BaseModel):
     )
 
 
-class AgentIntegrationToolProperties(BaseModel):
+class AgentIntegrationToolProperties(BaseResourceProperties):
     """Properties specific to tool configuration."""
 
     tool_path: str = Field(..., alias="toolPath")
@@ -198,7 +205,7 @@ class AgentEscalationRecipient(BaseModel):
     )
 
 
-class AgentEscalationChannelProperties(BaseModel):
+class AgentEscalationChannelProperties(BaseResourceProperties):
     """Agent escalation channel properties."""
 
     app_name: str = Field(..., alias="appName")
@@ -6,12 +6,16 @@ from typing import Any, Optional
 from opentelemetry.sdk.trace import ReadableSpan
 from pydantic import field_validator
 
-from uipath._cli._evals._models._trajectory_span import TrajectoryEvaluationTrace
 from uipath.eval.models import EvaluationResult
 
 from ..._services import UiPathLlmChatService
 from ..._utils.constants import COMMUNITY_agents_SUFFIX
-from ..models.models import AgentExecution, LLMResponse, NumericEvaluationResult
+from ..models.models import (
+    AgentExecution,
+    LLMResponse,
+    NumericEvaluationResult,
+    TrajectoryEvaluationTrace,
+)
 from .base_evaluator import BaseEvaluator
 
 
@@ -0,0 +1,7 @@
+"""Mock interface."""
+
+from uipath._cli._evals._models._mocks import ExampleCall
+
+from .mockable import mockable
+
+__all__ = ["ExampleCall", "mockable"]
@@ -0,0 +1,101 @@
+"""Mockable interface."""
+
+import asyncio
+import functools
+import inspect
+import logging
+import threading
+from typing import Any, List, Optional
+
+from pydantic import TypeAdapter
+from pydantic_function_models import ValidatedFunction  # type: ignore[import-untyped]
+
+from uipath._cli._evals._models._mocks import ExampleCall
+from uipath._cli._evals.mocks.mocker import UiPathNoMockFoundError
+from uipath._cli._evals.mocks.mocks import get_mocked_response
+
+_event_loop = None
+logger = logging.getLogger(__name__)
+
+
+def run_coroutine(coro):
+    """Run a coroutine synchronously."""
+    global _event_loop
+    if not _event_loop or not _event_loop.is_running():
+        _event_loop = asyncio.new_event_loop()
+        threading.Thread(target=_event_loop.run_forever, daemon=True).start()
+    future = asyncio.run_coroutine_threadsafe(coro, _event_loop)
+    return future.result()
+
+
+def mocked_response_decorator(func, params: dict[str, Any]):
+    """Mocked response decorator."""
+
+    async def mock_response_generator(*args, **kwargs):
+        mocked_response = await get_mocked_response(func, params, *args, **kwargs)
+        return_type: Any = func.__annotations__.get("return", None)
+
+        if return_type is not None:
+            mocked_response = TypeAdapter(return_type).validate_python(mocked_response)
+        return mocked_response
+
+    is_async = inspect.iscoroutinefunction(func)
+    if is_async:
+
+        @functools.wraps(func)
+        async def decorated_func(*args, **kwargs):
+            try:
+                return await mock_response_generator(*args, **kwargs)
+            except UiPathNoMockFoundError:
+                return await func(*args, **kwargs)
+    else:
+
+        @functools.wraps(func)
+        def decorated_func(*args, **kwargs):
+            try:
+                return run_coroutine(mock_response_generator(*args, **kwargs))
+            except UiPathNoMockFoundError:
+                return func(*args, **kwargs)
+
+    return decorated_func
+
+
+def get_output_schema(func):
+    """Retrieves the JSON schema for a function's return type hint."""
+    try:
+        adapter = TypeAdapter(inspect.signature(func).return_annotation)
+        return adapter.json_schema()
+    except Exception:
+        logger.warning(f"Unable to extract output schema for function {func.__name__}")
+        return {}
+
+
+def get_input_schema(func):
+    """Retrieves the JSON schema for a function's input type."""
+    try:
+        return ValidatedFunction(func).model.model_json_schema()
+    except Exception:
+        logger.warning(f"Unable to extract input schema for function {func.__name__}")
+        return {}
+
+
+def mockable(
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+    example_calls: Optional[List[ExampleCall]] = None,
+    **kwargs,
+):
+    """Decorate a function to be a mockable."""
+
+    def decorator(func):
+        params = {
+            "name": name or func.__name__,
+            "description": description or func.__doc__,
+            "input_schema": get_input_schema(func),
+            "output_schema": get_output_schema(func),
+            "example_calls": example_calls,
+            **kwargs,
+        }
+        return mocked_response_decorator(func, params)
+
+    return decorator
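With the move to `uipath.eval.mocks`, `mockable` becomes the public entry point, and `example_calls` flows through `params` into the LLM mocker's prompt as `toolRunExamples`. A hypothetical usage sketch (function name and values invented):

```python
from uipath.eval.mocks import ExampleCall, mockable


@mockable(
    name="get_weather",
    example_calls=[
        ExampleCall(id="1", input='{"city": "Berlin"}', output='{"temp_c": 21}'),
    ],
)
def get_weather(city: str) -> dict:
    """Return current weather for a city."""
    return {"temp_c": 20}  # real implementation, used when no mock matches
```

Under an evaluation with a mocking strategy, the decorator returns the mocked response; a `UiPathNoMockFoundError` now falls through to the real function instead of being swallowed with a warning.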
@@ -1,7 +1,8 @@
 """Models for evaluation framework including execution data and evaluation results."""
 
+from dataclasses import dataclass
 from enum import IntEnum
-from typing import Annotated, Any, Dict, Literal, Optional, Union
+from typing import Annotated, Any, Dict, List, Literal, Optional, Union
 
 from opentelemetry.sdk.trace import ReadableSpan
 from pydantic import BaseModel, ConfigDict, Field
@@ -113,3 +114,111 @@ class EvaluatorType(IntEnum):
             return cls(value)
         else:
             raise ValueError(f"{value} is not a valid EvaluatorType value")
+
+
+@dataclass
+class TrajectoryEvaluationSpan:
+    """Simplified span representation for trajectory evaluation.
+
+    Contains span information needed for evaluating agent execution paths,
+    excluding timestamps which are not useful for trajectory analysis.
+    """
+
+    name: str
+    status: str
+    attributes: Dict[str, Any]
+    parent_name: Optional[str] = None
+    events: Optional[List[Dict[str, Any]]] = None
+
+    def __post_init__(self):
+        """Initialize default values."""
+        if self.events is None:
+            self.events = []
+
+    @classmethod
+    def from_readable_span(
+        cls, span: ReadableSpan, parent_spans: Optional[Dict[int, str]] = None
+    ) -> "TrajectoryEvaluationSpan":
+        """Convert a ReadableSpan to a TrajectoryEvaluationSpan.
+
+        Args:
+            span: The OpenTelemetry ReadableSpan to convert
+            parent_spans: Optional mapping of span IDs to names for parent lookup
+
+        Returns:
+            TrajectoryEvaluationSpan with relevant data extracted
+        """
+        # Extract status
+        status_map = {0: "unset", 1: "ok", 2: "error"}
+        status = status_map.get(span.status.status_code.value, "unknown")
+
+        # Extract attributes - keep all attributes for now
+        attributes = {}
+        if span.attributes:
+            attributes = dict(span.attributes)
+
+        # Get parent name if available
+        parent_name = None
+        if span.parent and parent_spans and span.parent.span_id in parent_spans:
+            parent_name = parent_spans[span.parent.span_id]
+
+        # Extract events (without timestamps)
+        events = []
+        if hasattr(span, "events") and span.events:
+            for event in span.events:
+                event_data = {
+                    "name": event.name,
+                    "attributes": dict(event.attributes) if event.attributes else {},
+                }
+                events.append(event_data)
+
+        return cls(
+            name=span.name,
+            status=status,
+            attributes=attributes,
+            parent_name=parent_name,
+            events=events,
+        )
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        return {
+            "name": self.name,
+            "status": self.status,
+            "parent_name": self.parent_name,
+            "attributes": self.attributes,
+            "events": self.events,
+        }
+
+
+class TrajectoryEvaluationTrace(BaseModel):
+    """Container for a collection of trajectory evaluation spans."""
+
+    spans: List[TrajectoryEvaluationSpan]
+
+    @classmethod
+    def from_readable_spans(
+        cls, spans: List[ReadableSpan]
+    ) -> "TrajectoryEvaluationTrace":
+        """Convert a list of ReadableSpans to TrajectoryEvaluationTrace.
+
+        Args:
+            spans: List of OpenTelemetry ReadableSpans to convert
+
+        Returns:
+            TrajectoryEvaluationTrace with converted spans
+        """
+        # Create a mapping of span IDs to names for parent lookup
+        span_id_to_name = {span.get_span_context().span_id: span.name for span in spans}
+
+        evaluation_spans = [
+            TrajectoryEvaluationSpan.from_readable_span(span, span_id_to_name)
+            for span in spans
+        ]
+
+        return cls(spans=evaluation_spans)
+
+    class Config:
+        """Pydantic configuration."""
+
+        arbitrary_types_allowed = True
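`TrajectoryEvaluationTrace` now lives beside the other evaluation models, and converting collected spans for an LLM judge stays a two-liner. A sketch assuming `spans` is a list of finished OpenTelemetry `ReadableSpan`s from an agent run:

```python
trace = TrajectoryEvaluationTrace.from_readable_spans(spans)
serialized = [span.to_dict() for span in trace.spans]
```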
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath
-Version: 2.1.76
+Version: 2.1.78
 Summary: Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools.
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-python
@@ -48,19 +48,19 @@ uipath/_cli/_evals/_console_progress_reporter.py,sha256=HgB6pdMyoS6YVwuI3EpM2LBc
 uipath/_cli/_evals/_evaluator_factory.py,sha256=Gycv94VtGOpMir_Gba-UoiAyrSRfbSfe8_pTfjzcA9Q,3875
 uipath/_cli/_evals/_progress_reporter.py,sha256=kX7rNSa-QCLXIzK-vb9Jjf-XLEtucdeiQPgPlSkpp2U,16778
 uipath/_cli/_evals/_runtime.py,sha256=dz3mpZCLxwnLEdkwLo6W7qzBuVAklx6LMWtd4OMRk9w,15489
-uipath/_cli/_evals/_models/_evaluation_set.py,sha256=XgPNLWciE4FgCYzZXV2kRYHzdtbc33FWSQmZQqVSdMk,4747
+uipath/_cli/_evals/_models/_evaluation_set.py,sha256=TEinpTAIzy5JLkF7-JrG_623ec2Y-GN9pfz284KKL_8,4567
 uipath/_cli/_evals/_models/_evaluator.py,sha256=fuC3UOYwPD4d_wdynHeLSCzbu82golNAnnPnxC8Y4rk,3315
 uipath/_cli/_evals/_models/_evaluator_base_params.py,sha256=lTYKOV66tcjW85KHTyOdtF1p1VDaBNemrMAvH8bFIFc,382
 uipath/_cli/_evals/_models/_exceptions.py,sha256=-oXLTDa4ab9Boa34ZxuUrCezf8ajIGrIEUVwZnmBASE,195
+uipath/_cli/_evals/_models/_mocks.py,sha256=mlD9qvdZNniuKxzY_ttJtwLVFvKGvvIukYvy0FTa12k,241
 uipath/_cli/_evals/_models/_output.py,sha256=DmwFXh1YdLiMXyXmyoZr_4hgrrv3oiHbrrtIWMqGfsg,3145
 uipath/_cli/_evals/_models/_sw_reporting.py,sha256=tSBLQFAdOIun8eP0vsqt56K6bmCZz_uMaWI3hskg_24,536
-uipath/_cli/_evals/_models/_trajectory_span.py,sha256=8ukM8sB9rvzBMHfC_gnexAC3xlp4uMDevKZrRzcgrm4,3637
 uipath/_cli/_evals/mocks/__init__.py,sha256=2WXwAy_oZw5bKp6L0HB13QygCJeftOB_Bget0AI6Gik,32
-uipath/_cli/_evals/mocks/llm_mocker.py,sha256=EtfPUhKcBRQ7vQPQdF3QanKIx2vzzYWA9PfdjKeJnTw,6108
-uipath/_cli/_evals/mocks/mocker.py,sha256=FjSIqXF5HzQRi1eWrFfXBz-cYZEu5TiMVm2RsKnSEWI,626
+uipath/_cli/_evals/mocks/llm_mocker.py,sha256=2pNCYKdXLoN_TTwiVk15RI-QC_CmHLaH40mFU1o4Ark,6828
+uipath/_cli/_evals/mocks/mocker.py,sha256=p9UpJDIckvCgkO0qqJHWdMMSbySBOoa8xpEIi2QIbhA,810
 uipath/_cli/_evals/mocks/mocker_factory.py,sha256=V5QKSTtQxztTo4-fK1TyAaXw2Z3mHf2UC5mXqwuUGTs,811
-uipath/_cli/_evals/mocks/mockito_mocker.py,sha256=LtYT6lJM9vc3qtbSZJcUeCzDn4zarkBVj7In_EX7kYY,2087
-uipath/_cli/_evals/mocks/mocks.py,sha256=WqjWtHqKQXAsO1Wwom3Zcr1T09GQygwBWVp-EsxdW8o,4443
+uipath/_cli/_evals/mocks/mockito_mocker.py,sha256=AO2BmFwA6hz3Lte-STVr7aJDPvMCqKNKa4j2jeNZ_U4,2677
+uipath/_cli/_evals/mocks/mocks.py,sha256=wPoZV-x-okPSVLJkHyBlAZZnClb5lr32ox_qsjo3y6E,1314
 uipath/_cli/_push/sw_file_handler.py,sha256=iE8Sk1Z-9hxmLFFj3j-k4kTK6TzNFP6hUCmxTudG6JQ,18251
 uipath/_cli/_runtime/_contracts.py,sha256=E8Is7EQfAu7_hCbeZI68gmTxSxo4X7_U4vcSl7D3Syg,28988
 uipath/_cli/_runtime/_escalation.py,sha256=x3vI98qsfRA-fL_tNkRVTFXioM5Gv2w0GFcXJJ5eQtg,7981
@@ -103,7 +103,7 @@ uipath/_services/documents_service.py,sha256=UnFS8EpOZ_Ng2TZk3OiJJ3iNANvFs7QxuoG
 uipath/_services/entities_service.py,sha256=QKCLE6wRgq3HZraF-M2mljy-8il4vsNHrQhUgkewVVk,14028
 uipath/_services/folder_service.py,sha256=9JqgjKhWD-G_KUnfUTP2BADxL6OK9QNZsBsWZHAULdE,2749
 uipath/_services/jobs_service.py,sha256=tTZNsdZKN3uP7bWPQyBCpJeQxTfuOWbKYOR4L-_yJo4,32736
-uipath/_services/llm_gateway_service.py,sha256=IBpkG2N2_9LGHkbajBoet9F26DqILC8zL4uHnnniQvg,24124
+uipath/_services/llm_gateway_service.py,sha256=oFBKSYbZqujGHDuM3A72S3J7mHn9kWjNTgcE3U0c24Y,24411
 uipath/_services/processes_service.py,sha256=O_uHgQ1rnwiV5quG0OQqabAnE6Rf6cWrMENYY2jKWt8,8585
 uipath/_services/queues_service.py,sha256=VaG3dWL2QK6AJBOLoW2NQTpkPfZjsqsYPl9-kfXPFzA,13534
 uipath/_utils/__init__.py,sha256=VdcpnENJIa0R6Y26NoxY64-wUVyvb4pKfTh1wXDQeMk,526
@@ -128,7 +128,7 @@ uipath/agent/conversation/exchange.py,sha256=nuk1tEMBHc_skrraT17d8U6AtyJ3h07ExGQ
 uipath/agent/conversation/message.py,sha256=1ZkEs146s79TrOAWCQwzBAEJvjAu4lQBpJ64tKXDgGE,2142
 uipath/agent/conversation/meta.py,sha256=3t0eS9UHoAPHre97QTUeVbjDhnMX4zj4-qG6ju0B8wY,315
 uipath/agent/conversation/tool.py,sha256=ol8XI8AVd-QNn5auXNBPcCzOkh9PPFtL7hTK3kqInkU,2191
-uipath/agent/models/agent.py,sha256=a4tE5LTne0ghpb9qeKkvBAotFJSC88hQP-y7OKGrYXk,12976
+uipath/agent/models/agent.py,sha256=18NvqLtuQoEHYnrHiEApWjaka91PYCj_fJRFE72z4Uw,13236
 uipath/eval/_helpers/__init__.py,sha256=GSmZMryjuO3Wo_zdxZdrHCRRsgOxsVFYkYgJ15YNC3E,86
 uipath/eval/_helpers/helpers.py,sha256=iE2HHdMiAdAMLqxHkPKHpfecEtAuN5BTBqvKFTI8ciE,1315
 uipath/eval/evaluators/__init__.py,sha256=DJAAhgv0I5UfBod4sGnSiKerfrz1iMmk7GNFb71V8eI,494
@@ -137,9 +137,11 @@ uipath/eval/evaluators/deterministic_evaluator_base.py,sha256=yDWTMU1mG-93D6DscA
 uipath/eval/evaluators/exact_match_evaluator.py,sha256=Qfz-kIUf80PKjAuge1Tc1GvN6kDB6hHveBZ86w_2How,1512
 uipath/eval/evaluators/json_similarity_evaluator.py,sha256=cP4kpN-UIf690V5dq4LaCjJc2zFx-nEffUclCwDdlhM,6607
 uipath/eval/evaluators/llm_as_judge_evaluator.py,sha256=l0bbn8ZLi9ZTXcgr7tJ2tsCvHFqIIeGa7sobaAHgI2Y,4927
-uipath/eval/evaluators/trajectory_evaluator.py,sha256=IylFm4yeNcVYgtmBzvzFn4Y2GXdSNnvAF8F4bCvPYdw,5774
+uipath/eval/evaluators/trajectory_evaluator.py,sha256=w9E8yUXp3KCXTfiUD-ut1OVyiOH3RpFFIIe7w3v3pBQ,5740
+uipath/eval/mocks/__init__.py,sha256=Qis6XSN7_WOmrmD_I5Fo5E_OQpflb_SlZM_MDOszUXI,152
+uipath/eval/mocks/mockable.py,sha256=LWrW9KgwE_HR8e3ql6xi1yq1naHhz-zv6Vw19NCx080,3224
 uipath/eval/models/__init__.py,sha256=x360CDZaRjUL3q3kh2CcXYYrQ47jwn6p6JnmhEIvMlA,419
-uipath/eval/models/models.py,sha256=is2wo-i0ld8Y_oZpbw5nG4cTXBz4bDLNxN6IjrfRcyM,2886
+uipath/eval/models/models.py,sha256=YgPnkQunjEcEiueVQnYRsbQ3Nj1yQttDQZiMCq_DDkY,6321
 uipath/models/__init__.py,sha256=d_DkK1AtRUetM1t2NrH5UKgvJOBiynzaKnK5pMY7aIc,1289
 uipath/models/action_schema.py,sha256=tBn1qQ3NQLU5nwWlBIzIKIx3XK5pO_D1S51IjFlZ1FA,610
 uipath/models/actions.py,sha256=1vRsJ3JSmMdPkbiYAiHzY8K44vmW3VlMsmQUBAkSgrQ,3141
@@ -167,8 +169,8 @@ uipath/tracing/_traced.py,sha256=yBIY05PCCrYyx50EIHZnwJaKNdHPNx-YTR1sHQl0a98,199
 uipath/tracing/_utils.py,sha256=qd7N56tg6VXQ9pREh61esBgUWLNA0ssKsE0QlwrRWFM,11974
 uipath/utils/__init__.py,sha256=VD-KXFpF_oWexFg6zyiWMkxl2HM4hYJMIUDZ1UEtGx0,105
 uipath/utils/_endpoints_manager.py,sha256=iRTl5Q0XAm_YgcnMcJOXtj-8052sr6jpWuPNz6CgT0Q,8408
-uipath-2.1.76.dist-info/METADATA,sha256=zT48rJq2Cbsdva0uAbrjk7eNsLGxBiq2YTjp327fJg8,6593
-uipath-2.1.76.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-uipath-2.1.76.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
-uipath-2.1.76.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
-uipath-2.1.76.dist-info/RECORD,,
+uipath-2.1.78.dist-info/METADATA,sha256=nkOYdZqbtr1-dVNdXhEMnuvgqC1D-ktHX2-n1u-nf3s,6593
+uipath-2.1.78.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath-2.1.78.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
+uipath-2.1.78.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
+uipath-2.1.78.dist-info/RECORD,,
@@ -1,115 +0,0 @@
-"""Trajectory evaluation span model for serializing span data in evaluations."""
-
-from dataclasses import dataclass
-from typing import Any, Dict, List, Optional
-
-from opentelemetry.sdk.trace import ReadableSpan
-from pydantic import BaseModel
-
-
-@dataclass
-class TrajectoryEvaluationSpan:
-    """Simplified span representation for trajectory evaluation.
-
-    Contains span information needed for evaluating agent execution paths,
-    excluding timestamps which are not useful for trajectory analysis.
-    """
-
-    name: str
-    status: str
-    attributes: Dict[str, Any]
-    parent_name: Optional[str] = None
-    events: Optional[List[Dict[str, Any]]] = None
-
-    def __post_init__(self):
-        """Initialize default values."""
-        if self.events is None:
-            self.events = []
-
-    @classmethod
-    def from_readable_span(
-        cls, span: ReadableSpan, parent_spans: Optional[Dict[int, str]] = None
-    ) -> "TrajectoryEvaluationSpan":
-        """Convert a ReadableSpan to a TrajectoryEvaluationSpan.
-
-        Args:
-            span: The OpenTelemetry ReadableSpan to convert
-            parent_spans: Optional mapping of span IDs to names for parent lookup
-
-        Returns:
-            TrajectoryEvaluationSpan with relevant data extracted
-        """
-        # Extract status
-        status_map = {0: "unset", 1: "ok", 2: "error"}
-        status = status_map.get(span.status.status_code.value, "unknown")
-
-        # Extract attributes - keep all attributes for now
-        attributes = {}
-        if span.attributes:
-            attributes = dict(span.attributes)
-
-        # Get parent name if available
-        parent_name = None
-        if span.parent and parent_spans and span.parent.span_id in parent_spans:
-            parent_name = parent_spans[span.parent.span_id]
-
-        # Extract events (without timestamps)
-        events = []
-        if hasattr(span, "events") and span.events:
-            for event in span.events:
-                event_data = {
-                    "name": event.name,
-                    "attributes": dict(event.attributes) if event.attributes else {},
-                }
-                events.append(event_data)
-
-        return cls(
-            name=span.name,
-            status=status,
-            attributes=attributes,
-            parent_name=parent_name,
-            events=events,
-        )
-
-    def to_dict(self) -> Dict[str, Any]:
-        """Convert to dictionary for JSON serialization."""
-        return {
-            "name": self.name,
-            "status": self.status,
-            "parent_name": self.parent_name,
-            "attributes": self.attributes,
-            "events": self.events,
-        }
-
-
-class TrajectoryEvaluationTrace(BaseModel):
-    """Container for a collection of trajectory evaluation spans."""
-
-    spans: List[TrajectoryEvaluationSpan]
-
-    @classmethod
-    def from_readable_spans(
-        cls, spans: List[ReadableSpan]
-    ) -> "TrajectoryEvaluationTrace":
-        """Convert a list of ReadableSpans to TrajectoryEvaluationTrace.
-
-        Args:
-            spans: List of OpenTelemetry ReadableSpans to convert
-
-        Returns:
-            TrajectoryEvaluationTrace with converted spans
-        """
-        # Create a mapping of span IDs to names for parent lookup
-        span_id_to_name = {span.get_span_context().span_id: span.name for span in spans}
-
-        evaluation_spans = [
-            TrajectoryEvaluationSpan.from_readable_span(span, span_id_to_name)
-            for span in spans
-        ]
-
-        return cls(spans=evaluation_spans)
-
-    class Config:
-        """Pydantic configuration."""
-
-        arbitrary_types_allowed = True