uipath-2.1.72-py3-none-any.whl → uipath-2.1.73-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
uipath/_cli/_evals/_models/_evaluation_set.py CHANGED
@@ -1,5 +1,5 @@
-from enum import IntEnum
-from typing import Any, Dict, List
+from enum import Enum, IntEnum
+from typing import Annotated, Any, Dict, List, Literal, Optional, Union
 
 from pydantic import BaseModel, ConfigDict, Field
 from pydantic.alias_generators import to_camel
@@ -9,28 +9,127 @@ class EvaluationSimulationTool(BaseModel):
     name: str = Field(..., alias="name")
 
 
+class MockingStrategyType(str, Enum):
+    LLM = "llm"
+    MOCKITO = "mockito"
+    UNKNOWN = "unknown"
+
+
+class BaseMockingStrategy(BaseModel):
+    pass
+
+
+class LLMMockingStrategy(BaseMockingStrategy):
+    type: Literal[MockingStrategyType.LLM] = MockingStrategyType.LLM
+    prompt: str = Field(..., alias="prompt")
+    tools_to_simulate: list[EvaluationSimulationTool] = Field(
+        ..., alias="toolsToSimulate"
+    )
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+"""
+{
+    "function": "postprocess",
+    "arguments": {
+        "args": [],
+        "kwargs": {"x": 3}
+    },
+    "then": [
+        {
+            "return": 3
+        },
+        {
+            "raise": {
+                "__target__": "NotImplementedError"
+            }
+        }
+    ]
+}
+"""
+
+
+class MockingArgument(BaseModel):
+    args: List[Any] = Field(default_factory=lambda: [], alias="args")
+    kwargs: Dict[str, Any] = Field(default_factory=lambda: {}, alias="kwargs")
+
+
+class MockingAnswerType(str, Enum):
+    RETURN = "return"
+    RAISE = "raise"
+
+
+class MockingAnswer(BaseModel):
+    type: MockingAnswerType
+    value: Any = Field(..., alias="value")
+
+
+class MockingBehavior(BaseModel):
+    function: str = Field(..., alias="function")
+    arguments: MockingArgument = Field(..., alias="arguments")
+    then: List[MockingAnswer] = Field(..., alias="then")
+
+
+class MockitoMockingStrategy(BaseMockingStrategy):
+    type: Literal[MockingStrategyType.MOCKITO] = MockingStrategyType.MOCKITO
+    behaviors: List[MockingBehavior] = Field(..., alias="config")
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+KnownMockingStrategy = Annotated[
+    Union[LLMMockingStrategy, MockitoMockingStrategy],
+    Field(discriminator="type"),
+]
+
+
+class UnknownMockingStrategy(BaseMockingStrategy):
+    type: str = Field(..., alias="type")
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+MockingStrategy = Union[KnownMockingStrategy, UnknownMockingStrategy]
+
+
+def migrate_mocking_strategy(data) -> MockingStrategy:
+    if data.get("simulate_tools") and "tools_to_simulate" in data:
+        return LLMMockingStrategy(
+            **{
+                "prompt": data["simulation_instructions"],
+                "toolsToSimulate": data["tools_to_simulate"],
+            }
+        )
+    else:
+        return UnknownMockingStrategy(type=MockingStrategyType.UNKNOWN)
+
+
 class EvaluationItem(BaseModel):
     """Individual evaluation item within an evaluation set."""
 
-    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
+    model_config = ConfigDict(
+        alias_generator=to_camel, populate_by_name=True, extra="allow"
+    )
 
     id: str
     name: str
     inputs: Dict[str, Any]
     expected_output: Dict[str, Any]
     expected_agent_behavior: str = Field(default="", alias="expectedAgentBehavior")
-    simulation_instructions: str = Field(default="", alias="simulationInstructions")
-    simulate_input: bool = Field(default=False, alias="simulateInput")
-    input_generation_instructions: str = Field(
-        default="", alias="inputGenerationInstructions"
-    )
-    simulate_tools: bool = Field(default=False, alias="simulateTools")
-    tools_to_simulate: List[EvaluationSimulationTool] = Field(
-        default_factory=list, alias="toolsToSimulate"
-    )
     eval_set_id: str = Field(alias="evalSetId")
     created_at: str = Field(alias="createdAt")
    updated_at: str = Field(alias="updatedAt")
+    mocking_strategy: Optional[MockingStrategy] = Field(
+        default=None,
+        alias="mockingStrategy",
+    )
 
 
 class EvaluationSet(BaseModel):
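
The `KnownMockingStrategy` union is discriminated on `type`, so pydantic picks the concrete strategy class from the payload itself; anything unrecognized can still be captured by `UnknownMockingStrategy` via the outer `MockingStrategy` union. A minimal parsing sketch, assuming the models above are in scope (the payload values are hypothetical):

    from pydantic import TypeAdapter

    payload = {
        "type": "mockito",
        "config": [  # validated by alias into MockitoMockingStrategy.behaviors
            {
                "function": "postprocess",
                "arguments": {"args": [], "kwargs": {"x": 3}},
                "then": [
                    {"type": "return", "value": 3},
                    {"type": "raise", "value": {"__target__": "NotImplementedError"}},
                ],
            }
        ],
    }

    strategy = TypeAdapter(KnownMockingStrategy).validate_python(payload)
    assert isinstance(strategy, MockitoMockingStrategy)
    assert strategy.behaviors[0].then[0].value == 3

Note that the floating docstring above sketches a looser wire shape for `then` (keyed by answer type), while the `MockingAnswer` model itself expects explicit `type`/`value` fields as used here.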
uipath/_cli/_evals/_runtime.py CHANGED
@@ -35,6 +35,7 @@ from ._models._output import (
     UiPathEvalOutput,
     UiPathEvalRunExecutionOutput,
 )
+from .mocks.mocks import set_evaluation_item
 
 T = TypeVar("T", bound=UiPathBaseRuntime)
 C = TypeVar("C", bound=UiPathRuntimeContext)
@@ -137,6 +138,7 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
             evaluation_set_name=evaluation_set.name, score=0, evaluation_set_results=[]
         )
         for eval_item in evaluation_set.evaluations:
+            set_evaluation_item(eval_item)
             await event_bus.publish(
                 EvaluationEvents.CREATE_EVAL_RUN,
                 EvalRunCreatedEvent(
uipath/_cli/_evals/mocks/__init__.py ADDED
@@ -0,0 +1 @@
+"""UiPath mocking framework."""
uipath/_cli/_evals/mocks/llm_mocker.py ADDED
@@ -0,0 +1,153 @@
+"""LLM mocker implementation."""
+
+import json
+import logging
+from typing import Any, Callable
+
+from pydantic import BaseModel
+
+from uipath._cli._evals._models._evaluation_set import (
+    EvaluationItem,
+    LLMMockingStrategy,
+)
+from uipath._cli._evals.mocks.mocker import Mocker, R, T, UiPathMockingNoMatcherError
+
+PROMPT = """You are simulating a tool call for automated testing purposes of an Agent.
+Your task is to provide a realistic response for the tool based on its schema, examples, and context.
+
+SELECTED TOOL INFORMATION:
+{toolInfo}
+
+SELECTED TOOL SCHEMA:
+{toolSchema}
+
+SELECTED TOOL EXAMPLES:
+{toolRunExamples}
+
+CHOSEN TOOL INPUT:
+{currentToolInput}
+
+CURRENT AGENT RUN SO FAR:
+{testRunHistory}
+
+HERE IS SOME INFORMATION ABOUT THE AGENT: DO NOT USE THIS INFORMATION TO MAKE THE RESPONSE, BUT RATHER TO UNDERSTAND THE CONTEXT IN WHICH THE TOOL IS BEING USED.
+{agentInfo}
+
+TEST RUN PROCTOR INSTRUCTIONS:
+You will need to simulate a real user's interaction with the tool. This may require following some run-specific instructions. If run instructions are provided, follow them exactly.
+Here are the instructions for this run:
+{testRunProctorInstructions}
+
+Based on the above information, provide a realistic response for this tool call. Your response should:
+1. Match the expected output format according to the tool schema
+2. Be very consistent with how the tool has responded in previous examples. Do not omit fields or properties.
+3. Always include the entire output regardless of token length.
+4. Consider the context of the current test run and the agent being tested. If the agent is acting on a property, make sure the output includes that property.
+
+Respond ONLY with valid JSON that would be a realistic and complete tool response. Do not include any explanations or markdown.
+"""
+
+logger = logging.getLogger(__name__)
+
+
+def pydantic_to_dict_safe(obj: Any) -> Any:
+    """Serialize nested pydantic models to a dict."""
+    if isinstance(obj, BaseModel):
+        # Convert Pydantic model to dict recursively
+        return obj.model_dump(mode="json")
+    elif isinstance(obj, dict):
+        # Recursively convert dict entries
+        return {k: pydantic_to_dict_safe(v) for k, v in obj.items()}
+    elif isinstance(obj, list):
+        # Recursively convert items in lists
+        return [pydantic_to_dict_safe(item) for item in obj]
+    return obj  # Return other types as is
+
+
+class LLMMocker(Mocker):
+    """LLM Based Mocker."""
+
+    def __init__(self, evaluation_item: EvaluationItem):
+        """LLM Mocker constructor."""
+        self.evaluation_item = evaluation_item
+        assert isinstance(self.evaluation_item.mocking_strategy, LLMMockingStrategy)
+
+    async def response(
+        self, func: Callable[[T], R], params: dict[str, Any], *args: T, **kwargs
+    ) -> R:
+        """Respond with a mocked response generated by an LLM."""
+        assert isinstance(self.evaluation_item.mocking_strategy, LLMMockingStrategy)
+
+        function_name = params.get("name") or func.__name__
+        if function_name in [
+            x.name for x in self.evaluation_item.mocking_strategy.tools_to_simulate
+        ]:
+            from uipath import UiPath
+            from uipath._services.llm_gateway_service import _cleanup_schema
+
+            llm = UiPath().llm
+            return_type: Any = func.__annotations__.get("return", None)
+            if return_type is None:
+                return_type = Any
+
+            class OutputSchema(BaseModel):
+                response: return_type
+
+            response_format = {
+                "type": "json_schema",
+                "json_schema": {
+                    "name": OutputSchema.__name__.lower(),
+                    "strict": True,
+                    "schema": _cleanup_schema(OutputSchema),
+                },
+            }
+            try:
+                prompt_input: dict[str, Any] = {
+                    "toolRunExamples": [],  # Taken from history. Contains id, input json, output json
+                    "testRunHistory": [],  # This should contain ordered spans.
+                    "toolInfo": {
+                        "name": function_name,
+                        "description": params.get("description"),
+                        "arguments": params.get(
+                            "arguments"
+                        ),  # arguments could be passed into tool
+                        "settings": params.get(
+                            "settings"
+                        ),  # settings could be passed into tool
+                        "inputSchema": params.get("input_schema"),
+                    },
+                    "toolSchema": params.get("input_schema"),
+                    "currentToolInput": {
+                        "args": args,
+                        "kwargs": kwargs,
+                    },
+                    "agentInfo": {  # This is incomplete
+                        # "agentName": self.evaluation_item.name,  # to be obtained.
+                        "actionName": self.evaluation_item.name,  # Not sure if this is right?
+                        "userInput": self.evaluation_item.inputs,
+                    },
+                    "testRunProctorInstructions": self.evaluation_item.mocking_strategy.prompt,
+                }
+                prompt_input = {
+                    k: json.dumps(pydantic_to_dict_safe(v))
+                    for k, v in prompt_input.items()
+                }
+                response = await llm.chat_completions(
+                    [
+                        {
+                            "role": "user",
+                            "content": PROMPT.format(**prompt_input),
+                        },
+                    ],
+                    response_format=response_format,
+                )
+                mocked_response = OutputSchema(
+                    **json.loads(response.choices[0].message.content)
+                )
+                return mocked_response.response
+            except Exception:
+                raise
+        else:
+            raise UiPathMockingNoMatcherError(
+                f"Method '{function_name}' is not simulated."
+            )
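
The nested `OutputSchema` class is the load-bearing trick here: wrapping the mocked function's return annotation in a throwaway pydantic model yields a strict JSON schema the gateway can enforce, and `mocked_response.response` unwraps the value afterward. A standalone sketch of the same pattern, with an illustrative function that is not part of the package:

    from typing import Any

    from pydantic import BaseModel

    def make_output_schema(func) -> type[BaseModel]:
        # Fall back to Any when the function has no return annotation.
        return_type: Any = func.__annotations__.get("return", None)
        if return_type is None:
            return_type = Any

        class OutputSchema(BaseModel):
            response: return_type  # evaluated eagerly at class creation

        return OutputSchema

    def lookup_price(item: str) -> float:
        ...

    schema = make_output_schema(lookup_price).model_json_schema()
    # schema["properties"]["response"] is {"title": "Response", "type": "number"}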
uipath/_cli/_evals/mocks/mocker.py ADDED
@@ -0,0 +1,29 @@
+"""Mocker definitions and implementations."""
+
+from abc import ABC, abstractmethod
+from collections.abc import Callable
+from typing import Any, TypeVar
+
+T = TypeVar("T")
+R = TypeVar("R")
+
+
+class Mocker(ABC):
+    """Mocker interface."""
+
+    @abstractmethod
+    async def response(
+        self,
+        func: Callable[[T], R],
+        params: dict[str, Any],
+        *args: T,
+        **kwargs,
+    ) -> R:
+        """Respond with mocked response."""
+        raise NotImplementedError()
+
+
+class UiPathMockingNoMatcherError(Exception):
+    """Exception when a mocker is unable to find a match with the invocation."""
+
+    pass
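
Concrete mockers only implement the async `response` hook; context lookup and fallback to the real function live in `mocks.py`. A toy implementation, purely illustrative:

    class EchoMocker(Mocker):
        """Echo the invocation instead of producing a real mock."""

        async def response(self, func, params, *args, **kwargs):
            # Mirror back what would have been called.
            return {
                "function": params.get("name", func.__name__),
                "args": args,
                "kwargs": kwargs,
            }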
uipath/_cli/_evals/mocks/mocker_factory.py ADDED
@@ -0,0 +1,25 @@
+"""Mocker Factory."""
+
+from uipath._cli._evals._models._evaluation_set import (
+    EvaluationItem,
+    LLMMockingStrategy,
+    MockitoMockingStrategy,
+)
+from uipath._cli._evals.mocks.llm_mocker import LLMMocker
+from uipath._cli._evals.mocks.mocker import Mocker
+from uipath._cli._evals.mocks.mockito_mocker import MockitoMocker
+
+
+class MockerFactory:
+    """Mocker factory."""
+
+    @staticmethod
+    def create(evaluation_item: EvaluationItem) -> Mocker:
+        """Create a mocker instance."""
+        match evaluation_item.mocking_strategy:
+            case LLMMockingStrategy():
+                return LLMMocker(evaluation_item)
+            case MockitoMockingStrategy():
+                return MockitoMocker(evaluation_item)
+            case _:
+                raise ValueError("Unknown mocking strategy")
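
Dispatch keys off the concrete class produced by the discriminated union, so wiring up a mocker for an item is a single call. Sketch (the evaluation item is hypothetical):

    mocker = MockerFactory.create(eval_item)  # LLMMocker or MockitoMocker
    # An item whose strategy is UnknownMockingStrategy (or None) raises
    # ValueError; set_evaluation_item in mocks.py catches this, logs a
    # warning, and leaves the mocker context empty.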
uipath/_cli/_evals/mocks/mockito_mocker.py ADDED
@@ -0,0 +1,62 @@
+"""Mockito mocker implementation.
+
+https://mockito-python.readthedocs.io/en/latest/
+"""
+
+from typing import Any, Callable
+
+from hydra.utils import instantiate
+from mockito import invocation, mocking  # type: ignore[import-untyped]
+
+from uipath._cli._evals._models._evaluation_set import (
+    EvaluationItem,
+    MockingAnswerType,
+    MockitoMockingStrategy,
+)
+from uipath._cli._evals.mocks.mocker import Mocker, R, T
+
+
+class Stub:
+    """Stub interface."""
+
+    def __getattr__(self, item):
+        """Return a wrapper function that raises an exception."""
+
+        def func(*_args, **_kwargs):
+            """Not Implemented."""
+            raise NotImplementedError()
+
+        return func
+
+
+class MockitoMocker(Mocker):
+    """Mockito Mocker."""
+
+    def __init__(self, evaluation_item: EvaluationItem):
+        """Instantiate a mockito mocker."""
+        self.evaluation_item = evaluation_item
+        assert isinstance(self.evaluation_item.mocking_strategy, MockitoMockingStrategy)
+
+        self.stub = Stub()
+        mock_obj = mocking.Mock(self.stub)
+
+        for behavior in self.evaluation_item.mocking_strategy.behaviors:
+            stubbed = invocation.StubbedInvocation(mock_obj, behavior.function)(
+                *instantiate(behavior.arguments.args, _convert_="object"),
+                **instantiate(behavior.arguments.kwargs, _convert_="object"),
+            )
+            for answer in behavior.then:
+                if answer.type == MockingAnswerType.RETURN:
+                    stubbed = stubbed.thenReturn(
+                        instantiate(answer.model_dump(), _convert_="object")["value"]
+                    )
+                elif answer.type == MockingAnswerType.RAISE:
+                    stubbed = stubbed.thenRaise(
+                        instantiate(answer.model_dump(), _convert_="object")["value"]
+                    )
+
+    async def response(
+        self, func: Callable[[T], R], params: dict[str, Any], *args: T, **kwargs
+    ) -> R:
+        """Respond with mocked response."""
+        return getattr(self.stub, params["name"])(*args, **kwargs)
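
Answers configured via `thenReturn`/`thenRaise` are consumed in order, mockito-style, with the last answer repeating. Given a behavior like the docstring example in `_evaluation_set.py` (return 3, then raise `NotImplementedError`), the intended call pattern looks roughly like this; `postprocess` here is a hypothetical tool:

    async def demo(mocker: MockitoMocker) -> None:
        def postprocess(x: int) -> int: ...

        # First matching invocation returns the first configured answer.
        assert await mocker.response(postprocess, {"name": "postprocess"}, x=3) == 3
        # The next matching invocation advances to the second answer and raises.
        try:
            await mocker.response(postprocess, {"name": "postprocess"}, x=3)
        except NotImplementedError:
            pass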
uipath/_cli/_evals/mocks/mocks.py ADDED
@@ -0,0 +1,136 @@
+"""Mocking interface."""
+
+import asyncio
+import functools
+import inspect
+import logging
+import threading
+from contextvars import ContextVar
+from typing import Any, Callable, Optional
+
+from pydantic import TypeAdapter
+from pydantic_function_models import ValidatedFunction  # type: ignore[import-untyped]
+
+from uipath._cli._evals._models._evaluation_set import EvaluationItem
+from uipath._cli._evals.mocks.mocker import Mocker
+from uipath._cli._evals.mocks.mocker_factory import MockerFactory
+
+evaluation_context: ContextVar[Optional[EvaluationItem]] = ContextVar(
+    "evaluation", default=None
+)
+
+mocker_context: ContextVar[Optional[Mocker]] = ContextVar("mocker", default=None)
+
+
+def set_evaluation_item(item: EvaluationItem) -> None:
+    """Set an evaluation item within an evaluation set."""
+    evaluation_context.set(item)
+    try:
+        mocker_context.set(MockerFactory.create(item))
+    except Exception:
+        logger.warning(f"Failed to create mocker for evaluation {item.name}")
+        mocker_context.set(None)
+
+
+async def get_mocked_response(
+    func: Callable[[Any], Any], params: dict[str, Any], *args, **kwargs
+) -> Any:
+    """Get a mocked response."""
+    mocker = mocker_context.get()
+    evaluation_item = evaluation_context.get()
+    if mocker is None or evaluation_item is None:
+        # TODO raise a new UiPath exception type
+        raise RuntimeError(f"Evaluation item {func.__name__} has not been evaluated")
+    else:
+        return await mocker.response(func, params, *args, **kwargs)
+
+
+_event_loop = None
+logger = logging.getLogger(__name__)
+
+
+def run_coroutine(coro):
+    """Run a coroutine synchronously."""
+    global _event_loop
+    if not _event_loop or not _event_loop.is_running():
+        _event_loop = asyncio.new_event_loop()
+        threading.Thread(target=_event_loop.run_forever, daemon=True).start()
+    future = asyncio.run_coroutine_threadsafe(coro, _event_loop)
+    return future.result()
+
+
+def mocked_response_decorator(func, params: dict[str, Any]):
+    """Mocked response decorator."""
+
+    async def mock_response_generator(*args, **kwargs):
+        mocked_response = await get_mocked_response(func, params, *args, **kwargs)
+        return_type: Any = func.__annotations__.get("return", None)
+
+        if return_type is not None:
+            mocked_response = TypeAdapter(return_type).validate_python(mocked_response)
+        return mocked_response
+
+    is_async = inspect.iscoroutinefunction(func)
+    if is_async:
+
+        @functools.wraps(func)
+        async def decorated_func(*args, **kwargs):
+            try:
+                return await mock_response_generator(*args, **kwargs)
+            except Exception:
+                logger.warning(
+                    f"Failed to mock response for {func.__name__}. Falling back to func."
+                )
+                return await func(*args, **kwargs)
+    else:
+
+        @functools.wraps(func)
+        def decorated_func(*args, **kwargs):
+            try:
+                return run_coroutine(mock_response_generator(*args, **kwargs))
+            except Exception:
+                logger.warning(
+                    f"Failed to mock response for {func.__name__}. Falling back to func."
+                )
+                return func(*args, **kwargs)
+
+    return decorated_func
+
+
+def mockable(
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+    **kwargs,
+):
+    """Decorate a function to be a mockable."""
+
+    def decorator(func):
+        params = {
+            "name": name or func.__name__,
+            "description": description or func.__doc__,
+            "input_schema": get_input_schema(func),
+            "output_schema": get_output_schema(func),
+            **kwargs,
+        }
+        return mocked_response_decorator(func, params)
+
+    return decorator
+
+
+def get_output_schema(func):
+    """Retrieves the JSON schema for a function's return type hint."""
+    try:
+        adapter = TypeAdapter(inspect.signature(func).return_annotation)
+        return adapter.json_schema()
+    except Exception:
+        logger.warning(f"Unable to extract output schema for function {func.__name__}")
+        return {}
+
+
+def get_input_schema(func):
+    """Retrieves the JSON schema for a function's input type."""
+    try:
+        return ValidatedFunction(func).model.model_json_schema()
+    except Exception:
+        logger.warning(f"Unable to extract input schema for function {func.__name__}")
+        return {}
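
End to end: a tool opts in with `@mockable`, and once the eval runtime has called `set_evaluation_item` (see the `_runtime.py` hunk above), decorated calls are routed through the active mocker, falling back to the real implementation if mocking fails. A usage sketch; the tool function and names here are hypothetical:

    from uipath._cli._evals.mocks.mocks import mockable

    @mockable(name="get_weather", description="Look up the weather for a city.")
    async def get_weather(city: str) -> str:
        # Real implementation, used outside evals or when mocking fails.
        return f"(live) weather for {city}"

    # Inside an eval run, after set_evaluation_item(eval_item) has installed a
    # mocker whose strategy simulates "get_weather", awaiting get_weather("Oslo")
    # returns the mocked value, validated against the `str` return annotation.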
uipath/_services/llm_gateway_service.py CHANGED
@@ -106,35 +106,32 @@ def _cleanup_schema(model_class: type[BaseModel]) -> Dict[str, Any]:
     """
     schema = model_class.model_json_schema()
 
-    def clean_properties(properties):
-        """Clean property definitions by removing titles and cleaning nested items."""
-        cleaned_props = {}
-        for prop_name, prop_def in properties.items():
-            if isinstance(prop_def, dict):
-                cleaned_prop = {}
-                for key, value in prop_def.items():
-                    if key == "title":  # Skip title
-                        continue
-                    elif key == "items" and isinstance(value, dict):
-                        # Clean nested items
-                        cleaned_items = {}
-                        for item_key, item_value in value.items():
-                            if item_key != "title":
-                                cleaned_items[item_key] = item_value
-                        cleaned_prop[key] = cleaned_items
-                    else:
-                        cleaned_prop[key] = value
-                cleaned_props[prop_name] = cleaned_prop
-        return cleaned_props
+    def clean_type(type_def):
+        """Clean property definitions by removing titles and cleaning nested items. Additionally, `additionalProperties` is ensured on all objects."""
+        cleaned_type = {}
+        for key, value in type_def.items():
+            if key == "title" or key == "properties":
+                continue
+            else:
+                cleaned_type[key] = value
+        if type_def.get("type") == "object" and "additionalProperties" not in type_def:
+            cleaned_type["additionalProperties"] = False
+
+        if "properties" in type_def:
+            properties = type_def.get("properties", {})
+            for key, value in properties.items():
+                properties[key] = clean_type(value)
+            cleaned_type["properties"] = properties
+
+        if "$defs" in type_def:
+            cleaned_defs = {}
+            for key, value in type_def["$defs"].items():
+                cleaned_defs[key] = clean_type(value)
+            cleaned_type["$defs"] = cleaned_defs
+        return cleaned_type
 
     # Create clean schema
-    clean_schema = {
-        "type": "object",
-        "properties": clean_properties(schema.get("properties", {})),
-        "required": schema.get("required", []),
-        "additionalProperties": False,
-    }
-
+    clean_schema = clean_type(schema)
     return clean_schema
 
 
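The rewritten `clean_type` recurses through `properties` and `$defs`, dropping `title` keys everywhere and forcing `additionalProperties: False` onto every object, whereas the old `clean_properties` only touched top-level properties and never saw `$defs`. A quick check of the new behavior, with an illustrative nested model:

    from pydantic import BaseModel

    from uipath._services.llm_gateway_service import _cleanup_schema

    class Inner(BaseModel):
        value: int

    class Outer(BaseModel):
        inner: Inner

    cleaned = _cleanup_schema(Outer)
    assert "title" not in cleaned
    assert cleaned["additionalProperties"] is False
    # Nested definitions are now cleaned too:
    assert cleaned["$defs"]["Inner"]["additionalProperties"] is False
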
uipath/agent/_utils.py CHANGED
@@ -4,13 +4,17 @@ from pathlib import PurePath
 from httpx import Response
 from pydantic import TypeAdapter
 
+from uipath._cli._evals._models._evaluation_set import LLMMockingStrategy
 from uipath._cli._utils._studio_project import (
     ProjectFile,
     ProjectFolder,
     StudioClient,
     resolve_path,
 )
-from uipath.agent.models.agent import AgentDefinition
+from uipath.agent.models.agent import (
+    AgentDefinition,
+    UnknownAgentDefinition,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -23,7 +27,7 @@ async def get_file(
     return await studio_client.download_file_async(resolved.id)
 
 
-async def load_agent_definition(project_id: str):
+async def load_agent_definition(project_id: str) -> AgentDefinition:
     studio_client = StudioClient(project_id=project_id)
     project_structure = await studio_client.get_project_structure_async()
 
@@ -100,4 +104,21 @@ async def load_agent_definition(project_id: str):
         "evaluationSets": evaluation_sets,
         **agent,
     }
-    return TypeAdapter(AgentDefinition).validate_python(agent_definition)
+    agent_definition = TypeAdapter(AgentDefinition).validate_python(agent_definition)
+    if agent_definition and isinstance(agent_definition, UnknownAgentDefinition):
+        if agent_definition.evaluation_sets:
+            for evaluation_set in agent_definition.evaluation_sets:
+                for evaluation in evaluation_set.evaluations:
+                    if not evaluation.mocking_strategy:
+                        # Migrate lowCode evaluation definitions
+                        if evaluation.model_extra.get("simulateTools", False):
+                            tools_to_simulate = evaluation.model_extra.get(
+                                "toolsToSimulate", []
+                            )
+                            prompt = evaluation.model_extra.get(
+                                "simulationInstructions", ""
+                            )
+                            evaluation.mocking_strategy = LLMMockingStrategy(
+                                prompt=prompt, tools_to_simulate=tools_to_simulate
+                            )
+    return agent_definition
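
The migration pass leans on `EvaluationItem` now allowing extras: the legacy lowCode fields land in `model_extra` and are folded into an `LLMMockingStrategy`. Roughly, a legacy item carrying these fields (values hypothetical):

    legacy_extra = {
        "simulateTools": True,
        "simulationInstructions": "Answer as the CRM would.",
        "toolsToSimulate": [{"name": "lookup_account"}],
    }
    # ...ends up, after load_agent_definition, with:
    # evaluation.mocking_strategy == LLMMockingStrategy(
    #     prompt="Answer as the CRM would.",
    #     tools_to_simulate=[EvaluationSimulationTool(name="lookup_account")],
    # )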
uipath/agent/models/agent.py CHANGED
@@ -55,6 +55,7 @@ class AgentToolType(str, Enum):
 
     AGENT = "agent"
     INTEGRATION = "integration"
+    PROCESS = "process"
 
 
 class AgentToolSettings(BaseModel):
uipath-2.1.72.dist-info/METADATA → uipath-2.1.73.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath
-Version: 2.1.72
+Version: 2.1.73
 Summary: Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools.
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-python
@@ -17,9 +17,12 @@ Requires-Python: >=3.10
 Requires-Dist: azure-monitor-opentelemetry>=1.6.8
 Requires-Dist: click>=8.1.8
 Requires-Dist: httpx>=0.28.1
+Requires-Dist: hydra-core>=1.3.2
+Requires-Dist: mockito>=1.5.4
 Requires-Dist: opentelemetry-instrumentation>=0.52b1
 Requires-Dist: opentelemetry-sdk>=1.31.1
 Requires-Dist: pathlib>=1.0.1
+Requires-Dist: pydantic-function-models>=0.1.10
 Requires-Dist: pydantic>=2.11.1
 Requires-Dist: pyperclip>=1.9.0
 Requires-Dist: python-dotenv>=1.0.1
uipath-2.1.72.dist-info/RECORD → uipath-2.1.73.dist-info/RECORD
@@ -46,13 +46,19 @@ uipath/_cli/_dev/_terminal/_utils/_exporter.py,sha256=oI6D_eMwrh_2aqDYUh4GrJg8VL
 uipath/_cli/_dev/_terminal/_utils/_logger.py,sha256=_ipTl_oAiMF9I7keGt2AAFAMz40DNLVMVkoiq-07UAU,2943
 uipath/_cli/_evals/_evaluator_factory.py,sha256=Gycv94VtGOpMir_Gba-UoiAyrSRfbSfe8_pTfjzcA9Q,3875
 uipath/_cli/_evals/_progress_reporter.py,sha256=hpSt0CXpIoFJGsbqZkqmwyGO_TBNesbWKlvDJUEDxd8,16455
-uipath/_cli/_evals/_runtime.py,sha256=WKcBT6DGzNRjgEOpmH0b7RoEbEsHMyAbcAMs8b_CAI0,11418
-uipath/_cli/_evals/_models/_evaluation_set.py,sha256=RRDaP0X4E8kueL0Io9yB4y8akx3gKZhoSIgTNhgoN9Y,2407
+uipath/_cli/_evals/_runtime.py,sha256=fo-wtUxZ9bvo1ftt_yu7o2oUBvrMqVNKCjaGH556-Vo,11506
+uipath/_cli/_evals/_models/_evaluation_set.py,sha256=XgPNLWciE4FgCYzZXV2kRYHzdtbc33FWSQmZQqVSdMk,4747
 uipath/_cli/_evals/_models/_evaluator.py,sha256=fuC3UOYwPD4d_wdynHeLSCzbu82golNAnnPnxC8Y4rk,3315
 uipath/_cli/_evals/_models/_evaluator_base_params.py,sha256=lTYKOV66tcjW85KHTyOdtF1p1VDaBNemrMAvH8bFIFc,382
 uipath/_cli/_evals/_models/_output.py,sha256=LjwMBGI78sDFa2Dl8b9ReXJmjig57pdLWpuiwChrRLo,3096
 uipath/_cli/_evals/_models/_sw_reporting.py,sha256=tSBLQFAdOIun8eP0vsqt56K6bmCZz_uMaWI3hskg_24,536
 uipath/_cli/_evals/_models/_trajectory_span.py,sha256=8ukM8sB9rvzBMHfC_gnexAC3xlp4uMDevKZrRzcgrm4,3637
+uipath/_cli/_evals/mocks/__init__.py,sha256=2WXwAy_oZw5bKp6L0HB13QygCJeftOB_Bget0AI6Gik,32
+uipath/_cli/_evals/mocks/llm_mocker.py,sha256=EtfPUhKcBRQ7vQPQdF3QanKIx2vzzYWA9PfdjKeJnTw,6108
+uipath/_cli/_evals/mocks/mocker.py,sha256=FjSIqXF5HzQRi1eWrFfXBz-cYZEu5TiMVm2RsKnSEWI,626
+uipath/_cli/_evals/mocks/mocker_factory.py,sha256=V5QKSTtQxztTo4-fK1TyAaXw2Z3mHf2UC5mXqwuUGTs,811
+uipath/_cli/_evals/mocks/mockito_mocker.py,sha256=LtYT6lJM9vc3qtbSZJcUeCzDn4zarkBVj7In_EX7kYY,2087
+uipath/_cli/_evals/mocks/mocks.py,sha256=WqjWtHqKQXAsO1Wwom3Zcr1T09GQygwBWVp-EsxdW8o,4443
 uipath/_cli/_push/sw_file_handler.py,sha256=iE8Sk1Z-9hxmLFFj3j-k4kTK6TzNFP6hUCmxTudG6JQ,18251
 uipath/_cli/_runtime/_contracts.py,sha256=D57cq5V5CZ9p13n_vRDHRcwyJYQUcJLlAMbAOzIiBNI,28932
 uipath/_cli/_runtime/_escalation.py,sha256=x3vI98qsfRA-fL_tNkRVTFXioM5Gv2w0GFcXJJ5eQtg,7981
@@ -95,7 +101,7 @@ uipath/_services/documents_service.py,sha256=UnFS8EpOZ_Ng2TZk3OiJJ3iNANvFs7QxuoG
 uipath/_services/entities_service.py,sha256=QKCLE6wRgq3HZraF-M2mljy-8il4vsNHrQhUgkewVVk,14028
 uipath/_services/folder_service.py,sha256=9JqgjKhWD-G_KUnfUTP2BADxL6OK9QNZsBsWZHAULdE,2749
 uipath/_services/jobs_service.py,sha256=tTZNsdZKN3uP7bWPQyBCpJeQxTfuOWbKYOR4L-_yJo4,32736
-uipath/_services/llm_gateway_service.py,sha256=Ka1WCoOBKJfIlm7H9NmbKAPr27UYDYQCTExMbLKMY68,24249
+uipath/_services/llm_gateway_service.py,sha256=IBpkG2N2_9LGHkbajBoet9F26DqILC8zL4uHnnniQvg,24124
 uipath/_services/processes_service.py,sha256=O_uHgQ1rnwiV5quG0OQqabAnE6Rf6cWrMENYY2jKWt8,8585
 uipath/_services/queues_service.py,sha256=VaG3dWL2QK6AJBOLoW2NQTpkPfZjsqsYPl9-kfXPFzA,13534
 uipath/_utils/__init__.py,sha256=VdcpnENJIa0R6Y26NoxY64-wUVyvb4pKfTh1wXDQeMk,526
@@ -109,7 +115,7 @@ uipath/_utils/_ssl_context.py,sha256=xSYitos0eJc9cPHzNtHISX9PBvL6D2vas5G_GiBdLp8
 uipath/_utils/_url.py,sha256=-4eluSrIZCUlnQ3qU17WPJkgaC2KwF9W5NeqGnTNGGo,2512
 uipath/_utils/_user_agent.py,sha256=pVJkFYacGwaQBomfwWVAvBQgdBUo62e4n3-fLIajWUU,563
 uipath/_utils/constants.py,sha256=2xLT-1aW0aJS2USeZbK-7zRgyyi1bgV60L0rtQOUqOM,1721
-uipath/agent/_utils.py,sha256=frpg3LYJofdGWS5w44PsBaNHiW9GRkLokvNnf2D4a54,3309
+uipath/agent/_utils.py,sha256=mf4CtOZch1SFR83Z4QIkjXqMKK8Pm_xV8Q0-RedDkcE,4443
 uipath/agent/conversation/__init__.py,sha256=5hK-Iz131mnd9m6ANnpZZffxXZLVFDQ9GTg5z9ik1oQ,5265
 uipath/agent/conversation/async_stream.py,sha256=BA_8uU1DgE3VpU2KkJj0rkI3bAHLk_ZJKsajR0ipMpo,2055
 uipath/agent/conversation/citation.py,sha256=42dGv-wiYx3Lt7MPuPCFTkjAlSADFSzjyNXuZHdxqvo,2253
@@ -120,7 +126,7 @@ uipath/agent/conversation/exchange.py,sha256=nuk1tEMBHc_skrraT17d8U6AtyJ3h07ExGQ
 uipath/agent/conversation/message.py,sha256=1ZkEs146s79TrOAWCQwzBAEJvjAu4lQBpJ64tKXDgGE,2142
 uipath/agent/conversation/meta.py,sha256=3t0eS9UHoAPHre97QTUeVbjDhnMX4zj4-qG6ju0B8wY,315
 uipath/agent/conversation/tool.py,sha256=ol8XI8AVd-QNn5auXNBPcCzOkh9PPFtL7hTK3kqInkU,2191
-uipath/agent/models/agent.py,sha256=ie_N47K-txv9Q9kXeynI1FsO2l120JYmjPXNm-e6yMo,12952
+uipath/agent/models/agent.py,sha256=a4tE5LTne0ghpb9qeKkvBAotFJSC88hQP-y7OKGrYXk,12976
 uipath/eval/_helpers/__init__.py,sha256=GSmZMryjuO3Wo_zdxZdrHCRRsgOxsVFYkYgJ15YNC3E,86
 uipath/eval/_helpers/helpers.py,sha256=iE2HHdMiAdAMLqxHkPKHpfecEtAuN5BTBqvKFTI8ciE,1315
 uipath/eval/evaluators/__init__.py,sha256=DJAAhgv0I5UfBod4sGnSiKerfrz1iMmk7GNFb71V8eI,494
@@ -159,8 +165,8 @@ uipath/tracing/_traced.py,sha256=yBIY05PCCrYyx50EIHZnwJaKNdHPNx-YTR1sHQl0a98,199
 uipath/tracing/_utils.py,sha256=qd7N56tg6VXQ9pREh61esBgUWLNA0ssKsE0QlwrRWFM,11974
 uipath/utils/__init__.py,sha256=VD-KXFpF_oWexFg6zyiWMkxl2HM4hYJMIUDZ1UEtGx0,105
 uipath/utils/_endpoints_manager.py,sha256=iRTl5Q0XAm_YgcnMcJOXtj-8052sr6jpWuPNz6CgT0Q,8408
-uipath-2.1.72.dist-info/METADATA,sha256=sIUVm86o9y3uorIq58WEe4g-58aQ_tD0mp_8TPcrhHc,6482
-uipath-2.1.72.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-uipath-2.1.72.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
-uipath-2.1.72.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
-uipath-2.1.72.dist-info/RECORD,,
+uipath-2.1.73.dist-info/METADATA,sha256=JMXtiT-80R6YqDxcl-MdGOYRi8HDuqGSh1EpcEJ7o_Q,6593
+uipath-2.1.73.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath-2.1.73.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
+uipath-2.1.73.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
+uipath-2.1.73.dist-info/RECORD,,