uipath 2.1.70__py3-none-any.whl → 2.1.72__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uipath/_cli/_dev/_terminal/_components/_history.py +25 -5
- uipath/_cli/_evals/_evaluator_factory.py +35 -73
- uipath/_cli/_evals/_models/_evaluation_set.py +25 -15
- uipath/_cli/_evals/_models/_evaluator.py +106 -0
- uipath/_cli/_runtime/_contracts.py +1 -0
- uipath/_cli/_runtime/_logging.py +112 -31
- uipath/_cli/cli_pull.py +1 -1
- uipath/_services/context_grounding_service.py +304 -22
- uipath/_utils/constants.py +12 -0
- uipath/agent/_utils.py +48 -0
- uipath/agent/models/agent.py +10 -0
- {uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/METADATA +1 -1
- {uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/RECORD +16 -15
- {uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/WHEEL +0 -0
- {uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/entry_points.txt +0 -0
- {uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/licenses/LICENSE +0 -0
uipath/_cli/_dev/_terminal/_components/_history.py
CHANGED
@@ -73,13 +73,33 @@ class RunHistoryPanel(Container):
         self.refresh_list()
 
     def _refresh_running_items(self) -> None:
+        """Refresh display names for running items only."""
         if not any(run.status == "running" for run in self.runs):
-            return None
+            return None
 
-
+        try:
+            run_list = self.query_one("#run-list", ListView)
+        except Exception:
+            return None
+
+        # Take a snapshot of items to avoid mid-iteration changes
+        items_snapshot = list(run_list.children)
+
+        for item in items_snapshot:
+            if not hasattr(item, "run_id"):
+                continue
+
+            run = self.get_run_by_id(item.run_id)
+            if not run or run.status != "running":
+                continue
+
+            # Check if item still exists in the list (wasn't removed)
+            if item not in run_list.children:
+                continue
 
-
-            run = self.get_run_by_id(item.run_id)  # type: ignore[attr-defined]
-            if run and run.status == "running":
+            try:
                 static = item.query_one(Static)
                 static.update(run.display_name)
+            except Exception:
+                # Item structure changed or was removed
+                continue
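The rewritten `_refresh_running_items` defends against the widget tree changing mid-refresh: it snapshots the `ListView` children, re-checks membership before each update, and wraps widget queries in `try/except`. A minimal sketch of the same snapshot-and-revalidate pattern on plain objects (all names here are hypothetical, not from the SDK):

```python
# Sketch only: the snapshot-and-revalidate idea from the diff, minus Textual.
def refresh_running(live_items: list, get_run_by_id) -> None:
    snapshot = list(live_items)  # iterate a copy, not the live collection
    for item in snapshot:
        run = get_run_by_id(getattr(item, "run_id", None))
        if not run or run.status != "running":
            continue
        if item not in live_items:  # item may have been removed after the snapshot
            continue
        item.label = run.display_name  # the real code updates a Textual Static
```

Iterating a copy keeps concurrent removals from invalidating the loop; the membership re-check then skips anything removed after the snapshot was taken.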
uipath/_cli/_evals/_evaluator_factory.py
CHANGED
@@ -1,5 +1,14 @@
 from typing import Any, Dict
 
+from pydantic import TypeAdapter
+
+from uipath._cli._evals._models._evaluator import (
+    EqualsEvaluatorParams,
+    Evaluator,
+    JsonSimilarityEvaluatorParams,
+    LLMEvaluatorParams,
+    TrajectoryEvaluatorParams,
+)
 from uipath._cli._evals._models._evaluator_base_params import EvaluatorBaseParams
 from uipath.eval.evaluators import (
     BaseEvaluator,
@@ -8,7 +17,6 @@ from uipath.eval.evaluators import (
     LlmAsAJudgeEvaluator,
     TrajectoryEvaluator,
 )
-from uipath.eval.models.models import EvaluatorCategory, EvaluatorType
 
 
 class EvaluatorFactory:
@@ -35,110 +43,64 @@ class EvaluatorFactory:
         if not id:
             raise ValueError("Evaluator configuration must include 'id' field")
 
-
-
-
-
-
-
-
-
-
-
-
-            evaluator_type=evaluator_type,
-            name=name,
-            description=description,
-            created_at=created_at,
-            updated_at=updated_at,
-            target_output_key=target_output_key,
-        )
-
-        match category:
-            case EvaluatorCategory.Deterministic:
-                if evaluator_type == evaluator_type.Equals:
-                    return EvaluatorFactory._create_exact_match_evaluator(
-                        base_params, data
-                    )
-                elif evaluator_type == evaluator_type.JsonSimilarity:
-                    return EvaluatorFactory._create_json_similarity_evaluator(
-                        base_params, data
-                    )
-                else:
-                    raise ValueError(
-                        f"Unknown evaluator type {evaluator_type} for category {category}"
-                    )
-            case EvaluatorCategory.LlmAsAJudge:
-                return EvaluatorFactory._create_llm_as_judge_evaluator(
-                    base_params, data
-                )
-            case EvaluatorCategory.AgentScorer:
-                raise NotImplementedError()
-            case EvaluatorCategory.Trajectory:
-                return EvaluatorFactory._create_trajectory_evaluator(base_params, data)
+        params: EvaluatorBaseParams = TypeAdapter(Evaluator).validate_python(data)
+
+        match params:
+            case EqualsEvaluatorParams():
+                return EvaluatorFactory._create_exact_match_evaluator(params)
+            case JsonSimilarityEvaluatorParams():
+                return EvaluatorFactory._create_json_similarity_evaluator(params)
+            case LLMEvaluatorParams():
+                return EvaluatorFactory._create_llm_as_judge_evaluator(params)
+            case TrajectoryEvaluatorParams():
+                return EvaluatorFactory._create_trajectory_evaluator(params)
             case _:
-                raise ValueError(f"Unknown evaluator category: {
+                raise ValueError(f"Unknown evaluator category: {params}")
 
     @staticmethod
     def _create_exact_match_evaluator(
-
+        params: EqualsEvaluatorParams,
     ) -> ExactMatchEvaluator:
         """Create a deterministic evaluator."""
-        return ExactMatchEvaluator(
-            **base_params.model_dump(),
-        )
+        return ExactMatchEvaluator(**params.model_dump())
 
     @staticmethod
     def _create_json_similarity_evaluator(
-
+        params: JsonSimilarityEvaluatorParams,
     ) -> JsonSimilarityEvaluator:
         """Create a deterministic evaluator."""
-        return JsonSimilarityEvaluator(
-            **base_params.model_dump(),
-        )
+        return JsonSimilarityEvaluator(**params.model_dump())
 
     @staticmethod
     def _create_llm_as_judge_evaluator(
-
+        params: LLMEvaluatorParams,
     ) -> LlmAsAJudgeEvaluator:
         """Create an LLM-as-a-judge evaluator."""
-
-        if not prompt:
+        if not params.prompt:
            raise ValueError("LLM evaluator must include 'prompt' field")
 
-
-        if not model:
+        if not params.model:
            raise ValueError("LLM evaluator must include 'model' field")
-        if model == "same-as-agent":
+        if params.model == "same-as-agent":
            raise ValueError(
                "'same-as-agent' model option is not supported by coded agents evaluations. Please select a specific model for the evaluator."
            )
 
-        return LlmAsAJudgeEvaluator(
-            **base_params.model_dump(),
-            prompt=prompt,
-            model=model,
-        )
+        return LlmAsAJudgeEvaluator(**params.model_dump())
 
     @staticmethod
     def _create_trajectory_evaluator(
-
+        params: TrajectoryEvaluatorParams,
     ) -> TrajectoryEvaluator:
         """Create a trajectory evaluator."""
-
-        if not prompt:
+        if not params.prompt:
            raise ValueError("Trajectory evaluator must include 'prompt' field")
 
-
-        if not model:
+        if not params.model:
            raise ValueError("LLM evaluator must include 'model' field")
-        if model == "same-as-agent":
+        if params.model == "same-as-agent":
            raise ValueError(
                "'same-as-agent' model option is not supported by coded agents evaluations. Please select a specific model for the evaluator."
            )
 
-        return TrajectoryEvaluator(
-            **base_params.model_dump(),
-            prompt=prompt,
-            model=model,
-        )
+        return TrajectoryEvaluator(**params.model_dump())
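The factory change replaces hand-rolled field extraction with a single validate-then-dispatch step: `TypeAdapter(Evaluator).validate_python(data)` yields a concrete params class, and `match` selects the constructor. The same dispatch shape can be sketched outside the factory (a hypothetical helper, using only the classes defined in `_evaluator.py` below):

```python
def describe(params: EvaluatorBaseParams) -> str:
    # Mirrors the factory's match: the concrete subclass returned by
    # TypeAdapter(Evaluator).validate_python(...) carries all typed fields.
    match params:
        case LLMEvaluatorParams(model=model):
            return f"LLM-as-a-judge scored by {model}"
        case TrajectoryEvaluatorParams():
            return "trajectory evaluator"
        case EqualsEvaluatorParams() | JsonSimilarityEvaluatorParams():
            return "deterministic evaluator"
        case _:
            return f"unrecognized evaluator params: {params!r}"
```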
uipath/_cli/_evals/_models/_evaluation_set.py
CHANGED
@@ -5,6 +5,10 @@ from pydantic import BaseModel, ConfigDict, Field
 from pydantic.alias_generators import to_camel
 
 
+class EvaluationSimulationTool(BaseModel):
+    name: str = Field(..., alias="name")
+
+
 class EvaluationItem(BaseModel):
     """Individual evaluation item within an evaluation set."""
 
@@ -14,15 +18,19 @@ class EvaluationItem(BaseModel):
     name: str
     inputs: Dict[str, Any]
     expected_output: Dict[str, Any]
-    expected_agent_behavior: str = ""
-    simulation_instructions: str = ""
-    simulate_input: bool = False
-    input_generation_instructions: str =
-
-
-
-
-
+    expected_agent_behavior: str = Field(default="", alias="expectedAgentBehavior")
+    simulation_instructions: str = Field(default="", alias="simulationInstructions")
+    simulate_input: bool = Field(default=False, alias="simulateInput")
+    input_generation_instructions: str = Field(
+        default="", alias="inputGenerationInstructions"
+    )
+    simulate_tools: bool = Field(default=False, alias="simulateTools")
+    tools_to_simulate: List[EvaluationSimulationTool] = Field(
+        default_factory=list, alias="toolsToSimulate"
+    )
+    eval_set_id: str = Field(alias="evalSetId")
+    created_at: str = Field(alias="createdAt")
+    updated_at: str = Field(alias="updatedAt")
 
 
 class EvaluationSet(BaseModel):
@@ -31,15 +39,17 @@ class EvaluationSet(BaseModel):
     model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
 
     id: str
-    file_name: str
+    file_name: str = Field(..., alias="fileName")
     evaluator_refs: List[str] = Field(default_factory=list)
     evaluations: List[EvaluationItem] = Field(default_factory=list)
     name: str
-    batch_size: int = 10
-    timeout_minutes: int = 20
-    model_settings: List[Dict[str, Any]] = Field(
-
-
+    batch_size: int = Field(10, alias="batchSize")
+    timeout_minutes: int = Field(default=20, alias="timeoutMinutes")
+    model_settings: List[Dict[str, Any]] = Field(
+        default_factory=list, alias="modelSettings"
+    )
+    created_at: str = Field(alias="createdAt")
+    updated_at: str = Field(alias="updatedAt")
 
     def extract_selected_evals(self, eval_ids) -> None:
         selected_evals: list[EvaluationItem] = []
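With the aliases spelled out, camelCase JSON produced by Studio populates the snake_case fields directly. A small validation sketch (values invented; this assumes `EvaluationItem` keeps the same `to_camel` configuration shown on `EvaluationSet`, so `expectedOutput` also maps without an explicit alias):

```python
item = EvaluationItem.model_validate({
    "id": "eval-item-1",                     # field defined outside this hunk
    "name": "happy-path",
    "inputs": {"question": "hi"},
    "expectedOutput": {"answer": "hello"},
    "simulateTools": True,
    "toolsToSimulate": [{"name": "web-search"}],
    "evalSetId": "set-1",
    "createdAt": "2025-01-01T00:00:00Z",
    "updatedAt": "2025-01-01T00:00:00Z",
})
assert item.tools_to_simulate[0].name == "web-search"
```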
uipath/_cli/_evals/_models/_evaluator.py
ADDED
@@ -0,0 +1,106 @@
+from typing import Annotated, Any, Literal, Union
+
+from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag
+
+from uipath.eval.models.models import EvaluatorCategory, EvaluatorType
+
+
+class EvaluatorBaseParams(BaseModel):
+    """Parameters for initializing the base evaluator."""
+
+    id: str
+    name: str
+    description: str
+    evaluator_type: EvaluatorType = Field(..., alias="type")
+    created_at: str = Field(..., alias="createdAt")
+    updated_at: str = Field(..., alias="updatedAt")
+    target_output_key: str = Field(..., alias="targetOutputKey")
+    file_name: str = Field(..., alias="fileName")
+
+
+class LLMEvaluatorParams(EvaluatorBaseParams):
+    category: Literal[EvaluatorCategory.LlmAsAJudge] = Field(..., alias="category")
+    prompt: str = Field(..., alias="prompt")
+    model: str = Field(..., alias="model")
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class TrajectoryEvaluatorParams(EvaluatorBaseParams):
+    category: Literal[EvaluatorCategory.Trajectory] = Field(..., alias="category")
+    prompt: str = Field(..., alias="prompt")
+    model: str = Field(..., alias="model")
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class EqualsEvaluatorParams(EvaluatorBaseParams):
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class JsonSimilarityEvaluatorParams(EvaluatorBaseParams):
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class UnknownEvaluatorParams(EvaluatorBaseParams):
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+def evaluator_discriminator(data: Any) -> str:
+    if isinstance(data, dict):
+        category = data.get("category")
+        evaluator_type = data.get("type")
+        match category:
+            case EvaluatorCategory.LlmAsAJudge:
+                return "LLMEvaluatorParams"
+            case EvaluatorCategory.Trajectory:
+                return "TrajectoryEvaluatorParams"
+            case EvaluatorCategory.Deterministic:
+                match evaluator_type:
+                    case EvaluatorType.Equals:
+                        return "EqualsEvaluatorParams"
+                    case EvaluatorType.JsonSimilarity:
+                        return "JsonSimilarityEvaluatorParams"
+                    case _:
+                        return "UnknownEvaluatorParams"
+            case _:
+                return "UnknownEvaluatorParams"
+    else:
+        return "UnknownEvaluatorParams"
+
+
+Evaluator = Annotated[
+    Union[
+        Annotated[
+            LLMEvaluatorParams,
+            Tag("LLMEvaluatorParams"),
+        ],
+        Annotated[
+            TrajectoryEvaluatorParams,
+            Tag("TrajectoryEvaluatorParams"),
+        ],
+        Annotated[
+            EqualsEvaluatorParams,
+            Tag("EqualsEvaluatorParams"),
+        ],
+        Annotated[
+            JsonSimilarityEvaluatorParams,
+            Tag("JsonSimilarityEvaluatorParams"),
+        ],
+        Annotated[
+            UnknownEvaluatorParams,
+            Tag("UnknownEvaluatorParams"),
+        ],
+    ],
+    Field(discriminator=Discriminator(evaluator_discriminator)),
+]
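Because the union uses a callable `Discriminator`, unrecognized payloads degrade to `UnknownEvaluatorParams` instead of failing tag resolution, and the routing function can be exercised directly:

```python
# Direct checks against evaluator_discriminator as defined above.
assert evaluator_discriminator({"category": EvaluatorCategory.LlmAsAJudge}) == "LLMEvaluatorParams"
assert evaluator_discriminator({"category": "no-such-category"}) == "UnknownEvaluatorParams"
assert evaluator_discriminator("not-a-dict") == "UnknownEvaluatorParams"
```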
uipath/_cli/_runtime/_contracts.py
CHANGED
@@ -525,6 +525,7 @@ class UiPathBaseRuntime(ABC):
             dir=self.context.runtime_dir,
             file=self.context.logs_file,
             job_id=self.context.job_id,
+            execution_id=self.context.execution_id,
             is_debug_run=self.is_debug_run(),
             log_handler=self.context.log_handler,
         )
uipath/_cli/_runtime/_logging.py
CHANGED
@@ -1,8 +1,14 @@
 import logging
 import os
 import sys
+from contextvars import ContextVar
 from typing import Optional, TextIO, Union, cast
 
+# Context variable to track current execution_id
+current_execution_id: ContextVar[Optional[str]] = ContextVar(
+    "current_execution_id", default=None
+)
+
 
 class PersistentLogsHandler(logging.FileHandler):
     """A simple log handler that always writes to a single file without rotation."""
@@ -20,6 +26,30 @@ class PersistentLogsHandler(logging.FileHandler):
         self.setFormatter(self.formatter)
 
 
+class ExecutionContextFilter(logging.Filter):
+    """Filter that only allows logs from a specific execution context."""
+
+    def __init__(self, execution_id: str):
+        super().__init__()
+        self.execution_id = execution_id
+
+    def filter(self, record: logging.LogRecord) -> bool:
+        """Allow logs that have matching execution_id attribute or context."""
+        # First check if record has execution_id attribute
+        record_execution_id = getattr(record, "execution_id", None)
+        if record_execution_id == self.execution_id:
+            return True
+
+        # Fall back to context variable
+        ctx_execution_id = current_execution_id.get()
+        if ctx_execution_id == self.execution_id:
+            # Inject execution_id into record for downstream handlers
+            record.execution_id = self.execution_id
+            return True
+
+        return False
+
+
 class LogsInterceptor:
     """Intercepts all logging and stdout/stderr, routing to either persistent log files or stdout based on whether it's running as a job or not."""
 
@@ -31,6 +61,7 @@ class LogsInterceptor:
         job_id: Optional[str] = None,
         is_debug_run: bool = False,
         log_handler: Optional[logging.Handler] = None,
+        execution_id: Optional[str] = None,
     ):
         """Initialize the log interceptor.
 
@@ -41,9 +72,11 @@ class LogsInterceptor:
             job_id (str, optional): If provided, logs go to file; otherwise, to stdout.
             is_debug_run (bool, optional): If True, log the output to stdout/stderr.
             log_handler (logging.Handler, optional): Custom log handler to use.
+            execution_id (str, optional): Unique identifier for this execution context.
         """
         min_level = min_level or "INFO"
         self.job_id = job_id
+        self.execution_id = execution_id
 
         # Convert to numeric level for consistent comparison
         self.numeric_min_level = getattr(logging, min_level.upper(), logging.INFO)
@@ -81,6 +114,12 @@ class LogsInterceptor:
             self.log_handler = PersistentLogsHandler(file=log_file)
 
         self.log_handler.setLevel(self.numeric_min_level)
+
+        # Add execution context filter if execution_id provided
+        if execution_id:
+            self.execution_filter = ExecutionContextFilter(execution_id)
+            self.log_handler.addFilter(self.execution_filter)
+
         self.logger = logging.getLogger("runtime")
         self.patched_loggers: set[str] = set()
 
@@ -95,22 +134,37 @@ class LogsInterceptor:
 
     def setup(self) -> None:
         """Configure logging to use our persistent handler."""
-        #
-        if self.
+        # Set the context variable for this execution
+        if self.execution_id:
+            current_execution_id.set(self.execution_id)
+
+        # Only use global disable if we're not in a parallel execution context
+        if not self.execution_id and self.numeric_min_level > logging.NOTSET:
            logging.disable(self.numeric_min_level - 1)
 
         # Set root logger level
         self.root_logger.setLevel(self.numeric_min_level)
 
-
-
+        if self.execution_id:
+            # Parallel execution mode: add our handler without removing others
+            if self.log_handler not in self.root_logger.handlers:
+                self.root_logger.addHandler(self.log_handler)
+
+            # Set up propagation for all existing loggers
+            for logger_name in logging.root.manager.loggerDict:
+                logger = logging.getLogger(logger_name)
+                # Keep propagation enabled so logs flow to all handlers
+                self.patched_loggers.add(logger_name)
+        else:
+            # Single execution mode: remove all handlers and add only ours
+            self._clean_all_handlers(self.root_logger)
 
-
-
-
-
-
-
+            # Set up propagation for all existing loggers
+            for logger_name in logging.root.manager.loggerDict:
+                logger = logging.getLogger(logger_name)
+                logger.propagate = False  # Prevent double-logging
+                self._clean_all_handlers(logger)
+                self.patched_loggers.add(logger_name)
 
         # Set up stdout/stderr redirection
         self._redirect_stdout_stderr()
@@ -130,7 +184,7 @@ class LogsInterceptor:
             self.level = level
             self.min_level = min_level
             self.buffer = ""
-            self.sys_file = sys_file
+            self.sys_file = sys_file
 
         def write(self, message: str) -> None:
             self.buffer += message
@@ -138,7 +192,7 @@ class LogsInterceptor:
                 line, self.buffer = self.buffer.split("\n", 1)
                 # Only log if the message is not empty and the level is sufficient
                 if line and self.level >= self.min_level:
-                    #
+                    # The context variable is automatically available here
                     self.logger._log(self.level, line, ())
 
         def flush(self) -> None:
@@ -160,14 +214,21 @@ class LogsInterceptor:
         def writable(self) -> bool:
             return True
 
-        # Set up stdout and stderr loggers
+        # Set up stdout and stderr loggers
         stdout_logger = logging.getLogger("stdout")
-        stdout_logger.propagate = False
-        self._clean_all_handlers(stdout_logger)
-
         stderr_logger = logging.getLogger("stderr")
+
+        stdout_logger.propagate = False
         stderr_logger.propagate = False
-
+
+        if self.execution_id:
+            if self.log_handler not in stdout_logger.handlers:
+                stdout_logger.addHandler(self.log_handler)
+            if self.log_handler not in stderr_logger.handlers:
+                stderr_logger.addHandler(self.log_handler)
+        else:
+            self._clean_all_handlers(stdout_logger)
+            self._clean_all_handlers(stderr_logger)
 
         # Use the min_level in the LoggerWriter to filter messages
         sys.stdout = LoggerWriter(
@@ -179,21 +240,41 @@ class LogsInterceptor:
 
     def teardown(self) -> None:
         """Restore original logging configuration."""
-        #
-
-
-        if self.log_handler in self.root_logger.handlers:
-            self.root_logger.removeHandler(self.log_handler)
+        # Clear the context variable
+        if self.execution_id:
+            current_execution_id.set(None)
 
-
-
-
-
-
-        self.
-
-
-
+        # Restore the original disable level
+        if not self.execution_id:
+            logging.disable(self.original_disable_level)
+
+        # Remove our handler and filter
+        if self.execution_id:
+            if hasattr(self, "execution_filter"):
+                self.log_handler.removeFilter(self.execution_filter)
+            if self.log_handler in self.root_logger.handlers:
+                self.root_logger.removeHandler(self.log_handler)
+
+            # Remove from stdout/stderr loggers too
+            stdout_logger = logging.getLogger("stdout")
+            stderr_logger = logging.getLogger("stderr")
+            if self.log_handler in stdout_logger.handlers:
+                stdout_logger.removeHandler(self.log_handler)
+            if self.log_handler in stderr_logger.handlers:
+                stderr_logger.removeHandler(self.log_handler)
+        else:
+            if self.log_handler in self.root_logger.handlers:
+                self.root_logger.removeHandler(self.log_handler)
+
+            for logger_name in self.patched_loggers:
+                logger = logging.getLogger(logger_name)
+                if self.log_handler in logger.handlers:
+                    logger.removeHandler(self.log_handler)
+
+            self.root_logger.setLevel(self.original_level)
+            for handler in self.original_handlers:
+                if handler not in self.root_logger.handlers:
+                    self.root_logger.addHandler(handler)
 
         self.log_handler.close()
 
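The pairing of a `ContextVar` with a per-handler filter is what lets several executions share one process-wide logging tree while each log file sees only its own run. A standalone sketch of the mechanism (file names and ids invented; not the SDK's exact wiring):

```python
import logging
from contextvars import ContextVar
from typing import Optional

current_execution_id: ContextVar[Optional[str]] = ContextVar(
    "current_execution_id", default=None
)

class ExecutionFilter(logging.Filter):
    """Pass only records emitted while a given execution id is active."""

    def __init__(self, execution_id: str):
        super().__init__()
        self.execution_id = execution_id

    def filter(self, record: logging.LogRecord) -> bool:
        return current_execution_id.get() == self.execution_id

handler = logging.FileHandler("run-a.log")
handler.addFilter(ExecutionFilter("run-a"))
logging.getLogger().addHandler(handler)

current_execution_id.set("run-a")
logging.getLogger().warning("captured in run-a.log")
current_execution_id.set("run-b")
logging.getLogger().warning("suppressed for run-a.log")
```

Because a `ContextVar` is scoped per thread and per asyncio task, each concurrent execution sees its own id without any explicit locking.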
uipath/_cli/cli_pull.py
CHANGED
@@ -112,7 +112,7 @@ async def download_folder_files(
         if local_hash != remote_hash:
             styled_path = click.style(str(file_path), fg="cyan")
             console.warning(f"File {styled_path}" + " differs from remote version.")
-            response = click.prompt("Do you want to
+            response = click.prompt("Do you want to overwrite it? (y/n)", type=str)
             if response.lower() == "y":
                 with open(local_path, "w", encoding="utf-8", newline="\n") as f:
                     f.write(remote_content)
uipath/_services/context_grounding_service.py
CHANGED
@@ -1,4 +1,5 @@
-
+import json
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import httpx
 from pydantic import TypeAdapter
@@ -9,6 +10,11 @@ from .._execution_context import ExecutionContext
 from .._folder_context import FolderContext
 from .._utils import Endpoint, RequestSpec, header_folder, infer_bindings
 from .._utils.constants import (
+    CONFLUENCE_DATA_SOURCE,
+    DROPBOX_DATA_SOURCE,
+    GOOGLE_DRIVE_DATA_SOURCE,
+    LLMV4,
+    ONEDRIVE_DATA_SOURCE,
     ORCHESTRATOR_STORAGE_BUCKET_DATA_SOURCE,
 )
 from ..models import IngestionInProgressException
@@ -312,6 +318,122 @@ class ContextGroundingService(FolderContext, BaseService):
 
         return response.json()
 
+    @traced(name="contextgrounding_create_index", run_type="uipath")
+    @infer_bindings(resource_type="index")
+    def create_index(
+        self,
+        name: str,
+        source: Dict[str, Any],
+        description: Optional[str] = None,
+        cron_expression: Optional[str] = None,
+        time_zone_id: Optional[str] = None,
+        advanced_ingestion: Optional[bool] = True,
+        preprocessing_request: Optional[str] = LLMV4,
+        folder_key: Optional[str] = None,
+        folder_path: Optional[str] = None,
+    ) -> ContextGroundingIndex:
+        """Create a new context grounding index.
+
+        Args:
+            name (str): The name of the index to create.
+            source (dict): Source configuration dictionary:
+                - For buckets: type="bucket", bucket_name, folder_path, directory_path="/" (optional), file_type (optional)
+                - For Google Drive: type="google", connection_name, connection_id, leaf_folder_id, directory_path, folder_path, file_type (optional)
+                - For Dropbox: type="dropbox", connection_name, connection_id, directory_path, folder_path, file_type (optional)
+                - For OneDrive: type="onedrive", connection_name, connection_id, leaf_folder_id, directory_path, folder_path, file_type (optional)
+                - For Confluence: type="confluence", connection_name, connection_id, space_id, directory_path, folder_path, file_type (optional)
+            description (Optional[str]): Description of the index.
+            cron_expression (Optional[str]): Cron expression for scheduled indexing (e.g., "0 0 18 ? * 2" for Tuesdays at 6 PM).
+            time_zone_id (Optional[str]): Valid Windows Timezone ID for the cron expression (e.g., "UTC", "Pacific Standard Time", "GTB Standard Time").
+            advanced_ingestion (Optional[bool]): Enable advanced ingestion with preprocessing. Defaults to True.
+            preprocessing_request (Optional[str]): The OData type for preprocessing request. Defaults to LLMV4.
+            folder_key (Optional[str]): The key of the folder where the index will be created.
+            folder_path (Optional[str]): The path of the folder where the index will be created.
+
+        Returns:
+            ContextGroundingIndex: The created index information.
+        """
+        spec = self._create_spec(
+            name=name,
+            description=description,
+            source=source,
+            cron_expression=cron_expression,
+            time_zone_id=time_zone_id,
+            advanced_ingestion=advanced_ingestion
+            if advanced_ingestion is not None
+            else True,
+            preprocessing_request=preprocessing_request or LLMV4,
+            folder_path=folder_path,
+            folder_key=folder_key,
+        )
+
+        response = self.request(
+            spec.method,
+            spec.endpoint,
+            content=spec.content,
+            headers=spec.headers,
+        )
+
+        return ContextGroundingIndex.model_validate(response.json())
+
+    @traced(name="contextgrounding_create_index", run_type="uipath")
+    @infer_bindings(resource_type="index")
+    async def create_index_async(
+        self,
+        name: str,
+        source: Dict[str, Any],
+        description: Optional[str] = None,
+        cron_expression: Optional[str] = None,
+        time_zone_id: Optional[str] = None,
+        advanced_ingestion: Optional[bool] = True,
+        preprocessing_request: Optional[str] = LLMV4,
+        folder_key: Optional[str] = None,
+        folder_path: Optional[str] = None,
+    ) -> ContextGroundingIndex:
+        """Create a new context grounding index.
+
+        Args:
+            name (str): The name of the index to create.
+            source (dict): Source configuration dictionary:
+                - For buckets: type="bucket", bucket_name, folder_path, directory_path="/" (optional), file_type (optional)
+                - For Google Drive: type="google_drive", connection_name, connection_id, leaf_folder_id, directory_path, folder_path, file_type (optional)
+                - For Dropbox: type="dropbox", connection_name, connection_id, directory_path, folder_path, file_type (optional)
+                - For OneDrive: type="onedrive", connection_name, connection_id, leaf_folder_id, directory_path, folder_path, file_type (optional)
+                - For Confluence: type="confluence", connection_name, connection_id, space_id, directory_path, folder_path, file_type (optional)
+            description (Optional[str]): Description of the index.
+            cron_expression (Optional[str]): Cron expression for scheduled indexing (e.g., "0 0 18 ? * 2" for Tuesdays at 6 PM).
+            time_zone_id (Optional[str]): Valid Windows Timezone ID for the cron expression (e.g., "UTC", "Pacific Standard Time", "GTB Standard Time").
+            advanced_ingestion (Optional[bool]): Enable advanced ingestion with preprocessing. Defaults to True.
+            preprocessing_request (Optional[str]): The OData type for preprocessing request. Defaults to LLMV4.
+            folder_key (Optional[str]): The key of the folder where the index will be created.
+            folder_path (Optional[str]): The path of the folder where the index will be created.
+
+        Returns:
+            ContextGroundingIndex: The created index information.
+        """
+        spec = self._create_spec(
+            name=name,
+            description=description,
+            source=source,
+            cron_expression=cron_expression,
+            time_zone_id=time_zone_id,
+            advanced_ingestion=advanced_ingestion
+            if advanced_ingestion is not None
+            else True,
+            preprocessing_request=preprocessing_request or LLMV4,
+            folder_path=folder_path,
+            folder_key=folder_key,
+        )
+
+        response = await self.request_async(
+            spec.method,
+            spec.endpoint,
+            content=spec.content,
+            headers=spec.headers,
+        )
+
+        return ContextGroundingIndex.model_validate(response.json())
+
     @traced(name="contextgrounding_search", run_type="uipath")
     def search(
         self,
@@ -575,40 +697,200 @@ class ContextGroundingService(FolderContext, BaseService):
         self,
         name: str,
         description: Optional[str],
-
-
-
+        source: Dict[str, Any],
+        advanced_ingestion: bool,
+        preprocessing_request: str,
+        cron_expression: Optional[str] = None,
+        time_zone_id: Optional[str] = None,
         folder_key: Optional[str] = None,
         folder_path: Optional[str] = None,
     ) -> RequestSpec:
+        """Create request spec for index creation.
+
+        Args:
+            name: Index name
+            description: Index description
+            source: Source configuration dictionary
+            cron_expression: Optional cron expression for scheduled indexing
+            time_zone_id: Optional timezone for cron expression
+            advanced_ingestion: Whether to enable advanced ingestion with preprocessing
+            preprocessing_request: OData type for preprocessing request
+            folder_key: Optional folder key
+            folder_path: Optional folder path
+
+        Returns:
+            RequestSpec for the create index request
+        """
+        source_type = source.get("type", "").lower()
+
         folder_key = self._resolve_folder_key(folder_key, folder_path)
+        file_type = source.get("file_type")
+        file_name_glob = f"**/*.{file_type}" if file_type else "**/*"
+
+        data_source = self._build_data_source(source_type, source, file_name_glob)
+
+        if cron_expression:
+            data_source["indexer"] = {
+                "cronExpression": cron_expression,
+                "timeZoneId": time_zone_id or "UTC",
+            }
+
+        payload = {
+            "name": name,
+            "description": description or "",
+            "dataSource": data_source,
+        }
+
+        if advanced_ingestion and preprocessing_request:
+            payload["preProcessing"] = {
+                "@odata.type": preprocessing_request,
+            }
 
-        storage_bucket_folder_path = (
-            storage_bucket_folder_path
-            if storage_bucket_folder_path
-            else self._folder_path
-        )
         return RequestSpec(
             method="POST",
             endpoint=Endpoint("/ecs_/v2/indexes/create"),
-            json
-                "name": name,
-                "description": description,
-                "dataSource": {
-                    "@odata.type": ORCHESTRATOR_STORAGE_BUCKET_DATA_SOURCE,
-                    "folder": storage_bucket_folder_path,
-                    "bucketName": storage_bucket_name,
-                    "fileNameGlob": file_name_glob
-                    if file_name_glob is not None
-                    else "*",
-                    "directoryPath": "/",
-                },
-            },
+            content=json.dumps(payload),
             headers={
                 **header_folder(folder_key, None),
+                "Content-Type": "application/json",
             },
         )
 
+    def _build_data_source(
+        self, source_type: str, source: Dict[str, Any], file_name_glob: str
+    ) -> Dict[str, Any]:
+        """Build data source configuration based on type."""
+        if source_type == "bucket":
+            return self._build_bucket_data_source(source, file_name_glob)
+        elif source_type in ["google_drive"]:
+            return self._build_google_drive_data_source(source, file_name_glob)
+        elif source_type == "dropbox":
+            return self._build_dropbox_data_source(source, file_name_glob)
+        elif source_type == "onedrive":
+            return self._build_onedrive_data_source(source, file_name_glob)
+        elif source_type == "confluence":
+            return self._build_confluence_data_source(source, file_name_glob)
+        else:
+            raise ValueError(
+                f"Unsupported data source type: {source_type}. "
+                f"Supported types: bucket, google_drive, dropbox, onedrive, confluence"
+            )
+
+    def _build_bucket_data_source(
+        self, source: Dict[str, Any], file_name_glob: str
+    ) -> Dict[str, Any]:
+        """Build data source configuration for storage bucket."""
+        required_fields = ["bucket_name", "folder_path"]
+        for field in required_fields:
+            if not source.get(field):
+                raise ValueError(f"{field} is required for bucket data source")
+
+        return {
+            "@odata.type": ORCHESTRATOR_STORAGE_BUCKET_DATA_SOURCE,
+            "folder": source["folder_path"],
+            "bucketName": source["bucket_name"],
+            "fileNameGlob": file_name_glob,
+            "directoryPath": source.get("directory_path", "/"),
+        }
+
+    def _build_google_drive_data_source(
+        self, source: Dict[str, Any], file_name_glob: str
+    ) -> Dict[str, Any]:
+        """Build data source configuration for Google Drive."""
+        required_fields = [
+            "connection_id",
+            "connection_name",
+            "leaf_folder_id",
+            "directory_path",
+            "folder_path",
+        ]
+        for field in required_fields:
+            if not source.get(field):
+                raise ValueError(f"{field} is required for Google Drive data source")
+
+        return {
+            "@odata.type": GOOGLE_DRIVE_DATA_SOURCE,
+            "folder": source["folder_path"],
+            "connectionId": source["connection_id"],
+            "connectionName": source["connection_name"],
+            "leafFolderId": source["leaf_folder_id"],
+            "directoryPath": source["directory_path"],
+            "fileNameGlob": file_name_glob,
+        }
+
+    def _build_dropbox_data_source(
+        self, source: Dict[str, Any], file_name_glob: str
+    ) -> Dict[str, Any]:
+        """Build data source configuration for Dropbox."""
+        required_fields = [
+            "connection_id",
+            "connection_name",
+            "directory_path",
+            "folder_path",
+        ]
+        for field in required_fields:
+            if not source.get(field):
+                raise ValueError(f"{field} is required for Dropbox data source")
+
+        return {
+            "@odata.type": DROPBOX_DATA_SOURCE,
+            "folder": source["folder_path"],
+            "connectionId": source["connection_id"],
+            "connectionName": source["connection_name"],
+            "directoryPath": source["directory_path"],
+            "fileNameGlob": file_name_glob,
+        }
+
+    def _build_onedrive_data_source(
+        self, source: Dict[str, Any], file_name_glob: str
+    ) -> Dict[str, Any]:
+        """Build data source configuration for OneDrive."""
+        required_fields = [
+            "connection_id",
+            "connection_name",
+            "leaf_folder_id",
+            "directory_path",
+            "folder_path",
+        ]
+        for field in required_fields:
+            if not source.get(field):
+                raise ValueError(f"{field} is required for OneDrive data source")
+
+        return {
+            "@odata.type": ONEDRIVE_DATA_SOURCE,
+            "folder": source["folder_path"],
+            "connectionId": source["connection_id"],
+            "connectionName": source["connection_name"],
+            "leafFolderId": source["leaf_folder_id"],
+            "directoryPath": source["directory_path"],
+            "fileNameGlob": file_name_glob,
+        }
+
+    def _build_confluence_data_source(
+        self, source: Dict[str, Any], file_name_glob: str
+    ) -> Dict[str, Any]:
+        """Build data source configuration for Confluence."""
+        required_fields = [
+            "connection_id",
+            "connection_name",
+            "directory_path",
+            "folder_path",
+            "space_id",
+        ]
+        for field in required_fields:
+            if not source.get(field):
+                raise ValueError(f"{field} is required for Confluence data source")
+
+        return {
+            "@odata.type": CONFLUENCE_DATA_SOURCE,
+            "folder": source["folder_path"],
+            "connectionId": source["connection_id"],
+            "connectionName": source["connection_name"],
+            "directoryPath": source["directory_path"],
+            "fileNameGlob": file_name_glob,
+            "spaceId": source["space_id"],
+        }
+
     def _retrieve_by_id_spec(
         self,
         id: str,
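`create_index` now accepts a generic `source` dict and builds the payload through the per-type `_build_*` helpers above. A hedged usage sketch for the storage-bucket case (resource names invented; `sdk.context_grounding` is assumed to be the service accessor, matching the SDK's usual layout):

```python
from uipath import UiPath

sdk = UiPath()
index = sdk.context_grounding.create_index(
    name="invoices-index",
    description="Searchable invoice archive",
    source={
        "type": "bucket",
        "bucket_name": "InvoicesBucket",  # invented bucket name
        "folder_path": "Shared",          # Orchestrator folder holding the bucket
        "file_type": "pdf",               # optional; narrows the glob to **/*.pdf
    },
    cron_expression="0 0 18 ? * 2",       # docstring example: Tuesdays at 6 PM
    time_zone_id="UTC",
)
```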
uipath/_utils/constants.py
CHANGED
@@ -25,6 +25,18 @@ HEADER_SW_LOCK_KEY = "x-uipath-sw-lockkey"
 ORCHESTRATOR_STORAGE_BUCKET_DATA_SOURCE = (
     "#UiPath.Vdbs.Domain.Api.V20Models.StorageBucketDataSourceRequest"
 )
+CONFLUENCE_DATA_SOURCE = "#UiPath.Vdbs.Domain.Api.V20Models.ConfluenceDataSourceRequest"
+DROPBOX_DATA_SOURCE = "#UiPath.Vdbs.Domain.Api.V20Models.DropboxDataSourceRequest"
+GOOGLE_DRIVE_DATA_SOURCE = (
+    "#UiPath.Vdbs.Domain.Api.V20Models.GoogleDriveDataSourceRequest"
+)
+ONEDRIVE_DATA_SOURCE = "#UiPath.Vdbs.Domain.Api.V20Models.OneDriveDataSourceRequest"
+
+# Preprocessing request types
+LLMV3Mini = "#UiPath.Vdbs.Domain.Api.V20Models.LLMV3MiniPreProcessingRequest"
+LLMV4 = "#UiPath.Vdbs.Domain.Api.V20Models.LLMV4PreProcessingRequest"
+NativeV1 = "#UiPath.Vdbs.Domain.Api.V20Models.NativeV1PreProcessingRequest"
+
 
 # Local storage
 TEMP_ATTACHMENTS_FOLDER = "uipath_attachments"
uipath/agent/_utils.py
CHANGED
@@ -31,6 +31,52 @@ async def load_agent_definition(project_id: str):
         await get_file(project_structure, PurePath("agent.json"), studio_client)
     ).json()
 
+    evaluators = []
+    try:
+        evaluators_path = resolve_path(
+            project_structure, PurePath("evals", "evaluators")
+        )
+        if isinstance(evaluators_path, ProjectFolder):
+            for file in evaluators_path.files:
+                evaluators.append(
+                    (
+                        await get_file(
+                            evaluators_path, PurePath(file.name), studio_client
+                        )
+                    ).json()
+                )
+        else:
+            logger.warning(
+                "Unable to read evaluators from project. Defaulting to empty evaluators."
+            )
+    except Exception:
+        logger.warning(
+            "Unable to read evaluators from project. Defaulting to empty evaluators."
+        )
+
+    evaluation_sets = []
+    try:
+        evaluation_sets_path = resolve_path(
+            project_structure, PurePath("evals", "eval-sets")
+        )
+        if isinstance(evaluation_sets_path, ProjectFolder):
+            for file in evaluation_sets_path.files:
+                evaluation_sets.append(
+                    (
+                        await get_file(
+                            evaluation_sets_path, PurePath(file.name), studio_client
+                        )
+                    ).json()
+                )
+        else:
+            logger.warning(
+                "Unable to read eval-sets from project. Defaulting to empty eval-sets."
+            )
+    except Exception:
+        logger.warning(
+            "Unable to read eval-sets from project. Defaulting to empty eval-sets."
+        )
+
     resolved_path = resolve_path(project_structure, PurePath("resources"))
     if isinstance(resolved_path, ProjectFolder):
         resource_folders = resolved_path.folders
@@ -50,6 +96,8 @@ async def load_agent_definition(project_id: str):
         "id": project_id,
         "name": project_structure.name,
         "resources": resources,
+        "evaluators": evaluators,
+        "evaluationSets": evaluation_sets,
         **agent,
     }
     return TypeAdapter(AgentDefinition).validate_python(agent_definition)
uipath/agent/models/agent.py
CHANGED
@@ -5,6 +5,8 @@ from typing import Annotated, Any, Dict, List, Literal, Optional, Union
 
 from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag
 
+from uipath._cli._evals._models._evaluation_set import EvaluationSet
+from uipath._cli._evals._models._evaluator import Evaluator
 from uipath.models import Connection
 
 
@@ -307,6 +309,14 @@ class BaseAgentDefinition(BaseModel):
     resources: List[AgentResourceConfig] = Field(
         ..., description="List of tools, context, and escalation resources"
     )
+    evaluation_sets: Optional[List[EvaluationSet]] = Field(
+        None,
+        alias="evaluationSets",
+        description="List of agent evaluation sets",
+    )
+    evaluators: Optional[List[Evaluator]] = Field(
+        None, description="List of agent evaluators"
+    )
 
     model_config = ConfigDict(
         validate_by_name=True, validate_by_alias=True, extra="allow"
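Both new fields are optional, so agent definitions that predate evals still validate. A field-level sketch with a stand-in model (`BaseAgentDefinition`'s other required fields sit outside this diff):

```python
from typing import List, Optional

from pydantic import BaseModel, ConfigDict, Field

class MiniAgentDefinition(BaseModel):
    model_config = ConfigDict(validate_by_name=True, validate_by_alias=True)
    evaluation_sets: Optional[List[dict]] = Field(None, alias="evaluationSets")
    evaluators: Optional[List[dict]] = Field(None)

assert MiniAgentDefinition().evaluation_sets is None              # absent -> None
assert MiniAgentDefinition(evaluationSets=[]).evaluation_sets == []
```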
{uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath
-Version: 2.1.70
+Version: 2.1.72
 Summary: Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools.
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-python
{uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/RECORD
CHANGED
@@ -15,7 +15,7 @@ uipath/_cli/cli_invoke.py,sha256=m-te-EjhDpk_fhFDkt-yQFzmjEHGo5lQDGEQWxSXisQ,439
 uipath/_cli/cli_new.py,sha256=9378NYUBc9j-qKVXV7oja-jahfJhXBg8zKVyaon7ctY,2102
 uipath/_cli/cli_pack.py,sha256=NmwZTfwZ2fURiHyiX1BM0juAtBOjPB1Jmcpu-rD7p-4,11025
 uipath/_cli/cli_publish.py,sha256=DgyfcZjvfV05Ldy0Pk5y_Le_nT9JduEE_x-VpIc_Kq0,6471
-uipath/_cli/cli_pull.py,sha256=
+uipath/_cli/cli_pull.py,sha256=PZ2hkfsfN-ElNa3FHjNetTux8XH03tDY5kWWqydQ2OY,6832
 uipath/_cli/cli_push.py,sha256=-j-gDIbT8GyU2SybLQqFl5L8KI9nu3CDijVtltDgX20,3132
 uipath/_cli/cli_run.py,sha256=1FKv20EjxrrP1I5rNSnL_HzbWtOAIMjB3M--4RPA_Yo,3709
 uipath/_cli/middlewares.py,sha256=GvMhDnx1BmA7rIe12s6Uqv1JdqNZhvraU0a91oqGag4,4976
@@ -35,7 +35,7 @@ uipath/_cli/_auth/localhost.key,sha256=X31VYXD8scZtmGA837dGX5l6G-LXHLo5ItWJhZXaz
 uipath/_cli/_dev/_terminal/__init__.py,sha256=di_RiN9Mcp9wqyKRRqXag28vbSw8_78mCnQZNn9H-Ss,14027
 uipath/_cli/_dev/_terminal/_components/_chat.py,sha256=NLRoy49QScHiI-q0FGykkaU8ajv1d23fx7issSALcFA,4119
 uipath/_cli/_dev/_terminal/_components/_details.py,sha256=FbLYtJ56gqHV6CIrpzO_n9Sk_YNg4nzRKTSsbj-DBPQ,17257
-uipath/_cli/_dev/_terminal/_components/_history.py,sha256=
+uipath/_cli/_dev/_terminal/_components/_history.py,sha256=QX9GWiKpP1H6_wVOJkrIMb-8w2HKg8g-kcbwxiEQj2s,3237
 uipath/_cli/_dev/_terminal/_components/_json_input.py,sha256=MPkaeiA5KfkwJZKuNJ02hQksVtluZlmJv9nLRRAWYQI,592
 uipath/_cli/_dev/_terminal/_components/_new.py,sha256=paA8oRhP5mphpf3RHV0gx7_CYdN5e6158tv_XVQifdE,5219
 uipath/_cli/_dev/_terminal/_models/_execution.py,sha256=gPcxtwWR9eO929VaieOdI1e77clceKLoKA0FYayuCFQ,2869
@@ -44,19 +44,20 @@ uipath/_cli/_dev/_terminal/_styles/terminal.tcss,sha256=ktVpKwXIXw2VZp8KIZD6fO9i
 uipath/_cli/_dev/_terminal/_utils/_chat.py,sha256=YUZxYVdmEManwHDuZsczJT1dWIYE1dVBgABlurwMFcE,8493
 uipath/_cli/_dev/_terminal/_utils/_exporter.py,sha256=oI6D_eMwrh_2aqDYUh4GrJg8VLGrLYhDahR-_o0uJns,4144
 uipath/_cli/_dev/_terminal/_utils/_logger.py,sha256=_ipTl_oAiMF9I7keGt2AAFAMz40DNLVMVkoiq-07UAU,2943
-uipath/_cli/_evals/_evaluator_factory.py,sha256=
+uipath/_cli/_evals/_evaluator_factory.py,sha256=Gycv94VtGOpMir_Gba-UoiAyrSRfbSfe8_pTfjzcA9Q,3875
 uipath/_cli/_evals/_progress_reporter.py,sha256=hpSt0CXpIoFJGsbqZkqmwyGO_TBNesbWKlvDJUEDxd8,16455
 uipath/_cli/_evals/_runtime.py,sha256=WKcBT6DGzNRjgEOpmH0b7RoEbEsHMyAbcAMs8b_CAI0,11418
-uipath/_cli/_evals/_models/_evaluation_set.py,sha256=
+uipath/_cli/_evals/_models/_evaluation_set.py,sha256=RRDaP0X4E8kueL0Io9yB4y8akx3gKZhoSIgTNhgoN9Y,2407
+uipath/_cli/_evals/_models/_evaluator.py,sha256=fuC3UOYwPD4d_wdynHeLSCzbu82golNAnnPnxC8Y4rk,3315
 uipath/_cli/_evals/_models/_evaluator_base_params.py,sha256=lTYKOV66tcjW85KHTyOdtF1p1VDaBNemrMAvH8bFIFc,382
 uipath/_cli/_evals/_models/_output.py,sha256=LjwMBGI78sDFa2Dl8b9ReXJmjig57pdLWpuiwChrRLo,3096
 uipath/_cli/_evals/_models/_sw_reporting.py,sha256=tSBLQFAdOIun8eP0vsqt56K6bmCZz_uMaWI3hskg_24,536
 uipath/_cli/_evals/_models/_trajectory_span.py,sha256=8ukM8sB9rvzBMHfC_gnexAC3xlp4uMDevKZrRzcgrm4,3637
 uipath/_cli/_push/sw_file_handler.py,sha256=iE8Sk1Z-9hxmLFFj3j-k4kTK6TzNFP6hUCmxTudG6JQ,18251
-uipath/_cli/_runtime/_contracts.py,sha256=
+uipath/_cli/_runtime/_contracts.py,sha256=D57cq5V5CZ9p13n_vRDHRcwyJYQUcJLlAMbAOzIiBNI,28932
 uipath/_cli/_runtime/_escalation.py,sha256=x3vI98qsfRA-fL_tNkRVTFXioM5Gv2w0GFcXJJ5eQtg,7981
 uipath/_cli/_runtime/_hitl.py,sha256=VKbM021nVg1HEDnTfucSLJ0LsDn83CKyUtVzofS2qTU,11369
-uipath/_cli/_runtime/_logging.py,sha256=
+uipath/_cli/_runtime/_logging.py,sha256=iO0AG_tqUBp7aJZTK_ZgwV3fFvxbi9Rp1UOBn3F76lw,11684
 uipath/_cli/_runtime/_runtime.py,sha256=gby9-avNNlEATEfSXtY8FfJ8nREsSCGA4wMgDlSXTDE,2297
 uipath/_cli/_runtime/_script_executor.py,sha256=PjbmEbyCMofGH2F85b8RFsxdV3Tqw0kVqdWOOk2ZLlI,9687
 uipath/_cli/_templates/.psmdcp.template,sha256=C7pBJPt98ovEljcBvGtEUGoWjjQhu9jls1bpYjeLOKA,611
@@ -89,7 +90,7 @@ uipath/_services/assets_service.py,sha256=pG0Io--SeiRRQmfUWPQPl1vq3csZlQgx30LBNK
 uipath/_services/attachments_service.py,sha256=NPQYK7CGjfBaNT_1S5vEAfODmOChTbQZforllFM2ofU,26678
 uipath/_services/buckets_service.py,sha256=5s8tuivd7GUZYj774DDUYTa0axxlUuesc4EBY1V5sdk,18496
 uipath/_services/connections_service.py,sha256=Gt8zPY4oA7cMYAU2LI3lBieoBpV81BOGelnzDWJl_V4,7931
-uipath/_services/context_grounding_service.py,sha256=
+uipath/_services/context_grounding_service.py,sha256=Pjx-QQQEiSKD-hY6ityj3QUSALN3fIcKLLHr_NZ0d_g,37117
 uipath/_services/documents_service.py,sha256=UnFS8EpOZ_Ng2TZk3OiJJ3iNANvFs7QxuoG_v-lQj6c,24815
 uipath/_services/entities_service.py,sha256=QKCLE6wRgq3HZraF-M2mljy-8il4vsNHrQhUgkewVVk,14028
 uipath/_services/folder_service.py,sha256=9JqgjKhWD-G_KUnfUTP2BADxL6OK9QNZsBsWZHAULdE,2749
@@ -107,8 +108,8 @@ uipath/_utils/_request_spec.py,sha256=iCtBLqtbWUpFG5g1wtIZBzSupKsfaRLiQFoFc_4B70
 uipath/_utils/_ssl_context.py,sha256=xSYitos0eJc9cPHzNtHISX9PBvL6D2vas5G_GiBdLp8,1783
 uipath/_utils/_url.py,sha256=-4eluSrIZCUlnQ3qU17WPJkgaC2KwF9W5NeqGnTNGGo,2512
 uipath/_utils/_user_agent.py,sha256=pVJkFYacGwaQBomfwWVAvBQgdBUo62e4n3-fLIajWUU,563
-uipath/_utils/constants.py,sha256=
-uipath/agent/_utils.py,sha256=
+uipath/_utils/constants.py,sha256=2xLT-1aW0aJS2USeZbK-7zRgyyi1bgV60L0rtQOUqOM,1721
+uipath/agent/_utils.py,sha256=frpg3LYJofdGWS5w44PsBaNHiW9GRkLokvNnf2D4a54,3309
 uipath/agent/conversation/__init__.py,sha256=5hK-Iz131mnd9m6ANnpZZffxXZLVFDQ9GTg5z9ik1oQ,5265
 uipath/agent/conversation/async_stream.py,sha256=BA_8uU1DgE3VpU2KkJj0rkI3bAHLk_ZJKsajR0ipMpo,2055
 uipath/agent/conversation/citation.py,sha256=42dGv-wiYx3Lt7MPuPCFTkjAlSADFSzjyNXuZHdxqvo,2253
@@ -119,7 +120,7 @@ uipath/agent/conversation/exchange.py,sha256=nuk1tEMBHc_skrraT17d8U6AtyJ3h07ExGQ
 uipath/agent/conversation/message.py,sha256=1ZkEs146s79TrOAWCQwzBAEJvjAu4lQBpJ64tKXDgGE,2142
 uipath/agent/conversation/meta.py,sha256=3t0eS9UHoAPHre97QTUeVbjDhnMX4zj4-qG6ju0B8wY,315
 uipath/agent/conversation/tool.py,sha256=ol8XI8AVd-QNn5auXNBPcCzOkh9PPFtL7hTK3kqInkU,2191
-uipath/agent/models/agent.py,sha256=
+uipath/agent/models/agent.py,sha256=ie_N47K-txv9Q9kXeynI1FsO2l120JYmjPXNm-e6yMo,12952
 uipath/eval/_helpers/__init__.py,sha256=GSmZMryjuO3Wo_zdxZdrHCRRsgOxsVFYkYgJ15YNC3E,86
 uipath/eval/_helpers/helpers.py,sha256=iE2HHdMiAdAMLqxHkPKHpfecEtAuN5BTBqvKFTI8ciE,1315
 uipath/eval/evaluators/__init__.py,sha256=DJAAhgv0I5UfBod4sGnSiKerfrz1iMmk7GNFb71V8eI,494
@@ -158,8 +159,8 @@ uipath/tracing/_traced.py,sha256=yBIY05PCCrYyx50EIHZnwJaKNdHPNx-YTR1sHQl0a98,199
 uipath/tracing/_utils.py,sha256=qd7N56tg6VXQ9pREh61esBgUWLNA0ssKsE0QlwrRWFM,11974
 uipath/utils/__init__.py,sha256=VD-KXFpF_oWexFg6zyiWMkxl2HM4hYJMIUDZ1UEtGx0,105
 uipath/utils/_endpoints_manager.py,sha256=iRTl5Q0XAm_YgcnMcJOXtj-8052sr6jpWuPNz6CgT0Q,8408
-uipath-2.1.
-uipath-2.1.
-uipath-2.1.
-uipath-2.1.
-uipath-2.1.
+uipath-2.1.72.dist-info/METADATA,sha256=sIUVm86o9y3uorIq58WEe4g-58aQ_tD0mp_8TPcrhHc,6482
+uipath-2.1.72.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath-2.1.72.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
+uipath-2.1.72.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
+uipath-2.1.72.dist-info/RECORD,,

{uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/WHEEL
File without changes

{uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/entry_points.txt
File without changes

{uipath-2.1.70.dist-info → uipath-2.1.72.dist-info}/licenses/LICENSE
File without changes