uipath 2.1.78-py3-none-any.whl → 2.1.80-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uipath/_cli/_evals/_models/_output.py +17 -14
- uipath/_cli/_evals/_runtime.py +203 -102
- uipath/_cli/_runtime/_runtime.py +3 -1
- uipath/_cli/cli_eval.py +2 -2
- uipath/_services/connections_service.py +0 -2
- uipath/eval/mocks/mockable.py +4 -2
- {uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/METADATA +1 -1
- {uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/RECORD +11 -11
- {uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/WHEEL +0 -0
- {uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/entry_points.txt +0 -0
- {uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/licenses/LICENSE +0 -0
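Taken together, the substantive changes in 2.1.80 are: a parallel execution path for evaluation sets in the eval runtime (new `_execute_sequential`, `_execute_parallel`, and `_execute_eval` methods, with the `--workers` CLI default now set to 1), `evaluator_id` carried on evaluation result DTOs but stripped at serialization time, `score` recomputed as read-only properties instead of mutate-in-place `compute_average_score()` calls, optional explicit `input_schema`/`output_schema` overrides for `@mockable`, removal of the `@infer_bindings` decorator from two `ConnectionsService` methods, and suppression of result logging for eval runs in `UiPathRuntime`.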
uipath/_cli/_evals/_models/_output.py
CHANGED
```diff
@@ -58,43 +58,46 @@ class EvaluationRunResultDto(BaseModel):
     model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
 
     evaluator_name: str
+    evaluator_id: str
     result: EvaluationResultDto
 
+    @model_serializer(mode="wrap")
+    def serialize_model(self, serializer, info):
+        data = serializer(self)
+        if isinstance(data, dict):
+            data.pop("evaluatorId", None)
+        return data
+
 
 class EvaluationRunResult(BaseModel):
     model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
 
-    score: float = 0.0
     evaluation_name: str
     evaluation_run_results: List[EvaluationRunResultDto]
 
-    def compute_average_score(self):
+    @property
+    def score(self) -> float:
         """Compute average score for this single eval_item."""
         if not self.evaluation_run_results:
-            self.score = 0.0
-            return
+            return 0.0
 
         total_score = sum(dto.result.score for dto in self.evaluation_run_results)
-        self.score = total_score / len(self.evaluation_run_results)
+        return total_score / len(self.evaluation_run_results)
 
 
 class UiPathEvalOutput(BaseModel):
     model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
 
     evaluation_set_name: str
-    score: float
     evaluation_set_results: List[EvaluationRunResult]
 
-
-    def compute_average_score(self):
+    @property
+    def score(self) -> float:
+        """Compute overall average score from evaluation results."""
        if not self.evaluation_set_results:
-            self.score = 0.0
-            return
-
-        for eval_result in self.evaluation_set_results:
-            eval_result.compute_average_score()
+            return 0.0
 
         eval_item_scores = [
             eval_result.score for eval_result in self.evaluation_set_results
         ]
-        self.score = sum(eval_item_scores) / len(eval_item_scores)
+        return sum(eval_item_scores) / len(eval_item_scores)
```
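The net effect of the `_output.py` change: `evaluator_id` travels on each `EvaluationRunResultDto` so the runtime can aggregate per-evaluator averages, but the wrap-mode serializer strips it from serialized output, and the mutate-in-place `compute_average_score()` methods give way to read-only `score` properties. Below is a minimal, self-contained sketch of the serializer pattern; the `ResultDto` class and sample values are illustrative, not the SDK's own:

```python
from pydantic import BaseModel, ConfigDict, model_serializer
from pydantic.alias_generators import to_camel


class ResultDto(BaseModel):
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    evaluator_name: str
    evaluator_id: str  # kept on the model for aggregation, hidden from output

    @model_serializer(mode="wrap")
    def serialize_model(self, serializer, info):
        # Run the default serialization first, then drop the camelCase key.
        data = serializer(self)
        if isinstance(data, dict):
            data.pop("evaluatorId", None)
        return data


dto = ResultDto(evaluator_name="exact-match", evaluator_id="ev-1")
print(dto.model_dump(by_alias=True))  # {'evaluatorName': 'exact-match'}
```

Note that the popped key is the camelCase alias, so the field only disappears when dumping with `by_alias=True`, which the camelCase alias generator suggests is how the SDK serializes these models.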
uipath/_cli/_evals/_runtime.py
CHANGED
```diff
@@ -1,3 +1,4 @@
+import asyncio
 import json
 import logging
 import uuid
```
```diff
@@ -151,9 +152,6 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         )
         evaluators = self._load_evaluators(evaluation_set)
 
-        evaluator_averages = {evaluator.id: 0.0 for evaluator in evaluators}
-        evaluator_counts = {evaluator.id: 0 for evaluator in evaluators}
-
         await event_bus.publish(
             EvaluationEvents.CREATE_EVAL_SET_RUN,
             EvalSetRunCreatedEvent(
```
```diff
@@ -165,110 +163,37 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
             ),
         )
 
-        results = UiPathEvalOutput(
-            evaluation_set_name=evaluation_set.name, score=0.0, evaluation_set_results=[]
-        )
-
-        for eval_item in evaluation_set.evaluations:
-            set_evaluation_item(eval_item)
-
-            await event_bus.publish(
-                EvaluationEvents.CREATE_EVAL_RUN,
-                EvalRunCreatedEvent(
-                    execution_id=self.execution_id,
-                    eval_item=eval_item,
-                ),
+        # Check if parallel execution should be used
+        if (
+            self.context.workers
+            and self.context.workers > 1
+            and len(evaluation_set.evaluations) > 1
+        ):
+            eval_run_result_list = await self._execute_parallel(
+                evaluation_set, evaluators, event_bus, self.context.workers
             )
-
-            evaluation_run_results = EvaluationRunResult(
-                evaluation_name=eval_item.name, evaluation_run_results=[]
+        else:
+            eval_run_result_list = await self._execute_sequential(
+                evaluation_set, evaluators, event_bus
             )
+        results = UiPathEvalOutput(
+            evaluation_set_name=evaluation_set.name,
+            evaluation_set_results=eval_run_result_list,
+        )
 
-            results.evaluation_set_results.append(evaluation_run_results)
-
-            try:
-                agent_execution_output = await self.execute_runtime(eval_item)
-                evaluation_item_results: list[EvalItemResult] = []
-
-                for evaluator in evaluators:
-                    evaluation_result = await self.run_evaluator(
-                        evaluator=evaluator,
-                        execution_output=agent_execution_output,
-                        eval_item=eval_item,
-                    )
-
-                    dto_result = EvaluationResultDto.from_evaluation_result(
-                        evaluation_result
-                    )
-                    evaluator_counts[evaluator.id] += 1
-                    count = evaluator_counts[evaluator.id]
-                    evaluator_averages[evaluator.id] += (
-                        dto_result.score - evaluator_averages[evaluator.id]
-                    ) / count
-
-                    evaluation_run_results.evaluation_run_results.append(
-                        EvaluationRunResultDto(
-                            evaluator_name=evaluator.name,
-                            result=dto_result,
-                        )
-                    )
-                    evaluation_item_results.append(
-                        EvalItemResult(
-                            evaluator_id=evaluator.id,
-                            result=evaluation_result,
-                        )
-                    )
-
-                evaluation_run_results.compute_average_score()
-
-                await event_bus.publish(
-                    EvaluationEvents.UPDATE_EVAL_RUN,
-                    EvalRunUpdatedEvent(
-                        execution_id=self.execution_id,
-                        eval_item=eval_item,
-                        eval_results=evaluation_item_results,
-                        success=not agent_execution_output.result.error,
-                        agent_output=agent_execution_output.result.output,
-                        agent_execution_time=agent_execution_output.execution_time,
-                        spans=agent_execution_output.spans,
-                        logs=agent_execution_output.logs,
-                    ),
-                    wait_for_completion=False,
-                )
-            except Exception as e:
-                exception_details = EvalItemExceptionDetails(exception=e)
+        # Computing evaluator averages
+        evaluator_averages: Dict[str, float] = defaultdict(float)
+        evaluator_count: Dict[str, int] = defaultdict(int)
 
-                for evaluator in evaluators:
-                    evaluator_counts[evaluator.id] += 1
-                    count = evaluator_counts[evaluator.id]
-                    evaluator_averages[evaluator.id] += (
-                        0.0 - evaluator_averages[evaluator.id]
-                    ) / count
+        for eval_run_result in results.evaluation_set_results:
+            for result_dto in eval_run_result.evaluation_run_results:
+                evaluator_averages[result_dto.evaluator_id] += result_dto.result.score
+                evaluator_count[result_dto.evaluator_id] += 1
 
-                eval_run_updated_event = EvalRunUpdatedEvent(
-                    execution_id=self.execution_id,
-                    eval_item=eval_item,
-                    eval_results=[],
-                    success=False,
-                    agent_output={},
-                    agent_execution_time=0.0,
-                    exception_details=exception_details,
-                    spans=[],
-                    logs=[],
-                )
-                if isinstance(e, EvaluationRuntimeException):
-                    eval_run_updated_event.spans = e.spans
-                    eval_run_updated_event.logs = e.logs
-                    eval_run_updated_event.exception_details.exception = ( # type: ignore
-                        e.root_exception
-                    )
-                    eval_run_updated_event.exception_details.runtime_exception = True # type: ignore
-
-                await event_bus.publish(
-                    EvaluationEvents.UPDATE_EVAL_RUN,
-                    eval_run_updated_event,
-                    wait_for_completion=False,
-                )
-
-        results.compute_average_score()
+        for eval_id in evaluator_averages:
+            evaluator_averages[eval_id] = (
+                evaluator_averages[eval_id] / evaluator_count[eval_id]
+            )
 
         await event_bus.publish(
             EvaluationEvents.UPDATE_EVAL_SET_RUN,
```
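The aggregation strategy changes with this hunk: instead of maintaining an incremental running mean per evaluator while iterating (`avg += (x - avg) / count`), the runtime now collects all results first and computes plain sum-over-count averages in a separate pass, which is what makes the parallel path possible. A small standalone sketch of the same two-pass aggregation (the evaluator ids and scores here are made up):

```python
from collections import defaultdict

# Hypothetical (evaluator_id, score) pairs gathered from completed runs.
scores = [("exact-match", 1.0), ("similarity", 0.5), ("exact-match", 0.0)]

totals: dict[str, float] = defaultdict(float)
counts: dict[str, int] = defaultdict(int)
for evaluator_id, score in scores:
    totals[evaluator_id] += score
    counts[evaluator_id] += 1

averages = {eid: totals[eid] / counts[eid] for eid in totals}
print(averages)  # {'exact-match': 0.5, 'similarity': 0.5}
```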
```diff
@@ -285,6 +210,182 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         )
         return self.context.result
 
+    async def _execute_sequential(
+        self,
+        evaluation_set: EvaluationSet,
+        evaluators: List[BaseEvaluator[Any]],
+        event_bus: EventBus,
+    ) -> List[EvaluationRunResult]:
+        all_eval_run_result: list[EvaluationRunResult] = []
+
+        for eval_item in evaluation_set.evaluations:
+            all_eval_run_result.append(
+                await self._execute_eval(eval_item, evaluators, event_bus)
+            )
+
+        return all_eval_run_result
+
+    async def _execute_parallel(
+        self,
+        evaluation_set: EvaluationSet,
+        evaluators: List[BaseEvaluator[Any]],
+        event_bus: EventBus,
+        workers: int,
+    ) -> List[EvaluationRunResult]:
+        # Create a queue with max concurrency
+        queue: asyncio.Queue[tuple[int, EvaluationItem]] = asyncio.Queue(
+            maxsize=workers
+        )
+
+        # Dictionary to store results with their original indices
+        results_dict: Dict[int, EvaluationRunResult] = {}
+
+        # Producer task to fill the queue
+        async def producer() -> None:
+            for index, eval_item in enumerate(evaluation_set.evaluations):
+                await queue.put((index, eval_item))
+            # Signal completion by putting None markers
+            for _ in range(workers):
+                await queue.put(None) # type: ignore
+
+        # Worker function to process items from the queue
+        async def worker(worker_id: int) -> None:
+            while True:
+                item = await queue.get()
+
+                # Check for termination signal
+                if item is None:
+                    queue.task_done()
+                    break
+
+                index, eval_item = item
+
+                try:
+                    # Execute the evaluation
+                    result = await self._execute_eval(eval_item, evaluators, event_bus)
+
+                    # Store result with its index to maintain order
+                    results_dict[index] = result
+                finally:
+                    # Mark the task as done
+                    queue.task_done()
+
+        # Start producer
+        producer_task = asyncio.create_task(producer())
+
+        # Create worker tasks based on workers
+        worker_tasks = [asyncio.create_task(worker(i)) for i in range(workers)]
+
+        # Wait for producer and all workers to complete
+        await producer_task
+        await asyncio.gather(*worker_tasks)
+
+        # Return results in the original order
+        return [results_dict[i] for i in range(len(evaluation_set.evaluations))]
+
+    async def _execute_eval(
+        self,
+        eval_item: EvaluationItem,
+        evaluators: List[BaseEvaluator[Any]],
+        event_bus: EventBus,
+    ) -> EvaluationRunResult:
+        set_evaluation_item(eval_item)
+
+        await event_bus.publish(
+            EvaluationEvents.CREATE_EVAL_RUN,
+            EvalRunCreatedEvent(
+                execution_id=self.execution_id,
+                eval_item=eval_item,
+            ),
+        )
+
+        evaluation_run_results = EvaluationRunResult(
+            evaluation_name=eval_item.name, evaluation_run_results=[]
+        )
+
+        try:
+            agent_execution_output = await self.execute_runtime(eval_item)
+            evaluation_item_results: list[EvalItemResult] = []
+
+            for evaluator in evaluators:
+                evaluation_result = await self.run_evaluator(
+                    evaluator=evaluator,
+                    execution_output=agent_execution_output,
+                    eval_item=eval_item,
+                )
+
+                dto_result = EvaluationResultDto.from_evaluation_result(
+                    evaluation_result
+                )
+
+                evaluation_run_results.evaluation_run_results.append(
+                    EvaluationRunResultDto(
+                        evaluator_name=evaluator.name,
+                        result=dto_result,
+                        evaluator_id=evaluator.id,
+                    )
+                )
+                evaluation_item_results.append(
+                    EvalItemResult(
+                        evaluator_id=evaluator.id,
+                        result=evaluation_result,
+                    )
+                )
+
+            await event_bus.publish(
+                EvaluationEvents.UPDATE_EVAL_RUN,
+                EvalRunUpdatedEvent(
+                    execution_id=self.execution_id,
+                    eval_item=eval_item,
+                    eval_results=evaluation_item_results,
+                    success=not agent_execution_output.result.error,
+                    agent_output=agent_execution_output.result.output,
+                    agent_execution_time=agent_execution_output.execution_time,
+                    spans=agent_execution_output.spans,
+                    logs=agent_execution_output.logs,
+                ),
+                wait_for_completion=False,
+            )
+
+        except Exception as e:
+            exception_details = EvalItemExceptionDetails(exception=e)
+
+            for evaluator in evaluators:
+                evaluation_run_results.evaluation_run_results.append(
+                    EvaluationRunResultDto(
+                        evaluator_name=evaluator.name,
+                        evaluator_id=evaluator.id,
+                        result=EvaluationResultDto(score=0),
+                    )
+                )
+
+            eval_run_updated_event = EvalRunUpdatedEvent(
+                execution_id=self.execution_id,
+                eval_item=eval_item,
+                eval_results=[],
+                success=False,
+                agent_output={},
+                agent_execution_time=0.0,
+                exception_details=exception_details,
+                spans=[],
+                logs=[],
+            )
+            if isinstance(e, EvaluationRuntimeException):
+                eval_run_updated_event.spans = e.spans
+                eval_run_updated_event.logs = e.logs
+                eval_run_updated_event.exception_details.exception = ( # type: ignore
+                    e.root_exception
+                )
+                eval_run_updated_event.exception_details.runtime_exception = True # type: ignore
+
+            await event_bus.publish(
+                EvaluationEvents.UPDATE_EVAL_RUN,
+                eval_run_updated_event,
+                wait_for_completion=False,
+            )
+
+        return evaluation_run_results
+
     def _get_and_clear_execution_data(
         self, execution_id: str
     ) -> tuple[List[ReadableSpan], list[logging.LogRecord]]:
```
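`_execute_parallel` is a classic bounded producer/consumer: a queue capped at `workers` items, one `None` sentinel per worker to signal shutdown, and an index carried alongside each item so results can be returned in submission order. A minimal sketch of the same pattern outside the SDK (the function names and toy workload are illustrative):

```python
import asyncio


async def run_bounded(items: list[str], workers: int) -> list[str]:
    queue: asyncio.Queue = asyncio.Queue(maxsize=workers)
    results: dict[int, str] = {}

    async def producer() -> None:
        for index, item in enumerate(items):
            await queue.put((index, item))
        for _ in range(workers):
            await queue.put(None)  # one shutdown sentinel per worker

    async def worker() -> None:
        while True:
            entry = await queue.get()
            if entry is None:  # sentinel: this worker is done
                queue.task_done()
                break
            index, item = entry
            try:
                await asyncio.sleep(0.01)  # stand-in for the real async work
                results[index] = item.upper()
            finally:
                queue.task_done()

    tasks = [asyncio.create_task(worker()) for _ in range(workers)]
    await producer()
    await asyncio.gather(*tasks)
    return [results[i] for i in range(len(items))]


print(asyncio.run(run_bounded(["a", "b", "c", "d"], workers=2)))  # ['A', 'B', 'C', 'D']
```

One design consequence worth noting: the ordered read at the end assumes every index landed in the results dictionary, which holds only because `_execute_eval` catches exceptions internally and always returns a result.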
uipath/_cli/_runtime/_runtime.py
CHANGED
```diff
@@ -45,7 +45,9 @@ class UiPathRuntime(UiPathBaseRuntime):
         try:
             script_result = await self.executor(self.context.input_json)
 
-            if self.context.job_id is None:
+            if self.context.job_id is None and not getattr(
+                self.context, "is_eval_run", False
+            ):
                 logger.info(script_result)
 
             self.context.result = UiPathRuntimeResult(
```
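This change suppresses the final `logger.info(script_result)` when the context is flagged as an eval run; the `getattr(..., "is_eval_run", False)` form reads as a defensive default for contexts that lack the flag, though that is an inference from the diff rather than documented behavior.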
uipath/_cli/cli_eval.py
CHANGED
```diff
@@ -69,8 +69,8 @@ def setup_reporting_prereq(no_report: bool) -> bool:
 @click.option(
     "--workers",
     type=int,
-    default=
-    help="Number of parallel workers for running evaluations (default:
+    default=1,
+    help="Number of parallel workers for running evaluations (default: 1)",
 )
 @click.option(
     "--output-file",
```
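With the new default of 1, evaluation sets run sequentially unless callers opt in, e.g. `uipath eval --workers 4` (assuming the option is wired to the `eval` subcommand this module defines); per the runtime change above, the parallel path is only taken when `workers > 1` and the set contains more than one evaluation.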
uipath/_services/connections_service.py
CHANGED
```diff
@@ -32,7 +32,6 @@ class ConnectionsService(BaseService):
         super().__init__(config=config, execution_context=execution_context)
         self._folders_service = folders_service
 
-    @infer_bindings(resource_type="connection", name="key")
     @traced(
         name="connections_retrieve",
         run_type="uipath",
@@ -165,7 +164,6 @@ class ConnectionsService(BaseService):
 
         return self._parse_and_validate_list_response(response)
 
-    @infer_bindings(resource_type="connection", name="key")
     @traced(
         name="connections_retrieve",
         run_type="uipath",
```
uipath/eval/mocks/mockable.py
CHANGED
```diff
@@ -82,6 +82,8 @@ def get_input_schema(func):
 def mockable(
     name: Optional[str] = None,
     description: Optional[str] = None,
+    input_schema: Optional[dict[str, Any]] = None,
+    output_schema: Optional[dict[str, Any]] = None,
     example_calls: Optional[List[ExampleCall]] = None,
     **kwargs,
 ):
@@ -91,8 +93,8 @@ def mockable(
         params = {
             "name": name or func.__name__,
             "description": description or func.__doc__,
-            "input_schema": get_input_schema(func),
-            "output_schema": get_output_schema(func),
+            "input_schema": input_schema or get_input_schema(func),
+            "output_schema": output_schema or get_output_schema(func),
             "example_calls": example_calls,
             **kwargs,
         }
```
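The new parameters let callers override schema inference from the function signature. A hedged usage sketch: the import path is assumed from the package layout (`uipath/eval/mocks/mockable.py`), and the decorated function and JSON schemas are invented for illustration:

```python
from typing import Any

from uipath.eval.mocks import mockable  # import path assumed from the file layout

WEATHER_INPUT: dict[str, Any] = {
    "type": "object",
    "properties": {"city": {"type": "string"}},
    "required": ["city"],
}
WEATHER_OUTPUT: dict[str, Any] = {
    "type": "object",
    "properties": {"temperature_c": {"type": "number"}},
}


# Explicit schemas now take precedence over the ones inferred by
# get_input_schema/get_output_schema from the signature.
@mockable(
    name="get_weather",
    description="Look up current weather for a city.",
    input_schema=WEATHER_INPUT,
    output_schema=WEATHER_OUTPUT,
)
def get_weather(city: str) -> dict:
    return {"temperature_c": 21.5}
```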
{uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath
-Version: 2.1.78
+Version: 2.1.80
 Summary: Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools.
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-python
```
{uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/RECORD
CHANGED
```diff
@@ -9,7 +9,7 @@ uipath/_cli/__init__.py,sha256=2RUgXYd8uJaYjA67xWb0w4IZuBmZoY8G1ccNmEQk9oM,2343
 uipath/_cli/cli_auth.py,sha256=ZEA0Fwoo77Ez9ctpRAIq7sbAwj8F4OouAbMp1g1OvjM,2601
 uipath/_cli/cli_deploy.py,sha256=KPCmQ0c_NYD5JofSDao5r6QYxHshVCRxlWDVnQvlp5w,645
 uipath/_cli/cli_dev.py,sha256=nEfpjw1PZ72O6jmufYWVrueVwihFxDPOeJakdvNHdOA,2146
-uipath/_cli/cli_eval.py,sha256=
+uipath/_cli/cli_eval.py,sha256=oOMywGSUrHDQ1W_54ccbekzCeduPf-KHRyu_r0Dezd0,5444
 uipath/_cli/cli_init.py,sha256=Ac3-9tIH3rpikIX1ehWTo7InW5tjVNoz_w6fjvgLK4w,7052
 uipath/_cli/cli_invoke.py,sha256=m-te-EjhDpk_fhFDkt-yQFzmjEHGo5lQDGEQWxSXisQ,4395
 uipath/_cli/cli_new.py,sha256=9378NYUBc9j-qKVXV7oja-jahfJhXBg8zKVyaon7ctY,2102
@@ -47,13 +47,13 @@ uipath/_cli/_dev/_terminal/_utils/_logger.py,sha256=_ipTl_oAiMF9I7keGt2AAFAMz40D
 uipath/_cli/_evals/_console_progress_reporter.py,sha256=HgB6pdMyoS6YVwuI3EpM2LBcH3U69nrdaTyNgPG8ssg,9304
 uipath/_cli/_evals/_evaluator_factory.py,sha256=Gycv94VtGOpMir_Gba-UoiAyrSRfbSfe8_pTfjzcA9Q,3875
 uipath/_cli/_evals/_progress_reporter.py,sha256=kX7rNSa-QCLXIzK-vb9Jjf-XLEtucdeiQPgPlSkpp2U,16778
-uipath/_cli/_evals/_runtime.py,sha256=
+uipath/_cli/_evals/_runtime.py,sha256=YvyOUR13qIwLfhBQuaOCWbACiJXdxs46plTWsVHRC7g,18567
 uipath/_cli/_evals/_models/_evaluation_set.py,sha256=TEinpTAIzy5JLkF7-JrG_623ec2Y-GN9pfz284KKL_8,4567
 uipath/_cli/_evals/_models/_evaluator.py,sha256=fuC3UOYwPD4d_wdynHeLSCzbu82golNAnnPnxC8Y4rk,3315
 uipath/_cli/_evals/_models/_evaluator_base_params.py,sha256=lTYKOV66tcjW85KHTyOdtF1p1VDaBNemrMAvH8bFIFc,382
 uipath/_cli/_evals/_models/_exceptions.py,sha256=-oXLTDa4ab9Boa34ZxuUrCezf8ajIGrIEUVwZnmBASE,195
 uipath/_cli/_evals/_models/_mocks.py,sha256=mlD9qvdZNniuKxzY_ttJtwLVFvKGvvIukYvy0FTa12k,241
-uipath/_cli/_evals/_models/_output.py,sha256=
+uipath/_cli/_evals/_models/_output.py,sha256=4KPgXypO2qvWsDf37KbdMGmlYgaIfBSeGgKL32J2nP0,3157
 uipath/_cli/_evals/_models/_sw_reporting.py,sha256=tSBLQFAdOIun8eP0vsqt56K6bmCZz_uMaWI3hskg_24,536
 uipath/_cli/_evals/mocks/__init__.py,sha256=2WXwAy_oZw5bKp6L0HB13QygCJeftOB_Bget0AI6Gik,32
 uipath/_cli/_evals/mocks/llm_mocker.py,sha256=2pNCYKdXLoN_TTwiVk15RI-QC_CmHLaH40mFU1o4Ark,6828
@@ -66,7 +66,7 @@ uipath/_cli/_runtime/_contracts.py,sha256=E8Is7EQfAu7_hCbeZI68gmTxSxo4X7_U4vcSl7
 uipath/_cli/_runtime/_escalation.py,sha256=x3vI98qsfRA-fL_tNkRVTFXioM5Gv2w0GFcXJJ5eQtg,7981
 uipath/_cli/_runtime/_hitl.py,sha256=VKbM021nVg1HEDnTfucSLJ0LsDn83CKyUtVzofS2qTU,11369
 uipath/_cli/_runtime/_logging.py,sha256=jwBfsy0Hi4zkfPH-v9dQ7m5dcJeuE0j_OxdpI-DhHaw,13854
-uipath/_cli/_runtime/_runtime.py,sha256=
+uipath/_cli/_runtime/_runtime.py,sha256=P77YtxylLzpK2yz4S0tsBTVFpBSoh5lA7Gpu0SFRD5w,2379
 uipath/_cli/_runtime/_script_executor.py,sha256=PjbmEbyCMofGH2F85b8RFsxdV3Tqw0kVqdWOOk2ZLlI,9687
 uipath/_cli/_templates/.psmdcp.template,sha256=C7pBJPt98ovEljcBvGtEUGoWjjQhu9jls1bpYjeLOKA,611
 uipath/_cli/_templates/.rels.template,sha256=-fTcw7OA1AcymHr0LzBqbMAAtzZTRXLTNa_ljq087Jk,406
@@ -97,7 +97,7 @@ uipath/_services/api_client.py,sha256=kGm04ijk9AOEQd2BMxvQg-2QoB8dmyoDwFFDPyutAG
 uipath/_services/assets_service.py,sha256=pG0Io--SeiRRQmfUWPQPl1vq3csZlQgx30LBNKRmmF8,12145
 uipath/_services/attachments_service.py,sha256=NPQYK7CGjfBaNT_1S5vEAfODmOChTbQZforllFM2ofU,26678
 uipath/_services/buckets_service.py,sha256=5s8tuivd7GUZYj774DDUYTa0axxlUuesc4EBY1V5sdk,18496
-uipath/_services/connections_service.py,sha256=
+uipath/_services/connections_service.py,sha256=IqhKdRYwNZlRsDL2vY7gyl5nAiYaK1zvj_CLa7WLzVQ,15785
 uipath/_services/context_grounding_service.py,sha256=Pjx-QQQEiSKD-hY6ityj3QUSALN3fIcKLLHr_NZ0d_g,37117
 uipath/_services/documents_service.py,sha256=UnFS8EpOZ_Ng2TZk3OiJJ3iNANvFs7QxuoG_v-lQj6c,24815
 uipath/_services/entities_service.py,sha256=QKCLE6wRgq3HZraF-M2mljy-8il4vsNHrQhUgkewVVk,14028
@@ -139,7 +139,7 @@ uipath/eval/evaluators/json_similarity_evaluator.py,sha256=cP4kpN-UIf690V5dq4LaC
 uipath/eval/evaluators/llm_as_judge_evaluator.py,sha256=l0bbn8ZLi9ZTXcgr7tJ2tsCvHFqIIeGa7sobaAHgI2Y,4927
 uipath/eval/evaluators/trajectory_evaluator.py,sha256=w9E8yUXp3KCXTfiUD-ut1OVyiOH3RpFFIIe7w3v3pBQ,5740
 uipath/eval/mocks/__init__.py,sha256=Qis6XSN7_WOmrmD_I5Fo5E_OQpflb_SlZM_MDOszUXI,152
-uipath/eval/mocks/mockable.py,sha256=
+uipath/eval/mocks/mockable.py,sha256=FJEE4iz6nchowGhoGR3FgF9VvymHnWJkUyakKOK4fIg,3360
 uipath/eval/models/__init__.py,sha256=x360CDZaRjUL3q3kh2CcXYYrQ47jwn6p6JnmhEIvMlA,419
 uipath/eval/models/models.py,sha256=YgPnkQunjEcEiueVQnYRsbQ3Nj1yQttDQZiMCq_DDkY,6321
 uipath/models/__init__.py,sha256=d_DkK1AtRUetM1t2NrH5UKgvJOBiynzaKnK5pMY7aIc,1289
@@ -169,8 +169,8 @@ uipath/tracing/_traced.py,sha256=yBIY05PCCrYyx50EIHZnwJaKNdHPNx-YTR1sHQl0a98,199
 uipath/tracing/_utils.py,sha256=qd7N56tg6VXQ9pREh61esBgUWLNA0ssKsE0QlwrRWFM,11974
 uipath/utils/__init__.py,sha256=VD-KXFpF_oWexFg6zyiWMkxl2HM4hYJMIUDZ1UEtGx0,105
 uipath/utils/_endpoints_manager.py,sha256=iRTl5Q0XAm_YgcnMcJOXtj-8052sr6jpWuPNz6CgT0Q,8408
-uipath-2.1.78.dist-info/METADATA,sha256=
-uipath-2.1.78.dist-info/WHEEL,sha256=
-uipath-2.1.78.dist-info/entry_points.txt,sha256=
-uipath-2.1.78.dist-info/licenses/LICENSE,sha256=
-uipath-2.1.78.dist-info/RECORD,,
+uipath-2.1.80.dist-info/METADATA,sha256=AMUh4rPIDQJzkepk6v3LaIpzwfyFraEhCYdN_0Z96YI,6593
+uipath-2.1.80.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath-2.1.80.dist-info/entry_points.txt,sha256=9C2_29U6Oq1ExFu7usihR-dnfIVNSKc-0EFbh0rskB4,43
+uipath-2.1.80.dist-info/licenses/LICENSE,sha256=-KBavWXepyDjimmzH5fVAsi-6jNVpIKFc2kZs0Ri4ng,1058
+uipath-2.1.80.dist-info/RECORD,,
```
{uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/WHEEL
File without changes
{uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/entry_points.txt
File without changes
{uipath-2.1.78.dist-info → uipath-2.1.80.dist-info}/licenses/LICENSE
File without changes