uipath 2.1.79__py3-none-any.whl → 2.1.81__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of uipath might be problematic.

```diff
@@ -1,3 +1,4 @@
+import asyncio
 import json
 import logging
 import uuid
@@ -151,9 +152,6 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         )
         evaluators = self._load_evaluators(evaluation_set)
 
-        evaluator_averages = {evaluator.id: 0.0 for evaluator in evaluators}
-        evaluator_counts = {evaluator.id: 0 for evaluator in evaluators}
-
        await event_bus.publish(
             EvaluationEvents.CREATE_EVAL_SET_RUN,
             EvalSetRunCreatedEvent(
```
```diff
@@ -165,110 +163,37 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
             ),
         )
 
-        results = UiPathEvalOutput(
-            evaluation_set_name=evaluation_set.name, score=0, evaluation_set_results=[]
-        )
-        for eval_item in evaluation_set.evaluations:
-            set_evaluation_item(eval_item)
-            await event_bus.publish(
-                EvaluationEvents.CREATE_EVAL_RUN,
-                EvalRunCreatedEvent(
-                    execution_id=self.execution_id,
-                    eval_item=eval_item,
-                ),
+        # Check if parallel execution should be used
+        if (
+            self.context.workers
+            and self.context.workers > 1
+            and len(evaluation_set.evaluations) > 1
+        ):
+            eval_run_result_list = await self._execute_parallel(
+                evaluation_set, evaluators, event_bus, self.context.workers
             )
-
-            evaluation_run_results = EvaluationRunResult(
-                evaluation_name=eval_item.name, evaluation_run_results=[]
+        else:
+            eval_run_result_list = await self._execute_sequential(
+                evaluation_set, evaluators, event_bus
             )
+        results = UiPathEvalOutput(
+            evaluation_set_name=evaluation_set.name,
+            evaluation_set_results=eval_run_result_list,
+        )
 
-            results.evaluation_set_results.append(evaluation_run_results)
-
-            try:
-                agent_execution_output = await self.execute_runtime(eval_item)
-                evaluation_item_results: list[EvalItemResult] = []
-
-                for evaluator in evaluators:
-                    evaluation_result = await self.run_evaluator(
-                        evaluator=evaluator,
-                        execution_output=agent_execution_output,
-                        eval_item=eval_item,
-                    )
-
-                    dto_result = EvaluationResultDto.from_evaluation_result(
-                        evaluation_result
-                    )
-                    evaluator_counts[evaluator.id] += 1
-                    count = evaluator_counts[evaluator.id]
-                    evaluator_averages[evaluator.id] += (
-                        dto_result.score - evaluator_averages[evaluator.id]
-                    ) / count
-
-                    evaluation_run_results.evaluation_run_results.append(
-                        EvaluationRunResultDto(
-                            evaluator_name=evaluator.name,
-                            result=dto_result,
-                        )
-                    )
-                    evaluation_item_results.append(
-                        EvalItemResult(
-                            evaluator_id=evaluator.id,
-                            result=evaluation_result,
-                        )
-                    )
-
-                evaluation_run_results.compute_average_score()
-
-                await event_bus.publish(
-                    EvaluationEvents.UPDATE_EVAL_RUN,
-                    EvalRunUpdatedEvent(
-                        execution_id=self.execution_id,
-                        eval_item=eval_item,
-                        eval_results=evaluation_item_results,
-                        success=not agent_execution_output.result.error,
-                        agent_output=agent_execution_output.result.output,
-                        agent_execution_time=agent_execution_output.execution_time,
-                        spans=agent_execution_output.spans,
-                        logs=agent_execution_output.logs,
-                    ),
-                    wait_for_completion=False,
-                )
-            except Exception as e:
-                exception_details = EvalItemExceptionDetails(exception=e)
+        # Computing evaluator averages
+        evaluator_averages: Dict[str, float] = defaultdict(float)
+        evaluator_count: Dict[str, int] = defaultdict(int)
 
-                for evaluator in evaluators:
-                    evaluator_counts[evaluator.id] += 1
-                    count = evaluator_counts[evaluator.id]
-                    evaluator_averages[evaluator.id] += (
-                        0.0 - evaluator_averages[evaluator.id]
-                    ) / count
+        for eval_run_result in results.evaluation_set_results:
+            for result_dto in eval_run_result.evaluation_run_results:
+                evaluator_averages[result_dto.evaluator_id] += result_dto.result.score
+                evaluator_count[result_dto.evaluator_id] += 1
 
-                eval_run_updated_event = EvalRunUpdatedEvent(
-                    execution_id=self.execution_id,
-                    eval_item=eval_item,
-                    eval_results=[],
-                    success=False,
-                    agent_output={},
-                    agent_execution_time=0.0,
-                    exception_details=exception_details,
-                    spans=[],
-                    logs=[],
-                )
-                if isinstance(e, EvaluationRuntimeException):
-                    eval_run_updated_event.spans = e.spans
-                    eval_run_updated_event.logs = e.logs
-                    eval_run_updated_event.exception_details.exception = (  # type: ignore
-                        e.root_exception
-                    )
-                    eval_run_updated_event.exception_details.runtime_exception = True  # type: ignore
-
-                await event_bus.publish(
-                    EvaluationEvents.UPDATE_EVAL_RUN,
-                    eval_run_updated_event,
-                    wait_for_completion=False,
-                )
-
-        results.compute_average_score()
+        for eval_id in evaluator_averages:
+            evaluator_averages[eval_id] = (
+                evaluator_averages[eval_id] / evaluator_count[eval_id]
+            )
 
         await event_bus.publish(
             EvaluationEvents.UPDATE_EVAL_SET_RUN,
```
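This hunk replaces the per-result running-mean update (deleted above) with a collect-then-average pass over the finished results. A small illustrative sketch, not code from the package, showing the two formulations agree:

```python
# Illustrative only: the removed running-mean update and the new
# sum-then-divide pass compute the same per-evaluator average.
from collections import defaultdict

scores = {"evaluator-a": [0.5, 1.0, 0.0]}

# Old style: incremental running mean, updated once per result.
running = 0.0
for count, score in enumerate(scores["evaluator-a"], start=1):
    running += (score - running) / count

# New style: accumulate totals and counts, divide once at the end.
totals: dict[str, float] = defaultdict(float)
counts: dict[str, int] = defaultdict(int)
for score in scores["evaluator-a"]:
    totals["evaluator-a"] += score
    counts["evaluator-a"] += 1
average = totals["evaluator-a"] / counts["evaluator-a"]

assert abs(running - average) < 1e-9  # both yield 0.5
```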
```diff
@@ -285,6 +210,182 @@ class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
         )
         return self.context.result
 
+    async def _execute_sequential(
+        self,
+        evaluation_set: EvaluationSet,
+        evaluators: List[BaseEvaluator[Any]],
+        event_bus: EventBus,
+    ) -> List[EvaluationRunResult]:
+        all_eval_run_result: list[EvaluationRunResult] = []
+
+        for eval_item in evaluation_set.evaluations:
+            all_eval_run_result.append(
+                await self._execute_eval(eval_item, evaluators, event_bus)
+            )
+
+        return all_eval_run_result
+
+    async def _execute_parallel(
+        self,
+        evaluation_set: EvaluationSet,
+        evaluators: List[BaseEvaluator[Any]],
+        event_bus: EventBus,
+        workers: int,
+    ) -> List[EvaluationRunResult]:
+        # Create a queue with max concurrency
+        queue: asyncio.Queue[tuple[int, EvaluationItem]] = asyncio.Queue(
+            maxsize=workers
+        )
+
+        # Dictionary to store results with their original indices
+        results_dict: Dict[int, EvaluationRunResult] = {}
+
+        # Producer task to fill the queue
+        async def producer() -> None:
+            for index, eval_item in enumerate(evaluation_set.evaluations):
+                await queue.put((index, eval_item))
+            # Signal completion by putting None markers
+            for _ in range(workers):
+                await queue.put(None)  # type: ignore
+
+        # Worker function to process items from the queue
+        async def worker(worker_id: int) -> None:
+            while True:
+                item = await queue.get()
+
+                # Check for termination signal
+                if item is None:
+                    queue.task_done()
+                    break
+
+                index, eval_item = item
+
+                try:
+                    # Execute the evaluation
+                    result = await self._execute_eval(eval_item, evaluators, event_bus)
+
+                    # Store result with its index to maintain order
+                    results_dict[index] = result
+                finally:
+                    # Mark the task as done
+                    queue.task_done()
+
+        # Start producer
+        producer_task = asyncio.create_task(producer())
+
+        # Create worker tasks based on workers
+        worker_tasks = [asyncio.create_task(worker(i)) for i in range(workers)]
+
+        # Wait for producer and all workers to complete
+        await producer_task
+        await asyncio.gather(*worker_tasks)
+
+        # Return results in the original order
+        return [results_dict[i] for i in range(len(evaluation_set.evaluations))]
+
+    async def _execute_eval(
+        self,
+        eval_item: EvaluationItem,
+        evaluators: List[BaseEvaluator[Any]],
+        event_bus: EventBus,
+    ) -> EvaluationRunResult:
+        set_evaluation_item(eval_item)
+
+        await event_bus.publish(
+            EvaluationEvents.CREATE_EVAL_RUN,
+            EvalRunCreatedEvent(
+                execution_id=self.execution_id,
+                eval_item=eval_item,
+            ),
+        )
+
+        evaluation_run_results = EvaluationRunResult(
+            evaluation_name=eval_item.name, evaluation_run_results=[]
+        )
+
+        try:
+            agent_execution_output = await self.execute_runtime(eval_item)
+            evaluation_item_results: list[EvalItemResult] = []
+
+            for evaluator in evaluators:
+                evaluation_result = await self.run_evaluator(
+                    evaluator=evaluator,
+                    execution_output=agent_execution_output,
+                    eval_item=eval_item,
+                )
+
+                dto_result = EvaluationResultDto.from_evaluation_result(
+                    evaluation_result
+                )
+
+                evaluation_run_results.evaluation_run_results.append(
+                    EvaluationRunResultDto(
+                        evaluator_name=evaluator.name,
+                        result=dto_result,
+                        evaluator_id=evaluator.id,
+                    )
+                )
+                evaluation_item_results.append(
+                    EvalItemResult(
+                        evaluator_id=evaluator.id,
+                        result=evaluation_result,
+                    )
+                )
+
+            await event_bus.publish(
+                EvaluationEvents.UPDATE_EVAL_RUN,
+                EvalRunUpdatedEvent(
+                    execution_id=self.execution_id,
+                    eval_item=eval_item,
+                    eval_results=evaluation_item_results,
+                    success=not agent_execution_output.result.error,
+                    agent_output=agent_execution_output.result.output,
+                    agent_execution_time=agent_execution_output.execution_time,
+                    spans=agent_execution_output.spans,
+                    logs=agent_execution_output.logs,
+                ),
+                wait_for_completion=False,
+            )
+
+        except Exception as e:
+            exception_details = EvalItemExceptionDetails(exception=e)
+
+            for evaluator in evaluators:
+                evaluation_run_results.evaluation_run_results.append(
+                    EvaluationRunResultDto(
+                        evaluator_name=evaluator.name,
+                        evaluator_id=evaluator.id,
+                        result=EvaluationResultDto(score=0),
+                    )
+                )
+
+            eval_run_updated_event = EvalRunUpdatedEvent(
+                execution_id=self.execution_id,
+                eval_item=eval_item,
+                eval_results=[],
+                success=False,
+                agent_output={},
+                agent_execution_time=0.0,
+                exception_details=exception_details,
+                spans=[],
+                logs=[],
+            )
+            if isinstance(e, EvaluationRuntimeException):
+                eval_run_updated_event.spans = e.spans
+                eval_run_updated_event.logs = e.logs
+                eval_run_updated_event.exception_details.exception = (  # type: ignore
+                    e.root_exception
+                )
+                eval_run_updated_event.exception_details.runtime_exception = True  # type: ignore
+
+            await event_bus.publish(
+                EvaluationEvents.UPDATE_EVAL_RUN,
+                eval_run_updated_event,
+                wait_for_completion=False,
+            )
+
+        return evaluation_run_results
+
     def _get_and_clear_execution_data(
         self, execution_id: str
     ) -> tuple[List[ReadableSpan], list[logging.LogRecord]]:
```
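The new `_execute_parallel` uses a classic bounded producer/worker pattern: a queue capped at `workers`, one `None` sentinel per worker to signal shutdown, and an index carried alongside each item so results can be reassembled in input order. A minimal self-contained sketch of the same pattern, with placeholder work standing in for evaluations:

```python
# Standalone sketch of the producer/worker pattern above (illustrative,
# not package code): bounded queue, per-worker sentinels, ordered results.
import asyncio

async def run_bounded(items: list[str], workers: int) -> list[str]:
    queue: asyncio.Queue = asyncio.Queue(maxsize=workers)
    results: dict[int, str] = {}

    async def producer() -> None:
        for index, item in enumerate(items):
            await queue.put((index, item))
        for _ in range(workers):  # one sentinel per worker
            await queue.put(None)

    async def worker() -> None:
        while (entry := await queue.get()) is not None:
            index, item = entry
            await asyncio.sleep(0)  # placeholder for real async work
            results[index] = item.upper()

    await queue.join() if False else None  # join not needed; gather below suffices
    await asyncio.gather(producer(), *(worker() for _ in range(workers)))
    return [results[i] for i in range(len(items))]  # original input order

print(asyncio.run(run_bounded(["a", "b", "c"], workers=2)))  # ['A', 'B', 'C']
```

In the package code itself, `_execute_eval` traps exceptions and still returns a result object, which is what keeps `results_dict` complete and makes the final ordered comprehension safe.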
```diff
@@ -45,7 +45,9 @@ class UiPathRuntime(UiPathBaseRuntime):
         try:
             script_result = await self.executor(self.context.input_json)
 
-            if self.context.job_id is None:
+            if self.context.job_id is None and not getattr(
+                self.context, "is_eval_run", False
+            ):
                 logger.info(script_result)
 
             self.context.result = UiPathRuntimeResult(
```
```diff
@@ -12,19 +12,19 @@ from ..spinner import Spinner
 def environment_options(function):
     function = click.option(
         "--alpha",
-        "domain",
+        "environment",
         flag_value="alpha",
         help="Use alpha environment",
     )(function)
     function = click.option(
         "--staging",
-        "domain",
+        "environment",
         flag_value="staging",
         help="Use staging environment",
     )(function)
     function = click.option(
         "--cloud",
-        "domain",
+        "environment",
         flag_value="cloud",
         default=True,
         help="Use production environment",
```
uipath/_cli/cli_auth.py CHANGED
```diff
@@ -48,7 +48,7 @@ console = ConsoleLogger()
 )
 @track
 def auth(
-    domain,
+    environment: str,
     force: bool = False,
     client_id: Optional[str] = None,
     client_secret: Optional[str] = None,
@@ -70,7 +70,7 @@ def auth(
     - Set UIPATH_DISABLE_SSL_VERIFY to disable SSL verification (not recommended)
     """
     auth_service = AuthService(
-        domain,
+        environment=environment,
         force=force,
         client_id=client_id,
         client_secret=client_secret,
```
uipath/_cli/cli_eval.py CHANGED
```diff
@@ -69,8 +69,8 @@ def setup_reporting_prereq(no_report: bool) -> bool:
 @click.option(
     "--workers",
     type=int,
-    default=8,
-    help="Number of parallel workers for running evaluations (default: 8)",
+    default=1,
+    help="Number of parallel workers for running evaluations (default: 1)",
 )
 @click.option(
     "--output-file",
```
```diff
@@ -7,6 +7,7 @@ from .connections_service import ConnectionsService
 from .context_grounding_service import ContextGroundingService
 from .documents_service import DocumentsService
 from .entities_service import EntitiesService
+from .external_application_service import ExternalApplicationService
 from .folder_service import FolderService
 from .jobs_service import JobsService
 from .llm_gateway_service import UiPathLlmChatService, UiPathOpenAIService
@@ -29,4 +30,5 @@ __all__ = [
     "UiPathLlmChatService",
     "FolderService",
     "EntitiesService",
+    "ExternalApplicationService",
 ]
```
```diff
@@ -1,22 +1,26 @@
-from typing import Optional, cast
+from os import environ as env
+from typing import Optional
 from urllib.parse import urlparse
 
 import httpx
+from httpx import HTTPStatusError, Request
 
-from ..._utils._ssl_context import get_httpx_client_kwargs
-from .._utils._console import ConsoleLogger
-from ._models import TokenData
-from ._utils import parse_access_token, update_env_file
+from .._utils._ssl_context import get_httpx_client_kwargs
+from .._utils.constants import ENV_BASE_URL
+from ..models.auth import TokenData
+from ..models.exceptions import EnrichedException
 
-console = ConsoleLogger()
 
-
-class ClientCredentialsService:
+class ExternalApplicationService:
     """Service for client credentials authentication flow."""
 
-    def __init__(self, base_url: str):
-        self._base_url = base_url
-        self._domain = self._extract_domain_from_base_url(base_url)
+    def __init__(self, base_url: Optional[str]):
+        if not (resolved_base_url := (base_url or env.get(ENV_BASE_URL))):
+            raise ValueError(
+                "Base URL must be set either via constructor or the BASE_URL environment variable."
+            )
+        self._base_url = resolved_base_url
+        self._domain = self._extract_environment_from_base_url(self._base_url)
 
     def get_token_url(self) -> str:
         """Get the token URL for the specified domain."""
```
```diff
@@ -40,7 +44,7 @@ class ClientCredentialsService:
         """
         return hostname == domain or hostname.endswith(f".{domain}")
 
-    def _extract_domain_from_base_url(self, base_url: str) -> str:
+    def _extract_environment_from_base_url(self, base_url: str) -> str:
         """Extract domain from base URL.
 
         Args:
@@ -70,9 +74,9 @@ class ClientCredentialsService:
             # Default to cloud if parsing fails
             return "cloud"
 
-    def authenticate(
+    def get_token_data(
         self, client_id: str, client_secret: str, scope: Optional[str] = "OR.Execution"
-    ) -> None:
+    ) -> TokenData:
         """Authenticate using client credentials flow.
 
         Args:
@@ -81,7 +85,7 @@
             scope: The scope for the token (default: OR.Execution)
 
         Returns:
-            Token data if successful, None otherwise
+            Token data if successful
         """
         token_url = self.get_token_url()
 
```
```diff
@@ -97,49 +101,40 @@ class ClientCredentialsService:
             response = client.post(token_url, data=data)
             match response.status_code:
                 case 200:
-                    token_data = response.json()
-                    token_data = cast(
-                        TokenData,
-                        {
-                            "access_token": token_data["access_token"],
-                            "token_type": token_data.get("token_type", "Bearer"),
-                            "expires_in": token_data.get("expires_in", 3600),
-                            "scope": token_data.get("scope", scope),
-                            "refresh_token": "",
-                            "id_token": "",
-                        },
-                    )
-                    self._setup_environment(token_data)
+                    return TokenData.model_validate(response.json())
                 case 400:
-                    console.error(
-                        "Invalid client credentials or request parameters."
+                    raise EnrichedException(
+                        HTTPStatusError(
+                            message="Invalid client credentials or request parameters.",
+                            request=Request(
+                                data=data, url=token_url, method="post"
+                            ),
+                            response=response,
+                        )
                     )
                 case 401:
-                    console.error("Unauthorized: Invalid client credentials.")
+                    raise EnrichedException(
+                        HTTPStatusError(
+                            message="Unauthorized: Invalid client credentials.",
+                            request=Request(
+                                data=data, url=token_url, method="post"
+                            ),
+                            response=response,
+                        )
+                    )
                 case _:
-                    console.error(
-                        f"Authentication failed: {response.status_code} - {response.text}"
+                    raise EnrichedException(
+                        HTTPStatusError(
+                            message=f"Authentication failed with unexpected status: {response.status_code}",
+                            request=Request(
+                                data=data, url=token_url, method="post"
+                            ),
+                            response=response,
+                        )
                     )
-
+        except EnrichedException:
+            raise
         except httpx.RequestError as e:
-            console.error(f"Network error during authentication: {e}")
+            raise Exception(f"Network error during authentication: {e}") from e
         except Exception as e:
-            console.error(f"Unexpected error during authentication: {e}")
-
-    def _setup_environment(self, token_data: TokenData):
-        """Setup environment variables for client credentials authentication.
-
-        Args:
-            token_data: The token data from authentication
-            base_url: The base URL for the UiPath instance
-        """
-        parsed_access_token = parse_access_token(token_data["access_token"])
-
-        env_vars = {
-            "UIPATH_ACCESS_TOKEN": token_data["access_token"],
-            "UIPATH_URL": self._base_url,
-            "UIPATH_ORGANIZATION_ID": parsed_access_token.get("prt_id", ""),
-            "UIPATH_TENANT_ID": "",
-        }
-
-        update_env_file(env_vars)
+            raise Exception(f"Unexpected error during authentication: {e}") from e
```
uipath/_uipath.py CHANGED
```diff
@@ -1,4 +1,3 @@
-from os import environ as env
 from typing import Optional
 
 from pydantic import ValidationError
@@ -23,11 +22,7 @@ from ._services import (
     UiPathOpenAIService,
 )
 from ._utils import setup_logging
-from ._utils.constants import (
-    ENV_BASE_URL,
-    ENV_UIPATH_ACCESS_TOKEN,
-    ENV_UNATTENDED_USER_ACCESS_TOKEN,
-)
+from ._utils._auth import resolve_config
 from .models.errors import BaseUrlMissingError, SecretMissingError
 
 
@@ -37,19 +32,18 @@ class UiPath:
         *,
         base_url: Optional[str] = None,
         secret: Optional[str] = None,
+        client_id: Optional[str] = None,
+        client_secret: Optional[str] = None,
+        scope: Optional[str] = None,
         debug: bool = False,
     ) -> None:
-        base_url_value = base_url or env.get(ENV_BASE_URL)
-        secret_value = (
-            secret
-            or env.get(ENV_UNATTENDED_USER_ACCESS_TOKEN)
-            or env.get(ENV_UIPATH_ACCESS_TOKEN)
-        )
-
         try:
+            base_url, secret = resolve_config(
+                base_url, secret, client_id, client_secret, scope
+            )
             self._config = Config(
-                base_url=base_url_value,  # type: ignore
-                secret=secret_value,  # type: ignore
+                base_url=base_url,
+                secret=secret,
             )
         except ValidationError as e:
             for error in e.errors():
```
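With `resolve_config` in place, the `UiPath` constructor accepts client credentials alongside the existing `base_url`/`secret` pair. A hedged sketch of both construction paths (the root import path and the credential-resolution behavior are assumed from the diff, not verified against the package):

```python
# Sketch only: construction paths implied by the new signature.
from uipath import UiPath  # import path assumed

# Existing path: explicit base URL plus a pre-acquired secret/token.
sdk = UiPath(
    base_url="https://cloud.uipath.com/acme/DefaultTenant",  # placeholder URL
    secret="your-access-token",
)

# New path: client credentials, presumably exchanged for a token by
# resolve_config before Config validation runs.
sdk = UiPath(
    base_url="https://cloud.uipath.com/acme/DefaultTenant",
    client_id="your-client-id",
    client_secret="your-client-secret",
    scope="OR.Execution",
)
```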