uipath-2.0.0.dev3-py3-none-any.whl → uipath-2.0.1-py3-none-any.whl
This diff shows the changes between package versions as they were published to their public registry. It is provided for informational purposes only.
Potentially problematic release.
- uipath/__init__.py +24 -0
- uipath/_cli/README.md +11 -0
- uipath/_cli/__init__.py +54 -0
- uipath/_cli/_auth/_auth_server.py +165 -0
- uipath/_cli/_auth/_models.py +51 -0
- uipath/_cli/_auth/_oidc_utils.py +69 -0
- uipath/_cli/_auth/_portal_service.py +163 -0
- uipath/_cli/_auth/_utils.py +51 -0
- uipath/_cli/_auth/auth_config.json +6 -0
- uipath/_cli/_auth/index.html +167 -0
- uipath/_cli/_auth/localhost.crt +25 -0
- uipath/_cli/_auth/localhost.key +27 -0
- uipath/_cli/_runtime/_contracts.py +429 -0
- uipath/_cli/_runtime/_logging.py +193 -0
- uipath/_cli/_runtime/_runtime.py +264 -0
- uipath/_cli/_templates/.psmdcp.template +9 -0
- uipath/_cli/_templates/.rels.template +5 -0
- uipath/_cli/_templates/[Content_Types].xml.template +9 -0
- uipath/_cli/_templates/main.py.template +25 -0
- uipath/_cli/_templates/package.nuspec.template +10 -0
- uipath/_cli/_utils/_common.py +24 -0
- uipath/_cli/_utils/_input_args.py +126 -0
- uipath/_cli/_utils/_parse_ast.py +542 -0
- uipath/_cli/cli_auth.py +97 -0
- uipath/_cli/cli_deploy.py +13 -0
- uipath/_cli/cli_init.py +113 -0
- uipath/_cli/cli_new.py +76 -0
- uipath/_cli/cli_pack.py +337 -0
- uipath/_cli/cli_publish.py +113 -0
- uipath/_cli/cli_run.py +133 -0
- uipath/_cli/middlewares.py +113 -0
- uipath/_config.py +6 -0
- uipath/_execution_context.py +83 -0
- uipath/_folder_context.py +62 -0
- uipath/_models/__init__.py +37 -0
- uipath/_models/action_schema.py +26 -0
- uipath/_models/actions.py +64 -0
- uipath/_models/assets.py +48 -0
- uipath/_models/connections.py +51 -0
- uipath/_models/context_grounding.py +18 -0
- uipath/_models/context_grounding_index.py +60 -0
- uipath/_models/exceptions.py +6 -0
- uipath/_models/interrupt_models.py +28 -0
- uipath/_models/job.py +66 -0
- uipath/_models/llm_gateway.py +101 -0
- uipath/_models/processes.py +48 -0
- uipath/_models/queues.py +167 -0
- uipath/_services/__init__.py +26 -0
- uipath/_services/_base_service.py +250 -0
- uipath/_services/actions_service.py +271 -0
- uipath/_services/api_client.py +89 -0
- uipath/_services/assets_service.py +257 -0
- uipath/_services/buckets_service.py +268 -0
- uipath/_services/connections_service.py +185 -0
- uipath/_services/connections_service.pyi +50 -0
- uipath/_services/context_grounding_service.py +402 -0
- uipath/_services/folder_service.py +49 -0
- uipath/_services/jobs_service.py +265 -0
- uipath/_services/llm_gateway_service.py +311 -0
- uipath/_services/processes_service.py +168 -0
- uipath/_services/queues_service.py +314 -0
- uipath/_uipath.py +98 -0
- uipath/_utils/__init__.py +17 -0
- uipath/_utils/_endpoint.py +79 -0
- uipath/_utils/_infer_bindings.py +30 -0
- uipath/_utils/_logs.py +15 -0
- uipath/_utils/_request_override.py +18 -0
- uipath/_utils/_request_spec.py +23 -0
- uipath/_utils/_user_agent.py +16 -0
- uipath/_utils/constants.py +25 -0
- uipath/py.typed +0 -0
- {uipath-2.0.0.dev3.dist-info → uipath-2.0.1.dist-info}/METADATA +2 -3
- uipath-2.0.1.dist-info/RECORD +75 -0
- uipath-2.0.0.dev3.dist-info/RECORD +0 -4
- {uipath-2.0.0.dev3.dist-info → uipath-2.0.1.dist-info}/WHEEL +0 -0
- {uipath-2.0.0.dev3.dist-info → uipath-2.0.1.dist-info}/entry_points.txt +0 -0
uipath/_services/jobs_service.py
@@ -0,0 +1,265 @@
import json
from typing import Any, Dict, Optional, overload

from .._config import Config
from .._execution_context import ExecutionContext
from .._folder_context import FolderContext
from .._models.job import Job
from .._utils import Endpoint, RequestSpec, header_folder
from ._base_service import BaseService


class JobsService(FolderContext, BaseService):
    """Service for managing API payloads and job inbox interactions.

    A job represents a single execution of an automation - it is created when you start
    a process and contains information about that specific run, including its status,
    start time, and any input/output data.
    """

    def __init__(self, config: Config, execution_context: ExecutionContext) -> None:
        super().__init__(config=config, execution_context=execution_context)

    @overload
    def resume(self, *, inbox_id: str, payload: Any) -> None: ...

    @overload
    def resume(self, *, job_id: str, payload: Any) -> None: ...

    def resume(
        self,
        *,
        inbox_id: Optional[str] = None,
        job_id: Optional[str] = None,
        folder_key: Optional[str] = None,
        folder_path: Optional[str] = None,
        payload: Any,
    ) -> None:
        """Sends a payload to resume a paused job waiting for input, identified by its inbox ID or job ID.

        Args:
            inbox_id (Optional[str]): The inbox ID of the job.
            job_id (Optional[str]): The job ID of the job.
            folder_key (Optional[str]): The key of the folder to execute the process in. Overrides the default one set in the SDK config.
            folder_path (Optional[str]): The path of the folder to execute the process in. Overrides the default one set in the SDK config.
            payload (Any): The payload to deliver.
        """
        if job_id is None and inbox_id is None:
            raise ValueError("Either job_id or inbox_id must be provided")

        # for type checking
        job_id = str(job_id)
        inbox_id = (
            inbox_id
            if inbox_id
            else self._retrieve_inbox_id(
                job_id=job_id,
                folder_key=folder_key,
                folder_path=folder_path,
            )
        )
        spec = self._resume_spec(
            inbox_id=inbox_id,
            payload=payload,
            folder_key=folder_key,
            folder_path=folder_path,
        )
        self.request(
            spec.method,
            url=spec.endpoint,
            headers=spec.headers,
            content=spec.content,
        )

    async def resume_async(
        self,
        *,
        inbox_id: Optional[str] = None,
        job_id: Optional[str] = None,
        folder_key: Optional[str] = None,
        folder_path: Optional[str] = None,
        payload: Any,
    ) -> None:
        """Asynchronously sends a payload to resume a paused job waiting for input, identified by its inbox ID or job ID.

        Args:
            inbox_id (Optional[str]): The inbox ID of the job. If not provided, it is resolved from the job ID.
            job_id (Optional[str]): The job ID of the job.
            folder_key (Optional[str]): The key of the folder to execute the process in. Overrides the default one set in the SDK config.
            folder_path (Optional[str]): The path of the folder to execute the process in. Overrides the default one set in the SDK config.
            payload (Any): The payload to deliver.

        Examples:
            ```python
            import asyncio

            from uipath import UiPath

            sdk = UiPath()


            async def main():  # noqa: D103
                await sdk.jobs.resume_async(job_id="38073051", payload="The response")


            asyncio.run(main())
            ```
        """
        if job_id is None and inbox_id is None:
            raise ValueError("Either job_id or inbox_id must be provided")

        # for type checking
        job_id = str(job_id)
        inbox_id = (
            inbox_id
            if inbox_id
            else self._retrieve_inbox_id(
                job_id=job_id,
                folder_key=folder_key,
                folder_path=folder_path,
            )
        )

        spec = self._resume_spec(
            inbox_id=inbox_id,
            payload=payload,
            folder_key=folder_key,
            folder_path=folder_path,
        )
        await self.request_async(
            spec.method,
            url=spec.endpoint,
            headers=spec.headers,
            content=spec.content,
        )

    @property
    def custom_headers(self) -> Dict[str, str]:
        return self.folder_headers

    def retrieve(
        self,
        job_key: str,
    ) -> Job:
        spec = self._retrieve_spec(job_key=job_key)
        response = self.request(
            spec.method,
            url=spec.endpoint,
        )

        return Job.model_validate(response.json())

    async def retrieve_async(
        self,
        job_key: str,
    ) -> Job:
        spec = self._retrieve_spec(job_key=job_key)
        response = await self.request_async(
            spec.method,
            url=spec.endpoint,
        )

        return Job.model_validate(response.json())

    def _retrieve_inbox_id(
        self,
        *,
        job_id: str,
        folder_key: Optional[str] = None,
        folder_path: Optional[str] = None,
    ) -> str:
        spec = self._retrieve_inbox_id_spec(
            job_id=job_id,
            folder_key=folder_key,
            folder_path=folder_path,
        )
        response = self.request(
            spec.method,
            url=spec.endpoint,
            params=spec.params,
            headers=spec.headers,
        )

        response = response.json()
        return self._extract_first_inbox_id(response)

    async def _retrieve_inbox_id_async(
        self,
        *,
        job_id: str,
        folder_key: Optional[str] = None,
        folder_path: Optional[str] = None,
    ) -> str:
        spec = self._retrieve_inbox_id_spec(
            job_id=job_id,
            folder_key=folder_key,
            folder_path=folder_path,
        )
        response = await self.request_async(
            spec.method,
            url=spec.endpoint,
            params=spec.params,
            headers=spec.headers,
        )

        response = response.json()
        return self._extract_first_inbox_id(response)

    def _extract_first_inbox_id(self, response: Any) -> str:
        if len(response["value"]) > 0:
            # FIXME: is this correct?
            return response["value"][0]["ItemKey"]
        else:
            raise Exception("No inbox found")

    def _retrieve_inbox_id_spec(
        self,
        *,
        job_id: str,
        folder_key: Optional[str] = None,
        folder_path: Optional[str] = None,
    ) -> RequestSpec:
        return RequestSpec(
            method="GET",
            endpoint=Endpoint("/orchestrator_/odata/JobTriggers"),
            params={
                "$filter": f"JobId eq {job_id}",
                "$top": 1,
                "$select": "ItemKey",
            },
            headers={
                **header_folder(folder_key, folder_path),
            },
        )

    def _resume_spec(
        self,
        *,
        inbox_id: str,
        payload: Any = None,
        folder_key: Optional[str] = None,
        folder_path: Optional[str] = None,
    ) -> RequestSpec:
        return RequestSpec(
            method="POST",
            endpoint=Endpoint(
                f"/orchestrator_/api/JobTriggers/DeliverPayload/{inbox_id}"
            ),
            content=json.dumps({"payload": payload}),
            headers={
                **header_folder(folder_key, folder_path),
            },
        )

    def _retrieve_spec(
        self,
        *,
        job_key: str,
    ) -> RequestSpec:
        return RequestSpec(
            method="GET",
            endpoint=Endpoint(
                f"orchestrator_/odata/Jobs/UiPath.Server.Configuration.OData.GetByKey(identifier={job_key})"
            ),
        )
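For orientation, here is a minimal synchronous usage sketch of the new JobsService, assuming an environment where `UiPath()` can authenticate (the same facade used in the `resume_async` docstring above); the job key and job ID values are placeholders:

```python
from uipath import UiPath

sdk = UiPath()  # connection settings come from the SDK config / environment

# Look up a job by its key (placeholder value); returns a validated Job model.
job = sdk.jobs.retrieve("00000000-0000-0000-0000-000000000000")
print(job)

# Resume a suspended job by job ID. When only job_id is given, the service
# first resolves the trigger's ItemKey via /orchestrator_/odata/JobTriggers,
# then POSTs the payload to /orchestrator_/api/JobTriggers/DeliverPayload/{inbox_id}.
sdk.jobs.resume(job_id="38073051", payload="The response")
```

`resume` takes keyword arguments only and raises `ValueError` when neither `job_id` nor `inbox_id` is supplied.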
uipath/_services/llm_gateway_service.py
@@ -0,0 +1,311 @@
import json
from typing import Any, Dict, List, Optional

from .._config import Config
from .._execution_context import ExecutionContext
from .._models.llm_gateway import (
    ChatCompletion,
    SpecificToolChoice,
    TextEmbedding,
    ToolChoice,
    ToolDefinition,
    UsageInfo,
)
from .._utils import Endpoint
from ._base_service import BaseService

# Common constants
API_VERSION = "2024-10-21"
NORMALIZED_API_VERSION = "2024-08-01-preview"

# Common headers
DEFAULT_LLM_HEADERS = {
    "X-UIPATH-STREAMING-ENABLED": "false",
    "X-UiPath-LlmGateway-RequestingProduct": "uipath-python-sdk",
    "X-UiPath-LlmGateway-RequestingFeature": "langgraph-agent",
}


class ChatModels(object):
    gpt_4 = "gpt-4"
    gpt_4_1106_Preview = "gpt-4-1106-Preview"
    gpt_4_32k = "gpt-4-32k"
    gpt_4_turbo_2024_04_09 = "gpt-4-turbo-2024-04-09"
    gpt_4_vision_preview = "gpt-4-vision-preview"
    gpt_4o_2024_05_13 = "gpt-4o-2024-05-13"
    gpt_4o_2024_08_06 = "gpt-4o-2024-08-06"
    gpt_4o_mini_2024_07_18 = "gpt-4o-mini-2024-07-18"
    o3_mini = "o3-mini-2025-01-31"


class EmbeddingModels(object):
    text_embedding_3_large = "text-embedding-3-large"
    text_embedding_ada_002 = "text-embedding-ada-002"


API_VERSION = "2024-10-21"
NORMALIZED_API_VERSION = "2024-08-01-preview"


class UiPathOpenAIService(BaseService):
    """Service for calling the LLM Gateway service."""

    def __init__(self, config: Config, execution_context: ExecutionContext) -> None:
        super().__init__(config=config, execution_context=execution_context)

    async def embeddings_usage(
        self, input: str, embedding_model: str = EmbeddingModels.text_embedding_ada_002
    ):
        """Embed the input text using the LLM Gateway service and return usage information.

        Args:
            input (str): The input text to embed.
            embedding_model (str, optional): The embedding model to use. Defaults to text-embedding-ada-002.

        Returns:
            UsageInfo: The embedding usage information.
        """
        endpoint = Endpoint(
            f"/llmgateway_/openai/deployments/{embedding_model}/embeddings/usage"
        )

        response = await self.request_async(
            "POST",
            endpoint,
            content=json.dumps({"input": input}),
            params={"api-version": API_VERSION},
            headers=DEFAULT_LLM_HEADERS,
        )

        return UsageInfo.model_validate(response.json())

    async def embeddings(
        self, input: str, embedding_model: str = EmbeddingModels.text_embedding_ada_002
    ):
        """Embed the input text using the LLM Gateway service.

        Args:
            input (str): The input text to embed.
            embedding_model (str, optional): The embedding model to use. Defaults to text-embedding-ada-002.

        Returns:
            TextEmbedding: The embedding response.
        """
        endpoint = Endpoint(
            f"/llmgateway_/openai/deployments/{embedding_model}/embeddings"
        )

        response = await self.request_async(
            "POST",
            endpoint,
            content=json.dumps({"input": input}),
            params={"api-version": API_VERSION},
            headers=DEFAULT_LLM_HEADERS,
        )

        return TextEmbedding.model_validate(response.json())

    async def chat_completions(
        self,
        messages: List[Dict[str, str]],
        model: str = ChatModels.gpt_4o_mini_2024_07_18,
        max_tokens: int = 50,
        temperature: float = 0,
    ):
        """Get chat completions using the LLM Gateway service.

        Args:
            messages (List[Dict[str, str]]): List of message dictionaries with 'role' and 'content' keys.
                The supported roles are 'system', 'user', and 'assistant'.

                Example:
                ```
                [
                    {"role": "system", "content": "You are a helpful Python programming assistant."},
                    {"role": "user", "content": "How do I read a file in Python?"},
                    {"role": "assistant", "content": "You can use the built-in open() function."},
                    {"role": "user", "content": "Can you show an example?"}
                ]
                ```
                The conversation history can be included to provide context to the model.
            model (str, optional): The model to use for chat completion. Defaults to ChatModels.gpt_4o_mini_2024_07_18.
            max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 50.
            temperature (float, optional): Temperature for sampling, between 0 and 1.
                Lower values make output more deterministic. Defaults to 0.

        Returns:
            ChatCompletion: The chat completion response.
        """
        endpoint = Endpoint(f"/llmgateway_/openai/deployments/{model}/chat/completions")

        request_body = {
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
        }

        response = await self.request_async(
            "POST",
            endpoint,
            content=json.dumps(request_body),
            params={"api-version": API_VERSION},
            headers=DEFAULT_LLM_HEADERS,
        )

        return ChatCompletion.model_validate(response.json())

    async def chat_completions_usage(
        self,
        messages: List[Dict[str, str]],
        model: str = ChatModels.gpt_4o_mini_2024_07_18,
        max_tokens: int = 50,
        temperature: float = 0,
    ):
        """Get chat completion usage using the LLM Gateway service.

        Args:
            messages (List[Dict[str, str]]): List of message dictionaries with 'role' and 'content' keys.
                The supported roles are 'system', 'user', and 'assistant'.

                Example:
                ```
                [
                    {"role": "system", "content": "You are a helpful Python programming assistant."},
                    {"role": "user", "content": "How do I read a file in Python?"},
                    {"role": "assistant", "content": "You can use the built-in open() function."},
                    {"role": "user", "content": "Can you show an example?"}
                ]
                ```
                The conversation history can be included to provide context to the model.
            model (str, optional): The model to use for chat completion. Defaults to ChatModels.gpt_4o_mini_2024_07_18.
            max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 50.
            temperature (float, optional): Temperature for sampling, between 0 and 1.
                Lower values make output more deterministic. Defaults to 0.

        Returns:
            UsageInfo: The chat completion usage response.
        """
        endpoint = Endpoint(
            f"/llmgateway_/openai/deployments/{model}/chat/completions/usage"
        )

        request_body = {
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
        }

        response = await self.request_async(
            "POST",
            endpoint,
            content=json.dumps(request_body),
            params={"api-version": API_VERSION},
            headers=DEFAULT_LLM_HEADERS,
        )

        return UsageInfo.model_validate(response.json())


class UiPathLlmChatService(BaseService):
    """Service for calling UiPath's normalized LLM Gateway API."""

    def __init__(self, config: Config, execution_context: ExecutionContext) -> None:
        super().__init__(config=config, execution_context=execution_context)

    async def chat_completions(
        self,
        messages: List[Dict[str, str]],
        model: str = ChatModels.gpt_4o_mini_2024_07_18,
        max_tokens: int = 250,
        temperature: float = 0,
        n: int = 1,
        frequency_penalty: float = 0,
        presence_penalty: float = 0,
        top_p: float = 1,
        tools: Optional[List[ToolDefinition]] = None,
        tool_choice: Optional[ToolChoice] = None,
    ):
        """Get chat completions using UiPath's normalized LLM Gateway API.

        Args:
            messages (List[Dict[str, str]]): List of message dictionaries with 'role' and 'content' keys.
                The supported roles are 'system', 'user', and 'assistant'.
            model (str, optional): The model to use for chat completion. Defaults to ChatModels.gpt_4o_mini_2024_07_18.
            max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 250.
            temperature (float, optional): Temperature for sampling, between 0 and 1.
                Lower values make output more deterministic. Defaults to 0.
            n (int, optional): Number of chat completion choices to generate. Defaults to 1.
            frequency_penalty (float, optional): Penalty for token frequency. Defaults to 0.
            presence_penalty (float, optional): Penalty for token presence. Defaults to 0.
            top_p (float, optional): Nucleus sampling parameter. Defaults to 1.
            tools (Optional[List[ToolDefinition]], optional): List of tool definitions. Defaults to None.
            tool_choice (Optional[ToolChoice], optional): Tool choice configuration.
                Can be "auto", "none", an AutoToolChoice, a RequiredToolChoice, or a SpecificToolChoice. Defaults to None.

        Returns:
            ChatCompletion: The chat completion response.
        """
        endpoint = Endpoint("/llmgateway_/api/chat/completions")

        request_body = {
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "n": n,
            "frequency_penalty": frequency_penalty,
            "presence_penalty": presence_penalty,
            "top_p": top_p,
        }

        # Add tools if provided - convert to UiPath format
        if tools:
            request_body["tools"] = [
                self._convert_tool_to_uipath_format(tool) for tool in tools
            ]

        # Handle tool_choice
        if tool_choice:
            if isinstance(tool_choice, str):
                request_body["tool_choice"] = tool_choice
            elif isinstance(tool_choice, SpecificToolChoice):
                request_body["tool_choice"] = {"type": "tool", "name": tool_choice.name}
            else:
                request_body["tool_choice"] = tool_choice.model_dump()

        # Use default headers but update with normalized API specific headers
        headers = {
            **DEFAULT_LLM_HEADERS,
            "X-UiPath-LlmGateway-NormalizedApi-ModelName": model,
        }

        response = await self.request_async(
            "POST",
            endpoint,
            content=json.dumps(request_body),
            params={"api-version": NORMALIZED_API_VERSION},
            headers=headers,
        )

        return ChatCompletion.model_validate(response.json())

    def _convert_tool_to_uipath_format(self, tool: ToolDefinition) -> Dict[str, Any]:
        """Convert an OpenAI-style tool definition directly to UiPath API format."""
        parameters = {
            "type": tool.function.parameters.type,
            "properties": {
                name: {
                    "type": prop.type,
                    **({"description": prop.description} if prop.description else {}),
                    **({"enum": prop.enum} if prop.enum else {}),
                }
                for name, prop in tool.function.parameters.properties.items()
            },
        }

        if tool.function.parameters.required:
            parameters["required"] = tool.function.parameters.required

        return {
            "name": tool.function.name,
            "description": tool.function.description,
            "parameters": parameters,
        }
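A short usage sketch for the new gateway services, assuming they are exposed on the `UiPath()` facade; the attribute name `llm` below is hypothetical, since the facade wiring lives in `uipath/_uipath.py`, whose contents are not shown in this diff:

```python
import asyncio

from uipath import UiPath

sdk = UiPath()


async def main():
    # `sdk.llm` is an assumed attribute name for UiPathLlmChatService.
    completion = await sdk.llm.chat_completions(
        [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Say hello in one short sentence."},
        ],
        max_tokens=32,
    )
    print(completion)


asyncio.run(main())
```

Note how the two services select models differently: the OpenAI-style endpoints embed the model name in the deployment path, while the normalized endpoint passes it via the `X-UiPath-LlmGateway-NormalizedApi-ModelName` header.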