airia 0.1.31__py3-none-any.whl → 0.1.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- airia/client/data_vector_search/async_data_vector_search.py +18 -1
- airia/client/data_vector_search/base_data_vector_search.py +5 -1
- airia/client/data_vector_search/sync_data_vector_search.py +18 -1
- airia/client/deployments/async_deployments.py +28 -1
- airia/client/deployments/base_deployments.py +21 -1
- airia/client/deployments/sync_deployments.py +28 -1
- airia/client/pipeline_execution/async_pipeline_execution.py +69 -9
- airia/client/pipeline_execution/base_pipeline_execution.py +42 -2
- airia/client/pipeline_execution/sync_pipeline_execution.py +72 -10
- airia/client/pipelines_config/async_pipelines_config.py +37 -7
- airia/client/pipelines_config/base_pipelines_config.py +25 -1
- airia/client/pipelines_config/sync_pipelines_config.py +37 -7
- airia/client/project/async_project.py +31 -5
- airia/client/project/base_project.py +25 -2
- airia/client/project/sync_project.py +33 -5
- airia/client/store/async_store.py +16 -1
- airia/client/store/base_store.py +11 -1
- airia/client/store/sync_store.py +16 -1
- airia/types/api/pipeline_execution/__init__.py +8 -0
- airia/types/api/pipeline_execution/_pipeline_execution.py +8 -0
- airia/types/api/pipeline_execution/get_pipeline_execution.py +83 -0
- {airia-0.1.31.dist-info → airia-0.1.33.dist-info}/METADATA +1 -1
- {airia-0.1.31.dist-info → airia-0.1.33.dist-info}/RECORD +26 -25
- {airia-0.1.31.dist-info → airia-0.1.33.dist-info}/WHEEL +0 -0
- {airia-0.1.31.dist-info → airia-0.1.33.dist-info}/licenses/LICENSE +0 -0
- {airia-0.1.31.dist-info → airia-0.1.33.dist-info}/top_level.txt +0 -0
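
Taken together, the hunks below make four API-visible changes: `get_file_chunks` gains pagination parameters, `get_deployments` gains pagination, sorting, and filtering parameters, `execute_temporary_assistant` promotes `prompt_parameters` from a defaulted keyword to a required argument, and a new `get_pipeline_execution` method is added to both the sync and async clients. A minimal sketch of the new surface, assuming client construction as shown in the docstring examples below (the `from airia import AiriaClient` import path is an assumption, as the imports are not part of this diff):

```python
# Sketch only: mirrors the signatures introduced in this diff.
from airia import AiriaClient  # import path assumed from the package layout

client = AiriaClient(api_key="your-api-key")

# Pagination on file-chunk retrieval (page_size is capped at 100 per the docstrings)
chunks = client.data_vector_search.get_file_chunks(
    data_store_id="your_data_store_id",
    file_id="your_file_id",
    page_number=2,
    page_size=100,
)

# Pagination, sorting, and filtering on deployments
deployments = client.deployments.get_deployments(
    page_number=1, page_size=20, sort_by="deploymentName", sort_direction="ASC"
)

# New in 0.1.33: fetch an execution record by ID
execution = client.pipeline_execution.get_pipeline_execution("execution-id-123")
```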
airia/client/data_vector_search/async_data_vector_search.py

````diff
@@ -11,7 +11,12 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         super().__init__(request_handler)
 
     async def get_file_chunks(
-        self,
+        self,
+        data_store_id: str,
+        file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
+        correlation_id: Optional[str] = None,
     ) -> GetFileChunksResponse:
         """
         Retrieve chunks from a specific file in a data store.
@@ -21,6 +26,8 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         Args:
             data_store_id: The unique identifier of the data store (GUID format)
             file_id: The unique identifier of the file (GUID format)
+            page_number: The page number (1-based). Default is 1.
+            page_size: The page size. Maximum supported value is 100. Default is 50.
             correlation_id: Optional correlation ID for request tracing
 
         Returns:
@@ -49,6 +56,14 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
                 file_id="your_file_id"
             )
 
+            # Get file chunks with custom pagination
+            chunks_response = await client.data_vector_search.get_file_chunks(
+                data_store_id="your_data_store_id",
+                file_id="your_file_id",
+                page_number=2,
+                page_size=100
+            )
+
             # Access the chunks
             for chunk in chunks_response.chunks:
                 print(f"Chunk: {chunk.chunk}")
@@ -64,6 +79,8 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         request_data = self._pre_get_file_chunks(
             data_store_id=data_store_id,
             file_id=file_id,
+            page_number=page_number,
+            page_size=page_size,
             correlation_id=correlation_id,
             api_version=ApiVersion.V1.value,
         )
````
airia/client/data_vector_search/base_data_vector_search.py

````diff
@@ -13,6 +13,8 @@ class BaseDataVectorSearch:
         self,
         data_store_id: str,
         file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
         correlation_id: Optional[str] = None,
         api_version: str = ApiVersion.V1.value,
     ):
@@ -24,6 +26,8 @@ class BaseDataVectorSearch:
         Args:
             data_store_id: ID of the data store
             file_id: ID of the file
+            page_number: The page number (1-based). Default is 1.
+            page_size: The page size. Maximum supported value is 100. Default is 50.
             correlation_id: Optional correlation ID for tracing
             api_version: API version to use for the request
 
@@ -44,7 +48,7 @@ class BaseDataVectorSearch:
         )
 
         request_data = self._request_handler.prepare_request(
-            url, correlation_id=correlation_id, params={"pageNumber":
+            url, correlation_id=correlation_id, params={"pageNumber": page_number, "pageSize": page_size}
         )
 
         return request_data
````
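
Note how the snake_case keyword arguments are translated into the camelCase query parameters the HTTP API expects. A hypothetical illustration of the query dict `_pre_get_file_chunks` now builds:

```python
# Hypothetical: for get_file_chunks(page_number=2, page_size=100) the hunk
# above passes these query parameters to prepare_request; the defaults
# (page_number=1, page_size=50) produce pageNumber=1&pageSize=50.
params = {"pageNumber": 2, "pageSize": 100}
```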
airia/client/data_vector_search/sync_data_vector_search.py

````diff
@@ -11,7 +11,12 @@ class DataVectorSearch(BaseDataVectorSearch):
         super().__init__(request_handler)
 
     def get_file_chunks(
-        self,
+        self,
+        data_store_id: str,
+        file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
+        correlation_id: Optional[str] = None,
     ) -> GetFileChunksResponse:
         """
         Retrieve chunks from a specific file in a data store.
@@ -21,6 +26,8 @@ class DataVectorSearch(BaseDataVectorSearch):
         Args:
             data_store_id: The unique identifier of the data store (GUID format)
             file_id: The unique identifier of the file (GUID format)
+            page_number: The page number (1-based). Default is 1.
+            page_size: The page size. Maximum supported value is 100. Default is 50.
             correlation_id: Optional correlation ID for request tracing
 
         Returns:
@@ -47,6 +54,14 @@ class DataVectorSearch(BaseDataVectorSearch):
                 file_id="your_file_id"
            )
 
+            # Get file chunks with custom pagination
+            chunks_response = client.data_vector_search.get_file_chunks(
+                data_store_id="your_data_store_id",
+                file_id="your_file_id",
+                page_number=2,
+                page_size=100
+            )
+
             # Access the chunks
             for chunk in chunks_response.chunks:
                 print(f"Chunk: {chunk.chunk}")
@@ -58,6 +73,8 @@ class DataVectorSearch(BaseDataVectorSearch):
         request_data = self._pre_get_file_chunks(
             data_store_id=data_store_id,
             file_id=file_id,
+            page_number=page_number,
+            page_size=page_size,
             correlation_id=correlation_id,
             api_version=ApiVersion.V1.value,
         )
````
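
Since `page_size` tops out at 100, retrieving every chunk of a large file means walking pages. One way to do that is sketched below; the diff does not show the response model beyond the `chunks` attribute, so treating an empty page as the stop condition is an assumption:

```python
def iter_all_chunks(client, data_store_id: str, file_id: str):
    """Yield every chunk of a file, one page at a time (sketch)."""
    page = 1
    while True:
        resp = client.data_vector_search.get_file_chunks(
            data_store_id=data_store_id,
            file_id=file_id,
            page_number=page,
            page_size=100,  # maximum supported value per the docstring
        )
        if not resp.chunks:  # assumption: an exhausted page comes back empty
            break
        yield from resp.chunks
        page += 1
```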
airia/client/deployments/async_deployments.py

````diff
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List, Literal, Optional
 
 from ...types._api_version import ApiVersion
 from ...types.api.deployments import GetDeploymentResponse, GetDeploymentsResponse
@@ -12,6 +12,11 @@ class AsyncDeployments(BaseDeployments):
 
     async def get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         project_id: Optional[str] = None,
@@ -26,6 +31,11 @@ class AsyncDeployments(BaseDeployments):
         about each deployment including associated pipelines, data sources, and user prompts.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             project_id: Optional filter by project id
@@ -42,16 +52,33 @@ class AsyncDeployments(BaseDeployments):
         Example:
             ```python
             client = AiriaAsyncClient(api_key="your-api-key")
+
+            # Basic usage with filtering
             deployments = await client.deployments.get_deployments(
                 tags=["production", "nlp"],
                 is_recommended=True
             )
+
+            # With pagination and sorting
+            deployments = await client.deployments.get_deployments(
+                page_number=1,
+                page_size=20,
+                sort_by="deploymentName",
+                sort_direction="ASC",
+                filter="text-analysis"
+            )
+
             print(f"Found {deployments.total_count} deployments")
             for deployment in deployments.items:
                 print(f"- {deployment.deployment_name}")
             ```
         """
         request_data = self._pre_get_deployments(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            filter=filter,
             tags=tags,
             is_recommended=is_recommended,
             correlation_id=correlation_id,
````
airia/client/deployments/base_deployments.py

````diff
@@ -1,4 +1,4 @@
-from typing import List, Optional, Union
+from typing import List, Literal, Optional, Union
 from urllib.parse import urljoin
 
 from ...types._api_version import ApiVersion
@@ -11,6 +11,11 @@ class BaseDeployments:
 
     def _pre_get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         correlation_id: Optional[str] = None,
@@ -23,6 +28,11 @@ class BaseDeployments:
         retrieval requests, including optional filtering by tags and recommendation status.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             correlation_id: Optional correlation ID for tracing
@@ -45,6 +55,16 @@ class BaseDeployments:
 
         # Build query parameters
         params = {}
+        if page_number is not None:
+            params["PageNumber"] = page_number
+        if page_size is not None:
+            params["PageSize"] = page_size
+        if sort_by is not None:
+            params["SortBy"] = sort_by
+        if sort_direction is not None:
+            params["SortDirection"] = sort_direction
+        if filter is not None:
+            params["Filter"] = filter
         if tags is not None:
             params["tags"] = tags
         if is_recommended is not None:
````
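
One quirk worth noticing in `_pre_get_deployments`: the new parameters are serialized in PascalCase (`PageNumber`, `SortBy`, ...) while the pre-existing ones stay lowercase (`tags`). A call such as the following would therefore produce the mixed-case query dict shown in the comment:

```python
# For get_deployments(page_number=1, page_size=20, sort_by="deploymentName",
#                     sort_direction="ASC", tags=["production"])
# the hunk above builds:
params = {
    "PageNumber": 1,
    "PageSize": 20,
    "SortBy": "deploymentName",
    "SortDirection": "ASC",
    "tags": ["production"],  # legacy lowercase key, unchanged by this diff
}
```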
airia/client/deployments/sync_deployments.py

````diff
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List, Literal, Optional
 
 from ...types._api_version import ApiVersion
 from ...types.api.deployments import GetDeploymentResponse, GetDeploymentsResponse
@@ -12,6 +12,11 @@ class Deployments(BaseDeployments):
 
     def get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         project_id: Optional[str] = None,
@@ -26,6 +31,11 @@ class Deployments(BaseDeployments):
         about each deployment including associated pipelines, data sources, and user prompts.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             project_id: Optional filter by project id
@@ -42,16 +52,33 @@ class Deployments(BaseDeployments):
         Example:
             ```python
             client = AiriaClient(api_key="your-api-key")
+
+            # Basic usage with filtering
             deployments = client.deployments.get_deployments(
                 tags=["production", "nlp"],
                 is_recommended=True
             )
+
+            # With pagination and sorting
+            deployments = client.deployments.get_deployments(
+                page_number=1,
+                page_size=20,
+                sort_by="deploymentName",
+                sort_direction="ASC",
+                filter="text-analysis"
+            )
+
             print(f"Found {deployments.total_count} deployments")
             for deployment in deployments.items:
                 print(f"- {deployment.deployment_name}")
             ```
         """
         request_data = self._pre_get_deployments(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            filter=filter,
             tags=tags,
             is_recommended=is_recommended,
             correlation_id=correlation_id,
````
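
With `total_count` exposed on the response (see the docstring example above), pages can be walked until everything is collected. A sketch, under the assumption that `total_count` reflects the full filtered result set rather than the current page:

```python
def fetch_all_deployments(client, page_size: int = 20):
    """Collect every deployment across pages (sketch)."""
    items, page = [], 1
    while True:
        resp = client.deployments.get_deployments(
            page_number=page, page_size=page_size
        )
        items.extend(resp.items)
        # assumption: total_count is the size of the whole result set
        if len(items) >= resp.total_count or not resp.items:
            break
        page += 1
    return items
```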
airia/client/pipeline_execution/async_pipeline_execution.py

````diff
@@ -3,8 +3,12 @@ from typing import Any, Dict, List, Literal, Optional, Type, Union, overload
 from pydantic import BaseModel
 
 from ...types._api_version import ApiVersion
-from ...types._structured_output import
+from ...types._structured_output import (
+    create_schema_system_message,
+    parse_response_to_model,
+)
 from ...types.api.pipeline_execution import (
+    GetPipelineExecutionResponse,
     PipelineExecutionAsyncStreamedResponse,
     PipelineExecutionResponse,
     TemporaryAssistantAsyncStreamedResponse,
@@ -209,7 +213,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages =
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -246,7 +252,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
             response = PipelineExecutionResponse(**resp)
             # Parse response to Pydantic model if output_schema was provided
             if output_schema is not None and response.result:
-                response.result = parse_response_to_model(
+                response.result = parse_response_to_model(
+                    response.result, output_schema
+                )
             return response
 
         return PipelineExecutionAsyncStreamedResponse(stream=resp)
@@ -256,8 +264,8 @@ class AsyncPipelineExecution(BasePipelineExecution):
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: Literal[False] = False,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -287,8 +295,8 @@ class AsyncPipelineExecution(BasePipelineExecution):
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: Literal[True] = True,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -317,8 +325,8 @@ class AsyncPipelineExecution(BasePipelineExecution):
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: bool = False,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -355,8 +363,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
             model_parameters: Model parameters (required). Must include libraryModelId,
                 projectModelId, modelIdentifierType, and modelIsAvailableinProject
             user_input: User input text (required)
+            prompt_parameters: Parameters for prompt configuration (required). Must include
+                a 'prompt' key with the system prompt text
             assistant_name: Name of the temporary assistant. Default is ""
-            prompt_parameters: Parameters for prompt configuration. Default is {"prompt": ""}
             async_output: Whether to stream the response. Default is False
             include_tools_response: Whether to return initial LLM tool result. Default is False
             save_history: Whether to save input and output to conversation history. Default is True
@@ -403,6 +412,7 @@ class AsyncPipelineExecution(BasePipelineExecution):
                     "modelIsAvailableinProject": True,
                 },
                 user_input="say double bubble bath ten times fast",
+                prompt_parameters={"prompt": "You are a helpful assistant."},
             )
             print(response.result)
             ```
@@ -418,6 +428,7 @@ class AsyncPipelineExecution(BasePipelineExecution):
             response = await client.pipeline_execution.execute_temporary_assistant(
                 model_parameters={...},
                 user_input="What's the weather?",
+                prompt_parameters={"prompt": "You are a weather information assistant."},
                 output_schema=WeatherInfo
             )
             ```
@@ -440,7 +451,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages =
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -487,5 +500,52 @@ class AsyncPipelineExecution(BasePipelineExecution):
             response = TemporaryAssistantResponse(**resp)
             # Parse response to Pydantic model if output_schema was provided
             if output_schema is not None and response.result:
-                response.result = parse_response_to_model(
+                response.result = parse_response_to_model(
+                    str(response.result), output_schema
+                )
             return response
+
+    async def get_pipeline_execution(
+        self, execution_id: str, correlation_id: Optional[str] = None
+    ) -> GetPipelineExecutionResponse:
+        """
+        Retrieve a pipeline execution result by execution ID asynchronously.
+
+        This method fetches the details of a specific pipeline execution using its
+        unique identifier. The response includes execution logs, step execution records,
+        timing information, and any errors that occurred during execution.
+
+        Args:
+            execution_id: The execution id (GUID format)
+            correlation_id: Optional correlation ID for request tracing
+
+        Returns:
+            GetPipelineExecutionResponse: Pipeline execution details including logs and step records
+
+        Raises:
+            AiriaAPIError: If the API request fails or execution is not found
+            ValueError: If an invalid API version is provided
+
+        Example:
+            ```python
+            client = AiriaAsyncClient(api_key="your-api-key")
+            execution = await client.pipeline_execution.get_pipeline_execution("execution-id-123")
+            print(f"Execution ID: {execution.execution_id}")
+            print(f"Success: {execution.log_record_details.success}")
+            print(f"Duration: {execution.log_record_details.duration}")
+
+            # Iterate through step execution logs
+            if execution.step_execution_log_records:
+                for step in execution.step_execution_log_records:
+                    print(f"Step: {step.step_title} - Success: {step.success}")
+            ```
+        """
+        request_data = self._pre_get_pipeline_execution(
+            execution_id=execution_id,
+            correlation_id=correlation_id,
+            api_version=ApiVersion.V1.value,
+        )
+
+        response = await self._request_handler.make_request("GET", request_data)
+
+        return GetPipelineExecutionResponse(**response)
````
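
The `prompt_parameters` change is breaking: it moves from a defaulted keyword (`{"prompt": ""}`) to a required argument, so 0.1.31-era calls that omitted it will now raise `TypeError`. A minimal migration sketch, with `model_parameters` elided as in the docstring examples above:

```python
# Before (0.1.31): prompt_parameters could be omitted
# response = await client.pipeline_execution.execute_temporary_assistant(
#     model_parameters={...}, user_input="hello"
# )

# After (0.1.33): a 'prompt' key with the system prompt text is required
response = await client.pipeline_execution.execute_temporary_assistant(
    model_parameters={...},  # elided; see the docstring example above
    user_input="hello",
    prompt_parameters={"prompt": "You are a helpful assistant."},
)
```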
airia/client/pipeline_execution/base_pipeline_execution.py

````diff
@@ -120,8 +120,8 @@ class BasePipelineExecution:
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {},
         async_output: bool = False,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -153,8 +153,9 @@ class BasePipelineExecution:
         Args:
             model_parameters: model parameters
             user_input: Optional user input text
+            prompt_parameters: Parameters for prompt configuration (required). Must include
+                a 'prompt' key with the system prompt text
             assistant_name: Name of the temporary assistant
-            prompt_parameters: Parameters for prompt configuration (required)
             async_output: Whether to stream the response. Default is False
             include_tools_response: Whether to return initial LLM tool result. Default is False
             save_history: Whether to save input and output to conversation history. Default is True
@@ -224,3 +225,42 @@ class BasePipelineExecution:
         )
 
         return request_data
+
+    def _pre_get_pipeline_execution(
+        self,
+        execution_id: str,
+        correlation_id: Optional[str] = None,
+        api_version: str = ApiVersion.V1.value,
+    ):
+        """
+        Prepare request data for retrieving a pipeline execution.
+
+        This internal method constructs the URL for pipeline execution retrieval
+        by ID using the specified API version.
+
+        Args:
+            execution_id: The execution id (GUID format)
+            correlation_id: Optional correlation ID for tracing
+            api_version: API version to use for the request
+
+        Returns:
+            RequestData: Prepared request data for the pipeline execution endpoint
+
+        Raises:
+            ValueError: If an invalid API version is provided
+        """
+        if api_version not in ApiVersion.as_list():
+            raise ValueError(
+                f"Invalid API version: {api_version}. Valid versions are: {', '.join(ApiVersion.as_list())}"
+            )
+
+        url = urljoin(
+            self._request_handler.base_url,
+            f"{api_version}/PipelineExecution/{execution_id}",
+        )
+
+        request_data = self._request_handler.prepare_request(
+            url=url, correlation_id=correlation_id
+        )
+
+        return request_data
````
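
For reference, `_pre_get_pipeline_execution` resolves to a plain GET with no query parameters. Assuming `ApiVersion.V1.value` is the string `"v1"` (the enum body is not part of this diff), the resulting URL is:

```python
# Assumed: ApiVersion.V1.value == "v1"; base_url ends with "/" so that
# urljoin(base_url, "v1/PipelineExecution/<id>") appends rather than replaces.
url = f"{base_url}v1/PipelineExecution/{execution_id}"
```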
airia/client/pipeline_execution/sync_pipeline_execution.py

````diff
@@ -3,8 +3,12 @@ from typing import Any, Dict, List, Literal, Optional, Type, Union, overload
 from pydantic import BaseModel
 
 from ...types._api_version import ApiVersion
-from ...types._structured_output import
+from ...types._structured_output import (
+    create_schema_system_message,
+    parse_response_to_model,
+)
 from ...types.api.pipeline_execution import (
+    GetPipelineExecutionResponse,
     PipelineExecutionResponse,
     PipelineExecutionStreamedResponse,
     TemporaryAssistantResponse,
@@ -208,7 +212,9 @@ class PipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages =
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -245,7 +251,9 @@ class PipelineExecution(BasePipelineExecution):
             response = PipelineExecutionResponse(**resp)
             # Parse response to Pydantic model if output_schema was provided
             if output_schema is not None and response.result:
-                response.result = parse_response_to_model(
+                response.result = parse_response_to_model(
+                    response.result, output_schema
+                )
             return response
 
         return PipelineExecutionStreamedResponse(stream=resp)
@@ -255,8 +263,8 @@ class PipelineExecution(BasePipelineExecution):
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: Literal[False] = False,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -286,8 +294,8 @@ class PipelineExecution(BasePipelineExecution):
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: Literal[True] = True,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -316,8 +324,8 @@ class PipelineExecution(BasePipelineExecution):
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: bool = False,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -354,8 +362,9 @@ class PipelineExecution(BasePipelineExecution):
             model_parameters: Model parameters (required). Must include libraryModelId,
                 projectModelId, modelIdentifierType, and modelIsAvailableinProject
             user_input: User input text (required)
+            prompt_parameters: Parameters for prompt configuration (required). Must include
+                a 'prompt' key with the system prompt text
             assistant_name: Name of the temporary assistant. Default is ""
-            prompt_parameters: Parameters for prompt configuration. Default is {"prompt": ""}
             async_output: Whether to stream the response. Default is False
             include_tools_response: Whether to return initial LLM tool result. Default is False
             save_history: Whether to save input and output to conversation history. Default is True
@@ -401,6 +410,7 @@ class PipelineExecution(BasePipelineExecution):
                     "modelIsAvailableinProject": True,
                 },
                 user_input="say double bubble bath ten times fast",
+                prompt_parameters={"prompt": "You are a helpful assistant."},
             )
             print(response.result)
             ```
@@ -417,6 +427,7 @@ class PipelineExecution(BasePipelineExecution):
             response = client.pipeline_execution.execute_temporary_assistant(
                 model_parameters={...},
                 user_input="What's the weather like?",
+                prompt_parameters={"prompt": "You are a weather information assistant."},
                 output_schema=WeatherInfo
             )
             # Response will conform to WeatherInfo schema
@@ -440,7 +451,9 @@ class PipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages =
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -448,7 +461,9 @@ class PipelineExecution(BasePipelineExecution):
         output_configuration = None
 
         # Convert UUID objects to strings for API compatibility
-        conversation_id_str =
+        conversation_id_str = (
+            str(conversation_id) if conversation_id else conversation_id
+        )
         user_id_str = str(user_id) if user_id else user_id
         user_input_id_str = str(user_input_id) if user_input_id else user_input_id
 
@@ -492,5 +507,52 @@ class PipelineExecution(BasePipelineExecution):
             response = TemporaryAssistantResponse(**resp)
             # Parse response to Pydantic model if output_schema was provided
             if output_schema is not None and response.result:
-                response.result = parse_response_to_model(
+                response.result = parse_response_to_model(
+                    str(response.result), output_schema
+                )
             return response
+
+    def get_pipeline_execution(
+        self, execution_id: str, correlation_id: Optional[str] = None
+    ) -> GetPipelineExecutionResponse:
+        """
+        Retrieve a pipeline execution result by execution ID.
+
+        This method fetches the details of a specific pipeline execution using its
+        unique identifier. The response includes execution logs, step execution records,
+        timing information, and any errors that occurred during execution.
+
+        Args:
+            execution_id: The execution id (GUID format)
+            correlation_id: Optional correlation ID for request tracing
+
+        Returns:
+            GetPipelineExecutionResponse: Pipeline execution details including logs and step records
+
+        Raises:
+            AiriaAPIError: If the API request fails or execution is not found
+            ValueError: If an invalid API version is provided
+
+        Example:
+            ```python
+            client = AiriaClient(api_key="your-api-key")
+            execution = client.pipeline_execution.get_pipeline_execution("execution-id-123")
+            print(f"Execution ID: {execution.execution_id}")
+            print(f"Success: {execution.log_record_details.success}")
+            print(f"Duration: {execution.log_record_details.duration}")
+
+            # Iterate through step execution logs
+            if execution.step_execution_log_records:
+                for step in execution.step_execution_log_records:
+                    print(f"Step: {step.step_title} - Success: {step.success}")
+            ```
+        """
+        request_data = self._pre_get_pipeline_execution(
+            execution_id=execution_id,
+            correlation_id=correlation_id,
+            api_version=ApiVersion.V1.value,
+        )
+
+        response = self._request_handler.make_request("GET", request_data)
+
+        return GetPipelineExecutionResponse(**response)
````
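
A natural use of the new endpoint is polling an execution until its record is available. The diff exposes no status field beyond `log_record_details.success`, and the docstring's Raises section says a missing execution surfaces as `AiriaAPIError`, so the sketch below simply retries on failure (the retry-on-error behavior is an assumption):

```python
import time

def wait_for_execution(client, execution_id: str, attempts: int = 10, delay: float = 2.0):
    """Poll get_pipeline_execution until a record is returned (sketch)."""
    for _ in range(attempts):
        try:
            return client.pipeline_execution.get_pipeline_execution(execution_id)
        except Exception:  # narrow this to AiriaAPIError in real code
            time.sleep(delay)
    raise TimeoutError(f"execution {execution_id} not available after {attempts} attempts")

execution = wait_for_execution(client, "execution-id-123")
for step in execution.step_execution_log_records or []:
    print(f"{step.step_title}: {'ok' if step.success else 'failed'}")
```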