airia 0.1.31__tar.gz → 0.1.33__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. {airia-0.1.31 → airia-0.1.33}/PKG-INFO +1 -1
  2. {airia-0.1.31 → airia-0.1.33}/airia/client/data_vector_search/async_data_vector_search.py +18 -1
  3. {airia-0.1.31 → airia-0.1.33}/airia/client/data_vector_search/base_data_vector_search.py +5 -1
  4. {airia-0.1.31 → airia-0.1.33}/airia/client/data_vector_search/sync_data_vector_search.py +18 -1
  5. {airia-0.1.31 → airia-0.1.33}/airia/client/deployments/async_deployments.py +28 -1
  6. {airia-0.1.31 → airia-0.1.33}/airia/client/deployments/base_deployments.py +21 -1
  7. {airia-0.1.31 → airia-0.1.33}/airia/client/deployments/sync_deployments.py +28 -1
  8. {airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_execution/async_pipeline_execution.py +69 -9
  9. {airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_execution/base_pipeline_execution.py +42 -2
  10. {airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_execution/sync_pipeline_execution.py +72 -10
  11. {airia-0.1.31 → airia-0.1.33}/airia/client/pipelines_config/async_pipelines_config.py +37 -7
  12. {airia-0.1.31 → airia-0.1.33}/airia/client/pipelines_config/base_pipelines_config.py +25 -1
  13. {airia-0.1.31 → airia-0.1.33}/airia/client/pipelines_config/sync_pipelines_config.py +37 -7
  14. {airia-0.1.31 → airia-0.1.33}/airia/client/project/async_project.py +31 -5
  15. {airia-0.1.31 → airia-0.1.33}/airia/client/project/base_project.py +25 -2
  16. {airia-0.1.31 → airia-0.1.33}/airia/client/project/sync_project.py +33 -5
  17. {airia-0.1.31 → airia-0.1.33}/airia/client/store/async_store.py +16 -1
  18. {airia-0.1.31 → airia-0.1.33}/airia/client/store/base_store.py +11 -1
  19. {airia-0.1.31 → airia-0.1.33}/airia/client/store/sync_store.py +16 -1
  20. {airia-0.1.31 → airia-0.1.33}/airia/types/api/pipeline_execution/__init__.py +8 -0
  21. {airia-0.1.31 → airia-0.1.33}/airia/types/api/pipeline_execution/_pipeline_execution.py +8 -0
  22. airia-0.1.33/airia/types/api/pipeline_execution/get_pipeline_execution.py +83 -0
  23. {airia-0.1.31 → airia-0.1.33}/airia.egg-info/PKG-INFO +1 -1
  24. {airia-0.1.31 → airia-0.1.33}/airia.egg-info/SOURCES.txt +1 -0
  25. {airia-0.1.31 → airia-0.1.33}/pyproject.toml +1 -1
  26. {airia-0.1.31 → airia-0.1.33}/LICENSE +0 -0
  27. {airia-0.1.31 → airia-0.1.33}/README.md +0 -0
  28. {airia-0.1.31 → airia-0.1.33}/airia/__init__.py +0 -0
  29. {airia-0.1.31 → airia-0.1.33}/airia/client/__init__.py +0 -0
  30. {airia-0.1.31 → airia-0.1.33}/airia/client/_request_handler/__init__.py +0 -0
  31. {airia-0.1.31 → airia-0.1.33}/airia/client/_request_handler/async_request_handler.py +0 -0
  32. {airia-0.1.31 → airia-0.1.33}/airia/client/_request_handler/base_request_handler.py +0 -0
  33. {airia-0.1.31 → airia-0.1.33}/airia/client/_request_handler/sync_request_handler.py +0 -0
  34. {airia-0.1.31 → airia-0.1.33}/airia/client/async_client.py +0 -0
  35. {airia-0.1.31 → airia-0.1.33}/airia/client/attachments/__init__.py +0 -0
  36. {airia-0.1.31 → airia-0.1.33}/airia/client/attachments/async_attachments.py +0 -0
  37. {airia-0.1.31 → airia-0.1.33}/airia/client/attachments/base_attachments.py +0 -0
  38. {airia-0.1.31 → airia-0.1.33}/airia/client/attachments/sync_attachments.py +0 -0
  39. {airia-0.1.31 → airia-0.1.33}/airia/client/base_client.py +0 -0
  40. {airia-0.1.31 → airia-0.1.33}/airia/client/conversations/__init__.py +0 -0
  41. {airia-0.1.31 → airia-0.1.33}/airia/client/conversations/async_conversations.py +0 -0
  42. {airia-0.1.31 → airia-0.1.33}/airia/client/conversations/base_conversations.py +0 -0
  43. {airia-0.1.31 → airia-0.1.33}/airia/client/conversations/sync_conversations.py +0 -0
  44. {airia-0.1.31 → airia-0.1.33}/airia/client/data_vector_search/__init__.py +0 -0
  45. {airia-0.1.31 → airia-0.1.33}/airia/client/deployments/__init__.py +0 -0
  46. {airia-0.1.31 → airia-0.1.33}/airia/client/library/__init__.py +0 -0
  47. {airia-0.1.31 → airia-0.1.33}/airia/client/library/async_library.py +0 -0
  48. {airia-0.1.31 → airia-0.1.33}/airia/client/library/base_library.py +0 -0
  49. {airia-0.1.31 → airia-0.1.33}/airia/client/library/sync_library.py +0 -0
  50. {airia-0.1.31 → airia-0.1.33}/airia/client/models/__init__.py +0 -0
  51. {airia-0.1.31 → airia-0.1.33}/airia/client/models/async_models.py +0 -0
  52. {airia-0.1.31 → airia-0.1.33}/airia/client/models/base_models.py +0 -0
  53. {airia-0.1.31 → airia-0.1.33}/airia/client/models/sync_models.py +0 -0
  54. {airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_execution/__init__.py +0 -0
  55. {airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_import/__init__.py +0 -0
  56. {airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_import/async_pipeline_import.py +0 -0
  57. {airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_import/base_pipeline_import.py +0 -0
  58. {airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_import/sync_pipeline_import.py +0 -0
  59. {airia-0.1.31 → airia-0.1.33}/airia/client/pipelines_config/__init__.py +0 -0
  60. {airia-0.1.31 → airia-0.1.33}/airia/client/project/__init__.py +0 -0
  61. {airia-0.1.31 → airia-0.1.33}/airia/client/store/__init__.py +0 -0
  62. {airia-0.1.31 → airia-0.1.33}/airia/client/sync_client.py +0 -0
  63. {airia-0.1.31 → airia-0.1.33}/airia/client/tools/__init__.py +0 -0
  64. {airia-0.1.31 → airia-0.1.33}/airia/client/tools/async_tools.py +0 -0
  65. {airia-0.1.31 → airia-0.1.33}/airia/client/tools/base_tools.py +0 -0
  66. {airia-0.1.31 → airia-0.1.33}/airia/client/tools/sync_tools.py +0 -0
  67. {airia-0.1.31 → airia-0.1.33}/airia/constants.py +0 -0
  68. {airia-0.1.31 → airia-0.1.33}/airia/exceptions.py +0 -0
  69. {airia-0.1.31 → airia-0.1.33}/airia/logs.py +0 -0
  70. {airia-0.1.31 → airia-0.1.33}/airia/types/__init__.py +0 -0
  71. {airia-0.1.31 → airia-0.1.33}/airia/types/_api_version.py +0 -0
  72. {airia-0.1.31 → airia-0.1.33}/airia/types/_request_data.py +0 -0
  73. {airia-0.1.31 → airia-0.1.33}/airia/types/_structured_output.py +0 -0
  74. {airia-0.1.31 → airia-0.1.33}/airia/types/api/__init__.py +0 -0
  75. {airia-0.1.31 → airia-0.1.33}/airia/types/api/attachments/__init__.py +0 -0
  76. {airia-0.1.31 → airia-0.1.33}/airia/types/api/attachments/upload_file.py +0 -0
  77. {airia-0.1.31 → airia-0.1.33}/airia/types/api/conversations/__init__.py +0 -0
  78. {airia-0.1.31 → airia-0.1.33}/airia/types/api/conversations/_conversations.py +0 -0
  79. {airia-0.1.31 → airia-0.1.33}/airia/types/api/data_vector_search/__init__.py +0 -0
  80. {airia-0.1.31 → airia-0.1.33}/airia/types/api/data_vector_search/get_file_chunks.py +0 -0
  81. {airia-0.1.31 → airia-0.1.33}/airia/types/api/deployments/__init__.py +0 -0
  82. {airia-0.1.31 → airia-0.1.33}/airia/types/api/deployments/get_deployment.py +0 -0
  83. {airia-0.1.31 → airia-0.1.33}/airia/types/api/deployments/get_deployments.py +0 -0
  84. {airia-0.1.31 → airia-0.1.33}/airia/types/api/library/__init__.py +0 -0
  85. {airia-0.1.31 → airia-0.1.33}/airia/types/api/library/_library_models.py +0 -0
  86. {airia-0.1.31 → airia-0.1.33}/airia/types/api/models/__init__.py +0 -0
  87. {airia-0.1.31 → airia-0.1.33}/airia/types/api/models/list_models.py +0 -0
  88. {airia-0.1.31 → airia-0.1.33}/airia/types/api/pipeline_import/__init__.py +0 -0
  89. {airia-0.1.31 → airia-0.1.33}/airia/types/api/pipeline_import/create_agent_from_pipeline_definition.py +0 -0
  90. {airia-0.1.31 → airia-0.1.33}/airia/types/api/pipelines_config/__init__.py +0 -0
  91. {airia-0.1.31 → airia-0.1.33}/airia/types/api/pipelines_config/export_pipeline_definition.py +0 -0
  92. {airia-0.1.31 → airia-0.1.33}/airia/types/api/pipelines_config/get_pipeline_config.py +0 -0
  93. {airia-0.1.31 → airia-0.1.33}/airia/types/api/pipelines_config/get_pipelines_config.py +0 -0
  94. {airia-0.1.31 → airia-0.1.33}/airia/types/api/project/__init__.py +0 -0
  95. {airia-0.1.31 → airia-0.1.33}/airia/types/api/project/get_projects.py +0 -0
  96. {airia-0.1.31 → airia-0.1.33}/airia/types/api/store/__init__.py +0 -0
  97. {airia-0.1.31 → airia-0.1.33}/airia/types/api/store/get_file.py +0 -0
  98. {airia-0.1.31 → airia-0.1.33}/airia/types/api/store/get_files.py +0 -0
  99. {airia-0.1.31 → airia-0.1.33}/airia/types/api/tools/__init__.py +0 -0
  100. {airia-0.1.31 → airia-0.1.33}/airia/types/api/tools/_tools.py +0 -0
  101. {airia-0.1.31 → airia-0.1.33}/airia/types/sse/__init__.py +0 -0
  102. {airia-0.1.31 → airia-0.1.33}/airia/types/sse/sse_messages.py +0 -0
  103. {airia-0.1.31 → airia-0.1.33}/airia/utils/sse_parser.py +0 -0
  104. {airia-0.1.31 → airia-0.1.33}/airia.egg-info/dependency_links.txt +0 -0
  105. {airia-0.1.31 → airia-0.1.33}/airia.egg-info/requires.txt +0 -0
  106. {airia-0.1.31 → airia-0.1.33}/airia.egg-info/top_level.txt +0 -0
  107. {airia-0.1.31 → airia-0.1.33}/setup.cfg +0 -0
{airia-0.1.31 → airia-0.1.33}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: airia
-Version: 0.1.31
+Version: 0.1.33
 Summary: Python SDK for Airia API
 Author-email: Airia LLC <support@airia.com>
 License: MIT
{airia-0.1.31 → airia-0.1.33}/airia/client/data_vector_search/async_data_vector_search.py
@@ -11,7 +11,12 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         super().__init__(request_handler)
 
     async def get_file_chunks(
-        self, data_store_id: str, file_id: str, correlation_id: Optional[str] = None
+        self,
+        data_store_id: str,
+        file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
+        correlation_id: Optional[str] = None,
     ) -> GetFileChunksResponse:
         """
         Retrieve chunks from a specific file in a data store.
@@ -21,6 +26,8 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         Args:
             data_store_id: The unique identifier of the data store (GUID format)
             file_id: The unique identifier of the file (GUID format)
+            page_number: The page number (1-based). Default is 1.
+            page_size: The page size. Maximum supported value is 100. Default is 50.
             correlation_id: Optional correlation ID for request tracing
 
         Returns:
@@ -49,6 +56,14 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
                 file_id="your_file_id"
             )
 
+            # Get file chunks with custom pagination
+            chunks_response = await client.data_vector_search.get_file_chunks(
+                data_store_id="your_data_store_id",
+                file_id="your_file_id",
+                page_number=2,
+                page_size=100
+            )
+
             # Access the chunks
             for chunk in chunks_response.chunks:
                 print(f"Chunk: {chunk.chunk}")
@@ -64,6 +79,8 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         request_data = self._pre_get_file_chunks(
             data_store_id=data_store_id,
             file_id=file_id,
+            page_number=page_number,
+            page_size=page_size,
             correlation_id=correlation_id,
             api_version=ApiVersion.V1.value,
         )
{airia-0.1.31 → airia-0.1.33}/airia/client/data_vector_search/base_data_vector_search.py
@@ -13,6 +13,8 @@ class BaseDataVectorSearch:
         self,
         data_store_id: str,
         file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
         correlation_id: Optional[str] = None,
         api_version: str = ApiVersion.V1.value,
     ):
@@ -24,6 +26,8 @@ class BaseDataVectorSearch:
         Args:
            data_store_id: ID of the data store
            file_id: ID of the file
+           page_number: The page number (1-based). Default is 1.
+           page_size: The page size. Maximum supported value is 100. Default is 50.
           correlation_id: Optional correlation ID for tracing
           api_version: API version to use for the request
 
@@ -44,7 +48,7 @@ class BaseDataVectorSearch:
         )
 
         request_data = self._request_handler.prepare_request(
-            url, correlation_id=correlation_id, params={"pageNumber": 1, "pageSize": 50}
+            url, correlation_id=correlation_id, params={"pageNumber": page_number, "pageSize": page_size}
         )
 
         return request_data
{airia-0.1.31 → airia-0.1.33}/airia/client/data_vector_search/sync_data_vector_search.py
@@ -11,7 +11,12 @@ class DataVectorSearch(BaseDataVectorSearch):
         super().__init__(request_handler)
 
     def get_file_chunks(
-        self, data_store_id: str, file_id: str, correlation_id: Optional[str] = None
+        self,
+        data_store_id: str,
+        file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
+        correlation_id: Optional[str] = None,
     ) -> GetFileChunksResponse:
         """
         Retrieve chunks from a specific file in a data store.
@@ -21,6 +26,8 @@ class DataVectorSearch(BaseDataVectorSearch):
         Args:
             data_store_id: The unique identifier of the data store (GUID format)
             file_id: The unique identifier of the file (GUID format)
+            page_number: The page number (1-based). Default is 1.
+            page_size: The page size. Maximum supported value is 100. Default is 50.
             correlation_id: Optional correlation ID for request tracing
 
         Returns:
@@ -47,6 +54,14 @@ class DataVectorSearch(BaseDataVectorSearch):
                 file_id="your_file_id"
            )
 
+            # Get file chunks with custom pagination
+            chunks_response = client.data_vector_search.get_file_chunks(
+                data_store_id="your_data_store_id",
+                file_id="your_file_id",
+                page_number=2,
+                page_size=100
+            )
+
             # Access the chunks
             for chunk in chunks_response.chunks:
                 print(f"Chunk: {chunk.chunk}")
@@ -58,6 +73,8 @@ class DataVectorSearch(BaseDataVectorSearch):
         request_data = self._pre_get_file_chunks(
             data_store_id=data_store_id,
             file_id=file_id,
+            page_number=page_number,
+            page_size=page_size,
             correlation_id=correlation_id,
             api_version=ApiVersion.V1.value,
         )
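
Taken together, the pagination hunks above (sync and async) let a caller walk a large file page by page instead of being limited to the first 50 chunks. Below is a minimal sketch of such a loop, not taken from the package: the `from airia import AiriaClient` import path and the "stop on an empty page" condition are assumptions, while the 100-item cap comes from the new docstring.

```python
from airia import AiriaClient  # import path is an assumption, not shown in this diff

client = AiriaClient(api_key="your-api-key")

all_chunks = []
page_number = 1
while True:
    # page_size is capped at 100 according to the docstring added in 0.1.33
    response = client.data_vector_search.get_file_chunks(
        data_store_id="your_data_store_id",
        file_id="your_file_id",
        page_number=page_number,
        page_size=100,
    )
    if not response.chunks:
        # Assumed stop condition: an empty page means nothing is left to fetch
        break
    all_chunks.extend(response.chunks)
    page_number += 1

print(f"Fetched {len(all_chunks)} chunks")
```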
{airia-0.1.31 → airia-0.1.33}/airia/client/deployments/async_deployments.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List, Literal, Optional
 
 from ...types._api_version import ApiVersion
 from ...types.api.deployments import GetDeploymentResponse, GetDeploymentsResponse
@@ -12,6 +12,11 @@ class AsyncDeployments(BaseDeployments):
 
     async def get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         project_id: Optional[str] = None,
@@ -26,6 +31,11 @@
         about each deployment including associated pipelines, data sources, and user prompts.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             project_id: Optional filter by project id
@@ -42,16 +52,33 @@
         Example:
             ```python
             client = AiriaAsyncClient(api_key="your-api-key")
+
+            # Basic usage with filtering
             deployments = await client.deployments.get_deployments(
                 tags=["production", "nlp"],
                 is_recommended=True
             )
+
+            # With pagination and sorting
+            deployments = await client.deployments.get_deployments(
+                page_number=1,
+                page_size=20,
+                sort_by="deploymentName",
+                sort_direction="ASC",
+                filter="text-analysis"
+            )
+
             print(f"Found {deployments.total_count} deployments")
             for deployment in deployments.items:
                 print(f"- {deployment.deployment_name}")
             ```
         """
         request_data = self._pre_get_deployments(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            filter=filter,
             tags=tags,
             is_recommended=is_recommended,
             correlation_id=correlation_id,
{airia-0.1.31 → airia-0.1.33}/airia/client/deployments/base_deployments.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, Union
+from typing import List, Literal, Optional, Union
 from urllib.parse import urljoin
 
 from ...types._api_version import ApiVersion
@@ -11,6 +11,11 @@ class BaseDeployments:
 
     def _pre_get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         correlation_id: Optional[str] = None,
@@ -23,6 +28,11 @@ class BaseDeployments:
         retrieval requests, including optional filtering by tags and recommendation status.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             correlation_id: Optional correlation ID for tracing
@@ -45,6 +55,16 @@
 
         # Build query parameters
         params = {}
+        if page_number is not None:
+            params["PageNumber"] = page_number
+        if page_size is not None:
+            params["PageSize"] = page_size
+        if sort_by is not None:
+            params["SortBy"] = sort_by
+        if sort_direction is not None:
+            params["SortDirection"] = sort_direction
+        if filter is not None:
+            params["Filter"] = filter
         if tags is not None:
             params["tags"] = tags
         if is_recommended is not None:
{airia-0.1.31 → airia-0.1.33}/airia/client/deployments/sync_deployments.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List, Literal, Optional
 
 from ...types._api_version import ApiVersion
 from ...types.api.deployments import GetDeploymentResponse, GetDeploymentsResponse
@@ -12,6 +12,11 @@ class Deployments(BaseDeployments):
 
     def get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         project_id: Optional[str] = None,
@@ -26,6 +31,11 @@ class Deployments(BaseDeployments):
         about each deployment including associated pipelines, data sources, and user prompts.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             project_id: Optional filter by project id
@@ -42,16 +52,33 @@ class Deployments(BaseDeployments):
         Example:
             ```python
             client = AiriaClient(api_key="your-api-key")
+
+            # Basic usage with filtering
             deployments = client.deployments.get_deployments(
                 tags=["production", "nlp"],
                 is_recommended=True
             )
+
+            # With pagination and sorting
+            deployments = client.deployments.get_deployments(
+                page_number=1,
+                page_size=20,
+                sort_by="deploymentName",
+                sort_direction="ASC",
+                filter="text-analysis"
+            )
+
             print(f"Found {deployments.total_count} deployments")
             for deployment in deployments.items:
                 print(f"- {deployment.deployment_name}")
             ```
         """
         request_data = self._pre_get_deployments(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            filter=filter,
             tags=tags,
             is_recommended=is_recommended,
             correlation_id=correlation_id,
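
With the new `page_number`, `page_size`, `sort_by`, `sort_direction`, and `filter` arguments, listing every deployment reduces to a paging loop. A sketch under assumptions: the import path and the use of `total_count` as the overall (unpaged) count are inferred from the docstring example above, not confirmed by this diff.

```python
from airia import AiriaClient  # import path is an assumption, not shown in this diff

client = AiriaClient(api_key="your-api-key")

page_number, page_size, seen = 1, 20, 0
while True:
    page = client.deployments.get_deployments(
        page_number=page_number,
        page_size=page_size,
        sort_by="deploymentName",
        sort_direction="ASC",
    )
    for deployment in page.items:
        print(f"- {deployment.deployment_name}")
    seen += len(page.items)
    # Assumes total_count reports the total number of deployments, not the page size
    if not page.items or seen >= page.total_count:
        break
    page_number += 1
```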
{airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_execution/async_pipeline_execution.py
@@ -3,8 +3,12 @@ from typing import Any, Dict, List, Literal, Optional, Type, Union, overload
 from pydantic import BaseModel
 
 from ...types._api_version import ApiVersion
-from ...types._structured_output import create_schema_system_message, parse_response_to_model
+from ...types._structured_output import (
+    create_schema_system_message,
+    parse_response_to_model,
+)
 from ...types.api.pipeline_execution import (
+    GetPipelineExecutionResponse,
     PipelineExecutionAsyncStreamedResponse,
     PipelineExecutionResponse,
     TemporaryAssistantAsyncStreamedResponse,
@@ -209,7 +213,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages = list(in_memory_messages) if in_memory_messages else []
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -246,7 +252,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
             response = PipelineExecutionResponse(**resp)
             # Parse response to Pydantic model if output_schema was provided
             if output_schema is not None and response.result:
-                response.result = parse_response_to_model(response.result, output_schema)
+                response.result = parse_response_to_model(
+                    response.result, output_schema
+                )
             return response
 
         return PipelineExecutionAsyncStreamedResponse(stream=resp)
@@ -256,8 +264,8 @@
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: Literal[False] = False,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -287,8 +295,8 @@
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: Literal[True] = True,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -317,8 +325,8 @@
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {"prompt": ""},
         async_output: bool = False,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -355,8 +363,9 @@
             model_parameters: Model parameters (required). Must include libraryModelId,
                 projectModelId, modelIdentifierType, and modelIsAvailableinProject
             user_input: User input text (required)
+            prompt_parameters: Parameters for prompt configuration (required). Must include
+                a 'prompt' key with the system prompt text
             assistant_name: Name of the temporary assistant. Default is ""
-            prompt_parameters: Parameters for prompt configuration. Default is {"prompt": ""}
             async_output: Whether to stream the response. Default is False
             include_tools_response: Whether to return initial LLM tool result. Default is False
             save_history: Whether to save input and output to conversation history. Default is True
@@ -403,6 +412,7 @@
                     "modelIsAvailableinProject": True,
                 },
                 user_input="say double bubble bath ten times fast",
+                prompt_parameters={"prompt": "You are a helpful assistant."},
             )
             print(response.result)
             ```
@@ -418,6 +428,7 @@
             response = await client.pipeline_execution.execute_temporary_assistant(
                 model_parameters={...},
                 user_input="What's the weather?",
+                prompt_parameters={"prompt": "You are a weather information assistant."},
                 output_schema=WeatherInfo
             )
             ```
@@ -440,7 +451,9 @@
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages = list(in_memory_messages) if in_memory_messages else []
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -487,5 +500,52 @@
             response = TemporaryAssistantResponse(**resp)
             # Parse response to Pydantic model if output_schema was provided
             if output_schema is not None and response.result:
-                response.result = parse_response_to_model(str(response.result), output_schema)
+                response.result = parse_response_to_model(
+                    str(response.result), output_schema
+                )
             return response
+
+    async def get_pipeline_execution(
+        self, execution_id: str, correlation_id: Optional[str] = None
+    ) -> GetPipelineExecutionResponse:
+        """
+        Retrieve a pipeline execution result by execution ID asynchronously.
+
+        This method fetches the details of a specific pipeline execution using its
+        unique identifier. The response includes execution logs, step execution records,
+        timing information, and any errors that occurred during execution.
+
+        Args:
+            execution_id: The execution id (GUID format)
+            correlation_id: Optional correlation ID for request tracing
+
+        Returns:
+            GetPipelineExecutionResponse: Pipeline execution details including logs and step records
+
+        Raises:
+            AiriaAPIError: If the API request fails or execution is not found
+            ValueError: If an invalid API version is provided
+
+        Example:
+            ```python
+            client = AiriaAsyncClient(api_key="your-api-key")
+            execution = await client.pipeline_execution.get_pipeline_execution("execution-id-123")
+            print(f"Execution ID: {execution.execution_id}")
+            print(f"Success: {execution.log_record_details.success}")
+            print(f"Duration: {execution.log_record_details.duration}")
+
+            # Iterate through step execution logs
+            if execution.step_execution_log_records:
+                for step in execution.step_execution_log_records:
+                    print(f"Step: {step.step_title} - Success: {step.success}")
+            ```
+        """
+        request_data = self._pre_get_pipeline_execution(
+            execution_id=execution_id,
+            correlation_id=correlation_id,
+            api_version=ApiVersion.V1.value,
+        )
+
+        response = await self._request_handler.make_request("GET", request_data)
+
+        return GetPipelineExecutionResponse(**response)
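
The new `get_pipeline_execution` method above turns an execution ID into a structured log record. Here is a short sketch of inspecting a finished run with it, assuming you already hold an execution ID and that the field names match the docstring in the hunk; the import path is an assumption.

```python
import asyncio

from airia import AiriaAsyncClient  # import path is an assumption, not shown in this diff


async def inspect_execution(execution_id: str) -> None:
    client = AiriaAsyncClient(api_key="your-api-key")
    execution = await client.pipeline_execution.get_pipeline_execution(execution_id)

    details = execution.log_record_details
    print(f"Execution {execution.execution_id}: success={details.success}, duration={details.duration}")

    # Report the first failing step, if any (field names follow the docstring above)
    for step in execution.step_execution_log_records or []:
        if not step.success:
            print(f"First failed step: {step.step_title}")
            break


asyncio.run(inspect_execution("execution-id-123"))
```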
{airia-0.1.31 → airia-0.1.33}/airia/client/pipeline_execution/base_pipeline_execution.py
@@ -120,8 +120,8 @@ class BasePipelineExecution:
         self,
         model_parameters: Dict[str, Any],
         user_input: str,
+        prompt_parameters: Dict[str, Any],
         assistant_name: str = "",
-        prompt_parameters: Dict[str, Any] = {},
         async_output: bool = False,
         include_tools_response: bool = False,
         save_history: bool = True,
@@ -153,8 +153,9 @@
         Args:
            model_parameters: model parameters
            user_input: Optional user input text
+           prompt_parameters: Parameters for prompt configuration (required). Must include
+               a 'prompt' key with the system prompt text
           assistant_name: Name of the temporary assistant
-           prompt_parameters: Parameters for prompt configuration (required)
           async_output: Whether to stream the response. Default is False
           include_tools_response: Whether to return initial LLM tool result. Default is False
           save_history: Whether to save input and output to conversation history. Default is True
@@ -224,3 +225,42 @@
         )
 
         return request_data
+
+    def _pre_get_pipeline_execution(
+        self,
+        execution_id: str,
+        correlation_id: Optional[str] = None,
+        api_version: str = ApiVersion.V1.value,
+    ):
+        """
+        Prepare request data for retrieving a pipeline execution.
+
+        This internal method constructs the URL for pipeline execution retrieval
+        by ID using the specified API version.
+
+        Args:
+            execution_id: The execution id (GUID format)
+            correlation_id: Optional correlation ID for tracing
+            api_version: API version to use for the request
+
+        Returns:
+            RequestData: Prepared request data for the pipeline execution endpoint
+
+        Raises:
+            ValueError: If an invalid API version is provided
+        """
+        if api_version not in ApiVersion.as_list():
+            raise ValueError(
+                f"Invalid API version: {api_version}. Valid versions are: {', '.join(ApiVersion.as_list())}"
+            )
+
+        url = urljoin(
+            self._request_handler.base_url,
+            f"{api_version}/PipelineExecution/{execution_id}",
+        )
+
+        request_data = self._request_handler.prepare_request(
+            url=url, correlation_id=correlation_id
+        )
+
+        return request_data
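
The breaking change threaded through these pipeline-execution hunks is that `prompt_parameters` lost its default and now must be passed explicitly (it also moved ahead of `assistant_name` in the signature). A migration sketch for `execute_temporary_assistant` against the async client shown above; the import path and the model parameter values are placeholders, not taken from this diff.

```python
import asyncio

from airia import AiriaAsyncClient  # import path is an assumption, not shown in this diff


async def main() -> None:
    client = AiriaAsyncClient(api_key="your-api-key")

    # 0.1.31: prompt_parameters could be omitted (default was {"prompt": ""}).
    # 0.1.33: prompt_parameters is required and must include a "prompt" key.
    response = await client.pipeline_execution.execute_temporary_assistant(
        model_parameters={
            "libraryModelId": "your-library-model-id",     # placeholder values
            "projectModelId": "your-project-model-id",
            "modelIdentifierType": "your-identifier-type",
            "modelIsAvailableinProject": True,
        },
        user_input="say double bubble bath ten times fast",
        prompt_parameters={"prompt": "You are a helpful assistant."},
    )
    print(response.result)


asyncio.run(main())
```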