airia 0.1.31__py3-none-any.whl → 0.1.32__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (26)
  1. airia/client/data_vector_search/async_data_vector_search.py +18 -1
  2. airia/client/data_vector_search/base_data_vector_search.py +5 -1
  3. airia/client/data_vector_search/sync_data_vector_search.py +18 -1
  4. airia/client/deployments/async_deployments.py +28 -1
  5. airia/client/deployments/base_deployments.py +21 -1
  6. airia/client/deployments/sync_deployments.py +28 -1
  7. airia/client/pipeline_execution/async_pipeline_execution.py +62 -5
  8. airia/client/pipeline_execution/base_pipeline_execution.py +39 -0
  9. airia/client/pipeline_execution/sync_pipeline_execution.py +65 -6
  10. airia/client/pipelines_config/async_pipelines_config.py +37 -7
  11. airia/client/pipelines_config/base_pipelines_config.py +25 -1
  12. airia/client/pipelines_config/sync_pipelines_config.py +37 -7
  13. airia/client/project/async_project.py +31 -5
  14. airia/client/project/base_project.py +25 -2
  15. airia/client/project/sync_project.py +33 -5
  16. airia/client/store/async_store.py +16 -1
  17. airia/client/store/base_store.py +11 -1
  18. airia/client/store/sync_store.py +16 -1
  19. airia/types/api/pipeline_execution/__init__.py +8 -0
  20. airia/types/api/pipeline_execution/_pipeline_execution.py +8 -0
  21. airia/types/api/pipeline_execution/get_pipeline_execution.py +83 -0
  22. {airia-0.1.31.dist-info → airia-0.1.32.dist-info}/METADATA +1 -1
  23. {airia-0.1.31.dist-info → airia-0.1.32.dist-info}/RECORD +26 -25
  24. {airia-0.1.31.dist-info → airia-0.1.32.dist-info}/WHEEL +0 -0
  25. {airia-0.1.31.dist-info → airia-0.1.32.dist-info}/licenses/LICENSE +0 -0
  26. {airia-0.1.31.dist-info → airia-0.1.32.dist-info}/top_level.txt +0 -0

airia/client/data_vector_search/async_data_vector_search.py
@@ -11,7 +11,12 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         super().__init__(request_handler)
 
     async def get_file_chunks(
-        self, data_store_id: str, file_id: str, correlation_id: Optional[str] = None
+        self,
+        data_store_id: str,
+        file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
+        correlation_id: Optional[str] = None,
     ) -> GetFileChunksResponse:
         """
         Retrieve chunks from a specific file in a data store.
@@ -21,6 +26,8 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         Args:
             data_store_id: The unique identifier of the data store (GUID format)
             file_id: The unique identifier of the file (GUID format)
+            page_number: The page number (1-based). Default is 1.
+            page_size: The page size. Maximum supported value is 100. Default is 50.
             correlation_id: Optional correlation ID for request tracing
 
         Returns:
@@ -49,6 +56,14 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
                 file_id="your_file_id"
             )
 
+            # Get file chunks with custom pagination
+            chunks_response = await client.data_vector_search.get_file_chunks(
+                data_store_id="your_data_store_id",
+                file_id="your_file_id",
+                page_number=2,
+                page_size=100
+            )
+
             # Access the chunks
             for chunk in chunks_response.chunks:
                 print(f"Chunk: {chunk.chunk}")
@@ -64,6 +79,8 @@ class AsyncDataVectorSearch(BaseDataVectorSearch):
         request_data = self._pre_get_file_chunks(
             data_store_id=data_store_id,
             file_id=file_id,
+            page_number=page_number,
+            page_size=page_size,
             correlation_id=correlation_id,
             api_version=ApiVersion.V1.value,
         )

airia/client/data_vector_search/base_data_vector_search.py
@@ -13,6 +13,8 @@ class BaseDataVectorSearch:
         self,
         data_store_id: str,
         file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
         correlation_id: Optional[str] = None,
         api_version: str = ApiVersion.V1.value,
     ):
@@ -24,6 +26,8 @@ class BaseDataVectorSearch:
         Args:
             data_store_id: ID of the data store
             file_id: ID of the file
+            page_number: The page number (1-based). Default is 1.
+            page_size: The page size. Maximum supported value is 100. Default is 50.
             correlation_id: Optional correlation ID for tracing
             api_version: API version to use for the request
 
@@ -44,7 +48,7 @@ class BaseDataVectorSearch:
         )
 
         request_data = self._request_handler.prepare_request(
-            url, correlation_id=correlation_id, params={"pageNumber": 1, "pageSize": 50}
+            url, correlation_id=correlation_id, params={"pageNumber": page_number, "pageSize": page_size}
         )
 
         return request_data

airia/client/data_vector_search/sync_data_vector_search.py
@@ -11,7 +11,12 @@ class DataVectorSearch(BaseDataVectorSearch):
         super().__init__(request_handler)
 
     def get_file_chunks(
-        self, data_store_id: str, file_id: str, correlation_id: Optional[str] = None
+        self,
+        data_store_id: str,
+        file_id: str,
+        page_number: int = 1,
+        page_size: int = 50,
+        correlation_id: Optional[str] = None,
     ) -> GetFileChunksResponse:
         """
         Retrieve chunks from a specific file in a data store.
@@ -21,6 +26,8 @@ class DataVectorSearch(BaseDataVectorSearch):
         Args:
             data_store_id: The unique identifier of the data store (GUID format)
             file_id: The unique identifier of the file (GUID format)
+            page_number: The page number (1-based). Default is 1.
+            page_size: The page size. Maximum supported value is 100. Default is 50.
             correlation_id: Optional correlation ID for request tracing
 
         Returns:
@@ -47,6 +54,14 @@ class DataVectorSearch(BaseDataVectorSearch):
                 file_id="your_file_id"
            )
 
+            # Get file chunks with custom pagination
+            chunks_response = client.data_vector_search.get_file_chunks(
+                data_store_id="your_data_store_id",
+                file_id="your_file_id",
+                page_number=2,
+                page_size=100
+            )
+
             # Access the chunks
             for chunk in chunks_response.chunks:
                 print(f"Chunk: {chunk.chunk}")
@@ -58,6 +73,8 @@ class DataVectorSearch(BaseDataVectorSearch):
         request_data = self._pre_get_file_chunks(
             data_store_id=data_store_id,
             file_id=file_id,
+            page_number=page_number,
+            page_size=page_size,
             correlation_id=correlation_id,
             api_version=ApiVersion.V1.value,
         )
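
Across these three files, the new `page_number`/`page_size` arguments flow from the public `get_file_chunks` methods down to the `pageNumber`/`pageSize` query parameters that were previously hardcoded to 1 and 50. Below is a minimal sketch of draining every chunk of a file under the new signature; the import path and the short-page stop condition are illustrative assumptions, not part of the SDK:

```python
# Hypothetical helper: drain every chunk of a file using the
# page_number/page_size parameters added in 0.1.32.
from airia import AiriaClient  # assumed import path


def iter_all_file_chunks(client: AiriaClient, data_store_id: str, file_id: str):
    page_number = 1
    page_size = 100  # docstring: maximum supported value is 100
    while True:
        resp = client.data_vector_search.get_file_chunks(
            data_store_id=data_store_id,
            file_id=file_id,
            page_number=page_number,
            page_size=page_size,
        )
        yield from resp.chunks
        # Stop on a short page (heuristic; a total-count field on the
        # response, if present, would be a sturdier signal).
        if len(resp.chunks) < page_size:
            break
        page_number += 1
```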

airia/client/deployments/async_deployments.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List, Literal, Optional
 
 from ...types._api_version import ApiVersion
 from ...types.api.deployments import GetDeploymentResponse, GetDeploymentsResponse
@@ -12,6 +12,11 @@ class AsyncDeployments(BaseDeployments):
 
     async def get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         project_id: Optional[str] = None,
@@ -26,6 +31,11 @@ class AsyncDeployments(BaseDeployments):
         about each deployment including associated pipelines, data sources, and user prompts.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             project_id: Optional filter by project id
@@ -42,16 +52,33 @@ class AsyncDeployments(BaseDeployments):
         Example:
             ```python
             client = AiriaAsyncClient(api_key="your-api-key")
+
+            # Basic usage with filtering
             deployments = await client.deployments.get_deployments(
                 tags=["production", "nlp"],
                 is_recommended=True
             )
+
+            # With pagination and sorting
+            deployments = await client.deployments.get_deployments(
+                page_number=1,
+                page_size=20,
+                sort_by="deploymentName",
+                sort_direction="ASC",
+                filter="text-analysis"
+            )
+
             print(f"Found {deployments.total_count} deployments")
             for deployment in deployments.items:
                 print(f"- {deployment.deployment_name}")
             ```
         """
         request_data = self._pre_get_deployments(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            filter=filter,
             tags=tags,
             is_recommended=is_recommended,
             correlation_id=correlation_id,

airia/client/deployments/base_deployments.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, Union
+from typing import List, Literal, Optional, Union
 from urllib.parse import urljoin
 
 from ...types._api_version import ApiVersion
@@ -11,6 +11,11 @@ class BaseDeployments:
 
     def _pre_get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         correlation_id: Optional[str] = None,
@@ -23,6 +28,11 @@ class BaseDeployments:
         retrieval requests, including optional filtering by tags and recommendation status.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             correlation_id: Optional correlation ID for tracing
@@ -45,6 +55,16 @@ class BaseDeployments:
 
         # Build query parameters
         params = {}
+        if page_number is not None:
+            params["PageNumber"] = page_number
+        if page_size is not None:
+            params["PageSize"] = page_size
+        if sort_by is not None:
+            params["SortBy"] = sort_by
+        if sort_direction is not None:
+            params["SortDirection"] = sort_direction
+        if filter is not None:
+            params["Filter"] = filter
         if tags is not None:
             params["tags"] = tags
         if is_recommended is not None:

airia/client/deployments/sync_deployments.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List, Literal, Optional
 
 from ...types._api_version import ApiVersion
 from ...types.api.deployments import GetDeploymentResponse, GetDeploymentsResponse
@@ -12,6 +12,11 @@ class Deployments(BaseDeployments):
 
     def get_deployments(
         self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
         tags: Optional[List[str]] = None,
         is_recommended: Optional[bool] = None,
         project_id: Optional[str] = None,
@@ -26,6 +31,11 @@ class Deployments(BaseDeployments):
         about each deployment including associated pipelines, data sources, and user prompts.
 
         Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: The direction of the sort, either "ASC" for ascending or "DESC" for descending
+            filter: The search filter
             tags: Optional list of tags to filter deployments by
             is_recommended: Optional filter by recommended status
             project_id: Optional filter by project id
@@ -42,16 +52,33 @@ class Deployments(BaseDeployments):
         Example:
             ```python
             client = AiriaClient(api_key="your-api-key")
+
+            # Basic usage with filtering
             deployments = client.deployments.get_deployments(
                 tags=["production", "nlp"],
                 is_recommended=True
             )
+
+            # With pagination and sorting
+            deployments = client.deployments.get_deployments(
+                page_number=1,
+                page_size=20,
+                sort_by="deploymentName",
+                sort_direction="ASC",
+                filter="text-analysis"
+            )
+
             print(f"Found {deployments.total_count} deployments")
             for deployment in deployments.items:
                 print(f"- {deployment.deployment_name}")
             ```
         """
         request_data = self._pre_get_deployments(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            filter=filter,
             tags=tags,
             is_recommended=is_recommended,
             correlation_id=correlation_id,
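
The five new list-control parameters (`page_number`, `page_size`, `sort_by`, `sort_direction`, `filter`) reach the wire as `PageNumber`, `PageSize`, `SortBy`, `SortDirection`, and `Filter`, per the `_pre_get_deployments` changes above. A hedged sketch of walking every page of deployments, assuming `total_count` reports the size of the full (unpaginated) result set and the import path shown:

```python
# Sketch: fetch all deployments page by page, sorted by name.
import math

from airia import AiriaClient  # assumed import path

client = AiriaClient(api_key="your-api-key")
page_size = 20
first = client.deployments.get_deployments(
    page_number=1, page_size=page_size, sort_by="deploymentName", sort_direction="ASC"
)
items = list(first.items)
# Assumption: total_count counts the full result set, not just this page.
total_pages = math.ceil(first.total_count / page_size)
for page_number in range(2, total_pages + 1):
    page = client.deployments.get_deployments(
        page_number=page_number,
        page_size=page_size,
        sort_by="deploymentName",
        sort_direction="ASC",
    )
    items.extend(page.items)
print(f"Fetched {len(items)} of {first.total_count} deployments")
```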

airia/client/pipeline_execution/async_pipeline_execution.py
@@ -3,8 +3,12 @@ from typing import Any, Dict, List, Literal, Optional, Type, Union, overload
 from pydantic import BaseModel
 
 from ...types._api_version import ApiVersion
-from ...types._structured_output import create_schema_system_message, parse_response_to_model
+from ...types._structured_output import (
+    create_schema_system_message,
+    parse_response_to_model,
+)
 from ...types.api.pipeline_execution import (
+    GetPipelineExecutionResponse,
     PipelineExecutionAsyncStreamedResponse,
     PipelineExecutionResponse,
     TemporaryAssistantAsyncStreamedResponse,
@@ -209,7 +213,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages = list(in_memory_messages) if in_memory_messages else []
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -246,7 +252,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
             response = PipelineExecutionResponse(**resp)
             # Parse response to Pydantic model if output_schema was provided
             if output_schema is not None and response.result:
-                response.result = parse_response_to_model(response.result, output_schema)
+                response.result = parse_response_to_model(
+                    response.result, output_schema
+                )
             return response
 
         return PipelineExecutionAsyncStreamedResponse(stream=resp)
@@ -440,7 +448,9 @@ class AsyncPipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages = list(in_memory_messages) if in_memory_messages else []
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -487,5 +497,52 @@ class AsyncPipelineExecution(BasePipelineExecution):
         response = TemporaryAssistantResponse(**resp)
         # Parse response to Pydantic model if output_schema was provided
         if output_schema is not None and response.result:
-            response.result = parse_response_to_model(str(response.result), output_schema)
+            response.result = parse_response_to_model(
+                str(response.result), output_schema
+            )
         return response
+
+    async def get_pipeline_execution(
+        self, execution_id: str, correlation_id: Optional[str] = None
+    ) -> GetPipelineExecutionResponse:
+        """
+        Retrieve a pipeline execution result by execution ID asynchronously.
+
+        This method fetches the details of a specific pipeline execution using its
+        unique identifier. The response includes execution logs, step execution records,
+        timing information, and any errors that occurred during execution.
+
+        Args:
+            execution_id: The execution id (GUID format)
+            correlation_id: Optional correlation ID for request tracing
+
+        Returns:
+            GetPipelineExecutionResponse: Pipeline execution details including logs and step records
+
+        Raises:
+            AiriaAPIError: If the API request fails or execution is not found
+            ValueError: If an invalid API version is provided
+
+        Example:
+            ```python
+            client = AiriaAsyncClient(api_key="your-api-key")
+            execution = await client.pipeline_execution.get_pipeline_execution("execution-id-123")
+            print(f"Execution ID: {execution.execution_id}")
+            print(f"Success: {execution.log_record_details.success}")
+            print(f"Duration: {execution.log_record_details.duration}")
+
+            # Iterate through step execution logs
+            if execution.step_execution_log_records:
+                for step in execution.step_execution_log_records:
+                    print(f"Step: {step.step_title} - Success: {step.success}")
+            ```
+        """
+        request_data = self._pre_get_pipeline_execution(
+            execution_id=execution_id,
+            correlation_id=correlation_id,
+            api_version=ApiVersion.V1.value,
+        )
+
+        response = await self._request_handler.make_request("GET", request_data)
+
+        return GetPipelineExecutionResponse(**response)

airia/client/pipeline_execution/base_pipeline_execution.py
@@ -224,3 +224,42 @@ class BasePipelineExecution:
         )
 
         return request_data
+
+    def _pre_get_pipeline_execution(
+        self,
+        execution_id: str,
+        correlation_id: Optional[str] = None,
+        api_version: str = ApiVersion.V1.value,
+    ):
+        """
+        Prepare request data for retrieving a pipeline execution.
+
+        This internal method constructs the URL for pipeline execution retrieval
+        by ID using the specified API version.
+
+        Args:
+            execution_id: The execution id (GUID format)
+            correlation_id: Optional correlation ID for tracing
+            api_version: API version to use for the request
+
+        Returns:
+            RequestData: Prepared request data for the pipeline execution endpoint
+
+        Raises:
+            ValueError: If an invalid API version is provided
+        """
+        if api_version not in ApiVersion.as_list():
+            raise ValueError(
+                f"Invalid API version: {api_version}. Valid versions are: {', '.join(ApiVersion.as_list())}"
+            )
+
+        url = urljoin(
+            self._request_handler.base_url,
+            f"{api_version}/PipelineExecution/{execution_id}",
+        )
+
+        request_data = self._request_handler.prepare_request(
+            url=url, correlation_id=correlation_id
+        )
+
+        return request_data

airia/client/pipeline_execution/sync_pipeline_execution.py
@@ -3,8 +3,12 @@ from typing import Any, Dict, List, Literal, Optional, Type, Union, overload
 from pydantic import BaseModel
 
 from ...types._api_version import ApiVersion
-from ...types._structured_output import create_schema_system_message, parse_response_to_model
+from ...types._structured_output import (
+    create_schema_system_message,
+    parse_response_to_model,
+)
 from ...types.api.pipeline_execution import (
+    GetPipelineExecutionResponse,
     PipelineExecutionResponse,
     PipelineExecutionStreamedResponse,
     TemporaryAssistantResponse,
@@ -208,7 +212,9 @@ class PipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages = list(in_memory_messages) if in_memory_messages else []
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -245,7 +251,9 @@ class PipelineExecution(BasePipelineExecution):
             response = PipelineExecutionResponse(**resp)
             # Parse response to Pydantic model if output_schema was provided
             if output_schema is not None and response.result:
-                response.result = parse_response_to_model(response.result, output_schema)
+                response.result = parse_response_to_model(
+                    response.result, output_schema
+                )
             return response
 
         return PipelineExecutionStreamedResponse(stream=resp)
@@ -440,7 +448,9 @@ class PipelineExecution(BasePipelineExecution):
         modified_in_memory_messages = in_memory_messages
         if output_schema is not None:
             # Create a copy of in_memory_messages if it exists, otherwise create new list
-            modified_in_memory_messages = list(in_memory_messages) if in_memory_messages else []
+            modified_in_memory_messages = (
+                list(in_memory_messages) if in_memory_messages else []
+            )
             # Insert schema instruction as first system message
             schema_message = create_schema_system_message(output_schema)
             modified_in_memory_messages.insert(0, schema_message)
@@ -448,7 +458,9 @@ class PipelineExecution(BasePipelineExecution):
             output_configuration = None
 
         # Convert UUID objects to strings for API compatibility
-        conversation_id_str = str(conversation_id) if conversation_id else conversation_id
+        conversation_id_str = (
+            str(conversation_id) if conversation_id else conversation_id
+        )
         user_id_str = str(user_id) if user_id else user_id
         user_input_id_str = str(user_input_id) if user_input_id else user_input_id
 
@@ -492,5 +504,52 @@ class PipelineExecution(BasePipelineExecution):
         response = TemporaryAssistantResponse(**resp)
         # Parse response to Pydantic model if output_schema was provided
         if output_schema is not None and response.result:
-            response.result = parse_response_to_model(str(response.result), output_schema)
+            response.result = parse_response_to_model(
+                str(response.result), output_schema
+            )
         return response
+
+    def get_pipeline_execution(
+        self, execution_id: str, correlation_id: Optional[str] = None
+    ) -> GetPipelineExecutionResponse:
+        """
+        Retrieve a pipeline execution result by execution ID.
+
+        This method fetches the details of a specific pipeline execution using its
+        unique identifier. The response includes execution logs, step execution records,
+        timing information, and any errors that occurred during execution.
+
+        Args:
+            execution_id: The execution id (GUID format)
+            correlation_id: Optional correlation ID for request tracing
+
+        Returns:
+            GetPipelineExecutionResponse: Pipeline execution details including logs and step records
+
+        Raises:
+            AiriaAPIError: If the API request fails or execution is not found
+            ValueError: If an invalid API version is provided
+
+        Example:
+            ```python
+            client = AiriaClient(api_key="your-api-key")
+            execution = client.pipeline_execution.get_pipeline_execution("execution-id-123")
+            print(f"Execution ID: {execution.execution_id}")
+            print(f"Success: {execution.log_record_details.success}")
+            print(f"Duration: {execution.log_record_details.duration}")
+
+            # Iterate through step execution logs
+            if execution.step_execution_log_records:
+                for step in execution.step_execution_log_records:
+                    print(f"Step: {step.step_title} - Success: {step.success}")
+            ```
+        """
+        request_data = self._pre_get_pipeline_execution(
+            execution_id=execution_id,
+            correlation_id=correlation_id,
+            api_version=ApiVersion.V1.value,
+        )
+
+        response = self._request_handler.make_request("GET", request_data)
+
+        return GetPipelineExecutionResponse(**response)
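
With `get_pipeline_execution` available on both clients, a caller can poll an execution until its log record settles. The sketch below is illustrative only: the import path, the polling cadence, and the assumption that `log_record_details.success` stays unset while a run is still in flight are not guaranteed by the SDK.

```python
# Sketch: poll an execution until its log record reports a final status.
import time

from airia import AiriaClient  # assumed import path

client = AiriaClient(api_key="your-api-key")


def wait_for_execution(execution_id: str, timeout_s: float = 60.0):
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        execution = client.pipeline_execution.get_pipeline_execution(execution_id)
        details = execution.log_record_details
        # Assumption: success is None until the run finishes (success/failure).
        if details is not None and details.success is not None:
            return execution
        time.sleep(2.0)  # illustrative cadence
    raise TimeoutError(f"Execution {execution_id} did not finish in {timeout_s}s")
```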

airia/client/pipelines_config/async_pipelines_config.py
@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Literal, Optional
 
 from ...types._api_version import ApiVersion
 from ...types.api.pipelines_config import (
@@ -126,20 +126,34 @@ class AsyncPipelinesConfig(BasePipelinesConfig):
         return ExportPipelineDefinitionResponse(**resp)
 
     async def get_pipelines_config(
-        self, project_id: Optional[str] = None, correlation_id: Optional[str] = None
+        self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[Literal["ASC", "DESC"]] = None,
+        filter: Optional[str] = None,
+        project_id: Optional[str] = None,
+        model_credential_source_type: Optional[Literal["UserProvided", "Library"]] = None,
+        correlation_id: Optional[str] = None,
     ) -> GetPipelinesConfigResponse:
         """
-        Retrieve a list of pipeline configurations, optionally filtered by project ID.
+        Retrieve a list of pipeline configurations with optional filtering, pagination, and sorting.
 
         This method fetches a list of pipeline configurations including their
         deployment details, execution statistics, version information, and metadata.
-        The results can be filtered by project ID to retrieve only pipelines
-        belonging to a specific project.
+        The results can be filtered, paginated, and sorted using various parameters.
 
         Args:
+            page_number (int, optional): The page number to be fetched.
+            page_size (int, optional): The number of items per page.
+            sort_by (str, optional): Property to sort by.
+            sort_direction (str, optional): The direction of the sort, either "ASC" for ascending or "DESC" for descending.
+            filter (str, optional): The search filter.
             project_id (str, optional): The unique identifier of the project to filter
                 pipelines by. If not provided, pipelines from all accessible projects
                 will be returned.
+            model_credential_source_type (str, optional): Optional filter to return only pipelines
+                using models with specified source type ("UserProvided" or "Library").
             correlation_id (str, optional): A unique identifier for request tracing
                 and logging. If not provided, one will be automatically generated.
 
@@ -168,9 +182,19 @@ class AsyncPipelinesConfig(BasePipelinesConfig):
                 if pipeline.execution_stats:
                     print(f"Success count: {pipeline.execution_stats.success_count}")
 
-            # Get pipelines for a specific project
+            # Get pipelines with pagination and sorting
+            pipelines = await client.pipelines_config.get_pipelines_config(
+                page_number=1,
+                page_size=20,
+                sort_by="name",
+                sort_direction="ASC",
+                filter="classification"
+            )
+
+            # Get pipelines for a specific project with model source type filter
             project_pipelines = await client.pipelines_config.get_pipelines_config(
-                project_id="your_project_id"
+                project_id="your_project_id",
+                model_credential_source_type="UserProvided"
            )
             print(f"Project pipelines: {project_pipelines.total_count}")
             ```
@@ -181,7 +205,13 @@ class AsyncPipelinesConfig(BasePipelinesConfig):
            pipeline identifier.
        """
        request_data = self._pre_get_pipelines_config(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            filter=filter,
             project_id=project_id,
+            model_credential_source_type=model_credential_source_type,
             correlation_id=correlation_id,
             api_version=ApiVersion.V1.value,
         )
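
A quick sketch combining the new `pipelines_config` list controls with the `model_credential_source_type` filter. Parameter names follow the 0.1.32 signature shown above; the import path and the use of `total_count` follow the docstring examples in this diff and are otherwise assumptions:

```python
# Sketch: list library-backed pipelines for one project, sorted by name
# in descending order, restricted by a search filter.
import asyncio

from airia import AiriaAsyncClient  # assumed import path


async def main() -> None:
    client = AiriaAsyncClient(api_key="your-api-key")
    pipelines = await client.pipelines_config.get_pipelines_config(
        project_id="your_project_id",
        model_credential_source_type="Library",
        page_number=1,
        page_size=20,
        sort_by="name",
        sort_direction="DESC",
        filter="classification",
    )
    print(f"Matched {pipelines.total_count} pipelines")


asyncio.run(main())
```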