airia 0.1.23__tar.gz → 0.1.25__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {airia-0.1.23 → airia-0.1.25}/PKG-INFO +1 -1
- {airia-0.1.23 → airia-0.1.25}/airia/client/async_client.py +2 -0
- airia-0.1.25/airia/client/library/__init__.py +5 -0
- airia-0.1.25/airia/client/library/async_library.py +100 -0
- airia-0.1.25/airia/client/library/base_library.py +110 -0
- airia-0.1.25/airia/client/library/sync_library.py +100 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/pipeline_execution/async_pipeline_execution.py +207 -29
- airia-0.1.25/airia/client/pipeline_execution/base_pipeline_execution.py +223 -0
- airia-0.1.25/airia/client/pipeline_execution/sync_pipeline_execution.py +419 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/sync_client.py +2 -0
- airia-0.1.25/airia/types/api/library/__init__.py +11 -0
- airia-0.1.25/airia/types/api/library/_library_models.py +218 -0
- airia-0.1.25/airia/types/api/pipeline_execution/__init__.py +23 -0
- airia-0.1.25/airia/types/api/pipeline_execution/_pipeline_execution.py +175 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/pipelines_config/get_pipeline_config.py +1 -1
- {airia-0.1.23 → airia-0.1.25}/airia.egg-info/PKG-INFO +1 -1
- {airia-0.1.23 → airia-0.1.25}/airia.egg-info/SOURCES.txt +6 -0
- {airia-0.1.23 → airia-0.1.25}/pyproject.toml +1 -1
- airia-0.1.23/airia/client/pipeline_execution/base_pipeline_execution.py +0 -114
- airia-0.1.23/airia/client/pipeline_execution/sync_pipeline_execution.py +0 -240
- airia-0.1.23/airia/types/api/pipeline_execution/__init__.py +0 -13
- airia-0.1.23/airia/types/api/pipeline_execution/_pipeline_execution.py +0 -76
- {airia-0.1.23 → airia-0.1.25}/LICENSE +0 -0
- {airia-0.1.23 → airia-0.1.25}/README.md +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/_request_handler/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/_request_handler/async_request_handler.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/_request_handler/base_request_handler.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/_request_handler/sync_request_handler.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/attachments/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/attachments/async_attachments.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/attachments/base_attachments.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/attachments/sync_attachments.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/base_client.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/conversations/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/conversations/async_conversations.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/conversations/base_conversations.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/conversations/sync_conversations.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/data_vector_search/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/data_vector_search/async_data_vector_search.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/data_vector_search/base_data_vector_search.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/data_vector_search/sync_data_vector_search.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/deployments/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/deployments/async_deployments.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/deployments/base_deployments.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/deployments/sync_deployments.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/pipeline_execution/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/pipelines_config/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/pipelines_config/async_pipelines_config.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/pipelines_config/base_pipelines_config.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/pipelines_config/sync_pipelines_config.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/project/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/project/async_project.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/project/base_project.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/project/sync_project.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/store/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/store/async_store.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/store/base_store.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/client/store/sync_store.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/constants.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/exceptions.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/logs.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/_api_version.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/_request_data.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/attachments/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/attachments/upload_file.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/conversations/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/conversations/_conversations.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/data_vector_search/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/data_vector_search/get_file_chunks.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/deployments/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/deployments/get_deployment.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/deployments/get_deployments.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/pipelines_config/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/pipelines_config/export_pipeline_definition.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/pipelines_config/get_pipelines_config.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/project/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/project/get_projects.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/store/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/store/get_file.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/api/store/get_files.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/sse/__init__.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/types/sse/sse_messages.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia/utils/sse_parser.py +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia.egg-info/dependency_links.txt +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia.egg-info/requires.txt +0 -0
- {airia-0.1.23 → airia-0.1.25}/airia.egg-info/top_level.txt +0 -0
- {airia-0.1.23 → airia-0.1.25}/setup.cfg +0 -0
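At a glance, 0.1.25 introduces a Library client (`airia/client/library/`, with sync, async, and shared base implementations) exposing a `get_models` endpoint, rewrites the pipeline-execution clients to add an `execute_temporary_assistant` method, and removes the `debug`-specific `execute_pipeline` overload together with `PipelineExecutionDebugResponse` (the `debug` flag survives as a plain `bool`).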
--- airia-0.1.23/airia/client/async_client.py
+++ airia-0.1.25/airia/client/async_client.py
@@ -14,6 +14,7 @@ from .base_client import AiriaBaseClient
 from .conversations import AsyncConversations
 from .data_vector_search import AsyncDataVectorSearch
 from .deployments import AsyncDeployments
+from .library import AsyncLibrary
 from .pipeline_execution import AsyncPipelineExecution
 from .pipelines_config import AsyncPipelinesConfig
 from .project import AsyncProject
@@ -68,6 +69,7 @@ class AiriaAsyncClient(AiriaBaseClient):
         self.store = AsyncStore(self._request_handler)
         self.deployments = AsyncDeployments(self._request_handler)
         self.data_vector_search = AsyncDataVectorSearch(self._request_handler)
+        self.library = AsyncLibrary(self._request_handler)
 
     @classmethod
     def with_openai_gateway(
--- /dev/null
+++ airia-0.1.25/airia/client/library/async_library.py
@@ -0,0 +1,100 @@
+from typing import Optional
+
+from ...types.api.library import GetLibraryModelsResponse
+from .._request_handler import AsyncRequestHandler
+from .base_library import BaseLibrary
+
+
+class AsyncLibrary(BaseLibrary):
+    """Asynchronous library client for interacting with Airia Library endpoints."""
+
+    def __init__(self, request_handler: AsyncRequestHandler):
+        super().__init__(request_handler)
+
+    async def get_models(
+        self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[str] = None,
+        search: Optional[str] = None,
+        providers: Optional[str] = None,
+        categories: Optional[str] = None,
+        licenses: Optional[str] = None,
+        industries: Optional[str] = None,
+        authors: Optional[str] = None,
+        is_open_source: Optional[bool] = None,
+        chat_specialized: Optional[bool] = None,
+        industry: Optional[str] = None,
+        commercial_use: Optional[bool] = None,
+        certifications: Optional[str] = None,
+        has_tool_support: Optional[bool] = None,
+        has_stream_support: Optional[bool] = None,
+        correlation_id: Optional[str] = None,
+    ) -> GetLibraryModelsResponse:
+        """
+        Asynchronously retrieve models from the Airia Library with optional filtering and pagination.
+
+        Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: Direction of the sort, either "ASC" for ascending or "DESC" for descending
+            search: An optional search string
+            providers: Library service provider type filter
+            categories: Library service model category filter
+            licenses: Library service model license filter
+            industries: Optional list of industries to filter by
+            authors: Optional list of authors to filter by
+            is_open_source: Optional flag to filter by open source status
+            chat_specialized: Optional flag to filter by chat specialized status
+            industry: Optional flag to filter by Industry
+            commercial_use: Optional flag to filter by Commercial Use
+            certifications: Optional list of certifications to filter by
+            has_tool_support: Optional flag to filter by the models support for tools
+            has_stream_support: Optional flag to filter by the models support for response streaming
+            correlation_id: Optional correlation ID for request tracing. If not provided,
+                one will be generated automatically.
+
+        Returns:
+            GetLibraryModelsResponse: Response containing the list of models and total count
+
+        Raises:
+            AiriaAPIError: If the API request fails with details about the error.
+            aiohttp.ClientError: For other request-related errors.
+
+        Example:
+            ```python
+            client = AiriaAsyncClient(api_key="your_api_key")
+            response = await client.library.get_models(
+                search="gpt",
+                providers="OpenAI",
+                page_size=10
+            )
+            for model in response.models:
+                print(f"Model: {model.name} (ID: {model.id})")
+            ```
+        """
+        request_data = self._prepare_get_models_request(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            search=search,
+            providers=providers,
+            categories=categories,
+            licenses=licenses,
+            industries=industries,
+            authors=authors,
+            is_open_source=is_open_source,
+            chat_specialized=chat_specialized,
+            industry=industry,
+            commercial_use=commercial_use,
+            certifications=certifications,
+            has_tool_support=has_tool_support,
+            has_stream_support=has_stream_support,
+            correlation_id=correlation_id,
+        )
+
+        resp = await self._request_handler.make_request("GET", request_data)
+        return GetLibraryModelsResponse(**resp)
--- /dev/null
+++ airia-0.1.25/airia/client/library/base_library.py
@@ -0,0 +1,110 @@
+from typing import Optional, Union
+from urllib.parse import urljoin
+
+from ...types._api_version import ApiVersion
+from .._request_handler import AsyncRequestHandler, RequestHandler
+
+
+class BaseLibrary:
+    """Base library client with common functionality for sync and async implementations."""
+
+    def __init__(self, request_handler: Union[RequestHandler, AsyncRequestHandler]):
+        self._request_handler = request_handler
+
+    def _prepare_get_models_request(
+        self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[str] = None,
+        search: Optional[str] = None,
+        providers: Optional[str] = None,
+        categories: Optional[str] = None,
+        licenses: Optional[str] = None,
+        industries: Optional[str] = None,
+        authors: Optional[str] = None,
+        is_open_source: Optional[bool] = None,
+        chat_specialized: Optional[bool] = None,
+        industry: Optional[str] = None,
+        commercial_use: Optional[bool] = None,
+        certifications: Optional[str] = None,
+        has_tool_support: Optional[bool] = None,
+        has_stream_support: Optional[bool] = None,
+        correlation_id: Optional[str] = None,
+    ):
+        """
+        Prepare request data for the get_models endpoint.
+
+        This internal method constructs the URL and parameters for library models
+        requests, validating parameters and preparing all request components.
+
+        Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: Direction of the sort, either "ASC" for ascending or "DESC" for descending
+            search: An optional search string
+            providers: Library service provider type filter
+            categories: Library service model category filter
+            licenses: Library service model license filter
+            industries: Optional list of industries to filter by
+            authors: Optional list of authors to filter by
+            is_open_source: Optional flag to filter by open source status
+            chat_specialized: Optional flag to filter by chat specialized status
+            industry: Optional flag to filter by Industry
+            commercial_use: Optional flag to filter by Commercial Use
+            certifications: Optional list of certifications to filter by
+            has_tool_support: Optional flag to filter by the models support for tools
+            has_stream_support: Optional flag to filter by the models support for response streaming
+            correlation_id: Optional correlation ID for request tracing
+
+        Returns:
+            RequestData: Prepared request data for the library models endpoint
+        """
+        url = urljoin(
+            self._request_handler.base_url,
+            f"api/marketplace/{ApiVersion.V1.value}/Library/models",
+        )
+
+        # Build query parameters, excluding None values
+        params = {}
+        if page_number is not None:
+            params["PageNumber"] = page_number
+        if page_size is not None:
+            params["PageSize"] = page_size
+        if sort_by is not None:
+            params["SortBy"] = sort_by
+        if sort_direction is not None:
+            params["SortDirection"] = sort_direction
+        if search is not None:
+            params["search"] = search
+        if providers is not None:
+            params["Providers"] = providers
+        if categories is not None:
+            params["Categories"] = categories
+        if licenses is not None:
+            params["Licenses"] = licenses
+        if industries is not None:
+            params["Industries"] = industries
+        if authors is not None:
+            params["Authors"] = authors
+        if is_open_source is not None:
+            params["IsOpenSource"] = is_open_source
+        if chat_specialized is not None:
+            params["ChatSpecialized"] = chat_specialized
+        if industry is not None:
+            params["Industry"] = industry
+        if commercial_use is not None:
+            params["CommercialUse"] = commercial_use
+        if certifications is not None:
+            params["Certifications"] = certifications
+        if has_tool_support is not None:
+            params["HasToolSupport"] = has_tool_support
+        if has_stream_support is not None:
+            params["HasStreamSupport"] = has_stream_support
+
+        request_data = self._request_handler.prepare_request(
+            url=url, params=params, correlation_id=correlation_id
+        )
+
+        return request_data
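The detail worth pulling out of `_prepare_get_models_request` is its naming convention: snake_case keyword arguments map to PascalCase query parameters (with `search` as the lone lower-case exception), and `None`-valued filters are omitted entirely. A minimal sketch of the resulting request, assuming `ApiVersion.V1.value` is `"v1"` (the enum isn't shown in this diff) and using a made-up base URL:

```python
from urllib.parse import urlencode, urljoin

# Hypothetical base URL; in the SDK this comes from the request handler.
base_url = "https://api.airia.example/"
# Assumes ApiVersion.V1.value == "v1".
url = urljoin(base_url, "api/marketplace/v1/Library/models")

# Mirrors the mapping above: PascalCase names, `search` stays lower-case,
# and None-valued filters are dropped before the request is prepared.
filters = {"PageSize": 10, "Providers": "OpenAI", "search": "gpt", "Licenses": None}
params = {name: value for name, value in filters.items() if value is not None}

print(f"GET {url}?{urlencode(params)}")
# GET https://api.airia.example/api/marketplace/v1/Library/models?PageSize=10&Providers=OpenAI&search=gpt
```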
--- /dev/null
+++ airia-0.1.25/airia/client/library/sync_library.py
@@ -0,0 +1,100 @@
+from typing import Optional
+
+from ...types.api.library import GetLibraryModelsResponse
+from .._request_handler import RequestHandler
+from .base_library import BaseLibrary
+
+
+class Library(BaseLibrary):
+    """Synchronous library client for interacting with Airia Library endpoints."""
+
+    def __init__(self, request_handler: RequestHandler):
+        super().__init__(request_handler)
+
+    def get_models(
+        self,
+        page_number: Optional[int] = None,
+        page_size: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_direction: Optional[str] = None,
+        search: Optional[str] = None,
+        providers: Optional[str] = None,
+        categories: Optional[str] = None,
+        licenses: Optional[str] = None,
+        industries: Optional[str] = None,
+        authors: Optional[str] = None,
+        is_open_source: Optional[bool] = None,
+        chat_specialized: Optional[bool] = None,
+        industry: Optional[str] = None,
+        commercial_use: Optional[bool] = None,
+        certifications: Optional[str] = None,
+        has_tool_support: Optional[bool] = None,
+        has_stream_support: Optional[bool] = None,
+        correlation_id: Optional[str] = None,
+    ) -> GetLibraryModelsResponse:
+        """
+        Retrieve models from the Airia Library with optional filtering and pagination.
+
+        Args:
+            page_number: The page number to be fetched
+            page_size: The number of items per page
+            sort_by: Property to sort by
+            sort_direction: Direction of the sort, either "ASC" for ascending or "DESC" for descending
+            search: An optional search string
+            providers: Library service provider type filter
+            categories: Library service model category filter
+            licenses: Library service model license filter
+            industries: Optional list of industries to filter by
+            authors: Optional list of authors to filter by
+            is_open_source: Optional flag to filter by open source status
+            chat_specialized: Optional flag to filter by chat specialized status
+            industry: Optional flag to filter by Industry
+            commercial_use: Optional flag to filter by Commercial Use
+            certifications: Optional list of certifications to filter by
+            has_tool_support: Optional flag to filter by the models support for tools
+            has_stream_support: Optional flag to filter by the models support for response streaming
+            correlation_id: Optional correlation ID for request tracing. If not provided,
+                one will be generated automatically.
+
+        Returns:
+            GetLibraryModelsResponse: Response containing the list of models and total count
+
+        Raises:
+            AiriaAPIError: If the API request fails with details about the error.
+            requests.RequestException: For other request-related errors.
+
+        Example:
+            ```python
+            client = AiriaClient(api_key="your_api_key")
+            response = client.library.get_models(
+                search="gpt",
+                providers="OpenAI",
+                page_size=10
+            )
+            for model in response.models:
+                print(f"Model: {model.name} (ID: {model.id})")
+            ```
+        """
+        request_data = self._prepare_get_models_request(
+            page_number=page_number,
+            page_size=page_size,
+            sort_by=sort_by,
+            sort_direction=sort_direction,
+            search=search,
+            providers=providers,
+            categories=categories,
+            licenses=licenses,
+            industries=industries,
+            authors=authors,
+            is_open_source=is_open_source,
+            chat_specialized=chat_specialized,
+            industry=industry,
+            commercial_use=commercial_use,
+            certifications=certifications,
+            has_tool_support=has_tool_support,
+            has_stream_support=has_stream_support,
+            correlation_id=correlation_id,
+        )
+
+        resp = self._request_handler.make_request("GET", request_data)
+        return GetLibraryModelsResponse(**resp)
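Since `GetLibraryModelsResponse` carries the model list plus a total count, the sync client lends itself to a simple pagination loop. A hedged sketch: `response.models` appears in the docstring examples above, but the total-count field is not named in this diff, so this loop stops on a short page instead:

```python
from airia import AiriaClient  # assumed import path for the sync client

client = AiriaClient(api_key="your_api_key")

page_number, page_size = 1, 50
while True:
    response = client.library.get_models(page_number=page_number, page_size=page_size)
    for model in response.models:
        print(f"{model.name} ({model.id})")
    if len(response.models) < page_size:
        break  # short page: nothing left to fetch
    page_number += 1
```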
--- airia-0.1.23/airia/client/pipeline_execution/async_pipeline_execution.py
+++ airia-0.1.25/airia/client/pipeline_execution/async_pipeline_execution.py
@@ -3,8 +3,9 @@ from typing import Any, Dict, List, Literal, Optional, Union, overload
 from ...types._api_version import ApiVersion
 from ...types.api.pipeline_execution import (
     PipelineExecutionAsyncStreamedResponse,
-    PipelineExecutionDebugResponse,
     PipelineExecutionResponse,
+    TemporaryAssistantAsyncStreamedResponse,
+    TemporaryAssistantResponse,
 )
 from .._request_handler import AsyncRequestHandler
 from .base_pipeline_execution import BasePipelineExecution
@@ -64,7 +65,7 @@ class AsyncPipelineExecution(BasePipelineExecution):
         self,
         pipeline_id: str,
         user_input: str,
-        debug: Literal[False] = False,
+        debug: bool = False,
         user_id: Optional[str] = None,
         conversation_id: Optional[str] = None,
         async_output: Literal[False] = False,
@@ -82,29 +83,6 @@ class AsyncPipelineExecution(BasePipelineExecution):
         correlation_id: Optional[str] = None,
     ) -> PipelineExecutionResponse: ...
 
-    @overload
-    async def execute_pipeline(
-        self,
-        pipeline_id: str,
-        user_input: str,
-        debug: Literal[True] = True,
-        user_id: Optional[str] = None,
-        conversation_id: Optional[str] = None,
-        async_output: Literal[False] = False,
-        include_tools_response: bool = False,
-        images: Optional[List[str]] = None,
-        files: Optional[List[str]] = None,
-        data_source_folders: Optional[Dict[str, Any]] = None,
-        data_source_files: Optional[Dict[str, Any]] = None,
-        in_memory_messages: Optional[List[Dict[str, str]]] = None,
-        current_date_time: Optional[str] = None,
-        save_history: bool = True,
-        additional_info: Optional[List[Any]] = None,
-        prompt_variables: Optional[Dict[str, Any]] = None,
-        voice_enabled: bool = False,
-        correlation_id: Optional[str] = None,
-    ) -> PipelineExecutionDebugResponse: ...
-
     @overload
     async def execute_pipeline(
         self,
@@ -149,7 +127,6 @@ class AsyncPipelineExecution(BasePipelineExecution):
         voice_enabled: bool = False,
         correlation_id: Optional[str] = None,
     ) -> Union[
-        PipelineExecutionDebugResponse,
         PipelineExecutionResponse,
         PipelineExecutionAsyncStreamedResponse,
     ]:
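The net effect of these three hunks on callers: `PipelineExecutionDebugResponse` no longer exists, and `debug` is an ordinary `bool` that does not change the return type. A hedged before/after sketch (the 0.1.23 behavior is inferred from the removed overload):

```python
import asyncio

from airia import AiriaAsyncClient  # assumed import path

async def main() -> None:
    client = AiriaAsyncClient(api_key="your_api_key")
    # In 0.1.23 this call matched a dedicated overload and was typed as
    # PipelineExecutionDebugResponse; both the overload and that type are removed.
    resp = await client.pipeline_execution.execute_pipeline(
        pipeline_id="your-pipeline-id",  # hypothetical ID
        user_input="hello",
        debug=True,  # still accepted in 0.1.25, but now a plain bool
    )
    print(resp)  # typed as PipelineExecutionResponse in 0.1.25

asyncio.run(main())
```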
@@ -233,8 +210,209 @@
         )
 
         if not async_output:
-
-            return PipelineExecutionResponse(**resp)
-            return PipelineExecutionDebugResponse(**resp)
+            return PipelineExecutionResponse(**resp)
 
         return PipelineExecutionAsyncStreamedResponse(stream=resp)
+
+    @overload
+    async def execute_temporary_assistant(
+        self,
+        model_parameters: Dict[str, Any],
+        user_input: str,
+        assistant_name: str = "",
+        prompt_parameters: Dict[str, Any] = {"prompt": ""},
+        async_output: Literal[False] = False,
+        include_tools_response: bool = False,
+        save_history: bool = True,
+        voice_enabled: bool = False,
+        debug: bool = False,
+        additional_info: Optional[List[Any]] = None,
+        conversation_id: Optional[str] = None,
+        current_date_time: Optional[str] = None,
+        data_source_files: Optional[Dict[str, List[str]]] = None,
+        data_source_folders: Optional[Dict[str, List[str]]] = None,
+        data_store_parameters: Optional[Dict[str, Any]] = None,
+        external_user_id: Optional[str] = None,
+        files: Optional[List[str]] = None,
+        images: Optional[List[str]] = None,
+        in_memory_messages: Optional[List[Dict[str, Any]]] = None,
+        output_configuration: Optional[Dict[str, Any]] = None,
+        prompt_variables: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        user_input_id: Optional[str] = None,
+        variables: Optional[Dict[str, Any]] = None,
+        correlation_id: Optional[str] = None,
+    ) -> TemporaryAssistantResponse: ...
+
+    @overload
+    async def execute_temporary_assistant(
+        self,
+        model_parameters: Dict[str, Any],
+        user_input: str,
+        assistant_name: str = "",
+        prompt_parameters: Dict[str, Any] = {"prompt": ""},
+        async_output: Literal[True] = True,
+        include_tools_response: bool = False,
+        save_history: bool = True,
+        voice_enabled: bool = False,
+        debug: bool = False,
+        additional_info: Optional[List[Any]] = None,
+        conversation_id: Optional[str] = None,
+        current_date_time: Optional[str] = None,
+        data_source_files: Optional[Dict[str, List[str]]] = None,
+        data_source_folders: Optional[Dict[str, List[str]]] = None,
+        data_store_parameters: Optional[Dict[str, Any]] = None,
+        external_user_id: Optional[str] = None,
+        files: Optional[List[str]] = None,
+        images: Optional[List[str]] = None,
+        in_memory_messages: Optional[List[Dict[str, Any]]] = None,
+        output_configuration: Optional[Dict[str, Any]] = None,
+        prompt_variables: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        user_input_id: Optional[str] = None,
+        variables: Optional[Dict[str, Any]] = None,
+        correlation_id: Optional[str] = None,
+    ) -> TemporaryAssistantAsyncStreamedResponse: ...
+
+    async def execute_temporary_assistant(
+        self,
+        model_parameters: Dict[str, Any],
+        user_input: str,
+        assistant_name: str = "",
+        prompt_parameters: Dict[str, Any] = {"prompt": ""},
+        async_output: bool = False,
+        include_tools_response: bool = False,
+        save_history: bool = True,
+        voice_enabled: bool = False,
+        debug: bool = False,
+        additional_info: Optional[List[Any]] = None,
+        conversation_id: Optional[str] = None,
+        current_date_time: Optional[str] = None,
+        data_source_files: Optional[Dict[str, List[str]]] = None,
+        data_source_folders: Optional[Dict[str, List[str]]] = None,
+        data_store_parameters: Optional[Dict[str, Any]] = None,
+        external_user_id: Optional[str] = None,
+        files: Optional[List[str]] = None,
+        images: Optional[List[str]] = None,
+        in_memory_messages: Optional[List[Dict[str, Any]]] = None,
+        output_configuration: Optional[Dict[str, Any]] = None,
+        prompt_variables: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        user_input_id: Optional[str] = None,
+        variables: Optional[Dict[str, Any]] = None,
+        correlation_id: Optional[str] = None,
+    ) -> Union[
+        TemporaryAssistantResponse,
+        TemporaryAssistantAsyncStreamedResponse,
+    ]:
+        """
+        Execute a temporary assistant with the provided parameters asynchronously.
+
+        This method creates and executes a temporary AI assistant with custom configuration,
+        allowing for flexible assistant behavior without creating a persistent pipeline.
+
+        Args:
+            model_parameters: Model parameters (required). Must include libraryModelId,
+                projectModelId, modelIdentifierType, and modelIsAvailableinProject
+            user_input: User input text (required)
+            assistant_name: Name of the temporary assistant. Default is ""
+            prompt_parameters: Parameters for prompt configuration. Default is {"prompt": ""}
+            async_output: Whether to stream the response. Default is False
+            include_tools_response: Whether to return initial LLM tool result. Default is False
+            save_history: Whether to save input and output to conversation history. Default is True
+            voice_enabled: Whether voice output is enabled. Default is False
+            debug: Whether debug mode execution is enabled. Default is False
+            additional_info: Optional additional information array
+            conversation_id: Optional conversation identifier
+            current_date_time: Optional current date and time in ISO format
+            data_source_files: Optional dictionary mapping data source GUIDs to file GUID arrays
+            data_source_folders: Optional dictionary mapping data source GUIDs to folder GUID arrays
+            data_store_parameters: Optional DataStore parameters
+            external_user_id: Optional external user identifier
+            files: Optional list of file identifiers
+            images: Optional list of image identifiers
+            in_memory_messages: Optional list of in-memory messages
+            output_configuration: Optional output configuration
+            prompt_variables: Optional prompt variables dictionary
+            user_id: Optional user identifier
+            user_input_id: Optional unique identifier for user input
+            variables: Optional variables dictionary
+            correlation_id: Optional correlation ID for request tracing. If not provided,
+                one will be generated automatically.
+
+        Returns:
+            Response containing the result of the temporary assistant execution.
+            Returns different response types based on the result type discriminator.
+
+        Raises:
+            AiriaAPIError: If the API request fails with details about the error.
+            aiohttp.ClientError: For other request-related errors.
+            ValueError: If required parameters are missing or invalid.
+
+        Example:
+            ```python
+            client = AiriaAsyncClient(api_key="your_api_key")
+            response = await client.pipeline_execution.execute_temporary_assistant(
+                model_parameters={
+                    "libraryModelId": "library-model-id",
+                    "projectModelId": None,
+                    "modelIdentifierType": "Library",
+                    "modelIsAvailableinProject": True,
+                },
+                user_input="say double bubble bath ten times fast",
+            )
+            print(response.result)
+            ```
+        """
+        # Validate required parameters
+        if not user_input:
+            raise ValueError("user_input cannot be empty")
+
+        if not model_parameters:
+            raise ValueError("model_parameters cannot be empty")
+
+        # Handle file and image uploads (local files are uploaded, URLs are passed through)
+        image_urls = None
+        file_urls = None
+
+        if images or files:
+            file_urls, image_urls = await self._upload_files(files or [], images or [])
+
+        request_data = self._pre_execute_temporary_assistant(
+            model_parameters=model_parameters,
+            user_input=user_input,
+            assistant_name=assistant_name,
+            prompt_parameters=prompt_parameters,
+            async_output=async_output,
+            include_tools_response=include_tools_response,
+            save_history=save_history,
+            voice_enabled=voice_enabled,
+            debug=debug,
+            additional_info=additional_info,
+            conversation_id=conversation_id,
+            current_date_time=current_date_time,
+            data_source_files=data_source_files,
+            data_source_folders=data_source_folders,
+            data_store_parameters=data_store_parameters,
+            external_user_id=external_user_id,
+            files=file_urls,
+            images=image_urls,
+            in_memory_messages=in_memory_messages,
+            output_configuration=output_configuration,
+            prompt_variables=prompt_variables,
+            user_id=user_id,
+            user_input_id=user_input_id,
+            variables=variables,
+            correlation_id=correlation_id,
+        )
+
+        resp = (
+            self._request_handler.make_request_stream("POST", request_data)
+            if async_output
+            else await self._request_handler.make_request("POST", request_data)
+        )
+
+        if async_output:
+            return TemporaryAssistantAsyncStreamedResponse(stream=resp)
+
+        return TemporaryAssistantResponse(**resp)