usecortex-ai 0.3.6__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- usecortex_ai/__init__.py +82 -70
- usecortex_ai/client.py +25 -23
- usecortex_ai/dashboard/client.py +448 -0
- usecortex_ai/{user_memory → dashboard}/raw_client.py +371 -530
- usecortex_ai/embeddings/client.py +229 -102
- usecortex_ai/embeddings/raw_client.py +323 -211
- usecortex_ai/errors/__init__.py +2 -0
- usecortex_ai/errors/bad_request_error.py +1 -2
- usecortex_ai/errors/forbidden_error.py +1 -2
- usecortex_ai/errors/internal_server_error.py +1 -2
- usecortex_ai/errors/not_found_error.py +1 -2
- usecortex_ai/errors/service_unavailable_error.py +1 -2
- usecortex_ai/errors/too_many_requests_error.py +11 -0
- usecortex_ai/errors/unauthorized_error.py +1 -2
- usecortex_ai/fetch/client.py +350 -29
- usecortex_ai/fetch/raw_client.py +919 -65
- usecortex_ai/raw_client.py +8 -2
- usecortex_ai/search/client.py +313 -257
- usecortex_ai/search/raw_client.py +463 -344
- usecortex_ai/search/types/alpha.py +1 -1
- usecortex_ai/sources/client.py +29 -216
- usecortex_ai/sources/raw_client.py +51 -589
- usecortex_ai/tenant/client.py +155 -118
- usecortex_ai/tenant/raw_client.py +227 -350
- usecortex_ai/types/__init__.py +76 -66
- usecortex_ai/types/add_memory_response.py +39 -0
- usecortex_ai/types/{scored_triplet_response.py → api_key_info.py} +16 -12
- usecortex_ai/types/app_sources_upload_data.py +15 -6
- usecortex_ai/types/{file_upload_result.py → collection_stats.py} +5 -5
- usecortex_ai/types/custom_property_definition.py +75 -0
- usecortex_ai/types/dashboard_apis_response.py +33 -0
- usecortex_ai/types/dashboard_sources_response.py +33 -0
- usecortex_ai/types/dashboard_tenants_response.py +33 -0
- usecortex_ai/types/{list_sources_response.py → delete_result.py} +10 -7
- usecortex_ai/types/delete_user_memory_response.py +1 -1
- usecortex_ai/types/entity.py +4 -4
- usecortex_ai/types/fetch_mode.py +5 -0
- usecortex_ai/types/{relations.py → forceful_relations_payload.py} +4 -4
- usecortex_ai/types/graph_context.py +26 -0
- usecortex_ai/types/{delete_sources.py → infra.py} +4 -3
- usecortex_ai/types/{fetch_content_data.py → insert_result.py} +12 -8
- usecortex_ai/types/memory_item.py +88 -0
- usecortex_ai/types/memory_result_item.py +47 -0
- usecortex_ai/types/milvus_data_type.py +21 -0
- usecortex_ai/types/path_triplet.py +3 -18
- usecortex_ai/types/processing_status.py +3 -2
- usecortex_ai/types/processing_status_indexing_status.py +7 -0
- usecortex_ai/types/qn_a_search_response.py +49 -0
- usecortex_ai/types/{retrieve_response.py → raw_embedding_document.py} +11 -8
- usecortex_ai/types/raw_embedding_search_result.py +47 -0
- usecortex_ai/types/{user_memory.py → raw_embedding_vector.py} +6 -6
- usecortex_ai/types/relation_evidence.py +24 -5
- usecortex_ai/types/retrieval_result.py +30 -0
- usecortex_ai/types/scored_path_response.py +5 -19
- usecortex_ai/types/search_mode.py +5 -0
- usecortex_ai/types/{batch_upload_data.py → source_delete_response.py} +8 -8
- usecortex_ai/types/{list_user_memories_response.py → source_delete_result_item.py} +11 -7
- usecortex_ai/types/source_fetch_response.py +70 -0
- usecortex_ai/types/{graph_relations_response.py → source_graph_relations_response.py} +3 -3
- usecortex_ai/types/{single_upload_data.py → source_list_response.py} +7 -10
- usecortex_ai/types/source_model.py +11 -1
- usecortex_ai/types/source_status.py +5 -0
- usecortex_ai/types/source_upload_response.py +35 -0
- usecortex_ai/types/source_upload_result_item.py +38 -0
- usecortex_ai/types/supported_llm_providers.py +5 -0
- usecortex_ai/types/{embeddings_create_collection_data.py → tenant_create_response.py} +9 -7
- usecortex_ai/types/{webpage_scrape_request.py → tenant_info.py} +10 -5
- usecortex_ai/types/tenant_metadata_schema_info.py +36 -0
- usecortex_ai/types/{tenant_create_data.py → tenant_stats_response.py} +9 -8
- usecortex_ai/types/{triple_with_evidence.py → triplet_with_evidence.py} +5 -1
- usecortex_ai/types/user_assistant_pair.py +4 -0
- usecortex_ai/types/{search_chunk.py → vector_store_chunk.py} +5 -11
- usecortex_ai/upload/__init__.py +3 -0
- usecortex_ai/upload/client.py +233 -1937
- usecortex_ai/upload/raw_client.py +364 -4401
- usecortex_ai/upload/types/__init__.py +7 -0
- usecortex_ai/upload/types/body_upload_app_ingestion_upload_app_post_app_sources.py +7 -0
- {usecortex_ai-0.3.6.dist-info → usecortex_ai-0.5.0.dist-info}/METADATA +2 -2
- usecortex_ai-0.5.0.dist-info/RECORD +114 -0
- {usecortex_ai-0.3.6.dist-info → usecortex_ai-0.5.0.dist-info}/WHEEL +1 -1
- {usecortex_ai-0.3.6.dist-info → usecortex_ai-0.5.0.dist-info}/licenses/LICENSE +21 -21
- {usecortex_ai-0.3.6.dist-info → usecortex_ai-0.5.0.dist-info}/top_level.txt +0 -0
- usecortex_ai/document/client.py +0 -139
- usecortex_ai/document/raw_client.py +0 -312
- usecortex_ai/types/add_user_memory_response.py +0 -41
- usecortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py +0 -17
- usecortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py +0 -17
- usecortex_ai/types/chunk_graph_relations_response.py +0 -33
- usecortex_ai/types/delete_memory_request.py +0 -32
- usecortex_ai/types/delete_sub_tenant_data.py +0 -42
- usecortex_ai/types/embeddings_delete_data.py +0 -37
- usecortex_ai/types/embeddings_get_data.py +0 -37
- usecortex_ai/types/embeddings_search_data.py +0 -37
- usecortex_ai/types/extended_context.py +0 -17
- usecortex_ai/types/markdown_upload_request.py +0 -41
- usecortex_ai/types/related_chunk.py +0 -22
- usecortex_ai/types/retrieve_user_memory_response.py +0 -38
- usecortex_ai/types/source.py +0 -52
- usecortex_ai/types/sub_tenant_ids_data.py +0 -47
- usecortex_ai/types/tenant_stats.py +0 -42
- usecortex_ai/user/__init__.py +0 -4
- usecortex_ai/user/client.py +0 -145
- usecortex_ai/user/raw_client.py +0 -316
- usecortex_ai/user_memory/__init__.py +0 -4
- usecortex_ai/user_memory/client.py +0 -515
- usecortex_ai-0.3.6.dist-info/RECORD +0 -112
- /usecortex_ai/{document → dashboard}/__init__.py +0 -0
usecortex_ai/errors/__init__.py
CHANGED

@@ -7,6 +7,7 @@ from .forbidden_error import ForbiddenError
 from .internal_server_error import InternalServerError
 from .not_found_error import NotFoundError
 from .service_unavailable_error import ServiceUnavailableError
+from .too_many_requests_error import TooManyRequestsError
 from .unauthorized_error import UnauthorizedError
 from .unprocessable_entity_error import UnprocessableEntityError

@@ -16,6 +17,7 @@ __all__ = [
     "InternalServerError",
     "NotFoundError",
     "ServiceUnavailableError",
+    "TooManyRequestsError",
     "UnauthorizedError",
     "UnprocessableEntityError",
 ]

usecortex_ai/errors/bad_request_error.py
CHANGED

@@ -3,9 +3,8 @@
 import typing

 from ..core.api_error import ApiError
-from ..types.actual_error_response import ActualErrorResponse


 class BadRequestError(ApiError):
-    def __init__(self, body:
+    def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
         super().__init__(status_code=400, headers=headers, body=body)

usecortex_ai/errors/forbidden_error.py
CHANGED

@@ -3,9 +3,8 @@
 import typing

 from ..core.api_error import ApiError
-from ..types.actual_error_response import ActualErrorResponse


 class ForbiddenError(ApiError):
-    def __init__(self, body:
+    def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
         super().__init__(status_code=403, headers=headers, body=body)

usecortex_ai/errors/internal_server_error.py
CHANGED

@@ -3,9 +3,8 @@
 import typing

 from ..core.api_error import ApiError
-from ..types.actual_error_response import ActualErrorResponse


 class InternalServerError(ApiError):
-    def __init__(self, body:
+    def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
         super().__init__(status_code=500, headers=headers, body=body)

usecortex_ai/errors/not_found_error.py
CHANGED

@@ -3,9 +3,8 @@
 import typing

 from ..core.api_error import ApiError
-from ..types.actual_error_response import ActualErrorResponse


 class NotFoundError(ApiError):
-    def __init__(self, body:
+    def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
         super().__init__(status_code=404, headers=headers, body=body)

usecortex_ai/errors/service_unavailable_error.py
CHANGED

@@ -3,9 +3,8 @@
 import typing

 from ..core.api_error import ApiError
-from ..types.actual_error_response import ActualErrorResponse


 class ServiceUnavailableError(ApiError):
-    def __init__(self, body:
+    def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
         super().__init__(status_code=503, headers=headers, body=body)

usecortex_ai/errors/too_many_requests_error.py
ADDED

@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ..core.api_error import ApiError
+from ..types.actual_error_response import ActualErrorResponse
+
+
+class TooManyRequestsError(ApiError):
+    def __init__(self, body: ActualErrorResponse, headers: typing.Optional[typing.Dict[str, str]] = None):
+        super().__init__(status_code=429, headers=headers, body=body)

usecortex_ai/errors/unauthorized_error.py
CHANGED

@@ -3,9 +3,8 @@
 import typing

 from ..core.api_error import ApiError
-from ..types.actual_error_response import ActualErrorResponse


 class UnauthorizedError(ApiError):
-    def __init__(self, body:
+    def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
         super().__init__(status_code=401, headers=headers, body=body)
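
Taken together, the error changes in 0.5.0 add a dedicated 429 class (TooManyRequestsError, carrying an ActualErrorResponse body) while the pre-existing error classes now accept an untyped optional body. Below is a minimal retry sketch against that surface, assuming the package imports as usecortex_ai (the generated docstrings elsewhere in this diff print the distribution name usecortex-ai, which is not a valid module path) and using the fetch.sources call introduced in the next file as the example request:

    import time

    from usecortex_ai import CortexAI  # assumed importable module name; distribution is usecortex-ai
    from usecortex_ai.errors import TooManyRequestsError

    client = CortexAI(token="YOUR_TOKEN")

    def sources_with_backoff(tenant_id: str, attempts: int = 3):
        # Retry only on the new 429 error; other ApiError subclasses still propagate.
        for attempt in range(attempts):
            try:
                return client.fetch.sources(tenant_id=tenant_id)
            except TooManyRequestsError:
                if attempt == attempts - 1:
                    raise
                time.sleep(2 ** attempt)  # simple exponential backoff

    sources_with_backoff("tenant_id")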
usecortex_ai/fetch/client.py
CHANGED

@@ -4,7 +4,10 @@ import typing

 from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ..core.request_options import RequestOptions
-from ..types.
+from ..types.fetch_mode import FetchMode
+from ..types.source_fetch_response import SourceFetchResponse
+from ..types.source_graph_relations_response import SourceGraphRelationsResponse
+from ..types.source_list_response import SourceListResponse
 from .raw_client import AsyncRawFetchClient, RawFetchClient

 # this is used as the default value for optional parameters
@@ -26,35 +29,188 @@ class FetchClient:
         """
         return self._raw_client

-    def
+    def sources(
         self,
         *,
-        file_id: str,
-        file_type: str,
         tenant_id: str,
-
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SourceListResponse:
+        """
+        Retrieve all sources for a specific tenant and subtenant combination.
+
+        Use this endpoint to fetch a complete list of all sources associated
+        with your tenant. This includes documents, files, and other content
+        you've uploaded for processing.
+
+        You can optionally specify a sub-tenant to narrow down the results to
+        sources within that specific sub-tenant scope.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SourceListResponse
+            Successful Response
+
+        Examples
+        --------
+        from usecortex-ai import CortexAI
+
+        client = CortexAI(token="YOUR_TOKEN", )
+        client.fetch.sources(tenant_id='tenant_id', )
+        """
+        _response = self._raw_client.sources(
+            tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    def source_by_ids(
+        self,
+        *,
+        source_ids: typing.Sequence[str],
+        tenant_id: str,
         sub_tenant_id: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
-    ) ->
+    ) -> SourceListResponse:
         """
+        Retrieve specific sources by their IDs.
+
+        Use this endpoint to fetch one or more sources by providing their
+        unique identifiers. This is useful when you need detailed information
+        about specific documents or content you've previously uploaded.
+
+        Provide the source IDs in the request body along with your tenant
+        information to get the exact sources you need.
+
         Parameters
         ----------
-
+        source_ids : typing.Sequence[str]
+            List of source IDs to fetch.
+
+        tenant_id : str
+            Tenant ID

-
+        sub_tenant_id : typing.Optional[str]
+            Sub-tenant ID

+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SourceListResponse
+            Successful Response
+
+        Examples
+        --------
+        from usecortex-ai import CortexAI
+
+        client = CortexAI(token="YOUR_TOKEN", )
+        client.fetch.source_by_ids(source_ids=['source_ids'], tenant_id='tenant_id', )
+        """
+        _response = self._raw_client.source_by_ids(
+            source_ids=source_ids, tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    def graph_relations_by_source_id(
+        self,
+        *,
+        source_id: str,
+        tenant_id: typing.Optional[str] = None,
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SourceGraphRelationsResponse:
+        """
+        Retrieve relations for a specific source.
+
+        Use this endpoint to fetch all relations associated with a specific source.
+        This is useful when you need to understand the relationships between entities within a source.
+
+        Provide the source ID in the request body along with your tenant information to get the relations for that source.
+
+        Parameters
+        ----------
+        source_id : str
+            The source ID to fetch relations for
+
+        tenant_id : typing.Optional[str]
+            Unique identifier for the tenant/organization
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SourceGraphRelationsResponse
+            Successful Response
+
+        Examples
+        --------
+        from usecortex-ai import CortexAI
+
+        client = CortexAI(token="YOUR_TOKEN", )
+        client.fetch.graph_relations_by_source_id(source_id='source_id', )
+        """
+        _response = self._raw_client.graph_relations_by_source_id(
+            source_id=source_id, tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    def source_content(
+        self,
+        *,
+        tenant_id: str,
+        source_id: str,
+        sub_tenant_id: typing.Optional[str] = OMIT,
+        mode: typing.Optional[FetchMode] = OMIT,
+        expiry_seconds: typing.Optional[int] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SourceFetchResponse:
+        """
+        Fetch the content of a source ingested.
+
+        This endpoint can return:
+        - File content directly (as string or base64)
+        - A presigned URL to access the file
+        - Both content and presigned URL
+
+        Parameters
+        ----------
         tenant_id : str
+            Unique identifier for the tenant/organization

-
+        source_id : str
+            Source ID of the file to fetch

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        mode : typing.Optional[FetchMode]
+            Fetch mode: 'content' returns file content, 'url' returns presigned URL, 'both' returns both
+
+        expiry_seconds : typing.Optional[int]
+            Expiry time in seconds for presigned URL (60-604800, default: 3600)

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

         Returns
         -------
-
+        SourceFetchResponse
             Successful Response

         Examples
@@ -62,14 +218,14 @@ class FetchClient:
         from usecortex-ai import CortexAI

         client = CortexAI(token="YOUR_TOKEN", )
-        client.fetch.
+        client.fetch.source_content(tenant_id='tenant_id', source_id='source_id', )
         """
-        _response = self._raw_client.
-            file_id=file_id,
-            file_type=file_type,
+        _response = self._raw_client.source_content(
             tenant_id=tenant_id,
-
+            source_id=source_id,
             sub_tenant_id=sub_tenant_id,
+            mode=mode,
+            expiry_seconds=expiry_seconds,
             request_options=request_options,
         )
         return _response.data
@@ -90,35 +246,200 @@ class AsyncFetchClient:
         """
         return self._raw_client

-    async def
+    async def sources(
+        self,
+        *,
+        tenant_id: str,
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SourceListResponse:
+        """
+        Retrieve all sources for a specific tenant and subtenant combination.
+
+        Use this endpoint to fetch a complete list of all sources associated
+        with your tenant. This includes documents, files, and other content
+        you've uploaded for processing.
+
+        You can optionally specify a sub-tenant to narrow down the results to
+        sources within that specific sub-tenant scope.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SourceListResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex-ai import AsyncCortexAI
+
+        client = AsyncCortexAI(token="YOUR_TOKEN", )
+        async def main() -> None:
+            await client.fetch.sources(tenant_id='tenant_id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.sources(
+            tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    async def source_by_ids(
         self,
         *,
-
-        file_type: str,
+        source_ids: typing.Sequence[str],
         tenant_id: str,
-        return_content: typing.Optional[bool] = OMIT,
         sub_tenant_id: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
-    ) ->
+    ) -> SourceListResponse:
         """
+        Retrieve specific sources by their IDs.
+
+        Use this endpoint to fetch one or more sources by providing their
+        unique identifiers. This is useful when you need detailed information
+        about specific documents or content you've previously uploaded.
+
+        Provide the source IDs in the request body along with your tenant
+        information to get the exact sources you need.
+
         Parameters
         ----------
-
+        source_ids : typing.Sequence[str]
+            List of source IDs to fetch.

-
+        tenant_id : str
+            Tenant ID

+        sub_tenant_id : typing.Optional[str]
+            Sub-tenant ID
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SourceListResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex-ai import AsyncCortexAI
+
+        client = AsyncCortexAI(token="YOUR_TOKEN", )
+        async def main() -> None:
+            await client.fetch.source_by_ids(source_ids=['source_ids'], tenant_id='tenant_id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.source_by_ids(
+            source_ids=source_ids, tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    async def graph_relations_by_source_id(
+        self,
+        *,
+        source_id: str,
+        tenant_id: typing.Optional[str] = None,
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SourceGraphRelationsResponse:
+        """
+        Retrieve relations for a specific source.
+
+        Use this endpoint to fetch all relations associated with a specific source.
+        This is useful when you need to understand the relationships between entities within a source.
+
+        Provide the source ID in the request body along with your tenant information to get the relations for that source.
+
+        Parameters
+        ----------
+        source_id : str
+            The source ID to fetch relations for
+
+        tenant_id : typing.Optional[str]
+            Unique identifier for the tenant/organization
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SourceGraphRelationsResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex-ai import AsyncCortexAI
+
+        client = AsyncCortexAI(token="YOUR_TOKEN", )
+        async def main() -> None:
+            await client.fetch.graph_relations_by_source_id(source_id='source_id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.graph_relations_by_source_id(
+            source_id=source_id, tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    async def source_content(
+        self,
+        *,
+        tenant_id: str,
+        source_id: str,
+        sub_tenant_id: typing.Optional[str] = OMIT,
+        mode: typing.Optional[FetchMode] = OMIT,
+        expiry_seconds: typing.Optional[int] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SourceFetchResponse:
+        """
+        Fetch the content of a source ingested.
+
+        This endpoint can return:
+        - File content directly (as string or base64)
+        - A presigned URL to access the file
+        - Both content and presigned URL
+
+        Parameters
+        ----------
         tenant_id : str
+            Unique identifier for the tenant/organization

-
+        source_id : str
+            Source ID of the file to fetch

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        mode : typing.Optional[FetchMode]
+            Fetch mode: 'content' returns file content, 'url' returns presigned URL, 'both' returns both
+
+        expiry_seconds : typing.Optional[int]
+            Expiry time in seconds for presigned URL (60-604800, default: 3600)

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

         Returns
         -------
-
+        SourceFetchResponse
             Successful Response

         Examples
@@ -129,15 +450,15 @@ class AsyncFetchClient:

         client = AsyncCortexAI(token="YOUR_TOKEN", )
         async def main() -> None:
-            await client.fetch.
+            await client.fetch.source_content(tenant_id='tenant_id', source_id='source_id', )
         asyncio.run(main())
         """
-        _response = await self._raw_client.
-            file_id=file_id,
-            file_type=file_type,
+        _response = await self._raw_client.source_content(
             tenant_id=tenant_id,
-
+            source_id=source_id,
             sub_tenant_id=sub_tenant_id,
+            mode=mode,
+            expiry_seconds=expiry_seconds,
             request_options=request_options,
         )
         return _response.data