usecortex-ai 0.2.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. usecortex_ai/__init__.py +18 -3
  2. usecortex_ai/client.py +7 -78
  3. usecortex_ai/core/client_wrapper.py +6 -8
  4. usecortex_ai/document/__init__.py +4 -0
  5. usecortex_ai/document/client.py +139 -0
  6. usecortex_ai/document/raw_client.py +312 -0
  7. usecortex_ai/embeddings/client.py +40 -70
  8. usecortex_ai/embeddings/raw_client.py +40 -70
  9. usecortex_ai/fetch/client.py +10 -0
  10. usecortex_ai/fetch/raw_client.py +10 -0
  11. usecortex_ai/search/client.py +74 -80
  12. usecortex_ai/search/raw_client.py +74 -80
  13. usecortex_ai/sources/client.py +28 -83
  14. usecortex_ai/sources/raw_client.py +28 -259
  15. usecortex_ai/tenant/client.py +152 -4
  16. usecortex_ai/tenant/raw_client.py +502 -4
  17. usecortex_ai/types/__init__.py +16 -2
  18. usecortex_ai/types/add_user_memory_response.py +36 -0
  19. usecortex_ai/types/app_sources_upload_data.py +10 -2
  20. usecortex_ai/types/attachment_model.py +34 -7
  21. usecortex_ai/types/batch_upload_data.py +10 -2
  22. usecortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py +0 -2
  23. usecortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py +0 -2
  24. usecortex_ai/types/content_model.py +33 -6
  25. usecortex_ai/types/delete_memory_request.py +14 -3
  26. usecortex_ai/types/delete_sources.py +20 -0
  27. usecortex_ai/types/delete_sub_tenant_data.py +42 -0
  28. usecortex_ai/types/delete_user_memory_response.py +31 -0
  29. usecortex_ai/types/embeddings_create_collection_data.py +19 -4
  30. usecortex_ai/types/embeddings_delete_data.py +19 -4
  31. usecortex_ai/types/embeddings_get_data.py +19 -4
  32. usecortex_ai/types/embeddings_search_data.py +19 -4
  33. usecortex_ai/types/error_response.py +0 -1
  34. usecortex_ai/types/fetch_content_data.py +19 -5
  35. usecortex_ai/types/file_upload_result.py +9 -2
  36. usecortex_ai/types/generate_user_memory_response.py +32 -0
  37. usecortex_ai/types/list_sources_response.py +14 -3
  38. usecortex_ai/types/list_user_memories_response.py +32 -0
  39. usecortex_ai/types/markdown_upload_request.py +18 -5
  40. usecortex_ai/types/processing_status.py +14 -3
  41. usecortex_ai/types/relations.py +9 -2
  42. usecortex_ai/types/retrieve_user_memory_response.py +32 -0
  43. usecortex_ai/types/search_chunk.py +54 -17
  44. usecortex_ai/types/single_upload_data.py +10 -2
  45. usecortex_ai/types/source.py +34 -15
  46. usecortex_ai/types/source_model.py +63 -14
  47. usecortex_ai/types/sub_tenant_ids_data.py +24 -5
  48. usecortex_ai/types/tenant_create_data.py +19 -4
  49. usecortex_ai/types/tenant_stats.py +24 -5
  50. usecortex_ai/types/user_memory.py +31 -0
  51. usecortex_ai/upload/client.py +394 -79
  52. usecortex_ai/upload/raw_client.py +398 -103
  53. usecortex_ai/user/client.py +14 -6
  54. usecortex_ai/user/raw_client.py +14 -6
  55. usecortex_ai/user_memory/client.py +184 -50
  56. usecortex_ai/user_memory/raw_client.py +905 -88
  57. {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.2.2.dist-info}/METADATA +1 -1
  58. usecortex_ai-0.2.2.dist-info/RECORD +100 -0
  59. usecortex_ai/raw_client.py +0 -90
  60. usecortex_ai/types/source_content.py +0 -26
  61. usecortex_ai-0.2.1.dist-info/RECORD +0 -91
  62. {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.2.2.dist-info}/WHEEL +0 -0
  63. {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.2.2.dist-info}/licenses/LICENSE +0 -0
  64. {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.2.2.dist-info}/top_level.txt +0 -0
usecortex_ai/search/raw_client.py
@@ -52,41 +52,60 @@ class RawSearchClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> HttpResponse[typing.Optional[typing.Any]]:
  """
+ Ask a question to your uploaded knowledge base and let Cortex AI answer it.
+
  Parameters
  ----------
  question : str
+ The question to be answered

  session_id : str
+ Unique identifier for the conversation session. Keep it the same when the current question refers to a previous answer or question

  tenant_id : str
+ Identifier for the tenant/organization

  context_list : typing.Optional[typing.Sequence[str]]
+ List of context strings to provide additional information

  search_modes : typing.Optional[typing.Sequence[str]]
+ List of search modes to use for finding relevant information

  sub_tenant_id : typing.Optional[str]
+ Identifier for the sub-tenant within the tenant

  highlight_chunks : typing.Optional[bool]
+ Whether to return text chunks in the response along with the final LLM-generated answer

  stream : typing.Optional[bool]
+ Whether to stream the response

  search_alpha : typing.Optional[float]
+ Closer to 0.0 means an exact keyword search will be performed; closer to 1.0 means the semantics of the search will be considered. In most cases, you won't have to toggle it yourself.

  recency_bias : typing.Optional[float]
+ Bias towards more recent information (0.0 to 1.0)

  ai_generation : typing.Optional[bool]
+ Whether to use AI for generating responses

  top_n : typing.Optional[int]
+ Number of top results to return

  user_name : typing.Optional[str]
+ Name of the user making the request. This helps the LLM know the user's name if semantics around the username are involved in the query. It's generally good practice to include it if possible.

  user_instructions : typing.Optional[str]
+ Custom instructions for the AI response, added to our proprietary prompt. This can be used to provide additional context or instructions for the LLM to follow so that the answers are tailored towards your application's style

  multi_step_reasoning : typing.Optional[bool]
+ Enable advanced multi-step reasoning for complex queries. When enabled, the AI will automatically break down complex questions into multiple research steps to provide more comprehensive and accurate answers.

  auto_agent_routing : typing.Optional[bool]
+ Enable intelligent agent routing to automatically select the most suitable AI agent for your specific query type. Different agents are optimized for various use cases like social media, code, conversations, general knowledge, etc.

  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional metadata for the request

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
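The added docstring above documents Cortex's question-answering call. A minimal usage sketch under assumptions: the usecortex_ai import path, the CortexAI client class (seen elsewhere in this diff), and the method name qna are not shown in this hunk and are hypothetical.

from usecortex_ai import CortexAI  # assumed import path for the usecortex-ai wheel

client = CortexAI(token="YOUR_TOKEN")

# Hypothetical method name; the hunk documents only the parameters.
answer = client.search.qna(
    question="What changed in our refund policy?",
    session_id="session-123",   # keep the same ID for follow-up questions
    tenant_id="tenant_id",
    highlight_chunks=True,      # also return the supporting text chunks
    search_alpha=0.5,           # 0.0 = keyword-like, 1.0 = semantic
)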
@@ -227,45 +246,33 @@ class RawSearchClient:
  max_chunks: typing.Optional[int] = OMIT,
  alpha: typing.Optional[Alpha] = OMIT,
  recency_bias: typing.Optional[float] = OMIT,
- num_related_chunks: typing.Optional[int] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> HttpResponse[typing.List[SearchChunk]]:
  """
- Search for content within indexed sources using semantic and keyword search capabilities.
-
- This endpoint performs a search query against the Findr backend, allowing users to retrieve
- relevant content chunks from their indexed documents and sources. The search can be customized
- with various parameters to control the number of results and search behavior.
-
- Args:
- request (SearchRequest): The search request containing:
- - query (str): Search query string to find relevant content
- - tenant_id (str, optional): Tenant identifier for multi-tenancy
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
- - max_chunks (int, optional): Maximum number of content chunks to return
- - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
- - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
- - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
- api_details (dict): Authentication details obtained from API key validation
-
- Returns:
- SearchData: Success response with search results
+ Search for relevant content within your indexed sources.
+
+ This API returns the chunks related to the query you make. We use neural (embedding) search to give you the most relevant chunks.
+ Results are ranked by relevance and can be customized with parameters like result limits and recency preferences.

  Parameters
  ----------
  query : str
+ Search terms to find relevant content

  tenant_id : str
+ Unique identifier for the tenant/organization

  sub_tenant_id : typing.Optional[str]
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

  max_chunks : typing.Optional[int]
+ Maximum number of results to return

  alpha : typing.Optional[Alpha]
+ Search ranking algorithm parameter (0.0-1.0 or 'auto')

  recency_bias : typing.Optional[float]
-
- num_related_chunks : typing.Optional[int]
+ Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -285,7 +292,6 @@ class RawSearchClient:
  "max_chunks": max_chunks,
  "alpha": convert_and_respect_annotation_metadata(object_=alpha, annotation=Alpha, direction="write"),
  "recency_bias": recency_bias,
- "num_related_chunks": num_related_chunks,
  },
  headers={
  "content-type": "application/json",
@@ -396,36 +402,27 @@ class RawSearchClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> HttpResponse[typing.List[SearchChunk]]:
  """
- Full text search endpoint for Cortex customers.
- Performs full text search with configurable operators for precise text matching against the Findr backend.
-
- This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
- relevant content chunks from their indexed documents and sources using BM25-based text matching.
- The search can be customized with various operators to control the matching behavior.
-
- Args:
- request (FullTextSearchRequest): The full text search request containing:
- - query (str): Search query string to find relevant content
- - tenant_id (str): Tenant identifier for multi-tenancy
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
- - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
- - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
- api_details (dict): Authentication details obtained from API key validation
+ Perform full text search for exact matches within your indexed sources.

- Returns:
- FullTextSearchData: Success response with full text search results
+ Use this endpoint to find content chunks using BM25-based text matching with configurable operators.
+ Choose between OR and AND operators to control how search terms are combined for precise text matching.

  Parameters
  ----------
  query : str
+ Search terms to find in your content

  tenant_id : str
+ Unique identifier for the tenant/organization

  sub_tenant_id : typing.Optional[str]
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

  operator : typing.Optional[Bm25OperatorType]
+ How to combine search terms (OR or AND)

  max_chunks : typing.Optional[int]
+ Maximum number of results to return

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
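For the BM25 full-text endpoint documented above, a sketch might look as follows; the method name full_text_search is an assumption, and whether operator accepts a plain string or requires a Bm25OperatorType value is not shown in this hunk.

from usecortex_ai import CortexAI  # assumed import path

client = CortexAI(token="YOUR_TOKEN")

# Hypothetical method name; operator controls how terms are combined.
chunks = client.search.full_text_search(
    query="invoice 2024 overdue",
    tenant_id="tenant_id",
    operator="AND",   # require all terms; "OR" matches any term
    max_chunks=25,
)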
@@ -571,41 +568,60 @@ class AsyncRawSearchClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
  """
+ Ask a question to your uploaded knowledge base and let Cortex AI answer it.
+
  Parameters
  ----------
  question : str
+ The question to be answered

  session_id : str
+ Unique identifier for the conversation session. Keep it the same when the current question refers to a previous answer or question

  tenant_id : str
+ Identifier for the tenant/organization

  context_list : typing.Optional[typing.Sequence[str]]
+ List of context strings to provide additional information

  search_modes : typing.Optional[typing.Sequence[str]]
+ List of search modes to use for finding relevant information

  sub_tenant_id : typing.Optional[str]
+ Identifier for the sub-tenant within the tenant

  highlight_chunks : typing.Optional[bool]
+ Whether to return text chunks in the response along with the final LLM-generated answer

  stream : typing.Optional[bool]
+ Whether to stream the response

  search_alpha : typing.Optional[float]
+ Closer to 0.0 means an exact keyword search will be performed; closer to 1.0 means the semantics of the search will be considered. In most cases, you won't have to toggle it yourself.

  recency_bias : typing.Optional[float]
+ Bias towards more recent information (0.0 to 1.0)

  ai_generation : typing.Optional[bool]
+ Whether to use AI for generating responses

  top_n : typing.Optional[int]
+ Number of top results to return

  user_name : typing.Optional[str]
+ Name of the user making the request. This helps the LLM know the user's name if semantics around the username are involved in the query. It's generally good practice to include it if possible.

  user_instructions : typing.Optional[str]
+ Custom instructions for the AI response, added to our proprietary prompt. This can be used to provide additional context or instructions for the LLM to follow so that the answers are tailored towards your application's style

  multi_step_reasoning : typing.Optional[bool]
+ Enable advanced multi-step reasoning for complex queries. When enabled, the AI will automatically break down complex questions into multiple research steps to provide more comprehensive and accurate answers.

  auto_agent_routing : typing.Optional[bool]
+ Enable intelligent agent routing to automatically select the most suitable AI agent for your specific query type. Different agents are optimized for various use cases like social media, code, conversations, general knowledge, etc.

  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional metadata for the request

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -746,45 +762,33 @@ class AsyncRawSearchClient:
  max_chunks: typing.Optional[int] = OMIT,
  alpha: typing.Optional[Alpha] = OMIT,
  recency_bias: typing.Optional[float] = OMIT,
- num_related_chunks: typing.Optional[int] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AsyncHttpResponse[typing.List[SearchChunk]]:
  """
- Search for content within indexed sources using semantic and keyword search capabilities.
-
- This endpoint performs a search query against the Findr backend, allowing users to retrieve
- relevant content chunks from their indexed documents and sources. The search can be customized
- with various parameters to control the number of results and search behavior.
-
- Args:
- request (SearchRequest): The search request containing:
- - query (str): Search query string to find relevant content
- - tenant_id (str, optional): Tenant identifier for multi-tenancy
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
- - max_chunks (int, optional): Maximum number of content chunks to return
- - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
- - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
- - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
- api_details (dict): Authentication details obtained from API key validation
-
- Returns:
- SearchData: Success response with search results
+ Search for relevant content within your indexed sources.
+
+ This API returns the chunks related to the query you make. We use neural (embedding) search to give you the most relevant chunks.
+ Results are ranked by relevance and can be customized with parameters like result limits and recency preferences.

  Parameters
  ----------
  query : str
+ Search terms to find relevant content

  tenant_id : str
+ Unique identifier for the tenant/organization

  sub_tenant_id : typing.Optional[str]
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

  max_chunks : typing.Optional[int]
+ Maximum number of results to return

  alpha : typing.Optional[Alpha]
+ Search ranking algorithm parameter (0.0-1.0 or 'auto')

  recency_bias : typing.Optional[float]
-
- num_related_chunks : typing.Optional[int]
+ Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -804,7 +808,6 @@ class AsyncRawSearchClient:
  "max_chunks": max_chunks,
  "alpha": convert_and_respect_annotation_metadata(object_=alpha, annotation=Alpha, direction="write"),
  "recency_bias": recency_bias,
- "num_related_chunks": num_related_chunks,
  },
  headers={
  "content-type": "application/json",
@@ -915,36 +918,27 @@ class AsyncRawSearchClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AsyncHttpResponse[typing.List[SearchChunk]]:
  """
- Full text search endpoint for Cortex customers.
- Performs full text search with configurable operators for precise text matching against the Findr backend.
-
- This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
- relevant content chunks from their indexed documents and sources using BM25-based text matching.
- The search can be customized with various operators to control the matching behavior.
-
- Args:
- request (FullTextSearchRequest): The full text search request containing:
- - query (str): Search query string to find relevant content
- - tenant_id (str): Tenant identifier for multi-tenancy
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
- - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
- - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
- api_details (dict): Authentication details obtained from API key validation
+ Perform full text search for exact matches within your indexed sources.

- Returns:
- FullTextSearchData: Success response with full text search results
+ Use this endpoint to find content chunks using BM25-based text matching with configurable operators.
+ Choose between OR and AND operators to control how search terms are combined for precise text matching.

  Parameters
  ----------
  query : str
+ Search terms to find in your content

  tenant_id : str
+ Unique identifier for the tenant/organization

  sub_tenant_id : typing.Optional[str]
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

  operator : typing.Optional[Bm25OperatorType]
+ How to combine search terms (OR or AND)

  max_chunks : typing.Optional[int]
+ Maximum number of results to return

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
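The AsyncRawSearchClient hunks mirror the synchronous ones. The async entry point follows the pattern visible in the removed docstring examples later in this diff (AsyncCortexAI driven by asyncio); the search method name is again an assumption.

import asyncio

from usecortex_ai import AsyncCortexAI  # assumed import path

client = AsyncCortexAI(token="YOUR_TOKEN")

async def main() -> None:
    # Hypothetical method name; same parameters as the synchronous client.
    chunks = await client.search.search(query="roadmap", tenant_id="tenant_id", max_chunks=5)

asyncio.run(main())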
usecortex_ai/sources/client.py
@@ -5,7 +5,6 @@ import typing
  from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ..core.request_options import RequestOptions
  from ..types.list_sources_response import ListSourcesResponse
- from ..types.sub_tenant_ids_data import SubTenantIdsData
  from .raw_client import AsyncRawSourcesClient, RawSourcesClient

  # this is used as the default value for optional parameters
@@ -35,11 +34,19 @@ class SourcesClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> ListSourcesResponse:
  """
+ Retrieve all sources for a specific tenant.
+
+ Use this endpoint to fetch a complete list of all sources associated with your tenant. This includes documents, files, and other content you've uploaded for processing.
+
+ You can optionally specify a sub-tenant to narrow down the results to sources within that specific sub-tenant scope.
+
  Parameters
  ----------
  tenant_id : str
+ Unique identifier for the tenant/organization

  sub_tenant_id : typing.Optional[str]
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -69,6 +76,12 @@ class SourcesClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> ListSourcesResponse:
  """
+ Retrieve specific sources by their IDs.
+
+ Use this endpoint to fetch one or more sources by providing their unique identifiers. This is useful when you need detailed information about specific documents or content you've previously uploaded.
+
+ Provide the source IDs in the request body along with your tenant information to get the exact sources you need.
+
  Parameters
  ----------
  tenant_id : str
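The two SourcesClient docstrings above cover listing every source for a tenant and fetching specific sources by ID. A sketch of both calls; the method names list_sources and get_sources are assumptions, since the hunks show only the docstrings and the ListSourcesResponse return type.

from usecortex_ai import CortexAI  # assumed import path

client = CortexAI(token="YOUR_TOKEN")

# Hypothetical method names; both return a ListSourcesResponse.
all_sources = client.sources.list_sources(tenant_id="tenant_id", sub_tenant_id="team-a")
some_sources = client.sources.get_sources(tenant_id="tenant_id", source_ids=["src_1", "src_2"])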
@@ -95,45 +108,6 @@ class SourcesClient:
  )
  return _response.data

- def get_sub_tenant_ids(
- self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
- ) -> SubTenantIdsData:
- """
- Get all sub-tenant IDs (tenant IDs) contained within a specific Weaviate collection.
- Fetches the tenant IDs directly from Weaviate using default cluster credentials.
-
- Args:
- tenant_id: The tenant ID to fetch sub-tenant IDs for
- api_details: Authentication dependency
-
- Returns:
- SubTenantIdsData: Contains collection_name, sub_tenant_ids list, count, and success message
-
- Example:
- GET /list/sub_tenant_ids?tenant_id=my_tenant_123
-
- Parameters
- ----------
- tenant_id : str
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- SubTenantIdsData
- Successful Response
-
- Examples
- --------
- from usecortex-ai import CortexAI
-
- client = CortexAI(token="YOUR_TOKEN", )
- client.sources.get_sub_tenant_ids(tenant_id='tenant_id', )
- """
- _response = self._raw_client.get_sub_tenant_ids(tenant_id=tenant_id, request_options=request_options)
- return _response.data
-

  class AsyncSourcesClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -158,11 +132,19 @@ class AsyncSourcesClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> ListSourcesResponse:
  """
+ Retrieve all sources for a specific tenant.
+
+ Use this endpoint to fetch a complete list of all sources associated with your tenant. This includes documents, files, and other content you've uploaded for processing.
+
+ You can optionally specify a sub-tenant to narrow down the results to sources within that specific sub-tenant scope.
+
  Parameters
  ----------
  tenant_id : str
+ Unique identifier for the tenant/organization

  sub_tenant_id : typing.Optional[str]
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -196,6 +178,12 @@ class AsyncSourcesClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> ListSourcesResponse:
  """
+ Retrieve specific sources by their IDs.
+
+ Use this endpoint to fetch one or more sources by providing their unique identifiers. This is useful when you need detailed information about specific documents or content you've previously uploaded.
+
+ Provide the source IDs in the request body along with your tenant information to get the exact sources you need.
+
  Parameters
  ----------
  tenant_id : str
@@ -225,46 +213,3 @@ class AsyncSourcesClient:
  tenant_id=tenant_id, source_ids=source_ids, request_options=request_options
  )
  return _response.data
-
- async def get_sub_tenant_ids(
- self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
- ) -> SubTenantIdsData:
- """
- Get all sub-tenant IDs (tenant IDs) contained within a specific Weaviate collection.
- Fetches the tenant IDs directly from Weaviate using default cluster credentials.
-
- Args:
- tenant_id: The tenant ID to fetch sub-tenant IDs for
- api_details: Authentication dependency
-
- Returns:
- SubTenantIdsData: Contains collection_name, sub_tenant_ids list, count, and success message
-
- Example:
- GET /list/sub_tenant_ids?tenant_id=my_tenant_123
-
- Parameters
- ----------
- tenant_id : str
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- SubTenantIdsData
- Successful Response
-
- Examples
- --------
- import asyncio
-
- from usecortex-ai import AsyncCortexAI
-
- client = AsyncCortexAI(token="YOUR_TOKEN", )
- async def main() -> None:
- await client.sources.get_sub_tenant_ids(tenant_id='tenant_id', )
- asyncio.run(main())
- """
- _response = await self._raw_client.get_sub_tenant_ids(tenant_id=tenant_id, request_options=request_options)
- return _response.data
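Both the synchronous and asynchronous get_sub_tenant_ids methods are removed in 0.2.2. Code written against 0.2.1, like the call below (adapted from the deleted docstring example), will no longer work because the attribute is gone; this diff does not show a direct replacement on the sources client.

from usecortex_ai import CortexAI  # assumed import path

client = CortexAI(token="YOUR_TOKEN")

# Worked in 0.2.1; removed in 0.2.2, so this attribute no longer exists.
sub_tenants = client.sources.get_sub_tenant_ids(tenant_id="tenant_id")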