usecortex-ai 0.2.1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. usecortex_ai/__init__.py +18 -3
  2. usecortex_ai/client.py +3 -0
  3. usecortex_ai/document/__init__.py +4 -0
  4. usecortex_ai/document/client.py +139 -0
  5. usecortex_ai/document/raw_client.py +312 -0
  6. usecortex_ai/embeddings/client.py +48 -78
  7. usecortex_ai/embeddings/raw_client.py +44 -74
  8. usecortex_ai/fetch/client.py +2 -2
  9. usecortex_ai/search/client.py +88 -84
  10. usecortex_ai/search/raw_client.py +82 -78
  11. usecortex_ai/sources/client.py +26 -157
  12. usecortex_ai/sources/raw_client.py +22 -501
  13. usecortex_ai/tenant/client.py +154 -6
  14. usecortex_ai/tenant/raw_client.py +502 -4
  15. usecortex_ai/types/__init__.py +16 -2
  16. usecortex_ai/types/add_user_memory_response.py +36 -0
  17. usecortex_ai/types/app_sources_upload_data.py +10 -2
  18. usecortex_ai/types/attachment_model.py +34 -7
  19. usecortex_ai/types/batch_upload_data.py +10 -2
  20. usecortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py +0 -2
  21. usecortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py +0 -2
  22. usecortex_ai/types/content_model.py +33 -6
  23. usecortex_ai/types/delete_memory_request.py +14 -3
  24. usecortex_ai/types/delete_sources.py +20 -0
  25. usecortex_ai/types/delete_sub_tenant_data.py +42 -0
  26. usecortex_ai/types/delete_user_memory_response.py +31 -0
  27. usecortex_ai/types/embeddings_create_collection_data.py +19 -4
  28. usecortex_ai/types/embeddings_delete_data.py +19 -4
  29. usecortex_ai/types/embeddings_get_data.py +19 -4
  30. usecortex_ai/types/embeddings_search_data.py +19 -4
  31. usecortex_ai/types/error_response.py +0 -1
  32. usecortex_ai/types/fetch_content_data.py +19 -5
  33. usecortex_ai/types/file_upload_result.py +9 -2
  34. usecortex_ai/types/generate_user_memory_response.py +32 -0
  35. usecortex_ai/types/list_sources_response.py +14 -3
  36. usecortex_ai/types/list_user_memories_response.py +32 -0
  37. usecortex_ai/types/markdown_upload_request.py +23 -5
  38. usecortex_ai/types/processing_status.py +14 -3
  39. usecortex_ai/types/relations.py +9 -2
  40. usecortex_ai/types/retrieve_user_memory_response.py +32 -0
  41. usecortex_ai/types/search_chunk.py +54 -17
  42. usecortex_ai/types/single_upload_data.py +10 -2
  43. usecortex_ai/types/source.py +34 -15
  44. usecortex_ai/types/source_model.py +63 -14
  45. usecortex_ai/types/sub_tenant_ids_data.py +29 -5
  46. usecortex_ai/types/tenant_create_data.py +19 -4
  47. usecortex_ai/types/tenant_stats.py +24 -5
  48. usecortex_ai/types/user_memory.py +31 -0
  49. usecortex_ai/upload/client.py +486 -111
  50. usecortex_ai/upload/raw_client.py +458 -103
  51. usecortex_ai/user/client.py +30 -10
  52. usecortex_ai/user/raw_client.py +22 -6
  53. usecortex_ai/user_memory/client.py +200 -56
  54. usecortex_ai/user_memory/raw_client.py +921 -94
  55. {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.3.0.dist-info}/METADATA +1 -1
  56. usecortex_ai-0.3.0.dist-info/RECORD +101 -0
  57. usecortex_ai/types/source_content.py +0 -26
  58. usecortex_ai-0.2.1.dist-info/RECORD +0 -91
  59. {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.3.0.dist-info}/WHEEL +0 -0
  60. {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.3.0.dist-info}/licenses/LICENSE +0 -0
  61. {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.3.0.dist-info}/top_level.txt +0 -0
@@ -52,41 +52,60 @@ class RawSearchClient:
52
52
  request_options: typing.Optional[RequestOptions] = None,
53
53
  ) -> HttpResponse[typing.Optional[typing.Any]]:
54
54
  """
55
+ Ask a question to your uploaded knowledge base and let Cortex AI answer it.
56
+
55
57
  Parameters
56
58
  ----------
57
59
  question : str
60
+ The question to be answered
58
61
 
59
62
  session_id : str
63
+ Unique identifier for the conversation session. Keep it same when the current question refers to a previous answer or question
60
64
 
61
65
  tenant_id : str
66
+ Identifier for the tenant/organization
62
67
 
63
68
  context_list : typing.Optional[typing.Sequence[str]]
69
+ List of context strings to provide additional information
64
70
 
65
71
  search_modes : typing.Optional[typing.Sequence[str]]
72
+ List of search modes to use for finding relevant information
66
73
 
67
74
  sub_tenant_id : typing.Optional[str]
75
+ Identifier for sub-tenant within the tenant
68
76
 
69
77
  highlight_chunks : typing.Optional[bool]
78
+ Whether to return text chunks in the response along with final LLM generated answer
70
79
 
71
80
  stream : typing.Optional[bool]
81
+ Whether to stream the response
72
82
 
73
83
  search_alpha : typing.Optional[float]
84
+ Closer to 0.0 means an exact keyword search will be performed, closer to 1.0 means semantics of the search will be considered. In most cases, you won't have to toggle it yourself.
74
85
 
75
86
  recency_bias : typing.Optional[float]
87
+ Bias towards more recent information (0.0 to 1.0)
76
88
 
77
89
  ai_generation : typing.Optional[bool]
90
+ Whether to use AI for generating responses
78
91
 
79
92
  top_n : typing.Optional[int]
93
+ Number of top results to return
80
94
 
81
95
  user_name : typing.Optional[str]
96
+ Name of the user making the request. This helps the LLM know the user's name if semantics around the username are involved in the query. It's generally a good practice to include it if possible.
82
97
 
83
98
  user_instructions : typing.Optional[str]
99
+ Custom instructions for the AI response to add to our proprietary prompt. This can be used to provide additional context or instructions for the LLM to follow so that the answers are tailored towards your application style
84
100
 
85
101
  multi_step_reasoning : typing.Optional[bool]
102
+ Enable advanced multi-step reasoning for complex queries. When enabled, the AI will automatically break down complex questions into multiple research steps to provide more comprehensive and accurate answers.
86
103
 
87
104
  auto_agent_routing : typing.Optional[bool]
105
+ Enable intelligent agent routing to automatically select the most suitable AI agent for your specific query type. Different agents are optimized for various use cases like social media, code, conversations, general knowledge, etc.
88
106
 
89
107
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
108
+ Additional metadata for the request
90
109
 
91
110
  request_options : typing.Optional[RequestOptions]
92
111
  Request-specific configuration.
@@ -227,45 +246,37 @@ class RawSearchClient:
227
246
  max_chunks: typing.Optional[int] = OMIT,
228
247
  alpha: typing.Optional[Alpha] = OMIT,
229
248
  recency_bias: typing.Optional[float] = OMIT,
230
- num_related_chunks: typing.Optional[int] = OMIT,
249
+ personalise_search: typing.Optional[bool] = OMIT,
231
250
  request_options: typing.Optional[RequestOptions] = None,
232
251
  ) -> HttpResponse[typing.List[SearchChunk]]:
233
252
  """
234
- Search for content within indexed sources using semantic and keyword search capabilities.
235
-
236
- This endpoint performs a search query against the Findr backend, allowing users to retrieve
237
- relevant content chunks from their indexed documents and sources. The search can be customized
238
- with various parameters to control the number of results and search behavior.
239
-
240
- Args:
241
- request (SearchRequest): The search request containing:
242
- - query (str): Search query string to find relevant content
243
- - tenant_id (str, optional): Tenant identifier for multi-tenancy
244
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
245
- - max_chunks (int, optional): Maximum number of content chunks to return
246
- - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
247
- - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
248
- - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
249
- api_details (dict): Authentication details obtained from API key validation
250
-
251
- Returns:
252
- SearchData: Success response with search results
253
+ Search for relevant content within your indexed sources.
254
+
255
+ This API returns the chunks related to the query you make. We use neural (embedding) search to give you the most relevant chunks.
256
+ Results are ranked by relevance and can be customized with parameters like result limits and recency preferences.
253
257
 
254
258
  Parameters
255
259
  ----------
256
260
  query : str
261
+ Search terms to find relevant content
257
262
 
258
263
  tenant_id : str
264
+ Unique identifier for the tenant/organization
259
265
 
260
266
  sub_tenant_id : typing.Optional[str]
267
+ Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
261
268
 
262
269
  max_chunks : typing.Optional[int]
270
+ Maximum number of results to return
263
271
 
264
272
  alpha : typing.Optional[Alpha]
273
+ Search ranking algorithm parameter (0.0-1.0 or 'auto')
265
274
 
266
275
  recency_bias : typing.Optional[float]
276
+ Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)
267
277
 
268
- num_related_chunks : typing.Optional[int]
278
+ personalise_search : typing.Optional[bool]
279
+ Enable personalized search results based on user preferences
269
280
 
270
281
  request_options : typing.Optional[RequestOptions]
271
282
  Request-specific configuration.
@@ -285,7 +296,7 @@ class RawSearchClient:
285
296
  "max_chunks": max_chunks,
286
297
  "alpha": convert_and_respect_annotation_metadata(object_=alpha, annotation=Alpha, direction="write"),
287
298
  "recency_bias": recency_bias,
288
- "num_related_chunks": num_related_chunks,
299
+ "personalise_search": personalise_search,
289
300
  },
290
301
  headers={
291
302
  "content-type": "application/json",
@@ -396,36 +407,27 @@ class RawSearchClient:
396
407
  request_options: typing.Optional[RequestOptions] = None,
397
408
  ) -> HttpResponse[typing.List[SearchChunk]]:
398
409
  """
399
- Full text search endpoint for Cortex customers.
400
- Performs full text search with configurable operators for precise text matching against the Findr backend.
401
-
402
- This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
403
- relevant content chunks from their indexed documents and sources using BM25-based text matching.
404
- The search can be customized with various operators to control the matching behavior.
405
-
406
- Args:
407
- request (FullTextSearchRequest): The full text search request containing:
408
- - query (str): Search query string to find relevant content
409
- - tenant_id (str): Tenant identifier for multi-tenancy
410
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
411
- - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
412
- - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
413
- api_details (dict): Authentication details obtained from API key validation
410
+ Perform full text search for exact matches within your indexed sources.
414
411
 
415
- Returns:
416
- FullTextSearchData: Success response with full text search results
412
+ Use this endpoint to find content chunks using BM25-based text matching with configurable operators.
413
+ Choose between 'OR' and 'AND' operators to control how search terms are combined for precise text matching.
417
414
 
418
415
  Parameters
419
416
  ----------
420
417
  query : str
418
+ Search terms to find in your content
421
419
 
422
420
  tenant_id : str
421
+ Unique identifier for the tenant/organization
423
422
 
424
423
  sub_tenant_id : typing.Optional[str]
424
+ Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
425
425
 
426
426
  operator : typing.Optional[Bm25OperatorType]
427
+ How to combine search terms (OR or AND)
427
428
 
428
429
  max_chunks : typing.Optional[int]
430
+ Maximum number of results to return
429
431
 
430
432
  request_options : typing.Optional[RequestOptions]
431
433
  Request-specific configuration.
@@ -571,41 +573,60 @@ class AsyncRawSearchClient:
571
573
  request_options: typing.Optional[RequestOptions] = None,
572
574
  ) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
573
575
  """
576
+ Ask a question to your uploaded knowledge base and let Cortex AI answer it.
577
+
574
578
  Parameters
575
579
  ----------
576
580
  question : str
581
+ The question to be answered
577
582
 
578
583
  session_id : str
584
+ Unique identifier for the conversation session. Keep it same when the current question refers to a previous answer or question
579
585
 
580
586
  tenant_id : str
587
+ Identifier for the tenant/organization
581
588
 
582
589
  context_list : typing.Optional[typing.Sequence[str]]
590
+ List of context strings to provide additional information
583
591
 
584
592
  search_modes : typing.Optional[typing.Sequence[str]]
593
+ List of search modes to use for finding relevant information
585
594
 
586
595
  sub_tenant_id : typing.Optional[str]
596
+ Identifier for sub-tenant within the tenant
587
597
 
588
598
  highlight_chunks : typing.Optional[bool]
599
+ Whether to return text chunks in the response along with final LLM generated answer
589
600
 
590
601
  stream : typing.Optional[bool]
602
+ Whether to stream the response
591
603
 
592
604
  search_alpha : typing.Optional[float]
605
+ Closer to 0.0 means an exact keyword search will be performed, closer to 1.0 means semantics of the search will be considered. In most cases, you won't have to toggle it yourself.
593
606
 
594
607
  recency_bias : typing.Optional[float]
608
+ Bias towards more recent information (0.0 to 1.0)
595
609
 
596
610
  ai_generation : typing.Optional[bool]
611
+ Whether to use AI for generating responses
597
612
 
598
613
  top_n : typing.Optional[int]
614
+ Number of top results to return
599
615
 
600
616
  user_name : typing.Optional[str]
617
+ Name of the user making the request. This helps the LLM know the user's name if semantics around the username are involved in the query. It's generally a good practice to include it if possible.
601
618
 
602
619
  user_instructions : typing.Optional[str]
620
+ Custom instructions for the AI response to add to our proprietary prompt. This can be used to provide additional context or instructions for the LLM to follow so that the answers are tailored towards your application style
603
621
 
604
622
  multi_step_reasoning : typing.Optional[bool]
623
+ Enable advanced multi-step reasoning for complex queries. When enabled, the AI will automatically break down complex questions into multiple research steps to provide more comprehensive and accurate answers.
605
624
 
606
625
  auto_agent_routing : typing.Optional[bool]
626
+ Enable intelligent agent routing to automatically select the most suitable AI agent for your specific query type. Different agents are optimized for various use cases like social media, code, conversations, general knowledge, etc.
607
627
 
608
628
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
629
+ Additional metadata for the request
609
630
 
610
631
  request_options : typing.Optional[RequestOptions]
611
632
  Request-specific configuration.
@@ -746,45 +767,37 @@ class AsyncRawSearchClient:
746
767
  max_chunks: typing.Optional[int] = OMIT,
747
768
  alpha: typing.Optional[Alpha] = OMIT,
748
769
  recency_bias: typing.Optional[float] = OMIT,
749
- num_related_chunks: typing.Optional[int] = OMIT,
770
+ personalise_search: typing.Optional[bool] = OMIT,
750
771
  request_options: typing.Optional[RequestOptions] = None,
751
772
  ) -> AsyncHttpResponse[typing.List[SearchChunk]]:
752
773
  """
753
- Search for content within indexed sources using semantic and keyword search capabilities.
754
-
755
- This endpoint performs a search query against the Findr backend, allowing users to retrieve
756
- relevant content chunks from their indexed documents and sources. The search can be customized
757
- with various parameters to control the number of results and search behavior.
758
-
759
- Args:
760
- request (SearchRequest): The search request containing:
761
- - query (str): Search query string to find relevant content
762
- - tenant_id (str, optional): Tenant identifier for multi-tenancy
763
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
764
- - max_chunks (int, optional): Maximum number of content chunks to return
765
- - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
766
- - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
767
- - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
768
- api_details (dict): Authentication details obtained from API key validation
769
-
770
- Returns:
771
- SearchData: Success response with search results
774
+ Search for relevant content within your indexed sources.
775
+
776
+ This API returns the chunks related to the query you make. We use neural (embedding) search to give you the most relevant chunks.
777
+ Results are ranked by relevance and can be customized with parameters like result limits and recency preferences.
772
778
 
773
779
  Parameters
774
780
  ----------
775
781
  query : str
782
+ Search terms to find relevant content
776
783
 
777
784
  tenant_id : str
785
+ Unique identifier for the tenant/organization
778
786
 
779
787
  sub_tenant_id : typing.Optional[str]
788
+ Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
780
789
 
781
790
  max_chunks : typing.Optional[int]
791
+ Maximum number of results to return
782
792
 
783
793
  alpha : typing.Optional[Alpha]
794
+ Search ranking algorithm parameter (0.0-1.0 or 'auto')
784
795
 
785
796
  recency_bias : typing.Optional[float]
797
+ Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)
786
798
 
787
- num_related_chunks : typing.Optional[int]
799
+ personalise_search : typing.Optional[bool]
800
+ Enable personalized search results based on user preferences
788
801
 
789
802
  request_options : typing.Optional[RequestOptions]
790
803
  Request-specific configuration.
@@ -804,7 +817,7 @@ class AsyncRawSearchClient:
804
817
  "max_chunks": max_chunks,
805
818
  "alpha": convert_and_respect_annotation_metadata(object_=alpha, annotation=Alpha, direction="write"),
806
819
  "recency_bias": recency_bias,
807
- "num_related_chunks": num_related_chunks,
820
+ "personalise_search": personalise_search,
808
821
  },
809
822
  headers={
810
823
  "content-type": "application/json",
@@ -915,36 +928,27 @@ class AsyncRawSearchClient:
915
928
  request_options: typing.Optional[RequestOptions] = None,
916
929
  ) -> AsyncHttpResponse[typing.List[SearchChunk]]:
917
930
  """
918
- Full text search endpoint for Cortex customers.
919
- Performs full text search with configurable operators for precise text matching against the Findr backend.
920
-
921
- This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
922
- relevant content chunks from their indexed documents and sources using BM25-based text matching.
923
- The search can be customized with various operators to control the matching behavior.
924
-
925
- Args:
926
- request (FullTextSearchRequest): The full text search request containing:
927
- - query (str): Search query string to find relevant content
928
- - tenant_id (str): Tenant identifier for multi-tenancy
929
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
930
- - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
931
- - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
932
- api_details (dict): Authentication details obtained from API key validation
931
+ Perform full text search for exact matches within your indexed sources.
933
932
 
934
- Returns:
935
- FullTextSearchData: Success response with full text search results
933
+ Use this endpoint to find content chunks using BM25-based text matching with configurable operators.
934
+ Choose between 'OR' and 'AND' operators to control how search terms are combined for precise text matching.
936
935
 
937
936
  Parameters
938
937
  ----------
939
938
  query : str
939
+ Search terms to find in your content
940
940
 
941
941
  tenant_id : str
942
+ Unique identifier for the tenant/organization
942
943
 
943
944
  sub_tenant_id : typing.Optional[str]
945
+ Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
944
946
 
945
947
  operator : typing.Optional[Bm25OperatorType]
948
+ How to combine search terms (OR or AND)
946
949
 
947
950
  max_chunks : typing.Optional[int]
951
+ Maximum number of results to return
948
952
 
949
953
  request_options : typing.Optional[RequestOptions]
950
954
  Request-specific configuration.
@@ -5,7 +5,6 @@ import typing
5
5
  from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
6
6
  from ..core.request_options import RequestOptions
7
7
  from ..types.list_sources_response import ListSourcesResponse
8
- from ..types.sub_tenant_ids_data import SubTenantIdsData
9
8
  from .raw_client import AsyncRawSourcesClient, RawSourcesClient
10
9
 
11
10
  # this is used as the default value for optional parameters
@@ -31,49 +30,27 @@ class SourcesClient:
31
30
  self,
32
31
  *,
33
32
  tenant_id: str,
34
- sub_tenant_id: typing.Optional[str] = None,
33
+ source_ids: typing.Sequence[str],
34
+ sub_tenant_id: typing.Optional[str] = OMIT,
35
35
  request_options: typing.Optional[RequestOptions] = None,
36
36
  ) -> ListSourcesResponse:
37
37
  """
38
- Parameters
39
- ----------
40
- tenant_id : str
38
+ Retrieve specific sources by their IDs.
41
39
 
42
- sub_tenant_id : typing.Optional[str]
43
-
44
- request_options : typing.Optional[RequestOptions]
45
- Request-specific configuration.
46
-
47
- Returns
48
- -------
49
- ListSourcesResponse
50
- Successful Response
51
-
52
- Examples
53
- --------
54
- from usecortex-ai import CortexAI
40
+ Use this endpoint to fetch one or more sources by providing their unique identifiers. This is useful when you need detailed information about specific documents or content you've previously uploaded.
55
41
 
56
- client = CortexAI(token="YOUR_TOKEN", )
57
- client.sources.get_all(tenant_id='tenant_id', )
58
- """
59
- _response = self._raw_client.get_all(
60
- tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
61
- )
62
- return _response.data
42
+ Provide the source IDs in the request body along with your tenant information to get the exact sources you need.
63
43
 
64
- def get_by_ids(
65
- self,
66
- *,
67
- tenant_id: str,
68
- source_ids: typing.Sequence[str],
69
- request_options: typing.Optional[RequestOptions] = None,
70
- ) -> ListSourcesResponse:
71
- """
72
44
  Parameters
73
45
  ----------
74
46
  tenant_id : str
47
+ Unique identifier for the tenant/organization
75
48
 
76
49
  source_ids : typing.Sequence[str]
50
+ List of source IDs to fetch
51
+
52
+ sub_tenant_id : typing.Optional[str]
53
+ Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
77
54
 
78
55
  request_options : typing.Optional[RequestOptions]
79
56
  Request-specific configuration.
@@ -88,52 +65,13 @@ class SourcesClient:
88
65
  from usecortex-ai import CortexAI
89
66
 
90
67
  client = CortexAI(token="YOUR_TOKEN", )
91
- client.sources.get_by_ids(tenant_id='tenant_id', source_ids=['source_ids'], )
68
+ client.sources.get_all(tenant_id='tenant_1234', source_ids=['CortexDoc1234', 'CortexDoc4567'], )
92
69
  """
93
- _response = self._raw_client.get_by_ids(
94
- tenant_id=tenant_id, source_ids=source_ids, request_options=request_options
70
+ _response = self._raw_client.get_all(
71
+ tenant_id=tenant_id, source_ids=source_ids, sub_tenant_id=sub_tenant_id, request_options=request_options
95
72
  )
96
73
  return _response.data
97
74
 
98
- def get_sub_tenant_ids(
99
- self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
100
- ) -> SubTenantIdsData:
101
- """
102
- Get all sub-tenant IDs (tenant IDs) contained within a specific Weaviate collection.
103
- Fetches the tenant IDs directly from Weaviate using default cluster credentials.
104
-
105
- Args:
106
- tenant_id: The tenant ID to fetch sub-tenant IDs for
107
- api_details: Authentication dependency
108
-
109
- Returns:
110
- SubTenantIdsData: Contains collection_name, sub_tenant_ids list, count, and success message
111
-
112
- Example:
113
- GET /list/sub_tenant_ids?tenant_id=my_tenant_123
114
-
115
- Parameters
116
- ----------
117
- tenant_id : str
118
-
119
- request_options : typing.Optional[RequestOptions]
120
- Request-specific configuration.
121
-
122
- Returns
123
- -------
124
- SubTenantIdsData
125
- Successful Response
126
-
127
- Examples
128
- --------
129
- from usecortex-ai import CortexAI
130
-
131
- client = CortexAI(token="YOUR_TOKEN", )
132
- client.sources.get_sub_tenant_ids(tenant_id='tenant_id', )
133
- """
134
- _response = self._raw_client.get_sub_tenant_ids(tenant_id=tenant_id, request_options=request_options)
135
- return _response.data
136
-
137
75
 
138
76
  class AsyncSourcesClient:
139
77
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -154,53 +92,27 @@ class AsyncSourcesClient:
154
92
  self,
155
93
  *,
156
94
  tenant_id: str,
157
- sub_tenant_id: typing.Optional[str] = None,
95
+ source_ids: typing.Sequence[str],
96
+ sub_tenant_id: typing.Optional[str] = OMIT,
158
97
  request_options: typing.Optional[RequestOptions] = None,
159
98
  ) -> ListSourcesResponse:
160
99
  """
161
- Parameters
162
- ----------
163
- tenant_id : str
100
+ Retrieve specific sources by their IDs.
164
101
 
165
- sub_tenant_id : typing.Optional[str]
166
-
167
- request_options : typing.Optional[RequestOptions]
168
- Request-specific configuration.
169
-
170
- Returns
171
- -------
172
- ListSourcesResponse
173
- Successful Response
174
-
175
- Examples
176
- --------
177
- import asyncio
178
-
179
- from usecortex-ai import AsyncCortexAI
102
+ Use this endpoint to fetch one or more sources by providing their unique identifiers. This is useful when you need detailed information about specific documents or content you've previously uploaded.
180
103
 
181
- client = AsyncCortexAI(token="YOUR_TOKEN", )
182
- async def main() -> None:
183
- await client.sources.get_all(tenant_id='tenant_id', )
184
- asyncio.run(main())
185
- """
186
- _response = await self._raw_client.get_all(
187
- tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
188
- )
189
- return _response.data
104
+ Provide the source IDs in the request body along with your tenant information to get the exact sources you need.
190
105
 
191
- async def get_by_ids(
192
- self,
193
- *,
194
- tenant_id: str,
195
- source_ids: typing.Sequence[str],
196
- request_options: typing.Optional[RequestOptions] = None,
197
- ) -> ListSourcesResponse:
198
- """
199
106
  Parameters
200
107
  ----------
201
108
  tenant_id : str
109
+ Unique identifier for the tenant/organization
202
110
 
203
111
  source_ids : typing.Sequence[str]
112
+ List of source IDs to fetch
113
+
114
+ sub_tenant_id : typing.Optional[str]
115
+ Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
204
116
 
205
117
  request_options : typing.Optional[RequestOptions]
206
118
  Request-specific configuration.
@@ -218,53 +130,10 @@ class AsyncSourcesClient:
218
130
 
219
131
  client = AsyncCortexAI(token="YOUR_TOKEN", )
220
132
  async def main() -> None:
221
- await client.sources.get_by_ids(tenant_id='tenant_id', source_ids=['source_ids'], )
133
+ await client.sources.get_all(tenant_id='tenant_1234', source_ids=['CortexDoc1234', 'CortexDoc4567'], )
222
134
  asyncio.run(main())
223
135
  """
224
- _response = await self._raw_client.get_by_ids(
225
- tenant_id=tenant_id, source_ids=source_ids, request_options=request_options
136
+ _response = await self._raw_client.get_all(
137
+ tenant_id=tenant_id, source_ids=source_ids, sub_tenant_id=sub_tenant_id, request_options=request_options
226
138
  )
227
139
  return _response.data
228
-
229
- async def get_sub_tenant_ids(
230
- self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
231
- ) -> SubTenantIdsData:
232
- """
233
- Get all sub-tenant IDs (tenant IDs) contained within a specific Weaviate collection.
234
- Fetches the tenant IDs directly from Weaviate using default cluster credentials.
235
-
236
- Args:
237
- tenant_id: The tenant ID to fetch sub-tenant IDs for
238
- api_details: Authentication dependency
239
-
240
- Returns:
241
- SubTenantIdsData: Contains collection_name, sub_tenant_ids list, count, and success message
242
-
243
- Example:
244
- GET /list/sub_tenant_ids?tenant_id=my_tenant_123
245
-
246
- Parameters
247
- ----------
248
- tenant_id : str
249
-
250
- request_options : typing.Optional[RequestOptions]
251
- Request-specific configuration.
252
-
253
- Returns
254
- -------
255
- SubTenantIdsData
256
- Successful Response
257
-
258
- Examples
259
- --------
260
- import asyncio
261
-
262
- from usecortex-ai import AsyncCortexAI
263
-
264
- client = AsyncCortexAI(token="YOUR_TOKEN", )
265
- async def main() -> None:
266
- await client.sources.get_sub_tenant_ids(tenant_id='tenant_id', )
267
- asyncio.run(main())
268
- """
269
- _response = await self._raw_client.get_sub_tenant_ids(tenant_id=tenant_id, request_options=request_options)
270
- return _response.data