usecortex-ai 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- usecortex_ai/__init__.py +10 -4
- usecortex_ai/client.py +0 -4
- usecortex_ai/dashboard/client.py +2 -30
- usecortex_ai/dashboard/raw_client.py +0 -28
- usecortex_ai/embeddings/client.py +8 -58
- usecortex_ai/embeddings/raw_client.py +8 -58
- usecortex_ai/fetch/__init__.py +3 -0
- usecortex_ai/fetch/client.py +42 -165
- usecortex_ai/fetch/raw_client.py +38 -341
- usecortex_ai/fetch/types/__init__.py +7 -0
- usecortex_ai/fetch/types/fetch_list_knowledge_response.py +8 -0
- usecortex_ai/raw_client.py +0 -4
- usecortex_ai/search/client.py +40 -108
- usecortex_ai/search/raw_client.py +40 -108
- usecortex_ai/sources/client.py +2 -16
- usecortex_ai/sources/raw_client.py +2 -16
- usecortex_ai/tenant/client.py +4 -108
- usecortex_ai/tenant/raw_client.py +2 -106
- usecortex_ai/types/__init__.py +8 -2
- usecortex_ai/types/forceful_relations_payload.py +27 -0
- usecortex_ai/types/list_content_kind.py +5 -0
- usecortex_ai/types/list_user_memories_response.py +32 -0
- usecortex_ai/types/memory_item.py +7 -1
- usecortex_ai/types/retrieval_result.py +4 -0
- usecortex_ai/types/retrieve_mode.py +1 -1
- usecortex_ai/types/user_memory.py +31 -0
- usecortex_ai/types/vector_store_chunk.py +5 -0
- usecortex_ai/upload/__init__.py +0 -3
- usecortex_ai/upload/client.py +34 -204
- usecortex_ai/upload/raw_client.py +30 -382
- {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/METADATA +1 -1
- {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/RECORD +35 -32
- {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/WHEEL +1 -1
- usecortex_ai/types/app_sources_upload_data.py +0 -39
- usecortex_ai/upload/types/__init__.py +0 -7
- usecortex_ai/upload/types/body_upload_app_ingestion_upload_app_post_app_sources.py +0 -7
- {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/licenses/LICENSE +0 -0
- {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/top_level.txt +0 -0
@@ -40,31 +40,18 @@ class RawSearchClient:
         tenant_id: str,
         query: str,
         sub_tenant_id: typing.Optional[str] = OMIT,
-
+        max_results: typing.Optional[int] = OMIT,
         mode: typing.Optional[RetrieveMode] = OMIT,
         alpha: typing.Optional[Alpha] = OMIT,
         recency_bias: typing.Optional[float] = OMIT,
-
-        personalise_search: typing.Optional[bool] = OMIT,
+        personalise_recall: typing.Optional[bool] = OMIT,
         graph_context: typing.Optional[bool] = OMIT,
-
+        additional_context: typing.Optional[str] = OMIT,
         search_mode: typing.Optional[SearchMode] = OMIT,
+        metadata_filters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> HttpResponse[RetrievalResult]:
         """
-        Search for relevant content within your indexed sources or user memories.
-
-        Results are ranked by relevance and can be customized with parameters like
-        result limits, alpha weighting, and recency preferences.
-
-        Use `search_mode` to specify what to search:
-        - "sources" (default): Search over indexed documents
-        - "memories": Search over user memories (uses inferred content)
-
-        Use `mode` to control retrieval quality:
-        - "fast" (default): Single query, faster response
-        - "accurate": Multi-query generation with reranking, higher quality
-
         Parameters
         ----------
         tenant_id : str
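Taken together, this hunk renames `personalise_search` to `personalise_recall` and adds `max_results`, `additional_context`, and `metadata_filters` to the retrieval signature. A minimal sketch of what a 0.5.1-style call could look like, assuming the public search client forwards the same keyword arguments as `RawSearchClient`; the attribute `client.search` and the method name `full_recall` (guessed from the new `recall/full_recall` path) are assumptions, not confirmed by this diff:

```python
from usecortex_ai import CortexAI  # package directory name as published in the wheel

client = CortexAI(token="YOUR_TOKEN")

# Hypothetical wrapper call mirroring the RawSearchClient keyword arguments above;
# `client.search.full_recall` is an assumed name - check the generated SearchClient.
result = client.search.full_recall(
    tenant_id="tenant_1234",
    query="quarterly roadmap",
    max_results=10,                       # new in 0.5.1
    personalise_recall=True,              # renamed from personalise_search
    additional_context="focus on engineering docs",
    metadata_filters={"category": "engineering", "priority": "high"},  # new in 0.5.1
)
```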
@@ -76,7 +63,7 @@ class RawSearchClient:
         sub_tenant_id : typing.Optional[str]
             Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.

-
+        max_results : typing.Optional[int]
             Maximum number of results to return

         mode : typing.Optional[RetrieveMode]
@@ -88,21 +75,21 @@ class RawSearchClient:
         recency_bias : typing.Optional[float]
             Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)

-
-            Number of related content chunks to include
-
-        personalise_search : typing.Optional[bool]
+        personalise_recall : typing.Optional[bool]
             Enable personalized search results based on user preferences

         graph_context : typing.Optional[bool]
             Enable graph context for search results

-
+        additional_context : typing.Optional[str]
             Additional context provided by the user to guide retrieval

         search_mode : typing.Optional[SearchMode]
             What to search: 'sources' for documents or 'memories' for user memories

+        metadata_filters : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Optional key-value pairs to filter search results by tenant metadata fields. Keys must match fields defined in tenant_metadata_schema during tenant creation. Supports exact match filtering on indexed metadata fields. Example: {'category': 'engineering', 'priority': 'high'}
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

@@ -112,21 +99,21 @@ class RawSearchClient:
             Successful Response
         """
         _response = self._client_wrapper.httpx_client.request(
-            "
+            "recall/full_recall",
             method="POST",
             json={
                 "tenant_id": tenant_id,
                 "sub_tenant_id": sub_tenant_id,
                 "query": query,
-                "
+                "max_results": max_results,
                 "mode": mode,
                 "alpha": convert_and_respect_annotation_metadata(object_=alpha, annotation=Alpha, direction="write"),
                 "recency_bias": recency_bias,
-                "
-                "personalise_search": personalise_search,
+                "personalise_recall": personalise_recall,
                 "graph_context": graph_context,
-                "
+                "additional_context": additional_context,
                 "search_mode": search_mode,
+                "metadata_filters": metadata_filters,
             },
             headers={
                 "content-type": "application/json",
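The request body above now targets `recall/full_recall` and carries the renamed and added fields. For reference, a direct `httpx` sketch of the same payload; the base URL and bearer-token header are assumptions, and only the path and JSON field names come from the hunk:

```python
import httpx

BASE_URL = "https://api.usecortex.ai"  # assumption, not taken from the diff

payload = {
    "tenant_id": "tenant_1234",
    "query": "quarterly roadmap",
    "max_results": 10,
    "mode": "fast",                       # "fast" or "accurate" per the removed docstring
    "recency_bias": 0.2,
    "personalise_recall": True,           # was "personalise_search" in 0.4.0
    "graph_context": False,
    "additional_context": "focus on engineering docs",
    "search_mode": "sources",             # or "memories"
    "metadata_filters": {"category": "engineering"},
}

response = httpx.post(
    f"{BASE_URL}/recall/full_recall",
    json=payload,
    headers={"Authorization": "Bearer YOUR_TOKEN", "content-type": "application/json"},
)
response.raise_for_status()
```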
@@ -244,19 +231,11 @@ class RawSearchClient:
         query: str,
         sub_tenant_id: typing.Optional[str] = OMIT,
         operator: typing.Optional[Bm25OperatorType] = OMIT,
-
+        max_results: typing.Optional[int] = OMIT,
         search_mode: typing.Optional[SearchMode] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> HttpResponse[RetrievalResult]:
         """
-        Perform full text search for exact matches within your indexed sources or memories.
-        Choose between 'OR' and 'AND' operators to control how search terms are combined
-        for precise text matching.
-
-        Use `search_mode` to specify what to search:
-        - "sources" (default): Search over indexed documents
-        - "memories": Search over user memories
-
         Parameters
         ----------
         tenant_id : str
@@ -271,7 +250,7 @@ class RawSearchClient:
         operator : typing.Optional[Bm25OperatorType]
             How to combine search terms (OR or AND)

-
+        max_results : typing.Optional[int]
             Maximum number of results to return

         search_mode : typing.Optional[SearchMode]
@@ -286,14 +265,14 @@ class RawSearchClient:
             Successful Response
         """
         _response = self._client_wrapper.httpx_client.request(
-            "
+            "recall/boolean_recall",
             method="POST",
             json={
                 "tenant_id": tenant_id,
                 "sub_tenant_id": sub_tenant_id,
                 "query": query,
                 "operator": operator,
-                "
+                "max_results": max_results,
                 "search_mode": search_mode,
             },
             headers={
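Boolean (BM25) retrieval likewise moves to `recall/boolean_recall` and gains `max_results`. A short sketch of the request the raw client now builds, again with an assumed base URL and auth header; the field names and path come from the hunk above:

```python
import httpx

response = httpx.post(
    "https://api.usecortex.ai/recall/boolean_recall",  # base URL assumed
    json={
        "tenant_id": "tenant_1234",
        "query": "error budget policy",
        "operator": "AND",        # Bm25OperatorType: combine terms with OR or AND
        "max_results": 5,         # new in 0.5.1
        "search_mode": "sources",
    },
    headers={"Authorization": "Bearer YOUR_TOKEN"},
)
response.raise_for_status()
```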
@@ -424,19 +403,6 @@ class RawSearchClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> HttpResponse[QnASearchResponse]:
         """
-        Ask a question and get an AI-generated answer based on your indexed sources or memories.
-
-        The response includes both the AI answer and the source chunks used to generate it,
-        enabling full transparency and citation capabilities.
-
-        Use `search_mode` to specify what to search:
-        - "sources" (default): Search over indexed documents
-        - "memories": Search over user memories
-
-        Use `mode` to control retrieval quality:
-        - "fast" (default): Single query, faster response
-        - "accurate": Multi-query generation with reranking, higher quality
-
         Parameters
         ----------
         tenant_id : str
@@ -487,7 +453,7 @@ class RawSearchClient:
             Successful Response
         """
         _response = self._client_wrapper.httpx_client.request(
-            "
+            "recall/qna",
             method="POST",
             json={
                 "tenant_id": tenant_id,
@@ -624,31 +590,18 @@ class AsyncRawSearchClient:
         tenant_id: str,
         query: str,
         sub_tenant_id: typing.Optional[str] = OMIT,
-
+        max_results: typing.Optional[int] = OMIT,
         mode: typing.Optional[RetrieveMode] = OMIT,
         alpha: typing.Optional[Alpha] = OMIT,
         recency_bias: typing.Optional[float] = OMIT,
-
-        personalise_search: typing.Optional[bool] = OMIT,
+        personalise_recall: typing.Optional[bool] = OMIT,
         graph_context: typing.Optional[bool] = OMIT,
-
+        additional_context: typing.Optional[str] = OMIT,
         search_mode: typing.Optional[SearchMode] = OMIT,
+        metadata_filters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> AsyncHttpResponse[RetrievalResult]:
         """
-        Search for relevant content within your indexed sources or user memories.
-
-        Results are ranked by relevance and can be customized with parameters like
-        result limits, alpha weighting, and recency preferences.
-
-        Use `search_mode` to specify what to search:
-        - "sources" (default): Search over indexed documents
-        - "memories": Search over user memories (uses inferred content)
-
-        Use `mode` to control retrieval quality:
-        - "fast" (default): Single query, faster response
-        - "accurate": Multi-query generation with reranking, higher quality
-
         Parameters
         ----------
         tenant_id : str
@@ -660,7 +613,7 @@ class AsyncRawSearchClient:
         sub_tenant_id : typing.Optional[str]
             Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.

-
+        max_results : typing.Optional[int]
             Maximum number of results to return

         mode : typing.Optional[RetrieveMode]
@@ -672,21 +625,21 @@ class AsyncRawSearchClient:
         recency_bias : typing.Optional[float]
             Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)

-
-            Number of related content chunks to include
-
-        personalise_search : typing.Optional[bool]
+        personalise_recall : typing.Optional[bool]
             Enable personalized search results based on user preferences

         graph_context : typing.Optional[bool]
             Enable graph context for search results

-
+        additional_context : typing.Optional[str]
             Additional context provided by the user to guide retrieval

         search_mode : typing.Optional[SearchMode]
             What to search: 'sources' for documents or 'memories' for user memories

+        metadata_filters : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Optional key-value pairs to filter search results by tenant metadata fields. Keys must match fields defined in tenant_metadata_schema during tenant creation. Supports exact match filtering on indexed metadata fields. Example: {'category': 'engineering', 'priority': 'high'}
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

@@ -696,21 +649,21 @@ class AsyncRawSearchClient:
             Successful Response
         """
         _response = await self._client_wrapper.httpx_client.request(
-            "
+            "recall/full_recall",
             method="POST",
             json={
                 "tenant_id": tenant_id,
                 "sub_tenant_id": sub_tenant_id,
                 "query": query,
-                "
+                "max_results": max_results,
                 "mode": mode,
                 "alpha": convert_and_respect_annotation_metadata(object_=alpha, annotation=Alpha, direction="write"),
                 "recency_bias": recency_bias,
-                "
-                "personalise_search": personalise_search,
+                "personalise_recall": personalise_recall,
                 "graph_context": graph_context,
-                "
+                "additional_context": additional_context,
                 "search_mode": search_mode,
+                "metadata_filters": metadata_filters,
             },
             headers={
                 "content-type": "application/json",
@@ -828,19 +781,11 @@ class AsyncRawSearchClient:
         query: str,
         sub_tenant_id: typing.Optional[str] = OMIT,
         operator: typing.Optional[Bm25OperatorType] = OMIT,
-
+        max_results: typing.Optional[int] = OMIT,
         search_mode: typing.Optional[SearchMode] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> AsyncHttpResponse[RetrievalResult]:
         """
-        Perform full text search for exact matches within your indexed sources or memories.
-        Choose between 'OR' and 'AND' operators to control how search terms are combined
-        for precise text matching.
-
-        Use `search_mode` to specify what to search:
-        - "sources" (default): Search over indexed documents
-        - "memories": Search over user memories
-
         Parameters
         ----------
         tenant_id : str
@@ -855,7 +800,7 @@ class AsyncRawSearchClient:
         operator : typing.Optional[Bm25OperatorType]
             How to combine search terms (OR or AND)

-
+        max_results : typing.Optional[int]
             Maximum number of results to return

         search_mode : typing.Optional[SearchMode]
@@ -870,14 +815,14 @@ class AsyncRawSearchClient:
             Successful Response
         """
         _response = await self._client_wrapper.httpx_client.request(
-            "
+            "recall/boolean_recall",
             method="POST",
             json={
                 "tenant_id": tenant_id,
                 "sub_tenant_id": sub_tenant_id,
                 "query": query,
                 "operator": operator,
-                "
+                "max_results": max_results,
                 "search_mode": search_mode,
             },
             headers={
@@ -1008,19 +953,6 @@ class AsyncRawSearchClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> AsyncHttpResponse[QnASearchResponse]:
         """
-        Ask a question and get an AI-generated answer based on your indexed sources or memories.
-
-        The response includes both the AI answer and the source chunks used to generate it,
-        enabling full transparency and citation capabilities.
-
-        Use `search_mode` to specify what to search:
-        - "sources" (default): Search over indexed documents
-        - "memories": Search over user memories
-
-        Use `mode` to control retrieval quality:
-        - "fast" (default): Single query, faster response
-        - "accurate": Multi-query generation with reranking, higher quality
-
         Parameters
         ----------
         tenant_id : str
@@ -1071,7 +1003,7 @@ class AsyncRawSearchClient:
             Successful Response
         """
         _response = await self._client_wrapper.httpx_client.request(
-            "
+            "recall/qna",
             method="POST",
             json={
                 "tenant_id": tenant_id,
usecortex_ai/sources/client.py CHANGED
@@ -35,13 +35,6 @@ class SourcesClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> SourceDeleteResponse:
         """
-        Remove documents and content from your knowledge base.
-
-        This endpoint permanently deletes the specified sources from your knowledge base.
-        Once deleted, the content will no longer be available for search or retrieval.
-
-        Use this carefully as the action cannot be undone. The system will confirm successful deletion of each source ID you specify.
-
         Parameters
         ----------
         tenant_id : str
@@ -64,7 +57,7 @@ class SourcesClient:
         from usecortex-ai import CortexAI

         client = CortexAI(token="YOUR_TOKEN", )
-        client.sources.delete(tenant_id='
+        client.sources.delete(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', source_ids=['source_ids'], )
         """
         _response = self._raw_client.delete(
             tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, source_ids=source_ids, request_options=request_options
@@ -96,13 +89,6 @@ class AsyncSourcesClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> SourceDeleteResponse:
         """
-        Remove documents and content from your knowledge base.
-
-        This endpoint permanently deletes the specified sources from your knowledge base.
-        Once deleted, the content will no longer be available for search or retrieval.
-
-        Use this carefully as the action cannot be undone. The system will confirm successful deletion of each source ID you specify.
-
         Parameters
         ----------
         tenant_id : str
@@ -128,7 +114,7 @@ class AsyncSourcesClient:

         client = AsyncCortexAI(token="YOUR_TOKEN", )
         async def main() -> None:
-            await client.sources.delete(tenant_id='
+            await client.sources.delete(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', source_ids=['source_ids'], )
         asyncio.run(main())
         """
         _response = await self._raw_client.delete(
usecortex_ai/sources/raw_client.py CHANGED

@@ -34,13 +34,6 @@ class RawSourcesClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> HttpResponse[SourceDeleteResponse]:
         """
-        Remove documents and content from your knowledge base.
-
-        This endpoint permanently deletes the specified sources from your knowledge base.
-        Once deleted, the content will no longer be available for search or retrieval.
-
-        Use this carefully as the action cannot be undone. The system will confirm successful deletion of each source ID you specify.
-
         Parameters
         ----------
         tenant_id : str
@@ -59,7 +52,7 @@ class RawSourcesClient:
             Successful Response
         """
         _response = self._client_wrapper.httpx_client.request(
-            "
+            "knowledge/delete_knowledge",
             method="POST",
             json={
                 "tenant_id": tenant_id,
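Source deletion now posts to `knowledge/delete_knowledge`. A sketch of the request; the base URL and auth header are assumptions, and the `sub_tenant_id`/`source_ids` JSON keys are inferred from the `SourcesClient.delete` keyword arguments shown earlier rather than verbatim from this hunk:

```python
import httpx

response = httpx.post(
    "https://api.usecortex.ai/knowledge/delete_knowledge",  # base URL assumed
    json={
        "tenant_id": "tenant_1234",
        "sub_tenant_id": "sub_tenant_4567",
        "source_ids": ["source_ids"],  # IDs to permanently delete
    },
    headers={"Authorization": "Bearer YOUR_TOKEN"},
)
response.raise_for_status()
```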
@@ -178,13 +171,6 @@ class AsyncRawSourcesClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> AsyncHttpResponse[SourceDeleteResponse]:
         """
-        Remove documents and content from your knowledge base.
-
-        This endpoint permanently deletes the specified sources from your knowledge base.
-        Once deleted, the content will no longer be available for search or retrieval.
-
-        Use this carefully as the action cannot be undone. The system will confirm successful deletion of each source ID you specify.
-
         Parameters
         ----------
         tenant_id : str
@@ -203,7 +189,7 @@ class AsyncRawSourcesClient:
             Successful Response
         """
         _response = await self._client_wrapper.httpx_client.request(
-            "
+            "knowledge/delete_knowledge",
             method="POST",
             json={
                 "tenant_id": tenant_id,
usecortex_ai/tenant/client.py CHANGED
@@ -38,47 +38,6 @@ class TenantClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> TenantCreateResponse:
         """
-        Create a tenant for your account.
-
-        Use this endpoint to initialize a tenant space you can use for ingestion, embeddings, and search.
-
-        **Tenant Metadata Schema**
-
-        You can optionally provide a `tenant_metadata_schema` to define custom fields that will be
-        indexed in the vector store. Each field can be configured with:
-
-        - `enable_match`: Enable text filtering on this field
-        - `enable_dense_embedding`: Create dense embeddings for semantic similarity search
-        - `enable_sparse_embedding`: Create sparse embeddings (BM25) for keyword search
-
-        **Example Request:**
-        ```json
-        {
-            "tenant_id": "my-tenant",
-            "tenant_metadata_schema": [
-                {
-                    "name": "category",
-                    "data_type": "VARCHAR",
-                    "max_length": 256,
-                    "enable_match": true
-                },
-                {
-                    "name": "product_description",
-                    "data_type": "VARCHAR",
-                    "max_length": 4096,
-                    "enable_dense_embedding": true,
-                    "enable_sparse_embedding": true
-                }
-            ]
-        }
-        ```
-
-        Expected outcome:
-        - A tenant is created and returned with its identifier.
-        - If tenant_metadata_schema is provided, the vector store collection will include
-          the specified custom fields with their configured search capabilities.
-        - If the tenant already exists, you receive a success message with the existing identifier.
-
         Parameters
         ----------
         tenant_id : str
@@ -106,7 +65,7 @@ class TenantClient:
         from usecortex-ai import CortexAI

         client = CortexAI(token="YOUR_TOKEN", )
-        client.tenant.create_tenant(tenant_id='
+        client.tenant.create_tenant(tenant_id='tenant_1234', )
         """
         _response = self._raw_client.create_tenant(
             tenant_id=tenant_id,
@@ -119,17 +78,6 @@ class TenantClient:

     def stats(self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None) -> TenantStatsResponse:
         """
-        Retrieve usage stats for your tenant.
-
-        Use this endpoint to check whether a tenant exists and view core metrics like total
-        indexed objects and vector dimension. This helps you validate
-        setup and monitor ingestion.
-
-        Expected outcome
-
-        You receive the current object count and vector dimension for the tenant.
-        If the tenant does not exist, you get a not-found error.
-
         Parameters
         ----------
         tenant_id : str
@@ -148,7 +96,7 @@ class TenantClient:
         from usecortex-ai import CortexAI

         client = CortexAI(token="YOUR_TOKEN", )
-        client.tenant.stats(tenant_id='
+        client.tenant.stats(tenant_id='tenant_1234', )
         """
         _response = self._raw_client.stats(tenant_id=tenant_id, request_options=request_options)
         return _response.data
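The removed `create_tenant` docstring above documented `tenant_metadata_schema`, and the new `metadata_filters` parameter on the recall methods filters on exactly those fields. A hedged end-to-end sketch tying the two together; the `tenant_metadata_schema=` keyword and the `client.search.full_recall` method name are assumptions for illustration only:

```python
from usecortex_ai import CortexAI

client = CortexAI(token="YOUR_TOKEN")

# 1. Create a tenant with custom metadata fields. The schema shape follows the
#    JSON example in the removed docstring; the keyword name
#    `tenant_metadata_schema` is an assumption.
client.tenant.create_tenant(
    tenant_id="tenant_1234",
    tenant_metadata_schema=[
        {"name": "category", "data_type": "VARCHAR", "max_length": 256, "enable_match": True},
    ],
)

# 2. Later, narrow recall to documents matching those fields via the new
#    metadata_filters parameter (search method name is hypothetical).
client.search.full_recall(
    tenant_id="tenant_1234",
    query="onboarding checklist",
    metadata_filters={"category": "engineering"},
)
```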
@@ -179,47 +127,6 @@ class AsyncTenantClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> TenantCreateResponse:
         """
-        Create a tenant for your account.
-
-        Use this endpoint to initialize a tenant space you can use for ingestion, embeddings, and search.
-
-        **Tenant Metadata Schema**
-
-        You can optionally provide a `tenant_metadata_schema` to define custom fields that will be
-        indexed in the vector store. Each field can be configured with:
-
-        - `enable_match`: Enable text filtering on this field
-        - `enable_dense_embedding`: Create dense embeddings for semantic similarity search
-        - `enable_sparse_embedding`: Create sparse embeddings (BM25) for keyword search
-
-        **Example Request:**
-        ```json
-        {
-            "tenant_id": "my-tenant",
-            "tenant_metadata_schema": [
-                {
-                    "name": "category",
-                    "data_type": "VARCHAR",
-                    "max_length": 256,
-                    "enable_match": true
-                },
-                {
-                    "name": "product_description",
-                    "data_type": "VARCHAR",
-                    "max_length": 4096,
-                    "enable_dense_embedding": true,
-                    "enable_sparse_embedding": true
-                }
-            ]
-        }
-        ```
-
-        Expected outcome:
-        - A tenant is created and returned with its identifier.
-        - If tenant_metadata_schema is provided, the vector store collection will include
-          the specified custom fields with their configured search capabilities.
-        - If the tenant already exists, you receive a success message with the existing identifier.
-
         Parameters
         ----------
         tenant_id : str
@@ -250,7 +157,7 @@ class AsyncTenantClient:

         client = AsyncCortexAI(token="YOUR_TOKEN", )
         async def main() -> None:
-            await client.tenant.create_tenant(tenant_id='
+            await client.tenant.create_tenant(tenant_id='tenant_1234', )
         asyncio.run(main())
         """
         _response = await self._raw_client.create_tenant(
@@ -266,17 +173,6 @@ class AsyncTenantClient:
         self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
     ) -> TenantStatsResponse:
         """
-        Retrieve usage stats for your tenant.
-
-        Use this endpoint to check whether a tenant exists and view core metrics like total
-        indexed objects and vector dimension. This helps you validate
-        setup and monitor ingestion.
-
-        Expected outcome
-
-        You receive the current object count and vector dimension for the tenant.
-        If the tenant does not exist, you get a not-found error.
-
         Parameters
         ----------
         tenant_id : str
@@ -298,7 +194,7 @@ class AsyncTenantClient:

         client = AsyncCortexAI(token="YOUR_TOKEN", )
         async def main() -> None:
-            await client.tenant.stats(tenant_id='
+            await client.tenant.stats(tenant_id='tenant_1234', )
         asyncio.run(main())
         """
         _response = await self._raw_client.stats(tenant_id=tenant_id, request_options=request_options)