usecortex-ai 0.5.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. usecortex_ai/__init__.py +8 -4
  2. usecortex_ai/client.py +0 -4
  3. usecortex_ai/dashboard/client.py +2 -30
  4. usecortex_ai/dashboard/raw_client.py +0 -28
  5. usecortex_ai/embeddings/client.py +8 -58
  6. usecortex_ai/embeddings/raw_client.py +8 -58
  7. usecortex_ai/fetch/__init__.py +3 -0
  8. usecortex_ai/fetch/client.py +42 -165
  9. usecortex_ai/fetch/raw_client.py +38 -341
  10. usecortex_ai/fetch/types/__init__.py +7 -0
  11. usecortex_ai/fetch/types/fetch_list_knowledge_response.py +8 -0
  12. usecortex_ai/raw_client.py +0 -4
  13. usecortex_ai/search/client.py +36 -124
  14. usecortex_ai/search/raw_client.py +36 -124
  15. usecortex_ai/sources/client.py +2 -16
  16. usecortex_ai/sources/raw_client.py +2 -16
  17. usecortex_ai/tenant/client.py +4 -108
  18. usecortex_ai/tenant/raw_client.py +2 -106
  19. usecortex_ai/types/__init__.py +6 -2
  20. usecortex_ai/types/list_content_kind.py +5 -0
  21. usecortex_ai/types/list_user_memories_response.py +32 -0
  22. usecortex_ai/types/retrieval_result.py +1 -1
  23. usecortex_ai/types/retrieve_mode.py +1 -1
  24. usecortex_ai/types/user_memory.py +31 -0
  25. usecortex_ai/upload/__init__.py +0 -3
  26. usecortex_ai/upload/client.py +34 -204
  27. usecortex_ai/upload/raw_client.py +30 -382
  28. {usecortex_ai-0.5.0.dist-info → usecortex_ai-0.5.1.dist-info}/METADATA +1 -1
  29. {usecortex_ai-0.5.0.dist-info → usecortex_ai-0.5.1.dist-info}/RECORD +32 -30
  30. {usecortex_ai-0.5.0.dist-info → usecortex_ai-0.5.1.dist-info}/licenses/LICENSE +21 -21
  31. {usecortex_ai-0.5.0.dist-info → usecortex_ai-0.5.1.dist-info}/top_level.txt +0 -0
  32. usecortex_ai/types/app_sources_upload_data.py +0 -39
  33. usecortex_ai/upload/types/__init__.py +0 -7
  34. usecortex_ai/upload/types/body_upload_app_ingestion_upload_app_post_app_sources.py +0 -7
  35. {usecortex_ai-0.5.0.dist-info → usecortex_ai-0.5.1.dist-info}/WHEEL +0 -0
@@ -40,37 +40,18 @@ class RawSearchClient:
  tenant_id: str,
  query: str,
  sub_tenant_id: typing.Optional[str] = OMIT,
- max_chunks: typing.Optional[int] = OMIT,
+ max_results: typing.Optional[int] = OMIT,
  mode: typing.Optional[RetrieveMode] = OMIT,
  alpha: typing.Optional[Alpha] = OMIT,
  recency_bias: typing.Optional[float] = OMIT,
- num_related_chunks: typing.Optional[int] = OMIT,
- personalise_search: typing.Optional[bool] = OMIT,
+ personalise_recall: typing.Optional[bool] = OMIT,
  graph_context: typing.Optional[bool] = OMIT,
- extra_context: typing.Optional[str] = OMIT,
+ additional_context: typing.Optional[str] = OMIT,
  search_mode: typing.Optional[SearchMode] = OMIT,
- filters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ metadata_filters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> HttpResponse[RetrievalResult]:
  """
- Search for relevant content within your indexed sources or user memories.
-
- Results are ranked by relevance and can be customized with parameters like
- result limits, alpha weighting, and recency preferences.
-
- Use `search_mode` to specify what to search:
- - "sources" (default): Search over indexed documents
- - "memories": Search over user memories (uses inferred content)
-
- Use `mode` to control retrieval quality:
- - "fast" (default): Single query, faster response
- - "accurate": Multi-query generation with reranking, higher quality
-
- Use `filters` to narrow results by metadata:
- - Provide key-value pairs matching fields defined in your tenant_metadata_schema
- - Example: `{"category": "engineering", "priority": "high"}`
- - Filters are validated against your tenant schema for type safety
-
  Parameters
  ----------
  tenant_id : str
@@ -82,7 +63,7 @@ class RawSearchClient:
  sub_tenant_id : typing.Optional[str]
  Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.

- max_chunks : typing.Optional[int]
+ max_results : typing.Optional[int]
  Maximum number of results to return

  mode : typing.Optional[RetrieveMode]
@@ -94,22 +75,19 @@ class RawSearchClient:
  recency_bias : typing.Optional[float]
  Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)

- num_related_chunks : typing.Optional[int]
- Number of related content chunks to include
-
- personalise_search : typing.Optional[bool]
+ personalise_recall : typing.Optional[bool]
  Enable personalized search results based on user preferences

  graph_context : typing.Optional[bool]
  Enable graph context for search results

- extra_context : typing.Optional[str]
+ additional_context : typing.Optional[str]
  Additional context provided by the user to guide retrieval

  search_mode : typing.Optional[SearchMode]
  What to search: 'sources' for documents or 'memories' for user memories

- filters : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ metadata_filters : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Optional key-value pairs to filter search results by tenant metadata fields. Keys must match fields defined in tenant_metadata_schema during tenant creation. Supports exact match filtering on indexed metadata fields. Example: {'category': 'engineering', 'priority': 'high'}

  request_options : typing.Optional[RequestOptions]
@@ -121,22 +99,21 @@ class RawSearchClient:
  Successful Response
  """
  _response = self._client_wrapper.httpx_client.request(
- "search/hybrid-search",
+ "recall/full_recall",
  method="POST",
  json={
  "tenant_id": tenant_id,
  "sub_tenant_id": sub_tenant_id,
  "query": query,
- "max_chunks": max_chunks,
+ "max_results": max_results,
  "mode": mode,
  "alpha": convert_and_respect_annotation_metadata(object_=alpha, annotation=Alpha, direction="write"),
  "recency_bias": recency_bias,
- "num_related_chunks": num_related_chunks,
- "personalise_search": personalise_search,
+ "personalise_recall": personalise_recall,
  "graph_context": graph_context,
- "extra_context": extra_context,
+ "additional_context": additional_context,
  "search_mode": search_mode,
- "filters": filters,
+ "metadata_filters": metadata_filters,
  },
  headers={
  "content-type": "application/json",
@@ -254,19 +231,11 @@ class RawSearchClient:
  query: str,
  sub_tenant_id: typing.Optional[str] = OMIT,
  operator: typing.Optional[Bm25OperatorType] = OMIT,
- max_chunks: typing.Optional[int] = OMIT,
+ max_results: typing.Optional[int] = OMIT,
  search_mode: typing.Optional[SearchMode] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> HttpResponse[RetrievalResult]:
  """
- Perform full text search for exact matches within your indexed sources or memories.
- Choose between 'OR' and 'AND' operators to control how search terms are combined
- for precise text matching.
-
- Use `search_mode` to specify what to search:
- - "sources" (default): Search over indexed documents
- - "memories": Search over user memories
-
  Parameters
  ----------
  tenant_id : str
@@ -281,7 +250,7 @@ class RawSearchClient:
  operator : typing.Optional[Bm25OperatorType]
  How to combine search terms (OR or AND)

- max_chunks : typing.Optional[int]
+ max_results : typing.Optional[int]
  Maximum number of results to return

  search_mode : typing.Optional[SearchMode]
@@ -296,14 +265,14 @@ class RawSearchClient:
  Successful Response
  """
  _response = self._client_wrapper.httpx_client.request(
- "search/full-text-search",
+ "recall/boolean_recall",
  method="POST",
  json={
  "tenant_id": tenant_id,
  "sub_tenant_id": sub_tenant_id,
  "query": query,
  "operator": operator,
- "max_chunks": max_chunks,
+ "max_results": max_results,
  "search_mode": search_mode,
  },
  headers={
@@ -434,19 +403,6 @@ class RawSearchClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> HttpResponse[QnASearchResponse]:
  """
- Ask a question and get an AI-generated answer based on your indexed sources or memories.
-
- The response includes both the AI answer and the source chunks used to generate it,
- enabling full transparency and citation capabilities.
-
- Use `search_mode` to specify what to search:
- - "sources" (default): Search over indexed documents
- - "memories": Search over user memories
-
- Use `mode` to control retrieval quality:
- - "fast" (default): Single query, faster response
- - "accurate": Multi-query generation with reranking, higher quality
-
  Parameters
  ----------
  tenant_id : str
@@ -497,7 +453,7 @@ class RawSearchClient:
  Successful Response
  """
  _response = self._client_wrapper.httpx_client.request(
- "search/qna",
+ "recall/qna",
  method="POST",
  json={
  "tenant_id": tenant_id,
@@ -634,37 +590,18 @@ class AsyncRawSearchClient:
  tenant_id: str,
  query: str,
  sub_tenant_id: typing.Optional[str] = OMIT,
- max_chunks: typing.Optional[int] = OMIT,
+ max_results: typing.Optional[int] = OMIT,
  mode: typing.Optional[RetrieveMode] = OMIT,
  alpha: typing.Optional[Alpha] = OMIT,
  recency_bias: typing.Optional[float] = OMIT,
- num_related_chunks: typing.Optional[int] = OMIT,
- personalise_search: typing.Optional[bool] = OMIT,
+ personalise_recall: typing.Optional[bool] = OMIT,
  graph_context: typing.Optional[bool] = OMIT,
- extra_context: typing.Optional[str] = OMIT,
+ additional_context: typing.Optional[str] = OMIT,
  search_mode: typing.Optional[SearchMode] = OMIT,
- filters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ metadata_filters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AsyncHttpResponse[RetrievalResult]:
  """
- Search for relevant content within your indexed sources or user memories.
-
- Results are ranked by relevance and can be customized with parameters like
- result limits, alpha weighting, and recency preferences.
-
- Use `search_mode` to specify what to search:
- - "sources" (default): Search over indexed documents
- - "memories": Search over user memories (uses inferred content)
-
- Use `mode` to control retrieval quality:
- - "fast" (default): Single query, faster response
- - "accurate": Multi-query generation with reranking, higher quality
-
- Use `filters` to narrow results by metadata:
- - Provide key-value pairs matching fields defined in your tenant_metadata_schema
- - Example: `{"category": "engineering", "priority": "high"}`
- - Filters are validated against your tenant schema for type safety
-
  Parameters
  ----------
  tenant_id : str
@@ -676,7 +613,7 @@ class AsyncRawSearchClient:
  sub_tenant_id : typing.Optional[str]
  Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.

- max_chunks : typing.Optional[int]
+ max_results : typing.Optional[int]
  Maximum number of results to return

  mode : typing.Optional[RetrieveMode]
@@ -688,22 +625,19 @@ class AsyncRawSearchClient:
  recency_bias : typing.Optional[float]
  Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)

- num_related_chunks : typing.Optional[int]
- Number of related content chunks to include
-
- personalise_search : typing.Optional[bool]
+ personalise_recall : typing.Optional[bool]
  Enable personalized search results based on user preferences

  graph_context : typing.Optional[bool]
  Enable graph context for search results

- extra_context : typing.Optional[str]
+ additional_context : typing.Optional[str]
  Additional context provided by the user to guide retrieval

  search_mode : typing.Optional[SearchMode]
  What to search: 'sources' for documents or 'memories' for user memories

- filters : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ metadata_filters : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Optional key-value pairs to filter search results by tenant metadata fields. Keys must match fields defined in tenant_metadata_schema during tenant creation. Supports exact match filtering on indexed metadata fields. Example: {'category': 'engineering', 'priority': 'high'}

  request_options : typing.Optional[RequestOptions]
@@ -715,22 +649,21 @@ class AsyncRawSearchClient:
  Successful Response
  """
  _response = await self._client_wrapper.httpx_client.request(
- "search/hybrid-search",
+ "recall/full_recall",
  method="POST",
  json={
  "tenant_id": tenant_id,
  "sub_tenant_id": sub_tenant_id,
  "query": query,
- "max_chunks": max_chunks,
+ "max_results": max_results,
  "mode": mode,
  "alpha": convert_and_respect_annotation_metadata(object_=alpha, annotation=Alpha, direction="write"),
  "recency_bias": recency_bias,
- "num_related_chunks": num_related_chunks,
- "personalise_search": personalise_search,
+ "personalise_recall": personalise_recall,
  "graph_context": graph_context,
- "extra_context": extra_context,
+ "additional_context": additional_context,
  "search_mode": search_mode,
- "filters": filters,
+ "metadata_filters": metadata_filters,
  },
  headers={
  "content-type": "application/json",
@@ -848,19 +781,11 @@ class AsyncRawSearchClient:
  query: str,
  sub_tenant_id: typing.Optional[str] = OMIT,
  operator: typing.Optional[Bm25OperatorType] = OMIT,
- max_chunks: typing.Optional[int] = OMIT,
+ max_results: typing.Optional[int] = OMIT,
  search_mode: typing.Optional[SearchMode] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AsyncHttpResponse[RetrievalResult]:
  """
- Perform full text search for exact matches within your indexed sources or memories.
- Choose between 'OR' and 'AND' operators to control how search terms are combined
- for precise text matching.
-
- Use `search_mode` to specify what to search:
- - "sources" (default): Search over indexed documents
- - "memories": Search over user memories
-
  Parameters
  ----------
  tenant_id : str
@@ -875,7 +800,7 @@ class AsyncRawSearchClient:
  operator : typing.Optional[Bm25OperatorType]
  How to combine search terms (OR or AND)

- max_chunks : typing.Optional[int]
+ max_results : typing.Optional[int]
  Maximum number of results to return

  search_mode : typing.Optional[SearchMode]
@@ -890,14 +815,14 @@ class AsyncRawSearchClient:
  Successful Response
  """
  _response = await self._client_wrapper.httpx_client.request(
- "search/full-text-search",
+ "recall/boolean_recall",
  method="POST",
  json={
  "tenant_id": tenant_id,
  "sub_tenant_id": sub_tenant_id,
  "query": query,
  "operator": operator,
- "max_chunks": max_chunks,
+ "max_results": max_results,
  "search_mode": search_mode,
  },
  headers={
@@ -1028,19 +953,6 @@ class AsyncRawSearchClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AsyncHttpResponse[QnASearchResponse]:
  """
- Ask a question and get an AI-generated answer based on your indexed sources or memories.
-
- The response includes both the AI answer and the source chunks used to generate it,
- enabling full transparency and citation capabilities.
-
- Use `search_mode` to specify what to search:
- - "sources" (default): Search over indexed documents
- - "memories": Search over user memories
-
- Use `mode` to control retrieval quality:
- - "fast" (default): Single query, faster response
- - "accurate": Multi-query generation with reranking, higher quality
-
  Parameters
  ----------
  tenant_id : str
@@ -1091,7 +1003,7 @@ class AsyncRawSearchClient:
  Successful Response
  """
  _response = await self._client_wrapper.httpx_client.request(
- "search/qna",
+ "recall/qna",
  method="POST",
  json={
  "tenant_id": tenant_id,
@@ -35,13 +35,6 @@ class SourcesClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> SourceDeleteResponse:
  """
- Remove documents and content from your knowledge base.
-
- This endpoint permanently deletes the specified sources from your knowledge base.
- Once deleted, the content will no longer be available for search or retrieval.
-
- Use this carefully as the action cannot be undone. The system will confirm successful deletion of each source ID you specify.
-
  Parameters
  ----------
  tenant_id : str
@@ -64,7 +57,7 @@ class SourcesClient:
  from usecortex-ai import CortexAI

  client = CortexAI(token="YOUR_TOKEN", )
- client.sources.delete(tenant_id='tenant_id', sub_tenant_id='sub_tenant_id', source_ids=['source_ids'], )
+ client.sources.delete(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', source_ids=['source_ids'], )
  """
  _response = self._raw_client.delete(
  tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, source_ids=source_ids, request_options=request_options
@@ -96,13 +89,6 @@ class AsyncSourcesClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> SourceDeleteResponse:
  """
- Remove documents and content from your knowledge base.
-
- This endpoint permanently deletes the specified sources from your knowledge base.
- Once deleted, the content will no longer be available for search or retrieval.
-
- Use this carefully as the action cannot be undone. The system will confirm successful deletion of each source ID you specify.
-
  Parameters
  ----------
  tenant_id : str
@@ -128,7 +114,7 @@ class AsyncSourcesClient:

  client = AsyncCortexAI(token="YOUR_TOKEN", )
  async def main() -> None:
- await client.sources.delete(tenant_id='tenant_id', sub_tenant_id='sub_tenant_id', source_ids=['source_ids'], )
+ await client.sources.delete(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', source_ids=['source_ids'], )
  asyncio.run(main())
  """
  _response = await self._raw_client.delete(
@@ -34,13 +34,6 @@ class RawSourcesClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> HttpResponse[SourceDeleteResponse]:
  """
- Remove documents and content from your knowledge base.
-
- This endpoint permanently deletes the specified sources from your knowledge base.
- Once deleted, the content will no longer be available for search or retrieval.
-
- Use this carefully as the action cannot be undone. The system will confirm successful deletion of each source ID you specify.
-
  Parameters
  ----------
  tenant_id : str
@@ -59,7 +52,7 @@ class RawSourcesClient:
  Successful Response
  """
  _response = self._client_wrapper.httpx_client.request(
- "delete/delete-sources",
+ "knowledge/delete_knowledge",
  method="POST",
  json={
  "tenant_id": tenant_id,
@@ -178,13 +171,6 @@ class AsyncRawSourcesClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AsyncHttpResponse[SourceDeleteResponse]:
  """
- Remove documents and content from your knowledge base.
-
- This endpoint permanently deletes the specified sources from your knowledge base.
- Once deleted, the content will no longer be available for search or retrieval.
-
- Use this carefully as the action cannot be undone. The system will confirm successful deletion of each source ID you specify.
-
  Parameters
  ----------
  tenant_id : str
@@ -203,7 +189,7 @@ class AsyncRawSourcesClient:
  Successful Response
  """
  _response = await self._client_wrapper.httpx_client.request(
- "delete/delete-sources",
+ "knowledge/delete_knowledge",
  method="POST",
  json={
  "tenant_id": tenant_id,
@@ -38,47 +38,6 @@ class TenantClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> TenantCreateResponse:
  """
- Create a tenant for your account.
-
- Use this endpoint to initialize a tenant space you can use for ingestion, embeddings, and search.
-
- **Tenant Metadata Schema**
-
- You can optionally provide a `tenant_metadata_schema` to define custom fields that will be
- indexed in the vector store. Each field can be configured with:
-
- - `enable_match`: Enable text filtering on this field
- - `enable_dense_embedding`: Create dense embeddings for semantic similarity search
- - `enable_sparse_embedding`: Create sparse embeddings (BM25) for keyword search
-
- **Example Request:**
- ```json
- {
- "tenant_id": "my-tenant",
- "tenant_metadata_schema": [
- {
- "name": "category",
- "data_type": "VARCHAR",
- "max_length": 256,
- "enable_match": true
- },
- {
- "name": "product_description",
- "data_type": "VARCHAR",
- "max_length": 4096,
- "enable_dense_embedding": true,
- "enable_sparse_embedding": true
- }
- ]
- }
- ```
-
- Expected outcome:
- - A tenant is created and returned with its identifier.
- - If tenant_metadata_schema is provided, the vector store collection will include
- the specified custom fields with their configured search capabilities.
- - If the tenant already exists, you receive a success message with the existing identifier.
-
  Parameters
  ----------
  tenant_id : str
@@ -106,7 +65,7 @@ class TenantClient:
  from usecortex-ai import CortexAI

  client = CortexAI(token="YOUR_TOKEN", )
- client.tenant.create_tenant(tenant_id='tenant_id', )
+ client.tenant.create_tenant(tenant_id='tenant_1234', )
  """
  _response = self._raw_client.create_tenant(
  tenant_id=tenant_id,
@@ -119,17 +78,6 @@ class TenantClient:

  def stats(self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None) -> TenantStatsResponse:
  """
- Retrieve usage stats for your tenant.
-
- Use this endpoint to check whether a tenant exists and view core metrics like total
- indexed objects and vector dimension. This helps you validate
- setup and monitor ingestion.
-
- Expected outcome
-
- You receive the current object count and vector dimension for the tenant.
- If the tenant does not exist, you get a not-found error.
-
  Parameters
  ----------
  tenant_id : str
@@ -148,7 +96,7 @@ class TenantClient:
  from usecortex-ai import CortexAI

  client = CortexAI(token="YOUR_TOKEN", )
- client.tenant.stats(tenant_id='tenant_id', )
+ client.tenant.stats(tenant_id='tenant_1234', )
  """
  _response = self._raw_client.stats(tenant_id=tenant_id, request_options=request_options)
  return _response.data
@@ -179,47 +127,6 @@ class AsyncTenantClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> TenantCreateResponse:
  """
- Create a tenant for your account.
-
- Use this endpoint to initialize a tenant space you can use for ingestion, embeddings, and search.
-
- **Tenant Metadata Schema**
-
- You can optionally provide a `tenant_metadata_schema` to define custom fields that will be
- indexed in the vector store. Each field can be configured with:
-
- - `enable_match`: Enable text filtering on this field
- - `enable_dense_embedding`: Create dense embeddings for semantic similarity search
- - `enable_sparse_embedding`: Create sparse embeddings (BM25) for keyword search
-
- **Example Request:**
- ```json
- {
- "tenant_id": "my-tenant",
- "tenant_metadata_schema": [
- {
- "name": "category",
- "data_type": "VARCHAR",
- "max_length": 256,
- "enable_match": true
- },
- {
- "name": "product_description",
- "data_type": "VARCHAR",
- "max_length": 4096,
- "enable_dense_embedding": true,
- "enable_sparse_embedding": true
- }
- ]
- }
- ```
-
- Expected outcome:
- - A tenant is created and returned with its identifier.
- - If tenant_metadata_schema is provided, the vector store collection will include
- the specified custom fields with their configured search capabilities.
- - If the tenant already exists, you receive a success message with the existing identifier.
-
  Parameters
  ----------
  tenant_id : str
@@ -250,7 +157,7 @@ class AsyncTenantClient:

  client = AsyncCortexAI(token="YOUR_TOKEN", )
  async def main() -> None:
- await client.tenant.create_tenant(tenant_id='tenant_id', )
+ await client.tenant.create_tenant(tenant_id='tenant_1234', )
  asyncio.run(main())
  """
  _response = await self._raw_client.create_tenant(
@@ -266,17 +173,6 @@ class AsyncTenantClient:
  self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
  ) -> TenantStatsResponse:
  """
- Retrieve usage stats for your tenant.
-
- Use this endpoint to check whether a tenant exists and view core metrics like total
- indexed objects and vector dimension. This helps you validate
- setup and monitor ingestion.
-
- Expected outcome
-
- You receive the current object count and vector dimension for the tenant.
- If the tenant does not exist, you get a not-found error.
-
  Parameters
  ----------
  tenant_id : str
@@ -298,7 +194,7 @@ class AsyncTenantClient:

  client = AsyncCortexAI(token="YOUR_TOKEN", )
  async def main() -> None:
- await client.tenant.stats(tenant_id='tenant_id', )
+ await client.tenant.stats(tenant_id='tenant_1234', )
  asyncio.run(main())
  """
  _response = await self._raw_client.stats(tenant_id=tenant_id, request_options=request_options)
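
For reference, the wire-level request paths renamed across these hunks; the mapping below is only a reading aid compiled from the diff, not an SDK artifact. In this diff the SDK methods themselves (for example `client.sources.delete` and `client.tenant.stats`) keep their names and build the new paths internally.

```python
# Old (0.5.0) POST path -> new (0.5.1) POST path, as shown in the raw-client hunks above.
ENDPOINT_RENAMES = {
    "search/hybrid-search": "recall/full_recall",
    "search/full-text-search": "recall/boolean_recall",
    "search/qna": "recall/qna",
    "delete/delete-sources": "knowledge/delete_knowledge",
}
```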