usecortex-ai 0.2.1__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- usecortex_ai/__init__.py +18 -3
- usecortex_ai/client.py +7 -78
- usecortex_ai/core/client_wrapper.py +6 -8
- usecortex_ai/document/__init__.py +4 -0
- usecortex_ai/document/client.py +139 -0
- usecortex_ai/document/raw_client.py +312 -0
- usecortex_ai/embeddings/client.py +40 -70
- usecortex_ai/embeddings/raw_client.py +40 -70
- usecortex_ai/fetch/client.py +10 -0
- usecortex_ai/fetch/raw_client.py +10 -0
- usecortex_ai/search/client.py +74 -80
- usecortex_ai/search/raw_client.py +74 -80
- usecortex_ai/sources/client.py +28 -83
- usecortex_ai/sources/raw_client.py +28 -259
- usecortex_ai/tenant/client.py +152 -4
- usecortex_ai/tenant/raw_client.py +502 -4
- usecortex_ai/types/__init__.py +16 -2
- usecortex_ai/types/add_user_memory_response.py +36 -0
- usecortex_ai/types/app_sources_upload_data.py +10 -2
- usecortex_ai/types/attachment_model.py +34 -7
- usecortex_ai/types/batch_upload_data.py +10 -2
- usecortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py +0 -2
- usecortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py +0 -2
- usecortex_ai/types/content_model.py +33 -6
- usecortex_ai/types/delete_memory_request.py +14 -3
- usecortex_ai/types/delete_sources.py +20 -0
- usecortex_ai/types/delete_sub_tenant_data.py +42 -0
- usecortex_ai/types/delete_user_memory_response.py +31 -0
- usecortex_ai/types/embeddings_create_collection_data.py +19 -4
- usecortex_ai/types/embeddings_delete_data.py +19 -4
- usecortex_ai/types/embeddings_get_data.py +19 -4
- usecortex_ai/types/embeddings_search_data.py +19 -4
- usecortex_ai/types/error_response.py +0 -1
- usecortex_ai/types/fetch_content_data.py +19 -5
- usecortex_ai/types/file_upload_result.py +9 -2
- usecortex_ai/types/generate_user_memory_response.py +32 -0
- usecortex_ai/types/list_sources_response.py +14 -3
- usecortex_ai/types/list_user_memories_response.py +32 -0
- usecortex_ai/types/markdown_upload_request.py +18 -5
- usecortex_ai/types/processing_status.py +14 -3
- usecortex_ai/types/relations.py +9 -2
- usecortex_ai/types/retrieve_user_memory_response.py +32 -0
- usecortex_ai/types/search_chunk.py +54 -17
- usecortex_ai/types/single_upload_data.py +10 -2
- usecortex_ai/types/source.py +34 -15
- usecortex_ai/types/source_model.py +63 -14
- usecortex_ai/types/sub_tenant_ids_data.py +24 -5
- usecortex_ai/types/tenant_create_data.py +19 -4
- usecortex_ai/types/tenant_stats.py +24 -5
- usecortex_ai/types/user_memory.py +31 -0
- usecortex_ai/upload/client.py +394 -79
- usecortex_ai/upload/raw_client.py +398 -103
- usecortex_ai/user/client.py +14 -6
- usecortex_ai/user/raw_client.py +14 -6
- usecortex_ai/user_memory/client.py +184 -50
- usecortex_ai/user_memory/raw_client.py +905 -88
- {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.2.2.dist-info}/METADATA +1 -1
- usecortex_ai-0.2.2.dist-info/RECORD +100 -0
- usecortex_ai/raw_client.py +0 -90
- usecortex_ai/types/source_content.py +0 -26
- usecortex_ai-0.2.1.dist-info/RECORD +0 -91
- {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.2.2.dist-info}/WHEEL +0 -0
- {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.2.2.dist-info}/licenses/LICENSE +0 -0
- {usecortex_ai-0.2.1.dist-info → usecortex_ai-0.2.2.dist-info}/top_level.txt +0 -0

@@ -38,28 +38,20 @@ class RawEmbeddingsClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> HttpResponse[EmbeddingsDeleteData]:
         """
-        Delete
+        Delete embedding chunks by chunk ID.

-
-        chunk IDs to the backend delete service.
-
-        Args:
-            request (EmbeddingsDeleteRequest): The delete request containing:
-                - chunk_ids (List[str]): List of chunk IDs to delete
-                - tenant_id (str): Tenant identifier for multi-tenancy
-                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
-            api_details (dict): Authentication details obtained from API key validation
-
-        Returns:
-            EmbeddingsDeleteData: Success response with deletion details
+        Use this to remove specific chunks from your embeddings index when they are no longer valid or should not appear in results.

         Parameters
         ----------
         chunk_ids : typing.Sequence[str]
+            The chunk IDs of the source you want to delete

         tenant_id : str
+            Unique identifier for the tenant/organization

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
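The rewritten docstring above describes the delete endpoint in terms of its keyword parameters (chunk_ids, tenant_id, sub_tenant_id) rather than an internal request object. A minimal usage sketch follows; the top-level client class and the `embeddings.delete` method name are assumptions inferred from this diff, only the parameter names come from the docstring:

```python
# Hypothetical sketch -- client class and method names are assumed, not confirmed API.
from usecortex_ai import UsecortexAi  # assumed top-level client export

client = UsecortexAi(api_key="YOUR_API_KEY")  # assumed constructor

# Remove chunks that should no longer appear in results.
response = client.embeddings.delete(       # assumed method name
    chunk_ids=["chunk_123", "chunk_456"],  # IDs of the chunks to remove
    tenant_id="acme-co",                   # tenant/organization identifier
    sub_tenant_id="team-docs",             # optional; defaults to tenant_id
)
```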
@@ -185,29 +177,24 @@ class RawEmbeddingsClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> HttpResponse[EmbeddingsSearchData]:
         """
-
+        Find similar chunks using an embedding vector.

-
-        and returns a list of the most similar chunk IDs based on vector similarity.
+        Use this to retrieve the most similar chunk IDs to a single query embedding.

-        Args:
-            request (EmbeddingsSearchRequest): The search request containing:
-                - embeddings (List[float]): Single embedding vector for similarity search
-                - tenant_id (str): Tenant identifier for multi-tenancy
-                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
-                - max_chunks (int, optional): Maximum number of chunk IDs to return (default: 10)
-            api_details (dict): Authentication details obtained from API key validation

-
-
+        Expected outcome
+        - You receive the closest chunk IDs with optional similarity scores.

         Parameters
         ----------
         tenant_id : str
+            Unique identifier for the tenant/organization

         embeddings : typing.Optional[typing.Sequence[float]]
+            The embedding vector for search

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         max_chunks : typing.Optional[int]

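A hedged sketch of the similarity-search call documented above, passing a single precomputed query embedding; the client and method names are assumptions, only the parameters come from the diff:

```python
# Hypothetical sketch -- names assumed from this diff, not confirmed API.
from usecortex_ai import UsecortexAi  # assumed client export

client = UsecortexAi(api_key="YOUR_API_KEY")

query_vector = [0.12, -0.03, 0.88]        # your precomputed query embedding

result = client.embeddings.search(        # assumed method name
    tenant_id="acme-co",
    embeddings=query_vector,              # single embedding vector for the search
    sub_tenant_id="team-docs",            # optional; defaults to tenant_id
    max_chunks=10,                        # cap on returned chunk IDs
)
```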
@@ -335,20 +322,20 @@ class RawEmbeddingsClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> HttpResponse[EmbeddingsGetData]:
         """
-
+        Retrieve embeddings for specific chunk IDs.

-
-
-        Returns:
-            EmbeddingsGetData: Embeddings data for the requested chunk IDs
+        Use this when you already know the chunk IDs and need their corresponding embeddings.

         Parameters
         ----------
         chunk_ids : typing.Sequence[str]
+            The chunk IDs of the source you want to get embeddings for

         tenant_id : str
+            Unique identifier for the tenant/organization

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
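A sketch of retrieving stored vectors for chunk IDs you already know; the method name is an assumption, the parameters are from the docstring above:

```python
# Hypothetical sketch -- names assumed from this diff.
from usecortex_ai import UsecortexAi  # assumed client export

client = UsecortexAi(api_key="YOUR_API_KEY")

# Fetch vectors for chunks you already know about.
data = client.embeddings.get(             # assumed method name
    chunk_ids=["chunk_123", "chunk_456"],
    tenant_id="acme-co",
)
```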
@@ -468,16 +455,14 @@ class RawEmbeddingsClient:
         self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
     ) -> HttpResponse[EmbeddingsCreateCollectionData]:
         """
-        Create an embeddings collection for
-
-        sub_tenant_id is set to be the same as tenant_id as per requirements.
+        Create an embeddings collection for a tenant.

-
-            EmbeddingsCreateCollectionData: Success response with collection details
+        Use this to initialize storage so you can index and query embeddings for the tenant.

         Parameters
         ----------
         tenant_id : str
+            Unique identifier for the tenant/organization

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
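A sketch of the one-time collection setup described above; the method name is an assumption:

```python
# Hypothetical sketch -- names assumed from this diff.
from usecortex_ai import UsecortexAi  # assumed client export

client = UsecortexAi(api_key="YOUR_API_KEY")

# One-time setup before indexing or querying embeddings for a tenant.
client.embeddings.create_collection(tenant_id="acme-co")  # assumed method name
```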
@@ -601,28 +586,20 @@ class AsyncRawEmbeddingsClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> AsyncHttpResponse[EmbeddingsDeleteData]:
         """
-        Delete
+        Delete embedding chunks by chunk ID.

-
-        chunk IDs to the backend delete service.
-
-        Args:
-            request (EmbeddingsDeleteRequest): The delete request containing:
-                - chunk_ids (List[str]): List of chunk IDs to delete
-                - tenant_id (str): Tenant identifier for multi-tenancy
-                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
-            api_details (dict): Authentication details obtained from API key validation
-
-        Returns:
-            EmbeddingsDeleteData: Success response with deletion details
+        Use this to remove specific chunks from your embeddings index when they are no longer valid or should not appear in results.

         Parameters
         ----------
         chunk_ids : typing.Sequence[str]
+            The chunk IDs of the source you want to delete

         tenant_id : str
+            Unique identifier for the tenant/organization

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -748,29 +725,24 @@ class AsyncRawEmbeddingsClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> AsyncHttpResponse[EmbeddingsSearchData]:
         """
-
+        Find similar chunks using an embedding vector.

-
-        and returns a list of the most similar chunk IDs based on vector similarity.
+        Use this to retrieve the most similar chunk IDs to a single query embedding.

-        Args:
-            request (EmbeddingsSearchRequest): The search request containing:
-                - embeddings (List[float]): Single embedding vector for similarity search
-                - tenant_id (str): Tenant identifier for multi-tenancy
-                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
-                - max_chunks (int, optional): Maximum number of chunk IDs to return (default: 10)
-            api_details (dict): Authentication details obtained from API key validation

-
-
+        Expected outcome
+        - You receive the closest chunk IDs with optional similarity scores.

         Parameters
         ----------
         tenant_id : str
+            Unique identifier for the tenant/organization

         embeddings : typing.Optional[typing.Sequence[float]]
+            The embedding vector for search

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         max_chunks : typing.Optional[int]

@@ -898,20 +870,20 @@ class AsyncRawEmbeddingsClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> AsyncHttpResponse[EmbeddingsGetData]:
         """
-
+        Retrieve embeddings for specific chunk IDs.

-
-
-        Returns:
-            EmbeddingsGetData: Embeddings data for the requested chunk IDs
+        Use this when you already know the chunk IDs and need their corresponding embeddings.

         Parameters
         ----------
         chunk_ids : typing.Sequence[str]
+            The chunk IDs of the source you want to get embeddings for

         tenant_id : str
+            Unique identifier for the tenant/organization

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -1031,16 +1003,14 @@ class AsyncRawEmbeddingsClient:
         self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
     ) -> AsyncHttpResponse[EmbeddingsCreateCollectionData]:
         """
-        Create an embeddings collection for
-
-        sub_tenant_id is set to be the same as tenant_id as per requirements.
+        Create an embeddings collection for a tenant.

-
-            EmbeddingsCreateCollectionData: Success response with collection details
+        Use this to initialize storage so you can index and query embeddings for the tenant.

         Parameters
         ----------
         tenant_id : str
+            Unique identifier for the tenant/organization

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
usecortex_ai/fetch/client.py
CHANGED
@@ -40,14 +40,19 @@ class FetchClient:
         Parameters
         ----------
         file_id : str
+            Unique identifier for the file to fetch

         file_type : str
+            Type of file (e.g., 'app', 'file', or other)

         tenant_id : str
+            Tenant identifier for multi-tenancy

         return_content : typing.Optional[bool]
+            Whether to return the file content along with the URL

         sub_tenant_id : typing.Optional[str]
+            Sub-tenant identifier, defaults to tenant_id if not provided

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -104,14 +109,19 @@ class AsyncFetchClient:
         Parameters
         ----------
         file_id : str
+            Unique identifier for the file to fetch

         file_type : str
+            Type of file (e.g., 'app', 'file', or other)

         tenant_id : str
+            Tenant identifier for multi-tenancy

         return_content : typing.Optional[bool]
+            Whether to return the file content along with the URL

         sub_tenant_id : typing.Optional[str]
+            Sub-tenant identifier, defaults to tenant_id if not provided

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
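The fetch docstrings now describe each parameter. A hedged sketch of fetching an uploaded file; the method name is an assumption, while file_id, file_type, tenant_id, and return_content come from the diff:

```python
# Hypothetical sketch -- names assumed from this diff.
from usecortex_ai import UsecortexAi  # assumed client export

client = UsecortexAi(api_key="YOUR_API_KEY")

# Fetch a previously uploaded file, optionally including its content.
result = client.fetch.get_file(            # assumed method name
    file_id="file_abc123",
    file_type="file",                      # e.g. 'app', 'file', or other
    tenant_id="acme-co",
    return_content=True,                   # also return content, not just the URL
)
```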
usecortex_ai/fetch/raw_client.py
CHANGED
@@ -40,14 +40,19 @@ class RawFetchClient:
         Parameters
         ----------
         file_id : str
+            Unique identifier for the file to fetch

         file_type : str
+            Type of file (e.g., 'app', 'file', or other)

         tenant_id : str
+            Tenant identifier for multi-tenancy

         return_content : typing.Optional[bool]
+            Whether to return the file content along with the URL

         sub_tenant_id : typing.Optional[str]
+            Sub-tenant identifier, defaults to tenant_id if not provided

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -184,14 +189,19 @@ class AsyncRawFetchClient:
         Parameters
         ----------
         file_id : str
+            Unique identifier for the file to fetch

         file_type : str
+            Type of file (e.g., 'app', 'file', or other)

         tenant_id : str
+            Tenant identifier for multi-tenancy

         return_content : typing.Optional[bool]
+            Whether to return the file content along with the URL

         sub_tenant_id : typing.Optional[str]
+            Sub-tenant identifier, defaults to tenant_id if not provided

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
usecortex_ai/search/client.py
CHANGED
@@ -51,41 +51,60 @@ class SearchClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.Optional[typing.Any]:
         """
+        Ask a question to your uploaded knowledge base and let Cortex AI answer it.
+
         Parameters
         ----------
         question : str
+            The question to be answered

         session_id : str
+            Unique identifier for the conversation session. Keep it same when the current question refers to a previous answer or question

         tenant_id : str
+            Identifier for the tenant/organization

         context_list : typing.Optional[typing.Sequence[str]]
+            List of context strings to provide additional information

         search_modes : typing.Optional[typing.Sequence[str]]
+            List of search modes to use for finding relevant information

         sub_tenant_id : typing.Optional[str]
+            Identifier for sub-tenant within the tenant

         highlight_chunks : typing.Optional[bool]
+            Whether to return text chunks in the response along with final LLM generated answer

         stream : typing.Optional[bool]
+            Whether to stream the response

         search_alpha : typing.Optional[float]
+            Closer to 0.0 means a exact keyword search will be performed, closer to 1.0 means semantics of the search will be considered. In most cases, you wont have to toggle it yourself.

         recency_bias : typing.Optional[float]
+            Bias towards more recent information (0.0 to 1.0)

         ai_generation : typing.Optional[bool]
+            Whether to use AI for generating responses

         top_n : typing.Optional[int]
+            Number of top results to return

         user_name : typing.Optional[str]
+            Name of the user making the request. This helps LLM to know the user's name if semantics around the username are involved in query. Its generally a good practice to include it possible.

         user_instructions : typing.Optional[str]
+            Custom instructions for the AI response to add to our proprietary prompt. This can be used to provide additional context or instructions for the LLM to follow so that the answers are tailored towards your application style

         multi_step_reasoning : typing.Optional[bool]
+            Enable advanced multi-step reasoning for complex queries. When enabled, the AI will automatically break down complex questions into multiple research steps to provide more comprehensive and accurate answers.

         auto_agent_routing : typing.Optional[bool]
+            Enable intelligent agent routing to automatically select the most suitable AI agent for your specific query type. Different agents are optimized for various use cases like social media, code, conversations, general knowledge, etc.

         metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional metadata for the request

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
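The question-answering docstring above gains a summary line and per-parameter descriptions. A hedged sketch of the call; the client and method names are assumptions, the parameters are from the diff:

```python
# Hypothetical sketch -- names assumed from this diff, not confirmed API.
from usecortex_ai import UsecortexAi  # assumed client export

client = UsecortexAi(api_key="YOUR_API_KEY")

answer = client.search.qna(                # assumed method name
    question="What is our refund policy?",
    session_id="session-42",               # reuse for follow-up questions in the same conversation
    tenant_id="acme-co",
    highlight_chunks=True,                 # include supporting chunks with the generated answer
    user_name="Priya",                     # helps the LLM when the query refers to the user
    top_n=5,
)
```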
@@ -133,45 +152,33 @@ class SearchClient:
         max_chunks: typing.Optional[int] = OMIT,
         alpha: typing.Optional[Alpha] = OMIT,
         recency_bias: typing.Optional[float] = OMIT,
-        num_related_chunks: typing.Optional[int] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.List[SearchChunk]:
         """
-        Search for content within indexed sources
-
-        This
-
-        with various parameters to control the number of results and search behavior.
-
-        Args:
-            request (SearchRequest): The search request containing:
-                - query (str): Search query string to find relevant content
-                - tenant_id (str, optional): Tenant identifier for multi-tenancy
-                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
-                - max_chunks (int, optional): Maximum number of content chunks to return
-                - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
-                - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
-                - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
-            api_details (dict): Authentication details obtained from API key validation
-
-        Returns:
-            SearchData: Success response with search results
+        Search for relevant content within your indexed sources.
+
+        This API returns the chunks related to the query you make. We use neural (embedding) search to give you the most relevant chunks.
+        Results are ranked by relevance and can be customized with parameters like result limits and recency preferences.

         Parameters
         ----------
         query : str
+            Search terms to find relevant content

         tenant_id : str
+            Unique identifier for the tenant/organization

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         max_chunks : typing.Optional[int]
+            Maximum number of results to return

         alpha : typing.Optional[Alpha]
+            Search ranking algorithm parameter (0.0-1.0 or 'auto')

         recency_bias : typing.Optional[float]
-
-        num_related_chunks : typing.Optional[int]
+            Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -195,7 +202,6 @@ class SearchClient:
             max_chunks=max_chunks,
             alpha=alpha,
             recency_bias=recency_bias,
-            num_related_chunks=num_related_chunks,
             request_options=request_options,
         )
         return _response.data
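Note that num_related_chunks is removed from both the signature and the internal call in 0.2.2. A hedged sketch of the semantic search call without it; the method name is an assumption, the parameters are from the diff:

```python
# Hypothetical sketch -- names assumed from this diff; num_related_chunks is gone in 0.2.2.
from usecortex_ai import UsecortexAi  # assumed client export

client = UsecortexAi(api_key="YOUR_API_KEY")

chunks = client.search.find(               # assumed method name
    query="quarterly revenue targets",
    tenant_id="acme-co",
    max_chunks=25,                         # cap on returned chunks
    alpha="auto",                          # 0.0-1.0 or 'auto'
    recency_bias=0.5,                      # 0.0 = no bias, 1.0 = strong recency preference
)
for chunk in chunks:
    print(chunk)
```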
@@ -211,36 +217,27 @@ class SearchClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.List[SearchChunk]:
         """
-
-        Performs full text search with configurable operators for precise text matching against the Findr backend.
-
-        This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
-        relevant content chunks from their indexed documents and sources using BM25-based text matching.
-        The search can be customized with various operators to control the matching behavior.
-
-        Args:
-            request (FullTextSearchRequest): The full text search request containing:
-                - query (str): Search query string to find relevant content
-                - tenant_id (str): Tenant identifier for multi-tenancy
-                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
-                - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
-                - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
-            api_details (dict): Authentication details obtained from API key validation
+        Perform full text search for exact matches within your indexed sources.

-
-
+        Use this endpoint to find content chunks using BM25-based text matching with configurable operators.
+        Choose between OR and AND operators to control how search terms are combined for precise text matching.

         Parameters
         ----------
         query : str
+            Search terms to find in your content

         tenant_id : str
+            Unique identifier for the tenant/organization

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         operator : typing.Optional[Bm25OperatorType]
+            How to combine search terms (OR or AND)

         max_chunks : typing.Optional[int]
+            Maximum number of results to return

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
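A hedged sketch of the BM25 full text search described above; the method name and the operator literal are assumptions, the parameters come from the diff:

```python
# Hypothetical sketch -- method name and operator value are assumed from this diff.
from usecortex_ai import UsecortexAi  # assumed client export

client = UsecortexAi(api_key="YOUR_API_KEY")

chunks = client.search.full_text_search(   # assumed method name
    query="invoice 2024-117",
    tenant_id="acme-co",
    operator="AND",                        # require every term to match (BM25 AND); assumed literal
    max_chunks=25,
)
```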
@@ -306,41 +303,60 @@ class AsyncSearchClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.Optional[typing.Any]:
         """
+        Ask a question to your uploaded knowledge base and let Cortex AI answer it.
+
         Parameters
         ----------
         question : str
+            The question to be answered

         session_id : str
+            Unique identifier for the conversation session. Keep it same when the current question refers to a previous answer or question

         tenant_id : str
+            Identifier for the tenant/organization

         context_list : typing.Optional[typing.Sequence[str]]
+            List of context strings to provide additional information

         search_modes : typing.Optional[typing.Sequence[str]]
+            List of search modes to use for finding relevant information

         sub_tenant_id : typing.Optional[str]
+            Identifier for sub-tenant within the tenant

         highlight_chunks : typing.Optional[bool]
+            Whether to return text chunks in the response along with final LLM generated answer

         stream : typing.Optional[bool]
+            Whether to stream the response

         search_alpha : typing.Optional[float]
+            Closer to 0.0 means a exact keyword search will be performed, closer to 1.0 means semantics of the search will be considered. In most cases, you wont have to toggle it yourself.

         recency_bias : typing.Optional[float]
+            Bias towards more recent information (0.0 to 1.0)

         ai_generation : typing.Optional[bool]
+            Whether to use AI for generating responses

         top_n : typing.Optional[int]
+            Number of top results to return

         user_name : typing.Optional[str]
+            Name of the user making the request. This helps LLM to know the user's name if semantics around the username are involved in query. Its generally a good practice to include it possible.

         user_instructions : typing.Optional[str]
+            Custom instructions for the AI response to add to our proprietary prompt. This can be used to provide additional context or instructions for the LLM to follow so that the answers are tailored towards your application style

         multi_step_reasoning : typing.Optional[bool]
+            Enable advanced multi-step reasoning for complex queries. When enabled, the AI will automatically break down complex questions into multiple research steps to provide more comprehensive and accurate answers.

         auto_agent_routing : typing.Optional[bool]
+            Enable intelligent agent routing to automatically select the most suitable AI agent for your specific query type. Different agents are optimized for various use cases like social media, code, conversations, general knowledge, etc.

         metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional metadata for the request

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -392,45 +408,33 @@ class AsyncSearchClient:
         max_chunks: typing.Optional[int] = OMIT,
         alpha: typing.Optional[Alpha] = OMIT,
         recency_bias: typing.Optional[float] = OMIT,
-        num_related_chunks: typing.Optional[int] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.List[SearchChunk]:
         """
-        Search for content within indexed sources
-
-        This
-
-        with various parameters to control the number of results and search behavior.
-
-        Args:
-            request (SearchRequest): The search request containing:
-                - query (str): Search query string to find relevant content
-                - tenant_id (str, optional): Tenant identifier for multi-tenancy
-                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
-                - max_chunks (int, optional): Maximum number of content chunks to return
-                - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
-                - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
-                - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
-            api_details (dict): Authentication details obtained from API key validation
-
-        Returns:
-            SearchData: Success response with search results
+        Search for relevant content within your indexed sources.
+
+        This API returns the chunks related to the query you make. We use neural (embedding) search to give you the most relevant chunks.
+        Results are ranked by relevance and can be customized with parameters like result limits and recency preferences.

         Parameters
         ----------
         query : str
+            Search terms to find relevant content

         tenant_id : str
+            Unique identifier for the tenant/organization

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         max_chunks : typing.Optional[int]
+            Maximum number of results to return

         alpha : typing.Optional[Alpha]
+            Search ranking algorithm parameter (0.0-1.0 or 'auto')

         recency_bias : typing.Optional[float]
-
-        num_related_chunks : typing.Optional[int]
+            Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -458,7 +462,6 @@ class AsyncSearchClient:
             max_chunks=max_chunks,
             alpha=alpha,
             recency_bias=recency_bias,
-            num_related_chunks=num_related_chunks,
             request_options=request_options,
         )
         return _response.data
@@ -474,36 +477,27 @@ class AsyncSearchClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.List[SearchChunk]:
         """
-
-        Performs full text search with configurable operators for precise text matching against the Findr backend.
-
-        This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
-        relevant content chunks from their indexed documents and sources using BM25-based text matching.
-        The search can be customized with various operators to control the matching behavior.
-
-        Args:
-            request (FullTextSearchRequest): The full text search request containing:
-                - query (str): Search query string to find relevant content
-                - tenant_id (str): Tenant identifier for multi-tenancy
-                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
-                - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
-                - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
-            api_details (dict): Authentication details obtained from API key validation
+        Perform full text search for exact matches within your indexed sources.

-
-
+        Use this endpoint to find content chunks using BM25-based text matching with configurable operators.
+        Choose between OR and AND operators to control how search terms are combined for precise text matching.

         Parameters
         ----------
         query : str
+            Search terms to find in your content

         tenant_id : str
+            Unique identifier for the tenant/organization

         sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id

         operator : typing.Optional[Bm25OperatorType]
+            How to combine search terms (OR or AND)

         max_chunks : typing.Optional[int]
+            Maximum number of results to return

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.