usecortex-ai 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64) hide show
  1. usecortex_ai/__init__.py +20 -3
  2. usecortex_ai/client.py +7 -78
  3. usecortex_ai/core/client_wrapper.py +6 -8
  4. usecortex_ai/document/__init__.py +4 -0
  5. usecortex_ai/document/client.py +139 -0
  6. usecortex_ai/document/raw_client.py +312 -0
  7. usecortex_ai/embeddings/client.py +50 -82
  8. usecortex_ai/embeddings/raw_client.py +48 -80
  9. usecortex_ai/fetch/client.py +10 -0
  10. usecortex_ai/fetch/raw_client.py +10 -0
  11. usecortex_ai/search/client.py +74 -80
  12. usecortex_ai/search/raw_client.py +74 -80
  13. usecortex_ai/sources/client.py +28 -0
  14. usecortex_ai/sources/raw_client.py +28 -0
  15. usecortex_ai/tenant/client.py +152 -4
  16. usecortex_ai/tenant/raw_client.py +502 -4
  17. usecortex_ai/types/__init__.py +18 -2
  18. usecortex_ai/types/add_user_memory_response.py +36 -0
  19. usecortex_ai/types/app_sources_upload_data.py +10 -2
  20. usecortex_ai/types/attachment_model.py +34 -7
  21. usecortex_ai/types/batch_upload_data.py +10 -2
  22. usecortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py +0 -2
  23. usecortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py +0 -2
  24. usecortex_ai/types/content_model.py +33 -6
  25. usecortex_ai/types/delete_memory_request.py +14 -3
  26. usecortex_ai/types/delete_sources.py +20 -0
  27. usecortex_ai/types/delete_sub_tenant_data.py +42 -0
  28. usecortex_ai/types/delete_user_memory_response.py +31 -0
  29. usecortex_ai/types/embeddings_create_collection_data.py +19 -4
  30. usecortex_ai/types/embeddings_delete_data.py +19 -4
  31. usecortex_ai/types/embeddings_get_data.py +19 -4
  32. usecortex_ai/types/embeddings_search_data.py +19 -4
  33. usecortex_ai/types/error_response.py +0 -1
  34. usecortex_ai/types/fetch_content_data.py +19 -5
  35. usecortex_ai/types/file_upload_result.py +9 -2
  36. usecortex_ai/types/generate_user_memory_response.py +32 -0
  37. usecortex_ai/types/list_sources_response.py +14 -3
  38. usecortex_ai/types/list_user_memories_response.py +32 -0
  39. usecortex_ai/types/markdown_upload_request.py +18 -3
  40. usecortex_ai/types/processing_status.py +14 -3
  41. usecortex_ai/types/relations.py +2 -2
  42. usecortex_ai/types/retrieve_user_memory_response.py +32 -0
  43. usecortex_ai/types/search_chunk.py +54 -16
  44. usecortex_ai/types/single_upload_data.py +10 -2
  45. usecortex_ai/types/source.py +34 -14
  46. usecortex_ai/types/source_model.py +63 -12
  47. usecortex_ai/types/sub_tenant_ids_data.py +42 -0
  48. usecortex_ai/types/tenant_create_data.py +19 -4
  49. usecortex_ai/types/tenant_stats.py +24 -5
  50. usecortex_ai/types/user_memory.py +31 -0
  51. usecortex_ai/upload/client.py +508 -132
  52. usecortex_ai/upload/raw_client.py +492 -152
  53. usecortex_ai/user/client.py +14 -6
  54. usecortex_ai/user/raw_client.py +14 -6
  55. usecortex_ai/user_memory/client.py +184 -50
  56. usecortex_ai/user_memory/raw_client.py +905 -88
  57. {usecortex_ai-0.2.0.dist-info → usecortex_ai-0.2.2.dist-info}/METADATA +1 -1
  58. usecortex_ai-0.2.2.dist-info/RECORD +100 -0
  59. usecortex_ai/raw_client.py +0 -90
  60. usecortex_ai/types/source_content.py +0 -26
  61. usecortex_ai-0.2.0.dist-info/RECORD +0 -90
  62. {usecortex_ai-0.2.0.dist-info → usecortex_ai-0.2.2.dist-info}/WHEEL +0 -0
  63. {usecortex_ai-0.2.0.dist-info → usecortex_ai-0.2.2.dist-info}/licenses/LICENSE +0 -0
  64. {usecortex_ai-0.2.0.dist-info → usecortex_ai-0.2.2.dist-info}/top_level.txt +0 -0
@@ -51,41 +51,60 @@ class SearchClient:
51
51
  request_options: typing.Optional[RequestOptions] = None,
52
52
  ) -> typing.Optional[typing.Any]:
53
53
  """
54
+ Ask a question to your uploaded knowledge base and let Cortex AI answer it.
55
+
54
56
  Parameters
55
57
  ----------
56
58
  question : str
59
+ The question to be answered
57
60
 
58
61
  session_id : str
62
+ Unique identifier for the conversation session. Keep it the same when the current question refers to a previous answer or question
59
63
 
60
64
  tenant_id : str
65
+ Identifier for the tenant/organization
61
66
 
62
67
  context_list : typing.Optional[typing.Sequence[str]]
68
+ List of context strings to provide additional information
63
69
 
64
70
  search_modes : typing.Optional[typing.Sequence[str]]
71
+ List of search modes to use for finding relevant information
65
72
 
66
73
  sub_tenant_id : typing.Optional[str]
74
+ Identifier for sub-tenant within the tenant
67
75
 
68
76
  highlight_chunks : typing.Optional[bool]
77
+ Whether to return text chunks in the response along with final LLM generated answer
69
78
 
70
79
  stream : typing.Optional[bool]
80
+ Whether to stream the response
71
81
 
72
82
  search_alpha : typing.Optional[float]
83
+ Closer to 0.0 means an exact keyword search will be performed, closer to 1.0 means semantics of the search will be considered. In most cases, you won't have to toggle it yourself.
73
84
 
74
85
  recency_bias : typing.Optional[float]
86
+ Bias towards more recent information (0.0 to 1.0)
75
87
 
76
88
  ai_generation : typing.Optional[bool]
89
+ Whether to use AI for generating responses
77
90
 
78
91
  top_n : typing.Optional[int]
92
+ Number of top results to return
79
93
 
80
94
  user_name : typing.Optional[str]
95
+ Name of the user making the request. This helps the LLM know the user's name if semantics around the username are involved in the query. It's generally a good practice to include it if possible.
81
96
 
82
97
  user_instructions : typing.Optional[str]
98
+ Custom instructions for the AI response to add to our proprietary prompt. This can be used to provide additional context or instructions for the LLM to follow so that the answers are tailored towards your application style
83
99
 
84
100
  multi_step_reasoning : typing.Optional[bool]
101
+ Enable advanced multi-step reasoning for complex queries. When enabled, the AI will automatically break down complex questions into multiple research steps to provide more comprehensive and accurate answers.
85
102
 
86
103
  auto_agent_routing : typing.Optional[bool]
104
+ Enable intelligent agent routing to automatically select the most suitable AI agent for your specific query type. Different agents are optimized for various use cases like social media, code, conversations, general knowledge, etc.
87
105
 
88
106
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
107
+ Additional metadata for the request
89
108
 
90
109
  request_options : typing.Optional[RequestOptions]
91
110
  Request-specific configuration.
@@ -133,45 +152,33 @@ class SearchClient:
133
152
  max_chunks: typing.Optional[int] = OMIT,
134
153
  alpha: typing.Optional[Alpha] = OMIT,
135
154
  recency_bias: typing.Optional[float] = OMIT,
136
- num_related_chunks: typing.Optional[int] = OMIT,
137
155
  request_options: typing.Optional[RequestOptions] = None,
138
156
  ) -> typing.List[SearchChunk]:
139
157
  """
140
- Search for content within indexed sources using semantic and keyword search capabilities.
141
-
142
- This endpoint performs a search query against the Findr backend, allowing users to retrieve
143
- relevant content chunks from their indexed documents and sources. The search can be customized
144
- with various parameters to control the number of results and search behavior.
145
-
146
- Args:
147
- request (SearchRequest): The search request containing:
148
- - query (str): Search query string to find relevant content
149
- - tenant_id (str, optional): Tenant identifier for multi-tenancy
150
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
151
- - max_chunks (int, optional): Maximum number of content chunks to return
152
- - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
153
- - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
154
- - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
155
- api_details (dict): Authentication details obtained from API key validation
156
-
157
- Returns:
158
- SearchData: Success response with search results
158
+ Search for relevant content within your indexed sources.
159
+
160
+ This API returns the chunks related to the query you make. We use neural (embedding) search to give you the most relevant chunks.
161
+ Results are ranked by relevance and can be customized with parameters like result limits and recency preferences.
159
162
 
160
163
  Parameters
161
164
  ----------
162
165
  query : str
166
+ Search terms to find relevant content
163
167
 
164
168
  tenant_id : str
169
+ Unique identifier for the tenant/organization
165
170
 
166
171
  sub_tenant_id : typing.Optional[str]
172
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id
167
173
 
168
174
  max_chunks : typing.Optional[int]
175
+ Maximum number of results to return
169
176
 
170
177
  alpha : typing.Optional[Alpha]
178
+ Search ranking algorithm parameter (0.0-1.0 or 'auto')
171
179
 
172
180
  recency_bias : typing.Optional[float]
173
-
174
- num_related_chunks : typing.Optional[int]
181
+ Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)
175
182
 
176
183
  request_options : typing.Optional[RequestOptions]
177
184
  Request-specific configuration.
@@ -195,7 +202,6 @@ class SearchClient:
195
202
  max_chunks=max_chunks,
196
203
  alpha=alpha,
197
204
  recency_bias=recency_bias,
198
- num_related_chunks=num_related_chunks,
199
205
  request_options=request_options,
200
206
  )
201
207
  return _response.data
@@ -211,36 +217,27 @@ class SearchClient:
211
217
  request_options: typing.Optional[RequestOptions] = None,
212
218
  ) -> typing.List[SearchChunk]:
213
219
  """
214
- Full text search endpoint for Cortex customers.
215
- Performs full text search with configurable operators for precise text matching against the Findr backend.
216
-
217
- This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
218
- relevant content chunks from their indexed documents and sources using BM25-based text matching.
219
- The search can be customized with various operators to control the matching behavior.
220
-
221
- Args:
222
- request (FullTextSearchRequest): The full text search request containing:
223
- - query (str): Search query string to find relevant content
224
- - tenant_id (str): Tenant identifier for multi-tenancy
225
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
226
- - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
227
- - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
228
- api_details (dict): Authentication details obtained from API key validation
220
+ Perform full text search for exact matches within your indexed sources.
229
221
 
230
- Returns:
231
- FullTextSearchData: Success response with full text search results
222
+ Use this endpoint to find content chunks using BM25-based text matching with configurable operators.
223
+ Choose between OR and AND operators to control how search terms are combined for precise text matching.
232
224
 
233
225
  Parameters
234
226
  ----------
235
227
  query : str
228
+ Search terms to find in your content
236
229
 
237
230
  tenant_id : str
231
+ Unique identifier for the tenant/organization
238
232
 
239
233
  sub_tenant_id : typing.Optional[str]
234
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id
240
235
 
241
236
  operator : typing.Optional[Bm25OperatorType]
237
+ How to combine search terms (OR or AND)
242
238
 
243
239
  max_chunks : typing.Optional[int]
240
+ Maximum number of results to return
244
241
 
245
242
  request_options : typing.Optional[RequestOptions]
246
243
  Request-specific configuration.
@@ -306,41 +303,60 @@ class AsyncSearchClient:
306
303
  request_options: typing.Optional[RequestOptions] = None,
307
304
  ) -> typing.Optional[typing.Any]:
308
305
  """
306
+ Ask a question to your uploaded knowledge base and let Cortex AI answer it.
307
+
309
308
  Parameters
310
309
  ----------
311
310
  question : str
311
+ The question to be answered
312
312
 
313
313
  session_id : str
314
+ Unique identifier for the conversation session. Keep it the same when the current question refers to a previous answer or question
314
315
 
315
316
  tenant_id : str
317
+ Identifier for the tenant/organization
316
318
 
317
319
  context_list : typing.Optional[typing.Sequence[str]]
320
+ List of context strings to provide additional information
318
321
 
319
322
  search_modes : typing.Optional[typing.Sequence[str]]
323
+ List of search modes to use for finding relevant information
320
324
 
321
325
  sub_tenant_id : typing.Optional[str]
326
+ Identifier for sub-tenant within the tenant
322
327
 
323
328
  highlight_chunks : typing.Optional[bool]
329
+ Whether to return text chunks in the response along with final LLM generated answer
324
330
 
325
331
  stream : typing.Optional[bool]
332
+ Whether to stream the response
326
333
 
327
334
  search_alpha : typing.Optional[float]
335
+ Closer to 0.0 means an exact keyword search will be performed, closer to 1.0 means semantics of the search will be considered. In most cases, you won't have to toggle it yourself.
328
336
 
329
337
  recency_bias : typing.Optional[float]
338
+ Bias towards more recent information (0.0 to 1.0)
330
339
 
331
340
  ai_generation : typing.Optional[bool]
341
+ Whether to use AI for generating responses
332
342
 
333
343
  top_n : typing.Optional[int]
344
+ Number of top results to return
334
345
 
335
346
  user_name : typing.Optional[str]
347
+ Name of the user making the request. This helps the LLM know the user's name if semantics around the username are involved in the query. It's generally a good practice to include it if possible.
336
348
 
337
349
  user_instructions : typing.Optional[str]
350
+ Custom instructions for the AI response to add to our proprietary prompt. This can be used to provide additional context or instructions for the LLM to follow so that the answers are tailored towards your application style
338
351
 
339
352
  multi_step_reasoning : typing.Optional[bool]
353
+ Enable advanced multi-step reasoning for complex queries. When enabled, the AI will automatically break down complex questions into multiple research steps to provide more comprehensive and accurate answers.
340
354
 
341
355
  auto_agent_routing : typing.Optional[bool]
356
+ Enable intelligent agent routing to automatically select the most suitable AI agent for your specific query type. Different agents are optimized for various use cases like social media, code, conversations, general knowledge, etc.
342
357
 
343
358
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
359
+ Additional metadata for the request
344
360
 
345
361
  request_options : typing.Optional[RequestOptions]
346
362
  Request-specific configuration.
@@ -392,45 +408,33 @@ class AsyncSearchClient:
392
408
  max_chunks: typing.Optional[int] = OMIT,
393
409
  alpha: typing.Optional[Alpha] = OMIT,
394
410
  recency_bias: typing.Optional[float] = OMIT,
395
- num_related_chunks: typing.Optional[int] = OMIT,
396
411
  request_options: typing.Optional[RequestOptions] = None,
397
412
  ) -> typing.List[SearchChunk]:
398
413
  """
399
- Search for content within indexed sources using semantic and keyword search capabilities.
400
-
401
- This endpoint performs a search query against the Findr backend, allowing users to retrieve
402
- relevant content chunks from their indexed documents and sources. The search can be customized
403
- with various parameters to control the number of results and search behavior.
404
-
405
- Args:
406
- request (SearchRequest): The search request containing:
407
- - query (str): Search query string to find relevant content
408
- - tenant_id (str, optional): Tenant identifier for multi-tenancy
409
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
410
- - max_chunks (int, optional): Maximum number of content chunks to return
411
- - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
412
- - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
413
- - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
414
- api_details (dict): Authentication details obtained from API key validation
415
-
416
- Returns:
417
- SearchData: Success response with search results
414
+ Search for relevant content within your indexed sources.
415
+
416
+ This API returns the chunks related to the query you make. We use neural (embedding) search to give you the most relevant chunks.
417
+ Results are ranked by relevance and can be customized with parameters like result limits and recency preferences.
418
418
 
419
419
  Parameters
420
420
  ----------
421
421
  query : str
422
+ Search terms to find relevant content
422
423
 
423
424
  tenant_id : str
425
+ Unique identifier for the tenant/organization
424
426
 
425
427
  sub_tenant_id : typing.Optional[str]
428
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id
426
429
 
427
430
  max_chunks : typing.Optional[int]
431
+ Maximum number of results to return
428
432
 
429
433
  alpha : typing.Optional[Alpha]
434
+ Search ranking algorithm parameter (0.0-1.0 or 'auto')
430
435
 
431
436
  recency_bias : typing.Optional[float]
432
-
433
- num_related_chunks : typing.Optional[int]
437
+ Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)
434
438
 
435
439
  request_options : typing.Optional[RequestOptions]
436
440
  Request-specific configuration.
@@ -458,7 +462,6 @@ class AsyncSearchClient:
458
462
  max_chunks=max_chunks,
459
463
  alpha=alpha,
460
464
  recency_bias=recency_bias,
461
- num_related_chunks=num_related_chunks,
462
465
  request_options=request_options,
463
466
  )
464
467
  return _response.data
@@ -474,36 +477,27 @@ class AsyncSearchClient:
474
477
  request_options: typing.Optional[RequestOptions] = None,
475
478
  ) -> typing.List[SearchChunk]:
476
479
  """
477
- Full text search endpoint for Cortex customers.
478
- Performs full text search with configurable operators for precise text matching against the Findr backend.
479
-
480
- This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
481
- relevant content chunks from their indexed documents and sources using BM25-based text matching.
482
- The search can be customized with various operators to control the matching behavior.
483
-
484
- Args:
485
- request (FullTextSearchRequest): The full text search request containing:
486
- - query (str): Search query string to find relevant content
487
- - tenant_id (str): Tenant identifier for multi-tenancy
488
- - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
489
- - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
490
- - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
491
- api_details (dict): Authentication details obtained from API key validation
480
+ Perform full text search for exact matches within your indexed sources.
492
481
 
493
- Returns:
494
- FullTextSearchData: Success response with full text search results
482
+ Use this endpoint to find content chunks using BM25-based text matching with configurable operators.
483
+ Choose between OR and AND operators to control how search terms are combined for precise text matching.
495
484
 
496
485
  Parameters
497
486
  ----------
498
487
  query : str
488
+ Search terms to find in your content
499
489
 
500
490
  tenant_id : str
491
+ Unique identifier for the tenant/organization
501
492
 
502
493
  sub_tenant_id : typing.Optional[str]
494
+ Optional sub-tenant identifier for organizing data within a tenant. If not provided, defaults to tenant_id
503
495
 
504
496
  operator : typing.Optional[Bm25OperatorType]
497
+ How to combine search terms (OR or AND)
505
498
 
506
499
  max_chunks : typing.Optional[int]
500
+ Maximum number of results to return
507
501
 
508
502
  request_options : typing.Optional[RequestOptions]
509
503
  Request-specific configuration.