usecortex-ai 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. usecortex_ai/__init__.py +10 -4
  2. usecortex_ai/client.py +0 -4
  3. usecortex_ai/dashboard/client.py +2 -30
  4. usecortex_ai/dashboard/raw_client.py +0 -28
  5. usecortex_ai/embeddings/client.py +8 -58
  6. usecortex_ai/embeddings/raw_client.py +8 -58
  7. usecortex_ai/fetch/__init__.py +3 -0
  8. usecortex_ai/fetch/client.py +42 -165
  9. usecortex_ai/fetch/raw_client.py +38 -341
  10. usecortex_ai/fetch/types/__init__.py +7 -0
  11. usecortex_ai/fetch/types/fetch_list_knowledge_response.py +8 -0
  12. usecortex_ai/raw_client.py +0 -4
  13. usecortex_ai/search/client.py +40 -108
  14. usecortex_ai/search/raw_client.py +40 -108
  15. usecortex_ai/sources/client.py +2 -16
  16. usecortex_ai/sources/raw_client.py +2 -16
  17. usecortex_ai/tenant/client.py +4 -108
  18. usecortex_ai/tenant/raw_client.py +2 -106
  19. usecortex_ai/types/__init__.py +8 -2
  20. usecortex_ai/types/forceful_relations_payload.py +27 -0
  21. usecortex_ai/types/list_content_kind.py +5 -0
  22. usecortex_ai/types/list_user_memories_response.py +32 -0
  23. usecortex_ai/types/memory_item.py +7 -1
  24. usecortex_ai/types/retrieval_result.py +4 -0
  25. usecortex_ai/types/retrieve_mode.py +1 -1
  26. usecortex_ai/types/user_memory.py +31 -0
  27. usecortex_ai/types/vector_store_chunk.py +5 -0
  28. usecortex_ai/upload/__init__.py +0 -3
  29. usecortex_ai/upload/client.py +34 -204
  30. usecortex_ai/upload/raw_client.py +30 -382
  31. {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/METADATA +1 -1
  32. {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/RECORD +35 -32
  33. {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/WHEEL +1 -1
  34. usecortex_ai/types/app_sources_upload_data.py +0 -39
  35. usecortex_ai/upload/types/__init__.py +0 -7
  36. usecortex_ai/upload/types/body_upload_app_ingestion_upload_app_post_app_sources.py +0 -7
  37. {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/licenses/LICENSE +0 -0
  38. {usecortex_ai-0.4.0.dist-info → usecortex_ai-0.5.1.dist-info}/top_level.txt +0 -0
@@ -38,31 +38,18 @@ class SearchClient:
38
38
  tenant_id: str,
39
39
  query: str,
40
40
  sub_tenant_id: typing.Optional[str] = OMIT,
41
- max_chunks: typing.Optional[int] = OMIT,
41
+ max_results: typing.Optional[int] = OMIT,
42
42
  mode: typing.Optional[RetrieveMode] = OMIT,
43
43
  alpha: typing.Optional[Alpha] = OMIT,
44
44
  recency_bias: typing.Optional[float] = OMIT,
45
- num_related_chunks: typing.Optional[int] = OMIT,
46
- personalise_search: typing.Optional[bool] = OMIT,
45
+ personalise_recall: typing.Optional[bool] = OMIT,
47
46
  graph_context: typing.Optional[bool] = OMIT,
48
- extra_context: typing.Optional[str] = OMIT,
47
+ additional_context: typing.Optional[str] = OMIT,
49
48
  search_mode: typing.Optional[SearchMode] = OMIT,
49
+ metadata_filters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
50
50
  request_options: typing.Optional[RequestOptions] = None,
51
51
  ) -> RetrievalResult:
52
52
  """
53
- Search for relevant content within your indexed sources or user memories.
54
-
55
- Results are ranked by relevance and can be customized with parameters like
56
- result limits, alpha weighting, and recency preferences.
57
-
58
- Use `search_mode` to specify what to search:
59
- - "sources" (default): Search over indexed documents
60
- - "memories": Search over user memories (uses inferred content)
61
-
62
- Use `mode` to control retrieval quality:
63
- - "fast" (default): Single query, faster response
64
- - "accurate": Multi-query generation with reranking, higher quality
65
-
66
53
  Parameters
67
54
  ----------
68
55
  tenant_id : str
@@ -74,7 +61,7 @@ class SearchClient:
74
61
  sub_tenant_id : typing.Optional[str]
75
62
  Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
76
63
 
77
- max_chunks : typing.Optional[int]
64
+ max_results : typing.Optional[int]
78
65
  Maximum number of results to return
79
66
 
80
67
  mode : typing.Optional[RetrieveMode]
@@ -86,21 +73,21 @@ class SearchClient:
86
73
  recency_bias : typing.Optional[float]
87
74
  Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)
88
75
 
89
- num_related_chunks : typing.Optional[int]
90
- Number of related content chunks to include
91
-
92
- personalise_search : typing.Optional[bool]
76
+ personalise_recall : typing.Optional[bool]
93
77
  Enable personalized search results based on user preferences
94
78
 
95
79
  graph_context : typing.Optional[bool]
96
80
  Enable graph context for search results
97
81
 
98
- extra_context : typing.Optional[str]
82
+ additional_context : typing.Optional[str]
99
83
  Additional context provided by the user to guide retrieval
100
84
 
101
85
  search_mode : typing.Optional[SearchMode]
102
86
  What to search: 'sources' for documents or 'memories' for user memories
103
87
 
88
+ metadata_filters : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
89
+ Optional key-value pairs to filter search results by tenant metadata fields. Keys must match fields defined in tenant_metadata_schema during tenant creation. Supports exact match filtering on indexed metadata fields. Example: {'category': 'engineering', 'priority': 'high'}
90
+
104
91
  request_options : typing.Optional[RequestOptions]
105
92
  Request-specific configuration.
106
93
 
@@ -114,21 +101,21 @@ class SearchClient:
114
101
from usecortex_ai import CortexAI
115
102
 
116
103
  client = CortexAI(token="YOUR_TOKEN", )
117
- client.search.retrieve(tenant_id='tenant_id', query='query', )
104
+ client.search.retrieve(tenant_id='tenant_1234', query='Which mode does user prefer', )
118
105
  """
119
106
  _response = self._raw_client.retrieve(
120
107
  tenant_id=tenant_id,
121
108
  query=query,
122
109
  sub_tenant_id=sub_tenant_id,
123
- max_chunks=max_chunks,
110
+ max_results=max_results,
124
111
  mode=mode,
125
112
  alpha=alpha,
126
113
  recency_bias=recency_bias,
127
- num_related_chunks=num_related_chunks,
128
- personalise_search=personalise_search,
114
+ personalise_recall=personalise_recall,
129
115
  graph_context=graph_context,
130
- extra_context=extra_context,
116
+ additional_context=additional_context,
131
117
  search_mode=search_mode,
118
+ metadata_filters=metadata_filters,
132
119
  request_options=request_options,
133
120
  )
134
121
  return _response.data
@@ -140,19 +127,11 @@ class SearchClient:
140
127
  query: str,
141
128
  sub_tenant_id: typing.Optional[str] = OMIT,
142
129
  operator: typing.Optional[Bm25OperatorType] = OMIT,
143
- max_chunks: typing.Optional[int] = OMIT,
130
+ max_results: typing.Optional[int] = OMIT,
144
131
  search_mode: typing.Optional[SearchMode] = OMIT,
145
132
  request_options: typing.Optional[RequestOptions] = None,
146
133
  ) -> RetrievalResult:
147
134
  """
148
- Perform full text search for exact matches within your indexed sources or memories.
149
- Choose between 'OR' and 'AND' operators to control how search terms are combined
150
- for precise text matching.
151
-
152
- Use `search_mode` to specify what to search:
153
- - "sources" (default): Search over indexed documents
154
- - "memories": Search over user memories
155
-
156
135
  Parameters
157
136
  ----------
158
137
  tenant_id : str
@@ -167,7 +146,7 @@ class SearchClient:
167
146
  operator : typing.Optional[Bm25OperatorType]
168
147
  How to combine search terms (OR or AND)
169
148
 
170
- max_chunks : typing.Optional[int]
149
+ max_results : typing.Optional[int]
171
150
  Maximum number of results to return
172
151
 
173
152
  search_mode : typing.Optional[SearchMode]
@@ -186,14 +165,14 @@ class SearchClient:
186
165
from usecortex_ai import CortexAI
187
166
 
188
167
  client = CortexAI(token="YOUR_TOKEN", )
189
- client.search.full_text_search(tenant_id='tenant_id', query='query', )
168
+ client.search.full_text_search(tenant_id='tenant_1234', query='John Smith Jake', )
190
169
  """
191
170
  _response = self._raw_client.full_text_search(
192
171
  tenant_id=tenant_id,
193
172
  query=query,
194
173
  sub_tenant_id=sub_tenant_id,
195
174
  operator=operator,
196
- max_chunks=max_chunks,
175
+ max_results=max_results,
197
176
  search_mode=search_mode,
198
177
  request_options=request_options,
199
178
  )
@@ -218,19 +197,6 @@ class SearchClient:
218
197
  request_options: typing.Optional[RequestOptions] = None,
219
198
  ) -> QnASearchResponse:
220
199
  """
221
- Ask a question and get an AI-generated answer based on your indexed sources or memories.
222
-
223
- The response includes both the AI answer and the source chunks used to generate it,
224
- enabling full transparency and citation capabilities.
225
-
226
- Use `search_mode` to specify what to search:
227
- - "sources" (default): Search over indexed documents
228
- - "memories": Search over user memories
229
-
230
- Use `mode` to control retrieval quality:
231
- - "fast" (default): Single query, faster response
232
- - "accurate": Multi-query generation with reranking, higher quality
233
-
234
200
  Parameters
235
201
  ----------
236
202
  tenant_id : str
@@ -285,7 +251,7 @@ class SearchClient:
285
251
  from usecortex-ai import CortexAI
286
252
 
287
253
  client = CortexAI(token="YOUR_TOKEN", )
288
- client.search.qna(tenant_id='tenant_id', question='question', )
254
+ client.search.qna(tenant_id='tenant_1234', question='What is Cortex AI', )
289
255
  """
290
256
  _response = self._raw_client.qna(
291
257
  tenant_id=tenant_id,
@@ -327,31 +293,18 @@ class AsyncSearchClient:
327
293
  tenant_id: str,
328
294
  query: str,
329
295
  sub_tenant_id: typing.Optional[str] = OMIT,
330
- max_chunks: typing.Optional[int] = OMIT,
296
+ max_results: typing.Optional[int] = OMIT,
331
297
  mode: typing.Optional[RetrieveMode] = OMIT,
332
298
  alpha: typing.Optional[Alpha] = OMIT,
333
299
  recency_bias: typing.Optional[float] = OMIT,
334
- num_related_chunks: typing.Optional[int] = OMIT,
335
- personalise_search: typing.Optional[bool] = OMIT,
300
+ personalise_recall: typing.Optional[bool] = OMIT,
336
301
  graph_context: typing.Optional[bool] = OMIT,
337
- extra_context: typing.Optional[str] = OMIT,
302
+ additional_context: typing.Optional[str] = OMIT,
338
303
  search_mode: typing.Optional[SearchMode] = OMIT,
304
+ metadata_filters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
339
305
  request_options: typing.Optional[RequestOptions] = None,
340
306
  ) -> RetrievalResult:
341
307
  """
342
- Search for relevant content within your indexed sources or user memories.
343
-
344
- Results are ranked by relevance and can be customized with parameters like
345
- result limits, alpha weighting, and recency preferences.
346
-
347
- Use `search_mode` to specify what to search:
348
- - "sources" (default): Search over indexed documents
349
- - "memories": Search over user memories (uses inferred content)
350
-
351
- Use `mode` to control retrieval quality:
352
- - "fast" (default): Single query, faster response
353
- - "accurate": Multi-query generation with reranking, higher quality
354
-
355
308
  Parameters
356
309
  ----------
357
310
  tenant_id : str
@@ -363,7 +316,7 @@ class AsyncSearchClient:
363
316
  sub_tenant_id : typing.Optional[str]
364
317
  Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
365
318
 
366
- max_chunks : typing.Optional[int]
319
+ max_results : typing.Optional[int]
367
320
  Maximum number of results to return
368
321
 
369
322
  mode : typing.Optional[RetrieveMode]
@@ -375,21 +328,21 @@ class AsyncSearchClient:
375
328
  recency_bias : typing.Optional[float]
376
329
  Preference for newer content (0.0 = no bias, 1.0 = strong recency preference)
377
330
 
378
- num_related_chunks : typing.Optional[int]
379
- Number of related content chunks to include
380
-
381
- personalise_search : typing.Optional[bool]
331
+ personalise_recall : typing.Optional[bool]
382
332
  Enable personalized search results based on user preferences
383
333
 
384
334
  graph_context : typing.Optional[bool]
385
335
  Enable graph context for search results
386
336
 
387
- extra_context : typing.Optional[str]
337
+ additional_context : typing.Optional[str]
388
338
  Additional context provided by the user to guide retrieval
389
339
 
390
340
  search_mode : typing.Optional[SearchMode]
391
341
  What to search: 'sources' for documents or 'memories' for user memories
392
342
 
343
+ metadata_filters : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
344
+ Optional key-value pairs to filter search results by tenant metadata fields. Keys must match fields defined in tenant_metadata_schema during tenant creation. Supports exact match filtering on indexed metadata fields. Example: {'category': 'engineering', 'priority': 'high'}
345
+
393
346
  request_options : typing.Optional[RequestOptions]
394
347
  Request-specific configuration.
395
348
 
@@ -406,22 +359,22 @@ class AsyncSearchClient:
406
359
 
407
360
  client = AsyncCortexAI(token="YOUR_TOKEN", )
408
361
  async def main() -> None:
409
- await client.search.retrieve(tenant_id='tenant_id', query='query', )
362
+ await client.search.retrieve(tenant_id='tenant_1234', query='Which mode does user prefer', )
410
363
  asyncio.run(main())
411
364
  """
412
365
  _response = await self._raw_client.retrieve(
413
366
  tenant_id=tenant_id,
414
367
  query=query,
415
368
  sub_tenant_id=sub_tenant_id,
416
- max_chunks=max_chunks,
369
+ max_results=max_results,
417
370
  mode=mode,
418
371
  alpha=alpha,
419
372
  recency_bias=recency_bias,
420
- num_related_chunks=num_related_chunks,
421
- personalise_search=personalise_search,
373
+ personalise_recall=personalise_recall,
422
374
  graph_context=graph_context,
423
- extra_context=extra_context,
375
+ additional_context=additional_context,
424
376
  search_mode=search_mode,
377
+ metadata_filters=metadata_filters,
425
378
  request_options=request_options,
426
379
  )
427
380
  return _response.data
@@ -433,19 +386,11 @@ class AsyncSearchClient:
433
386
  query: str,
434
387
  sub_tenant_id: typing.Optional[str] = OMIT,
435
388
  operator: typing.Optional[Bm25OperatorType] = OMIT,
436
- max_chunks: typing.Optional[int] = OMIT,
389
+ max_results: typing.Optional[int] = OMIT,
437
390
  search_mode: typing.Optional[SearchMode] = OMIT,
438
391
  request_options: typing.Optional[RequestOptions] = None,
439
392
  ) -> RetrievalResult:
440
393
  """
441
- Perform full text search for exact matches within your indexed sources or memories.
442
- Choose between 'OR' and 'AND' operators to control how search terms are combined
443
- for precise text matching.
444
-
445
- Use `search_mode` to specify what to search:
446
- - "sources" (default): Search over indexed documents
447
- - "memories": Search over user memories
448
-
449
394
  Parameters
450
395
  ----------
451
396
  tenant_id : str
@@ -460,7 +405,7 @@ class AsyncSearchClient:
460
405
  operator : typing.Optional[Bm25OperatorType]
461
406
  How to combine search terms (OR or AND)
462
407
 
463
- max_chunks : typing.Optional[int]
408
+ max_results : typing.Optional[int]
464
409
  Maximum number of results to return
465
410
 
466
411
  search_mode : typing.Optional[SearchMode]
@@ -482,7 +427,7 @@ class AsyncSearchClient:
482
427
 
483
428
  client = AsyncCortexAI(token="YOUR_TOKEN", )
484
429
  async def main() -> None:
485
- await client.search.full_text_search(tenant_id='tenant_id', query='query', )
430
+ await client.search.full_text_search(tenant_id='tenant_1234', query='John Smith Jake', )
486
431
  asyncio.run(main())
487
432
  """
488
433
  _response = await self._raw_client.full_text_search(
@@ -490,7 +435,7 @@ class AsyncSearchClient:
490
435
  query=query,
491
436
  sub_tenant_id=sub_tenant_id,
492
437
  operator=operator,
493
- max_chunks=max_chunks,
438
+ max_results=max_results,
494
439
  search_mode=search_mode,
495
440
  request_options=request_options,
496
441
  )
@@ -515,19 +460,6 @@ class AsyncSearchClient:
515
460
  request_options: typing.Optional[RequestOptions] = None,
516
461
  ) -> QnASearchResponse:
517
462
  """
518
- Ask a question and get an AI-generated answer based on your indexed sources or memories.
519
-
520
- The response includes both the AI answer and the source chunks used to generate it,
521
- enabling full transparency and citation capabilities.
522
-
523
- Use `search_mode` to specify what to search:
524
- - "sources" (default): Search over indexed documents
525
- - "memories": Search over user memories
526
-
527
- Use `mode` to control retrieval quality:
528
- - "fast" (default): Single query, faster response
529
- - "accurate": Multi-query generation with reranking, higher quality
530
-
531
463
  Parameters
532
464
  ----------
533
465
  tenant_id : str
@@ -585,7 +517,7 @@ class AsyncSearchClient:
585
517
 
586
518
  client = AsyncCortexAI(token="YOUR_TOKEN", )
587
519
  async def main() -> None:
588
- await client.search.qna(tenant_id='tenant_id', question='question', )
520
+ await client.search.qna(tenant_id='tenant_1234', question='What is Cortex AI', )
589
521
  asyncio.run(main())
590
522
  """
591
523
  _response = await self._raw_client.qna(