usecortex-ai 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. cortex_ai/__init__.py +103 -0
  2. cortex_ai/client.py +244 -0
  3. cortex_ai/core/__init__.py +52 -0
  4. cortex_ai/core/api_error.py +23 -0
  5. cortex_ai/core/client_wrapper.py +84 -0
  6. cortex_ai/core/datetime_utils.py +28 -0
  7. cortex_ai/core/file.py +67 -0
  8. cortex_ai/core/force_multipart.py +18 -0
  9. cortex_ai/core/http_client.py +543 -0
  10. cortex_ai/core/http_response.py +55 -0
  11. cortex_ai/core/jsonable_encoder.py +100 -0
  12. cortex_ai/core/pydantic_utilities.py +258 -0
  13. cortex_ai/core/query_encoder.py +58 -0
  14. cortex_ai/core/remove_none_from_dict.py +11 -0
  15. cortex_ai/core/request_options.py +35 -0
  16. cortex_ai/core/serialization.py +276 -0
  17. cortex_ai/embeddings/__init__.py +4 -0
  18. cortex_ai/embeddings/client.py +442 -0
  19. cortex_ai/embeddings/raw_client.py +1153 -0
  20. cortex_ai/environment.py +7 -0
  21. cortex_ai/errors/__init__.py +21 -0
  22. cortex_ai/errors/bad_request_error.py +11 -0
  23. cortex_ai/errors/forbidden_error.py +11 -0
  24. cortex_ai/errors/internal_server_error.py +11 -0
  25. cortex_ai/errors/not_found_error.py +11 -0
  26. cortex_ai/errors/service_unavailable_error.py +11 -0
  27. cortex_ai/errors/unauthorized_error.py +11 -0
  28. cortex_ai/errors/unprocessable_entity_error.py +10 -0
  29. cortex_ai/fetch/__init__.py +4 -0
  30. cortex_ai/fetch/client.py +143 -0
  31. cortex_ai/fetch/raw_client.py +310 -0
  32. cortex_ai/raw_client.py +90 -0
  33. cortex_ai/search/__init__.py +7 -0
  34. cortex_ai/search/client.py +536 -0
  35. cortex_ai/search/raw_client.py +1064 -0
  36. cortex_ai/search/types/__init__.py +7 -0
  37. cortex_ai/search/types/alpha.py +5 -0
  38. cortex_ai/sources/__init__.py +4 -0
  39. cortex_ai/sources/client.py +187 -0
  40. cortex_ai/sources/raw_client.py +532 -0
  41. cortex_ai/tenant/__init__.py +4 -0
  42. cortex_ai/tenant/client.py +120 -0
  43. cortex_ai/tenant/raw_client.py +283 -0
  44. cortex_ai/types/__init__.py +69 -0
  45. cortex_ai/types/actual_error_response.py +20 -0
  46. cortex_ai/types/app_sources_upload_data.py +22 -0
  47. cortex_ai/types/attachment_model.py +26 -0
  48. cortex_ai/types/batch_upload_data.py +22 -0
  49. cortex_ai/types/bm_25_operator_type.py +5 -0
  50. cortex_ai/types/content_model.py +26 -0
  51. cortex_ai/types/delete_memory_request.py +21 -0
  52. cortex_ai/types/embeddings_create_collection_data.py +22 -0
  53. cortex_ai/types/embeddings_delete_data.py +22 -0
  54. cortex_ai/types/embeddings_get_data.py +22 -0
  55. cortex_ai/types/embeddings_search_data.py +22 -0
  56. cortex_ai/types/error_response.py +22 -0
  57. cortex_ai/types/extended_context.py +20 -0
  58. cortex_ai/types/fetch_content_data.py +23 -0
  59. cortex_ai/types/file_upload_result.py +20 -0
  60. cortex_ai/types/full_text_search_data.py +22 -0
  61. cortex_ai/types/http_validation_error.py +20 -0
  62. cortex_ai/types/list_sources_response.py +22 -0
  63. cortex_ai/types/markdown_upload_request.py +21 -0
  64. cortex_ai/types/processing_status.py +22 -0
  65. cortex_ai/types/related_chunk.py +22 -0
  66. cortex_ai/types/search_chunk.py +34 -0
  67. cortex_ai/types/search_data.py +22 -0
  68. cortex_ai/types/single_upload_data.py +21 -0
  69. cortex_ai/types/source.py +32 -0
  70. cortex_ai/types/source_content.py +26 -0
  71. cortex_ai/types/source_model.py +32 -0
  72. cortex_ai/types/tenant_create_data.py +22 -0
  73. cortex_ai/types/tenant_stats.py +23 -0
  74. cortex_ai/types/validation_error.py +22 -0
  75. cortex_ai/types/validation_error_loc_item.py +5 -0
  76. cortex_ai/upload/__init__.py +4 -0
  77. cortex_ai/upload/client.py +1572 -0
  78. cortex_ai/upload/raw_client.py +4202 -0
  79. cortex_ai/user/__init__.py +4 -0
  80. cortex_ai/user/client.py +125 -0
  81. cortex_ai/user/raw_client.py +300 -0
  82. cortex_ai/user_memory/__init__.py +4 -0
  83. cortex_ai/user_memory/client.py +443 -0
  84. cortex_ai/user_memory/raw_client.py +651 -0
  85. usecortex_ai-0.1.0.dist-info/METADATA +136 -0
  86. usecortex_ai-0.1.0.dist-info/RECORD +89 -0
  87. usecortex_ai-0.1.0.dist-info/WHEEL +5 -0
  88. usecortex_ai-0.1.0.dist-info/licenses/LICENSE +22 -0
  89. usecortex_ai-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,536 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
6
+ from ..core.request_options import RequestOptions
7
+ from ..types.bm_25_operator_type import Bm25OperatorType
8
+ from ..types.full_text_search_data import FullTextSearchData
9
+ from ..types.search_data import SearchData
10
+ from .raw_client import AsyncRawSearchClient, RawSearchClient
11
+ from .types.alpha import Alpha
12
+
13
# Sentinel default for optional parameters: ``...`` (Ellipsis) marks "not
# provided" so that an explicit ``None`` can still be sent by the caller.
OMIT = typing.cast(typing.Any, ...)
15
+
16
+
17
class SearchClient:
    """Synchronous search client.

    Thin convenience wrapper around :class:`RawSearchClient`: each method
    forwards its arguments unchanged to the raw client and returns the parsed
    ``.data`` of the raw response.
    """

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawSearchClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawSearchClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawSearchClient
        """
        return self._raw_client

    def qna(
        self,
        *,
        question: str,
        session_id: str,
        tenant_id: str,
        context_list: typing.Optional[typing.Sequence[str]] = OMIT,
        search_modes: typing.Optional[typing.Sequence[str]] = OMIT,
        sub_tenant_id: typing.Optional[str] = OMIT,
        highlight_chunks: typing.Optional[bool] = OMIT,
        stream: typing.Optional[bool] = OMIT,
        search_alpha: typing.Optional[float] = OMIT,
        recency_bias: typing.Optional[float] = OMIT,
        ai_generation: typing.Optional[bool] = OMIT,
        top_n: typing.Optional[int] = OMIT,
        user_name: typing.Optional[str] = OMIT,
        user_instructions: typing.Optional[str] = OMIT,
        multi_step_reasoning: typing.Optional[bool] = OMIT,
        auto_agent_routing: typing.Optional[bool] = OMIT,
        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Question-answering call; delegates to the raw client's ``qna`` endpoint
        and returns the parsed response data.

        Parameters
        ----------
        question : str

        session_id : str

        tenant_id : str

        context_list : typing.Optional[typing.Sequence[str]]

        search_modes : typing.Optional[typing.Sequence[str]]

        sub_tenant_id : typing.Optional[str]

        highlight_chunks : typing.Optional[bool]

        stream : typing.Optional[bool]

        search_alpha : typing.Optional[float]

        recency_bias : typing.Optional[float]

        ai_generation : typing.Optional[bool]

        top_n : typing.Optional[int]

        user_name : typing.Optional[str]

        user_instructions : typing.Optional[str]

        multi_step_reasoning : typing.Optional[bool]

        auto_agent_routing : typing.Optional[bool]

        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        from cortex_ai import CortexAI

        client = CortexAI(token="YOUR_TOKEN", )
        client.search.qna(question='question', session_id='session_id', tenant_id='tenant_id', )
        """
        _response = self._raw_client.qna(
            question=question,
            session_id=session_id,
            tenant_id=tenant_id,
            context_list=context_list,
            search_modes=search_modes,
            sub_tenant_id=sub_tenant_id,
            highlight_chunks=highlight_chunks,
            stream=stream,
            search_alpha=search_alpha,
            recency_bias=recency_bias,
            ai_generation=ai_generation,
            top_n=top_n,
            user_name=user_name,
            user_instructions=user_instructions,
            multi_step_reasoning=multi_step_reasoning,
            auto_agent_routing=auto_agent_routing,
            metadata=metadata,
            request_options=request_options,
        )
        return _response.data

    def retrieve(
        self,
        *,
        query: str,
        tenant_id: str,
        sub_tenant_id: typing.Optional[str] = OMIT,
        max_chunks: typing.Optional[int] = OMIT,
        alpha: typing.Optional[Alpha] = OMIT,
        recency_bias: typing.Optional[float] = OMIT,
        num_related_chunks: typing.Optional[int] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> SearchData:
        """
        Search for content within indexed sources using semantic and keyword search capabilities.

        This endpoint performs a search query against the Findr backend, allowing users to retrieve
        relevant content chunks from their indexed documents and sources. The search can be customized
        with various parameters to control the number of results and search behavior.

        Args:
            request (SearchRequest): The search request containing:
                - query (str): Search query string to find relevant content
                - tenant_id (str, optional): Tenant identifier for multi-tenancy
                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
                - max_chunks (int, optional): Maximum number of content chunks to return
                - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
                - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
                - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
            api_details (dict): Authentication details obtained from API key validation

        Returns:
            SearchData: Success response with search results

        Parameters
        ----------
        query : str

        tenant_id : str

        sub_tenant_id : typing.Optional[str]

        max_chunks : typing.Optional[int]

        alpha : typing.Optional[Alpha]

        recency_bias : typing.Optional[float]

        num_related_chunks : typing.Optional[int]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SearchData
            Successful Response

        Examples
        --------
        from cortex_ai import CortexAI

        client = CortexAI(token="YOUR_TOKEN", )
        client.search.retrieve(query='query', tenant_id='tenant_id', )
        """
        _response = self._raw_client.retrieve(
            query=query,
            tenant_id=tenant_id,
            sub_tenant_id=sub_tenant_id,
            max_chunks=max_chunks,
            alpha=alpha,
            recency_bias=recency_bias,
            num_related_chunks=num_related_chunks,
            request_options=request_options,
        )
        return _response.data

    def full_text_search(
        self,
        *,
        query: str,
        tenant_id: str,
        sub_tenant_id: typing.Optional[str] = OMIT,
        operator: typing.Optional[Bm25OperatorType] = OMIT,
        max_chunks: typing.Optional[int] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> FullTextSearchData:
        """
        Full text search endpoint for Cortex customers.
        Performs full text search with configurable operators for precise text matching against the Findr backend.

        This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
        relevant content chunks from their indexed documents and sources using BM25-based text matching.
        The search can be customized with various operators to control the matching behavior.

        Args:
            request (FullTextSearchRequest): The full text search request containing:
                - query (str): Search query string to find relevant content
                - tenant_id (str): Tenant identifier for multi-tenancy
                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
                - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
                - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
            api_details (dict): Authentication details obtained from API key validation

        Returns:
            FullTextSearchData: Success response with full text search results

        Parameters
        ----------
        query : str

        tenant_id : str

        sub_tenant_id : typing.Optional[str]

        operator : typing.Optional[Bm25OperatorType]

        max_chunks : typing.Optional[int]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        FullTextSearchData
            Successful Response

        Examples
        --------
        from cortex_ai import CortexAI

        client = CortexAI(token="YOUR_TOKEN", )
        client.search.full_text_search(query='query', tenant_id='tenant_id', )
        """
        _response = self._raw_client.full_text_search(
            query=query,
            tenant_id=tenant_id,
            sub_tenant_id=sub_tenant_id,
            operator=operator,
            max_chunks=max_chunks,
            request_options=request_options,
        )
        return _response.data
270
+
271
+
272
class AsyncSearchClient:
    """Asynchronous search client.

    Thin convenience wrapper around :class:`AsyncRawSearchClient`: each method
    awaits the corresponding raw-client call and returns the parsed ``.data``
    of the raw response.
    """

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawSearchClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawSearchClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawSearchClient
        """
        return self._raw_client

    async def qna(
        self,
        *,
        question: str,
        session_id: str,
        tenant_id: str,
        context_list: typing.Optional[typing.Sequence[str]] = OMIT,
        search_modes: typing.Optional[typing.Sequence[str]] = OMIT,
        sub_tenant_id: typing.Optional[str] = OMIT,
        highlight_chunks: typing.Optional[bool] = OMIT,
        stream: typing.Optional[bool] = OMIT,
        search_alpha: typing.Optional[float] = OMIT,
        recency_bias: typing.Optional[float] = OMIT,
        ai_generation: typing.Optional[bool] = OMIT,
        top_n: typing.Optional[int] = OMIT,
        user_name: typing.Optional[str] = OMIT,
        user_instructions: typing.Optional[str] = OMIT,
        multi_step_reasoning: typing.Optional[bool] = OMIT,
        auto_agent_routing: typing.Optional[bool] = OMIT,
        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Question-answering call; awaits the raw client's ``qna`` endpoint and
        returns the parsed response data.

        Parameters
        ----------
        question : str

        session_id : str

        tenant_id : str

        context_list : typing.Optional[typing.Sequence[str]]

        search_modes : typing.Optional[typing.Sequence[str]]

        sub_tenant_id : typing.Optional[str]

        highlight_chunks : typing.Optional[bool]

        stream : typing.Optional[bool]

        search_alpha : typing.Optional[float]

        recency_bias : typing.Optional[float]

        ai_generation : typing.Optional[bool]

        top_n : typing.Optional[int]

        user_name : typing.Optional[str]

        user_instructions : typing.Optional[str]

        multi_step_reasoning : typing.Optional[bool]

        auto_agent_routing : typing.Optional[bool]

        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        import asyncio

        from cortex_ai import AsyncCortexAI

        client = AsyncCortexAI(token="YOUR_TOKEN", )

        async def main() -> None:
            await client.search.qna(question='question', session_id='session_id', tenant_id='tenant_id', )

        asyncio.run(main())
        """
        _response = await self._raw_client.qna(
            question=question,
            session_id=session_id,
            tenant_id=tenant_id,
            context_list=context_list,
            search_modes=search_modes,
            sub_tenant_id=sub_tenant_id,
            highlight_chunks=highlight_chunks,
            stream=stream,
            search_alpha=search_alpha,
            recency_bias=recency_bias,
            ai_generation=ai_generation,
            top_n=top_n,
            user_name=user_name,
            user_instructions=user_instructions,
            multi_step_reasoning=multi_step_reasoning,
            auto_agent_routing=auto_agent_routing,
            metadata=metadata,
            request_options=request_options,
        )
        return _response.data

    async def retrieve(
        self,
        *,
        query: str,
        tenant_id: str,
        sub_tenant_id: typing.Optional[str] = OMIT,
        max_chunks: typing.Optional[int] = OMIT,
        alpha: typing.Optional[Alpha] = OMIT,
        recency_bias: typing.Optional[float] = OMIT,
        num_related_chunks: typing.Optional[int] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> SearchData:
        """
        Search for content within indexed sources using semantic and keyword search capabilities.

        This endpoint performs a search query against the Findr backend, allowing users to retrieve
        relevant content chunks from their indexed documents and sources. The search can be customized
        with various parameters to control the number of results and search behavior.

        Args:
            request (SearchRequest): The search request containing:
                - query (str): Search query string to find relevant content
                - tenant_id (str, optional): Tenant identifier for multi-tenancy
                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
                - max_chunks (int, optional): Maximum number of content chunks to return
                - alpha (Union[float, str], optional): Search algorithm parameter for result ranking (default: 0.8). Can be float-type (0.0-1.0) or 'auto' for dynamic selection
                - recency_bias (float, optional): Bias towards more recent content (default: 0.5)
                - num_related_chunks (int, optional): Number of related chunks to return (default: 0)
            api_details (dict): Authentication details obtained from API key validation

        Returns:
            SearchData: Success response with search results

        Parameters
        ----------
        query : str

        tenant_id : str

        sub_tenant_id : typing.Optional[str]

        max_chunks : typing.Optional[int]

        alpha : typing.Optional[Alpha]

        recency_bias : typing.Optional[float]

        num_related_chunks : typing.Optional[int]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        SearchData
            Successful Response

        Examples
        --------
        import asyncio

        from cortex_ai import AsyncCortexAI

        client = AsyncCortexAI(token="YOUR_TOKEN", )

        async def main() -> None:
            await client.search.retrieve(query='query', tenant_id='tenant_id', )

        asyncio.run(main())
        """
        _response = await self._raw_client.retrieve(
            query=query,
            tenant_id=tenant_id,
            sub_tenant_id=sub_tenant_id,
            max_chunks=max_chunks,
            alpha=alpha,
            recency_bias=recency_bias,
            num_related_chunks=num_related_chunks,
            request_options=request_options,
        )
        return _response.data

    async def full_text_search(
        self,
        *,
        query: str,
        tenant_id: str,
        sub_tenant_id: typing.Optional[str] = OMIT,
        operator: typing.Optional[Bm25OperatorType] = OMIT,
        max_chunks: typing.Optional[int] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> FullTextSearchData:
        """
        Full text search endpoint for Cortex customers.
        Performs full text search with configurable operators for precise text matching against the Findr backend.

        This endpoint performs a full text search query against the Findr backend, allowing users to retrieve
        relevant content chunks from their indexed documents and sources using BM25-based text matching.
        The search can be customized with various operators to control the matching behavior.

        Args:
            request (FullTextSearchRequest): The full text search request containing:
                - query (str): Search query string to find relevant content
                - tenant_id (str): Tenant identifier for multi-tenancy
                - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
                - operator (BM25OperatorType, optional): Full text search operator type (OR or AND). Defaults to OR
                - max_chunks (int, optional): Maximum number of content chunks to return (1-1001, defaults to 25)
            api_details (dict): Authentication details obtained from API key validation

        Returns:
            FullTextSearchData: Success response with full text search results

        Parameters
        ----------
        query : str

        tenant_id : str

        sub_tenant_id : typing.Optional[str]

        operator : typing.Optional[Bm25OperatorType]

        max_chunks : typing.Optional[int]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        FullTextSearchData
            Successful Response

        Examples
        --------
        import asyncio

        from cortex_ai import AsyncCortexAI

        client = AsyncCortexAI(token="YOUR_TOKEN", )

        async def main() -> None:
            await client.search.full_text_search(query='query', tenant_id='tenant_id', )

        asyncio.run(main())
        """
        _response = await self._raw_client.full_text_search(
            query=query,
            tenant_id=tenant_id,
            sub_tenant_id=sub_tenant_id,
            operator=operator,
            max_chunks=max_chunks,
            request_options=request_options,
        )
        return _response.data