usecortex-ai 0.1.0 (usecortex_ai-0.1.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. cortex_ai/__init__.py +103 -0
  2. cortex_ai/client.py +244 -0
  3. cortex_ai/core/__init__.py +52 -0
  4. cortex_ai/core/api_error.py +23 -0
  5. cortex_ai/core/client_wrapper.py +84 -0
  6. cortex_ai/core/datetime_utils.py +28 -0
  7. cortex_ai/core/file.py +67 -0
  8. cortex_ai/core/force_multipart.py +18 -0
  9. cortex_ai/core/http_client.py +543 -0
  10. cortex_ai/core/http_response.py +55 -0
  11. cortex_ai/core/jsonable_encoder.py +100 -0
  12. cortex_ai/core/pydantic_utilities.py +258 -0
  13. cortex_ai/core/query_encoder.py +58 -0
  14. cortex_ai/core/remove_none_from_dict.py +11 -0
  15. cortex_ai/core/request_options.py +35 -0
  16. cortex_ai/core/serialization.py +276 -0
  17. cortex_ai/embeddings/__init__.py +4 -0
  18. cortex_ai/embeddings/client.py +442 -0
  19. cortex_ai/embeddings/raw_client.py +1153 -0
  20. cortex_ai/environment.py +7 -0
  21. cortex_ai/errors/__init__.py +21 -0
  22. cortex_ai/errors/bad_request_error.py +11 -0
  23. cortex_ai/errors/forbidden_error.py +11 -0
  24. cortex_ai/errors/internal_server_error.py +11 -0
  25. cortex_ai/errors/not_found_error.py +11 -0
  26. cortex_ai/errors/service_unavailable_error.py +11 -0
  27. cortex_ai/errors/unauthorized_error.py +11 -0
  28. cortex_ai/errors/unprocessable_entity_error.py +10 -0
  29. cortex_ai/fetch/__init__.py +4 -0
  30. cortex_ai/fetch/client.py +143 -0
  31. cortex_ai/fetch/raw_client.py +310 -0
  32. cortex_ai/raw_client.py +90 -0
  33. cortex_ai/search/__init__.py +7 -0
  34. cortex_ai/search/client.py +536 -0
  35. cortex_ai/search/raw_client.py +1064 -0
  36. cortex_ai/search/types/__init__.py +7 -0
  37. cortex_ai/search/types/alpha.py +5 -0
  38. cortex_ai/sources/__init__.py +4 -0
  39. cortex_ai/sources/client.py +187 -0
  40. cortex_ai/sources/raw_client.py +532 -0
  41. cortex_ai/tenant/__init__.py +4 -0
  42. cortex_ai/tenant/client.py +120 -0
  43. cortex_ai/tenant/raw_client.py +283 -0
  44. cortex_ai/types/__init__.py +69 -0
  45. cortex_ai/types/actual_error_response.py +20 -0
  46. cortex_ai/types/app_sources_upload_data.py +22 -0
  47. cortex_ai/types/attachment_model.py +26 -0
  48. cortex_ai/types/batch_upload_data.py +22 -0
  49. cortex_ai/types/bm_25_operator_type.py +5 -0
  50. cortex_ai/types/content_model.py +26 -0
  51. cortex_ai/types/delete_memory_request.py +21 -0
  52. cortex_ai/types/embeddings_create_collection_data.py +22 -0
  53. cortex_ai/types/embeddings_delete_data.py +22 -0
  54. cortex_ai/types/embeddings_get_data.py +22 -0
  55. cortex_ai/types/embeddings_search_data.py +22 -0
  56. cortex_ai/types/error_response.py +22 -0
  57. cortex_ai/types/extended_context.py +20 -0
  58. cortex_ai/types/fetch_content_data.py +23 -0
  59. cortex_ai/types/file_upload_result.py +20 -0
  60. cortex_ai/types/full_text_search_data.py +22 -0
  61. cortex_ai/types/http_validation_error.py +20 -0
  62. cortex_ai/types/list_sources_response.py +22 -0
  63. cortex_ai/types/markdown_upload_request.py +21 -0
  64. cortex_ai/types/processing_status.py +22 -0
  65. cortex_ai/types/related_chunk.py +22 -0
  66. cortex_ai/types/search_chunk.py +34 -0
  67. cortex_ai/types/search_data.py +22 -0
  68. cortex_ai/types/single_upload_data.py +21 -0
  69. cortex_ai/types/source.py +32 -0
  70. cortex_ai/types/source_content.py +26 -0
  71. cortex_ai/types/source_model.py +32 -0
  72. cortex_ai/types/tenant_create_data.py +22 -0
  73. cortex_ai/types/tenant_stats.py +23 -0
  74. cortex_ai/types/validation_error.py +22 -0
  75. cortex_ai/types/validation_error_loc_item.py +5 -0
  76. cortex_ai/upload/__init__.py +4 -0
  77. cortex_ai/upload/client.py +1572 -0
  78. cortex_ai/upload/raw_client.py +4202 -0
  79. cortex_ai/user/__init__.py +4 -0
  80. cortex_ai/user/client.py +125 -0
  81. cortex_ai/user/raw_client.py +300 -0
  82. cortex_ai/user_memory/__init__.py +4 -0
  83. cortex_ai/user_memory/client.py +443 -0
  84. cortex_ai/user_memory/raw_client.py +651 -0
  85. usecortex_ai-0.1.0.dist-info/METADATA +136 -0
  86. usecortex_ai-0.1.0.dist-info/RECORD +89 -0
  87. usecortex_ai-0.1.0.dist-info/WHEEL +5 -0
  88. usecortex_ai-0.1.0.dist-info/licenses/LICENSE +22 -0
  89. usecortex_ai-0.1.0.dist-info/top_level.txt +1 -0
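
The hunk below adds 442 lines and, by its line count, corresponds to item 18 above (cortex_ai/embeddings/client.py): the Fern-generated sync and async embeddings clients. For orientation, here is a minimal usage sketch stitched together from the docstring examples in that file. It assumes the wheel is installed (for example via pip install usecortex-ai), that CortexAI is exported from the cortex_ai package, and that the token and tenant values are placeholders to be replaced.

    # Minimal sketch based on the docstring examples in the diff below.
    # Assumptions: the package is installed as usecortex-ai, CortexAI is
    # importable from cortex_ai, and the token/tenant values are placeholders.
    from cortex_ai import CortexAI

    client = CortexAI(token="YOUR_TOKEN")

    # Create an embeddings collection for a tenant, then run a vector search.
    client.embeddings.create_collection(tenant_id="tenant_id")
    result = client.embeddings.search(
        embeddings=[1.1],   # a single embedding vector
        tenant_id="tenant_id",
        max_chunks=10,      # optional; the docstring notes a default of 10
    )
    print(result)  # EmbeddingsSearchData: chunk IDs with similarity scores

The async client (AsyncCortexAI) mirrors these calls with await, as the examples in the second half of the file show.
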
@@ -0,0 +1,442 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+ from ..core.request_options import RequestOptions
+ from ..types.embeddings_create_collection_data import EmbeddingsCreateCollectionData
+ from ..types.embeddings_delete_data import EmbeddingsDeleteData
+ from ..types.embeddings_get_data import EmbeddingsGetData
+ from ..types.embeddings_search_data import EmbeddingsSearchData
+ from .raw_client import AsyncRawEmbeddingsClient, RawEmbeddingsClient
+
+ # this is used as the default value for optional parameters
+ OMIT = typing.cast(typing.Any, ...)
+
+
+ class EmbeddingsClient:
+     def __init__(self, *, client_wrapper: SyncClientWrapper):
+         self._raw_client = RawEmbeddingsClient(client_wrapper=client_wrapper)
+
+     @property
+     def with_raw_response(self) -> RawEmbeddingsClient:
+         """
+         Retrieves a raw implementation of this client that returns raw responses.
+
+         Returns
+         -------
+         RawEmbeddingsClient
+         """
+         return self._raw_client
+
+     def delete(
+         self,
+         *,
+         chunk_ids: typing.Sequence[str],
+         tenant_id: str,
+         sub_tenant_id: typing.Optional[str] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> EmbeddingsDeleteData:
+         """
+         Delete specific embedding chunks from indexed sources.
+
+         This endpoint deletes specified embedding chunks from the Findr backend by sending
+         chunk IDs to the backend delete service.
+
+         Args:
+             request (EmbeddingsDeleteRequest): The delete request containing:
+                 - chunk_ids (List[str]): List of chunk IDs to delete
+                 - tenant_id (str): Tenant identifier for multi-tenancy
+                 - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
+             api_details (dict): Authentication details obtained from API key validation
+
+         Returns:
+             EmbeddingsDeleteData: Success response with deletion details
+
+         Parameters
+         ----------
+         chunk_ids : typing.Sequence[str]
+
+         tenant_id : str
+
+         sub_tenant_id : typing.Optional[str]
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         EmbeddingsDeleteData
+             Successful Response
+
+         Examples
+         --------
+         from cortex_ai import CortexAI
+
+         client = CortexAI(token="YOUR_TOKEN")
+         client.embeddings.delete(chunk_ids=['chunk_ids'], tenant_id='tenant_id')
+         """
+         _response = self._raw_client.delete(
+             chunk_ids=chunk_ids, tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+         )
+         return _response.data
+
+     def search(
+         self,
+         *,
+         embeddings: typing.Sequence[float],
+         tenant_id: str,
+         sub_tenant_id: typing.Optional[str] = OMIT,
+         max_chunks: typing.Optional[int] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> EmbeddingsSearchData:
+         """
+         Search for similar embedding chunks using vector similarity.
+
+         This endpoint performs semantic search by sending an embedding vector to the Findr backend
+         and returns a list of the most similar chunk IDs based on vector similarity.
+
+         Args:
+             request (EmbeddingsSearchRequest): The search request containing:
+                 - embeddings (List[float]): Single embedding vector for similarity search
+                 - tenant_id (str): Tenant identifier for multi-tenancy
+                 - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
+                 - max_chunks (int, optional): Maximum number of chunk IDs to return (default: 10)
+             api_details (dict): Authentication details obtained from API key validation
+
+         Returns:
+             EmbeddingsSearchData: List of chunk IDs with similarity scores
+
+         Parameters
+         ----------
+         embeddings : typing.Sequence[float]
+             Single embedding vector for search
+
+         tenant_id : str
+
+         sub_tenant_id : typing.Optional[str]
+
+         max_chunks : typing.Optional[int]
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         EmbeddingsSearchData
+             Successful Response
+
+         Examples
+         --------
+         from cortex_ai import CortexAI
+
+         client = CortexAI(token="YOUR_TOKEN")
+         client.embeddings.search(embeddings=[1.1], tenant_id='tenant_id')
+         """
+         _response = self._raw_client.search(
+             embeddings=embeddings,
+             tenant_id=tenant_id,
+             sub_tenant_id=sub_tenant_id,
+             max_chunks=max_chunks,
+             request_options=request_options,
+         )
+         return _response.data
+
+     def get_by_chunk_ids(
+         self,
+         *,
+         chunk_ids: typing.Sequence[str],
+         tenant_id: str,
+         sub_tenant_id: typing.Optional[str] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> EmbeddingsGetData:
+         """
+         Get embeddings based on chunk IDs.
+
+         This endpoint returns embeddings for a list of chunk IDs.
+
+         Returns:
+             EmbeddingsGetData: Embeddings data for the requested chunk IDs
+
+         Parameters
+         ----------
+         chunk_ids : typing.Sequence[str]
+
+         tenant_id : str
+
+         sub_tenant_id : typing.Optional[str]
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         EmbeddingsGetData
+             Successful Response
+
+         Examples
+         --------
+         from cortex_ai import CortexAI
+
+         client = CortexAI(token="YOUR_TOKEN")
+         client.embeddings.get_by_chunk_ids(chunk_ids=['chunk_ids'], tenant_id='tenant_id')
+         """
+         _response = self._raw_client.get_by_chunk_ids(
+             chunk_ids=chunk_ids, tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+         )
+         return _response.data
+
+     def create_collection(
+         self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
+     ) -> EmbeddingsCreateCollectionData:
+         """
+         Create an embeddings collection for the given tenant in Findr.
+
+         sub_tenant_id is set to be the same as tenant_id as per requirements.
+
+         Returns:
+             EmbeddingsCreateCollectionData: Success response with collection details
+
+         Parameters
+         ----------
+         tenant_id : str
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         EmbeddingsCreateCollectionData
+             Successful Response
+
+         Examples
+         --------
+         from cortex_ai import CortexAI
+
+         client = CortexAI(token="YOUR_TOKEN")
+         client.embeddings.create_collection(tenant_id='tenant_id')
+         """
+         _response = self._raw_client.create_collection(tenant_id=tenant_id, request_options=request_options)
+         return _response.data
+
+
+ class AsyncEmbeddingsClient:
+     def __init__(self, *, client_wrapper: AsyncClientWrapper):
+         self._raw_client = AsyncRawEmbeddingsClient(client_wrapper=client_wrapper)
+
+     @property
+     def with_raw_response(self) -> AsyncRawEmbeddingsClient:
+         """
+         Retrieves a raw implementation of this client that returns raw responses.
+
+         Returns
+         -------
+         AsyncRawEmbeddingsClient
+         """
+         return self._raw_client
+
+     async def delete(
+         self,
+         *,
+         chunk_ids: typing.Sequence[str],
+         tenant_id: str,
+         sub_tenant_id: typing.Optional[str] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> EmbeddingsDeleteData:
+         """
+         Delete specific embedding chunks from indexed sources.
+
+         This endpoint deletes specified embedding chunks from the Findr backend by sending
+         chunk IDs to the backend delete service.
+
+         Args:
+             request (EmbeddingsDeleteRequest): The delete request containing:
+                 - chunk_ids (List[str]): List of chunk IDs to delete
+                 - tenant_id (str): Tenant identifier for multi-tenancy
+                 - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
+             api_details (dict): Authentication details obtained from API key validation
+
+         Returns:
+             EmbeddingsDeleteData: Success response with deletion details
+
+         Parameters
+         ----------
+         chunk_ids : typing.Sequence[str]
+
+         tenant_id : str
+
+         sub_tenant_id : typing.Optional[str]
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         EmbeddingsDeleteData
+             Successful Response
+
+         Examples
+         --------
+         import asyncio
+
+         from cortex_ai import AsyncCortexAI
+
+         client = AsyncCortexAI(token="YOUR_TOKEN")
+         async def main() -> None:
+             await client.embeddings.delete(chunk_ids=['chunk_ids'], tenant_id='tenant_id')
+         asyncio.run(main())
+         """
+         _response = await self._raw_client.delete(
+             chunk_ids=chunk_ids, tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+         )
+         return _response.data
+
+     async def search(
+         self,
+         *,
+         embeddings: typing.Sequence[float],
+         tenant_id: str,
+         sub_tenant_id: typing.Optional[str] = OMIT,
+         max_chunks: typing.Optional[int] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> EmbeddingsSearchData:
+         """
+         Search for similar embedding chunks using vector similarity.
+
+         This endpoint performs semantic search by sending an embedding vector to the Findr backend
+         and returns a list of the most similar chunk IDs based on vector similarity.
+
+         Args:
+             request (EmbeddingsSearchRequest): The search request containing:
+                 - embeddings (List[float]): Single embedding vector for similarity search
+                 - tenant_id (str): Tenant identifier for multi-tenancy
+                 - sub_tenant_id (str, optional): Sub-tenant identifier, defaults to tenant_id
+                 - max_chunks (int, optional): Maximum number of chunk IDs to return (default: 10)
+             api_details (dict): Authentication details obtained from API key validation
+
+         Returns:
+             EmbeddingsSearchData: List of chunk IDs with similarity scores
+
+         Parameters
+         ----------
+         embeddings : typing.Sequence[float]
+             Single embedding vector for search
+
+         tenant_id : str
+
+         sub_tenant_id : typing.Optional[str]
+
+         max_chunks : typing.Optional[int]
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         EmbeddingsSearchData
+             Successful Response
+
+         Examples
+         --------
+         import asyncio
+
+         from cortex_ai import AsyncCortexAI
+
+         client = AsyncCortexAI(token="YOUR_TOKEN")
+         async def main() -> None:
+             await client.embeddings.search(embeddings=[1.1], tenant_id='tenant_id')
+         asyncio.run(main())
+         """
+         _response = await self._raw_client.search(
+             embeddings=embeddings,
+             tenant_id=tenant_id,
+             sub_tenant_id=sub_tenant_id,
+             max_chunks=max_chunks,
+             request_options=request_options,
+         )
+         return _response.data
+
+     async def get_by_chunk_ids(
+         self,
+         *,
+         chunk_ids: typing.Sequence[str],
+         tenant_id: str,
+         sub_tenant_id: typing.Optional[str] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> EmbeddingsGetData:
+         """
+         Get embeddings based on chunk IDs.
+
+         This endpoint returns embeddings for a list of chunk IDs.
+
+         Returns:
+             EmbeddingsGetData: Embeddings data for the requested chunk IDs
+
+         Parameters
+         ----------
+         chunk_ids : typing.Sequence[str]
+
+         tenant_id : str
+
+         sub_tenant_id : typing.Optional[str]
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         EmbeddingsGetData
+             Successful Response
+
+         Examples
+         --------
+         import asyncio
+
+         from cortex_ai import AsyncCortexAI
+
+         client = AsyncCortexAI(token="YOUR_TOKEN")
+         async def main() -> None:
+             await client.embeddings.get_by_chunk_ids(chunk_ids=['chunk_ids'], tenant_id='tenant_id')
+         asyncio.run(main())
+         """
+         _response = await self._raw_client.get_by_chunk_ids(
+             chunk_ids=chunk_ids, tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
+         )
+         return _response.data
+
+     async def create_collection(
+         self, *, tenant_id: str, request_options: typing.Optional[RequestOptions] = None
+     ) -> EmbeddingsCreateCollectionData:
+         """
+         Create an embeddings collection for the given tenant in Findr.
+
+         sub_tenant_id is set to be the same as tenant_id as per requirements.
+
+         Returns:
+             EmbeddingsCreateCollectionData: Success response with collection details
+
+         Parameters
+         ----------
+         tenant_id : str
+
+         request_options : typing.Optional[RequestOptions]
+             Request-specific configuration.
+
+         Returns
+         -------
+         EmbeddingsCreateCollectionData
+             Successful Response
+
+         Examples
+         --------
+         import asyncio
+
+         from cortex_ai import AsyncCortexAI
+
+         client = AsyncCortexAI(token="YOUR_TOKEN")
+         async def main() -> None:
+             await client.embeddings.create_collection(tenant_id='tenant_id')
+         asyncio.run(main())
+         """
+         _response = await self._raw_client.create_collection(tenant_id=tenant_id, request_options=request_options)
+         return _response.data
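
Both clients also expose a with_raw_response property that returns the underlying raw client. A short sketch of how that might be used, assuming (as the return _response.data lines above suggest) that the raw client's methods take the same keyword arguments and return a response wrapper whose data attribute carries the parsed payload:

    # Hedged sketch: the exact response wrapper type is defined in
    # cortex_ai/embeddings/raw_client.py, which is not shown in this hunk.
    from cortex_ai import CortexAI

    client = CortexAI(token="YOUR_TOKEN")

    raw = client.embeddings.with_raw_response
    response = raw.get_by_chunk_ids(chunk_ids=["chunk_ids"], tenant_id="tenant_id")
    embeddings_data = response.data  # same parsed payload the wrapped client returns
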