usecortex-ai 0.1.0 (usecortex_ai-0.1.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cortex_ai/__init__.py +103 -0
- cortex_ai/client.py +244 -0
- cortex_ai/core/__init__.py +52 -0
- cortex_ai/core/api_error.py +23 -0
- cortex_ai/core/client_wrapper.py +84 -0
- cortex_ai/core/datetime_utils.py +28 -0
- cortex_ai/core/file.py +67 -0
- cortex_ai/core/force_multipart.py +18 -0
- cortex_ai/core/http_client.py +543 -0
- cortex_ai/core/http_response.py +55 -0
- cortex_ai/core/jsonable_encoder.py +100 -0
- cortex_ai/core/pydantic_utilities.py +258 -0
- cortex_ai/core/query_encoder.py +58 -0
- cortex_ai/core/remove_none_from_dict.py +11 -0
- cortex_ai/core/request_options.py +35 -0
- cortex_ai/core/serialization.py +276 -0
- cortex_ai/embeddings/__init__.py +4 -0
- cortex_ai/embeddings/client.py +442 -0
- cortex_ai/embeddings/raw_client.py +1153 -0
- cortex_ai/environment.py +7 -0
- cortex_ai/errors/__init__.py +21 -0
- cortex_ai/errors/bad_request_error.py +11 -0
- cortex_ai/errors/forbidden_error.py +11 -0
- cortex_ai/errors/internal_server_error.py +11 -0
- cortex_ai/errors/not_found_error.py +11 -0
- cortex_ai/errors/service_unavailable_error.py +11 -0
- cortex_ai/errors/unauthorized_error.py +11 -0
- cortex_ai/errors/unprocessable_entity_error.py +10 -0
- cortex_ai/fetch/__init__.py +4 -0
- cortex_ai/fetch/client.py +143 -0
- cortex_ai/fetch/raw_client.py +310 -0
- cortex_ai/raw_client.py +90 -0
- cortex_ai/search/__init__.py +7 -0
- cortex_ai/search/client.py +536 -0
- cortex_ai/search/raw_client.py +1064 -0
- cortex_ai/search/types/__init__.py +7 -0
- cortex_ai/search/types/alpha.py +5 -0
- cortex_ai/sources/__init__.py +4 -0
- cortex_ai/sources/client.py +187 -0
- cortex_ai/sources/raw_client.py +532 -0
- cortex_ai/tenant/__init__.py +4 -0
- cortex_ai/tenant/client.py +120 -0
- cortex_ai/tenant/raw_client.py +283 -0
- cortex_ai/types/__init__.py +69 -0
- cortex_ai/types/actual_error_response.py +20 -0
- cortex_ai/types/app_sources_upload_data.py +22 -0
- cortex_ai/types/attachment_model.py +26 -0
- cortex_ai/types/batch_upload_data.py +22 -0
- cortex_ai/types/bm_25_operator_type.py +5 -0
- cortex_ai/types/content_model.py +26 -0
- cortex_ai/types/delete_memory_request.py +21 -0
- cortex_ai/types/embeddings_create_collection_data.py +22 -0
- cortex_ai/types/embeddings_delete_data.py +22 -0
- cortex_ai/types/embeddings_get_data.py +22 -0
- cortex_ai/types/embeddings_search_data.py +22 -0
- cortex_ai/types/error_response.py +22 -0
- cortex_ai/types/extended_context.py +20 -0
- cortex_ai/types/fetch_content_data.py +23 -0
- cortex_ai/types/file_upload_result.py +20 -0
- cortex_ai/types/full_text_search_data.py +22 -0
- cortex_ai/types/http_validation_error.py +20 -0
- cortex_ai/types/list_sources_response.py +22 -0
- cortex_ai/types/markdown_upload_request.py +21 -0
- cortex_ai/types/processing_status.py +22 -0
- cortex_ai/types/related_chunk.py +22 -0
- cortex_ai/types/search_chunk.py +34 -0
- cortex_ai/types/search_data.py +22 -0
- cortex_ai/types/single_upload_data.py +21 -0
- cortex_ai/types/source.py +32 -0
- cortex_ai/types/source_content.py +26 -0
- cortex_ai/types/source_model.py +32 -0
- cortex_ai/types/tenant_create_data.py +22 -0
- cortex_ai/types/tenant_stats.py +23 -0
- cortex_ai/types/validation_error.py +22 -0
- cortex_ai/types/validation_error_loc_item.py +5 -0
- cortex_ai/upload/__init__.py +4 -0
- cortex_ai/upload/client.py +1572 -0
- cortex_ai/upload/raw_client.py +4202 -0
- cortex_ai/user/__init__.py +4 -0
- cortex_ai/user/client.py +125 -0
- cortex_ai/user/raw_client.py +300 -0
- cortex_ai/user_memory/__init__.py +4 -0
- cortex_ai/user_memory/client.py +443 -0
- cortex_ai/user_memory/raw_client.py +651 -0
- usecortex_ai-0.1.0.dist-info/METADATA +136 -0
- usecortex_ai-0.1.0.dist-info/RECORD +89 -0
- usecortex_ai-0.1.0.dist-info/WHEEL +5 -0
- usecortex_ai-0.1.0.dist-info/licenses/LICENSE +22 -0
- usecortex_ai-0.1.0.dist-info/top_level.txt +1 -0
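The listing above is the surface of a Fern-generated Python SDK: a root `CortexAI`/`AsyncCortexAI` client plus per-resource sub-clients (`embeddings`, `fetch`, `search`, `sources`, `tenant`, `upload`, `user`, `user_memory`), each backed by a `raw_client` module. As orientation before the file diff below, here is a minimal usage sketch. It is an assumption-laden example, not part of the package: it assumes the distribution installs as `usecortex-ai`, that `CortexAI` accepts a `token` argument as the generated docstrings show, and it uses placeholder tenant and user identifiers.

```python
# Minimal sketch. Assumes `pip install usecortex-ai`; the importable package
# is cortex_ai (the module directory in the listing above). Tenant/user IDs
# are placeholders, not real values.
from cortex_ai import CortexAI

client = CortexAI(token="YOUR_TOKEN")

# Store a memory for a user, then query it back (signatures taken from the
# user_memory client shown in the diff below).
client.user_memory.add_user_memory(
    tenant_id="my-tenant",
    sub_tenant_id="user-123",
    user_memory="Prefers concise answers and metric units",
)
memories = client.user_memory.retrieve_user_memory(
    tenant_id="my-tenant",
    sub_tenant_id="user-123",
    query="response preferences",
)
print(memories)
```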
cortex_ai/user_memory/client.py (new file)

@@ -0,0 +1,443 @@

```python
# This file was auto-generated by Fern from our API Definition.

import typing

from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.request_options import RequestOptions
from .raw_client import AsyncRawUserMemoryClient, RawUserMemoryClient


class UserMemoryClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._raw_client = RawUserMemoryClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> RawUserMemoryClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        RawUserMemoryClient
        """
        return self._raw_client

    def list_user_memories(
        self, *, tenant_id: str, sub_tenant_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        sub_tenant_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        from cortex_ai import CortexAI

        client = CortexAI(token="YOUR_TOKEN", )
        client.user_memory.list_user_memories(tenant_id='tenant_id', sub_tenant_id='sub_tenant_id', )
        """
        _response = self._raw_client.list_user_memories(
            tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
        )
        return _response.data

    def delete_user_memory(
        self,
        *,
        tenant_id: str,
        memory_id: str,
        sub_tenant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        memory_id : str

        sub_tenant_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        from cortex_ai import CortexAI

        client = CortexAI(token="YOUR_TOKEN", )
        client.user_memory.delete_user_memory(tenant_id='tenant_id', memory_id='memory_id', sub_tenant_id='sub_tenant_id', )
        """
        _response = self._raw_client.delete_user_memory(
            tenant_id=tenant_id, memory_id=memory_id, sub_tenant_id=sub_tenant_id, request_options=request_options
        )
        return _response.data

    def retrieve_user_memory(
        self,
        *,
        tenant_id: str,
        query: str,
        sub_tenant_id: str,
        max_count: typing.Optional[int] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        query : str

        sub_tenant_id : str

        max_count : typing.Optional[int]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        from cortex_ai import CortexAI

        client = CortexAI(token="YOUR_TOKEN", )
        client.user_memory.retrieve_user_memory(tenant_id='tenant_id', query='query', sub_tenant_id='sub_tenant_id', )
        """
        _response = self._raw_client.retrieve_user_memory(
            tenant_id=tenant_id,
            query=query,
            sub_tenant_id=sub_tenant_id,
            max_count=max_count,
            request_options=request_options,
        )
        return _response.data

    def generate_user_memory(
        self,
        *,
        tenant_id: str,
        user_query: str,
        user_name: str,
        sub_tenant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        user_query : str

        user_name : str

        sub_tenant_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        from cortex_ai import CortexAI

        client = CortexAI(token="YOUR_TOKEN", )
        client.user_memory.generate_user_memory(tenant_id='tenant_id', user_query='user_query', user_name='user_name', sub_tenant_id='sub_tenant_id', )
        """
        _response = self._raw_client.generate_user_memory(
            tenant_id=tenant_id,
            user_query=user_query,
            user_name=user_name,
            sub_tenant_id=sub_tenant_id,
            request_options=request_options,
        )
        return _response.data

    def add_user_memory(
        self,
        *,
        tenant_id: str,
        user_memory: str,
        sub_tenant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        user_memory : str

        sub_tenant_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        from cortex_ai import CortexAI

        client = CortexAI(token="YOUR_TOKEN", )
        client.user_memory.add_user_memory(tenant_id='tenant_id', user_memory='user_memory', sub_tenant_id='sub_tenant_id', )
        """
        _response = self._raw_client.add_user_memory(
            tenant_id=tenant_id, user_memory=user_memory, sub_tenant_id=sub_tenant_id, request_options=request_options
        )
        return _response.data


class AsyncUserMemoryClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._raw_client = AsyncRawUserMemoryClient(client_wrapper=client_wrapper)

    @property
    def with_raw_response(self) -> AsyncRawUserMemoryClient:
        """
        Retrieves a raw implementation of this client that returns raw responses.

        Returns
        -------
        AsyncRawUserMemoryClient
        """
        return self._raw_client

    async def list_user_memories(
        self, *, tenant_id: str, sub_tenant_id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        sub_tenant_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        import asyncio

        from cortex_ai import AsyncCortexAI

        client = AsyncCortexAI(token="YOUR_TOKEN", )
        async def main() -> None:
            await client.user_memory.list_user_memories(tenant_id='tenant_id', sub_tenant_id='sub_tenant_id', )
        asyncio.run(main())
        """
        _response = await self._raw_client.list_user_memories(
            tenant_id=tenant_id, sub_tenant_id=sub_tenant_id, request_options=request_options
        )
        return _response.data

    async def delete_user_memory(
        self,
        *,
        tenant_id: str,
        memory_id: str,
        sub_tenant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        memory_id : str

        sub_tenant_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        import asyncio

        from cortex_ai import AsyncCortexAI

        client = AsyncCortexAI(token="YOUR_TOKEN", )
        async def main() -> None:
            await client.user_memory.delete_user_memory(tenant_id='tenant_id', memory_id='memory_id', sub_tenant_id='sub_tenant_id', )
        asyncio.run(main())
        """
        _response = await self._raw_client.delete_user_memory(
            tenant_id=tenant_id, memory_id=memory_id, sub_tenant_id=sub_tenant_id, request_options=request_options
        )
        return _response.data

    async def retrieve_user_memory(
        self,
        *,
        tenant_id: str,
        query: str,
        sub_tenant_id: str,
        max_count: typing.Optional[int] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        query : str

        sub_tenant_id : str

        max_count : typing.Optional[int]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        import asyncio

        from cortex_ai import AsyncCortexAI

        client = AsyncCortexAI(token="YOUR_TOKEN", )
        async def main() -> None:
            await client.user_memory.retrieve_user_memory(tenant_id='tenant_id', query='query', sub_tenant_id='sub_tenant_id', )
        asyncio.run(main())
        """
        _response = await self._raw_client.retrieve_user_memory(
            tenant_id=tenant_id,
            query=query,
            sub_tenant_id=sub_tenant_id,
            max_count=max_count,
            request_options=request_options,
        )
        return _response.data

    async def generate_user_memory(
        self,
        *,
        tenant_id: str,
        user_query: str,
        user_name: str,
        sub_tenant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        user_query : str

        user_name : str

        sub_tenant_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        import asyncio

        from cortex_ai import AsyncCortexAI

        client = AsyncCortexAI(token="YOUR_TOKEN", )
        async def main() -> None:
            await client.user_memory.generate_user_memory(tenant_id='tenant_id', user_query='user_query', user_name='user_name', sub_tenant_id='sub_tenant_id', )
        asyncio.run(main())
        """
        _response = await self._raw_client.generate_user_memory(
            tenant_id=tenant_id,
            user_query=user_query,
            user_name=user_name,
            sub_tenant_id=sub_tenant_id,
            request_options=request_options,
        )
        return _response.data

    async def add_user_memory(
        self,
        *,
        tenant_id: str,
        user_memory: str,
        sub_tenant_id: str,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Parameters
        ----------
        tenant_id : str

        user_memory : str

        sub_tenant_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        import asyncio

        from cortex_ai import AsyncCortexAI

        client = AsyncCortexAI(token="YOUR_TOKEN", )
        async def main() -> None:
            await client.user_memory.add_user_memory(tenant_id='tenant_id', user_memory='user_memory', sub_tenant_id='sub_tenant_id', )
        asyncio.run(main())
        """
        _response = await self._raw_client.add_user_memory(
            tenant_id=tenant_id, user_memory=user_memory, sub_tenant_id=sub_tenant_id, request_options=request_options
        )
        return _response.data
```