usecortex-ai: usecortex_ai-0.3.3-py3-none-any.whl → usecortex_ai-0.3.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- usecortex_ai/__init__.py +16 -2
- usecortex_ai/search/client.py +36 -4
- usecortex_ai/search/raw_client.py +40 -8
- usecortex_ai/sources/client.py +97 -0
- usecortex_ai/sources/raw_client.py +273 -0
- usecortex_ai/types/__init__.py +16 -2
- usecortex_ai/types/add_user_memory_response.py +6 -1
- usecortex_ai/types/entity.py +42 -0
- usecortex_ai/types/extended_context.py +5 -2
- usecortex_ai/types/graph_relations_response.py +33 -0
- usecortex_ai/types/processing_status.py +6 -1
- usecortex_ai/types/relation_evidence.py +52 -0
- usecortex_ai/types/retrieve_mode.py +5 -0
- usecortex_ai/types/retrieve_response.py +34 -0
- usecortex_ai/types/retrieve_user_memory_response.py +6 -0
- usecortex_ai/types/search_chunk.py +6 -0
- usecortex_ai/types/{generate_user_memory_response.py → triple_with_evidence.py} +10 -7
- usecortex_ai/types/user_assistant_pair.py +27 -0
- usecortex_ai/types/webpage_scrape_request.py +27 -0
- usecortex_ai/upload/client.py +276 -0
- usecortex_ai/upload/raw_client.py +1179 -339
- usecortex_ai/user_memory/client.py +77 -149
- usecortex_ai/user_memory/raw_client.py +74 -329
- {usecortex_ai-0.3.3.dist-info → usecortex_ai-0.3.5.dist-info}/METADATA +1 -1
- {usecortex_ai-0.3.3.dist-info → usecortex_ai-0.3.5.dist-info}/RECORD +28 -21
- {usecortex_ai-0.3.3.dist-info → usecortex_ai-0.3.5.dist-info}/WHEEL +0 -0
- {usecortex_ai-0.3.3.dist-info → usecortex_ai-0.3.5.dist-info}/licenses/LICENSE +0 -0
- {usecortex_ai-0.3.3.dist-info → usecortex_ai-0.3.5.dist-info}/top_level.txt +0 -0
usecortex_ai/types/user_assistant_pair.py
ADDED

@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class UserAssistantPair(UniversalBaseModel):
+    user: str = pydantic.Field()
+    """
+    User's message in the conversation
+    """
+
+    assistant: str = pydantic.Field()
+    """
+    Assistant's response to the user message
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
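Note: UserAssistantPair is a plain data container (frozen, extra fields allowed) with no behaviour of its own. A minimal usage sketch follows, assuming the type is re-exported from the import package usecortex_ai like the other generated types in this release:

# Hypothetical usage; the top-level re-export of UserAssistantPair is an assumption.
from usecortex_ai import UserAssistantPair

pair = UserAssistantPair(
    user="How do I upload markdown files?",         # user's message in the conversation
    assistant="Use upload.batch_upload_markdown.",  # assistant's response to that message
)

# frozen=True makes instances immutable; extra="allow" keeps unknown fields instead of rejecting them.
print(pair.user, pair.assistant)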
usecortex_ai/types/webpage_scrape_request.py
ADDED

@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class WebpageScrapeRequest(UniversalBaseModel):
+    web_url: str = pydantic.Field()
+    """
+    The URL of the webpage to scrape and index
+    """
+
+    file_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Optional custom file ID for the scraped content. If not provided, a unique ID will be generated
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
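Note: WebpageScrapeRequest carries a required web_url and an optional file_id. A minimal construction sketch, again assuming a top-level re-export from usecortex_ai:

# Hypothetical usage; the top-level re-export is an assumption.
from usecortex_ai import WebpageScrapeRequest

req = WebpageScrapeRequest(
    web_url="https://www.usecortex.ai/",  # the page to scrape and index
    file_id="homepage-snapshot",          # optional custom ID; omit to have one generated
)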
usecortex_ai/upload/client.py
CHANGED
@@ -7,9 +7,11 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ..core.request_options import RequestOptions
 from ..types.app_sources_upload_data import AppSourcesUploadData
 from ..types.batch_upload_data import BatchUploadData
+from ..types.markdown_upload_request import MarkdownUploadRequest
 from ..types.processing_status import ProcessingStatus
 from ..types.single_upload_data import SingleUploadData
 from ..types.source_model import SourceModel
+from ..types.webpage_scrape_request import WebpageScrapeRequest
 from .raw_client import AsyncRawUploadClient, RawUploadClient
 
 # this is used as the default value for optional parameters
@@ -646,6 +648,92 @@ class UploadClient:
         )
         return _response.data
 
+    def batch_upload_markdown(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[MarkdownUploadRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Upload multiple markdown/text documents simultaneously for efficient bulk processing.
+
+        This endpoint allows you to upload several markdown or text contents at once. Each content item gets processed asynchronously, and you can track the progress using their returned file IDs.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[MarkdownUploadRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        from usecortex-ai import CortexAI, MarkdownUploadRequest
+
+        client = CortexAI(token="YOUR_TOKEN", )
+        client.upload.batch_upload_markdown(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', request=[MarkdownUploadRequest(content='<content>', )], )
+        """
+        _response = self._raw_client.batch_upload_markdown(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    def batch_upload_text(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[MarkdownUploadRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Upload multiple markdown/text documents simultaneously for efficient bulk processing.
+
+        This endpoint allows you to upload several markdown or text contents at once. Each content item gets processed asynchronously, and you can track the progress using their returned file IDs.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[MarkdownUploadRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        from usecortex-ai import CortexAI, MarkdownUploadRequest
+
+        client = CortexAI(token="YOUR_TOKEN", )
+        client.upload.batch_upload_text(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', request=[MarkdownUploadRequest(content='<content>', )], )
+        """
+        _response = self._raw_client.batch_upload_text(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
     def upload_embeddings(
         self,
         *,
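Note: the generated docstring examples import from usecortex-ai, which is the distribution name; the importable package in this wheel is usecortex_ai. A hedged sketch of a bulk markdown upload, assuming CortexAI and MarkdownUploadRequest are exported from the package root as the docstrings suggest:

from usecortex_ai import CortexAI, MarkdownUploadRequest  # assumed top-level exports

client = CortexAI(token="YOUR_TOKEN")

# Upload two markdown documents in one call; each item is processed
# asynchronously and tracked via the file IDs in the returned BatchUploadData.
batch = client.upload.batch_upload_markdown(
    tenant_id="tenant_1234",
    sub_tenant_id="sub_tenant_4567",  # optional; defaults to the tenant's default sub-tenant
    request=[
        MarkdownUploadRequest(content="# Release notes\n..."),
        MarkdownUploadRequest(content="# Onboarding guide\n..."),
    ],
)
# The exact fields of BatchUploadData are not shown in this diff.

batch_upload_text takes the same arguments (it also accepts MarkdownUploadRequest items) and differs only in the endpoint it calls.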
@@ -854,6 +942,51 @@ class UploadClient:
         )
         return _response.data
 
+    def batch_scrape_webpage(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[WebpageScrapeRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Extract and index content from multiple web pages simultaneously.
+
+        This endpoint initiates web scraping for multiple URLs at once, extracting the main content, text, and structure from each webpage. It's perfect for capturing multiple articles, documentation pages, or any web content you want to include in your knowledge base.
+
+        The system processes all webpage content asynchronously, cleaning and structuring the information for optimal search and retrieval.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[WebpageScrapeRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        from usecortex-ai import CortexAI, WebpageScrapeRequest
+
+        client = CortexAI(token="YOUR_TOKEN", )
+        client.upload.batch_scrape_webpage(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', request=[WebpageScrapeRequest(web_url='https://www.usecortex.ai/', )], )
+        """
+        _response = self._raw_client.batch_scrape_webpage(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
     def delete_source(
         self,
         *,
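Note: a corresponding sketch for batch webpage scraping, under the same import assumptions as above; the second URL is illustrative only:

from usecortex_ai import CortexAI, WebpageScrapeRequest  # assumed top-level exports

client = CortexAI(token="YOUR_TOKEN")

# Scrape and index two pages in one request; file_id is optional per page.
batch = client.upload.batch_scrape_webpage(
    tenant_id="tenant_1234",
    request=[
        WebpageScrapeRequest(web_url="https://www.usecortex.ai/"),
        WebpageScrapeRequest(web_url="https://example.com/article", file_id="example-article"),
    ],
)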
@@ -1659,6 +1792,100 @@ class AsyncUploadClient:
         )
         return _response.data
 
+    async def batch_upload_markdown(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[MarkdownUploadRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Upload multiple markdown/text documents simultaneously for efficient bulk processing.
+
+        This endpoint allows you to upload several markdown or text contents at once. Each content item gets processed asynchronously, and you can track the progress using their returned file IDs.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[MarkdownUploadRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex-ai import AsyncCortexAI, MarkdownUploadRequest
+
+        client = AsyncCortexAI(token="YOUR_TOKEN", )
+        async def main() -> None:
+            await client.upload.batch_upload_markdown(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', request=[MarkdownUploadRequest(content='<content>', )], )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.batch_upload_markdown(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    async def batch_upload_text(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[MarkdownUploadRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Upload multiple markdown/text documents simultaneously for efficient bulk processing.
+
+        This endpoint allows you to upload several markdown or text contents at once. Each content item gets processed asynchronously, and you can track the progress using their returned file IDs.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[MarkdownUploadRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex-ai import AsyncCortexAI, MarkdownUploadRequest
+
+        client = AsyncCortexAI(token="YOUR_TOKEN", )
+        async def main() -> None:
+            await client.upload.batch_upload_text(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', request=[MarkdownUploadRequest(content='<content>', )], )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.batch_upload_text(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
     async def upload_embeddings(
         self,
         *,
@@ -1883,6 +2110,55 @@ class AsyncUploadClient:
         )
         return _response.data
 
+    async def batch_scrape_webpage(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[WebpageScrapeRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Extract and index content from multiple web pages simultaneously.
+
+        This endpoint initiates web scraping for multiple URLs at once, extracting the main content, text, and structure from each webpage. It's perfect for capturing multiple articles, documentation pages, or any web content you want to include in your knowledge base.
+
+        The system processes all webpage content asynchronously, cleaning and structuring the information for optimal search and retrieval.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[WebpageScrapeRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex-ai import AsyncCortexAI, WebpageScrapeRequest
+
+        client = AsyncCortexAI(token="YOUR_TOKEN", )
+        async def main() -> None:
+            await client.upload.batch_scrape_webpage(tenant_id='tenant_1234', sub_tenant_id='sub_tenant_4567', request=[WebpageScrapeRequest(web_url='https://www.usecortex.ai/', )], )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.batch_scrape_webpage(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
     async def delete_source(
         self,
         *,
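Note: the async client mirrors the sync surface, so the new batch endpoints can be awaited concurrently. A sketch assuming AsyncCortexAI and the request types are exported from the package root:

import asyncio

from usecortex_ai import AsyncCortexAI, MarkdownUploadRequest, WebpageScrapeRequest  # assumed exports

client = AsyncCortexAI(token="YOUR_TOKEN")


async def main() -> None:
    # Run a markdown batch upload and a webpage scrape batch concurrently.
    markdown_batch, scrape_batch = await asyncio.gather(
        client.upload.batch_upload_markdown(
            tenant_id="tenant_1234",
            request=[MarkdownUploadRequest(content="# Notes\n...")],
        ),
        client.upload.batch_scrape_webpage(
            tenant_id="tenant_1234",
            request=[WebpageScrapeRequest(web_url="https://www.usecortex.ai/")],
        ),
    )
    # Both calls return BatchUploadData with file IDs for tracking processing status.


asyncio.run(main())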