usecortex-ai 0.3.4__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
Files changed (31)
  1. usecortex_ai/__init__.py +24 -2
  2. usecortex_ai/search/client.py +36 -4
  3. usecortex_ai/search/raw_client.py +40 -8
  4. usecortex_ai/sources/client.py +97 -0
  5. usecortex_ai/sources/raw_client.py +273 -0
  6. usecortex_ai/types/__init__.py +24 -2
  7. usecortex_ai/types/add_user_memory_response.py +6 -1
  8. usecortex_ai/types/chunk_graph_relations_response.py +33 -0
  9. usecortex_ai/types/entity.py +42 -0
  10. usecortex_ai/types/extended_context.py +0 -3
  11. usecortex_ai/types/graph_relations_response.py +33 -0
  12. usecortex_ai/types/{generate_user_memory_response.py → path_triplet.py} +13 -7
  13. usecortex_ai/types/relation_evidence.py +53 -0
  14. usecortex_ai/types/retrieve_mode.py +5 -0
  15. usecortex_ai/types/retrieve_response.py +34 -0
  16. usecortex_ai/types/retrieve_user_memory_response.py +6 -0
  17. usecortex_ai/types/scored_path_response.py +40 -0
  18. usecortex_ai/types/scored_triplet_response.py +43 -0
  19. usecortex_ai/types/search_chunk.py +11 -0
  20. usecortex_ai/types/triple_with_evidence.py +31 -0
  21. usecortex_ai/types/user_assistant_pair.py +27 -0
  22. usecortex_ai/types/webpage_scrape_request.py +27 -0
  23. usecortex_ai/upload/client.py +276 -0
  24. usecortex_ai/upload/raw_client.py +1179 -339
  25. usecortex_ai/user_memory/client.py +77 -149
  26. usecortex_ai/user_memory/raw_client.py +74 -329
  27. {usecortex_ai-0.3.4.dist-info → usecortex_ai-0.3.6.dist-info}/METADATA +1 -1
  28. {usecortex_ai-0.3.4.dist-info → usecortex_ai-0.3.6.dist-info}/RECORD +31 -20
  29. {usecortex_ai-0.3.4.dist-info → usecortex_ai-0.3.6.dist-info}/WHEEL +0 -0
  30. {usecortex_ai-0.3.4.dist-info → usecortex_ai-0.3.6.dist-info}/licenses/LICENSE +0 -0
  31. {usecortex_ai-0.3.4.dist-info → usecortex_ai-0.3.6.dist-info}/top_level.txt +0 -0
usecortex_ai/types/scored_path_response.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .path_triplet import PathTriplet
+
+
+class ScoredPathResponse(UniversalBaseModel):
+    """
+    A multi-hop path (chain of triplets) with a relevancy score.
+
+    Represents connected paths like: A --rel1--> B --rel2--> C
+    The triplets list preserves the chain order.
+    """
+
+    combined_context: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Merged context from all triplets in the path
+    """
+
+    triplets: typing.List[PathTriplet] = pydantic.Field()
+    """
+    Ordered list of triplets forming the path chain
+    """
+
+    relevancy_score: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Relevancy score for the entire path
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
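A minimal sketch of consuming these path results, assuming ScoredPathResponse is re-exported from usecortex_ai.types as Fern SDKs usually do; the `paths` variable stands in for a real graph search result:

import typing

from usecortex_ai.types import ScoredPathResponse

def best_path_context(paths: typing.List[ScoredPathResponse]) -> typing.Optional[str]:
    # Rank paths by relevancy; a missing score counts as 0.0.
    ranked = sorted(paths, key=lambda p: p.relevancy_score or 0.0, reverse=True)
    for path in ranked:
        # combined_context merges the contexts of every triplet in the chain.
        if path.combined_context:
            return path.combined_context
    return None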
usecortex_ai/types/scored_triplet_response.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .entity import Entity
+from .relation_evidence import RelationEvidence
+
+
+class ScoredTripletResponse(UniversalBaseModel):
+    """
+    Individual scored triplet for entity-based search results
+    """
+
+    source: Entity = pydantic.Field()
+    """
+    Source entity
+    """
+
+    target: Entity = pydantic.Field()
+    """
+    Target entity
+    """
+
+    relation: RelationEvidence = pydantic.Field()
+    """
+    Relation between entities
+    """
+
+    relevancy_score: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Relevancy score from reranking
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
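Since these models are frozen, results can be filtered and re-sorted but not mutated in place. A hedged sketch keeping only confidently reranked triplets (the threshold value is an arbitrary placeholder):

import typing

from usecortex_ai.types import ScoredTripletResponse

def confident_triplets(
    triplets: typing.List[ScoredTripletResponse], threshold: float = 0.5
) -> typing.List[ScoredTripletResponse]:
    # Drop unscored triplets and anything below the cutoff.
    return [t for t in triplets if t.relevancy_score is not None and t.relevancy_score >= threshold]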
usecortex_ai/types/search_chunk.py
@@ -4,6 +4,7 @@ import typing
 
 import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .extended_context import ExtendedContext
 
 
 class SearchChunk(UniversalBaseModel):
@@ -62,6 +63,16 @@ class SearchChunk(UniversalBaseModel):
     Custom metadata associated with your tenant
     """
 
+    extra_context: typing.Optional[ExtendedContext] = pydantic.Field(default=None)
+    """
+    Additional context for this chunk
+    """
+
+    graph_triplet_ids: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+    """
+    List of group IDs for triplets linked to this chunk. Look up triplet data in graph_relations.chunk_relations[group_id].
+    """
+
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
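The graph_triplet_ids docstring above describes a group-ID lookup; a sketch of that join, assuming a retrieve response that exposes chunks alongside graph_relations (the response shape beyond the documented attribute path is an assumption):

# `response` is the assumed result of a retrieve call with graph relations enabled.
for chunk in response.chunks:
    for group_id in chunk.graph_triplet_ids or []:
        # Documented lookup path: graph_relations.chunk_relations[group_id]
        triplets = response.graph_relations.chunk_relations[group_id]
        print(group_id, len(triplets))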
usecortex_ai/types/triple_with_evidence.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .entity import Entity
+from .relation_evidence import RelationEvidence
+
+
+class TripleWithEvidence(UniversalBaseModel):
+    source: Entity
+    target: Entity
+    relations: typing.List[RelationEvidence] = pydantic.Field()
+    """
+    Array of relation evidences
+    """
+
+    chunk_id: str = pydantic.Field()
+    """
+    The chunk_id these relations are associated with
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
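Because each TripleWithEvidence carries the chunk_id it was extracted from, evidence can be grouped back onto chunks; a small sketch, assuming the type is re-exported from usecortex_ai.types:

import typing
from collections import defaultdict

from usecortex_ai.types import TripleWithEvidence

def triples_by_chunk(
    triples: typing.List[TripleWithEvidence],
) -> typing.Dict[str, typing.List[TripleWithEvidence]]:
    grouped: typing.Dict[str, typing.List[TripleWithEvidence]] = defaultdict(list)
    for triple in triples:
        # chunk_id ties the relations back to the chunk they came from.
        grouped[triple.chunk_id].append(triple)
    return dict(grouped)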
usecortex_ai/types/user_assistant_pair.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class UserAssistantPair(UniversalBaseModel):
+    user: str = pydantic.Field()
+    """
+    User's message in the conversation
+    """
+
+    assistant: str = pydantic.Field()
+    """
+    Assistant's response to the user message
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
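Constructing a pair is straightforward; note that the model is frozen, so a pair is immutable once built (the import path assumes the usual types re-export, and the message text is illustrative):

from usecortex_ai.types import UserAssistantPair

pair = UserAssistantPair(
    user="What changed in 0.3.6?",
    assistant="Graph-relation search types and batch upload endpoints were added.",
)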
usecortex_ai/types/webpage_scrape_request.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class WebpageScrapeRequest(UniversalBaseModel):
+    web_url: str = pydantic.Field()
+    """
+    The URL of the webpage to scrape and index
+    """
+
+    file_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Optional custom file ID for the scraped content. If not provided, a unique ID will be generated
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
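A construction sketch; the custom file_id value is a placeholder, and omitting it lets the server generate a unique ID:

from usecortex_ai import WebpageScrapeRequest

req = WebpageScrapeRequest(
    web_url="https://www.usecortex.ai/",
    file_id="homepage",  # placeholder; optional, generated server-side if omitted
)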
usecortex_ai/upload/client.py
@@ -7,9 +7,11 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ..core.request_options import RequestOptions
 from ..types.app_sources_upload_data import AppSourcesUploadData
 from ..types.batch_upload_data import BatchUploadData
+from ..types.markdown_upload_request import MarkdownUploadRequest
 from ..types.processing_status import ProcessingStatus
 from ..types.single_upload_data import SingleUploadData
 from ..types.source_model import SourceModel
+from ..types.webpage_scrape_request import WebpageScrapeRequest
 from .raw_client import AsyncRawUploadClient, RawUploadClient
 
 # this is used as the default value for optional parameters
@@ -646,6 +648,92 @@ class UploadClient:
         )
         return _response.data
 
+    def batch_upload_markdown(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[MarkdownUploadRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Upload multiple markdown/text documents simultaneously for efficient bulk processing.
+
+        This endpoint allows you to upload several markdown or text contents at once. Each content item is processed asynchronously, and you can track progress using the returned file IDs.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[MarkdownUploadRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        from usecortex_ai import CortexAI, MarkdownUploadRequest
+
+        client = CortexAI(token="YOUR_TOKEN")
+        client.upload.batch_upload_markdown(tenant_id="tenant_1234", sub_tenant_id="sub_tenant_4567", request=[MarkdownUploadRequest(content="<content>")])
+        """
+        _response = self._raw_client.batch_upload_markdown(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    def batch_upload_text(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[MarkdownUploadRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Upload multiple markdown/text documents simultaneously for efficient bulk processing.
+
+        This endpoint allows you to upload several markdown or text contents at once. Each content item is processed asynchronously, and you can track progress using the returned file IDs.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[MarkdownUploadRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        from usecortex_ai import CortexAI, MarkdownUploadRequest
+
+        client = CortexAI(token="YOUR_TOKEN")
+        client.upload.batch_upload_text(tenant_id="tenant_1234", sub_tenant_id="sub_tenant_4567", request=[MarkdownUploadRequest(content="<content>")])
+        """
+        _response = self._raw_client.batch_upload_text(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
     def upload_embeddings(
         self,
         *,
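A hedged end-to-end sketch of the new sync batch markdown upload; this diff does not show BatchUploadData's attributes, so the result is only printed rather than unpacked:

from usecortex_ai import CortexAI, MarkdownUploadRequest

client = CortexAI(token="YOUR_TOKEN")
batch = client.upload.batch_upload_markdown(
    tenant_id="tenant_1234",
    request=[
        MarkdownUploadRequest(content="# Release notes\n0.3.6 adds graph relations."),
        MarkdownUploadRequest(content="# Changelog\nBatch markdown upload added."),
    ],
)
# Processing is asynchronous; track progress via the file IDs in the response.
print(batch)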
@@ -854,6 +942,51 @@ class UploadClient:
         )
         return _response.data
 
+    def batch_scrape_webpage(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[WebpageScrapeRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Extract and index content from multiple web pages simultaneously.
+
+        This endpoint initiates web scraping for multiple URLs at once, extracting the main content, text, and structure from each webpage. It's ideal for capturing multiple articles, documentation pages, or any other web content you want in your knowledge base.
+
+        The system processes all webpage content asynchronously, cleaning and structuring the information for optimal search and retrieval.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[WebpageScrapeRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        from usecortex_ai import CortexAI, WebpageScrapeRequest
+
+        client = CortexAI(token="YOUR_TOKEN")
+        client.upload.batch_scrape_webpage(tenant_id="tenant_1234", sub_tenant_id="sub_tenant_4567", request=[WebpageScrapeRequest(web_url="https://www.usecortex.ai/")])
+        """
+        _response = self._raw_client.batch_scrape_webpage(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
     def delete_source(
         self,
         *,
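The scrape endpoint accepts any mix of server-generated and custom file IDs; a sketch, where the second URL and its ID are placeholders beyond the documented example:

from usecortex_ai import CortexAI, WebpageScrapeRequest

client = CortexAI(token="YOUR_TOKEN")
client.upload.batch_scrape_webpage(
    tenant_id="tenant_1234",
    request=[
        WebpageScrapeRequest(web_url="https://www.usecortex.ai/"),
        WebpageScrapeRequest(web_url="https://example.com/docs", file_id="docs-home"),  # placeholder URL/ID
    ],
)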
@@ -1659,6 +1792,100 @@ class AsyncUploadClient:
         )
         return _response.data
 
+    async def batch_upload_markdown(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[MarkdownUploadRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Upload multiple markdown/text documents simultaneously for efficient bulk processing.
+
+        This endpoint allows you to upload several markdown or text contents at once. Each content item is processed asynchronously, and you can track progress using the returned file IDs.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[MarkdownUploadRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex_ai import AsyncCortexAI, MarkdownUploadRequest
+
+        client = AsyncCortexAI(token="YOUR_TOKEN")
+        async def main() -> None:
+            await client.upload.batch_upload_markdown(tenant_id="tenant_1234", sub_tenant_id="sub_tenant_4567", request=[MarkdownUploadRequest(content="<content>")])
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.batch_upload_markdown(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
+    async def batch_upload_text(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[MarkdownUploadRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Upload multiple markdown/text documents simultaneously for efficient bulk processing.
+
+        This endpoint allows you to upload several markdown or text contents at once. Each content item is processed asynchronously, and you can track progress using the returned file IDs.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[MarkdownUploadRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex_ai import AsyncCortexAI, MarkdownUploadRequest
+
+        client = AsyncCortexAI(token="YOUR_TOKEN")
+        async def main() -> None:
+            await client.upload.batch_upload_text(tenant_id="tenant_1234", sub_tenant_id="sub_tenant_4567", request=[MarkdownUploadRequest(content="<content>")])
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.batch_upload_text(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
     async def upload_embeddings(
         self,
         *,
@@ -1883,6 +2110,55 @@ class AsyncUploadClient:
         )
         return _response.data
 
+    async def batch_scrape_webpage(
+        self,
+        *,
+        tenant_id: str,
+        request: typing.Sequence[WebpageScrapeRequest],
+        sub_tenant_id: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> BatchUploadData:
+        """
+        Extract and index content from multiple web pages simultaneously.
+
+        This endpoint initiates web scraping for multiple URLs at once, extracting the main content, text, and structure from each webpage. It's ideal for capturing multiple articles, documentation pages, or any other web content you want in your knowledge base.
+
+        The system processes all webpage content asynchronously, cleaning and structuring the information for optimal search and retrieval.
+
+        Parameters
+        ----------
+        tenant_id : str
+            Unique identifier for the tenant/organization
+
+        request : typing.Sequence[WebpageScrapeRequest]
+
+        sub_tenant_id : typing.Optional[str]
+            Optional sub-tenant identifier used to organize data within a tenant. If omitted, the default sub-tenant created during tenant setup will be used.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        BatchUploadData
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from usecortex_ai import AsyncCortexAI, WebpageScrapeRequest
+
+        client = AsyncCortexAI(token="YOUR_TOKEN")
+        async def main() -> None:
+            await client.upload.batch_scrape_webpage(tenant_id="tenant_1234", sub_tenant_id="sub_tenant_4567", request=[WebpageScrapeRequest(web_url="https://www.usecortex.ai/")])
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.batch_scrape_webpage(
+            tenant_id=tenant_id, request=request, sub_tenant_id=sub_tenant_id, request_options=request_options
+        )
+        return _response.data
+
     async def delete_source(
         self,
         *,
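Since the async client returns awaitables, independent batch jobs can run concurrently; a hedged sketch using asyncio.gather with the documented signatures:

import asyncio

from usecortex_ai import AsyncCortexAI, MarkdownUploadRequest, WebpageScrapeRequest

client = AsyncCortexAI(token="YOUR_TOKEN")

async def main() -> None:
    # Upload markdown and scrape a page in parallel; both return BatchUploadData.
    uploads, scrapes = await asyncio.gather(
        client.upload.batch_upload_markdown(
            tenant_id="tenant_1234",
            request=[MarkdownUploadRequest(content="<content>")],
        ),
        client.upload.batch_scrape_webpage(
            tenant_id="tenant_1234",
            request=[WebpageScrapeRequest(web_url="https://www.usecortex.ai/")],
        ),
    )

asyncio.run(main())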