usecortex-ai 0.1.0.tar.gz → 0.1.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. {usecortex_ai-0.1.0/src/usecortex_ai.egg-info → usecortex_ai-0.1.1}/PKG-INFO +1 -1
  2. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/pyproject.toml +1 -1
  3. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/__init__.py +6 -4
  4. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/client.py +4 -4
  5. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/embeddings/client.py +8 -8
  6. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/fetch/client.py +2 -2
  7. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/search/client.py +15 -16
  8. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/search/raw_client.py +17 -18
  9. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/sources/client.py +4 -4
  10. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/tenant/client.py +2 -2
  11. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/__init__.py +6 -4
  12. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/app_sources_upload_data.py +1 -1
  13. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/batch_upload_data.py +1 -1
  14. usecortex_ai-0.1.1/src/cortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py +19 -0
  15. usecortex_ai-0.1.1/src/cortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py +19 -0
  16. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/embeddings_create_collection_data.py +1 -1
  17. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/embeddings_delete_data.py +1 -1
  18. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/embeddings_get_data.py +1 -1
  19. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/embeddings_search_data.py +1 -1
  20. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/error_response.py +1 -1
  21. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/extended_context.py +1 -1
  22. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/fetch_content_data.py +1 -1
  23. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/list_sources_response.py +1 -1
  24. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/processing_status.py +1 -1
  25. usecortex_ai-0.1.1/src/cortex_ai/types/relations.py +27 -0
  26. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/single_upload_data.py +1 -1
  27. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/tenant_create_data.py +1 -1
  28. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/tenant_stats.py +1 -1
  29. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/upload/client.py +138 -120
  30. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/upload/raw_client.py +154 -80
  31. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/user/client.py +2 -2
  32. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/user_memory/client.py +10 -10
  33. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1/src/usecortex_ai.egg-info}/PKG-INFO +1 -1
  34. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/usecortex_ai.egg-info/SOURCES.txt +3 -2
  35. usecortex_ai-0.1.0/src/cortex_ai/types/full_text_search_data.py +0 -22
  36. usecortex_ai-0.1.0/src/cortex_ai/types/search_data.py +0 -22
  37. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/LICENSE +0 -0
  38. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/README.md +0 -0
  39. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/setup.cfg +0 -0
  40. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/__init__.py +0 -0
  41. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/api_error.py +0 -0
  42. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/client_wrapper.py +0 -0
  43. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/datetime_utils.py +0 -0
  44. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/file.py +0 -0
  45. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/force_multipart.py +0 -0
  46. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/http_client.py +0 -0
  47. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/http_response.py +0 -0
  48. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/jsonable_encoder.py +0 -0
  49. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/pydantic_utilities.py +0 -0
  50. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/query_encoder.py +0 -0
  51. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/remove_none_from_dict.py +0 -0
  52. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/request_options.py +0 -0
  53. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/core/serialization.py +0 -0
  54. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/embeddings/__init__.py +0 -0
  55. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/embeddings/raw_client.py +0 -0
  56. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/environment.py +0 -0
  57. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/errors/__init__.py +0 -0
  58. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/errors/bad_request_error.py +0 -0
  59. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/errors/forbidden_error.py +0 -0
  60. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/errors/internal_server_error.py +0 -0
  61. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/errors/not_found_error.py +0 -0
  62. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/errors/service_unavailable_error.py +0 -0
  63. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/errors/unauthorized_error.py +0 -0
  64. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/errors/unprocessable_entity_error.py +0 -0
  65. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/fetch/__init__.py +0 -0
  66. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/fetch/raw_client.py +0 -0
  67. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/raw_client.py +0 -0
  68. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/search/__init__.py +0 -0
  69. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/search/types/__init__.py +0 -0
  70. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/search/types/alpha.py +0 -0
  71. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/sources/__init__.py +0 -0
  72. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/sources/raw_client.py +0 -0
  73. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/tenant/__init__.py +0 -0
  74. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/tenant/raw_client.py +0 -0
  75. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/actual_error_response.py +0 -0
  76. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/attachment_model.py +0 -0
  77. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/bm_25_operator_type.py +0 -0
  78. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/content_model.py +0 -0
  79. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/delete_memory_request.py +0 -0
  80. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/file_upload_result.py +0 -0
  81. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/http_validation_error.py +0 -0
  82. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/markdown_upload_request.py +0 -0
  83. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/related_chunk.py +0 -0
  84. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/search_chunk.py +0 -0
  85. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/source.py +0 -0
  86. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/source_content.py +0 -0
  87. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/source_model.py +0 -0
  88. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/validation_error.py +0 -0
  89. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/types/validation_error_loc_item.py +0 -0
  90. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/upload/__init__.py +0 -0
  91. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/user/__init__.py +0 -0
  92. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/user/raw_client.py +0 -0
  93. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/user_memory/__init__.py +0 -0
  94. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/cortex_ai/user_memory/raw_client.py +0 -0
  95. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/usecortex_ai.egg-info/dependency_links.txt +0 -0
  96. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/usecortex_ai.egg-info/requires.txt +0 -0
  97. {usecortex_ai-0.1.0 → usecortex_ai-0.1.1}/src/usecortex_ai.egg-info/top_level.txt +0 -0

PKG-INFO:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: usecortex-ai
-Version: 0.1.0
+Version: 0.1.1
 Summary: The official Python SDK for the Cortex AI platform.
 Author-email: Nishkarsh Shrivastava <nishkarsh@usecortex.ai>
 License: Copyright (c) 2024 Cortex AI

pyproject.toml:
@@ -9,7 +9,7 @@ build-backend = "setuptools.build_meta"
 name = "usecortex-ai"

 # Start with an initial version
-version = "0.1.0"
+version = "0.1.1"

 authors = [
 { name = "Nishkarsh Shrivastava", email = "nishkarsh@usecortex.ai" },

src/cortex_ai/__init__.py:
@@ -8,6 +8,8 @@ from .types import (
 AttachmentModel,
 BatchUploadData,
 Bm25OperatorType,
+BodyScrapeWebpageUploadScrapeWebpagePost,
+BodyUpdateScrapeJobUploadUpdateWebpagePatch,
 ContentModel,
 DeleteMemoryRequest,
 EmbeddingsCreateCollectionData,
@@ -18,14 +20,13 @@ from .types import (
 ExtendedContext,
 FetchContentData,
 FileUploadResult,
-FullTextSearchData,
 HttpValidationError,
 ListSourcesResponse,
 MarkdownUploadRequest,
 ProcessingStatus,
 RelatedChunk,
+Relations,
 SearchChunk,
-SearchData,
 SingleUploadData,
 Source,
 SourceContent,
@@ -58,6 +59,8 @@ __all__ = [
 "BadRequestError",
 "BatchUploadData",
 "Bm25OperatorType",
+"BodyScrapeWebpageUploadScrapeWebpagePost",
+"BodyUpdateScrapeJobUploadUpdateWebpagePatch",
 "ContentModel",
 "CortexAI",
 "CortexAIEnvironment",
@@ -71,7 +74,6 @@ __all__ = [
 "FetchContentData",
 "FileUploadResult",
 "ForbiddenError",
-"FullTextSearchData",
 "HttpValidationError",
 "InternalServerError",
 "ListSourcesResponse",
@@ -79,8 +81,8 @@ __all__ = [
 "NotFoundError",
 "ProcessingStatus",
 "RelatedChunk",
+"Relations",
 "SearchChunk",
-"SearchData",
 "ServiceUnavailableError",
 "SingleUploadData",
 "Source",

src/cortex_ai/client.py:
@@ -50,7 +50,7 @@ class CortexAI:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 """
@@ -115,7 +115,7 @@ class CortexAI:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.root_get()
@@ -157,7 +157,7 @@ class AsyncCortexAI:

 Examples
 --------
-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 """
@@ -224,7 +224,7 @@ class AsyncCortexAI:
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:

src/cortex_ai/embeddings/client.py:
@@ -71,7 +71,7 @@ class EmbeddingsClient:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.embeddings.delete(chunk_ids=['chunk_ids'], tenant_id='tenant_id', )
@@ -128,7 +128,7 @@ class EmbeddingsClient:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.embeddings.search(embeddings=[1.1], tenant_id='tenant_id', )
@@ -176,7 +176,7 @@ class EmbeddingsClient:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.embeddings.get_by_chunk_ids(chunk_ids=['chunk_ids'], tenant_id='tenant_id', )
@@ -211,7 +211,7 @@ class EmbeddingsClient:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.embeddings.create_collection(tenant_id='tenant_id', )
@@ -279,7 +279,7 @@ class AsyncEmbeddingsClient:
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:
@@ -340,7 +340,7 @@
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:
@@ -392,7 +392,7 @@
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:
@@ -431,7 +431,7 @@
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:

src/cortex_ai/fetch/client.py:
@@ -59,7 +59,7 @@ class FetchClient:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.fetch.fetch_content(file_id='file_id', file_type='file_type', tenant_id='tenant_id', )
@@ -125,7 +125,7 @@ class AsyncFetchClient:
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:

src/cortex_ai/search/client.py:
@@ -5,8 +5,7 @@ import typing
 from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ..core.request_options import RequestOptions
 from ..types.bm_25_operator_type import Bm25OperatorType
-from ..types.full_text_search_data import FullTextSearchData
-from ..types.search_data import SearchData
+from ..types.search_chunk import SearchChunk
 from .raw_client import AsyncRawSearchClient, RawSearchClient
 from .types.alpha import Alpha

@@ -98,7 +97,7 @@

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.search.qna(question='question', session_id='session_id', tenant_id='tenant_id', )
@@ -136,7 +135,7 @@
 recency_bias: typing.Optional[float] = OMIT,
 num_related_chunks: typing.Optional[int] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
-) -> SearchData:
+) -> typing.List[SearchChunk]:
 """
 Search for content within indexed sources using semantic and keyword search capabilities.

@@ -179,12 +178,12 @@

 Returns
 -------
-SearchData
+typing.List[SearchChunk]
 Successful Response

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.search.retrieve(query='query', tenant_id='tenant_id', )
@@ -210,7 +209,7 @@
 operator: typing.Optional[Bm25OperatorType] = OMIT,
 max_chunks: typing.Optional[int] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
-) -> FullTextSearchData:
+) -> typing.List[SearchChunk]:
 """
 Full text search endpoint for Cortex customers.
 Performs full text search with configurable operators for precise text matching against the Findr backend.
@@ -248,12 +247,12 @@

 Returns
 -------
-FullTextSearchData
+typing.List[SearchChunk]
 Successful Response

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.search.full_text_search(query='query', tenant_id='tenant_id', )
@@ -355,7 +354,7 @@ class AsyncSearchClient:
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:
@@ -395,7 +394,7 @@
 recency_bias: typing.Optional[float] = OMIT,
 num_related_chunks: typing.Optional[int] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
-) -> SearchData:
+) -> typing.List[SearchChunk]:
 """
 Search for content within indexed sources using semantic and keyword search capabilities.

@@ -438,14 +437,14 @@

 Returns
 -------
-SearchData
+typing.List[SearchChunk]
 Successful Response

 Examples
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:
@@ -473,7 +472,7 @@
 operator: typing.Optional[Bm25OperatorType] = OMIT,
 max_chunks: typing.Optional[int] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
-) -> FullTextSearchData:
+) -> typing.List[SearchChunk]:
 """
 Full text search endpoint for Cortex customers.
 Performs full text search with configurable operators for precise text matching against the Findr backend.
@@ -511,14 +510,14 @@

 Returns
 -------
-FullTextSearchData
+typing.List[SearchChunk]
 Successful Response

 Examples
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:
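
The search/client.py hunks above replace the SearchData and FullTextSearchData return types with plain typing.List[SearchChunk]. A minimal calling sketch under that assumption (not part of the package; the cortex_ai import path is inferred from the src/cortex_ai layout in the file list, and the token, query, and tenant_id values are placeholders):

    from cortex_ai import CortexAI

    client = CortexAI(token="YOUR_TOKEN")
    # In 0.1.1, retrieve() appears to return a list of SearchChunk directly,
    # so results are iterated without unwrapping a SearchData object.
    chunks = client.search.retrieve(query="query", tenant_id="tenant_id")
    for chunk in chunks:
        print(type(chunk).__name__)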

src/cortex_ai/search/raw_client.py:
@@ -18,8 +18,7 @@ from ..errors.unauthorized_error import UnauthorizedError
 from ..errors.unprocessable_entity_error import UnprocessableEntityError
 from ..types.actual_error_response import ActualErrorResponse
 from ..types.bm_25_operator_type import Bm25OperatorType
-from ..types.full_text_search_data import FullTextSearchData
-from ..types.search_data import SearchData
+from ..types.search_chunk import SearchChunk
 from .types.alpha import Alpha

 # this is used as the default value for optional parameters
@@ -230,7 +229,7 @@
 recency_bias: typing.Optional[float] = OMIT,
 num_related_chunks: typing.Optional[int] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
-) -> HttpResponse[SearchData]:
+) -> HttpResponse[typing.List[SearchChunk]]:
 """
 Search for content within indexed sources using semantic and keyword search capabilities.

@@ -273,7 +272,7 @@

 Returns
 -------
-HttpResponse[SearchData]
+HttpResponse[typing.List[SearchChunk]]
 Successful Response
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -297,9 +296,9 @@
 try:
 if 200 <= _response.status_code < 300:
 _data = typing.cast(
-SearchData,
+typing.List[SearchChunk],
 parse_obj_as(
-type_=SearchData, # type: ignore
+type_=typing.List[SearchChunk], # type: ignore
 object_=_response.json(),
 ),
 )
@@ -395,7 +394,7 @@
 operator: typing.Optional[Bm25OperatorType] = OMIT,
 max_chunks: typing.Optional[int] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
-) -> HttpResponse[FullTextSearchData]:
+) -> HttpResponse[typing.List[SearchChunk]]:
 """
 Full text search endpoint for Cortex customers.
 Performs full text search with configurable operators for precise text matching against the Findr backend.
@@ -433,7 +432,7 @@

 Returns
 -------
-HttpResponse[FullTextSearchData]
+HttpResponse[typing.List[SearchChunk]]
 Successful Response
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -455,9 +454,9 @@
 try:
 if 200 <= _response.status_code < 300:
 _data = typing.cast(
-FullTextSearchData,
+typing.List[SearchChunk],
 parse_obj_as(
-type_=FullTextSearchData, # type: ignore
+type_=typing.List[SearchChunk], # type: ignore
 object_=_response.json(),
 ),
 )
@@ -749,7 +748,7 @@
 recency_bias: typing.Optional[float] = OMIT,
 num_related_chunks: typing.Optional[int] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
-) -> AsyncHttpResponse[SearchData]:
+) -> AsyncHttpResponse[typing.List[SearchChunk]]:
 """
 Search for content within indexed sources using semantic and keyword search capabilities.

@@ -792,7 +791,7 @@

 Returns
 -------
-AsyncHttpResponse[SearchData]
+AsyncHttpResponse[typing.List[SearchChunk]]
 Successful Response
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -816,9 +815,9 @@
 try:
 if 200 <= _response.status_code < 300:
 _data = typing.cast(
-SearchData,
+typing.List[SearchChunk],
 parse_obj_as(
-type_=SearchData, # type: ignore
+type_=typing.List[SearchChunk], # type: ignore
 object_=_response.json(),
 ),
 )
@@ -914,7 +913,7 @@
 operator: typing.Optional[Bm25OperatorType] = OMIT,
 max_chunks: typing.Optional[int] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
-) -> AsyncHttpResponse[FullTextSearchData]:
+) -> AsyncHttpResponse[typing.List[SearchChunk]]:
 """
 Full text search endpoint for Cortex customers.
 Performs full text search with configurable operators for precise text matching against the Findr backend.
@@ -952,7 +951,7 @@

 Returns
 -------
-AsyncHttpResponse[FullTextSearchData]
+AsyncHttpResponse[typing.List[SearchChunk]]
 Successful Response
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -974,9 +973,9 @@
 try:
 if 200 <= _response.status_code < 300:
 _data = typing.cast(
-FullTextSearchData,
+typing.List[SearchChunk],
 parse_obj_as(
-type_=FullTextSearchData, # type: ignore
+type_=typing.List[SearchChunk], # type: ignore
 object_=_response.json(),
 ),
 )

src/cortex_ai/sources/client.py:
@@ -50,7 +50,7 @@ class SourcesClient:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.sources.get_all(tenant_id='tenant_id', )
@@ -84,7 +84,7 @@

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.sources.get_by_ids(tenant_id='tenant_id', source_ids=['source_ids'], )
@@ -136,7 +136,7 @@ class AsyncSourcesClient:
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:
@@ -174,7 +174,7 @@
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:

src/cortex_ai/tenant/client.py:
@@ -51,7 +51,7 @@ class TenantClient:

 Examples
 --------
-from cortex-ai import CortexAI
+from usecortex-ai import CortexAI

 client = CortexAI(token="YOUR_TOKEN", )
 client.tenant.stats(tenant_id='tenant_id', )
@@ -107,7 +107,7 @@ class AsyncTenantClient:
 --------
 import asyncio

-from cortex-ai import AsyncCortexAI
+from usecortex-ai import AsyncCortexAI

 client = AsyncCortexAI(token="YOUR_TOKEN", )
 async def main() -> None:

src/cortex_ai/types/__init__.py:
@@ -7,6 +7,8 @@ from .app_sources_upload_data import AppSourcesUploadData
 from .attachment_model import AttachmentModel
 from .batch_upload_data import BatchUploadData
 from .bm_25_operator_type import Bm25OperatorType
+from .body_scrape_webpage_upload_scrape_webpage_post import BodyScrapeWebpageUploadScrapeWebpagePost
+from .body_update_scrape_job_upload_update_webpage_patch import BodyUpdateScrapeJobUploadUpdateWebpagePatch
 from .content_model import ContentModel
 from .delete_memory_request import DeleteMemoryRequest
 from .embeddings_create_collection_data import EmbeddingsCreateCollectionData
@@ -17,14 +19,13 @@ from .error_response import ErrorResponse
 from .extended_context import ExtendedContext
 from .fetch_content_data import FetchContentData
 from .file_upload_result import FileUploadResult
-from .full_text_search_data import FullTextSearchData
 from .http_validation_error import HttpValidationError
 from .list_sources_response import ListSourcesResponse
 from .markdown_upload_request import MarkdownUploadRequest
 from .processing_status import ProcessingStatus
 from .related_chunk import RelatedChunk
+from .relations import Relations
 from .search_chunk import SearchChunk
-from .search_data import SearchData
 from .single_upload_data import SingleUploadData
 from .source import Source
 from .source_content import SourceContent
@@ -40,6 +41,8 @@ __all__ = [
 "AttachmentModel",
 "BatchUploadData",
 "Bm25OperatorType",
+"BodyScrapeWebpageUploadScrapeWebpagePost",
+"BodyUpdateScrapeJobUploadUpdateWebpagePatch",
 "ContentModel",
 "DeleteMemoryRequest",
 "EmbeddingsCreateCollectionData",
@@ -50,14 +53,13 @@ __all__ = [
 "ExtendedContext",
 "FetchContentData",
 "FileUploadResult",
-"FullTextSearchData",
 "HttpValidationError",
 "ListSourcesResponse",
 "MarkdownUploadRequest",
 "ProcessingStatus",
 "RelatedChunk",
+"Relations",
 "SearchChunk",
-"SearchData",
 "SingleUploadData",
 "Source",
 "SourceContent",

src/cortex_ai/types/app_sources_upload_data.py:
@@ -9,7 +9,7 @@ from .file_upload_result import FileUploadResult

 class AppSourcesUploadData(UniversalBaseModel):
 success: typing.Optional[bool] = None
-message: str
+message: typing.Optional[str] = None
 uploaded: typing.List[FileUploadResult]

 if IS_PYDANTIC_V2:

src/cortex_ai/types/batch_upload_data.py:
@@ -9,7 +9,7 @@ from .file_upload_result import FileUploadResult

 class BatchUploadData(UniversalBaseModel):
 success: typing.Optional[bool] = None
-message: str
+message: typing.Optional[str] = None
 uploaded: typing.List[FileUploadResult]

 if IS_PYDANTIC_V2:
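
Both hunks above, like several response models later in the diff, relax message from a required str to typing.Optional[str]. A small defensive-access sketch under that assumption (the helper name and fallback text are illustrative, not part of the SDK):

    def describe_upload(result) -> str:
        # result is e.g. a BatchUploadData instance; in 0.1.1 message may be None.
        return result.message if result.message is not None else "<no message>"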

src/cortex_ai/types/body_scrape_webpage_upload_scrape_webpage_post.py (new file):
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyScrapeWebpageUploadScrapeWebpagePost(UniversalBaseModel):
+relations: typing.Optional[str] = None
+
+if IS_PYDANTIC_V2:
+model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+else:
+
+class Config:
+frozen = True
+smart_union = True
+extra = pydantic.Extra.allow

src/cortex_ai/types/body_update_scrape_job_upload_update_webpage_patch.py (new file):
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class BodyUpdateScrapeJobUploadUpdateWebpagePatch(UniversalBaseModel):
+relations: typing.Optional[str] = None
+
+if IS_PYDANTIC_V2:
+model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+else:
+
+class Config:
+frozen = True
+smart_union = True
+extra = pydantic.Extra.allow
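
The two new files above define request-body models whose single field, relations, is an optional string. A construction sketch assuming the cortex_ai.types import path shown in types/__init__.py; the expected contents of relations are not documented in this diff, so only the default is exercised:

    from cortex_ai.types import BodyScrapeWebpageUploadScrapeWebpagePost

    body = BodyScrapeWebpageUploadScrapeWebpagePost()  # relations defaults to None
    print(body.relations)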

src/cortex_ai/types/embeddings_create_collection_data.py:
@@ -8,7 +8,7 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel

 class EmbeddingsCreateCollectionData(UniversalBaseModel):
 success: typing.Optional[bool] = None
-message: str
+message: typing.Optional[str] = None
 tenant_id: str
 sub_tenant_id: str

src/cortex_ai/types/embeddings_delete_data.py:
@@ -8,7 +8,7 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel

 class EmbeddingsDeleteData(UniversalBaseModel):
 success: typing.Optional[bool] = None
-message: str
+message: typing.Optional[str] = None
 total_deleted: int
 status: typing.Dict[str, bool]

src/cortex_ai/types/embeddings_get_data.py:
@@ -8,7 +8,7 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel

 class EmbeddingsGetData(UniversalBaseModel):
 success: typing.Optional[bool] = None
-message: str
+message: typing.Optional[str] = None
 embeddings: typing.Dict[str, typing.List[float]]
 not_found_chunk_ids: typing.List[str]

src/cortex_ai/types/embeddings_search_data.py:
@@ -8,7 +8,7 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel

 class EmbeddingsSearchData(UniversalBaseModel):
 success: typing.Optional[bool] = None
-message: str
+message: typing.Optional[str] = None
 chunk_ids: typing.List[str]
 scores: typing.List[float]

src/cortex_ai/types/error_response.py:
@@ -8,7 +8,7 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel

 class ErrorResponse(UniversalBaseModel):
 success: typing.Optional[bool] = None
-message: str
+message: typing.Optional[str] = None
 error_code: typing.Optional[str] = None
 data: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None

src/cortex_ai/types/extended_context.py:
@@ -8,7 +8,7 @@ from .related_chunk import RelatedChunk


 class ExtendedContext(UniversalBaseModel):
-related_chunks: typing.Optional[typing.List[RelatedChunk]] = None
+relations: typing.Optional[typing.List[RelatedChunk]] = None

 if IS_PYDANTIC_V2:
 model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
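
The final hunk renames ExtendedContext.related_chunks to relations while keeping RelatedChunk as the element type. A field-access sketch under that assumption (constructing the model with an empty list is purely illustrative):

    from cortex_ai.types import ExtendedContext

    ctx = ExtendedContext(relations=[])  # this field was related_chunks in 0.1.0
    print(len(ctx.relations or []))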