llama-cloud 0.0.9__py3-none-any.whl → 0.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of llama-cloud has been flagged as potentially problematic.

@@ -9,7 +9,8 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.jsonable_encoder import jsonable_encoder
  from ...core.remove_none_from_dict import remove_none_from_dict
  from ...errors.unprocessable_entity_error import UnprocessableEntityError
- from ...types.chat_params import ChatParams
+ from ...types.chat_data import ChatData
+ from ...types.chat_message import ChatMessage
  from ...types.cloud_document import CloudDocument
  from ...types.cloud_document_create import CloudDocumentCreate
  from ...types.configured_transformation_item import ConfiguredTransformationItem
@@ -33,6 +34,7 @@ from ...types.pipeline_type import PipelineType
  from ...types.preset_retrieval_params import PresetRetrievalParams
  from ...types.retrieval_mode import RetrievalMode
  from ...types.retrieve_results import RetrieveResults
+ from ...types.text_node import TextNode
  from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue

  try:
@@ -281,7 +283,6 @@ class PipelinesClient:
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
- llama_parse_enabled: typing.Optional[bool] = OMIT,
  llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
  name: typing.Optional[str] = OMIT,
  managed_pipeline_id: typing.Optional[str] = OMIT,
@@ -302,8 +303,6 @@ class PipelinesClient:

  - eval_parameters: typing.Optional[EvalExecutionParams]. Eval parameters for the pipeline.

- - llama_parse_enabled: typing.Optional[bool]. Whether to use LlamaParse during pipeline execution.
-
  - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.

  - name: typing.Optional[str].
@@ -356,8 +355,6 @@ class PipelinesClient:
  _request["preset_retrieval_parameters"] = preset_retrieval_parameters
  if eval_parameters is not OMIT:
  _request["eval_parameters"] = eval_parameters
- if llama_parse_enabled is not OMIT:
- _request["llama_parse_enabled"] = llama_parse_enabled
  if llama_parse_parameters is not OMIT:
  _request["llama_parse_parameters"] = llama_parse_parameters
  if name is not OMIT:
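
The llama_parse_enabled flag is removed from both the sync and async pipeline create/upsert signatures. A minimal migration sketch, assuming LlamaParse behaviour is now driven entirely by llama_parse_parameters and that LlamaParseParameters is re-exported from the package root like the other types; the method name below is a placeholder for whichever PipelinesClient create/upsert call you already use, since the hunk does not show it:

    from llama_cloud import LlamaParseParameters
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # 0.0.9 sent the flag as its own request field:
    #   client.pipelines.upsert_pipeline(..., llama_parse_enabled=True)
    # 0.0.10 drops the flag; pass a LlamaParseParameters object instead.
    client.pipelines.upsert_pipeline(  # hypothetical method name; not shown in this hunk
        name="my-pipeline",
        llama_parse_parameters=LlamaParseParameters(),  # field values omitted; defaults assumed
    )
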
@@ -477,6 +474,38 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def copy_pipeline(self, pipeline_id: str) -> Pipeline:
+ """
+ Copy a pipeline by ID.
+
+ Parameters:
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.copy_pipeline(
+ pipeline_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/copy"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Pipeline, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def get_eval_dataset_executions(self, pipeline_id: str, eval_dataset_id: str) -> typing.List[EvalDatasetJobRecord]:
  """
  Get the status of an EvalDatasetExecution.
@@ -1231,17 +1260,26 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def chat(self, pipeline_id: str, *, retrieval_parameters: PresetRetrievalParams, data: ChatParams) -> typing.Any:
+ def chat(
+ self,
+ pipeline_id: str,
+ *,
+ messages: typing.List[ChatMessage],
+ data: ChatData,
+ class_name: typing.Optional[str] = OMIT,
+ ) -> typing.Any:
  """
  Parameters:
  - pipeline_id: str.

- - retrieval_parameters: PresetRetrievalParams.
+ - messages: typing.List[ChatMessage].

- - data: ChatParams.
+ - data: ChatData.
+
+ - class_name: typing.Optional[str].
  ---
  from llama_cloud import (
- ChatParams,
+ ChatData,
  FilterCondition,
  MetadataFilters,
  PresetRetrievalParams,
@@ -1254,22 +1292,25 @@ class PipelinesClient:
  )
  client.pipelines.chat(
  pipeline_id="string",
- retrieval_parameters=PresetRetrievalParams(
- search_filters=MetadataFilters(
- filters=[],
- condition=FilterCondition.AND,
+ messages=[],
+ data=ChatData(
+ retrieval_parameters=PresetRetrievalParams(
+ search_filters=MetadataFilters(
+ filters=[],
+ condition=FilterCondition.AND,
+ ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
- retrieval_mode=RetrievalMode.CHUNKS,
- ),
- data=ChatParams(
- messages=[],
  ),
  )
  """
+ _request: typing.Dict[str, typing.Any] = {"messages": messages, "data": data}
+ if class_name is not OMIT:
+ _request["class_name"] = class_name
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
- json=jsonable_encoder({"retrieval_parameters": retrieval_parameters, "data": data}),
+ json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
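
For callers, the breaking change to chat() is that messages moved out of the request model and into the method signature, while the retrieval parameters moved into the new ChatData model. A minimal before/after sketch built from the docstring examples above; the pipeline ID is a placeholder and messages is left empty, as in the docstring:

    from llama_cloud import ChatData, FilterCondition, MetadataFilters, PresetRetrievalParams, RetrievalMode
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # 0.0.9 (removed):
    #   client.pipelines.chat(
    #       pipeline_id="...",
    #       retrieval_parameters=PresetRetrievalParams(...),
    #       data=ChatParams(messages=[...]),
    #   )
    # 0.0.10: messages are top level, retrieval parameters live on ChatData.
    client.pipelines.chat(
        pipeline_id="YOUR_PIPELINE_ID",  # placeholder
        messages=[],  # list of llama_cloud ChatMessage objects
        data=ChatData(
            retrieval_parameters=PresetRetrievalParams(
                search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
                retrieval_mode=RetrievalMode.CHUNKS,
            ),
        ),
    )
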
@@ -1284,7 +1325,13 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)

  def list_pipeline_documents(
- self, pipeline_id: str, *, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
+ self,
+ pipeline_id: str,
+ *,
+ skip: typing.Optional[int] = None,
+ limit: typing.Optional[int] = None,
+ file_id: typing.Optional[str] = None,
+ only_direct_upload: typing.Optional[bool] = None,
  ) -> typing.List[CloudDocument]:
  """
  Return a list of documents for a pipeline.
@@ -1295,6 +1342,10 @@ class PipelinesClient:
  - skip: typing.Optional[int].

  - limit: typing.Optional[int].
+
+ - file_id: typing.Optional[str].
+
+ - only_direct_upload: typing.Optional[bool].
  ---
  from llama_cloud.client import LlamaCloud

@@ -1310,7 +1361,9 @@ class PipelinesClient:
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
  ),
- params=remove_none_from_dict({"skip": skip, "limit": limit}),
+ params=remove_none_from_dict(
+ {"skip": skip, "limit": limit, "file_id": file_id, "only_direct_upload": only_direct_upload}
+ ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
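
list_pipeline_documents (sync and async) now forwards two extra optional query parameters. A minimal sketch; the parameter semantics below are inferred from the names, since this diff only shows them being added to the query string:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Both new filters are optional and, like skip/limit, are dropped from the
    # query string when left as None.
    documents = client.pipelines.list_pipeline_documents(
        pipeline_id="YOUR_PIPELINE_ID",  # placeholder
        file_id="YOUR_FILE_ID",          # placeholder; presumably restricts results to one source file
        only_direct_upload=True,         # presumably restricts results to directly uploaded documents
        limit=100,
    )
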
@@ -1516,6 +1569,44 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def list_pipeline_document_chunks(self, pipeline_id: str, document_id: str) -> typing.List[TextNode]:
+ """
+ Return a list of chunks for a pipeline document.
+
+ Parameters:
+ - pipeline_id: str.
+
+ - document_id: str.
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.list_pipeline_document_chunks(
+ pipeline_id="string",
+ document_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/chunks",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[TextNode], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+

  class AsyncPipelinesClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
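
Combining the new read endpoints: a minimal sketch that lists a pipeline's documents and fetches the chunks for each one. It assumes CloudDocument exposes an id field and TextNode exposes a text field; neither model appears in this diff, so treat those attribute names as assumptions:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    pipeline_id = "YOUR_PIPELINE_ID"  # placeholder

    for doc in client.pipelines.list_pipeline_documents(pipeline_id=pipeline_id, limit=10):
        chunks = client.pipelines.list_pipeline_document_chunks(
            pipeline_id=pipeline_id,
            document_id=doc.id,  # assumed CloudDocument field
        )
        for node in chunks:
            print(node.text[:80])  # assumed TextNode field
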
@@ -1751,7 +1842,6 @@ class AsyncPipelinesClient:
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
- llama_parse_enabled: typing.Optional[bool] = OMIT,
  llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
  name: typing.Optional[str] = OMIT,
  managed_pipeline_id: typing.Optional[str] = OMIT,
@@ -1772,8 +1862,6 @@ class AsyncPipelinesClient:

  - eval_parameters: typing.Optional[EvalExecutionParams]. Eval parameters for the pipeline.

- - llama_parse_enabled: typing.Optional[bool]. Whether to use LlamaParse during pipeline execution.
-
  - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.

  - name: typing.Optional[str].
@@ -1826,8 +1914,6 @@ class AsyncPipelinesClient:
  _request["preset_retrieval_parameters"] = preset_retrieval_parameters
  if eval_parameters is not OMIT:
  _request["eval_parameters"] = eval_parameters
- if llama_parse_enabled is not OMIT:
- _request["llama_parse_enabled"] = llama_parse_enabled
  if llama_parse_parameters is not OMIT:
  _request["llama_parse_parameters"] = llama_parse_parameters
  if name is not OMIT:
@@ -1947,6 +2033,38 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def copy_pipeline(self, pipeline_id: str) -> Pipeline:
+ """
+ Copy a pipeline by ID.
+
+ Parameters:
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.copy_pipeline(
+ pipeline_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/copy"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Pipeline, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def get_eval_dataset_executions(
  self, pipeline_id: str, eval_dataset_id: str
  ) -> typing.List[EvalDatasetJobRecord]:
@@ -2704,18 +2822,25 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)

  async def chat(
- self, pipeline_id: str, *, retrieval_parameters: PresetRetrievalParams, data: ChatParams
+ self,
+ pipeline_id: str,
+ *,
+ messages: typing.List[ChatMessage],
+ data: ChatData,
+ class_name: typing.Optional[str] = OMIT,
  ) -> typing.Any:
  """
  Parameters:
  - pipeline_id: str.

- - retrieval_parameters: PresetRetrievalParams.
+ - messages: typing.List[ChatMessage].

- - data: ChatParams.
+ - data: ChatData.
+
+ - class_name: typing.Optional[str].
  ---
  from llama_cloud import (
- ChatParams,
+ ChatData,
  FilterCondition,
  MetadataFilters,
  PresetRetrievalParams,
@@ -2728,22 +2853,25 @@ class AsyncPipelinesClient:
  )
  await client.pipelines.chat(
  pipeline_id="string",
- retrieval_parameters=PresetRetrievalParams(
- search_filters=MetadataFilters(
- filters=[],
- condition=FilterCondition.AND,
+ messages=[],
+ data=ChatData(
+ retrieval_parameters=PresetRetrievalParams(
+ search_filters=MetadataFilters(
+ filters=[],
+ condition=FilterCondition.AND,
+ ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
- retrieval_mode=RetrievalMode.CHUNKS,
- ),
- data=ChatParams(
- messages=[],
  ),
  )
  """
+ _request: typing.Dict[str, typing.Any] = {"messages": messages, "data": data}
+ if class_name is not OMIT:
+ _request["class_name"] = class_name
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
- json=jsonable_encoder({"retrieval_parameters": retrieval_parameters, "data": data}),
+ json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -2758,7 +2886,13 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)

  async def list_pipeline_documents(
- self, pipeline_id: str, *, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
+ self,
+ pipeline_id: str,
+ *,
+ skip: typing.Optional[int] = None,
+ limit: typing.Optional[int] = None,
+ file_id: typing.Optional[str] = None,
+ only_direct_upload: typing.Optional[bool] = None,
  ) -> typing.List[CloudDocument]:
  """
  Return a list of documents for a pipeline.
@@ -2769,6 +2903,10 @@ class AsyncPipelinesClient:
  - skip: typing.Optional[int].

  - limit: typing.Optional[int].
+
+ - file_id: typing.Optional[str].
+
+ - only_direct_upload: typing.Optional[bool].
  ---
  from llama_cloud.client import AsyncLlamaCloud

@@ -2784,7 +2922,9 @@ class AsyncPipelinesClient:
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
  ),
- params=remove_none_from_dict({"skip": skip, "limit": limit}),
+ params=remove_none_from_dict(
+ {"skip": skip, "limit": limit, "file_id": file_id, "only_direct_upload": only_direct_upload}
+ ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -2989,3 +3129,41 @@ class AsyncPipelinesClient:
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_pipeline_document_chunks(self, pipeline_id: str, document_id: str) -> typing.List[TextNode]:
+ """
+ Return a list of chunks for a pipeline document.
+
+ Parameters:
+ - pipeline_id: str.
+
+ - document_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.list_pipeline_document_chunks(
+ pipeline_id="string",
+ document_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/chunks",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[TextNode], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
@@ -4,11 +4,12 @@ from .azure_open_ai_embedding import AzureOpenAiEmbedding
  from .base import Base
  from .base_prompt_template import BasePromptTemplate
  from .bedrock_embedding import BedrockEmbedding
+ from .chat_data import ChatData
  from .chat_message import ChatMessage
- from .chat_params import ChatParams
  from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
  from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
  from .cloud_chroma_vector_store import CloudChromaVectorStore
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
  from .cloud_document import CloudDocument
  from .cloud_document_create import CloudDocumentCreate
  from .cloud_jira_data_source import CloudJiraDataSource
@@ -55,6 +56,7 @@ from .eval_llm_model_data import EvalLlmModelData
  from .eval_question import EvalQuestion
  from .eval_question_create import EvalQuestionCreate
  from .eval_question_result import EvalQuestionResult
+ from .extraction_job import ExtractionJob
  from .extraction_result import ExtractionResult
  from .extraction_result_data_value import ExtractionResultDataValue
  from .extraction_schema import ExtractionSchema
@@ -134,6 +136,7 @@ from .token_text_splitter import TokenTextSplitter
  from .transformation_category_names import TransformationCategoryNames
  from .user_organization import UserOrganization
  from .user_organization_create import UserOrganizationCreate
+ from .user_organization_delete import UserOrganizationDelete
  from .validation_error import ValidationError
  from .validation_error_loc_item import ValidationErrorLocItem

@@ -142,11 +145,12 @@ __all__ = [
  "Base",
  "BasePromptTemplate",
  "BedrockEmbedding",
+ "ChatData",
  "ChatMessage",
- "ChatParams",
  "CloudAzStorageBlobDataSource",
  "CloudAzureAiSearchVectorStore",
  "CloudChromaVectorStore",
+ "CloudConfluenceDataSource",
  "CloudDocument",
  "CloudDocumentCreate",
  "CloudJiraDataSource",
@@ -193,6 +197,7 @@ __all__ = [
  "EvalQuestion",
  "EvalQuestionCreate",
  "EvalQuestionResult",
+ "ExtractionJob",
  "ExtractionResult",
  "ExtractionResultDataValue",
  "ExtractionSchema",
@@ -272,6 +277,7 @@ __all__ = [
  "TransformationCategoryNames",
  "UserOrganization",
  "UserOrganizationCreate",
+ "UserOrganizationDelete",
  "ValidationError",
  "ValidationErrorLocItem",
  ]
@@ -4,7 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .chat_message import ChatMessage
+ from .preset_retrieval_params import PresetRetrievalParams

  try:
  import pydantic
@@ -15,13 +15,13 @@ except ImportError:
  import pydantic # type: ignore


- class ChatParams(pydantic.BaseModel):
+ class ChatData(pydantic.BaseModel):
  """
  Base schema model for BaseComponent classes used in the platform.
  Comes with special serialization logic for types used commonly in platform codebase.
  """

- messages: typing.List[ChatMessage]
+ retrieval_parameters: PresetRetrievalParams
  class_name: typing.Optional[str]

  def json(self, **kwargs: typing.Any) -> str:
@@ -24,7 +24,7 @@ class CloudAzureAiSearchVectorStore(pydantic.BaseModel):
  search_service_endpoint: str
  search_service_api_version: typing.Optional[str]
  index_name: typing.Optional[str]
- filterable_metadata_field_keys: typing.Optional[typing.List[str]]
+ filterable_metadata_field_keys: typing.Optional[typing.Dict[str, typing.Any]]
  embedding_dimension: typing.Optional[int]
  class_name: typing.Optional[str]
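
filterable_metadata_field_keys changes from an optional list of field names to an optional mapping, which is a breaking change for existing callers. A minimal sketch of the value shape only; the dict keys and values below are illustrative, since the expected mapping semantics are not documented in this diff:

    # 0.0.9 value shape (typing.List[str]):
    filterable_metadata_field_keys = ["author", "year"]
    # 0.0.10 value shape (typing.Dict[str, typing.Any]); the expected keys/values are not
    # shown in this diff, so this mapping is illustrative only.
    filterable_metadata_field_keys = {"author": "author", "year": "year"}
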
 
@@ -0,0 +1,45 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+ import pydantic
+ if pydantic.__version__.startswith("1."):
+ raise ImportError
+ import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+ import pydantic # type: ignore
+
+
+ class CloudConfluenceDataSource(pydantic.BaseModel):
+ """
+ Base component object to capture class names.
+ """
+
+ server_url: str = pydantic.Field(description="The server URL of the Confluence instance.")
+ authentication_mechanism: str = pydantic.Field(
+ description="Type of Authentication for connecting to Confluence APIs."
+ )
+ user_name: typing.Optional[str] = pydantic.Field(description="The username to use for authentication.")
+ api_token: typing.Optional[str] = pydantic.Field(description="The API token to use for authentication.")
+ space_key: typing.Optional[str] = pydantic.Field(description="The space key to read from.")
+ page_ids: typing.Optional[str] = pydantic.Field(description="The page IDs of the Confluence to read from.")
+ cql: typing.Optional[str] = pydantic.Field(description="The CQL query to use for fetching pages.")
+ label: typing.Optional[str] = pydantic.Field(description="The label to use for fetching pages.")
+ class_name: typing.Optional[str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
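
A minimal sketch of constructing the new Confluence data-source component. The field names come from the model above; the authentication_mechanism value, the credentials, and the assumption that the class is re-exported from the package root (as the __init__ changes elsewhere in this diff suggest) are placeholders rather than documented behaviour:

    from llama_cloud import CloudConfluenceDataSource

    confluence_source = CloudConfluenceDataSource(
        server_url="https://your-domain.atlassian.net/wiki",  # placeholder
        authentication_mechanism="basic",       # placeholder; accepted values are not listed in this diff
        user_name="you@example.com",            # placeholder
        api_token="YOUR_CONFLUENCE_API_TOKEN",  # placeholder
        space_key="DOCS",                       # placeholder; cql / page_ids / label are alternative selectors
    )
    # Serializes with by_alias / exclude_unset, so unset optional fields are omitted.
    payload = confluence_source.dict()
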
@@ -17,6 +17,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
  MICROSOFT_SHAREPOINT = "MICROSOFT_SHAREPOINT"
  SLACK = "SLACK"
  NOTION_PAGE = "NOTION_PAGE"
+ CONFLUENCE = "CONFLUENCE"
  JIRA = "JIRA"

  def visit(
@@ -27,6 +28,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
  microsoft_sharepoint: typing.Callable[[], T_Result],
  slack: typing.Callable[[], T_Result],
  notion_page: typing.Callable[[], T_Result],
+ confluence: typing.Callable[[], T_Result],
  jira: typing.Callable[[], T_Result],
  ) -> T_Result:
  if self is ConfigurableDataSourceNames.S_3:
@@ -41,5 +43,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
  return slack()
  if self is ConfigurableDataSourceNames.NOTION_PAGE:
  return notion_page()
+ if self is ConfigurableDataSourceNames.CONFLUENCE:
+ return confluence()
  if self is ConfigurableDataSourceNames.JIRA:
  return jira()
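
The enum gains a CONFLUENCE member, and visit() now requires a matching confluence callable, so existing visit() call sites need an extra handler. A minimal sketch of the new member, assuming the enum is re-exported from the package root like the other types:

    from llama_cloud import ConfigurableDataSourceNames

    source_type = ConfigurableDataSourceNames.CONFLUENCE
    # ConfigurableDataSourceNames is a str enum, so the member compares equal to its value.
    assert source_type == "CONFLUENCE"
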
@@ -3,6 +3,7 @@
  import typing

  from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
  from .cloud_jira_data_source import CloudJiraDataSource
  from .cloud_notion_page_data_source import CloudNotionPageDataSource
  from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,5 +18,6 @@ DataSourceComponentOne = typing.Union[
  CloudSharepointDataSource,
  CloudSlackDataSource,
  CloudNotionPageDataSource,
+ CloudConfluenceDataSource,
  CloudJiraDataSource,
  ]
@@ -3,6 +3,7 @@
  import typing

  from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
  from .cloud_jira_data_source import CloudJiraDataSource
  from .cloud_notion_page_data_source import CloudNotionPageDataSource
  from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,5 +18,6 @@ DataSourceCreateComponentOne = typing.Union[
  CloudSharepointDataSource,
  CloudSlackDataSource,
  CloudNotionPageDataSource,
+ CloudConfluenceDataSource,
  CloudJiraDataSource,
  ]
@@ -0,0 +1,35 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .file import File
+ from .status_enum import StatusEnum
+
+ try:
+ import pydantic
+ if pydantic.__version__.startswith("1."):
+ raise ImportError
+ import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+ import pydantic # type: ignore
+
+
+ class ExtractionJob(pydantic.BaseModel):
+ id: str = pydantic.Field(description="The id of the extraction job")
+ status: StatusEnum = pydantic.Field(description="The status of the extraction job")
+ file: File = pydantic.Field(description="The file that the extract was extracted from")
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -25,7 +25,7 @@ class ExtractionSchema(pydantic.BaseModel):
  updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
  name: str = pydantic.Field(description="The name of the extraction schema")
  project_id: str = pydantic.Field(description="The ID of the project that the extraction schema belongs to")
- data_schema: typing.Dict[str, ExtractionSchemaDataSchemaValue] = pydantic.Field(
+ data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaDataSchemaValue]] = pydantic.Field(
  description="The schema of the data"
  )
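
data_schema is now Optional, so code that previously treated it as always present should guard against None. A minimal sketch; ExtractionSchema is taken from the model above and the helper name is illustrative:

    from llama_cloud import ExtractionSchema

    def schema_field_names(schema: ExtractionSchema) -> list:
        # data_schema may now be None, so guard before iterating.
        return list((schema.data_schema or {}).keys())
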