llama-cloud 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Note: this version of llama-cloud has been flagged as potentially problematic.

Files changed (47)
  1. llama_cloud/__init__.py +34 -4
  2. llama_cloud/client.py +6 -0
  3. llama_cloud/resources/__init__.py +16 -1
  4. llama_cloud/resources/data_sinks/client.py +40 -8
  5. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +2 -0
  6. llama_cloud/resources/data_sources/client.py +48 -12
  7. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +6 -4
  8. llama_cloud/resources/extraction/__init__.py +5 -0
  9. llama_cloud/resources/extraction/client.py +632 -0
  10. llama_cloud/resources/extraction/types/__init__.py +5 -0
  11. llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py +7 -0
  12. llama_cloud/resources/organizations/__init__.py +2 -0
  13. llama_cloud/resources/organizations/client.py +786 -0
  14. llama_cloud/resources/pipelines/client.py +312 -12
  15. llama_cloud/resources/projects/client.py +28 -8
  16. llama_cloud/types/__init__.py +28 -4
  17. llama_cloud/types/azure_open_ai_embedding.py +3 -0
  18. llama_cloud/types/{cloud_google_drive_data_source.py → chat_params.py} +5 -6
  19. llama_cloud/types/cloud_azure_ai_search_vector_store.py +42 -0
  20. llama_cloud/types/cloud_jira_data_source.py +43 -0
  21. llama_cloud/types/{cloud_gcs_data_source.py → cloud_notion_page_data_source.py} +4 -6
  22. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  23. llama_cloud/types/cloud_slack_data_source.py +42 -0
  24. llama_cloud/types/configurable_data_sink_names.py +4 -0
  25. llama_cloud/types/configurable_data_source_names.py +12 -8
  26. llama_cloud/types/data_sink_component_one.py +2 -0
  27. llama_cloud/types/data_sink_create_component_one.py +2 -0
  28. llama_cloud/types/data_source_component_one.py +6 -4
  29. llama_cloud/types/data_source_create_component_one.py +6 -4
  30. llama_cloud/types/eval_dataset_job_record.py +1 -0
  31. llama_cloud/types/extraction_result.py +42 -0
  32. llama_cloud/types/extraction_result_data_value.py +5 -0
  33. llama_cloud/types/extraction_schema.py +43 -0
  34. llama_cloud/types/extraction_schema_data_schema_value.py +7 -0
  35. llama_cloud/types/organization.py +38 -0
  36. llama_cloud/types/organization_create.py +35 -0
  37. llama_cloud/types/pipeline_data_source_component_one.py +6 -4
  38. llama_cloud/types/preset_retrieval_params.py +5 -0
  39. llama_cloud/types/project.py +1 -1
  40. llama_cloud/types/retrieval_mode.py +29 -0
  41. llama_cloud/types/text_node.py +1 -0
  42. llama_cloud/types/user_organization.py +40 -0
  43. llama_cloud/types/user_organization_create.py +36 -0
  44. {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/METADATA +1 -1
  45. {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/RECORD +47 -29
  46. {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/LICENSE +0 -0
  47. {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/WHEEL +0 -0
@@ -9,6 +9,7 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.jsonable_encoder import jsonable_encoder
  from ...core.remove_none_from_dict import remove_none_from_dict
  from ...errors.unprocessable_entity_error import UnprocessableEntityError
+ from ...types.chat_params import ChatParams
  from ...types.cloud_document import CloudDocument
  from ...types.cloud_document_create import CloudDocumentCreate
  from ...types.configured_transformation_item import ConfiguredTransformationItem
@@ -30,6 +31,7 @@ from ...types.pipeline_file import PipelineFile
  from ...types.pipeline_file_create import PipelineFileCreate
  from ...types.pipeline_type import PipelineType
  from ...types.preset_retrieval_params import PresetRetrievalParams
+ from ...types.retrieval_mode import RetrievalMode
  from ...types.retrieve_results import RetrieveResults
  from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue

@@ -52,19 +54,22 @@ class PipelinesClient:
  def search_pipelines(
  self,
  *,
- project_name: str,
+ project_name: typing.Optional[str] = None,
  pipeline_name: typing.Optional[str] = None,
  pipeline_type: typing.Optional[PipelineType] = None,
+ project_id: typing.Optional[str] = None,
  ) -> typing.List[Pipeline]:
  """
  Search for pipelines by various parameters.

  Parameters:
- - project_name: str.
+ - project_name: typing.Optional[str].

  - pipeline_name: typing.Optional[str].

  - pipeline_type: typing.Optional[PipelineType].
+
+ - project_id: typing.Optional[str].
  ---
  from llama_cloud import PipelineType
  from llama_cloud.client import LlamaCloud
@@ -73,7 +78,6 @@ class PipelinesClient:
  token="YOUR_TOKEN",
  )
  client.pipelines.search_pipelines(
- project_name="string",
  pipeline_type=PipelineType.PLAYGROUND,
  )
  """
@@ -81,7 +85,12 @@ class PipelinesClient:
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
  params=remove_none_from_dict(
- {"project_name": project_name, "pipeline_name": pipeline_name, "pipeline_type": pipeline_type}
+ {
+ "project_name": project_name,
+ "pipeline_name": pipeline_name,
+ "pipeline_type": pipeline_type,
+ "project_id": project_id,
+ }
  ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
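
With this change, search_pipelines no longer requires project_name, and results can instead be filtered by the new optional project_id query parameter. A minimal usage sketch (the ID value is a placeholder):

    from llama_cloud import PipelineType
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # All filters are now optional; "proj_123" is a placeholder value.
    pipelines = client.pipelines.search_pipelines(
        project_id="proj_123",
        pipeline_type=PipelineType.PLAYGROUND,
    )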
@@ -115,6 +124,7 @@ class PipelinesClient:
  PipelineCreate,
  PipelineType,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import LlamaCloud
@@ -133,6 +143,7 @@ class PipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -181,6 +192,7 @@ class PipelinesClient:
  PipelineCreate,
  PipelineType,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import LlamaCloud
@@ -199,6 +211,7 @@ class PipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -305,6 +318,7 @@ class PipelinesClient:
  LlamaParseParameters,
  MetadataFilters,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import LlamaCloud
@@ -323,6 +337,7 @@ class PipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
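
Each of the pipeline create/upsert docstring examples above picks up the new retrieval_mode field on PresetRetrievalParams. A minimal sketch of constructing those parameters, using only the types shown in this diff:

    from llama_cloud import (
        FilterCondition,
        MetadataFilters,
        PresetRetrievalParams,
        RetrievalMode,
    )

    # retrieval_mode is the field added in this release; CHUNKS mirrors the docstring examples.
    retrieval_params = PresetRetrievalParams(
        search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
        retrieval_mode=RetrievalMode.CHUNKS,
    )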
@@ -644,12 +659,22 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def list_pipeline_files(self, pipeline_id: str) -> typing.List[PipelineFile]:
+ def list_pipeline_files(
+ self,
+ pipeline_id: str,
+ *,
+ data_source_id: typing.Optional[str] = None,
+ only_manually_uploaded: typing.Optional[bool] = None,
+ ) -> typing.List[PipelineFile]:
  """
  Get files for a pipeline.

  Parameters:
  - pipeline_id: str.
+
+ - data_source_id: typing.Optional[str].
+
+ - only_manually_uploaded: typing.Optional[bool].
  ---
  from llama_cloud.client import LlamaCloud

@@ -663,6 +688,9 @@ class PipelinesClient:
  _response = self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files"),
+ params=remove_none_from_dict(
+ {"data_source_id": data_source_id, "only_manually_uploaded": only_manually_uploaded}
+ ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
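
list_pipeline_files gains two optional keyword-only filters that are forwarded as query parameters. A sketch of the updated call (both ID values are placeholders):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Scope the listing to one data source; only_manually_uploaded=True would instead
    # restrict it to files uploaded by hand.
    files = client.pipelines.list_pipeline_files(
        pipeline_id="pipeline_123",
        data_source_id="datasource_123",
    )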
@@ -837,6 +865,65 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def import_pipeline_metadata(self, pipeline_id: str, *, upload_file: typing.IO) -> typing.Dict[str, str]:
+ """
+ Import metadata for a pipeline.
+
+ Parameters:
+ - pipeline_id: str.
+
+ - upload_file: typing.IO.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
+ data=jsonable_encoder({}),
+ files={"upload_file": upload_file},
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Dict[str, str], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_pipeline_files_metadata(self, pipeline_id: str) -> None:
+ """
+ Delete metadata for all files in a pipeline.
+
+ Parameters:
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.delete_pipeline_files_metadata(
+ pipeline_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
  """
  Get data sources for a pipeline.
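
The new import_pipeline_metadata method uploads a file as multipart form data (upload_file accepts any file-like object), and delete_pipeline_files_metadata clears metadata for all files in the pipeline. A sketch, assuming a hypothetical local metadata.json:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # metadata.json is a hypothetical local file; upload_file takes any binary file object.
    with open("metadata.json", "rb") as f:
        result = client.pipelines.import_pipeline_metadata(
            pipeline_id="pipeline_123",
            upload_file=f,
        )

    # Later, remove the imported per-file metadata again.
    client.pipelines.delete_pipeline_files_metadata(pipeline_id="pipeline_123")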
@@ -997,6 +1084,8 @@ class PipelinesClient:
  rerank_top_n: typing.Optional[int] = OMIT,
  alpha: typing.Optional[float] = OMIT,
  search_filters: typing.Optional[MetadataFilters] = OMIT,
+ files_top_k: typing.Optional[int] = OMIT,
+ retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
  query: str,
  ) -> RetrieveResults:
  """
@@ -1017,9 +1106,13 @@ class PipelinesClient:

  - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.

+ - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
+
+ - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
+
  - query: str. The query to retrieve against.
  ---
- from llama_cloud import FilterCondition, MetadataFilters
+ from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
@@ -1031,6 +1124,7 @@ class PipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  query="string",
  )
  """
@@ -1047,6 +1141,10 @@ class PipelinesClient:
  _request["alpha"] = alpha
  if search_filters is not OMIT:
  _request["search_filters"] = search_filters
+ if files_top_k is not OMIT:
+ _request["files_top_k"] = files_top_k
+ if retrieval_mode is not OMIT:
+ _request["retrieval_mode"] = retrieval_mode
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
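
The retrieval endpoint now accepts files_top_k and retrieval_mode in its request body. These hunks show only the parameter list, not the method name, so the sketch below assumes the generated method is called run_search; the FILES_VIA_METADATA member name is likewise inferred from the files_via_metadata mode named in the docstring:

    from llama_cloud import RetrievalMode
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # run_search is an assumed method name; files_top_k only applies to the files_via_* modes.
    results = client.pipelines.run_search(
        pipeline_id="pipeline_123",
        query="quarterly revenue",
        retrieval_mode=RetrievalMode.FILES_VIA_METADATA,
        files_top_k=3,
    )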
@@ -1133,6 +1231,58 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def chat(self, pipeline_id: str, *, retrieval_parameters: PresetRetrievalParams, data: ChatParams) -> typing.Any:
+ """
+ Parameters:
+ - pipeline_id: str.
+
+ - retrieval_parameters: PresetRetrievalParams.
+
+ - data: ChatParams.
+ ---
+ from llama_cloud import (
+ ChatParams,
+ FilterCondition,
+ MetadataFilters,
+ PresetRetrievalParams,
+ RetrievalMode,
+ )
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.chat(
+ pipeline_id="string",
+ retrieval_parameters=PresetRetrievalParams(
+ search_filters=MetadataFilters(
+ filters=[],
+ condition=FilterCondition.AND,
+ ),
+ retrieval_mode=RetrievalMode.CHUNKS,
+ ),
+ data=ChatParams(
+ messages=[],
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
+ json=jsonable_encoder({"retrieval_parameters": retrieval_parameters, "data": data}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def list_pipeline_documents(
  self, pipeline_id: str, *, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
  ) -> typing.List[CloudDocument]:
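
The new chat method posts preset retrieval parameters plus a ChatParams payload to the pipeline's /chat route and returns the response JSON untyped (typing.Any). Below is a properly indented, runnable restatement of the docstring example; the element type of messages is not visible in this diff, so the list stays empty:

    from llama_cloud import (
        ChatParams,
        FilterCondition,
        MetadataFilters,
        PresetRetrievalParams,
        RetrievalMode,
    )
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    response = client.pipelines.chat(
        pipeline_id="pipeline_123",
        retrieval_parameters=PresetRetrievalParams(
            search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
            retrieval_mode=RetrievalMode.CHUNKS,
        ),
        # The message schema is not part of this diff, so messages is left empty here.
        data=ChatParams(messages=[]),
    )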
@@ -1374,19 +1524,22 @@ class AsyncPipelinesClient:
  async def search_pipelines(
  self,
  *,
- project_name: str,
+ project_name: typing.Optional[str] = None,
  pipeline_name: typing.Optional[str] = None,
  pipeline_type: typing.Optional[PipelineType] = None,
+ project_id: typing.Optional[str] = None,
  ) -> typing.List[Pipeline]:
  """
  Search for pipelines by various parameters.

  Parameters:
- - project_name: str.
+ - project_name: typing.Optional[str].

  - pipeline_name: typing.Optional[str].

  - pipeline_type: typing.Optional[PipelineType].
+
+ - project_id: typing.Optional[str].
  ---
  from llama_cloud import PipelineType
  from llama_cloud.client import AsyncLlamaCloud
@@ -1395,7 +1548,6 @@ class AsyncPipelinesClient:
  token="YOUR_TOKEN",
  )
  await client.pipelines.search_pipelines(
- project_name="string",
  pipeline_type=PipelineType.PLAYGROUND,
  )
  """
@@ -1403,7 +1555,12 @@ class AsyncPipelinesClient:
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
  params=remove_none_from_dict(
- {"project_name": project_name, "pipeline_name": pipeline_name, "pipeline_type": pipeline_type}
+ {
+ "project_name": project_name,
+ "pipeline_name": pipeline_name,
+ "pipeline_type": pipeline_type,
+ "project_id": project_id,
+ }
  ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1437,6 +1594,7 @@ class AsyncPipelinesClient:
  PipelineCreate,
  PipelineType,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import AsyncLlamaCloud
@@ -1455,6 +1613,7 @@ class AsyncPipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1503,6 +1662,7 @@ class AsyncPipelinesClient:
  PipelineCreate,
  PipelineType,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import AsyncLlamaCloud
@@ -1521,6 +1681,7 @@ class AsyncPipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1627,6 +1788,7 @@ class AsyncPipelinesClient:
  LlamaParseParameters,
  MetadataFilters,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import AsyncLlamaCloud
@@ -1645,6 +1807,7 @@ class AsyncPipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1968,12 +2131,22 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def list_pipeline_files(self, pipeline_id: str) -> typing.List[PipelineFile]:
+ async def list_pipeline_files(
+ self,
+ pipeline_id: str,
+ *,
+ data_source_id: typing.Optional[str] = None,
+ only_manually_uploaded: typing.Optional[bool] = None,
+ ) -> typing.List[PipelineFile]:
  """
  Get files for a pipeline.

  Parameters:
  - pipeline_id: str.
+
+ - data_source_id: typing.Optional[str].
+
+ - only_manually_uploaded: typing.Optional[bool].
  ---
  from llama_cloud.client import AsyncLlamaCloud

@@ -1987,6 +2160,9 @@ class AsyncPipelinesClient:
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files"),
+ params=remove_none_from_dict(
+ {"data_source_id": data_source_id, "only_manually_uploaded": only_manually_uploaded}
+ ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -2161,6 +2337,65 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def import_pipeline_metadata(self, pipeline_id: str, *, upload_file: typing.IO) -> typing.Dict[str, str]:
+ """
+ Import metadata for a pipeline.
+
+ Parameters:
+ - pipeline_id: str.
+
+ - upload_file: typing.IO.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
+ data=jsonable_encoder({}),
+ files={"upload_file": upload_file},
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Dict[str, str], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_pipeline_files_metadata(self, pipeline_id: str) -> None:
+ """
+ Delete metadata for all files in a pipeline.
+
+ Parameters:
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.delete_pipeline_files_metadata(
+ pipeline_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
  """
  Get data sources for a pipeline.
@@ -2321,6 +2556,8 @@ class AsyncPipelinesClient:
  rerank_top_n: typing.Optional[int] = OMIT,
  alpha: typing.Optional[float] = OMIT,
  search_filters: typing.Optional[MetadataFilters] = OMIT,
+ files_top_k: typing.Optional[int] = OMIT,
+ retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
  query: str,
  ) -> RetrieveResults:
  """
@@ -2341,9 +2578,13 @@ class AsyncPipelinesClient:

  - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.

+ - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
+
+ - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
+
  - query: str. The query to retrieve against.
  ---
- from llama_cloud import FilterCondition, MetadataFilters
+ from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
  from llama_cloud.client import AsyncLlamaCloud

  client = AsyncLlamaCloud(
@@ -2355,6 +2596,7 @@ class AsyncPipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  query="string",
  )
  """
@@ -2371,6 +2613,10 @@ class AsyncPipelinesClient:
  _request["alpha"] = alpha
  if search_filters is not OMIT:
  _request["search_filters"] = search_filters
+ if files_top_k is not OMIT:
+ _request["files_top_k"] = files_top_k
+ if retrieval_mode is not OMIT:
+ _request["retrieval_mode"] = retrieval_mode
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
@@ -2457,6 +2703,60 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def chat(
+ self, pipeline_id: str, *, retrieval_parameters: PresetRetrievalParams, data: ChatParams
+ ) -> typing.Any:
+ """
+ Parameters:
+ - pipeline_id: str.
+
+ - retrieval_parameters: PresetRetrievalParams.
+
+ - data: ChatParams.
+ ---
+ from llama_cloud import (
+ ChatParams,
+ FilterCondition,
+ MetadataFilters,
+ PresetRetrievalParams,
+ RetrievalMode,
+ )
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.chat(
+ pipeline_id="string",
+ retrieval_parameters=PresetRetrievalParams(
+ search_filters=MetadataFilters(
+ filters=[],
+ condition=FilterCondition.AND,
+ ),
+ retrieval_mode=RetrievalMode.CHUNKS,
+ ),
+ data=ChatParams(
+ messages=[],
+ ),
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
+ json=jsonable_encoder({"retrieval_parameters": retrieval_parameters, "data": data}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def list_pipeline_documents(
  self, pipeline_id: str, *, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
  ) -> typing.List[CloudDocument]:
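
Every method above has an async twin on AsyncPipelinesClient with an identical signature. A sketch of exercising one of the new parameters from asyncio (the pipeline ID is a placeholder):

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Same keyword-only filters as the sync client.
        files = await client.pipelines.list_pipeline_files(
            pipeline_id="pipeline_123",
            only_manually_uploaded=True,
        )
        print(len(files))

    asyncio.run(main())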