llama-cloud 0.0.8__py3-none-any.whl → 0.0.10__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries; it is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (40)
  1. llama_cloud/__init__.py +22 -0
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/resources/__init__.py +13 -1
  4. llama_cloud/resources/data_sinks/client.py +40 -8
  5. llama_cloud/resources/data_sources/client.py +48 -12
  6. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +4 -0
  7. llama_cloud/resources/extraction/client.py +55 -38
  8. llama_cloud/resources/organizations/__init__.py +2 -0
  9. llama_cloud/resources/organizations/client.py +867 -0
  10. llama_cloud/resources/parsing/client.py +104 -0
  11. llama_cloud/resources/pipelines/client.py +358 -24
  12. llama_cloud/resources/projects/client.py +28 -8
  13. llama_cloud/types/__init__.py +20 -0
  14. llama_cloud/types/chat_data.py +38 -0
  15. llama_cloud/types/cloud_azure_ai_search_vector_store.py +1 -1
  16. llama_cloud/types/cloud_confluence_data_source.py +45 -0
  17. llama_cloud/types/cloud_jira_data_source.py +43 -0
  18. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  19. llama_cloud/types/configurable_data_source_names.py +8 -0
  20. llama_cloud/types/data_source_component_one.py +4 -0
  21. llama_cloud/types/data_source_create_component_one.py +4 -0
  22. llama_cloud/types/eval_dataset_job_record.py +1 -0
  23. llama_cloud/types/extraction_job.py +35 -0
  24. llama_cloud/types/extraction_schema.py +1 -2
  25. llama_cloud/types/llama_parse_parameters.py +5 -0
  26. llama_cloud/types/organization.py +38 -0
  27. llama_cloud/types/organization_create.py +35 -0
  28. llama_cloud/types/pipeline.py +0 -3
  29. llama_cloud/types/pipeline_create.py +0 -3
  30. llama_cloud/types/pipeline_data_source_component_one.py +4 -0
  31. llama_cloud/types/preset_retrieval_params.py +5 -0
  32. llama_cloud/types/project.py +1 -1
  33. llama_cloud/types/retrieval_mode.py +29 -0
  34. llama_cloud/types/user_organization.py +49 -0
  35. llama_cloud/types/user_organization_create.py +36 -0
  36. llama_cloud/types/user_organization_delete.py +36 -0
  37. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/METADATA +2 -1
  38. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/RECORD +40 -28
  39. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/WHEEL +1 -1
  40. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/LICENSE +0 -0
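
The largest change is in llama_cloud/resources/pipelines/client.py, shown in the diff below. As a rough, non-authoritative sketch of the caller-facing impact (all identifiers and IDs are placeholders): search_pipelines no longer requires project_name and gains an optional project_id filter, preset retrieval parameters gain a retrieval_mode field, and the llama_parse_enabled flag is dropped from the pipeline upsert parameters in favor of llama_parse_parameters.

from llama_cloud import (
    FilterCondition,
    MetadataFilters,
    PipelineType,
    PresetRetrievalParams,
    RetrievalMode,
)
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# project_name is now optional; results can be narrowed by project_id instead
# ("my-project-id" is a placeholder).
pipelines = client.pipelines.search_pipelines(
    project_id="my-project-id",
    pipeline_type=PipelineType.PLAYGROUND,
)

# PresetRetrievalParams now accepts retrieval_mode, mirroring the updated
# docstring examples in the diff below.
retrieval_params = PresetRetrievalParams(
    search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
    retrieval_mode=RetrievalMode.CHUNKS,
)
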
llama_cloud/resources/pipelines/client.py

@@ -9,6 +9,8 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.jsonable_encoder import jsonable_encoder
  from ...core.remove_none_from_dict import remove_none_from_dict
  from ...errors.unprocessable_entity_error import UnprocessableEntityError
+ from ...types.chat_data import ChatData
+ from ...types.chat_message import ChatMessage
  from ...types.cloud_document import CloudDocument
  from ...types.cloud_document_create import CloudDocumentCreate
  from ...types.configured_transformation_item import ConfiguredTransformationItem
@@ -30,7 +32,9 @@ from ...types.pipeline_file import PipelineFile
  from ...types.pipeline_file_create import PipelineFileCreate
  from ...types.pipeline_type import PipelineType
  from ...types.preset_retrieval_params import PresetRetrievalParams
+ from ...types.retrieval_mode import RetrievalMode
  from ...types.retrieve_results import RetrieveResults
+ from ...types.text_node import TextNode
  from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue

  try:
@@ -52,19 +56,22 @@ class PipelinesClient:
  def search_pipelines(
  self,
  *,
- project_name: str,
+ project_name: typing.Optional[str] = None,
  pipeline_name: typing.Optional[str] = None,
  pipeline_type: typing.Optional[PipelineType] = None,
+ project_id: typing.Optional[str] = None,
  ) -> typing.List[Pipeline]:
  """
  Search for pipelines by various parameters.

  Parameters:
- - project_name: str.
+ - project_name: typing.Optional[str].

  - pipeline_name: typing.Optional[str].

  - pipeline_type: typing.Optional[PipelineType].
+
+ - project_id: typing.Optional[str].
  ---
  from llama_cloud import PipelineType
  from llama_cloud.client import LlamaCloud
@@ -73,7 +80,6 @@ class PipelinesClient:
  token="YOUR_TOKEN",
  )
  client.pipelines.search_pipelines(
- project_name="string",
  pipeline_type=PipelineType.PLAYGROUND,
  )
  """
@@ -81,7 +87,12 @@ class PipelinesClient:
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
  params=remove_none_from_dict(
- {"project_name": project_name, "pipeline_name": pipeline_name, "pipeline_type": pipeline_type}
+ {
+ "project_name": project_name,
+ "pipeline_name": pipeline_name,
+ "pipeline_type": pipeline_type,
+ "project_id": project_id,
+ }
  ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -115,6 +126,7 @@ class PipelinesClient:
  PipelineCreate,
  PipelineType,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import LlamaCloud
@@ -133,6 +145,7 @@ class PipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -181,6 +194,7 @@ class PipelinesClient:
  PipelineCreate,
  PipelineType,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import LlamaCloud
@@ -199,6 +213,7 @@ class PipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -268,7 +283,6 @@ class PipelinesClient:
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
- llama_parse_enabled: typing.Optional[bool] = OMIT,
  llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
  name: typing.Optional[str] = OMIT,
  managed_pipeline_id: typing.Optional[str] = OMIT,
@@ -289,8 +303,6 @@ class PipelinesClient:

  - eval_parameters: typing.Optional[EvalExecutionParams]. Eval parameters for the pipeline.

- - llama_parse_enabled: typing.Optional[bool]. Whether to use LlamaParse during pipeline execution.
-
  - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.

  - name: typing.Optional[str].
@@ -305,6 +317,7 @@ class PipelinesClient:
  LlamaParseParameters,
  MetadataFilters,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import LlamaCloud
@@ -323,6 +336,7 @@ class PipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -341,8 +355,6 @@ class PipelinesClient:
  _request["preset_retrieval_parameters"] = preset_retrieval_parameters
  if eval_parameters is not OMIT:
  _request["eval_parameters"] = eval_parameters
- if llama_parse_enabled is not OMIT:
- _request["llama_parse_enabled"] = llama_parse_enabled
  if llama_parse_parameters is not OMIT:
  _request["llama_parse_parameters"] = llama_parse_parameters
  if name is not OMIT:
@@ -462,6 +474,38 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def copy_pipeline(self, pipeline_id: str) -> Pipeline:
+ """
+ Copy a pipeline by ID.
+
+ Parameters:
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.copy_pipeline(
+ pipeline_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/copy"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Pipeline, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def get_eval_dataset_executions(self, pipeline_id: str, eval_dataset_id: str) -> typing.List[EvalDatasetJobRecord]:
  """
  Get the status of an EvalDatasetExecution.
@@ -1069,6 +1113,8 @@ class PipelinesClient:
  rerank_top_n: typing.Optional[int] = OMIT,
  alpha: typing.Optional[float] = OMIT,
  search_filters: typing.Optional[MetadataFilters] = OMIT,
+ files_top_k: typing.Optional[int] = OMIT,
+ retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
  query: str,
  ) -> RetrieveResults:
  """
@@ -1089,9 +1135,13 @@ class PipelinesClient:

  - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.

+ - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
+
+ - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
+
  - query: str. The query to retrieve against.
  ---
- from llama_cloud import FilterCondition, MetadataFilters
+ from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
@@ -1103,6 +1153,7 @@ class PipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  query="string",
  )
  """
@@ -1119,6 +1170,10 @@ class PipelinesClient:
  _request["alpha"] = alpha
  if search_filters is not OMIT:
  _request["search_filters"] = search_filters
+ if files_top_k is not OMIT:
+ _request["files_top_k"] = files_top_k
+ if retrieval_mode is not OMIT:
+ _request["retrieval_mode"] = retrieval_mode
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
@@ -1205,8 +1260,78 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def chat(
+ self,
+ pipeline_id: str,
+ *,
+ messages: typing.List[ChatMessage],
+ data: ChatData,
+ class_name: typing.Optional[str] = OMIT,
+ ) -> typing.Any:
+ """
+ Parameters:
+ - pipeline_id: str.
+
+ - messages: typing.List[ChatMessage].
+
+ - data: ChatData.
+
+ - class_name: typing.Optional[str].
+ ---
+ from llama_cloud import (
+ ChatData,
+ FilterCondition,
+ MetadataFilters,
+ PresetRetrievalParams,
+ RetrievalMode,
+ )
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.chat(
+ pipeline_id="string",
+ messages=[],
+ data=ChatData(
+ retrieval_parameters=PresetRetrievalParams(
+ search_filters=MetadataFilters(
+ filters=[],
+ condition=FilterCondition.AND,
+ ),
+ retrieval_mode=RetrievalMode.CHUNKS,
+ ),
+ ),
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {"messages": messages, "data": data}
+ if class_name is not OMIT:
+ _request["class_name"] = class_name
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def list_pipeline_documents(
- self, pipeline_id: str, *, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
+ self,
+ pipeline_id: str,
+ *,
+ skip: typing.Optional[int] = None,
+ limit: typing.Optional[int] = None,
+ file_id: typing.Optional[str] = None,
+ only_direct_upload: typing.Optional[bool] = None,
  ) -> typing.List[CloudDocument]:
  """
  Return a list of documents for a pipeline.
@@ -1217,6 +1342,10 @@ class PipelinesClient:
  - skip: typing.Optional[int].

  - limit: typing.Optional[int].
+
+ - file_id: typing.Optional[str].
+
+ - only_direct_upload: typing.Optional[bool].
  ---
  from llama_cloud.client import LlamaCloud

@@ -1232,7 +1361,9 @@ class PipelinesClient:
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
  ),
- params=remove_none_from_dict({"skip": skip, "limit": limit}),
+ params=remove_none_from_dict(
+ {"skip": skip, "limit": limit, "file_id": file_id, "only_direct_upload": only_direct_upload}
+ ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1438,6 +1569,44 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def list_pipeline_document_chunks(self, pipeline_id: str, document_id: str) -> typing.List[TextNode]:
+ """
+ Return a list of chunks for a pipeline document.
+
+ Parameters:
+ - pipeline_id: str.
+
+ - document_id: str.
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.list_pipeline_document_chunks(
+ pipeline_id="string",
+ document_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/chunks",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[TextNode], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+

  class AsyncPipelinesClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
1446
1615
  async def search_pipelines(
1447
1616
  self,
1448
1617
  *,
1449
- project_name: str,
1618
+ project_name: typing.Optional[str] = None,
1450
1619
  pipeline_name: typing.Optional[str] = None,
1451
1620
  pipeline_type: typing.Optional[PipelineType] = None,
1621
+ project_id: typing.Optional[str] = None,
1452
1622
  ) -> typing.List[Pipeline]:
1453
1623
  """
1454
1624
  Search for pipelines by various parameters.
1455
1625
 
1456
1626
  Parameters:
1457
- - project_name: str.
1627
+ - project_name: typing.Optional[str].
1458
1628
 
1459
1629
  - pipeline_name: typing.Optional[str].
1460
1630
 
1461
1631
  - pipeline_type: typing.Optional[PipelineType].
1632
+
1633
+ - project_id: typing.Optional[str].
1462
1634
  ---
1463
1635
  from llama_cloud import PipelineType
1464
1636
  from llama_cloud.client import AsyncLlamaCloud
@@ -1467,7 +1639,6 @@ class AsyncPipelinesClient:
1467
1639
  token="YOUR_TOKEN",
1468
1640
  )
1469
1641
  await client.pipelines.search_pipelines(
1470
- project_name="string",
1471
1642
  pipeline_type=PipelineType.PLAYGROUND,
1472
1643
  )
1473
1644
  """
@@ -1475,7 +1646,12 @@ class AsyncPipelinesClient:
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
  params=remove_none_from_dict(
- {"project_name": project_name, "pipeline_name": pipeline_name, "pipeline_type": pipeline_type}
+ {
+ "project_name": project_name,
+ "pipeline_name": pipeline_name,
+ "pipeline_type": pipeline_type,
+ "project_id": project_id,
+ }
  ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1509,6 +1685,7 @@ class AsyncPipelinesClient:
  PipelineCreate,
  PipelineType,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import AsyncLlamaCloud
@@ -1527,6 +1704,7 @@ class AsyncPipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1575,6 +1753,7 @@ class AsyncPipelinesClient:
  PipelineCreate,
  PipelineType,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import AsyncLlamaCloud
@@ -1593,6 +1772,7 @@ class AsyncPipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1662,7 +1842,6 @@ class AsyncPipelinesClient:
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
- llama_parse_enabled: typing.Optional[bool] = OMIT,
  llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
  name: typing.Optional[str] = OMIT,
  managed_pipeline_id: typing.Optional[str] = OMIT,
@@ -1683,8 +1862,6 @@ class AsyncPipelinesClient:

  - eval_parameters: typing.Optional[EvalExecutionParams]. Eval parameters for the pipeline.

- - llama_parse_enabled: typing.Optional[bool]. Whether to use LlamaParse during pipeline execution.
-
  - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.

  - name: typing.Optional[str].
@@ -1699,6 +1876,7 @@ class AsyncPipelinesClient:
  LlamaParseParameters,
  MetadataFilters,
  PresetRetrievalParams,
+ RetrievalMode,
  SupportedEvalLlmModelNames,
  )
  from llama_cloud.client import AsyncLlamaCloud
@@ -1717,6 +1895,7 @@ class AsyncPipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  ),
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1735,8 +1914,6 @@ class AsyncPipelinesClient:
  _request["preset_retrieval_parameters"] = preset_retrieval_parameters
  if eval_parameters is not OMIT:
  _request["eval_parameters"] = eval_parameters
- if llama_parse_enabled is not OMIT:
- _request["llama_parse_enabled"] = llama_parse_enabled
  if llama_parse_parameters is not OMIT:
  _request["llama_parse_parameters"] = llama_parse_parameters
  if name is not OMIT:
@@ -1856,6 +2033,38 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def copy_pipeline(self, pipeline_id: str) -> Pipeline:
+ """
+ Copy a pipeline by ID.
+
+ Parameters:
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.copy_pipeline(
+ pipeline_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/copy"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Pipeline, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def get_eval_dataset_executions(
  self, pipeline_id: str, eval_dataset_id: str
  ) -> typing.List[EvalDatasetJobRecord]:
@@ -2465,6 +2674,8 @@ class AsyncPipelinesClient:
  rerank_top_n: typing.Optional[int] = OMIT,
  alpha: typing.Optional[float] = OMIT,
  search_filters: typing.Optional[MetadataFilters] = OMIT,
+ files_top_k: typing.Optional[int] = OMIT,
+ retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
  query: str,
  ) -> RetrieveResults:
  """
@@ -2485,9 +2696,13 @@ class AsyncPipelinesClient:

  - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.

+ - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
+
+ - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
+
  - query: str. The query to retrieve against.
  ---
- from llama_cloud import FilterCondition, MetadataFilters
+ from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
  from llama_cloud.client import AsyncLlamaCloud

  client = AsyncLlamaCloud(
@@ -2499,6 +2714,7 @@ class AsyncPipelinesClient:
  filters=[],
  condition=FilterCondition.AND,
  ),
+ retrieval_mode=RetrievalMode.CHUNKS,
  query="string",
  )
  """
@@ -2515,6 +2731,10 @@ class AsyncPipelinesClient:
  _request["alpha"] = alpha
  if search_filters is not OMIT:
  _request["search_filters"] = search_filters
+ if files_top_k is not OMIT:
+ _request["files_top_k"] = files_top_k
+ if retrieval_mode is not OMIT:
+ _request["retrieval_mode"] = retrieval_mode
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
@@ -2601,8 +2821,78 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def chat(
+ self,
+ pipeline_id: str,
+ *,
+ messages: typing.List[ChatMessage],
+ data: ChatData,
+ class_name: typing.Optional[str] = OMIT,
+ ) -> typing.Any:
+ """
+ Parameters:
+ - pipeline_id: str.
+
+ - messages: typing.List[ChatMessage].
+
+ - data: ChatData.
+
+ - class_name: typing.Optional[str].
+ ---
+ from llama_cloud import (
+ ChatData,
+ FilterCondition,
+ MetadataFilters,
+ PresetRetrievalParams,
+ RetrievalMode,
+ )
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.chat(
+ pipeline_id="string",
+ messages=[],
+ data=ChatData(
+ retrieval_parameters=PresetRetrievalParams(
+ search_filters=MetadataFilters(
+ filters=[],
+ condition=FilterCondition.AND,
+ ),
+ retrieval_mode=RetrievalMode.CHUNKS,
+ ),
+ ),
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {"messages": messages, "data": data}
+ if class_name is not OMIT:
+ _request["class_name"] = class_name
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def list_pipeline_documents(
- self, pipeline_id: str, *, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
+ self,
+ pipeline_id: str,
+ *,
+ skip: typing.Optional[int] = None,
+ limit: typing.Optional[int] = None,
+ file_id: typing.Optional[str] = None,
+ only_direct_upload: typing.Optional[bool] = None,
  ) -> typing.List[CloudDocument]:
  """
  Return a list of documents for a pipeline.
@@ -2613,6 +2903,10 @@ class AsyncPipelinesClient:
  - skip: typing.Optional[int].

  - limit: typing.Optional[int].
+
+ - file_id: typing.Optional[str].
+
+ - only_direct_upload: typing.Optional[bool].
  ---
  from llama_cloud.client import AsyncLlamaCloud

@@ -2628,7 +2922,9 @@ class AsyncPipelinesClient:
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
  ),
- params=remove_none_from_dict({"skip": skip, "limit": limit}),
+ params=remove_none_from_dict(
+ {"skip": skip, "limit": limit, "file_id": file_id, "only_direct_upload": only_direct_upload}
+ ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -2833,3 +3129,41 @@ class AsyncPipelinesClient:
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_pipeline_document_chunks(self, pipeline_id: str, document_id: str) -> typing.List[TextNode]:
+ """
+ Return a list of chunks for a pipeline document.
+
+ Parameters:
+ - pipeline_id: str.
+
+ - document_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.list_pipeline_document_chunks(
+ pipeline_id="string",
+ document_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/chunks",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[TextNode], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
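
Beyond the signature changes, the 0.0.10 pipelines client adds entirely new endpoints: copy_pipeline, chat, list_pipeline_document_chunks, and file_id/only_direct_upload filters on list_pipeline_documents. A minimal usage sketch assembled from the docstring examples in the diff above (all IDs are placeholders; response shapes should be checked against the generated types):

from llama_cloud import ChatData, FilterCondition, MetadataFilters, PresetRetrievalParams, RetrievalMode
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# POST /api/v1/pipelines/{pipeline_id}/copy: duplicate an existing pipeline.
copied = client.pipelines.copy_pipeline(pipeline_id="pipeline-id")

# GET /api/v1/pipelines/{pipeline_id}/documents now accepts file_id and
# only_direct_upload as query parameters.
documents = client.pipelines.list_pipeline_documents(
    pipeline_id="pipeline-id",
    file_id="file-id",
    only_direct_upload=True,
)

# GET /api/v1/pipelines/{pipeline_id}/documents/{document_id}/chunks: list the
# TextNode chunks produced for a single document.
chunks = client.pipelines.list_pipeline_document_chunks(
    pipeline_id="pipeline-id",
    document_id="document-id",
)

# POST /api/v1/pipelines/{pipeline_id}/chat: chat against the pipeline, with
# retrieval behaviour configured through ChatData as in the docstring example.
response = client.pipelines.chat(
    pipeline_id="pipeline-id",
    messages=[],
    data=ChatData(
        retrieval_parameters=PresetRetrievalParams(
            search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
            retrieval_mode=RetrievalMode.CHUNKS,
        ),
    ),
)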