llama-cloud 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of llama-cloud might be problematic.

@@ -18,7 +18,8 @@ from ...types.eval_execution_params import EvalExecutionParams
  from ...types.eval_execution_params_override import EvalExecutionParamsOverride
  from ...types.eval_question_result import EvalQuestionResult
  from ...types.http_validation_error import HttpValidationError
- from ...types.managed_ingestion_status import ManagedIngestionStatus
+ from ...types.llama_parse_parameters import LlamaParseParameters
+ from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
  from ...types.metadata_filters import MetadataFilters
  from ...types.pipeline import Pipeline
  from ...types.pipeline_create import PipelineCreate
@@ -27,7 +28,6 @@ from ...types.pipeline_data_source_create import PipelineDataSourceCreate
  from ...types.pipeline_deployment import PipelineDeployment
  from ...types.pipeline_file import PipelineFile
  from ...types.pipeline_file_create import PipelineFileCreate
- from ...types.pipeline_file_status_response import PipelineFileStatusResponse
  from ...types.pipeline_type import PipelineType
  from ...types.preset_retrieval_params import PresetRetrievalParams
  from ...types.retrieve_results import RetrieveResults
@@ -110,6 +110,7 @@ class PipelinesClient:
  DataSinkCreate,
  EvalExecutionParams,
  FilterCondition,
+ LlamaParseParameters,
  MetadataFilters,
  PipelineCreate,
  PipelineType,
@@ -136,6 +137,7 @@ class PipelinesClient:
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
  ),
+ llama_parse_parameters=LlamaParseParameters(),
  name="string",
  pipeline_type=PipelineType.PLAYGROUND,
  ),
@@ -174,6 +176,7 @@ class PipelinesClient:
  DataSinkCreate,
  EvalExecutionParams,
  FilterCondition,
+ LlamaParseParameters,
  MetadataFilters,
  PipelineCreate,
  PipelineType,
@@ -200,6 +203,7 @@ class PipelinesClient:
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
  ),
+ llama_parse_parameters=LlamaParseParameters(),
  name="string",
  pipeline_type=PipelineType.PLAYGROUND,
  ),
@@ -223,16 +227,12 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_pipeline(
- self, pipeline_id: str, *, with_managed_ingestion_status: typing.Optional[bool] = None
- ) -> Pipeline:
+ def get_pipeline(self, pipeline_id: str) -> Pipeline:
  """
  Get a pipeline by ID for a given project.

  Parameters:
  - pipeline_id: str.
-
- - with_managed_ingestion_status: typing.Optional[bool].
  ---
  from llama_cloud.client import LlamaCloud

@@ -246,7 +246,6 @@ class PipelinesClient:
  _response = self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
- params=remove_none_from_dict({"with_managed_ingestion_status": with_managed_ingestion_status}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
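The with_managed_ingestion_status query parameter is gone from get_pipeline in 0.0.7; ingestion status is now served by the new get_pipeline_status method added further down in this diff. A minimal migration sketch (the pipeline ID is a placeholder):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # 0.0.5: pipeline = client.pipelines.get_pipeline("pipeline-id", with_managed_ingestion_status=True)
    # 0.0.7: fetch the pipeline and its ingestion status with two separate calls
    pipeline = client.pipelines.get_pipeline(pipeline_id="pipeline-id")
    status = client.pipelines.get_pipeline_status(pipeline_id="pipeline-id")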
@@ -270,6 +269,7 @@ class PipelinesClient:
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
  llama_parse_enabled: typing.Optional[bool] = OMIT,
+ llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
  name: typing.Optional[str] = OMIT,
  managed_pipeline_id: typing.Optional[str] = OMIT,
  ) -> Pipeline:
@@ -291,6 +291,8 @@ class PipelinesClient:

  - llama_parse_enabled: typing.Optional[bool]. Whether to use LlamaParse during pipeline execution.

+ - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
+
  - name: typing.Optional[str].

  - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
@@ -300,6 +302,7 @@ class PipelinesClient:
  DataSinkCreate,
  EvalExecutionParams,
  FilterCondition,
+ LlamaParseParameters,
  MetadataFilters,
  PresetRetrievalParams,
  SupportedEvalLlmModelNames,
@@ -324,6 +327,7 @@ class PipelinesClient:
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
  ),
+ llama_parse_parameters=LlamaParseParameters(),
  )
  """
  _request: typing.Dict[str, typing.Any] = {}
@@ -339,6 +343,8 @@ class PipelinesClient:
  _request["eval_parameters"] = eval_parameters
  if llama_parse_enabled is not OMIT:
  _request["llama_parse_enabled"] = llama_parse_enabled
+ if llama_parse_parameters is not OMIT:
+ _request["llama_parse_parameters"] = llama_parse_parameters
  if name is not OMIT:
  _request["name"] = name
  if managed_pipeline_id is not OMIT:
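As with the other optional fields of upsert_pipeline, the new llama_parse_parameters argument is only added to the request body when it is supplied. A short sketch of enabling LlamaParse with default parsing settings; this diff does not show which fields LlamaParseParameters accepts, so only the default constructor is used and the pipeline name is a placeholder:

    from llama_cloud import LlamaParseParameters
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Upsert a pipeline that parses files with LlamaParse using default settings
    pipeline = client.pipelines.upsert_pipeline(
        name="my-pipeline",
        llama_parse_enabled=True,
        llama_parse_parameters=LlamaParseParameters(),
    )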
@@ -392,6 +398,38 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def get_pipeline_status(self, pipeline_id: str) -> ManagedIngestionStatusResponse:
+ """
+ Get the status of a pipeline by ID.
+
+ Parameters:
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.get_pipeline_status(
+ pipeline_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/status"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def sync_pipeline(self, pipeline_id: str) -> Pipeline:
  """
  Run ingestion for the pipeline by incrementally updating the data-sink with upstream changes from data-sources & files.
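Because get_pipeline no longer embeds ingestion status, code that waited for ingestion to finish now polls this endpoint instead. A rough sketch, assuming the response exposes a status field holding a ManagedIngestionStatus value with SUCCESS and ERROR members; the field and member names are assumptions, not confirmed by this diff:

    import time

    from llama_cloud import ManagedIngestionStatus
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Poll until ingestion reaches a terminal state (attribute and member names assumed).
    while True:
        response = client.pipelines.get_pipeline_status(pipeline_id="pipeline-id")
        if response.status in (ManagedIngestionStatus.SUCCESS, ManagedIngestionStatus.ERROR):
            break
        time.sleep(5)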
@@ -606,7 +644,7 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_files_for_pipeline(self, pipeline_id: str) -> typing.List[PipelineFile]:
+ def list_pipeline_files(self, pipeline_id: str) -> typing.List[PipelineFile]:
  """
  Get files for a pipeline.

@@ -618,7 +656,7 @@ class PipelinesClient:
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.pipelines.get_files_for_pipeline(
+ client.pipelines.list_pipeline_files(
  pipeline_id="string",
  )
  """
@@ -676,7 +714,7 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> PipelineFileStatusResponse:
+ def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
  """
  Get status of a file for a pipeline.

@@ -704,7 +742,7 @@ class PipelinesClient:
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(PipelineFileStatusResponse, _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
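File-level status now comes back as the same ManagedIngestionStatusResponse used by the pipeline-level endpoint (PipelineFileStatusResponse is removed from the package), so callers that annotate the return type need to switch. A small sketch, again reusing a client built as in the examples above, with placeholder IDs:

    from llama_cloud import ManagedIngestionStatusResponse

    file_status: ManagedIngestionStatusResponse = client.pipelines.get_pipeline_file_status(
        pipeline_id="pipeline-id",
        file_id="file-id",
    )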
@@ -799,7 +837,7 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
+ def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
  """
  Get data sources for a pipeline.

@@ -811,7 +849,7 @@ class PipelinesClient:
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.pipelines.get_pipeline_data_sources(
+ client.pipelines.list_pipeline_data_sources(
  pipeline_id="string",
  )
  """
@@ -1026,7 +1064,7 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
+ def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
  """
  Get jobs for a pipeline.

@@ -1038,7 +1076,7 @@ class PipelinesClient:
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.pipelines.get_pipeline_jobs(
+ client.pipelines.list_pipeline_jobs(
  pipeline_id="string",
  )
  """
@@ -1290,7 +1328,7 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatus:
+ def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatusResponse:
  """
  Return a single document for a pipeline.

@@ -1319,7 +1357,7 @@ class PipelinesClient:
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ManagedIngestionStatus, _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
@@ -1394,6 +1432,7 @@ class AsyncPipelinesClient:
  DataSinkCreate,
  EvalExecutionParams,
  FilterCondition,
+ LlamaParseParameters,
  MetadataFilters,
  PipelineCreate,
  PipelineType,
@@ -1420,6 +1459,7 @@ class AsyncPipelinesClient:
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
  ),
+ llama_parse_parameters=LlamaParseParameters(),
  name="string",
  pipeline_type=PipelineType.PLAYGROUND,
  ),
@@ -1458,6 +1498,7 @@ class AsyncPipelinesClient:
  DataSinkCreate,
  EvalExecutionParams,
  FilterCondition,
+ LlamaParseParameters,
  MetadataFilters,
  PipelineCreate,
  PipelineType,
@@ -1484,6 +1525,7 @@ class AsyncPipelinesClient:
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
  ),
+ llama_parse_parameters=LlamaParseParameters(),
  name="string",
  pipeline_type=PipelineType.PLAYGROUND,
  ),
@@ -1507,16 +1549,12 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_pipeline(
- self, pipeline_id: str, *, with_managed_ingestion_status: typing.Optional[bool] = None
- ) -> Pipeline:
+ async def get_pipeline(self, pipeline_id: str) -> Pipeline:
  """
  Get a pipeline by ID for a given project.

  Parameters:
  - pipeline_id: str.
-
- - with_managed_ingestion_status: typing.Optional[bool].
  ---
  from llama_cloud.client import AsyncLlamaCloud

@@ -1530,7 +1568,6 @@ class AsyncPipelinesClient:
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
- params=remove_none_from_dict({"with_managed_ingestion_status": with_managed_ingestion_status}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1554,6 +1591,7 @@ class AsyncPipelinesClient:
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
  llama_parse_enabled: typing.Optional[bool] = OMIT,
+ llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
  name: typing.Optional[str] = OMIT,
  managed_pipeline_id: typing.Optional[str] = OMIT,
  ) -> Pipeline:
@@ -1575,6 +1613,8 @@ class AsyncPipelinesClient:

  - llama_parse_enabled: typing.Optional[bool]. Whether to use LlamaParse during pipeline execution.

+ - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
+
  - name: typing.Optional[str].

  - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
@@ -1584,6 +1624,7 @@ class AsyncPipelinesClient:
  DataSinkCreate,
  EvalExecutionParams,
  FilterCondition,
+ LlamaParseParameters,
  MetadataFilters,
  PresetRetrievalParams,
  SupportedEvalLlmModelNames,
@@ -1608,6 +1649,7 @@ class AsyncPipelinesClient:
  eval_parameters=EvalExecutionParams(
  llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
  ),
+ llama_parse_parameters=LlamaParseParameters(),
  )
  """
  _request: typing.Dict[str, typing.Any] = {}
@@ -1623,6 +1665,8 @@ class AsyncPipelinesClient:
  _request["eval_parameters"] = eval_parameters
  if llama_parse_enabled is not OMIT:
  _request["llama_parse_enabled"] = llama_parse_enabled
+ if llama_parse_parameters is not OMIT:
+ _request["llama_parse_parameters"] = llama_parse_parameters
  if name is not OMIT:
  _request["name"] = name
  if managed_pipeline_id is not OMIT:
@@ -1676,6 +1720,38 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def get_pipeline_status(self, pipeline_id: str) -> ManagedIngestionStatusResponse:
+ """
+ Get the status of a pipeline by ID.
+
+ Parameters:
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.get_pipeline_status(
+ pipeline_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/status"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def sync_pipeline(self, pipeline_id: str) -> Pipeline:
  """
  Run ingestion for the pipeline by incrementally updating the data-sink with upstream changes from data-sources & files.
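The async client mirrors the new status endpoint. A small sketch of driving it with asyncio, with a placeholder pipeline ID:

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        status = await client.pipelines.get_pipeline_status(pipeline_id="pipeline-id")
        print(status)

    asyncio.run(main())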
@@ -1892,7 +1968,7 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_files_for_pipeline(self, pipeline_id: str) -> typing.List[PipelineFile]:
+ async def list_pipeline_files(self, pipeline_id: str) -> typing.List[PipelineFile]:
  """
  Get files for a pipeline.

@@ -1904,7 +1980,7 @@ class AsyncPipelinesClient:
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.pipelines.get_files_for_pipeline(
+ await client.pipelines.list_pipeline_files(
  pipeline_id="string",
  )
  """
@@ -1962,7 +2038,7 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> PipelineFileStatusResponse:
+ async def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
  """
  Get status of a file for a pipeline.

@@ -1990,7 +2066,7 @@ class AsyncPipelinesClient:
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(PipelineFileStatusResponse, _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
@@ -2085,7 +2161,7 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
+ async def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
  """
  Get data sources for a pipeline.

@@ -2097,7 +2173,7 @@ class AsyncPipelinesClient:
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.pipelines.get_pipeline_data_sources(
+ await client.pipelines.list_pipeline_data_sources(
  pipeline_id="string",
  )
  """
@@ -2312,7 +2388,7 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
+ async def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
  """
  Get jobs for a pipeline.

@@ -2324,7 +2400,7 @@ class AsyncPipelinesClient:
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.pipelines.get_pipeline_jobs(
+ await client.pipelines.list_pipeline_jobs(
  pipeline_id="string",
  )
  """
@@ -2576,7 +2652,7 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatus:
+ async def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatusResponse:
  """
  Return a single document for a pipeline.

@@ -2605,7 +2681,7 @@ class AsyncPipelinesClient:
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ManagedIngestionStatus, _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
@@ -238,9 +238,9 @@ class ProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
+ def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
  """
- Get all eval datasets for a project.
+ List eval datasets for a project.

  Parameters:
  - project_id: str.
@@ -250,7 +250,7 @@ class ProjectsClient:
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.projects.get_datasets_for_project(
+ client.projects.list_datasets_for_project(
  project_id="string",
  )
  """
@@ -353,9 +353,9 @@ class ProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
+ def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
  """
- Get all local eval results for a project.
+ List local eval results for a project.

  Parameters:
  - project_id: str.
@@ -365,7 +365,7 @@ class ProjectsClient:
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.projects.get_local_evals_for_project(
+ client.projects.list_local_evals_for_project(
  project_id="string",
  )
  """
@@ -385,9 +385,9 @@ class ProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
+ def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
  """
- Get all local eval sets for a project.
+ List local eval sets for a project.

  Parameters:
  - project_id: str.
@@ -397,7 +397,7 @@ class ProjectsClient:
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.projects.get_local_eval_sets_for_project(
+ client.projects.list_local_eval_sets_for_project(
  project_id="string",
  )
  """
@@ -457,9 +457,9 @@ class ProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
+ def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
  """
- Get all PromptMixin prompt sets for a project.
+ List PromptMixin prompt sets for a project.

  Parameters:
  - project_id: str.
@@ -469,7 +469,7 @@ class ProjectsClient:
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.projects.get_promptmixin_prompts(
+ client.projects.list_promptmixin_prompts(
  project_id="string",
  )
  """
@@ -824,9 +824,9 @@ class AsyncProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
+ async def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
  """
- Get all eval datasets for a project.
+ List eval datasets for a project.

  Parameters:
  - project_id: str.
@@ -836,7 +836,7 @@ class AsyncProjectsClient:
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.projects.get_datasets_for_project(
+ await client.projects.list_datasets_for_project(
  project_id="string",
  )
  """
@@ -939,9 +939,9 @@ class AsyncProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
+ async def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
  """
- Get all local eval results for a project.
+ List local eval results for a project.

  Parameters:
  - project_id: str.
@@ -951,7 +951,7 @@ class AsyncProjectsClient:
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.projects.get_local_evals_for_project(
+ await client.projects.list_local_evals_for_project(
  project_id="string",
  )
  """
@@ -971,9 +971,9 @@ class AsyncProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
+ async def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
  """
- Get all local eval sets for a project.
+ List local eval sets for a project.

  Parameters:
  - project_id: str.
@@ -983,7 +983,7 @@ class AsyncProjectsClient:
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.projects.get_local_eval_sets_for_project(
+ await client.projects.list_local_eval_sets_for_project(
  project_id="string",
  )
  """
@@ -1043,9 +1043,9 @@ class AsyncProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
+ async def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
  """
- Get all PromptMixin prompt sets for a project.
+ List PromptMixin prompt sets for a project.

  Parameters:
  - project_id: str.
@@ -1055,7 +1055,7 @@ class AsyncProjectsClient:
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.projects.get_promptmixin_prompts(
+ await client.projects.list_promptmixin_prompts(
  project_id="string",
  )
  """
@@ -62,12 +62,14 @@ from .http_validation_error import HttpValidationError
  from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
  from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
  from .json_node_parser import JsonNodeParser
+ from .llama_parse_parameters import LlamaParseParameters
  from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
  from .llm import Llm
  from .local_eval import LocalEval
  from .local_eval_results import LocalEvalResults
  from .local_eval_sets import LocalEvalSets
  from .managed_ingestion_status import ManagedIngestionStatus
+ from .managed_ingestion_status_response import ManagedIngestionStatusResponse
  from .markdown_element_node_parser import MarkdownElementNodeParser
  from .markdown_node_parser import MarkdownNodeParser
  from .message_role import MessageRole
@@ -99,7 +101,6 @@ from .pipeline_file_create import PipelineFileCreate
  from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
  from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
  from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
- from .pipeline_file_status_response import PipelineFileStatusResponse
  from .pipeline_type import PipelineType
  from .pooling import Pooling
  from .preset_retrieval_params import PresetRetrievalParams
@@ -187,12 +188,14 @@ __all__ = [
  "HuggingFaceInferenceApiEmbedding",
  "HuggingFaceInferenceApiEmbeddingToken",
  "JsonNodeParser",
+ "LlamaParseParameters",
  "LlamaParseSupportedFileExtensions",
  "Llm",
  "LocalEval",
  "LocalEvalResults",
  "LocalEvalSets",
  "ManagedIngestionStatus",
+ "ManagedIngestionStatusResponse",
  "MarkdownElementNodeParser",
  "MarkdownNodeParser",
  "MessageRole",
@@ -224,7 +227,6 @@ __all__ = [
  "PipelineFileCreateCustomMetadataValue",
  "PipelineFileCustomMetadataValue",
  "PipelineFileResourceInfoValue",
- "PipelineFileStatusResponse",
  "PipelineType",
  "Pooling",
  "PresetRetrievalParams",