llama-cloud 0.1.5__py3-none-any.whl → 0.1.7a1__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic.

Files changed (129)
  1. llama_cloud/__init__.py +138 -2
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/resources/__init__.py +17 -1
  4. llama_cloud/resources/chat_apps/__init__.py +2 -0
  5. llama_cloud/resources/chat_apps/client.py +620 -0
  6. llama_cloud/resources/data_sinks/client.py +2 -2
  7. llama_cloud/resources/data_sources/client.py +2 -2
  8. llama_cloud/resources/embedding_model_configs/client.py +4 -4
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +21 -0
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +14 -14
  21. llama_cloud/resources/parsing/client.py +480 -229
  22. llama_cloud/resources/pipelines/client.py +182 -126
  23. llama_cloud/resources/projects/client.py +210 -102
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +124 -2
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +2 -2
  32. llama_cloud/types/chat_app.py +44 -0
  33. llama_cloud/types/chat_app_response.py +41 -0
  34. llama_cloud/types/cloud_az_storage_blob_data_source.py +1 -0
  35. llama_cloud/types/cloud_box_data_source.py +1 -0
  36. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  37. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  38. llama_cloud/types/cloud_jira_data_source.py +1 -0
  39. llama_cloud/types/cloud_notion_page_data_source.py +1 -0
  40. llama_cloud/types/cloud_one_drive_data_source.py +1 -0
  41. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  42. llama_cloud/types/cloud_s_3_data_source.py +1 -0
  43. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  44. llama_cloud/types/cloud_slack_data_source.py +1 -0
  45. llama_cloud/types/composite_retrieval_mode.py +21 -0
  46. llama_cloud/types/composite_retrieval_result.py +38 -0
  47. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  48. llama_cloud/types/data_sink.py +1 -1
  49. llama_cloud/types/data_sink_create.py +1 -1
  50. llama_cloud/types/data_source.py +1 -1
  51. llama_cloud/types/data_source_create.py +1 -1
  52. llama_cloud/types/edit_suggestion.py +39 -0
  53. llama_cloud/types/eval_dataset_job_record.py +1 -0
  54. llama_cloud/types/extract_agent.py +45 -0
  55. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  56. llama_cloud/types/extract_config.py +40 -0
  57. llama_cloud/types/extract_job.py +35 -0
  58. llama_cloud/types/extract_job_create.py +40 -0
  59. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  60. llama_cloud/types/extract_mode.py +17 -0
  61. llama_cloud/types/extract_resultset.py +46 -0
  62. llama_cloud/types/extract_resultset_data.py +11 -0
  63. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  64. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  65. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  66. llama_cloud/types/file.py +3 -0
  67. llama_cloud/types/file_permission_info_value.py +5 -0
  68. llama_cloud/types/filter_condition.py +9 -1
  69. llama_cloud/types/filter_operator.py +4 -0
  70. llama_cloud/types/image_block.py +35 -0
  71. llama_cloud/types/input_message.py +1 -1
  72. llama_cloud/types/job_name_mapping.py +4 -0
  73. llama_cloud/types/job_names.py +89 -0
  74. llama_cloud/types/job_record.py +57 -0
  75. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  76. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  77. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  78. llama_cloud/types/llama_parse_parameters.py +15 -0
  79. llama_cloud/types/llm.py +1 -0
  80. llama_cloud/types/llm_model_data.py +1 -0
  81. llama_cloud/types/llm_parameters.py +1 -0
  82. llama_cloud/types/managed_ingestion_status.py +4 -0
  83. llama_cloud/types/managed_ingestion_status_response.py +1 -0
  84. llama_cloud/types/object_type.py +4 -0
  85. llama_cloud/types/organization.py +5 -0
  86. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  87. llama_cloud/types/paginated_report_response.py +35 -0
  88. llama_cloud/types/parse_plan_level.py +21 -0
  89. llama_cloud/types/parsing_job_structured_result.py +32 -0
  90. llama_cloud/types/pipeline_create.py +3 -1
  91. llama_cloud/types/pipeline_data_source.py +1 -1
  92. llama_cloud/types/pipeline_file.py +3 -0
  93. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  94. llama_cloud/types/playground_session.py +2 -2
  95. llama_cloud/types/preset_retrieval_params.py +1 -0
  96. llama_cloud/types/progress_event.py +44 -0
  97. llama_cloud/types/progress_event_status.py +33 -0
  98. llama_cloud/types/prompt_spec.py +2 -2
  99. llama_cloud/types/related_node_info.py +2 -2
  100. llama_cloud/types/related_node_info_node_type.py +7 -0
  101. llama_cloud/types/report.py +33 -0
  102. llama_cloud/types/report_block.py +34 -0
  103. llama_cloud/types/report_block_dependency.py +29 -0
  104. llama_cloud/types/report_create_response.py +31 -0
  105. llama_cloud/types/report_event_item.py +40 -0
  106. llama_cloud/types/report_event_item_event_data.py +45 -0
  107. llama_cloud/types/report_event_type.py +37 -0
  108. llama_cloud/types/report_metadata.py +39 -0
  109. llama_cloud/types/report_plan.py +36 -0
  110. llama_cloud/types/report_plan_block.py +36 -0
  111. llama_cloud/types/report_query.py +33 -0
  112. llama_cloud/types/report_response.py +41 -0
  113. llama_cloud/types/report_state.py +37 -0
  114. llama_cloud/types/report_state_event.py +38 -0
  115. llama_cloud/types/report_update_event.py +38 -0
  116. llama_cloud/types/retrieve_results.py +1 -1
  117. llama_cloud/types/retriever.py +45 -0
  118. llama_cloud/types/retriever_create.py +37 -0
  119. llama_cloud/types/retriever_pipeline.py +37 -0
  120. llama_cloud/types/status_enum.py +4 -0
  121. llama_cloud/types/supported_llm_model_names.py +4 -0
  122. llama_cloud/types/text_block.py +31 -0
  123. llama_cloud/types/text_node.py +13 -6
  124. llama_cloud/types/usage_metric_response.py +34 -0
  125. llama_cloud/types/user_job_record.py +32 -0
  126. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/METADATA +3 -1
  127. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/RECORD +129 -59
  128. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/WHEEL +1 -1
  129. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/LICENSE +0 -0
@@ -380,6 +380,29 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def cancel_pipeline_sync(self, pipeline_id: str) -> Pipeline:
+        """
+        Parameters:
+            - pipeline_id: str.
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/sync/cancel"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def copy_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Copy a pipeline by ID.
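The hunk above adds a `cancel_pipeline_sync` method that POSTs to `api/v1/pipelines/{pipeline_id}/sync/cancel` and returns the updated `Pipeline`. A minimal usage sketch in the style of the docstring examples elsewhere in this generated client (token and ID values are placeholders):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)
# Cancel the pipeline's running sync; the endpoint returns the Pipeline object
client.pipelines.cancel_pipeline_sync(
    pipeline_id="string",
)
```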
@@ -403,14 +426,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_eval_dataset_executions(self, pipeline_id: str, eval_dataset_id: str) -> typing.List[EvalDatasetJobRecord]:
+    def get_eval_dataset_executions(self, eval_dataset_id: str, pipeline_id: str) -> typing.List[EvalDatasetJobRecord]:
         """
         Get the status of an EvalDatasetExecution.
 
         Parameters:
-            - pipeline_id: str.
-
             - eval_dataset_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -418,8 +441,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.get_eval_dataset_executions(
-            pipeline_id="string",
             eval_dataset_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
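Here, and in the similar hunks below, the positional order of the path parameters changes from `(pipeline_id, …)` to `(…, pipeline_id)`. Code written against 0.1.5 that passes these IDs positionally would silently bind them to the wrong parameters after upgrading; keyword arguments, as used in the docstring examples, are unaffected. A sketch of the safe call style:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Keyword arguments keep working across the 0.1.5 -> 0.1.7a1 reordering
executions = client.pipelines.get_eval_dataset_executions(
    eval_dataset_id="string",
    pipeline_id="string",
)

# A positional call written against 0.1.5 would now swap the two IDs, so avoid:
# client.pipelines.get_eval_dataset_executions("PIPELINE_ID", "EVAL_DATASET_ID")
```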
@@ -443,8 +466,8 @@ class PipelinesClient:
 
     def execute_eval_dataset(
         self,
-        pipeline_id: str,
         eval_dataset_id: str,
+        pipeline_id: str,
         *,
         eval_question_ids: typing.List[str],
         params: typing.Optional[EvalExecutionParamsOverride] = OMIT,
@@ -453,10 +476,10 @@ class PipelinesClient:
         Execute a dataset.
 
         Parameters:
-            - pipeline_id: str.
-
             - eval_dataset_id: str.
 
+            - pipeline_id: str.
+
             - eval_question_ids: typing.List[str].
 
             - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
@@ -468,8 +491,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.execute_eval_dataset(
-            pipeline_id="string",
             eval_dataset_id="string",
+            pipeline_id="string",
             eval_question_ids=[],
             params=EvalExecutionParamsOverride(
                 llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
@@ -500,7 +523,7 @@ class PipelinesClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def get_eval_dataset_execution_result(
-        self, pipeline_id: str, eval_dataset_id: str
+        self, eval_dataset_id: str, pipeline_id: str
     ) -> typing.List[EvalQuestionResult]:
         """
         Get the result of an EvalDatasetExecution.
@@ -509,9 +532,9 @@ class PipelinesClient:
         If any of the specified questions do not have a result, they will be ignored.
 
         Parameters:
-            - pipeline_id: str.
-
             - eval_dataset_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -519,8 +542,8 @@ class PipelinesClient:
            token="YOUR_TOKEN",
         )
         client.pipelines.get_eval_dataset_execution_result(
-            pipeline_id="string",
             eval_dataset_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -543,17 +566,17 @@ class PipelinesClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def get_eval_dataset_execution(
-        self, pipeline_id: str, eval_dataset_id: str, eval_dataset_execution_id: str
+        self, eval_dataset_id: str, eval_dataset_execution_id: str, pipeline_id: str
     ) -> EvalDatasetJobRecord:
         """
         Get the status of an EvalDatasetExecution.
 
         Parameters:
-            - pipeline_id: str.
-
             - eval_dataset_id: str.
 
             - eval_dataset_execution_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -561,9 +584,9 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.get_eval_dataset_execution(
-            pipeline_id="string",
             eval_dataset_id="string",
             eval_dataset_execution_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -724,14 +747,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
+    def get_pipeline_file_status(self, file_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
         """
         Get status of a file for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - file_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -739,8 +762,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.get_pipeline_file_status(
-            pipeline_id="string",
             file_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -763,8 +786,8 @@ class PipelinesClient:
 
     def update_pipeline_file(
         self,
-        pipeline_id: str,
         file_id: str,
+        pipeline_id: str,
         *,
         custom_metadata: typing.Optional[
             typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
@@ -774,10 +797,10 @@ class PipelinesClient:
         Update a file for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - file_id: str.
 
+            - pipeline_id: str.
+
             - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
         ---
         from llama_cloud.client import LlamaCloud
@@ -786,8 +809,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.update_pipeline_file(
-            pipeline_id="string",
             file_id="string",
+            pipeline_id="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {}
@@ -812,14 +835,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_pipeline_file(self, pipeline_id: str, file_id: str) -> None:
+    def delete_pipeline_file(self, file_id: str, pipeline_id: str) -> None:
         """
         Delete a file from a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - file_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -827,8 +850,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.delete_pipeline_file(
-            pipeline_id="string",
             file_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -983,16 +1006,16 @@ class PipelinesClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def update_pipeline_data_source(
-        self, pipeline_id: str, data_source_id: str, *, sync_interval: typing.Optional[float] = OMIT
+        self, data_source_id: str, pipeline_id: str, *, sync_interval: typing.Optional[float] = OMIT
    ) -> PipelineDataSource:
         """
         Update the configuration of a data source in a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - data_source_id: str.
 
+            - pipeline_id: str.
+
             - sync_interval: typing.Optional[float].
         ---
         from llama_cloud.client import LlamaCloud
@@ -1001,8 +1024,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.update_pipeline_data_source(
-            pipeline_id="string",
             data_source_id="string",
+            pipeline_id="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {}
@@ -1028,14 +1051,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> None:
+    def delete_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> None:
         """
         Delete a data source from a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - data_source_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -1043,8 +1066,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.delete_pipeline_data_source(
-            pipeline_id="string",
             data_source_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1066,14 +1089,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def sync_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> Pipeline:
+    def sync_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> Pipeline:
         """
         Run ingestion for the pipeline data source by incrementally updating the data-sink with upstream changes from data-source.
 
         Parameters:
-            - pipeline_id: str.
-
             - data_source_id: str.
+
+            - pipeline_id: str.
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
@@ -1094,14 +1117,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_pipeline_data_source_status(self, pipeline_id: str, data_source_id: str) -> ManagedIngestionStatusResponse:
+    def get_pipeline_data_source_status(self, data_source_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
         """
         Get the status of a data source for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - data_source_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -1109,8 +1132,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.get_pipeline_data_source_status(
-            pipeline_id="string",
             data_source_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1137,6 +1160,7 @@ class PipelinesClient:
         pipeline_id: str,
         *,
         dense_similarity_top_k: typing.Optional[int] = OMIT,
+        dense_similarity_cutoff: typing.Optional[float] = OMIT,
         sparse_similarity_top_k: typing.Optional[int] = OMIT,
         enable_reranking: typing.Optional[bool] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
@@ -1156,6 +1180,8 @@ class PipelinesClient:
 
             - dense_similarity_top_k: typing.Optional[int].
 
+            - dense_similarity_cutoff: typing.Optional[float].
+
             - sparse_similarity_top_k: typing.Optional[int].
 
             - enable_reranking: typing.Optional[bool].
@@ -1195,6 +1221,8 @@ class PipelinesClient:
         _request: typing.Dict[str, typing.Any] = {"query": query}
         if dense_similarity_top_k is not OMIT:
             _request["dense_similarity_top_k"] = dense_similarity_top_k
+        if dense_similarity_cutoff is not OMIT:
+            _request["dense_similarity_cutoff"] = dense_similarity_cutoff
         if sparse_similarity_top_k is not OMIT:
             _request["sparse_similarity_top_k"] = sparse_similarity_top_k
         if enable_reranking is not OMIT:
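The three hunks above add an optional `dense_similarity_cutoff` field next to `dense_similarity_top_k` in the retrieval signature, docstring, and request body; it is only sent when a value is supplied. The enclosing method's name is not visible in these hunks, so the sketch below assumes it is the pipelines run-search call of this client:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# "run_search" is an assumption: the method name sits outside the hunks shown here.
results = client.pipelines.run_search(
    pipeline_id="string",
    query="string",
    dense_similarity_top_k=5,
    dense_similarity_cutoff=0.7,  # new in 0.1.7a1; omitted from the request body when not set
)
```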
@@ -1262,14 +1290,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_pipeline_job(self, pipeline_id: str, job_id: str) -> PipelineDeployment:
+    def get_pipeline_job(self, job_id: str, pipeline_id: str) -> PipelineDeployment:
         """
         Get a job for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - job_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -1277,8 +1305,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.get_pipeline_job(
-            pipeline_id="string",
             job_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1540,14 +1568,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_pipeline_document(self, pipeline_id: str, document_id: str) -> CloudDocument:
+    def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
         """
         Return a single document for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - document_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -1555,8 +1583,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.get_pipeline_document(
-            pipeline_id="string",
             document_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1577,14 +1605,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_pipeline_document(self, pipeline_id: str, document_id: str) -> None:
+    def delete_pipeline_document(self, document_id: str, pipeline_id: str) -> None:
         """
         Delete a document for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - document_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -1592,8 +1620,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.delete_pipeline_document(
-            pipeline_id="string",
             document_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1614,14 +1642,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatusResponse:
+    def get_pipeline_document_status(self, document_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
         """
         Return a single document for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - document_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -1629,8 +1657,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.get_pipeline_document_status(
-            pipeline_id="string",
             document_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1652,14 +1680,14 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_pipeline_document_chunks(self, pipeline_id: str, document_id: str) -> typing.List[TextNode]:
+    def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
         """
         Return a list of chunks for a pipeline document.
 
         Parameters:
-            - pipeline_id: str.
-
             - document_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -1667,8 +1695,8 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.list_pipeline_document_chunks(
-            pipeline_id="string",
             document_id="string",
+            pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -2018,6 +2046,29 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def cancel_pipeline_sync(self, pipeline_id: str) -> Pipeline:
+        """
+        Parameters:
+            - pipeline_id: str.
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/sync/cancel"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def copy_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Copy a pipeline by ID.
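The async client gains the same method. A minimal sketch of awaiting it, mirroring the AsyncLlamaCloud examples in the surrounding docstrings:

```python
import asyncio

from llama_cloud.client import AsyncLlamaCloud

client = AsyncLlamaCloud(token="YOUR_TOKEN")


async def main() -> None:
    # Same POST to api/v1/pipelines/{pipeline_id}/sync/cancel, awaited on the async client
    pipeline = await client.pipelines.cancel_pipeline_sync(
        pipeline_id="string",
    )


asyncio.run(main())
```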
@@ -2042,15 +2093,15 @@ class AsyncPipelinesClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def get_eval_dataset_executions(
-        self, pipeline_id: str, eval_dataset_id: str
+        self, eval_dataset_id: str, pipeline_id: str
     ) -> typing.List[EvalDatasetJobRecord]:
         """
         Get the status of an EvalDatasetExecution.
 
         Parameters:
-            - pipeline_id: str.
-
             - eval_dataset_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2058,8 +2109,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.get_eval_dataset_executions(
-            pipeline_id="string",
             eval_dataset_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2083,8 +2134,8 @@ class AsyncPipelinesClient:
 
     async def execute_eval_dataset(
         self,
-        pipeline_id: str,
         eval_dataset_id: str,
+        pipeline_id: str,
         *,
         eval_question_ids: typing.List[str],
         params: typing.Optional[EvalExecutionParamsOverride] = OMIT,
@@ -2093,10 +2144,10 @@ class AsyncPipelinesClient:
         Execute a dataset.
 
         Parameters:
-            - pipeline_id: str.
-
             - eval_dataset_id: str.
 
+            - pipeline_id: str.
+
             - eval_question_ids: typing.List[str].
 
             - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
@@ -2108,8 +2159,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.execute_eval_dataset(
-            pipeline_id="string",
             eval_dataset_id="string",
+            pipeline_id="string",
             eval_question_ids=[],
             params=EvalExecutionParamsOverride(
                 llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
@@ -2140,7 +2191,7 @@ class AsyncPipelinesClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def get_eval_dataset_execution_result(
-        self, pipeline_id: str, eval_dataset_id: str
+        self, eval_dataset_id: str, pipeline_id: str
     ) -> typing.List[EvalQuestionResult]:
         """
         Get the result of an EvalDatasetExecution.
@@ -2149,9 +2200,9 @@ class AsyncPipelinesClient:
         If any of the specified questions do not have a result, they will be ignored.
 
         Parameters:
-            - pipeline_id: str.
-
             - eval_dataset_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2159,8 +2210,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.get_eval_dataset_execution_result(
-            pipeline_id="string",
             eval_dataset_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2183,17 +2234,17 @@ class AsyncPipelinesClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def get_eval_dataset_execution(
-        self, pipeline_id: str, eval_dataset_id: str, eval_dataset_execution_id: str
+        self, eval_dataset_id: str, eval_dataset_execution_id: str, pipeline_id: str
     ) -> EvalDatasetJobRecord:
         """
         Get the status of an EvalDatasetExecution.
 
         Parameters:
-            - pipeline_id: str.
-
             - eval_dataset_id: str.
 
             - eval_dataset_execution_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2201,9 +2252,9 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.get_eval_dataset_execution(
-            pipeline_id="string",
             eval_dataset_id="string",
             eval_dataset_execution_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2364,14 +2415,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
+    async def get_pipeline_file_status(self, file_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
         """
         Get status of a file for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - file_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2379,8 +2430,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.get_pipeline_file_status(
-            pipeline_id="string",
             file_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2403,8 +2454,8 @@ class AsyncPipelinesClient:
 
     async def update_pipeline_file(
         self,
-        pipeline_id: str,
         file_id: str,
+        pipeline_id: str,
         *,
         custom_metadata: typing.Optional[
             typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
@@ -2414,10 +2465,10 @@ class AsyncPipelinesClient:
         Update a file for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - file_id: str.
 
+            - pipeline_id: str.
+
             - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -2426,8 +2477,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.update_pipeline_file(
-            pipeline_id="string",
             file_id="string",
+            pipeline_id="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {}
@@ -2452,14 +2503,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_pipeline_file(self, pipeline_id: str, file_id: str) -> None:
+    async def delete_pipeline_file(self, file_id: str, pipeline_id: str) -> None:
         """
         Delete a file from a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - file_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2467,8 +2518,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.delete_pipeline_file(
-            pipeline_id="string",
             file_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2623,16 +2674,16 @@ class AsyncPipelinesClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def update_pipeline_data_source(
-        self, pipeline_id: str, data_source_id: str, *, sync_interval: typing.Optional[float] = OMIT
+        self, data_source_id: str, pipeline_id: str, *, sync_interval: typing.Optional[float] = OMIT
     ) -> PipelineDataSource:
         """
         Update the configuration of a data source in a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - data_source_id: str.
 
+            - pipeline_id: str.
+
             - sync_interval: typing.Optional[float].
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -2641,8 +2692,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.update_pipeline_data_source(
-            pipeline_id="string",
             data_source_id="string",
+            pipeline_id="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {}
@@ -2668,14 +2719,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> None:
+    async def delete_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> None:
         """
         Delete a data source from a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - data_source_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2683,8 +2734,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.delete_pipeline_data_source(
-            pipeline_id="string",
             data_source_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2706,14 +2757,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def sync_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> Pipeline:
+    async def sync_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> Pipeline:
         """
         Run ingestion for the pipeline data source by incrementally updating the data-sink with upstream changes from data-source.
 
         Parameters:
-            - pipeline_id: str.
-
             - data_source_id: str.
+
+            - pipeline_id: str.
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
@@ -2735,15 +2786,15 @@ class AsyncPipelinesClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def get_pipeline_data_source_status(
-        self, pipeline_id: str, data_source_id: str
+        self, data_source_id: str, pipeline_id: str
     ) -> ManagedIngestionStatusResponse:
         """
         Get the status of a data source for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - data_source_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2751,8 +2802,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.get_pipeline_data_source_status(
-            pipeline_id="string",
             data_source_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2779,6 +2830,7 @@ class AsyncPipelinesClient:
         pipeline_id: str,
         *,
         dense_similarity_top_k: typing.Optional[int] = OMIT,
+        dense_similarity_cutoff: typing.Optional[float] = OMIT,
         sparse_similarity_top_k: typing.Optional[int] = OMIT,
         enable_reranking: typing.Optional[bool] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
@@ -2798,6 +2850,8 @@ class AsyncPipelinesClient:
 
             - dense_similarity_top_k: typing.Optional[int].
 
+            - dense_similarity_cutoff: typing.Optional[float].
+
             - sparse_similarity_top_k: typing.Optional[int].
 
             - enable_reranking: typing.Optional[bool].
@@ -2837,6 +2891,8 @@ class AsyncPipelinesClient:
         _request: typing.Dict[str, typing.Any] = {"query": query}
         if dense_similarity_top_k is not OMIT:
             _request["dense_similarity_top_k"] = dense_similarity_top_k
+        if dense_similarity_cutoff is not OMIT:
+            _request["dense_similarity_cutoff"] = dense_similarity_cutoff
         if sparse_similarity_top_k is not OMIT:
             _request["sparse_similarity_top_k"] = sparse_similarity_top_k
         if enable_reranking is not OMIT:
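As with the sync client, the async request body only includes `dense_similarity_cutoff` when a value is supplied. A brief async sketch under the same assumption about the method name:

```python
import asyncio

from llama_cloud.client import AsyncLlamaCloud

client = AsyncLlamaCloud(token="YOUR_TOKEN")


async def search_with_cutoff() -> None:
    # "run_search" is again an assumption; only the request-body handling is shown in the hunk above.
    await client.pipelines.run_search(
        pipeline_id="string",
        query="string",
        dense_similarity_cutoff=0.7,
    )


asyncio.run(search_with_cutoff())
```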
@@ -2904,14 +2960,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_pipeline_job(self, pipeline_id: str, job_id: str) -> PipelineDeployment:
+    async def get_pipeline_job(self, job_id: str, pipeline_id: str) -> PipelineDeployment:
         """
         Get a job for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - job_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2919,8 +2975,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
        )
         await client.pipelines.get_pipeline_job(
-            pipeline_id="string",
             job_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -3182,14 +3238,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_pipeline_document(self, pipeline_id: str, document_id: str) -> CloudDocument:
+    async def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
         """
         Return a single document for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - document_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -3197,8 +3253,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.get_pipeline_document(
-            pipeline_id="string",
             document_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -3219,14 +3275,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_pipeline_document(self, pipeline_id: str, document_id: str) -> None:
+    async def delete_pipeline_document(self, document_id: str, pipeline_id: str) -> None:
         """
         Delete a document for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - document_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -3234,8 +3290,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.delete_pipeline_document(
-            pipeline_id="string",
             document_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -3256,14 +3312,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatusResponse:
+    async def get_pipeline_document_status(self, document_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
         """
         Return a single document for a pipeline.
 
         Parameters:
-            - pipeline_id: str.
-
             - document_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -3271,8 +3327,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
        )
         await client.pipelines.get_pipeline_document_status(
-            pipeline_id="string",
             document_id="string",
+            pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -3294,14 +3350,14 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_pipeline_document_chunks(self, pipeline_id: str, document_id: str) -> typing.List[TextNode]:
+    async def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
         """
         Return a list of chunks for a pipeline document.
 
         Parameters:
-            - pipeline_id: str.
-
             - document_id: str.
+
+            - pipeline_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -3309,8 +3365,8 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
        )
         await client.pipelines.list_pipeline_document_chunks(
-            pipeline_id="string",
             document_id="string",
+            pipeline_id="string",
        )
         """
         _response = await self._client_wrapper.httpx_client.request(