llama-cloud 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (102)
  1. llama_cloud/client.py +2 -2
  2. llama_cloud/core/jsonable_encoder.py +3 -0
  3. llama_cloud/resources/api_keys/client.py +19 -16
  4. llama_cloud/resources/billing/client.py +15 -12
  5. llama_cloud/resources/component_definitions/client.py +15 -12
  6. llama_cloud/resources/data_sinks/client.py +33 -30
  7. llama_cloud/resources/data_sources/client.py +33 -30
  8. llama_cloud/resources/deprecated/client.py +51 -48
  9. llama_cloud/resources/evals/client.py +47 -44
  10. llama_cloud/resources/files/client.py +27 -24
  11. llama_cloud/resources/parsing/client.py +51 -48
  12. llama_cloud/resources/pipelines/client.py +238 -164
  13. llama_cloud/resources/projects/client.py +75 -72
  14. llama_cloud/types/api_key.py +3 -0
  15. llama_cloud/types/azure_open_ai_embedding.py +3 -0
  16. llama_cloud/types/base.py +3 -0
  17. llama_cloud/types/base_prompt_template.py +3 -0
  18. llama_cloud/types/bedrock_embedding.py +3 -0
  19. llama_cloud/types/chat_message.py +3 -0
  20. llama_cloud/types/cloud_az_storage_blob_data_source.py +3 -0
  21. llama_cloud/types/cloud_chroma_vector_store.py +3 -0
  22. llama_cloud/types/cloud_document.py +3 -0
  23. llama_cloud/types/cloud_document_create.py +3 -0
  24. llama_cloud/types/cloud_gcs_data_source.py +3 -0
  25. llama_cloud/types/cloud_google_drive_data_source.py +3 -0
  26. llama_cloud/types/cloud_one_drive_data_source.py +3 -0
  27. llama_cloud/types/cloud_pinecone_vector_store.py +3 -0
  28. llama_cloud/types/cloud_postgres_vector_store.py +3 -0
  29. llama_cloud/types/cloud_qdrant_vector_store.py +3 -0
  30. llama_cloud/types/cloud_s_3_data_source.py +3 -0
  31. llama_cloud/types/cloud_sharepoint_data_source.py +3 -0
  32. llama_cloud/types/cloud_weaviate_vector_store.py +3 -0
  33. llama_cloud/types/code_splitter.py +3 -0
  34. llama_cloud/types/cohere_embedding.py +3 -0
  35. llama_cloud/types/configurable_transformation_definition.py +3 -0
  36. llama_cloud/types/configured_transformation_item.py +3 -0
  37. llama_cloud/types/data_sink.py +3 -0
  38. llama_cloud/types/data_sink_create.py +3 -0
  39. llama_cloud/types/data_sink_definition.py +3 -0
  40. llama_cloud/types/data_source.py +3 -0
  41. llama_cloud/types/data_source_create.py +3 -0
  42. llama_cloud/types/data_source_definition.py +3 -0
  43. llama_cloud/types/eval_dataset.py +3 -0
  44. llama_cloud/types/eval_dataset_job_params.py +3 -0
  45. llama_cloud/types/eval_dataset_job_record.py +3 -0
  46. llama_cloud/types/eval_execution_params.py +3 -0
  47. llama_cloud/types/eval_execution_params_override.py +3 -0
  48. llama_cloud/types/eval_llm_model_data.py +3 -0
  49. llama_cloud/types/eval_question.py +3 -0
  50. llama_cloud/types/eval_question_create.py +3 -0
  51. llama_cloud/types/eval_question_result.py +3 -0
  52. llama_cloud/types/file.py +3 -0
  53. llama_cloud/types/gemini_embedding.py +3 -0
  54. llama_cloud/types/html_node_parser.py +3 -0
  55. llama_cloud/types/http_validation_error.py +3 -0
  56. llama_cloud/types/hugging_face_inference_api_embedding.py +3 -0
  57. llama_cloud/types/json_node_parser.py +3 -0
  58. llama_cloud/types/llm.py +3 -0
  59. llama_cloud/types/local_eval.py +3 -0
  60. llama_cloud/types/local_eval_results.py +3 -0
  61. llama_cloud/types/local_eval_sets.py +3 -0
  62. llama_cloud/types/markdown_element_node_parser.py +3 -0
  63. llama_cloud/types/markdown_node_parser.py +3 -0
  64. llama_cloud/types/metadata_filter.py +3 -0
  65. llama_cloud/types/metadata_filters.py +3 -0
  66. llama_cloud/types/metric_result.py +3 -0
  67. llama_cloud/types/node_parser.py +3 -0
  68. llama_cloud/types/open_ai_embedding.py +3 -0
  69. llama_cloud/types/parsing_history_item.py +3 -0
  70. llama_cloud/types/parsing_job.py +3 -0
  71. llama_cloud/types/parsing_job_json_result.py +3 -0
  72. llama_cloud/types/parsing_job_markdown_result.py +3 -0
  73. llama_cloud/types/parsing_job_text_result.py +3 -0
  74. llama_cloud/types/parsing_usage.py +3 -0
  75. llama_cloud/types/pipeline.py +3 -0
  76. llama_cloud/types/pipeline_create.py +3 -0
  77. llama_cloud/types/pipeline_data_source.py +3 -0
  78. llama_cloud/types/pipeline_data_source_create.py +3 -0
  79. llama_cloud/types/pipeline_deployment.py +3 -0
  80. llama_cloud/types/pipeline_file.py +3 -0
  81. llama_cloud/types/pipeline_file_create.py +3 -0
  82. llama_cloud/types/pipeline_file_status_response.py +3 -0
  83. llama_cloud/types/preset_retrieval_params.py +3 -0
  84. llama_cloud/types/presigned_url.py +3 -0
  85. llama_cloud/types/project.py +3 -0
  86. llama_cloud/types/project_create.py +3 -0
  87. llama_cloud/types/prompt_mixin_prompts.py +3 -0
  88. llama_cloud/types/prompt_spec.py +3 -0
  89. llama_cloud/types/related_node_info.py +3 -0
  90. llama_cloud/types/retrieve_results.py +3 -0
  91. llama_cloud/types/sentence_splitter.py +3 -0
  92. llama_cloud/types/simple_file_node_parser.py +3 -0
  93. llama_cloud/types/supported_eval_llm_model.py +3 -0
  94. llama_cloud/types/text_node.py +3 -0
  95. llama_cloud/types/text_node_with_score.py +3 -0
  96. llama_cloud/types/token_text_splitter.py +3 -0
  97. llama_cloud/types/validation_error.py +3 -0
  98. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/METADATA +1 -1
  99. llama_cloud-0.0.3.dist-info/RECORD +173 -0
  100. llama_cloud-0.0.1.dist-info/RECORD +0 -173
  101. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/LICENSE +0 -0
  102. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/WHEEL +0 -0
@@ -18,6 +18,7 @@ from ...types.eval_execution_params import EvalExecutionParams
 from ...types.eval_execution_params_override import EvalExecutionParamsOverride
 from ...types.eval_question_result import EvalQuestionResult
 from ...types.http_validation_error import HttpValidationError
+from ...types.managed_ingestion_status import ManagedIngestionStatus
 from ...types.metadata_filters import MetadataFilters
 from ...types.pipeline import Pipeline
 from ...types.pipeline_create import PipelineCreate
@@ -33,6 +34,9 @@ from ...types.retrieve_results import RetrieveResults
 from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 
 try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
     import pydantic.v1 as pydantic  # type: ignore
 except ImportError:
     import pydantic  # type: ignore
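Note: the three added lines tighten the pydantic import shim. A consolidated sketch of how the shim reads after this change (nothing here beyond what the hunk itself shows):

    try:
        import pydantic
        # Force the plain-import fallback whenever pydantic 1.x is installed;
        # the pydantic.v1 compatibility module is only used under pydantic 2.x.
        if pydantic.__version__.startswith("1."):
            raise ImportError
        import pydantic.v1 as pydantic  # type: ignore
    except ImportError:
        import pydantic  # type: ignore

Either branch leaves the module bound to a pydantic v1-style API surface, which is what the generated methods rely on (e.g. pydantic.parse_obj_as below).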
@@ -62,10 +66,10 @@ class PipelinesClient:
 
         - pipeline_type: typing.Optional[PipelineType].
         ---
-        from platform import PipelineType
-        from platform.client import PlatformApi
+        from llama_cloud import PipelineType
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
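Every docstring example in this file is updated the same way: the package root moves from platform to llama_cloud, and the client classes are renamed PlatformApi → LlamaCloud and AsyncPlatformApi → AsyncLlamaCloud, with constructor arguments unchanged. A before/after sketch of the migration, using only names that appear in this diff:

    # Before (0.0.1):
    # from platform import PipelineType
    # from platform.client import PlatformApi
    # client = PlatformApi(token="YOUR_TOKEN", base_url="https://yourhost.com/path/to/api")

    # After (0.0.3):
    from llama_cloud import PipelineType
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(
        token="YOUR_TOKEN",
        base_url="https://yourhost.com/path/to/api",
    )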
@@ -102,7 +106,7 @@ class PipelinesClient:
 
         - request: PipelineCreate.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -113,9 +117,9 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -167,7 +171,7 @@ class PipelinesClient:
 
         - request: PipelineCreate.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -178,9 +182,9 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -233,9 +237,9 @@ class PipelinesClient:
 
         - with_managed_ingestion_status: typing.Optional[bool].
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -295,7 +299,7 @@ class PipelinesClient:
 
         - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -304,9 +308,9 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -368,9 +372,9 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -401,9 +405,9 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -436,9 +440,9 @@ class PipelinesClient:
 
         - eval_dataset_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -486,10 +490,10 @@ class PipelinesClient:
 
         - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
-        from platform import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
-        from platform.client import PlatformApi
+        from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -539,9 +543,9 @@ class PipelinesClient:
 
         - eval_dataset_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -582,9 +586,9 @@ class PipelinesClient:
 
         - eval_dataset_execution_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -620,9 +624,9 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -657,9 +661,9 @@ class PipelinesClient:
 
         - request: typing.List[PipelineFileCreate].
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -694,9 +698,9 @@ class PipelinesClient:
 
         - file_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -740,9 +744,9 @@ class PipelinesClient:
 
         - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]]. Custom metadata for the file
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -782,9 +786,9 @@ class PipelinesClient:
 
         - file_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -818,9 +822,9 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -857,9 +861,9 @@ class PipelinesClient:
 
         - request: typing.List[PipelineDataSourceCreate].
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -896,9 +900,9 @@ class PipelinesClient:
 
         - data_source_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -935,9 +939,9 @@ class PipelinesClient:
 
         - data_source_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -997,10 +1001,10 @@ class PipelinesClient:
 
         - query: str. The query to retrieve against.
         ---
-        from platform import FilterCondition, MetadataFilters
-        from platform.client import PlatformApi
+        from llama_cloud import FilterCondition, MetadataFilters
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1050,9 +1054,9 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1085,9 +1089,9 @@ class PipelinesClient:
 
         - job_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1127,9 +1131,9 @@ class PipelinesClient:
 
         - limit: typing.Optional[int].
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1156,28 +1160,26 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def create_pipeline_document(self, pipeline_id: str, *, request: CloudDocumentCreate) -> CloudDocument:
+    def create_batch_pipeline_documents(
+        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
+    ) -> typing.List[CloudDocument]:
         """
-        Create a new document for a pipeline.
+        Batch create documents for a pipeline.
 
         Parameters:
             - pipeline_id: str.
 
-            - request: CloudDocumentCreate.
+            - request: typing.List[CloudDocumentCreate].
         ---
-        from platform import CloudDocumentCreate
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
-        client.pipelines.create_pipeline_document(
+        client.pipelines.create_batch_pipeline_documents(
             pipeline_id="string",
-            request=CloudDocumentCreate(
-                text="string",
-                metadata={"string": {}},
-            ),
+            request=[],
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1190,7 +1192,7 @@ class PipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -1199,28 +1201,26 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def upsert_pipeline_document(self, pipeline_id: str, *, request: CloudDocumentCreate) -> CloudDocument:
+    def upsert_batch_pipeline_documents(
+        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
+    ) -> typing.List[CloudDocument]:
         """
-        Create or update a document for a pipeline.
+        Batch create or update a document for a pipeline.
 
         Parameters:
             - pipeline_id: str.
 
-            - request: CloudDocumentCreate.
+            - request: typing.List[CloudDocumentCreate].
         ---
-        from platform import CloudDocumentCreate
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
-        client.pipelines.upsert_pipeline_document(
+        client.pipelines.upsert_batch_pipeline_documents(
             pipeline_id="string",
-            request=CloudDocumentCreate(
-                text="string",
-                metadata={"string": {}},
-            ),
+            request=[],
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1233,7 +1233,7 @@ class PipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
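The per-document create/upsert endpoints are replaced by batch variants that accept a list and return a list. The generated docstrings pass request=[]; a slightly fuller sketch, assuming CloudDocumentCreate is exported from the package root like the other types renamed in this diff:

    from llama_cloud import CloudDocumentCreate  # assumed root export, matching the other imports
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(
        token="YOUR_TOKEN",
        base_url="https://yourhost.com/path/to/api",
    )

    # Batch create; upsert_batch_pipeline_documents has the same shape but
    # creates-or-updates. Both now return typing.List[CloudDocument].
    documents = client.pipelines.create_batch_pipeline_documents(
        pipeline_id="string",
        request=[
            CloudDocumentCreate(text="string", metadata={"string": {}}),
        ],
    )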
@@ -1251,9 +1251,9 @@ class PipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1289,9 +1289,9 @@ class PipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import PlatformApi
+        from llama_cloud.client import LlamaCloud
 
-        client = PlatformApi(
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1318,6 +1318,45 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatus:
+        """
+        Return a single document for a pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+
+            - document_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+            base_url="https://yourhost.com/path/to/api",
+        )
+        client.pipelines.get_pipeline_document_status(
+            pipeline_id="string",
+            document_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/status",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ManagedIngestionStatus, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncPipelinesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
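The new get_pipeline_document_status method (GET api/v1/pipelines/{pipeline_id}/documents/{document_id}/status) returns a ManagedIngestionStatus, which suggests polling after a batch upsert. A sketch under that assumption; the status values themselves are not shown in this diff, so the bounded loop below simply returns the last reading rather than testing for a specific terminal value:

    import time

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(
        token="YOUR_TOKEN",
        base_url="https://yourhost.com/path/to/api",
    )

    def poll_document_status(pipeline_id: str, document_id: str, attempts: int = 10, delay: float = 2.0):
        # Poll the status endpoint a bounded number of times and return the
        # last ManagedIngestionStatus observed; callers decide what counts
        # as terminal, since the enum members are not part of this diff.
        status = None
        for _ in range(attempts):
            status = client.pipelines.get_pipeline_document_status(
                pipeline_id=pipeline_id,
                document_id=document_id,
            )
            time.sleep(delay)
        return status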
@@ -1340,10 +1379,10 @@ class AsyncPipelinesClient:
 
         - pipeline_type: typing.Optional[PipelineType].
         ---
-        from platform import PipelineType
-        from platform.client import AsyncPlatformApi
+        from llama_cloud import PipelineType
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1380,7 +1419,7 @@ class AsyncPipelinesClient:
 
         - request: PipelineCreate.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1391,9 +1430,9 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1445,7 +1484,7 @@ class AsyncPipelinesClient:
 
         - request: PipelineCreate.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1456,9 +1495,9 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1511,9 +1550,9 @@ class AsyncPipelinesClient:
 
         - with_managed_ingestion_status: typing.Optional[bool].
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1573,7 +1612,7 @@ class AsyncPipelinesClient:
 
         - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1582,9 +1621,9 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1646,9 +1685,9 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1679,9 +1718,9 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1716,9 +1755,9 @@ class AsyncPipelinesClient:
 
         - eval_dataset_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1766,10 +1805,10 @@ class AsyncPipelinesClient:
 
         - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
-        from platform import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
-        from platform.client import AsyncPlatformApi
+        from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1819,9 +1858,9 @@ class AsyncPipelinesClient:
 
         - eval_dataset_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1862,9 +1901,9 @@ class AsyncPipelinesClient:
 
         - eval_dataset_execution_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1900,9 +1939,9 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1937,9 +1976,9 @@ class AsyncPipelinesClient:
 
         - request: typing.List[PipelineFileCreate].
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1974,9 +2013,9 @@ class AsyncPipelinesClient:
 
         - file_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2020,9 +2059,9 @@ class AsyncPipelinesClient:
 
         - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]]. Custom metadata for the file
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2062,9 +2101,9 @@ class AsyncPipelinesClient:
 
         - file_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2098,9 +2137,9 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2137,9 +2176,9 @@ class AsyncPipelinesClient:
 
         - request: typing.List[PipelineDataSourceCreate].
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2176,9 +2215,9 @@ class AsyncPipelinesClient:
 
         - data_source_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2215,9 +2254,9 @@ class AsyncPipelinesClient:
 
         - data_source_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2277,10 +2316,10 @@ class AsyncPipelinesClient:
 
         - query: str. The query to retrieve against.
         ---
-        from platform import FilterCondition, MetadataFilters
-        from platform.client import AsyncPlatformApi
+        from llama_cloud import FilterCondition, MetadataFilters
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2330,9 +2369,9 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2365,9 +2404,9 @@ class AsyncPipelinesClient:
 
         - job_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2407,9 +2446,9 @@ class AsyncPipelinesClient:
 
         - limit: typing.Optional[int].
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2436,28 +2475,26 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def create_pipeline_document(self, pipeline_id: str, *, request: CloudDocumentCreate) -> CloudDocument:
+    async def create_batch_pipeline_documents(
+        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
+    ) -> typing.List[CloudDocument]:
         """
-        Create a new document for a pipeline.
+        Batch create documents for a pipeline.
 
         Parameters:
             - pipeline_id: str.
 
-            - request: CloudDocumentCreate.
+            - request: typing.List[CloudDocumentCreate].
         ---
-        from platform import CloudDocumentCreate
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
-        await client.pipelines.create_pipeline_document(
+        await client.pipelines.create_batch_pipeline_documents(
             pipeline_id="string",
-            request=CloudDocumentCreate(
-                text="string",
-                metadata={"string": {}},
-            ),
+            request=[],
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2470,7 +2507,7 @@ class AsyncPipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -2479,28 +2516,26 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def upsert_pipeline_document(self, pipeline_id: str, *, request: CloudDocumentCreate) -> CloudDocument:
+    async def upsert_batch_pipeline_documents(
+        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
+    ) -> typing.List[CloudDocument]:
         """
-        Create or update a document for a pipeline.
+        Batch create or update a document for a pipeline.
 
         Parameters:
             - pipeline_id: str.
 
-            - request: CloudDocumentCreate.
+            - request: typing.List[CloudDocumentCreate].
         ---
-        from platform import CloudDocumentCreate
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
-        await client.pipelines.upsert_pipeline_document(
+        await client.pipelines.upsert_batch_pipeline_documents(
             pipeline_id="string",
-            request=CloudDocumentCreate(
-                text="string",
-                metadata={"string": {}},
-            ),
+            request=[],
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2513,7 +2548,7 @@ class AsyncPipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -2531,9 +2566,9 @@ class AsyncPipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2569,9 +2604,9 @@ class AsyncPipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import AsyncPlatformApi
+        from llama_cloud.client import AsyncLlamaCloud
 
-        client = AsyncPlatformApi(
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2597,3 +2632,42 @@ class AsyncPipelinesClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatus:
+        """
+        Return a single document for a pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+
+            - document_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+            base_url="https://yourhost.com/path/to/api",
+        )
+        await client.pipelines.get_pipeline_document_status(
+            pipeline_id="string",
+            document_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/status",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ManagedIngestionStatus, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
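The async client mirrors the sync surface one-for-one, including the new batch and status methods. A minimal end-to-end sketch using only methods visible in this diff, assuming it runs inside an asyncio program:

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
            base_url="https://yourhost.com/path/to/api",
        )
        # Batch upsert (an empty list here, as in the docstring example),
        # then check managed-ingestion status for one document.
        docs = await client.pipelines.upsert_batch_pipeline_documents(
            pipeline_id="string",
            request=[],  # typing.List[CloudDocumentCreate]
        )
        status = await client.pipelines.get_pipeline_document_status(
            pipeline_id="string",
            document_id="string",
        )
        print(docs, status)

    asyncio.run(main())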