llama-cloud 0.0.2__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (104)
  1. llama_cloud/__init__.py +2 -0
  2. llama_cloud/client.py +16 -4
  3. llama_cloud/core/jsonable_encoder.py +3 -0
  4. llama_cloud/environment.py +7 -0
  5. llama_cloud/resources/api_keys/client.py +11 -16
  6. llama_cloud/resources/billing/client.py +9 -12
  7. llama_cloud/resources/component_definitions/client.py +9 -12
  8. llama_cloud/resources/data_sinks/client.py +21 -30
  9. llama_cloud/resources/data_sources/client.py +21 -30
  10. llama_cloud/resources/deprecated/client.py +27 -48
  11. llama_cloud/resources/evals/client.py +27 -44
  12. llama_cloud/resources/files/client.py +15 -24
  13. llama_cloud/resources/parsing/client.py +27 -48
  14. llama_cloud/resources/pipelines/client.py +73 -128
  15. llama_cloud/resources/projects/client.py +43 -72
  16. llama_cloud/types/api_key.py +3 -0
  17. llama_cloud/types/azure_open_ai_embedding.py +3 -0
  18. llama_cloud/types/base.py +3 -0
  19. llama_cloud/types/base_prompt_template.py +3 -0
  20. llama_cloud/types/bedrock_embedding.py +3 -0
  21. llama_cloud/types/chat_message.py +3 -0
  22. llama_cloud/types/cloud_az_storage_blob_data_source.py +3 -0
  23. llama_cloud/types/cloud_chroma_vector_store.py +3 -0
  24. llama_cloud/types/cloud_document.py +3 -0
  25. llama_cloud/types/cloud_document_create.py +3 -0
  26. llama_cloud/types/cloud_gcs_data_source.py +3 -0
  27. llama_cloud/types/cloud_google_drive_data_source.py +3 -0
  28. llama_cloud/types/cloud_one_drive_data_source.py +3 -0
  29. llama_cloud/types/cloud_pinecone_vector_store.py +3 -0
  30. llama_cloud/types/cloud_postgres_vector_store.py +3 -0
  31. llama_cloud/types/cloud_qdrant_vector_store.py +3 -0
  32. llama_cloud/types/cloud_s_3_data_source.py +3 -0
  33. llama_cloud/types/cloud_sharepoint_data_source.py +3 -0
  34. llama_cloud/types/cloud_weaviate_vector_store.py +3 -0
  35. llama_cloud/types/code_splitter.py +3 -0
  36. llama_cloud/types/cohere_embedding.py +3 -0
  37. llama_cloud/types/configurable_transformation_definition.py +3 -0
  38. llama_cloud/types/configured_transformation_item.py +3 -0
  39. llama_cloud/types/data_sink.py +3 -0
  40. llama_cloud/types/data_sink_create.py +3 -0
  41. llama_cloud/types/data_sink_definition.py +3 -0
  42. llama_cloud/types/data_source.py +3 -0
  43. llama_cloud/types/data_source_create.py +3 -0
  44. llama_cloud/types/data_source_definition.py +3 -0
  45. llama_cloud/types/eval_dataset.py +3 -0
  46. llama_cloud/types/eval_dataset_job_params.py +3 -0
  47. llama_cloud/types/eval_dataset_job_record.py +3 -0
  48. llama_cloud/types/eval_execution_params.py +3 -0
  49. llama_cloud/types/eval_execution_params_override.py +3 -0
  50. llama_cloud/types/eval_llm_model_data.py +3 -0
  51. llama_cloud/types/eval_question.py +3 -0
  52. llama_cloud/types/eval_question_create.py +3 -0
  53. llama_cloud/types/eval_question_result.py +3 -0
  54. llama_cloud/types/file.py +3 -0
  55. llama_cloud/types/gemini_embedding.py +3 -0
  56. llama_cloud/types/html_node_parser.py +3 -0
  57. llama_cloud/types/http_validation_error.py +3 -0
  58. llama_cloud/types/hugging_face_inference_api_embedding.py +3 -0
  59. llama_cloud/types/json_node_parser.py +3 -0
  60. llama_cloud/types/llm.py +3 -0
  61. llama_cloud/types/local_eval.py +3 -0
  62. llama_cloud/types/local_eval_results.py +3 -0
  63. llama_cloud/types/local_eval_sets.py +3 -0
  64. llama_cloud/types/markdown_element_node_parser.py +3 -0
  65. llama_cloud/types/markdown_node_parser.py +3 -0
  66. llama_cloud/types/metadata_filter.py +3 -0
  67. llama_cloud/types/metadata_filters.py +3 -0
  68. llama_cloud/types/metric_result.py +3 -0
  69. llama_cloud/types/node_parser.py +3 -0
  70. llama_cloud/types/open_ai_embedding.py +3 -0
  71. llama_cloud/types/parsing_history_item.py +3 -0
  72. llama_cloud/types/parsing_job.py +3 -0
  73. llama_cloud/types/parsing_job_json_result.py +3 -0
  74. llama_cloud/types/parsing_job_markdown_result.py +3 -0
  75. llama_cloud/types/parsing_job_text_result.py +3 -0
  76. llama_cloud/types/parsing_usage.py +3 -0
  77. llama_cloud/types/pipeline.py +3 -0
  78. llama_cloud/types/pipeline_create.py +3 -0
  79. llama_cloud/types/pipeline_data_source.py +3 -0
  80. llama_cloud/types/pipeline_data_source_create.py +3 -0
  81. llama_cloud/types/pipeline_deployment.py +3 -0
  82. llama_cloud/types/pipeline_file.py +3 -0
  83. llama_cloud/types/pipeline_file_create.py +3 -0
  84. llama_cloud/types/pipeline_file_status_response.py +3 -0
  85. llama_cloud/types/preset_retrieval_params.py +3 -0
  86. llama_cloud/types/presigned_url.py +3 -0
  87. llama_cloud/types/project.py +3 -0
  88. llama_cloud/types/project_create.py +3 -0
  89. llama_cloud/types/prompt_mixin_prompts.py +3 -0
  90. llama_cloud/types/prompt_spec.py +3 -0
  91. llama_cloud/types/related_node_info.py +3 -0
  92. llama_cloud/types/retrieve_results.py +3 -0
  93. llama_cloud/types/sentence_splitter.py +3 -0
  94. llama_cloud/types/simple_file_node_parser.py +3 -0
  95. llama_cloud/types/supported_eval_llm_model.py +3 -0
  96. llama_cloud/types/text_node.py +3 -0
  97. llama_cloud/types/text_node_with_score.py +3 -0
  98. llama_cloud/types/token_text_splitter.py +3 -0
  99. llama_cloud/types/validation_error.py +3 -0
  100. {llama_cloud-0.0.2.dist-info → llama_cloud-0.0.4.dist-info}/METADATA +1 -1
  101. llama_cloud-0.0.4.dist-info/RECORD +174 -0
  102. llama_cloud-0.0.2.dist-info/RECORD +0 -173
  103. {llama_cloud-0.0.2.dist-info → llama_cloud-0.0.4.dist-info}/LICENSE +0 -0
  104. {llama_cloud-0.0.2.dist-info → llama_cloud-0.0.4.dist-info}/WHEEL +0 -0
@@ -34,6 +34,9 @@ from ...types.retrieve_results import RetrieveResults
 from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 
 try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
     import pydantic.v1 as pydantic  # type: ignore
 except ImportError:
     import pydantic  # type: ignore
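The guard added here is the substantive change in this hunk: recent pydantic 1.10.x releases also ship a pydantic.v1 alias module, so the old bare import pydantic.v1 could succeed on a v1 install and silently pick the wrong path. A minimal standalone sketch of the resulting shim, with the reasoning spelled out in comments:

    # Prefer the v1 compatibility namespace bundled with pydantic 2.x, but fall
    # back to the plain top-level module whenever a 1.x release is installed --
    # even a 1.10.x release that exposes its own pydantic.v1 alias.
    try:
        import pydantic
        if pydantic.__version__.startswith("1."):
            # Force the except-branch so we use pydantic 1.x directly rather
            # than its migration alias.
            raise ImportError
        import pydantic.v1 as pydantic  # type: ignore
    except ImportError:
        import pydantic  # type: ignore

Either way the rest of the generated code sees a v1-style pydantic API under the name pydantic.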
@@ -63,12 +66,11 @@ class PipelinesClient:
 
         - pipeline_type: typing.Optional[PipelineType].
         ---
-        from platform import PipelineType
-        from platform.client import LlamaCloud
+        from llama_cloud import PipelineType
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.search_pipelines(
             project_name="string",
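Two things change in every docstring example from here on: the import root moves from platform (a name that collides with Python's stdlib platform module and was never the published package name) to llama_cloud, and the placeholder base_url argument disappears. The dropped argument lines up with the new llama_cloud/environment.py in the file list; Fern-generated SDKs typically pin a default host in an enum there, along the lines of this sketch (the enum name and URL below are assumptions, not read from the diff):

    import enum

    class LlamaCloudEnvironment(enum.Enum):
        # Hypothetical default; the real value lives in llama_cloud/environment.py.
        DEFAULT = "https://api.cloud.llamaindex.ai"

With a default environment wired into the client constructor, base_url becomes an optional override instead of boilerplate repeated in every example.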
@@ -103,7 +105,7 @@ class PipelinesClient:
 
         - request: PipelineCreate.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -114,11 +116,10 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.create_pipeline(
             request=PipelineCreate(
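The PipelineCreate literal is cut off at the hunk boundary here and in the hunks that follow. A minimal complete call under the new import path, hedged in that only the name field is assumed beyond what the imports show:

    from llama_cloud import PipelineCreate
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    # name is an assumed field; the richer imports above (DataSinkCreate,
    # EvalExecutionParams, PresetRetrievalParams, ...) would populate the
    # optional fields that the truncated example was showing.
    pipeline = client.pipelines.create_pipeline(
        request=PipelineCreate(name="my-pipeline"),
    )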
@@ -168,7 +169,7 @@ class PipelinesClient:
 
         - request: PipelineCreate.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -179,11 +180,10 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.upsert_pipeline(
             request=PipelineCreate(
@@ -234,11 +234,10 @@ class PipelinesClient:
 
         - with_managed_ingestion_status: typing.Optional[bool].
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_pipeline(
             pipeline_id="string",
@@ -296,7 +295,7 @@ class PipelinesClient:
 
         - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -305,11 +304,10 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.update_existing_pipeline(
             pipeline_id="string",
@@ -369,11 +367,10 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.delete_pipeline(
             pipeline_id="string",
@@ -402,11 +399,10 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.sync_pipeline(
             pipeline_id="string",
@@ -437,11 +433,10 @@ class PipelinesClient:
 
         - eval_dataset_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_eval_dataset_executions(
             pipeline_id="string",
@@ -487,12 +482,11 @@ class PipelinesClient:
 
         - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
-        from platform import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
-        from platform.client import LlamaCloud
+        from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.execute_eval_dataset(
             pipeline_id="string",
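For completeness, a runnable version of the eval-execution example with the override filled in; the llm_model field and the GPT_4 member are assumptions suggested by the paired SupportedEvalLlmModelNames import, not confirmed by the diff:

    from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    client.pipelines.execute_eval_dataset(
        pipeline_id="string",
        eval_dataset_id="string",
        # Override only the judge model; unset fields fall back to the
        # parameters stored on the pipeline, per the docstring above.
        params=EvalExecutionParamsOverride(
            llm_model=SupportedEvalLlmModelNames.GPT_4,  # assumed member name
        ),
    )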
@@ -540,11 +534,10 @@ class PipelinesClient:
 
         - eval_dataset_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_eval_dataset_execution_result(
             pipeline_id="string",
@@ -583,11 +576,10 @@ class PipelinesClient:
 
         - eval_dataset_execution_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_eval_dataset_execution(
             pipeline_id="string",
@@ -621,11 +613,10 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_files_for_pipeline(
             pipeline_id="string",
@@ -658,11 +649,10 @@ class PipelinesClient:
 
         - request: typing.List[PipelineFileCreate].
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.add_files_to_pipeline(
             pipeline_id="string",
@@ -695,11 +685,10 @@ class PipelinesClient:
 
         - file_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_pipeline_file_status(
             pipeline_id="string",
@@ -741,11 +730,10 @@ class PipelinesClient:
 
         - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]]. Custom metadata for the file
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.update_pipeline_file(
             pipeline_id="string",
@@ -783,11 +771,10 @@ class PipelinesClient:
 
         - file_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.delete_pipeline_file(
             pipeline_id="string",
@@ -819,11 +806,10 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_pipeline_data_sources(
             pipeline_id="string",
@@ -858,11 +844,10 @@ class PipelinesClient:
 
         - request: typing.List[PipelineDataSourceCreate].
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.add_data_sources_to_pipeline(
             pipeline_id="string",
@@ -897,11 +882,10 @@ class PipelinesClient:
 
         - data_source_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.delete_pipeline_data_source(
             pipeline_id="string",
@@ -936,11 +920,10 @@ class PipelinesClient:
 
         - data_source_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.sync_pipeline_data_source(
             pipeline_id="string",
@@ -998,12 +981,11 @@ class PipelinesClient:
 
         - query: str. The query to retrieve against.
         ---
-        from platform import FilterCondition, MetadataFilters
-        from platform.client import LlamaCloud
+        from llama_cloud import FilterCondition, MetadataFilters
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.run_search(
             pipeline_id="string",
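The run_search example is the one place the new import path carries retrieval-filter types. A fuller sketch, assuming MetadataFilter exposes key/value fields (its type file is touched in this release) and that the keyword argument is named search_filters:

    from llama_cloud import FilterCondition, MetadataFilter, MetadataFilters
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    results = client.pipelines.run_search(
        pipeline_id="string",
        query="string",
        # AND together any per-key constraints on node metadata.
        search_filters=MetadataFilters(
            filters=[MetadataFilter(key="file_name", value="report.pdf")],
            condition=FilterCondition.AND,
        ),
    )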
@@ -1051,11 +1033,10 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_pipeline_jobs(
             pipeline_id="string",
@@ -1086,11 +1067,10 @@ class PipelinesClient:
 
         - job_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_pipeline_job(
             pipeline_id="string",
@@ -1128,11 +1108,10 @@ class PipelinesClient:
 
         - limit: typing.Optional[int].
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.list_pipeline_documents(
             pipeline_id="string",
@@ -1168,11 +1147,10 @@ class PipelinesClient:
 
         - request: typing.List[CloudDocumentCreate].
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.create_batch_pipeline_documents(
             pipeline_id="string",
@@ -1209,11 +1187,10 @@ class PipelinesClient:
 
         - request: typing.List[CloudDocumentCreate].
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.upsert_batch_pipeline_documents(
             pipeline_id="string",
@@ -1248,11 +1225,10 @@ class PipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_pipeline_document(
             pipeline_id="string",
@@ -1286,11 +1262,10 @@ class PipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.delete_pipeline_document(
             pipeline_id="string",
@@ -1324,11 +1299,10 @@ class PipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import LlamaCloud
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.pipelines.get_pipeline_document_status(
             pipeline_id="string",
@@ -1376,12 +1350,11 @@ class AsyncPipelinesClient:
 
         - pipeline_type: typing.Optional[PipelineType].
         ---
-        from platform import PipelineType
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud import PipelineType
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.search_pipelines(
             project_name="string",
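From here on, the AsyncPipelinesClient hunks mirror the sync ones exactly, substituting AsyncLlamaCloud and awaited calls. The docstring examples show a bare await; a runnable equivalent needs an event loop (PipelineType.MANAGED is an assumed enum member, suggested by the ManagedPipeline wording elsewhere in the docstrings):

    import asyncio

    from llama_cloud import PipelineType
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Same surface as the sync client, awaited.
        pipelines = await client.pipelines.search_pipelines(
            project_name="string",
            pipeline_type=PipelineType.MANAGED,  # assumed member name
        )
        print(pipelines)

    asyncio.run(main())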
@@ -1416,7 +1389,7 @@ class AsyncPipelinesClient:
 
         - request: PipelineCreate.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1427,11 +1400,10 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.create_pipeline(
             request=PipelineCreate(
@@ -1481,7 +1453,7 @@ class AsyncPipelinesClient:
 
         - request: PipelineCreate.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1492,11 +1464,10 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.upsert_pipeline(
             request=PipelineCreate(
@@ -1547,11 +1518,10 @@ class AsyncPipelinesClient:
 
         - with_managed_ingestion_status: typing.Optional[bool].
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_pipeline(
             pipeline_id="string",
@@ -1609,7 +1579,7 @@ class AsyncPipelinesClient:
 
         - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
         ---
-        from platform import (
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1618,11 +1588,10 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.update_existing_pipeline(
             pipeline_id="string",
@@ -1682,11 +1651,10 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.delete_pipeline(
             pipeline_id="string",
@@ -1715,11 +1683,10 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.sync_pipeline(
             pipeline_id="string",
@@ -1752,11 +1719,10 @@ class AsyncPipelinesClient:
 
         - eval_dataset_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_eval_dataset_executions(
             pipeline_id="string",
@@ -1802,12 +1768,11 @@ class AsyncPipelinesClient:
 
         - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
-        from platform import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.execute_eval_dataset(
             pipeline_id="string",
@@ -1855,11 +1820,10 @@ class AsyncPipelinesClient:
 
         - eval_dataset_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_eval_dataset_execution_result(
             pipeline_id="string",
@@ -1898,11 +1862,10 @@ class AsyncPipelinesClient:
 
         - eval_dataset_execution_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_eval_dataset_execution(
             pipeline_id="string",
@@ -1936,11 +1899,10 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_files_for_pipeline(
             pipeline_id="string",
@@ -1973,11 +1935,10 @@ class AsyncPipelinesClient:
 
         - request: typing.List[PipelineFileCreate].
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.add_files_to_pipeline(
             pipeline_id="string",
@@ -2010,11 +1971,10 @@ class AsyncPipelinesClient:
 
         - file_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_pipeline_file_status(
             pipeline_id="string",
@@ -2056,11 +2016,10 @@ class AsyncPipelinesClient:
 
         - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]]. Custom metadata for the file
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.update_pipeline_file(
             pipeline_id="string",
@@ -2098,11 +2057,10 @@ class AsyncPipelinesClient:
 
         - file_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.delete_pipeline_file(
             pipeline_id="string",
@@ -2134,11 +2092,10 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_pipeline_data_sources(
             pipeline_id="string",
@@ -2173,11 +2130,10 @@ class AsyncPipelinesClient:
 
         - request: typing.List[PipelineDataSourceCreate].
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.add_data_sources_to_pipeline(
             pipeline_id="string",
@@ -2212,11 +2168,10 @@ class AsyncPipelinesClient:
 
         - data_source_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.delete_pipeline_data_source(
             pipeline_id="string",
@@ -2251,11 +2206,10 @@ class AsyncPipelinesClient:
 
         - data_source_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.sync_pipeline_data_source(
             pipeline_id="string",
@@ -2313,12 +2267,11 @@ class AsyncPipelinesClient:
 
         - query: str. The query to retrieve against.
         ---
-        from platform import FilterCondition, MetadataFilters
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud import FilterCondition, MetadataFilters
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.run_search(
             pipeline_id="string",
@@ -2366,11 +2319,10 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_pipeline_jobs(
             pipeline_id="string",
@@ -2401,11 +2353,10 @@ class AsyncPipelinesClient:
 
         - job_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_pipeline_job(
             pipeline_id="string",
@@ -2443,11 +2394,10 @@ class AsyncPipelinesClient:
 
         - limit: typing.Optional[int].
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.list_pipeline_documents(
             pipeline_id="string",
@@ -2483,11 +2433,10 @@ class AsyncPipelinesClient:
 
         - request: typing.List[CloudDocumentCreate].
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.create_batch_pipeline_documents(
             pipeline_id="string",
@@ -2524,11 +2473,10 @@ class AsyncPipelinesClient:
 
         - request: typing.List[CloudDocumentCreate].
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.upsert_batch_pipeline_documents(
             pipeline_id="string",
@@ -2563,11 +2511,10 @@ class AsyncPipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_pipeline_document(
             pipeline_id="string",
@@ -2601,11 +2548,10 @@ class AsyncPipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.delete_pipeline_document(
             pipeline_id="string",
@@ -2639,11 +2585,10 @@ class AsyncPipelinesClient:
 
         - document_id: str.
         ---
-        from platform.client import AsyncLlamaCloud
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.pipelines.get_pipeline_document_status(
             pipeline_id="string",