llama-cloud 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of llama-cloud has been flagged as potentially problematic in its registry.
- llama_cloud/core/jsonable_encoder.py +3 -0
- llama_cloud/resources/api_keys/client.py +11 -8
- llama_cloud/resources/billing/client.py +9 -6
- llama_cloud/resources/component_definitions/client.py +9 -6
- llama_cloud/resources/data_sinks/client.py +21 -18
- llama_cloud/resources/data_sources/client.py +21 -18
- llama_cloud/resources/deprecated/client.py +27 -24
- llama_cloud/resources/evals/client.py +27 -24
- llama_cloud/resources/files/client.py +15 -12
- llama_cloud/resources/parsing/client.py +27 -24
- llama_cloud/resources/pipelines/client.py +73 -70
- llama_cloud/resources/projects/client.py +43 -40
- llama_cloud/types/api_key.py +3 -0
- llama_cloud/types/azure_open_ai_embedding.py +3 -0
- llama_cloud/types/base.py +3 -0
- llama_cloud/types/base_prompt_template.py +3 -0
- llama_cloud/types/bedrock_embedding.py +3 -0
- llama_cloud/types/chat_message.py +3 -0
- llama_cloud/types/cloud_az_storage_blob_data_source.py +3 -0
- llama_cloud/types/cloud_chroma_vector_store.py +3 -0
- llama_cloud/types/cloud_document.py +3 -0
- llama_cloud/types/cloud_document_create.py +3 -0
- llama_cloud/types/cloud_gcs_data_source.py +3 -0
- llama_cloud/types/cloud_google_drive_data_source.py +3 -0
- llama_cloud/types/cloud_one_drive_data_source.py +3 -0
- llama_cloud/types/cloud_pinecone_vector_store.py +3 -0
- llama_cloud/types/cloud_postgres_vector_store.py +3 -0
- llama_cloud/types/cloud_qdrant_vector_store.py +3 -0
- llama_cloud/types/cloud_s_3_data_source.py +3 -0
- llama_cloud/types/cloud_sharepoint_data_source.py +3 -0
- llama_cloud/types/cloud_weaviate_vector_store.py +3 -0
- llama_cloud/types/code_splitter.py +3 -0
- llama_cloud/types/cohere_embedding.py +3 -0
- llama_cloud/types/configurable_transformation_definition.py +3 -0
- llama_cloud/types/configured_transformation_item.py +3 -0
- llama_cloud/types/data_sink.py +3 -0
- llama_cloud/types/data_sink_create.py +3 -0
- llama_cloud/types/data_sink_definition.py +3 -0
- llama_cloud/types/data_source.py +3 -0
- llama_cloud/types/data_source_create.py +3 -0
- llama_cloud/types/data_source_definition.py +3 -0
- llama_cloud/types/eval_dataset.py +3 -0
- llama_cloud/types/eval_dataset_job_params.py +3 -0
- llama_cloud/types/eval_dataset_job_record.py +3 -0
- llama_cloud/types/eval_execution_params.py +3 -0
- llama_cloud/types/eval_execution_params_override.py +3 -0
- llama_cloud/types/eval_llm_model_data.py +3 -0
- llama_cloud/types/eval_question.py +3 -0
- llama_cloud/types/eval_question_create.py +3 -0
- llama_cloud/types/eval_question_result.py +3 -0
- llama_cloud/types/file.py +3 -0
- llama_cloud/types/gemini_embedding.py +3 -0
- llama_cloud/types/html_node_parser.py +3 -0
- llama_cloud/types/http_validation_error.py +3 -0
- llama_cloud/types/hugging_face_inference_api_embedding.py +3 -0
- llama_cloud/types/json_node_parser.py +3 -0
- llama_cloud/types/llm.py +3 -0
- llama_cloud/types/local_eval.py +3 -0
- llama_cloud/types/local_eval_results.py +3 -0
- llama_cloud/types/local_eval_sets.py +3 -0
- llama_cloud/types/markdown_element_node_parser.py +3 -0
- llama_cloud/types/markdown_node_parser.py +3 -0
- llama_cloud/types/metadata_filter.py +3 -0
- llama_cloud/types/metadata_filters.py +3 -0
- llama_cloud/types/metric_result.py +3 -0
- llama_cloud/types/node_parser.py +3 -0
- llama_cloud/types/open_ai_embedding.py +3 -0
- llama_cloud/types/parsing_history_item.py +3 -0
- llama_cloud/types/parsing_job.py +3 -0
- llama_cloud/types/parsing_job_json_result.py +3 -0
- llama_cloud/types/parsing_job_markdown_result.py +3 -0
- llama_cloud/types/parsing_job_text_result.py +3 -0
- llama_cloud/types/parsing_usage.py +3 -0
- llama_cloud/types/pipeline.py +3 -0
- llama_cloud/types/pipeline_create.py +3 -0
- llama_cloud/types/pipeline_data_source.py +3 -0
- llama_cloud/types/pipeline_data_source_create.py +3 -0
- llama_cloud/types/pipeline_deployment.py +3 -0
- llama_cloud/types/pipeline_file.py +3 -0
- llama_cloud/types/pipeline_file_create.py +3 -0
- llama_cloud/types/pipeline_file_status_response.py +3 -0
- llama_cloud/types/preset_retrieval_params.py +3 -0
- llama_cloud/types/presigned_url.py +3 -0
- llama_cloud/types/project.py +3 -0
- llama_cloud/types/project_create.py +3 -0
- llama_cloud/types/prompt_mixin_prompts.py +3 -0
- llama_cloud/types/prompt_spec.py +3 -0
- llama_cloud/types/related_node_info.py +3 -0
- llama_cloud/types/retrieve_results.py +3 -0
- llama_cloud/types/sentence_splitter.py +3 -0
- llama_cloud/types/simple_file_node_parser.py +3 -0
- llama_cloud/types/supported_eval_llm_model.py +3 -0
- llama_cloud/types/text_node.py +3 -0
- llama_cloud/types/text_node_with_score.py +3 -0
- llama_cloud/types/token_text_splitter.py +3 -0
- llama_cloud/types/validation_error.py +3 -0
- {llama_cloud-0.0.2.dist-info → llama_cloud-0.0.3.dist-info}/METADATA +1 -1
- llama_cloud-0.0.3.dist-info/RECORD +173 -0
- llama_cloud-0.0.2.dist-info/RECORD +0 -173
- {llama_cloud-0.0.2.dist-info → llama_cloud-0.0.3.dist-info}/LICENSE +0 -0
- {llama_cloud-0.0.2.dist-info → llama_cloud-0.0.3.dist-info}/WHEEL +0 -0
llama_cloud/resources/pipelines/client.py

@@ -34,6 +34,9 @@ from ...types.retrieve_results import RetrieveResults
 from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 
 try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
     import pydantic.v1 as pydantic  # type: ignore
 except ImportError:
     import pydantic  # type: ignore
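The three added lines tighten the pydantic compatibility shim that now appears at the top of every generated module (the +3 -0 change repeated across the files listed above). Previously the code only attempted "import pydantic.v1 as pydantic"; the new version first imports pydantic and, if the installed major version is 1.x (which ships no v1 submodule), raises ImportError so the fallback branch binds plain pydantic instead. A minimal standalone sketch of the resulting pattern, runnable under either major version:

    # Version-agnostic pydantic import, as emitted by the generated client.
    # On pydantic 2.x this binds the v1 compatibility layer; on 1.x it
    # binds pydantic itself.
    try:
        import pydantic
        if pydantic.__version__.startswith("1."):
            # pydantic 1.x has no pydantic.v1 submodule; jump to the
            # fallback import below.
            raise ImportError
        import pydantic.v1 as pydantic  # type: ignore
    except ImportError:
        import pydantic  # type: ignore

    print(pydantic.VERSION)  # this attribute exists on both 1.x and 2.x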
@@ -63,8 +66,8 @@ class PipelinesClient:
 
             - pipeline_type: typing.Optional[PipelineType].
         ---
-        from
-        from
+        from llama_cloud import PipelineType
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -103,7 +106,7 @@ class PipelinesClient:
 
             - request: PipelineCreate.
         ---
-        from
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -114,7 +117,7 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
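Everything below this point is the same one-line repair applied over and over: in 0.0.2 the usage examples embedded in the client docstrings were generated with bare, truncated "from" lines, and 0.0.3 completes them into real imports. For reference, the repaired docstring example above assembles into a snippet along these lines; the client construction is taken verbatim from the diff, while the final call is an assumption (the method name search_pipelines and the enum member are not shown in this excerpt):

    from llama_cloud import PipelineType
    from llama_cloud.client import LlamaCloud

    # Client construction exactly as shown in the fixed docstrings.
    client = LlamaCloud(
        token="YOUR_TOKEN",
    )

    # Hypothetical call: the fixed docstring documents a pipeline_type
    # parameter, but the owning method lies outside this excerpt.
    pipelines = client.pipelines.search_pipelines(
        pipeline_type=PipelineType.MANAGED,
    )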
@@ -168,7 +171,7 @@ class PipelinesClient:
 
             - request: PipelineCreate.
         ---
-        from
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -179,7 +182,7 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -234,7 +237,7 @@ class PipelinesClient:
 
             - with_managed_ingestion_status: typing.Optional[bool].
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -296,7 +299,7 @@ class PipelinesClient:
 
             - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
         ---
-        from
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -305,7 +308,7 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -369,7 +372,7 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -402,7 +405,7 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -437,7 +440,7 @@ class PipelinesClient:
 
             - eval_dataset_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -487,8 +490,8 @@ class PipelinesClient:
 
             - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
-        from
-        from
+        from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -540,7 +543,7 @@ class PipelinesClient:
 
             - eval_dataset_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -583,7 +586,7 @@ class PipelinesClient:
 
             - eval_dataset_execution_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -621,7 +624,7 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -658,7 +661,7 @@ class PipelinesClient:
 
             - request: typing.List[PipelineFileCreate].
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -695,7 +698,7 @@ class PipelinesClient:
 
             - file_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -741,7 +744,7 @@ class PipelinesClient:
 
             - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]]. Custom metadata for the file
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -783,7 +786,7 @@ class PipelinesClient:
 
             - file_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -819,7 +822,7 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -858,7 +861,7 @@ class PipelinesClient:
 
             - request: typing.List[PipelineDataSourceCreate].
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -897,7 +900,7 @@ class PipelinesClient:
 
             - data_source_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -936,7 +939,7 @@ class PipelinesClient:
 
             - data_source_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -998,8 +1001,8 @@ class PipelinesClient:
 
             - query: str. The query to retrieve against.
         ---
-        from
-        from
+        from llama_cloud import FilterCondition, MetadataFilters
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
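The retrieval docstring above is the one example that pulls in filter types. Combining its repaired imports with the documented query parameter gives roughly the following; the client setup mirrors the diff, while the run_search method name and the MetadataFilters fields are assumptions not confirmed by this excerpt:

    from llama_cloud import FilterCondition, MetadataFilters
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(
        token="YOUR_TOKEN",
    )

    # Hypothetical retrieval call against a pipeline, with an (empty)
    # metadata filter set combined under AND semantics.
    results = client.pipelines.run_search(
        pipeline_id="YOUR_PIPELINE_ID",
        query="What is a managed pipeline?",
        search_filters=MetadataFilters(
            filters=[],
            condition=FilterCondition.AND,
        ),
    )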
@@ -1051,7 +1054,7 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -1086,7 +1089,7 @@ class PipelinesClient:
 
             - job_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -1128,7 +1131,7 @@ class PipelinesClient:
 
             - limit: typing.Optional[int].
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -1168,7 +1171,7 @@ class PipelinesClient:
 
             - request: typing.List[CloudDocumentCreate].
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -1209,7 +1212,7 @@ class PipelinesClient:
 
             - request: typing.List[CloudDocumentCreate].
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -1248,7 +1251,7 @@ class PipelinesClient:
 
             - document_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -1286,7 +1289,7 @@ class PipelinesClient:
 
             - document_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -1324,7 +1327,7 @@ class PipelinesClient:
 
             - document_id: str.
         ---
-        from
+        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
@@ -1376,8 +1379,8 @@ class AsyncPipelinesClient:
 
             - pipeline_type: typing.Optional[PipelineType].
         ---
-        from
-        from
+        from llama_cloud import PipelineType
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1416,7 +1419,7 @@ class AsyncPipelinesClient:
 
             - request: PipelineCreate.
         ---
-        from
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1427,7 +1430,7 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1481,7 +1484,7 @@ class AsyncPipelinesClient:
 
             - request: PipelineCreate.
         ---
-        from
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1492,7 +1495,7 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1547,7 +1550,7 @@ class AsyncPipelinesClient:
 
             - with_managed_ingestion_status: typing.Optional[bool].
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1609,7 +1612,7 @@ class AsyncPipelinesClient:
 
             - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
         ---
-        from
+        from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
             EvalExecutionParams,
@@ -1618,7 +1621,7 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1682,7 +1685,7 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1715,7 +1718,7 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1752,7 +1755,7 @@ class AsyncPipelinesClient:
 
             - eval_dataset_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1802,8 +1805,8 @@ class AsyncPipelinesClient:
 
             - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
-        from
-        from
+        from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1855,7 +1858,7 @@ class AsyncPipelinesClient:
 
             - eval_dataset_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1898,7 +1901,7 @@ class AsyncPipelinesClient:
 
             - eval_dataset_execution_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1936,7 +1939,7 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -1973,7 +1976,7 @@ class AsyncPipelinesClient:
 
             - request: typing.List[PipelineFileCreate].
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2010,7 +2013,7 @@ class AsyncPipelinesClient:
 
             - file_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2056,7 +2059,7 @@ class AsyncPipelinesClient:
 
             - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]]. Custom metadata for the file
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2098,7 +2101,7 @@ class AsyncPipelinesClient:
 
             - file_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2134,7 +2137,7 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2173,7 +2176,7 @@ class AsyncPipelinesClient:
 
             - request: typing.List[PipelineDataSourceCreate].
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2212,7 +2215,7 @@ class AsyncPipelinesClient:
 
             - data_source_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2251,7 +2254,7 @@ class AsyncPipelinesClient:
 
             - data_source_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2313,8 +2316,8 @@ class AsyncPipelinesClient:
 
             - query: str. The query to retrieve against.
         ---
-        from
-        from
+        from llama_cloud import FilterCondition, MetadataFilters
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2366,7 +2369,7 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2401,7 +2404,7 @@ class AsyncPipelinesClient:
 
             - job_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2443,7 +2446,7 @@ class AsyncPipelinesClient:
 
             - limit: typing.Optional[int].
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2483,7 +2486,7 @@ class AsyncPipelinesClient:
 
             - request: typing.List[CloudDocumentCreate].
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2524,7 +2527,7 @@ class AsyncPipelinesClient:
 
             - request: typing.List[CloudDocumentCreate].
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2563,7 +2566,7 @@ class AsyncPipelinesClient:
 
             - document_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2601,7 +2604,7 @@ class AsyncPipelinesClient:
 
             - document_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
@@ -2639,7 +2642,7 @@ class AsyncPipelinesClient:
 
             - document_id: str.
         ---
-        from
+        from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",