llama-cloud 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +12 -10
- llama_cloud/environment.py +1 -1
- llama_cloud/resources/__init__.py +2 -1
- llama_cloud/resources/data_sinks/client.py +14 -14
- llama_cloud/resources/data_sources/client.py +16 -16
- llama_cloud/resources/embedding_model_configs/client.py +80 -24
- llama_cloud/resources/evals/client.py +36 -26
- llama_cloud/resources/extraction/client.py +32 -32
- llama_cloud/resources/files/__init__.py +2 -2
- llama_cloud/resources/files/client.py +53 -28
- llama_cloud/resources/files/types/__init__.py +2 -1
- llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
- llama_cloud/resources/organizations/client.py +60 -56
- llama_cloud/resources/parsing/client.py +555 -324
- llama_cloud/resources/pipelines/client.py +446 -302
- llama_cloud/resources/projects/client.py +270 -136
- llama_cloud/types/__init__.py +10 -10
- llama_cloud/types/azure_open_ai_embedding.py +12 -6
- llama_cloud/types/base_prompt_template.py +6 -2
- llama_cloud/types/bedrock_embedding.py +12 -6
- llama_cloud/types/character_splitter.py +4 -2
- llama_cloud/types/chat_message.py +1 -1
- llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
- llama_cloud/types/cloud_box_data_source.py +13 -6
- llama_cloud/types/cloud_confluence_data_source.py +7 -6
- llama_cloud/types/cloud_document.py +3 -1
- llama_cloud/types/cloud_document_create.py +3 -1
- llama_cloud/types/cloud_google_drive_data_source.py +1 -0
- llama_cloud/types/cloud_jira_data_source.py +7 -4
- llama_cloud/types/cloud_notion_page_data_source.py +3 -2
- llama_cloud/types/cloud_one_drive_data_source.py +6 -3
- llama_cloud/types/cloud_s_3_data_source.py +9 -4
- llama_cloud/types/cloud_sharepoint_data_source.py +9 -6
- llama_cloud/types/cloud_slack_data_source.py +7 -6
- llama_cloud/types/code_splitter.py +1 -1
- llama_cloud/types/cohere_embedding.py +7 -3
- llama_cloud/types/data_sink.py +4 -4
- llama_cloud/types/data_sink_create.py +1 -1
- llama_cloud/types/data_source.py +7 -5
- llama_cloud/types/data_source_create.py +4 -2
- llama_cloud/types/embedding_model_config.py +2 -2
- llama_cloud/types/embedding_model_config_update.py +4 -2
- llama_cloud/types/eval_dataset.py +2 -2
- llama_cloud/types/eval_dataset_job_record.py +13 -7
- llama_cloud/types/eval_execution_params_override.py +6 -2
- llama_cloud/types/eval_question.py +2 -2
- llama_cloud/types/extraction_result.py +2 -2
- llama_cloud/types/extraction_schema.py +5 -3
- llama_cloud/types/file.py +15 -7
- llama_cloud/types/file_permission_info_value.py +5 -0
- llama_cloud/types/filter_operator.py +2 -2
- llama_cloud/types/gemini_embedding.py +10 -6
- llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
- llama_cloud/types/input_message.py +3 -1
- llama_cloud/types/job_name_mapping.py +4 -0
- llama_cloud/types/llama_parse_parameters.py +11 -0
- llama_cloud/types/llm.py +4 -2
- llama_cloud/types/llm_parameters.py +5 -2
- llama_cloud/types/local_eval.py +10 -8
- llama_cloud/types/local_eval_results.py +1 -1
- llama_cloud/types/managed_ingestion_status_response.py +5 -3
- llama_cloud/types/markdown_element_node_parser.py +5 -3
- llama_cloud/types/markdown_node_parser.py +1 -1
- llama_cloud/types/metadata_filter.py +2 -2
- llama_cloud/types/metric_result.py +3 -3
- llama_cloud/types/node_parser.py +1 -1
- llama_cloud/types/open_ai_embedding.py +12 -6
- llama_cloud/types/organization.py +2 -2
- llama_cloud/types/page_splitter_node_parser.py +2 -2
- llama_cloud/types/parsing_job_structured_result.py +32 -0
- llama_cloud/types/permission.py +3 -3
- llama_cloud/types/pipeline.py +17 -7
- llama_cloud/types/pipeline_configuration_hashes.py +3 -3
- llama_cloud/types/pipeline_create.py +15 -5
- llama_cloud/types/pipeline_data_source.py +13 -7
- llama_cloud/types/pipeline_data_source_create.py +3 -1
- llama_cloud/types/pipeline_deployment.py +4 -4
- llama_cloud/types/pipeline_file.py +25 -11
- llama_cloud/types/pipeline_file_create.py +3 -1
- llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
- llama_cloud/types/playground_session.py +2 -2
- llama_cloud/types/preset_retrieval_params.py +14 -7
- llama_cloud/types/presigned_url.py +3 -1
- llama_cloud/types/project.py +2 -2
- llama_cloud/types/prompt_mixin_prompts.py +1 -1
- llama_cloud/types/prompt_spec.py +4 -2
- llama_cloud/types/role.py +3 -3
- llama_cloud/types/sentence_splitter.py +4 -2
- llama_cloud/types/text_node.py +3 -3
- llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
- llama_cloud/types/token_text_splitter.py +1 -1
- llama_cloud/types/user_organization.py +9 -5
- llama_cloud/types/user_organization_create.py +4 -4
- llama_cloud/types/user_organization_delete.py +2 -2
- llama_cloud/types/user_organization_role.py +2 -2
- llama_cloud/types/value.py +5 -0
- llama_cloud/types/vertex_text_embedding.py +9 -5
- {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +2 -1
- {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +101 -100
- {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +1 -1
- llama_cloud/types/data_sink_component.py +0 -20
- llama_cloud/types/data_source_component.py +0 -28
- llama_cloud/types/metadata_filter_value.py +0 -5
- llama_cloud/types/pipeline_data_source_component.py +0 -28
- {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
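Most of the churn in this release is in the generated client code: method docstrings gain usage examples, path-parameter arguments (file_id, data_source_id, document_id, eval_dataset_id, job_id) are now declared ahead of pipeline_id in the method signatures, and run_search accepts a new dense_similarity_cutoff option. A minimal sketch of the 0.1.6 call shapes, assuming a valid API token and existing resource IDs (the literal ID strings below are placeholders taken from the generated examples, not real values):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token

# run_search now also accepts dense_similarity_cutoff alongside dense_similarity_top_k.
results = client.pipelines.run_search(
    pipeline_id="pipeline_id",
    query="query",
    dense_similarity_cutoff=0.5,
)

# ID arguments now precede pipeline_id, e.g. for eval dataset executions.
executions = client.pipelines.get_eval_dataset_executions(
    eval_dataset_id="eval_dataset_id",
    pipeline_id="pipeline_id",
)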
@@ -80,15 +80,12 @@ class PipelinesClient:
 
 - organization_id: typing.Optional[str].
 ---
-from llama_cloud import PipelineType
 from llama_cloud.client import LlamaCloud
 
 client = LlamaCloud(
 token="YOUR_TOKEN",
 )
-client.pipelines.search_pipelines(
-pipeline_type=PipelineType.PLAYGROUND,
-)
+client.pipelines.search_pipelines()
 """
 _response = self._client_wrapper.httpx_client.request(
 "GET",
@@ -131,6 +128,18 @@ class PipelinesClient:
 - organization_id: typing.Optional[str].
 
 - request: PipelineCreate.
+---
+from llama_cloud import PipelineCreate
+from llama_cloud.client import LlamaCloud
+
+client = LlamaCloud(
+token="YOUR_TOKEN",
+)
+client.pipelines.create_pipeline(
+request=PipelineCreate(
+name="name",
+),
+)
 """
 _response = self._client_wrapper.httpx_client.request(
 "POST",
@@ -167,6 +176,18 @@ class PipelinesClient:
 - organization_id: typing.Optional[str].
 
 - request: PipelineCreate.
+---
+from llama_cloud import PipelineCreate
+from llama_cloud.client import LlamaCloud
+
+client = LlamaCloud(
+token="YOUR_TOKEN",
+)
+client.pipelines.upsert_pipeline(
+request=PipelineCreate(
+name="name",
+),
+)
 """
 _response = self._client_wrapper.httpx_client.request(
 "PUT",
@@ -192,6 +213,15 @@ class PipelinesClient:
 
 Parameters:
 - pipeline_id: str.
+---
+from llama_cloud.client import LlamaCloud
+
+client = LlamaCloud(
+token="YOUR_TOKEN",
+)
+client.pipelines.get_pipeline(
+pipeline_id="pipeline_id",
+)
 """
 _response = self._client_wrapper.httpx_client.request(
 "GET",
@@ -235,23 +265,32 @@ class PipelinesClient:
 
 - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
 
-- configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
+- configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline.
 
-- data_sink_id: typing.Optional[str].
+- data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.
 
-- embedding_model_config_id: typing.Optional[str].
+- embedding_model_config_id: typing.Optional[str]. Embedding model config ID. When provided instead of embedding_config, the embedding model config will be looked up by ID.
 
-- data_sink: typing.Optional[DataSinkCreate].
+- data_sink: typing.Optional[DataSinkCreate]. Data sink. When provided instead of data_sink_id, the data sink will be created.
 
-- preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
+- preset_retrieval_parameters: typing.Optional[PresetRetrievalParams]. Preset retrieval parameters for the pipeline.
 
-- eval_parameters: typing.Optional[EvalExecutionParams].
+- eval_parameters: typing.Optional[EvalExecutionParams]. Eval parameters for the pipeline.
 
-- llama_parse_parameters: typing.Optional[LlamaParseParameters].
+- llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
 
 - name: typing.Optional[str].
 
-- managed_pipeline_id: typing.Optional[str].
+- managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
+---
+from llama_cloud.client import LlamaCloud
+
+client = LlamaCloud(
+token="YOUR_TOKEN",
+)
+client.pipelines.update_existing_pipeline(
+pipeline_id="pipeline_id",
+)
 """
 _request: typing.Dict[str, typing.Any] = {}
 if embedding_config is not OMIT:
@@ -306,7 +345,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.delete_pipeline(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -338,7 +377,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_pipeline_status(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -363,6 +402,15 @@ class PipelinesClient:
 
 Parameters:
 - pipeline_id: str.
+---
+from llama_cloud.client import LlamaCloud
+
+client = LlamaCloud(
+token="YOUR_TOKEN",
+)
+client.pipelines.sync_pipeline(
+pipeline_id="pipeline_id",
+)
 """
 _response = self._client_wrapper.httpx_client.request(
 "POST",
@@ -386,6 +434,15 @@ class PipelinesClient:
 
 Parameters:
 - pipeline_id: str.
+---
+from llama_cloud.client import LlamaCloud
+
+client = LlamaCloud(
+token="YOUR_TOKEN",
+)
+client.pipelines.copy_pipeline(
+pipeline_id="pipeline_id",
+)
 """
 _response = self._client_wrapper.httpx_client.request(
 "POST",
@@ -403,14 +460,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def get_eval_dataset_executions(self,
+def get_eval_dataset_executions(self, eval_dataset_id: str, pipeline_id: str) -> typing.List[EvalDatasetJobRecord]:
 """
 Get the status of an EvalDatasetExecution.
 
 Parameters:
-- pipeline_id: str.
-
 - eval_dataset_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -418,8 +475,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_eval_dataset_executions(
-
-
+eval_dataset_id="eval_dataset_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -443,8 +500,8 @@ class PipelinesClient:
 
 def execute_eval_dataset(
 self,
-pipeline_id: str,
 eval_dataset_id: str,
+pipeline_id: str,
 *,
 eval_question_ids: typing.List[str],
 params: typing.Optional[EvalExecutionParamsOverride] = OMIT,
@@ -453,27 +510,23 @@ class PipelinesClient:
 Execute a dataset.
 
 Parameters:
-- pipeline_id: str.
-
 - eval_dataset_id: str.
 
+- pipeline_id: str.
+
 - eval_question_ids: typing.List[str].
 
 - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
 ---
-from llama_cloud import EvalExecutionParamsOverride, SupportedLlmModelNames
 from llama_cloud.client import LlamaCloud
 
 client = LlamaCloud(
 token="YOUR_TOKEN",
 )
 client.pipelines.execute_eval_dataset(
-
-
-eval_question_ids=[],
-params=EvalExecutionParamsOverride(
-llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
-),
+eval_dataset_id="eval_dataset_id",
+pipeline_id="pipeline_id",
+eval_question_ids=["eval_question_ids"],
 )
 """
 _request: typing.Dict[str, typing.Any] = {"eval_question_ids": eval_question_ids}
@@ -500,7 +553,7 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
 def get_eval_dataset_execution_result(
-self,
+self, eval_dataset_id: str, pipeline_id: str
 ) -> typing.List[EvalQuestionResult]:
 """
 Get the result of an EvalDatasetExecution.
@@ -509,9 +562,9 @@ class PipelinesClient:
 If any of the specified questions do not have a result, they will be ignored.
 
 Parameters:
-- pipeline_id: str.
-
 - eval_dataset_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -519,8 +572,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_eval_dataset_execution_result(
-
-
+eval_dataset_id="eval_dataset_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -543,17 +596,17 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
 def get_eval_dataset_execution(
-self,
+self, eval_dataset_id: str, eval_dataset_execution_id: str, pipeline_id: str
 ) -> EvalDatasetJobRecord:
 """
 Get the status of an EvalDatasetExecution.
 
 Parameters:
-- pipeline_id: str.
-
 - eval_dataset_id: str.
 
 - eval_dataset_execution_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -561,9 +614,9 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_eval_dataset_execution(
-
-
-
+eval_dataset_id="eval_dataset_id",
+eval_dataset_execution_id="eval_dataset_execution_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -608,7 +661,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.list_pipeline_files(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -641,14 +694,19 @@ class PipelinesClient:
 
 - request: typing.List[PipelineFileCreate].
 ---
+from llama_cloud import PipelineFileCreate
 from llama_cloud.client import LlamaCloud
 
 client = LlamaCloud(
 token="YOUR_TOKEN",
 )
 client.pipelines.add_files_to_pipeline(
-pipeline_id="
-request=[
+pipeline_id="pipeline_id",
+request=[
+PipelineFileCreate(
+file_id="file_id",
+)
+],
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -697,7 +755,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.list_pipeline_files_2(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -724,14 +782,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def get_pipeline_file_status(self,
+def get_pipeline_file_status(self, file_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
 """
 Get status of a file for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - file_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -739,8 +797,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_pipeline_file_status(
-
-
+file_id="file_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -763,8 +821,8 @@ class PipelinesClient:
 
 def update_pipeline_file(
 self,
-pipeline_id: str,
 file_id: str,
+pipeline_id: str,
 *,
 custom_metadata: typing.Optional[
 typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
@@ -774,11 +832,11 @@ class PipelinesClient:
 Update a file for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - file_id: str.
 
--
+- pipeline_id: str.
+
+- custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]]. Custom metadata for the file
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -786,8 +844,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.update_pipeline_file(
-
-
+file_id="file_id",
+pipeline_id="pipeline_id",
 )
 """
 _request: typing.Dict[str, typing.Any] = {}
@@ -812,14 +870,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def delete_pipeline_file(self,
+def delete_pipeline_file(self, file_id: str, pipeline_id: str) -> None:
 """
 Delete a file from a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - file_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -827,8 +885,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.delete_pipeline_file(
-
-
+file_id="file_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -857,6 +915,15 @@ class PipelinesClient:
 - pipeline_id: str.
 
 - upload_file: typing.IO.
+---
+from llama_cloud.client import LlamaCloud
+
+client = LlamaCloud(
+token="YOUR_TOKEN",
+)
+client.pipelines.import_pipeline_metadata(
+pipeline_id="pipeline_id",
+)
 """
 _response = self._client_wrapper.httpx_client.request(
 "PUT",
@@ -889,7 +956,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.delete_pipeline_files_metadata(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -921,7 +988,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.list_pipeline_data_sources(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -953,14 +1020,19 @@ class PipelinesClient:
 
 - request: typing.List[PipelineDataSourceCreate].
 ---
+from llama_cloud import PipelineDataSourceCreate
 from llama_cloud.client import LlamaCloud
 
 client = LlamaCloud(
 token="YOUR_TOKEN",
 )
 client.pipelines.add_data_sources_to_pipeline(
-pipeline_id="
-request=[
+pipeline_id="pipeline_id",
+request=[
+PipelineDataSourceCreate(
+data_source_id="data_source_id",
+)
+],
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -983,17 +1055,17 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
 def update_pipeline_data_source(
-self,
+self, data_source_id: str, pipeline_id: str, *, sync_interval: typing.Optional[float] = OMIT
 ) -> PipelineDataSource:
 """
 Update the configuration of a data source in a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - data_source_id: str.
 
--
+- pipeline_id: str.
+
+- sync_interval: typing.Optional[float]. The interval at which the data source should be synced.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -1001,8 +1073,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.update_pipeline_data_source(
-
-
+data_source_id="data_source_id",
+pipeline_id="pipeline_id",
 )
 """
 _request: typing.Dict[str, typing.Any] = {}
@@ -1028,14 +1100,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def delete_pipeline_data_source(self,
+def delete_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> None:
 """
 Delete a data source from a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - data_source_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -1043,8 +1115,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.delete_pipeline_data_source(
-
-
+data_source_id="data_source_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1066,14 +1138,24 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def sync_pipeline_data_source(self,
+def sync_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> Pipeline:
 """
 Run ingestion for the pipeline data source by incrementally updating the data-sink with upstream changes from data-source.
 
 Parameters:
+- data_source_id: str.
+
 - pipeline_id: str.
+---
+from llama_cloud.client import LlamaCloud
 
-
+client = LlamaCloud(
+token="YOUR_TOKEN",
+)
+client.pipelines.sync_pipeline_data_source(
+data_source_id="data_source_id",
+pipeline_id="pipeline_id",
+)
 """
 _response = self._client_wrapper.httpx_client.request(
 "POST",
@@ -1094,14 +1176,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def get_pipeline_data_source_status(self,
+def get_pipeline_data_source_status(self, data_source_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
 """
 Get the status of a data source for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - data_source_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -1109,8 +1191,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_pipeline_data_source_status(
-
-
+data_source_id="data_source_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1137,6 +1219,7 @@ class PipelinesClient:
 pipeline_id: str,
 *,
 dense_similarity_top_k: typing.Optional[int] = OMIT,
+dense_similarity_cutoff: typing.Optional[float] = OMIT,
 sparse_similarity_top_k: typing.Optional[int] = OMIT,
 enable_reranking: typing.Optional[bool] = OMIT,
 rerank_top_n: typing.Optional[int] = OMIT,
@@ -1154,19 +1237,21 @@ class PipelinesClient:
 Parameters:
 - pipeline_id: str.
 
-- dense_similarity_top_k: typing.Optional[int].
+- dense_similarity_top_k: typing.Optional[int]. Number of nodes for dense retrieval.
 
--
+- dense_similarity_cutoff: typing.Optional[float]. Minimum similarity score wrt query for retrieval
 
--
+- sparse_similarity_top_k: typing.Optional[int]. Number of nodes for sparse retrieval.
 
--
+- enable_reranking: typing.Optional[bool]. Enable reranking for retrieval
 
--
+- rerank_top_n: typing.Optional[int]. Number of reranked nodes for returning.
 
--
+- alpha: typing.Optional[float]. Alpha value for hybrid retrieval to determine the weights between dense and sparse retrieval. 0 is sparse retrieval and 1 is dense retrieval.
 
--
+- search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.
+
+- files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
 
 - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
 
@@ -1176,25 +1261,21 @@ class PipelinesClient:
 
 - class_name: typing.Optional[str].
 ---
-from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
 from llama_cloud.client import LlamaCloud
 
 client = LlamaCloud(
 token="YOUR_TOKEN",
 )
 client.pipelines.run_search(
-pipeline_id="
-
-filters=[],
-condition=FilterCondition.AND,
-),
-retrieval_mode=RetrievalMode.CHUNKS,
-query="string",
+pipeline_id="pipeline_id",
+query="query",
 )
 """
 _request: typing.Dict[str, typing.Any] = {"query": query}
 if dense_similarity_top_k is not OMIT:
 _request["dense_similarity_top_k"] = dense_similarity_top_k
+if dense_similarity_cutoff is not OMIT:
+_request["dense_similarity_cutoff"] = dense_similarity_cutoff
 if sparse_similarity_top_k is not OMIT:
 _request["sparse_similarity_top_k"] = sparse_similarity_top_k
 if enable_reranking is not OMIT:
@@ -1243,7 +1324,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.list_pipeline_jobs(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1262,14 +1343,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def get_pipeline_job(self,
+def get_pipeline_job(self, job_id: str, pipeline_id: str) -> PipelineDeployment:
 """
 Get a job for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - job_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -1277,8 +1358,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_pipeline_job(
-
-
+job_id="job_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1312,7 +1393,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_playground_session(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1353,34 +1434,13 @@ class PipelinesClient:
 
 - class_name: typing.Optional[str].
 ---
-from llama_cloud import (
-ChatData,
-FilterCondition,
-LlmParameters,
-MetadataFilters,
-PresetRetrievalParams,
-RetrievalMode,
-SupportedLlmModelNames,
-)
 from llama_cloud.client import LlamaCloud
 
 client = LlamaCloud(
 token="YOUR_TOKEN",
 )
 client.pipelines.chat(
-pipeline_id="
-data=ChatData(
-retrieval_parameters=PresetRetrievalParams(
-search_filters=MetadataFilters(
-filters=[],
-condition=FilterCondition.AND,
-),
-retrieval_mode=RetrievalMode.CHUNKS,
-),
-llm_parameters=LlmParameters(
-model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
-),
-),
+pipeline_id="pipeline_id",
 )
 """
 _request: typing.Dict[str, typing.Any] = {}
@@ -1436,7 +1496,7 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.list_pipeline_documents(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1471,14 +1531,20 @@ class PipelinesClient:
 
 - request: typing.List[CloudDocumentCreate].
 ---
+from llama_cloud import CloudDocumentCreate
 from llama_cloud.client import LlamaCloud
 
 client = LlamaCloud(
 token="YOUR_TOKEN",
 )
 client.pipelines.create_batch_pipeline_documents(
-pipeline_id="
-request=[
+pipeline_id="pipeline_id",
+request=[
+CloudDocumentCreate(
+text="text",
+metadata={"key": "value"},
+)
+],
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1511,14 +1577,20 @@ class PipelinesClient:
 
 - request: typing.List[CloudDocumentCreate].
 ---
+from llama_cloud import CloudDocumentCreate
 from llama_cloud.client import LlamaCloud
 
 client = LlamaCloud(
 token="YOUR_TOKEN",
 )
 client.pipelines.upsert_batch_pipeline_documents(
-pipeline_id="
-request=[
+pipeline_id="pipeline_id",
+request=[
+CloudDocumentCreate(
+text="text",
+metadata={"key": "value"},
+)
+],
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1540,14 +1612,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def get_pipeline_document(self,
+def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
 """
 Return a single document for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - document_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -1555,8 +1627,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_pipeline_document(
-
-
+document_id="document_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1577,14 +1649,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def delete_pipeline_document(self,
+def delete_pipeline_document(self, document_id: str, pipeline_id: str) -> None:
 """
 Delete a document for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - document_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -1592,8 +1664,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.delete_pipeline_document(
-
-
+document_id="document_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1614,14 +1686,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def get_pipeline_document_status(self,
+def get_pipeline_document_status(self, document_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
 """
 Return a single document for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - document_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -1629,8 +1701,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.get_pipeline_document_status(
-
-
+document_id="document_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1652,14 +1724,14 @@ class PipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-def list_pipeline_document_chunks(self,
+def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
 """
 Return a list of chunks for a pipeline document.
 
 Parameters:
-- pipeline_id: str.
-
 - document_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import LlamaCloud
 
@@ -1667,8 +1739,8 @@ class PipelinesClient:
 token="YOUR_TOKEN",
 )
 client.pipelines.list_pipeline_document_chunks(
-
-
+document_id="document_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = self._client_wrapper.httpx_client.request(
@@ -1718,15 +1790,12 @@ class AsyncPipelinesClient:
 
 - organization_id: typing.Optional[str].
 ---
-from llama_cloud import PipelineType
 from llama_cloud.client import AsyncLlamaCloud
 
 client = AsyncLlamaCloud(
 token="YOUR_TOKEN",
 )
-await client.pipelines.search_pipelines(
-pipeline_type=PipelineType.PLAYGROUND,
-)
+await client.pipelines.search_pipelines()
 """
 _response = await self._client_wrapper.httpx_client.request(
 "GET",
@@ -1769,6 +1838,18 @@ class AsyncPipelinesClient:
 - organization_id: typing.Optional[str].
 
 - request: PipelineCreate.
+---
+from llama_cloud import PipelineCreate
+from llama_cloud.client import AsyncLlamaCloud
+
+client = AsyncLlamaCloud(
+token="YOUR_TOKEN",
+)
+await client.pipelines.create_pipeline(
+request=PipelineCreate(
+name="name",
+),
+)
 """
 _response = await self._client_wrapper.httpx_client.request(
 "POST",
@@ -1805,6 +1886,18 @@ class AsyncPipelinesClient:
 - organization_id: typing.Optional[str].
 
 - request: PipelineCreate.
+---
+from llama_cloud import PipelineCreate
+from llama_cloud.client import AsyncLlamaCloud
+
+client = AsyncLlamaCloud(
+token="YOUR_TOKEN",
+)
+await client.pipelines.upsert_pipeline(
+request=PipelineCreate(
+name="name",
+),
+)
 """
 _response = await self._client_wrapper.httpx_client.request(
 "PUT",
@@ -1830,6 +1923,15 @@ class AsyncPipelinesClient:
 
 Parameters:
 - pipeline_id: str.
+---
+from llama_cloud.client import AsyncLlamaCloud
+
+client = AsyncLlamaCloud(
+token="YOUR_TOKEN",
+)
+await client.pipelines.get_pipeline(
+pipeline_id="pipeline_id",
+)
 """
 _response = await self._client_wrapper.httpx_client.request(
 "GET",
@@ -1873,23 +1975,32 @@ class AsyncPipelinesClient:
 
 - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
 
-- configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
+- configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline.
 
-- data_sink_id: typing.Optional[str].
+- data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.
 
-- embedding_model_config_id: typing.Optional[str].
+- embedding_model_config_id: typing.Optional[str]. Embedding model config ID. When provided instead of embedding_config, the embedding model config will be looked up by ID.
 
-- data_sink: typing.Optional[DataSinkCreate].
+- data_sink: typing.Optional[DataSinkCreate]. Data sink. When provided instead of data_sink_id, the data sink will be created.
 
-- preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
+- preset_retrieval_parameters: typing.Optional[PresetRetrievalParams]. Preset retrieval parameters for the pipeline.
 
-- eval_parameters: typing.Optional[EvalExecutionParams].
+- eval_parameters: typing.Optional[EvalExecutionParams]. Eval parameters for the pipeline.
 
-- llama_parse_parameters: typing.Optional[LlamaParseParameters].
+- llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
 
 - name: typing.Optional[str].
 
-- managed_pipeline_id: typing.Optional[str].
+- managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
+---
+from llama_cloud.client import AsyncLlamaCloud
+
+client = AsyncLlamaCloud(
+token="YOUR_TOKEN",
+)
+await client.pipelines.update_existing_pipeline(
+pipeline_id="pipeline_id",
+)
 """
 _request: typing.Dict[str, typing.Any] = {}
 if embedding_config is not OMIT:
@@ -1944,7 +2055,7 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.delete_pipeline(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -1976,7 +2087,7 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.get_pipeline_status(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -2001,6 +2112,15 @@ class AsyncPipelinesClient:
 
 Parameters:
 - pipeline_id: str.
+---
+from llama_cloud.client import AsyncLlamaCloud
+
+client = AsyncLlamaCloud(
+token="YOUR_TOKEN",
+)
+await client.pipelines.sync_pipeline(
+pipeline_id="pipeline_id",
+)
 """
 _response = await self._client_wrapper.httpx_client.request(
 "POST",
@@ -2024,6 +2144,15 @@ class AsyncPipelinesClient:
 
 Parameters:
 - pipeline_id: str.
+---
+from llama_cloud.client import AsyncLlamaCloud
+
+client = AsyncLlamaCloud(
+token="YOUR_TOKEN",
+)
+await client.pipelines.copy_pipeline(
+pipeline_id="pipeline_id",
+)
 """
 _response = await self._client_wrapper.httpx_client.request(
 "POST",
@@ -2042,15 +2171,15 @@ class AsyncPipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
 async def get_eval_dataset_executions(
-self,
+self, eval_dataset_id: str, pipeline_id: str
 ) -> typing.List[EvalDatasetJobRecord]:
 """
 Get the status of an EvalDatasetExecution.
 
 Parameters:
-- pipeline_id: str.
-
 - eval_dataset_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import AsyncLlamaCloud
 
@@ -2058,8 +2187,8 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.get_eval_dataset_executions(
-
-
+eval_dataset_id="eval_dataset_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -2083,8 +2212,8 @@ class AsyncPipelinesClient:
 
 async def execute_eval_dataset(
 self,
-pipeline_id: str,
 eval_dataset_id: str,
+pipeline_id: str,
 *,
 eval_question_ids: typing.List[str],
 params: typing.Optional[EvalExecutionParamsOverride] = OMIT,
@@ -2093,27 +2222,23 @@ class AsyncPipelinesClient:
 Execute a dataset.
 
 Parameters:
-- pipeline_id: str.
-
 - eval_dataset_id: str.
 
+- pipeline_id: str.
+
 - eval_question_ids: typing.List[str].
 
 - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
 ---
-from llama_cloud import EvalExecutionParamsOverride, SupportedLlmModelNames
 from llama_cloud.client import AsyncLlamaCloud
 
 client = AsyncLlamaCloud(
 token="YOUR_TOKEN",
 )
 await client.pipelines.execute_eval_dataset(
-
-
-eval_question_ids=[],
-params=EvalExecutionParamsOverride(
-llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
-),
+eval_dataset_id="eval_dataset_id",
+pipeline_id="pipeline_id",
+eval_question_ids=["eval_question_ids"],
 )
 """
 _request: typing.Dict[str, typing.Any] = {"eval_question_ids": eval_question_ids}
@@ -2140,7 +2265,7 @@ class AsyncPipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
 async def get_eval_dataset_execution_result(
-self,
+self, eval_dataset_id: str, pipeline_id: str
 ) -> typing.List[EvalQuestionResult]:
 """
 Get the result of an EvalDatasetExecution.
@@ -2149,9 +2274,9 @@ class AsyncPipelinesClient:
 If any of the specified questions do not have a result, they will be ignored.
 
 Parameters:
-- pipeline_id: str.
-
 - eval_dataset_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import AsyncLlamaCloud
 
@@ -2159,8 +2284,8 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.get_eval_dataset_execution_result(
-
-
+eval_dataset_id="eval_dataset_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -2183,17 +2308,17 @@ class AsyncPipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
 async def get_eval_dataset_execution(
-self,
+self, eval_dataset_id: str, eval_dataset_execution_id: str, pipeline_id: str
 ) -> EvalDatasetJobRecord:
 """
 Get the status of an EvalDatasetExecution.
 
 Parameters:
-- pipeline_id: str.
-
 - eval_dataset_id: str.
 
 - eval_dataset_execution_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import AsyncLlamaCloud
 
@@ -2201,9 +2326,9 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.get_eval_dataset_execution(
-
-
-
+eval_dataset_id="eval_dataset_id",
+eval_dataset_execution_id="eval_dataset_execution_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -2248,7 +2373,7 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.list_pipeline_files(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -2281,14 +2406,19 @@ class AsyncPipelinesClient:
 
 - request: typing.List[PipelineFileCreate].
 ---
+from llama_cloud import PipelineFileCreate
 from llama_cloud.client import AsyncLlamaCloud
 
 client = AsyncLlamaCloud(
 token="YOUR_TOKEN",
 )
 await client.pipelines.add_files_to_pipeline(
-pipeline_id="
-request=[
+pipeline_id="pipeline_id",
+request=[
+PipelineFileCreate(
+file_id="file_id",
+)
+],
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -2337,7 +2467,7 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.list_pipeline_files_2(
-pipeline_id="
+pipeline_id="pipeline_id",
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -2364,14 +2494,14 @@ class AsyncPipelinesClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)
 
-async def get_pipeline_file_status(self,
+async def get_pipeline_file_status(self, file_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
 """
 Get status of a file for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - file_id: str.
+
+- pipeline_id: str.
 ---
 from llama_cloud.client import AsyncLlamaCloud
 
@@ -2379,8 +2509,8 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.get_pipeline_file_status(
-
-
+file_id="file_id",
+pipeline_id="pipeline_id",
 )
 """
 _response = await self._client_wrapper.httpx_client.request(
@@ -2403,8 +2533,8 @@ class AsyncPipelinesClient:
 
 async def update_pipeline_file(
 self,
-pipeline_id: str,
 file_id: str,
+pipeline_id: str,
 *,
 custom_metadata: typing.Optional[
 typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
@@ -2414,11 +2544,11 @@ class AsyncPipelinesClient:
 Update a file for a pipeline.
 
 Parameters:
-- pipeline_id: str.
-
 - file_id: str.
 
--
+- pipeline_id: str.
+
+- custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]]. Custom metadata for the file
 ---
 from llama_cloud.client import AsyncLlamaCloud
 
@@ -2426,8 +2556,8 @@ class AsyncPipelinesClient:
 token="YOUR_TOKEN",
 )
 await client.pipelines.update_pipeline_file(
-
-
+file_id="file_id",
+pipeline_id="pipeline_id",
 )
 """
 _request: typing.Dict[str, typing.Any] = {}
@@ -2452,14 +2582,14 @@ class AsyncPipelinesClient:
|
|
|
2452
2582
|
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2453
2583
|
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2454
2584
|
|
|
2455
|
-
async def delete_pipeline_file(self,
|
|
2585
|
+
async def delete_pipeline_file(self, file_id: str, pipeline_id: str) -> None:
|
|
2456
2586
|
"""
|
|
2457
2587
|
Delete a file from a pipeline.
|
|
2458
2588
|
|
|
2459
2589
|
Parameters:
|
|
2460
|
-
- pipeline_id: str.
|
|
2461
|
-
|
|
2462
2590
|
- file_id: str.
|
|
2591
|
+
|
|
2592
|
+
- pipeline_id: str.
|
|
2463
2593
|
---
|
|
2464
2594
|
from llama_cloud.client import AsyncLlamaCloud
|
|
2465
2595
|
|
|
@@ -2467,8 +2597,8 @@ class AsyncPipelinesClient:
|
|
|
2467
2597
|
token="YOUR_TOKEN",
|
|
2468
2598
|
)
|
|
2469
2599
|
await client.pipelines.delete_pipeline_file(
|
|
2470
|
-
|
|
2471
|
-
|
|
2600
|
+
file_id="file_id",
|
|
2601
|
+
pipeline_id="pipeline_id",
|
|
2472
2602
|
)
|
|
2473
2603
|
"""
|
|
2474
2604
|
_response = await self._client_wrapper.httpx_client.request(
|
|
@@ -2497,6 +2627,15 @@ class AsyncPipelinesClient:
|
|
|
2497
2627
|
- pipeline_id: str.
|
|
2498
2628
|
|
|
2499
2629
|
- upload_file: typing.IO.
|
|
2630
|
+
---
|
|
2631
|
+
from llama_cloud.client import AsyncLlamaCloud
|
|
2632
|
+
|
|
2633
|
+
client = AsyncLlamaCloud(
|
|
2634
|
+
token="YOUR_TOKEN",
|
|
2635
|
+
)
|
|
2636
|
+
await client.pipelines.import_pipeline_metadata(
|
|
2637
|
+
pipeline_id="pipeline_id",
|
|
2638
|
+
)
|
|
2500
2639
|
"""
|
|
2501
2640
|
_response = await self._client_wrapper.httpx_client.request(
|
|
2502
2641
|
"PUT",
|
|
@@ -2529,7 +2668,7 @@ class AsyncPipelinesClient:
|
|
|
2529
2668
|
token="YOUR_TOKEN",
|
|
2530
2669
|
)
|
|
2531
2670
|
await client.pipelines.delete_pipeline_files_metadata(
|
|
2532
|
-
pipeline_id="
|
|
2671
|
+
pipeline_id="pipeline_id",
|
|
2533
2672
|
)
|
|
2534
2673
|
"""
|
|
2535
2674
|
_response = await self._client_wrapper.httpx_client.request(
|
|
@@ -2561,7 +2700,7 @@ class AsyncPipelinesClient:
|
|
|
2561
2700
|
token="YOUR_TOKEN",
|
|
2562
2701
|
)
|
|
2563
2702
|
await client.pipelines.list_pipeline_data_sources(
|
|
2564
|
-
pipeline_id="
|
|
2703
|
+
pipeline_id="pipeline_id",
|
|
2565
2704
|
)
|
|
2566
2705
|
"""
|
|
2567
2706
|
_response = await self._client_wrapper.httpx_client.request(
|
|
@@ -2593,14 +2732,19 @@ class AsyncPipelinesClient:
|
|
|
2593
2732
|
|
|
2594
2733
|
- request: typing.List[PipelineDataSourceCreate].
|
|
2595
2734
|
---
|
|
2735
|
+
from llama_cloud import PipelineDataSourceCreate
|
|
2596
2736
|
from llama_cloud.client import AsyncLlamaCloud
|
|
2597
2737
|
|
|
2598
2738
|
client = AsyncLlamaCloud(
|
|
2599
2739
|
token="YOUR_TOKEN",
|
|
2600
2740
|
)
|
|
2601
2741
|
await client.pipelines.add_data_sources_to_pipeline(
|
|
2602
|
-
pipeline_id="
|
|
2603
|
-
request=[
|
|
2742
|
+
pipeline_id="pipeline_id",
|
|
2743
|
+
request=[
|
|
2744
|
+
PipelineDataSourceCreate(
|
|
2745
|
+
data_source_id="data_source_id",
|
|
2746
|
+
)
|
|
2747
|
+
],
|
|
2604
2748
|
)
|
|
2605
2749
|
"""
|
|
2606
2750
|
_response = await self._client_wrapper.httpx_client.request(
|
|
@@ -2623,17 +2767,17 @@ class AsyncPipelinesClient:
|
|
|
2623
2767
|
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2624
2768
|
|
|
2625
2769
|
async def update_pipeline_data_source(
|
|
2626
|
-
self,
|
|
2770
|
+
self, data_source_id: str, pipeline_id: str, *, sync_interval: typing.Optional[float] = OMIT
|
|
2627
2771
|
) -> PipelineDataSource:
|
|
2628
2772
|
"""
|
|
2629
2773
|
Update the configuration of a data source in a pipeline.
|
|
2630
2774
|
|
|
2631
2775
|
Parameters:
|
|
2632
|
-
- pipeline_id: str.
|
|
2633
|
-
|
|
2634
2776
|
- data_source_id: str.
|
|
2635
2777
|
|
|
2636
|
-
-
|
|
2778
|
+
- pipeline_id: str.
|
|
2779
|
+
|
|
2780
|
+
- sync_interval: typing.Optional[float]. The interval at which the data source should be synced.
|
|
2637
2781
|
---
|
|
2638
2782
|
from llama_cloud.client import AsyncLlamaCloud
|
|
2639
2783
|
|
|
@@ -2641,8 +2785,8 @@ class AsyncPipelinesClient:
|
|
|
2641
2785
|
token="YOUR_TOKEN",
|
|
2642
2786
|
)
|
|
2643
2787
|
await client.pipelines.update_pipeline_data_source(
|
|
2644
|
-
|
|
2645
|
-
|
|
2788
|
+
data_source_id="data_source_id",
|
|
2789
|
+
pipeline_id="pipeline_id",
|
|
2646
2790
|
)
|
|
2647
2791
|
"""
|
|
2648
2792
|
_request: typing.Dict[str, typing.Any] = {}
|
|
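`update_pipeline_data_source` likewise reorders its positional arguments (`data_source_id`, then `pipeline_id`) and documents the optional `sync_interval` keyword. A minimal sketch under placeholder values; the interval shown is illustrative only:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")  # placeholder token
    data_source = await client.pipelines.update_pipeline_data_source(
        data_source_id="data_source_id",
        pipeline_id="pipeline_id",
        sync_interval=3600.0,  # illustrative value only
    )
    print(data_source)


asyncio.run(main())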
@@ -2668,14 +2812,14 @@ class AsyncPipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def delete_pipeline_data_source(self,
+    async def delete_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> None:
        """
        Delete a data source from a pipeline.

        Parameters:
-            - pipeline_id: str.
-
            - data_source_id: str.
+
+            - pipeline_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

@@ -2683,8 +2827,8 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.delete_pipeline_data_source(
-
-
+            data_source_id="data_source_id",
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -2706,14 +2850,24 @@ class AsyncPipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def sync_pipeline_data_source(self,
+    async def sync_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> Pipeline:
        """
        Run ingestion for the pipeline data source by incrementally updating the data-sink with upstream changes from data-source.

        Parameters:
+            - data_source_id: str.
+
            - pipeline_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud

-
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.sync_pipeline_data_source(
+            data_source_id="data_source_id",
+            pipeline_id="pipeline_id",
+        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "POST",
@@ -2735,15 +2889,15 @@ class AsyncPipelinesClient:
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_pipeline_data_source_status(
-        self,
+        self, data_source_id: str, pipeline_id: str
    ) -> ManagedIngestionStatusResponse:
        """
        Get the status of a data source for a pipeline.

        Parameters:
-            - pipeline_id: str.
-
            - data_source_id: str.
+
+            - pipeline_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

@@ -2751,8 +2905,8 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.get_pipeline_data_source_status(
-
-
+            data_source_id="data_source_id",
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -2779,6 +2933,7 @@ class AsyncPipelinesClient:
        pipeline_id: str,
        *,
        dense_similarity_top_k: typing.Optional[int] = OMIT,
+        dense_similarity_cutoff: typing.Optional[float] = OMIT,
        sparse_similarity_top_k: typing.Optional[int] = OMIT,
        enable_reranking: typing.Optional[bool] = OMIT,
        rerank_top_n: typing.Optional[int] = OMIT,
@@ -2796,19 +2951,21 @@ class AsyncPipelinesClient:
        Parameters:
            - pipeline_id: str.

-            - dense_similarity_top_k: typing.Optional[int].
+            - dense_similarity_top_k: typing.Optional[int]. Number of nodes for dense retrieval.

-            -
+            - dense_similarity_cutoff: typing.Optional[float]. Minimum similarity score wrt query for retrieval

-            -
+            - sparse_similarity_top_k: typing.Optional[int]. Number of nodes for sparse retrieval.

-            -
+            - enable_reranking: typing.Optional[bool]. Enable reranking for retrieval

-            -
+            - rerank_top_n: typing.Optional[int]. Number of reranked nodes for returning.

-            -
+            - alpha: typing.Optional[float]. Alpha value for hybrid retrieval to determine the weights between dense and sparse retrieval. 0 is sparse retrieval and 1 is dense retrieval.

-            -
+            - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.
+
+            - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).

            - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.

@@ -2818,25 +2975,21 @@ class AsyncPipelinesClient:

            - class_name: typing.Optional[str].
        ---
-        from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.pipelines.run_search(
-            pipeline_id="
-
-                filters=[],
-                condition=FilterCondition.AND,
-            ),
-            retrieval_mode=RetrievalMode.CHUNKS,
-            query="string",
+            pipeline_id="pipeline_id",
+            query="query",
        )
        """
        _request: typing.Dict[str, typing.Any] = {"query": query}
        if dense_similarity_top_k is not OMIT:
            _request["dense_similarity_top_k"] = dense_similarity_top_k
+        if dense_similarity_cutoff is not OMIT:
+            _request["dense_similarity_cutoff"] = dense_similarity_cutoff
        if sparse_similarity_top_k is not OMIT:
            _request["sparse_similarity_top_k"] = sparse_similarity_top_k
        if enable_reranking is not OMIT:
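`run_search` picks up the new `dense_similarity_cutoff` keyword alongside the existing retrieval parameters. A minimal sketch with placeholder ids; the numeric values are illustrative, not documented defaults:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")  # placeholder token
    results = await client.pipelines.run_search(
        pipeline_id="pipeline_id",
        query="query",
        dense_similarity_top_k=5,  # illustrative value
        dense_similarity_cutoff=0.5,  # new in 0.1.6: minimum similarity score for retrieval
    )
    print(results)


asyncio.run(main())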
@@ -2885,7 +3038,7 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.list_pipeline_jobs(
-            pipeline_id="
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -2904,14 +3057,14 @@ class AsyncPipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_pipeline_job(self,
+    async def get_pipeline_job(self, job_id: str, pipeline_id: str) -> PipelineDeployment:
        """
        Get a job for a pipeline.

        Parameters:
-            - pipeline_id: str.
-
            - job_id: str.
+
+            - pipeline_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

@@ -2919,8 +3072,8 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.get_pipeline_job(
-
-
+            job_id="job_id",
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -2954,7 +3107,7 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.get_playground_session(
-            pipeline_id="
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -2995,34 +3148,13 @@ class AsyncPipelinesClient:

            - class_name: typing.Optional[str].
        ---
-        from llama_cloud import (
-            ChatData,
-            FilterCondition,
-            LlmParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.pipelines.chat(
-            pipeline_id="
-            data=ChatData(
-                retrieval_parameters=PresetRetrievalParams(
-                    search_filters=MetadataFilters(
-                        filters=[],
-                        condition=FilterCondition.AND,
-                    ),
-                    retrieval_mode=RetrievalMode.CHUNKS,
-                ),
-                llm_parameters=LlmParameters(
-                    model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
-                ),
-            ),
+            pipeline_id="pipeline_id",
        )
        """
        _request: typing.Dict[str, typing.Any] = {}
@@ -3078,7 +3210,7 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.list_pipeline_documents(
-            pipeline_id="
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -3113,14 +3245,20 @@ class AsyncPipelinesClient:

            - request: typing.List[CloudDocumentCreate].
        ---
+        from llama_cloud import CloudDocumentCreate
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.pipelines.create_batch_pipeline_documents(
-            pipeline_id="
-            request=[
+            pipeline_id="pipeline_id",
+            request=[
+                CloudDocumentCreate(
+                    text="text",
+                    metadata={"key": "value"},
+                )
+            ],
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
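The batch document endpoints now ship concrete `CloudDocumentCreate` examples. A minimal sketch of the create call, mirroring the docstring above with placeholder text and metadata and a hypothetical `main` wrapper:

import asyncio

from llama_cloud import CloudDocumentCreate
from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")  # placeholder token
    documents = await client.pipelines.create_batch_pipeline_documents(
        pipeline_id="pipeline_id",
        request=[
            CloudDocumentCreate(
                text="text",
                metadata={"key": "value"},
            )
        ],
    )
    print(documents)


asyncio.run(main())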
@@ -3153,14 +3291,20 @@ class AsyncPipelinesClient:

            - request: typing.List[CloudDocumentCreate].
        ---
+        from llama_cloud import CloudDocumentCreate
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.pipelines.upsert_batch_pipeline_documents(
-            pipeline_id="
-            request=[
+            pipeline_id="pipeline_id",
+            request=[
+                CloudDocumentCreate(
+                    text="text",
+                    metadata={"key": "value"},
+                )
+            ],
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -3182,14 +3326,14 @@ class AsyncPipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_pipeline_document(self,
+    async def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
        """
        Return a single document for a pipeline.

        Parameters:
-            - pipeline_id: str.
-
            - document_id: str.
+
+            - pipeline_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

@@ -3197,8 +3341,8 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.get_pipeline_document(
-
-
+            document_id="document_id",
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -3219,14 +3363,14 @@ class AsyncPipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def delete_pipeline_document(self,
+    async def delete_pipeline_document(self, document_id: str, pipeline_id: str) -> None:
        """
        Delete a document for a pipeline.

        Parameters:
-            - pipeline_id: str.
-
            - document_id: str.
+
+            - pipeline_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

@@ -3234,8 +3378,8 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.delete_pipeline_document(
-
-
+            document_id="document_id",
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -3256,14 +3400,14 @@ class AsyncPipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_pipeline_document_status(self,
+    async def get_pipeline_document_status(self, document_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
        """
        Return a single document for a pipeline.

        Parameters:
-            - pipeline_id: str.
-
            - document_id: str.
+
+            - pipeline_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

@@ -3271,8 +3415,8 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.get_pipeline_document_status(
-
-
+            document_id="document_id",
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
@@ -3294,14 +3438,14 @@ class AsyncPipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def list_pipeline_document_chunks(self,
+    async def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
        """
        Return a list of chunks for a pipeline document.

        Parameters:
-            - pipeline_id: str.
-
            - document_id: str.
+
+            - pipeline_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

@@ -3309,8 +3453,8 @@ class AsyncPipelinesClient:
            token="YOUR_TOKEN",
        )
        await client.pipelines.list_pipeline_document_chunks(
-
-
+            document_id="document_id",
+            pipeline_id="pipeline_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(