llama-cloud 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +140 -6
- llama_cloud/client.py +15 -0
- llama_cloud/environment.py +1 -1
- llama_cloud/resources/__init__.py +15 -0
- llama_cloud/{types/token.py → resources/chat_apps/__init__.py} +0 -3
- llama_cloud/resources/chat_apps/client.py +630 -0
- llama_cloud/resources/data_sinks/client.py +12 -12
- llama_cloud/resources/data_sources/client.py +14 -14
- llama_cloud/resources/embedding_model_configs/client.py +20 -76
- llama_cloud/resources/evals/client.py +26 -36
- llama_cloud/resources/extraction/client.py +32 -32
- llama_cloud/resources/files/client.py +40 -44
- llama_cloud/resources/jobs/__init__.py +2 -0
- llama_cloud/resources/jobs/client.py +148 -0
- llama_cloud/resources/llama_extract/__init__.py +5 -0
- llama_cloud/resources/llama_extract/client.py +1038 -0
- llama_cloud/resources/llama_extract/types/__init__.py +6 -0
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
- llama_cloud/resources/organizations/client.py +66 -70
- llama_cloud/resources/parsing/client.py +448 -428
- llama_cloud/resources/pipelines/client.py +256 -344
- llama_cloud/resources/projects/client.py +34 -60
- llama_cloud/resources/reports/__init__.py +5 -0
- llama_cloud/resources/reports/client.py +1198 -0
- llama_cloud/resources/reports/types/__init__.py +7 -0
- llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
- llama_cloud/resources/retrievers/__init__.py +2 -0
- llama_cloud/resources/retrievers/client.py +654 -0
- llama_cloud/types/__init__.py +128 -6
- llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +3 -3
- llama_cloud/types/azure_open_ai_embedding.py +6 -12
- llama_cloud/types/base_prompt_template.py +2 -6
- llama_cloud/types/bedrock_embedding.py +6 -12
- llama_cloud/types/character_splitter.py +2 -4
- llama_cloud/types/chat_app.py +44 -0
- llama_cloud/types/chat_app_response.py +41 -0
- llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -15
- llama_cloud/types/cloud_box_data_source.py +6 -12
- llama_cloud/types/cloud_confluence_data_source.py +6 -6
- llama_cloud/types/cloud_document.py +1 -3
- llama_cloud/types/cloud_document_create.py +1 -3
- llama_cloud/types/cloud_jira_data_source.py +4 -6
- llama_cloud/types/cloud_notion_page_data_source.py +2 -2
- llama_cloud/types/cloud_one_drive_data_source.py +3 -5
- llama_cloud/types/cloud_postgres_vector_store.py +1 -0
- llama_cloud/types/cloud_s_3_data_source.py +4 -8
- llama_cloud/types/cloud_sharepoint_data_source.py +6 -8
- llama_cloud/types/cloud_slack_data_source.py +6 -6
- llama_cloud/types/code_splitter.py +1 -1
- llama_cloud/types/cohere_embedding.py +3 -7
- llama_cloud/types/composite_retrieval_mode.py +21 -0
- llama_cloud/types/composite_retrieval_result.py +38 -0
- llama_cloud/types/composite_retrieved_text_node.py +42 -0
- llama_cloud/types/data_sink.py +4 -4
- llama_cloud/types/data_sink_component.py +20 -0
- llama_cloud/types/data_source.py +5 -7
- llama_cloud/types/data_source_component.py +28 -0
- llama_cloud/types/data_source_create.py +1 -3
- llama_cloud/types/edit_suggestion.py +39 -0
- llama_cloud/types/embedding_model_config.py +2 -2
- llama_cloud/types/embedding_model_config_update.py +2 -4
- llama_cloud/types/eval_dataset.py +2 -2
- llama_cloud/types/eval_dataset_job_record.py +8 -13
- llama_cloud/types/eval_execution_params_override.py +2 -6
- llama_cloud/types/eval_question.py +2 -2
- llama_cloud/types/extract_agent.py +45 -0
- llama_cloud/types/extract_agent_data_schema_value.py +5 -0
- llama_cloud/types/extract_config.py +40 -0
- llama_cloud/types/extract_job.py +35 -0
- llama_cloud/types/extract_job_create.py +40 -0
- llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
- llama_cloud/types/extract_mode.py +17 -0
- llama_cloud/types/extract_resultset.py +46 -0
- llama_cloud/types/extract_resultset_data.py +11 -0
- llama_cloud/types/extract_resultset_data_item_value.py +7 -0
- llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
- llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
- llama_cloud/types/extraction_result.py +2 -2
- llama_cloud/types/extraction_schema.py +3 -5
- llama_cloud/types/file.py +9 -14
- llama_cloud/types/filter_condition.py +9 -1
- llama_cloud/types/filter_operator.py +6 -2
- llama_cloud/types/gemini_embedding.py +6 -10
- llama_cloud/types/hugging_face_inference_api_embedding.py +11 -27
- llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
- llama_cloud/types/image_block.py +35 -0
- llama_cloud/types/input_message.py +2 -4
- llama_cloud/types/job_names.py +89 -0
- llama_cloud/types/job_record.py +57 -0
- llama_cloud/types/job_record_with_usage_metrics.py +36 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
- llama_cloud/types/llama_parse_parameters.py +4 -0
- llama_cloud/types/llm.py +3 -4
- llama_cloud/types/llm_model_data.py +1 -0
- llama_cloud/types/llm_parameters.py +3 -5
- llama_cloud/types/local_eval.py +8 -10
- llama_cloud/types/local_eval_results.py +1 -1
- llama_cloud/types/managed_ingestion_status.py +4 -0
- llama_cloud/types/managed_ingestion_status_response.py +4 -5
- llama_cloud/types/markdown_element_node_parser.py +3 -5
- llama_cloud/types/markdown_node_parser.py +1 -1
- llama_cloud/types/metadata_filter.py +2 -2
- llama_cloud/types/metadata_filter_value.py +5 -0
- llama_cloud/types/metric_result.py +3 -3
- llama_cloud/types/node_parser.py +1 -1
- llama_cloud/types/object_type.py +4 -0
- llama_cloud/types/open_ai_embedding.py +6 -12
- llama_cloud/types/organization.py +7 -2
- llama_cloud/types/page_splitter_node_parser.py +2 -2
- llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
- llama_cloud/types/paginated_report_response.py +35 -0
- llama_cloud/types/parse_plan_level.py +21 -0
- llama_cloud/types/permission.py +3 -3
- llama_cloud/types/pipeline.py +7 -17
- llama_cloud/types/pipeline_configuration_hashes.py +3 -3
- llama_cloud/types/pipeline_create.py +8 -16
- llama_cloud/types/pipeline_data_source.py +7 -13
- llama_cloud/types/pipeline_data_source_component.py +28 -0
- llama_cloud/types/pipeline_data_source_create.py +1 -3
- llama_cloud/types/pipeline_deployment.py +4 -4
- llama_cloud/types/pipeline_file.py +13 -24
- llama_cloud/types/pipeline_file_create.py +1 -3
- llama_cloud/types/playground_session.py +4 -4
- llama_cloud/types/preset_retrieval_params.py +8 -14
- llama_cloud/types/presigned_url.py +1 -3
- llama_cloud/types/progress_event.py +44 -0
- llama_cloud/types/progress_event_status.py +33 -0
- llama_cloud/types/project.py +2 -2
- llama_cloud/types/prompt_mixin_prompts.py +1 -1
- llama_cloud/types/prompt_spec.py +3 -5
- llama_cloud/types/related_node_info.py +2 -2
- llama_cloud/types/related_node_info_node_type.py +7 -0
- llama_cloud/types/report.py +33 -0
- llama_cloud/types/report_block.py +34 -0
- llama_cloud/types/report_block_dependency.py +29 -0
- llama_cloud/types/report_create_response.py +31 -0
- llama_cloud/types/report_event_item.py +40 -0
- llama_cloud/types/report_event_item_event_data.py +45 -0
- llama_cloud/types/report_event_type.py +37 -0
- llama_cloud/types/report_metadata.py +43 -0
- llama_cloud/types/report_plan.py +36 -0
- llama_cloud/types/report_plan_block.py +36 -0
- llama_cloud/types/report_query.py +33 -0
- llama_cloud/types/report_response.py +41 -0
- llama_cloud/types/report_state.py +37 -0
- llama_cloud/types/report_state_event.py +38 -0
- llama_cloud/types/report_update_event.py +38 -0
- llama_cloud/types/retrieve_results.py +1 -1
- llama_cloud/types/retriever.py +45 -0
- llama_cloud/types/retriever_create.py +37 -0
- llama_cloud/types/retriever_pipeline.py +37 -0
- llama_cloud/types/role.py +3 -3
- llama_cloud/types/sentence_splitter.py +2 -4
- llama_cloud/types/status_enum.py +4 -0
- llama_cloud/types/supported_llm_model_names.py +4 -0
- llama_cloud/types/text_block.py +31 -0
- llama_cloud/types/text_node.py +15 -8
- llama_cloud/types/token_text_splitter.py +1 -1
- llama_cloud/types/usage_metric_response.py +34 -0
- llama_cloud/types/user_job_record.py +32 -0
- llama_cloud/types/user_organization.py +5 -9
- llama_cloud/types/user_organization_create.py +4 -4
- llama_cloud/types/user_organization_delete.py +2 -2
- llama_cloud/types/user_organization_role.py +2 -2
- llama_cloud/types/vertex_text_embedding.py +5 -9
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/METADATA +2 -1
- llama_cloud-0.1.7.dist-info/RECORD +310 -0
- llama_cloud/types/value.py +0 -5
- llama_cloud-0.1.6.dist-info/RECORD +0 -241
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/WHEEL +0 -0
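The wheel rename above is only the version bump. A minimal sketch (assuming a standard pip-installed environment) of confirming which release is active after upgrading, using the distribution name from the wheel filename:

from importlib.metadata import version

# "llama-cloud" is the distribution name from the wheel filename above.
assert version("llama-cloud") == "0.1.7"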
llama_cloud/resources/pipelines/client.py +256 -344

Most of this file's diff is docstring churn, applied identically to PipelinesClient and its async mirror AsyncPipelinesClient:

- The usage examples are removed from the create_pipeline, upsert_pipeline, get_pipeline, update_existing_pipeline, sync_pipeline, copy_pipeline, import_pipeline_metadata, and sync_pipeline_data_source docstrings.
- Truncated example arguments such as pipeline_id=" become pipeline_id="string" (likewise file_id, document_id, data_source_id, job_id, eval_dataset_id, and eval_dataset_execution_id) in the remaining examples.
- Batch examples drop their PipelineFileCreate, PipelineDataSourceCreate, and CloudDocumentCreate imports and pass request=[] instead of populated lists.
- The search_pipelines, execute_eval_dataset, run_search, and chat examples gain fuller arguments: PipelineType.PLAYGROUND; EvalExecutionParamsOverride(llm_model=SupportedLlmModelNames.GPT_3_5_TURBO) with eval_question_ids=[]; MetadataFilters(filters=[], condition=FilterCondition.AND) plus RetrievalMode.CHUNKS; and a populated ChatData with PresetRetrievalParams and LlmParameters.
- Many parameter bullet lines appear to change only in whitespace (the rendered text is identical on both sides).

A representative docstring hunk:

@@ -80,12 +80,15 @@ class PipelinesClient:
 
         - organization_id: typing.Optional[str].
         ---
+        from llama_cloud import PipelineType
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
            token="YOUR_TOKEN",
         )
-        client.pipelines.search_pipelines(
+        client.pipelines.search_pipelines(
+            pipeline_type=PipelineType.PLAYGROUND,
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",

The one behavioral addition in these hunks is a new cancel_pipeline_sync method, which POSTs to api/v1/pipelines/{pipeline_id}/sync/cancel and returns a Pipeline:

@@ -428,21 +380,35 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def cancel_pipeline_sync(self, pipeline_id: str) -> Pipeline:
+        """
+        Parameters:
+        - pipeline_id: str.
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/sync/cancel"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def copy_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Copy a pipeline by ID.
 
         Parameters:
         - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.copy_pipeline(
-            pipeline_id="pipeline_id",
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",

AsyncPipelinesClient gains the identical method in hunk @@ -2138,21 +2046,35 @@, declared as async def cancel_pipeline_sync and awaiting the same request. The remaining hunks in both classes contain only the docstring changes summarized above.