llama-cloud 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (117)
  1. llama_cloud/__init__.py +76 -10
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/environment.py +1 -1
  4. llama_cloud/resources/__init__.py +23 -1
  5. llama_cloud/resources/data_sinks/client.py +26 -20
  6. llama_cloud/resources/data_sources/client.py +16 -16
  7. llama_cloud/resources/embedding_model_configs/__init__.py +23 -0
  8. llama_cloud/resources/embedding_model_configs/client.py +416 -0
  9. llama_cloud/resources/embedding_model_configs/types/__init__.py +23 -0
  10. llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py +89 -0
  11. llama_cloud/resources/evals/client.py +36 -26
  12. llama_cloud/resources/extraction/client.py +32 -32
  13. llama_cloud/resources/files/__init__.py +2 -2
  14. llama_cloud/resources/files/client.py +310 -54
  15. llama_cloud/resources/files/types/__init__.py +3 -1
  16. llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +7 -0
  17. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  18. llama_cloud/resources/organizations/client.py +125 -56
  19. llama_cloud/resources/parsing/client.py +652 -264
  20. llama_cloud/resources/pipelines/client.py +617 -310
  21. llama_cloud/resources/projects/client.py +341 -136
  22. llama_cloud/types/__init__.py +58 -10
  23. llama_cloud/types/azure_open_ai_embedding.py +12 -6
  24. llama_cloud/types/base_prompt_template.py +6 -2
  25. llama_cloud/types/bedrock_embedding.py +12 -6
  26. llama_cloud/types/character_splitter.py +4 -2
  27. llama_cloud/types/chat_message.py +1 -1
  28. llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
  29. llama_cloud/types/cloud_box_data_source.py +13 -6
  30. llama_cloud/types/cloud_confluence_data_source.py +7 -6
  31. llama_cloud/types/cloud_document.py +3 -1
  32. llama_cloud/types/cloud_document_create.py +3 -1
  33. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  34. llama_cloud/types/cloud_jira_data_source.py +7 -4
  35. llama_cloud/types/cloud_notion_page_data_source.py +3 -2
  36. llama_cloud/types/cloud_one_drive_data_source.py +6 -2
  37. llama_cloud/types/cloud_postgres_vector_store.py +1 -1
  38. llama_cloud/types/cloud_s_3_data_source.py +9 -4
  39. llama_cloud/types/cloud_sharepoint_data_source.py +9 -5
  40. llama_cloud/types/cloud_slack_data_source.py +7 -6
  41. llama_cloud/types/code_splitter.py +1 -1
  42. llama_cloud/types/cohere_embedding.py +7 -3
  43. llama_cloud/types/data_sink.py +4 -4
  44. llama_cloud/types/data_sink_create.py +1 -1
  45. llama_cloud/types/data_source.py +7 -5
  46. llama_cloud/types/data_source_create.py +4 -2
  47. llama_cloud/types/embedding_model_config.py +43 -0
  48. llama_cloud/types/embedding_model_config_embedding_config.py +89 -0
  49. llama_cloud/types/embedding_model_config_update.py +35 -0
  50. llama_cloud/types/embedding_model_config_update_embedding_config.py +89 -0
  51. llama_cloud/types/eval_dataset.py +2 -2
  52. llama_cloud/types/eval_dataset_job_record.py +13 -7
  53. llama_cloud/types/eval_execution_params_override.py +6 -2
  54. llama_cloud/types/eval_question.py +2 -2
  55. llama_cloud/types/extraction_result.py +2 -2
  56. llama_cloud/types/extraction_schema.py +5 -3
  57. llama_cloud/types/file.py +15 -7
  58. llama_cloud/types/file_permission_info_value.py +5 -0
  59. llama_cloud/types/filter_operator.py +2 -2
  60. llama_cloud/types/gemini_embedding.py +10 -6
  61. llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
  62. llama_cloud/types/input_message.py +3 -1
  63. llama_cloud/types/interval_usage_and_plan.py +36 -0
  64. llama_cloud/types/job_name_mapping.py +4 -0
  65. llama_cloud/types/llama_parse_parameters.py +21 -0
  66. llama_cloud/types/llm.py +4 -2
  67. llama_cloud/types/llm_parameters.py +5 -2
  68. llama_cloud/types/local_eval.py +10 -8
  69. llama_cloud/types/local_eval_results.py +1 -1
  70. llama_cloud/types/managed_ingestion_status_response.py +5 -3
  71. llama_cloud/types/markdown_element_node_parser.py +5 -3
  72. llama_cloud/types/markdown_node_parser.py +3 -2
  73. llama_cloud/types/metadata_filter.py +2 -2
  74. llama_cloud/types/metric_result.py +3 -3
  75. llama_cloud/types/node_parser.py +1 -1
  76. llama_cloud/types/open_ai_embedding.py +12 -6
  77. llama_cloud/types/organization.py +2 -2
  78. llama_cloud/types/page_splitter_node_parser.py +2 -2
  79. llama_cloud/types/paginated_list_pipeline_files_response.py +35 -0
  80. llama_cloud/types/parsing_job_structured_result.py +32 -0
  81. llama_cloud/types/permission.py +3 -3
  82. llama_cloud/types/pipeline.py +17 -6
  83. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  84. llama_cloud/types/pipeline_create.py +15 -4
  85. llama_cloud/types/pipeline_data_source.py +13 -7
  86. llama_cloud/types/pipeline_data_source_create.py +3 -1
  87. llama_cloud/types/pipeline_deployment.py +4 -4
  88. llama_cloud/types/pipeline_file.py +25 -10
  89. llama_cloud/types/pipeline_file_create.py +3 -1
  90. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  91. llama_cloud/types/plan.py +40 -0
  92. llama_cloud/types/playground_session.py +2 -2
  93. llama_cloud/types/preset_retrieval_params.py +14 -7
  94. llama_cloud/types/presigned_url.py +3 -1
  95. llama_cloud/types/project.py +2 -2
  96. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  97. llama_cloud/types/prompt_spec.py +4 -2
  98. llama_cloud/types/role.py +3 -3
  99. llama_cloud/types/sentence_splitter.py +4 -2
  100. llama_cloud/types/text_node.py +3 -3
  101. llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
  102. llama_cloud/types/token_text_splitter.py +1 -1
  103. llama_cloud/types/usage.py +41 -0
  104. llama_cloud/types/user_organization.py +9 -5
  105. llama_cloud/types/user_organization_create.py +4 -4
  106. llama_cloud/types/user_organization_delete.py +2 -2
  107. llama_cloud/types/user_organization_role.py +2 -2
  108. llama_cloud/types/value.py +5 -0
  109. llama_cloud/types/vertex_text_embedding.py +9 -5
  110. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +1 -1
  111. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +113 -99
  112. llama_cloud/types/data_sink_component.py +0 -20
  113. llama_cloud/types/data_source_component.py +0 -28
  114. llama_cloud/types/metadata_filter_value.py +0 -5
  115. llama_cloud/types/pipeline_data_source_component.py +0 -28
  116. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
  117. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +0 -0
@@ -23,6 +23,7 @@ from ...types.input_message import InputMessage
  from ...types.llama_parse_parameters import LlamaParseParameters
  from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
  from ...types.metadata_filters import MetadataFilters
+ from ...types.paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
  from ...types.pipeline import Pipeline
  from ...types.pipeline_create import PipelineCreate
  from ...types.pipeline_data_source import PipelineDataSource
@@ -59,42 +60,43 @@ class PipelinesClient:
  def search_pipelines(
  self,
  *,
+ project_id: typing.Optional[str] = None,
  project_name: typing.Optional[str] = None,
  pipeline_name: typing.Optional[str] = None,
  pipeline_type: typing.Optional[PipelineType] = None,
- project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
  ) -> typing.List[Pipeline]:
  """
  Search for pipelines by various parameters.

  Parameters:
+ - project_id: typing.Optional[str].
+
  - project_name: typing.Optional[str].

  - pipeline_name: typing.Optional[str].

  - pipeline_type: typing.Optional[PipelineType].

- - project_id: typing.Optional[str].
+ - organization_id: typing.Optional[str].
  ---
- from llama_cloud import PipelineType
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.pipelines.search_pipelines(
- pipeline_type=PipelineType.PLAYGROUND,
- )
+ client.pipelines.search_pipelines()
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
  params=remove_none_from_dict(
  {
+ "project_id": project_id,
  "project_name": project_name,
  "pipeline_name": pipeline_name,
  "pipeline_type": pipeline_type,
- "project_id": project_id,
+ "organization_id": organization_id,
  }
  ),
  headers=self._client_wrapper.get_headers(),
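
The hunk above reorders search_pipelines' optional filters: project_id moves to the front and a new organization_id filter is added, while the generated example is simplified to a bare call. A minimal sketch of a scoped search after upgrading (all IDs below are placeholders; every filter remains an optional keyword argument, so existing keyword-based calls keep working):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Pass only the filters you need; all of them are optional.
    pipelines = client.pipelines.search_pipelines(
        project_id="project_id",            # placeholder project ID
        organization_id="organization_id",  # new in 0.1.6; placeholder organization ID
        pipeline_name="my-pipeline",        # hypothetical pipeline name
    )
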
@@ -110,19 +112,39 @@ class PipelinesClient:
110
112
  raise ApiError(status_code=_response.status_code, body=_response.text)
111
113
  raise ApiError(status_code=_response.status_code, body=_response_json)
112
114
 
113
- def create_pipeline(self, *, project_id: typing.Optional[str] = None, request: PipelineCreate) -> Pipeline:
115
+ def create_pipeline(
116
+ self,
117
+ *,
118
+ project_id: typing.Optional[str] = None,
119
+ organization_id: typing.Optional[str] = None,
120
+ request: PipelineCreate,
121
+ ) -> Pipeline:
114
122
  """
115
123
  Create a new pipeline for a project.
116
124
 
117
125
  Parameters:
118
126
  - project_id: typing.Optional[str].
119
127
 
128
+ - organization_id: typing.Optional[str].
129
+
120
130
  - request: PipelineCreate.
131
+ ---
132
+ from llama_cloud import PipelineCreate
133
+ from llama_cloud.client import LlamaCloud
134
+
135
+ client = LlamaCloud(
136
+ token="YOUR_TOKEN",
137
+ )
138
+ client.pipelines.create_pipeline(
139
+ request=PipelineCreate(
140
+ name="name",
141
+ ),
142
+ )
121
143
  """
122
144
  _response = self._client_wrapper.httpx_client.request(
123
145
  "POST",
124
146
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
125
- params=remove_none_from_dict({"project_id": project_id}),
147
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
126
148
  json=jsonable_encoder(request),
127
149
  headers=self._client_wrapper.get_headers(),
128
150
  timeout=60,
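
create_pipeline (and upsert_pipeline in the next hunk) now forwards an optional organization_id query parameter alongside project_id. A short sketch using the PipelineCreate example from the generated docstring, with placeholder IDs:

    from llama_cloud import PipelineCreate
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    pipeline = client.pipelines.create_pipeline(
        project_id="project_id",            # optional; placeholder ID
        organization_id="organization_id",  # optional, new in 0.1.6; placeholder ID
        request=PipelineCreate(name="name"),
    )
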
@@ -137,7 +159,13 @@ class PipelinesClient:
137
159
  raise ApiError(status_code=_response.status_code, body=_response.text)
138
160
  raise ApiError(status_code=_response.status_code, body=_response_json)
139
161
 
140
- def upsert_pipeline(self, *, project_id: typing.Optional[str] = None, request: PipelineCreate) -> Pipeline:
162
+ def upsert_pipeline(
163
+ self,
164
+ *,
165
+ project_id: typing.Optional[str] = None,
166
+ organization_id: typing.Optional[str] = None,
167
+ request: PipelineCreate,
168
+ ) -> Pipeline:
141
169
  """
142
170
  Upsert a pipeline for a project.
143
171
  Updates if a pipeline with the same name and project_id already exists. Otherwise, creates a new pipeline.
@@ -145,12 +173,26 @@ class PipelinesClient:
145
173
  Parameters:
146
174
  - project_id: typing.Optional[str].
147
175
 
176
+ - organization_id: typing.Optional[str].
177
+
148
178
  - request: PipelineCreate.
179
+ ---
180
+ from llama_cloud import PipelineCreate
181
+ from llama_cloud.client import LlamaCloud
182
+
183
+ client = LlamaCloud(
184
+ token="YOUR_TOKEN",
185
+ )
186
+ client.pipelines.upsert_pipeline(
187
+ request=PipelineCreate(
188
+ name="name",
189
+ ),
190
+ )
149
191
  """
150
192
  _response = self._client_wrapper.httpx_client.request(
151
193
  "PUT",
152
194
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
153
- params=remove_none_from_dict({"project_id": project_id}),
195
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
154
196
  json=jsonable_encoder(request),
155
197
  headers=self._client_wrapper.get_headers(),
156
198
  timeout=60,
@@ -171,6 +213,15 @@ class PipelinesClient:
171
213
 
172
214
  Parameters:
173
215
  - pipeline_id: str.
216
+ ---
217
+ from llama_cloud.client import LlamaCloud
218
+
219
+ client = LlamaCloud(
220
+ token="YOUR_TOKEN",
221
+ )
222
+ client.pipelines.get_pipeline(
223
+ pipeline_id="pipeline_id",
224
+ )
174
225
  """
175
226
  _response = self._client_wrapper.httpx_client.request(
176
227
  "GET",
@@ -196,6 +247,7 @@ class PipelinesClient:
196
247
  transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
197
248
  configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
198
249
  data_sink_id: typing.Optional[str] = OMIT,
250
+ embedding_model_config_id: typing.Optional[str] = OMIT,
199
251
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
200
252
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
201
253
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
@@ -213,21 +265,32 @@ class PipelinesClient:
213
265
 
214
266
  - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
215
267
 
216
- - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
268
+ - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline.
269
+
270
+ - data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.
217
271
 
218
- - data_sink_id: typing.Optional[str].
272
+ - embedding_model_config_id: typing.Optional[str]. Embedding model config ID. When provided instead of embedding_config, the embedding model config will be looked up by ID.
219
273
 
220
- - data_sink: typing.Optional[DataSinkCreate].
274
+ - data_sink: typing.Optional[DataSinkCreate]. Data sink. When provided instead of data_sink_id, the data sink will be created.
221
275
 
222
- - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
276
+ - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams]. Preset retrieval parameters for the pipeline.
223
277
 
224
- - eval_parameters: typing.Optional[EvalExecutionParams].
278
+ - eval_parameters: typing.Optional[EvalExecutionParams]. Eval parameters for the pipeline.
225
279
 
226
- - llama_parse_parameters: typing.Optional[LlamaParseParameters].
280
+ - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
227
281
 
228
282
  - name: typing.Optional[str].
229
283
 
230
- - managed_pipeline_id: typing.Optional[str].
284
+ - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
285
+ ---
286
+ from llama_cloud.client import LlamaCloud
287
+
288
+ client = LlamaCloud(
289
+ token="YOUR_TOKEN",
290
+ )
291
+ client.pipelines.update_existing_pipeline(
292
+ pipeline_id="pipeline_id",
293
+ )
231
294
  """
232
295
  _request: typing.Dict[str, typing.Any] = {}
233
296
  if embedding_config is not OMIT:
@@ -238,6 +301,8 @@ class PipelinesClient:
238
301
  _request["configured_transformations"] = configured_transformations
239
302
  if data_sink_id is not OMIT:
240
303
  _request["data_sink_id"] = data_sink_id
304
+ if embedding_model_config_id is not OMIT:
305
+ _request["embedding_model_config_id"] = embedding_model_config_id
241
306
  if data_sink is not OMIT:
242
307
  _request["data_sink"] = data_sink
243
308
  if preset_retrieval_parameters is not OMIT:
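
The new embedding_model_config_id field follows the same pattern as data_sink_id: supply an ID to reference an existing embedding model config instead of inlining an embedding_config. A hedged sketch of update_existing_pipeline with placeholder IDs (whether the two ID fields are typically combined in one call is not shown in this diff):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    pipeline = client.pipelines.update_existing_pipeline(
        pipeline_id="pipeline_id",                               # placeholder ID
        embedding_model_config_id="embedding_model_config_id",   # new in 0.1.6; looked up by ID
        data_sink_id="data_sink_id",                             # looked up by ID, per the docstring
    )
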
@@ -280,7 +345,7 @@ class PipelinesClient:
280
345
  token="YOUR_TOKEN",
281
346
  )
282
347
  client.pipelines.delete_pipeline(
283
- pipeline_id="string",
348
+ pipeline_id="pipeline_id",
284
349
  )
285
350
  """
286
351
  _response = self._client_wrapper.httpx_client.request(
@@ -312,7 +377,7 @@ class PipelinesClient:
312
377
  token="YOUR_TOKEN",
313
378
  )
314
379
  client.pipelines.get_pipeline_status(
315
- pipeline_id="string",
380
+ pipeline_id="pipeline_id",
316
381
  )
317
382
  """
318
383
  _response = self._client_wrapper.httpx_client.request(
@@ -337,6 +402,15 @@ class PipelinesClient:
337
402
 
338
403
  Parameters:
339
404
  - pipeline_id: str.
405
+ ---
406
+ from llama_cloud.client import LlamaCloud
407
+
408
+ client = LlamaCloud(
409
+ token="YOUR_TOKEN",
410
+ )
411
+ client.pipelines.sync_pipeline(
412
+ pipeline_id="pipeline_id",
413
+ )
340
414
  """
341
415
  _response = self._client_wrapper.httpx_client.request(
342
416
  "POST",
@@ -360,6 +434,15 @@ class PipelinesClient:
360
434
 
361
435
  Parameters:
362
436
  - pipeline_id: str.
437
+ ---
438
+ from llama_cloud.client import LlamaCloud
439
+
440
+ client = LlamaCloud(
441
+ token="YOUR_TOKEN",
442
+ )
443
+ client.pipelines.copy_pipeline(
444
+ pipeline_id="pipeline_id",
445
+ )
363
446
  """
364
447
  _response = self._client_wrapper.httpx_client.request(
365
448
  "POST",
@@ -377,14 +460,14 @@ class PipelinesClient:
377
460
  raise ApiError(status_code=_response.status_code, body=_response.text)
378
461
  raise ApiError(status_code=_response.status_code, body=_response_json)
379
462
 
380
- def get_eval_dataset_executions(self, pipeline_id: str, eval_dataset_id: str) -> typing.List[EvalDatasetJobRecord]:
463
+ def get_eval_dataset_executions(self, eval_dataset_id: str, pipeline_id: str) -> typing.List[EvalDatasetJobRecord]:
381
464
  """
382
465
  Get the status of an EvalDatasetExecution.
383
466
 
384
467
  Parameters:
385
- - pipeline_id: str.
386
-
387
468
  - eval_dataset_id: str.
469
+
470
+ - pipeline_id: str.
388
471
  ---
389
472
  from llama_cloud.client import LlamaCloud
390
473
 
@@ -392,8 +475,8 @@ class PipelinesClient:
392
475
  token="YOUR_TOKEN",
393
476
  )
394
477
  client.pipelines.get_eval_dataset_executions(
395
- pipeline_id="string",
396
- eval_dataset_id="string",
478
+ eval_dataset_id="eval_dataset_id",
479
+ pipeline_id="pipeline_id",
397
480
  )
398
481
  """
399
482
  _response = self._client_wrapper.httpx_client.request(
@@ -417,8 +500,8 @@ class PipelinesClient:
417
500
 
418
501
  def execute_eval_dataset(
419
502
  self,
420
- pipeline_id: str,
421
503
  eval_dataset_id: str,
504
+ pipeline_id: str,
422
505
  *,
423
506
  eval_question_ids: typing.List[str],
424
507
  params: typing.Optional[EvalExecutionParamsOverride] = OMIT,
@@ -427,27 +510,23 @@ class PipelinesClient:
427
510
  Execute a dataset.
428
511
 
429
512
  Parameters:
430
- - pipeline_id: str.
431
-
432
513
  - eval_dataset_id: str.
433
514
 
515
+ - pipeline_id: str.
516
+
434
517
  - eval_question_ids: typing.List[str].
435
518
 
436
519
  - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
437
520
  ---
438
- from llama_cloud import EvalExecutionParamsOverride, SupportedLlmModelNames
439
521
  from llama_cloud.client import LlamaCloud
440
522
 
441
523
  client = LlamaCloud(
442
524
  token="YOUR_TOKEN",
443
525
  )
444
526
  client.pipelines.execute_eval_dataset(
445
- pipeline_id="string",
446
- eval_dataset_id="string",
447
- eval_question_ids=[],
448
- params=EvalExecutionParamsOverride(
449
- llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
450
- ),
527
+ eval_dataset_id="eval_dataset_id",
528
+ pipeline_id="pipeline_id",
529
+ eval_question_ids=["eval_question_ids"],
451
530
  )
452
531
  """
453
532
  _request: typing.Dict[str, typing.Any] = {"eval_question_ids": eval_question_ids}
@@ -474,7 +553,7 @@ class PipelinesClient:
474
553
  raise ApiError(status_code=_response.status_code, body=_response_json)
475
554
 
476
555
  def get_eval_dataset_execution_result(
477
- self, pipeline_id: str, eval_dataset_id: str
556
+ self, eval_dataset_id: str, pipeline_id: str
478
557
  ) -> typing.List[EvalQuestionResult]:
479
558
  """
480
559
  Get the result of an EvalDatasetExecution.
@@ -483,9 +562,9 @@ class PipelinesClient:
483
562
  If any of the specified questions do not have a result, they will be ignored.
484
563
 
485
564
  Parameters:
486
- - pipeline_id: str.
487
-
488
565
  - eval_dataset_id: str.
566
+
567
+ - pipeline_id: str.
489
568
  ---
490
569
  from llama_cloud.client import LlamaCloud
491
570
 
@@ -493,8 +572,8 @@ class PipelinesClient:
493
572
  token="YOUR_TOKEN",
494
573
  )
495
574
  client.pipelines.get_eval_dataset_execution_result(
496
- pipeline_id="string",
497
- eval_dataset_id="string",
575
+ eval_dataset_id="eval_dataset_id",
576
+ pipeline_id="pipeline_id",
498
577
  )
499
578
  """
500
579
  _response = self._client_wrapper.httpx_client.request(
@@ -517,17 +596,17 @@ class PipelinesClient:
517
596
  raise ApiError(status_code=_response.status_code, body=_response_json)
518
597
 
519
598
  def get_eval_dataset_execution(
520
- self, pipeline_id: str, eval_dataset_id: str, eval_dataset_execution_id: str
599
+ self, eval_dataset_id: str, eval_dataset_execution_id: str, pipeline_id: str
521
600
  ) -> EvalDatasetJobRecord:
522
601
  """
523
602
  Get the status of an EvalDatasetExecution.
524
603
 
525
604
  Parameters:
526
- - pipeline_id: str.
527
-
528
605
  - eval_dataset_id: str.
529
606
 
530
607
  - eval_dataset_execution_id: str.
608
+
609
+ - pipeline_id: str.
531
610
  ---
532
611
  from llama_cloud.client import LlamaCloud
533
612
 
@@ -535,9 +614,9 @@ class PipelinesClient:
535
614
  token="YOUR_TOKEN",
536
615
  )
537
616
  client.pipelines.get_eval_dataset_execution(
538
- pipeline_id="string",
539
- eval_dataset_id="string",
540
- eval_dataset_execution_id="string",
617
+ eval_dataset_id="eval_dataset_id",
618
+ eval_dataset_execution_id="eval_dataset_execution_id",
619
+ pipeline_id="pipeline_id",
541
620
  )
542
621
  """
543
622
  _response = self._client_wrapper.httpx_client.request(
@@ -582,7 +661,7 @@ class PipelinesClient:
582
661
  token="YOUR_TOKEN",
583
662
  )
584
663
  client.pipelines.list_pipeline_files(
585
- pipeline_id="string",
664
+ pipeline_id="pipeline_id",
586
665
  )
587
666
  """
588
667
  _response = self._client_wrapper.httpx_client.request(
@@ -615,14 +694,19 @@ class PipelinesClient:
615
694
 
616
695
  - request: typing.List[PipelineFileCreate].
617
696
  ---
697
+ from llama_cloud import PipelineFileCreate
618
698
  from llama_cloud.client import LlamaCloud
619
699
 
620
700
  client = LlamaCloud(
621
701
  token="YOUR_TOKEN",
622
702
  )
623
703
  client.pipelines.add_files_to_pipeline(
624
- pipeline_id="string",
625
- request=[],
704
+ pipeline_id="pipeline_id",
705
+ request=[
706
+ PipelineFileCreate(
707
+ file_id="file_id",
708
+ )
709
+ ],
626
710
  )
627
711
  """
628
712
  _response = self._client_wrapper.httpx_client.request(
@@ -642,14 +726,70 @@ class PipelinesClient:
642
726
  raise ApiError(status_code=_response.status_code, body=_response.text)
643
727
  raise ApiError(status_code=_response.status_code, body=_response_json)
644
728
 
645
- def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
729
+ def list_pipeline_files_2(
730
+ self,
731
+ pipeline_id: str,
732
+ *,
733
+ data_source_id: typing.Optional[str] = None,
734
+ only_manually_uploaded: typing.Optional[bool] = None,
735
+ limit: typing.Optional[int] = None,
736
+ offset: typing.Optional[int] = None,
737
+ ) -> PaginatedListPipelineFilesResponse:
646
738
  """
647
- Get status of a file for a pipeline.
739
+ Get files for a pipeline.
648
740
 
649
741
  Parameters:
650
742
  - pipeline_id: str.
651
743
 
744
+ - data_source_id: typing.Optional[str].
745
+
746
+ - only_manually_uploaded: typing.Optional[bool].
747
+
748
+ - limit: typing.Optional[int].
749
+
750
+ - offset: typing.Optional[int].
751
+ ---
752
+ from llama_cloud.client import LlamaCloud
753
+
754
+ client = LlamaCloud(
755
+ token="YOUR_TOKEN",
756
+ )
757
+ client.pipelines.list_pipeline_files_2(
758
+ pipeline_id="pipeline_id",
759
+ )
760
+ """
761
+ _response = self._client_wrapper.httpx_client.request(
762
+ "GET",
763
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files2"),
764
+ params=remove_none_from_dict(
765
+ {
766
+ "data_source_id": data_source_id,
767
+ "only_manually_uploaded": only_manually_uploaded,
768
+ "limit": limit,
769
+ "offset": offset,
770
+ }
771
+ ),
772
+ headers=self._client_wrapper.get_headers(),
773
+ timeout=60,
774
+ )
775
+ if 200 <= _response.status_code < 300:
776
+ return pydantic.parse_obj_as(PaginatedListPipelineFilesResponse, _response.json()) # type: ignore
777
+ if _response.status_code == 422:
778
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
779
+ try:
780
+ _response_json = _response.json()
781
+ except JSONDecodeError:
782
+ raise ApiError(status_code=_response.status_code, body=_response.text)
783
+ raise ApiError(status_code=_response.status_code, body=_response_json)
784
+
785
+ def get_pipeline_file_status(self, file_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
786
+ """
787
+ Get status of a file for a pipeline.
788
+
789
+ Parameters:
652
790
  - file_id: str.
791
+
792
+ - pipeline_id: str.
653
793
  ---
654
794
  from llama_cloud.client import LlamaCloud
655
795
 
@@ -657,8 +797,8 @@ class PipelinesClient:
657
797
  token="YOUR_TOKEN",
658
798
  )
659
799
  client.pipelines.get_pipeline_file_status(
660
- pipeline_id="string",
661
- file_id="string",
800
+ file_id="file_id",
801
+ pipeline_id="pipeline_id",
662
802
  )
663
803
  """
664
804
  _response = self._client_wrapper.httpx_client.request(
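
The list_pipeline_files_2 method added a few hunks above hits the new files2 endpoint and returns a PaginatedListPipelineFilesResponse instead of a flat list, so large pipelines can be paged through with limit/offset. A sketch combining it with the per-file status check from this hunk (IDs and the page size are placeholders; the fields of the paginated response are not shown in this diff):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Page through manually uploaded files, 100 at a time.
    page = client.pipelines.list_pipeline_files_2(
        pipeline_id="pipeline_id",      # placeholder ID
        only_manually_uploaded=True,
        limit=100,
        offset=0,
    )

    # Check ingestion status for a specific file (file_id is now the first argument).
    status = client.pipelines.get_pipeline_file_status(
        file_id="file_id",              # placeholder ID
        pipeline_id="pipeline_id",
    )
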
@@ -681,8 +821,8 @@ class PipelinesClient:
681
821
 
682
822
  def update_pipeline_file(
683
823
  self,
684
- pipeline_id: str,
685
824
  file_id: str,
825
+ pipeline_id: str,
686
826
  *,
687
827
  custom_metadata: typing.Optional[
688
828
  typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
@@ -692,11 +832,11 @@ class PipelinesClient:
692
832
  Update a file for a pipeline.
693
833
 
694
834
  Parameters:
695
- - pipeline_id: str.
696
-
697
835
  - file_id: str.
698
836
 
699
- - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
837
+ - pipeline_id: str.
838
+
839
+ - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]]. Custom metadata for the file
700
840
  ---
701
841
  from llama_cloud.client import LlamaCloud
702
842
 
@@ -704,8 +844,8 @@ class PipelinesClient:
704
844
  token="YOUR_TOKEN",
705
845
  )
706
846
  client.pipelines.update_pipeline_file(
707
- pipeline_id="string",
708
- file_id="string",
847
+ file_id="file_id",
848
+ pipeline_id="pipeline_id",
709
849
  )
710
850
  """
711
851
  _request: typing.Dict[str, typing.Any] = {}
@@ -730,14 +870,14 @@ class PipelinesClient:
730
870
  raise ApiError(status_code=_response.status_code, body=_response.text)
731
871
  raise ApiError(status_code=_response.status_code, body=_response_json)
732
872
 
733
- def delete_pipeline_file(self, pipeline_id: str, file_id: str) -> None:
873
+ def delete_pipeline_file(self, file_id: str, pipeline_id: str) -> None:
734
874
  """
735
875
  Delete a file from a pipeline.
736
876
 
737
877
  Parameters:
738
- - pipeline_id: str.
739
-
740
878
  - file_id: str.
879
+
880
+ - pipeline_id: str.
741
881
  ---
742
882
  from llama_cloud.client import LlamaCloud
743
883
 
@@ -745,8 +885,8 @@ class PipelinesClient:
745
885
  token="YOUR_TOKEN",
746
886
  )
747
887
  client.pipelines.delete_pipeline_file(
748
- pipeline_id="string",
749
- file_id="string",
888
+ file_id="file_id",
889
+ pipeline_id="pipeline_id",
750
890
  )
751
891
  """
752
892
  _response = self._client_wrapper.httpx_client.request(
@@ -775,6 +915,15 @@ class PipelinesClient:
775
915
  - pipeline_id: str.
776
916
 
777
917
  - upload_file: typing.IO.
918
+ ---
919
+ from llama_cloud.client import LlamaCloud
920
+
921
+ client = LlamaCloud(
922
+ token="YOUR_TOKEN",
923
+ )
924
+ client.pipelines.import_pipeline_metadata(
925
+ pipeline_id="pipeline_id",
926
+ )
778
927
  """
779
928
  _response = self._client_wrapper.httpx_client.request(
780
929
  "PUT",
@@ -807,7 +956,7 @@ class PipelinesClient:
807
956
  token="YOUR_TOKEN",
808
957
  )
809
958
  client.pipelines.delete_pipeline_files_metadata(
810
- pipeline_id="string",
959
+ pipeline_id="pipeline_id",
811
960
  )
812
961
  """
813
962
  _response = self._client_wrapper.httpx_client.request(
@@ -839,7 +988,7 @@ class PipelinesClient:
839
988
  token="YOUR_TOKEN",
840
989
  )
841
990
  client.pipelines.list_pipeline_data_sources(
842
- pipeline_id="string",
991
+ pipeline_id="pipeline_id",
843
992
  )
844
993
  """
845
994
  _response = self._client_wrapper.httpx_client.request(
@@ -871,14 +1020,19 @@ class PipelinesClient:
871
1020
 
872
1021
  - request: typing.List[PipelineDataSourceCreate].
873
1022
  ---
1023
+ from llama_cloud import PipelineDataSourceCreate
874
1024
  from llama_cloud.client import LlamaCloud
875
1025
 
876
1026
  client = LlamaCloud(
877
1027
  token="YOUR_TOKEN",
878
1028
  )
879
1029
  client.pipelines.add_data_sources_to_pipeline(
880
- pipeline_id="string",
881
- request=[],
1030
+ pipeline_id="pipeline_id",
1031
+ request=[
1032
+ PipelineDataSourceCreate(
1033
+ data_source_id="data_source_id",
1034
+ )
1035
+ ],
882
1036
  )
883
1037
  """
884
1038
  _response = self._client_wrapper.httpx_client.request(
@@ -901,17 +1055,17 @@ class PipelinesClient:
901
1055
  raise ApiError(status_code=_response.status_code, body=_response_json)
902
1056
 
903
1057
  def update_pipeline_data_source(
904
- self, pipeline_id: str, data_source_id: str, *, sync_interval: typing.Optional[float] = OMIT
1058
+ self, data_source_id: str, pipeline_id: str, *, sync_interval: typing.Optional[float] = OMIT
905
1059
  ) -> PipelineDataSource:
906
1060
  """
907
1061
  Update the configuration of a data source in a pipeline.
908
1062
 
909
1063
  Parameters:
910
- - pipeline_id: str.
911
-
912
1064
  - data_source_id: str.
913
1065
 
914
- - sync_interval: typing.Optional[float].
1066
+ - pipeline_id: str.
1067
+
1068
+ - sync_interval: typing.Optional[float]. The interval at which the data source should be synced.
915
1069
  ---
916
1070
  from llama_cloud.client import LlamaCloud
917
1071
 
@@ -919,8 +1073,8 @@ class PipelinesClient:
919
1073
  token="YOUR_TOKEN",
920
1074
  )
921
1075
  client.pipelines.update_pipeline_data_source(
922
- pipeline_id="string",
923
- data_source_id="string",
1076
+ data_source_id="data_source_id",
1077
+ pipeline_id="pipeline_id",
924
1078
  )
925
1079
  """
926
1080
  _request: typing.Dict[str, typing.Any] = {}
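
sync_interval is documented above only as "the interval at which the data source should be synced"; its unit is not stated in this diff, so the value below is purely a placeholder. A minimal keyword-based sketch:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    data_source = client.pipelines.update_pipeline_data_source(
        data_source_id="data_source_id",  # placeholder ID
        pipeline_id="pipeline_id",        # placeholder ID
        sync_interval=3600.0,             # placeholder value; unit not specified in the docstring
    )
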
@@ -946,14 +1100,14 @@ class PipelinesClient:
946
1100
  raise ApiError(status_code=_response.status_code, body=_response.text)
947
1101
  raise ApiError(status_code=_response.status_code, body=_response_json)
948
1102
 
949
- def delete_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> None:
1103
+ def delete_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> None:
950
1104
  """
951
1105
  Delete a data source from a pipeline.
952
1106
 
953
1107
  Parameters:
954
- - pipeline_id: str.
955
-
956
1108
  - data_source_id: str.
1109
+
1110
+ - pipeline_id: str.
957
1111
  ---
958
1112
  from llama_cloud.client import LlamaCloud
959
1113
 
@@ -961,8 +1115,8 @@ class PipelinesClient:
961
1115
  token="YOUR_TOKEN",
962
1116
  )
963
1117
  client.pipelines.delete_pipeline_data_source(
964
- pipeline_id="string",
965
- data_source_id="string",
1118
+ data_source_id="data_source_id",
1119
+ pipeline_id="pipeline_id",
966
1120
  )
967
1121
  """
968
1122
  _response = self._client_wrapper.httpx_client.request(
@@ -984,14 +1138,24 @@ class PipelinesClient:
984
1138
  raise ApiError(status_code=_response.status_code, body=_response.text)
985
1139
  raise ApiError(status_code=_response.status_code, body=_response_json)
986
1140
 
987
- def sync_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> Pipeline:
1141
+ def sync_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> Pipeline:
988
1142
  """
989
1143
  Run ingestion for the pipeline data source by incrementally updating the data-sink with upstream changes from data-source.
990
1144
 
991
1145
  Parameters:
1146
+ - data_source_id: str.
1147
+
992
1148
  - pipeline_id: str.
1149
+ ---
1150
+ from llama_cloud.client import LlamaCloud
993
1151
 
994
- - data_source_id: str.
1152
+ client = LlamaCloud(
1153
+ token="YOUR_TOKEN",
1154
+ )
1155
+ client.pipelines.sync_pipeline_data_source(
1156
+ data_source_id="data_source_id",
1157
+ pipeline_id="pipeline_id",
1158
+ )
995
1159
  """
996
1160
  _response = self._client_wrapper.httpx_client.request(
997
1161
  "POST",
@@ -1012,14 +1176,14 @@ class PipelinesClient:
1012
1176
  raise ApiError(status_code=_response.status_code, body=_response.text)
1013
1177
  raise ApiError(status_code=_response.status_code, body=_response_json)
1014
1178
 
1015
- def get_pipeline_data_source_status(self, pipeline_id: str, data_source_id: str) -> ManagedIngestionStatusResponse:
1179
+ def get_pipeline_data_source_status(self, data_source_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
1016
1180
  """
1017
1181
  Get the status of a data source for a pipeline.
1018
1182
 
1019
1183
  Parameters:
1020
- - pipeline_id: str.
1021
-
1022
1184
  - data_source_id: str.
1185
+
1186
+ - pipeline_id: str.
1023
1187
  ---
1024
1188
  from llama_cloud.client import LlamaCloud
1025
1189
 
@@ -1027,8 +1191,8 @@ class PipelinesClient:
1027
1191
  token="YOUR_TOKEN",
1028
1192
  )
1029
1193
  client.pipelines.get_pipeline_data_source_status(
1030
- pipeline_id="string",
1031
- data_source_id="string",
1194
+ data_source_id="data_source_id",
1195
+ pipeline_id="pipeline_id",
1032
1196
  )
1033
1197
  """
1034
1198
  _response = self._client_wrapper.httpx_client.request(
@@ -1055,6 +1219,7 @@ class PipelinesClient:
1055
1219
  pipeline_id: str,
1056
1220
  *,
1057
1221
  dense_similarity_top_k: typing.Optional[int] = OMIT,
1222
+ dense_similarity_cutoff: typing.Optional[float] = OMIT,
1058
1223
  sparse_similarity_top_k: typing.Optional[int] = OMIT,
1059
1224
  enable_reranking: typing.Optional[bool] = OMIT,
1060
1225
  rerank_top_n: typing.Optional[int] = OMIT,
@@ -1072,19 +1237,21 @@ class PipelinesClient:
1072
1237
  Parameters:
1073
1238
  - pipeline_id: str.
1074
1239
 
1075
- - dense_similarity_top_k: typing.Optional[int].
1240
+ - dense_similarity_top_k: typing.Optional[int]. Number of nodes for dense retrieval.
1241
+
1242
+ - dense_similarity_cutoff: typing.Optional[float]. Minimum similarity score wrt query for retrieval
1076
1243
 
1077
- - sparse_similarity_top_k: typing.Optional[int].
1244
+ - sparse_similarity_top_k: typing.Optional[int]. Number of nodes for sparse retrieval.
1078
1245
 
1079
- - enable_reranking: typing.Optional[bool].
1246
+ - enable_reranking: typing.Optional[bool]. Enable reranking for retrieval
1080
1247
 
1081
- - rerank_top_n: typing.Optional[int].
1248
+ - rerank_top_n: typing.Optional[int]. Number of reranked nodes for returning.
1082
1249
 
1083
- - alpha: typing.Optional[float].
1250
+ - alpha: typing.Optional[float]. Alpha value for hybrid retrieval to determine the weights between dense and sparse retrieval. 0 is sparse retrieval and 1 is dense retrieval.
1084
1251
 
1085
- - search_filters: typing.Optional[MetadataFilters].
1252
+ - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.
1086
1253
 
1087
- - files_top_k: typing.Optional[int].
1254
+ - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
1088
1255
 
1089
1256
  - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
1090
1257
 
@@ -1094,25 +1261,21 @@ class PipelinesClient:
1094
1261
 
1095
1262
  - class_name: typing.Optional[str].
1096
1263
  ---
1097
- from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
1098
1264
  from llama_cloud.client import LlamaCloud
1099
1265
 
1100
1266
  client = LlamaCloud(
1101
1267
  token="YOUR_TOKEN",
1102
1268
  )
1103
1269
  client.pipelines.run_search(
1104
- pipeline_id="string",
1105
- search_filters=MetadataFilters(
1106
- filters=[],
1107
- condition=FilterCondition.AND,
1108
- ),
1109
- retrieval_mode=RetrievalMode.CHUNKS,
1110
- query="string",
1270
+ pipeline_id="pipeline_id",
1271
+ query="query",
1111
1272
  )
1112
1273
  """
1113
1274
  _request: typing.Dict[str, typing.Any] = {"query": query}
1114
1275
  if dense_similarity_top_k is not OMIT:
1115
1276
  _request["dense_similarity_top_k"] = dense_similarity_top_k
1277
+ if dense_similarity_cutoff is not OMIT:
1278
+ _request["dense_similarity_cutoff"] = dense_similarity_cutoff
1116
1279
  if sparse_similarity_top_k is not OMIT:
1117
1280
  _request["sparse_similarity_top_k"] = sparse_similarity_top_k
1118
1281
  if enable_reranking is not OMIT:
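
run_search gains a dense_similarity_cutoff knob next to the newly documented alpha weighting (0 is pure sparse retrieval, 1 is pure dense retrieval, per the docstring above). A hedged sketch with illustrative values:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    results = client.pipelines.run_search(
        pipeline_id="pipeline_id",     # placeholder ID
        query="query",
        dense_similarity_top_k=5,      # number of nodes from dense retrieval
        dense_similarity_cutoff=0.7,   # new in 0.1.6: minimum similarity score; illustrative value
        alpha=0.5,                     # hybrid weighting between sparse (0) and dense (1)
    )
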
@@ -1161,7 +1324,7 @@ class PipelinesClient:
1161
1324
  token="YOUR_TOKEN",
1162
1325
  )
1163
1326
  client.pipelines.list_pipeline_jobs(
1164
- pipeline_id="string",
1327
+ pipeline_id="pipeline_id",
1165
1328
  )
1166
1329
  """
1167
1330
  _response = self._client_wrapper.httpx_client.request(
@@ -1180,14 +1343,14 @@ class PipelinesClient:
1180
1343
  raise ApiError(status_code=_response.status_code, body=_response.text)
1181
1344
  raise ApiError(status_code=_response.status_code, body=_response_json)
1182
1345
 
1183
- def get_pipeline_job(self, pipeline_id: str, job_id: str) -> PipelineDeployment:
1346
+ def get_pipeline_job(self, job_id: str, pipeline_id: str) -> PipelineDeployment:
1184
1347
  """
1185
1348
  Get a job for a pipeline.
1186
1349
 
1187
1350
  Parameters:
1188
- - pipeline_id: str.
1189
-
1190
1351
  - job_id: str.
1352
+
1353
+ - pipeline_id: str.
1191
1354
  ---
1192
1355
  from llama_cloud.client import LlamaCloud
1193
1356
 
@@ -1195,8 +1358,8 @@ class PipelinesClient:
1195
1358
  token="YOUR_TOKEN",
1196
1359
  )
1197
1360
  client.pipelines.get_pipeline_job(
1198
- pipeline_id="string",
1199
- job_id="string",
1361
+ job_id="job_id",
1362
+ pipeline_id="pipeline_id",
1200
1363
  )
1201
1364
  """
1202
1365
  _response = self._client_wrapper.httpx_client.request(
@@ -1230,7 +1393,7 @@ class PipelinesClient:
1230
1393
  token="YOUR_TOKEN",
1231
1394
  )
1232
1395
  client.pipelines.get_playground_session(
1233
- pipeline_id="string",
1396
+ pipeline_id="pipeline_id",
1234
1397
  )
1235
1398
  """
1236
1399
  _response = self._client_wrapper.httpx_client.request(
@@ -1271,34 +1434,13 @@ class PipelinesClient:
1271
1434
 
1272
1435
  - class_name: typing.Optional[str].
1273
1436
  ---
1274
- from llama_cloud import (
1275
- ChatData,
1276
- FilterCondition,
1277
- LlmParameters,
1278
- MetadataFilters,
1279
- PresetRetrievalParams,
1280
- RetrievalMode,
1281
- SupportedLlmModelNames,
1282
- )
1283
1437
  from llama_cloud.client import LlamaCloud
1284
1438
 
1285
1439
  client = LlamaCloud(
1286
1440
  token="YOUR_TOKEN",
1287
1441
  )
1288
1442
  client.pipelines.chat(
1289
- pipeline_id="string",
1290
- data=ChatData(
1291
- retrieval_parameters=PresetRetrievalParams(
1292
- search_filters=MetadataFilters(
1293
- filters=[],
1294
- condition=FilterCondition.AND,
1295
- ),
1296
- retrieval_mode=RetrievalMode.CHUNKS,
1297
- ),
1298
- llm_parameters=LlmParameters(
1299
- model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
1300
- ),
1301
- ),
1443
+ pipeline_id="pipeline_id",
1302
1444
  )
1303
1445
  """
1304
1446
  _request: typing.Dict[str, typing.Any] = {}
@@ -1354,7 +1496,7 @@ class PipelinesClient:
1354
1496
  token="YOUR_TOKEN",
1355
1497
  )
1356
1498
  client.pipelines.list_pipeline_documents(
1357
- pipeline_id="string",
1499
+ pipeline_id="pipeline_id",
1358
1500
  )
1359
1501
  """
1360
1502
  _response = self._client_wrapper.httpx_client.request(
@@ -1389,14 +1531,20 @@ class PipelinesClient:
1389
1531
 
1390
1532
  - request: typing.List[CloudDocumentCreate].
1391
1533
  ---
1534
+ from llama_cloud import CloudDocumentCreate
1392
1535
  from llama_cloud.client import LlamaCloud
1393
1536
 
1394
1537
  client = LlamaCloud(
1395
1538
  token="YOUR_TOKEN",
1396
1539
  )
1397
1540
  client.pipelines.create_batch_pipeline_documents(
1398
- pipeline_id="string",
1399
- request=[],
1541
+ pipeline_id="pipeline_id",
1542
+ request=[
1543
+ CloudDocumentCreate(
1544
+ text="text",
1545
+ metadata={"key": "value"},
1546
+ )
1547
+ ],
1400
1548
  )
1401
1549
  """
1402
1550
  _response = self._client_wrapper.httpx_client.request(
@@ -1429,14 +1577,20 @@ class PipelinesClient:
1429
1577
 
1430
1578
  - request: typing.List[CloudDocumentCreate].
1431
1579
  ---
1580
+ from llama_cloud import CloudDocumentCreate
1432
1581
  from llama_cloud.client import LlamaCloud
1433
1582
 
1434
1583
  client = LlamaCloud(
1435
1584
  token="YOUR_TOKEN",
1436
1585
  )
1437
1586
  client.pipelines.upsert_batch_pipeline_documents(
1438
- pipeline_id="string",
1439
- request=[],
1587
+ pipeline_id="pipeline_id",
1588
+ request=[
1589
+ CloudDocumentCreate(
1590
+ text="text",
1591
+ metadata={"key": "value"},
1592
+ )
1593
+ ],
1440
1594
  )
1441
1595
  """
1442
1596
  _response = self._client_wrapper.httpx_client.request(
@@ -1458,14 +1612,14 @@ class PipelinesClient:
1458
1612
  raise ApiError(status_code=_response.status_code, body=_response.text)
1459
1613
  raise ApiError(status_code=_response.status_code, body=_response_json)
1460
1614
 
1461
- def get_pipeline_document(self, pipeline_id: str, document_id: str) -> CloudDocument:
1615
+ def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
1462
1616
  """
1463
1617
  Return a single document for a pipeline.
1464
1618
 
1465
1619
  Parameters:
1466
- - pipeline_id: str.
1467
-
1468
1620
  - document_id: str.
1621
+
1622
+ - pipeline_id: str.
1469
1623
  ---
1470
1624
  from llama_cloud.client import LlamaCloud
1471
1625
 
@@ -1473,8 +1627,8 @@ class PipelinesClient:
1473
1627
  token="YOUR_TOKEN",
1474
1628
  )
1475
1629
  client.pipelines.get_pipeline_document(
1476
- pipeline_id="string",
1477
- document_id="string",
1630
+ document_id="document_id",
1631
+ pipeline_id="pipeline_id",
1478
1632
  )
1479
1633
  """
1480
1634
  _response = self._client_wrapper.httpx_client.request(
@@ -1495,14 +1649,14 @@ class PipelinesClient:
1495
1649
  raise ApiError(status_code=_response.status_code, body=_response.text)
1496
1650
  raise ApiError(status_code=_response.status_code, body=_response_json)
1497
1651
 
1498
- def delete_pipeline_document(self, pipeline_id: str, document_id: str) -> None:
1652
+ def delete_pipeline_document(self, document_id: str, pipeline_id: str) -> None:
1499
1653
  """
1500
1654
  Delete a document for a pipeline.
1501
1655
 
1502
1656
  Parameters:
1503
- - pipeline_id: str.
1504
-
1505
1657
  - document_id: str.
1658
+
1659
+ - pipeline_id: str.
1506
1660
  ---
1507
1661
  from llama_cloud.client import LlamaCloud
1508
1662
 
@@ -1510,8 +1664,8 @@ class PipelinesClient:
1510
1664
  token="YOUR_TOKEN",
1511
1665
  )
1512
1666
  client.pipelines.delete_pipeline_document(
1513
- pipeline_id="string",
1514
- document_id="string",
1667
+ document_id="document_id",
1668
+ pipeline_id="pipeline_id",
1515
1669
  )
1516
1670
  """
1517
1671
  _response = self._client_wrapper.httpx_client.request(
@@ -1532,14 +1686,14 @@ class PipelinesClient:
1532
1686
  raise ApiError(status_code=_response.status_code, body=_response.text)
1533
1687
  raise ApiError(status_code=_response.status_code, body=_response_json)
1534
1688
 
1535
- def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatusResponse:
1689
+ def get_pipeline_document_status(self, document_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
1536
1690
  """
1537
1691
  Return a single document for a pipeline.
1538
1692
 
1539
1693
  Parameters:
1540
- - pipeline_id: str.
1541
-
1542
1694
  - document_id: str.
1695
+
1696
+ - pipeline_id: str.
1543
1697
  ---
1544
1698
  from llama_cloud.client import LlamaCloud
1545
1699
 
@@ -1547,8 +1701,8 @@ class PipelinesClient:
1547
1701
  token="YOUR_TOKEN",
1548
1702
  )
1549
1703
  client.pipelines.get_pipeline_document_status(
1550
- pipeline_id="string",
1551
- document_id="string",
1704
+ document_id="document_id",
1705
+ pipeline_id="pipeline_id",
1552
1706
  )
1553
1707
  """
1554
1708
  _response = self._client_wrapper.httpx_client.request(
@@ -1570,14 +1724,14 @@ class PipelinesClient:
1570
1724
  raise ApiError(status_code=_response.status_code, body=_response.text)
1571
1725
  raise ApiError(status_code=_response.status_code, body=_response_json)
1572
1726
 
1573
- def list_pipeline_document_chunks(self, pipeline_id: str, document_id: str) -> typing.List[TextNode]:
1727
+ def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
1574
1728
  """
1575
1729
  Return a list of chunks for a pipeline document.
1576
1730
 
1577
1731
  Parameters:
1578
- - pipeline_id: str.
1579
-
1580
1732
  - document_id: str.
1733
+
1734
+ - pipeline_id: str.
1581
1735
  ---
1582
1736
  from llama_cloud.client import LlamaCloud
1583
1737
 
@@ -1585,8 +1739,8 @@ class PipelinesClient:
1585
1739
  token="YOUR_TOKEN",
1586
1740
  )
1587
1741
  client.pipelines.list_pipeline_document_chunks(
1588
- pipeline_id="string",
1589
- document_id="string",
1742
+ document_id="document_id",
1743
+ pipeline_id="pipeline_id",
1590
1744
  )
1591
1745
  """
1592
1746
  _response = self._client_wrapper.httpx_client.request(
@@ -1616,42 +1770,43 @@ class AsyncPipelinesClient:
1616
1770
  async def search_pipelines(
1617
1771
  self,
1618
1772
  *,
1773
+ project_id: typing.Optional[str] = None,
1619
1774
  project_name: typing.Optional[str] = None,
1620
1775
  pipeline_name: typing.Optional[str] = None,
1621
1776
  pipeline_type: typing.Optional[PipelineType] = None,
1622
- project_id: typing.Optional[str] = None,
1777
+ organization_id: typing.Optional[str] = None,
1623
1778
  ) -> typing.List[Pipeline]:
1624
1779
  """
1625
1780
  Search for pipelines by various parameters.
1626
1781
 
1627
1782
  Parameters:
1783
+ - project_id: typing.Optional[str].
1784
+
1628
1785
  - project_name: typing.Optional[str].
1629
1786
 
1630
1787
  - pipeline_name: typing.Optional[str].
1631
1788
 
1632
1789
  - pipeline_type: typing.Optional[PipelineType].
1633
1790
 
1634
- - project_id: typing.Optional[str].
1791
+ - organization_id: typing.Optional[str].
1635
1792
  ---
1636
- from llama_cloud import PipelineType
1637
1793
  from llama_cloud.client import AsyncLlamaCloud
1638
1794
 
1639
1795
  client = AsyncLlamaCloud(
1640
1796
  token="YOUR_TOKEN",
1641
1797
  )
1642
- await client.pipelines.search_pipelines(
1643
- pipeline_type=PipelineType.PLAYGROUND,
1644
- )
1798
+ await client.pipelines.search_pipelines()
1645
1799
  """
1646
1800
  _response = await self._client_wrapper.httpx_client.request(
1647
1801
  "GET",
1648
1802
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
1649
1803
  params=remove_none_from_dict(
1650
1804
  {
1805
+ "project_id": project_id,
1651
1806
  "project_name": project_name,
1652
1807
  "pipeline_name": pipeline_name,
1653
1808
  "pipeline_type": pipeline_type,
1654
- "project_id": project_id,
1809
+ "organization_id": organization_id,
1655
1810
  }
1656
1811
  ),
1657
1812
  headers=self._client_wrapper.get_headers(),
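
The async client mirrors the same signature change. A minimal sketch of driving it from a script, assuming a standard asyncio entry point (the organization ID is a placeholder):

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        pipelines = await client.pipelines.search_pipelines(
            organization_id="organization_id",  # new in 0.1.6; placeholder ID
        )
        print(f"found {len(pipelines)} pipelines")

    asyncio.run(main())
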
@@ -1667,19 +1822,39 @@ class AsyncPipelinesClient:
1667
1822
  raise ApiError(status_code=_response.status_code, body=_response.text)
1668
1823
  raise ApiError(status_code=_response.status_code, body=_response_json)
1669
1824
 
1670
- async def create_pipeline(self, *, project_id: typing.Optional[str] = None, request: PipelineCreate) -> Pipeline:
1825
+ async def create_pipeline(
1826
+ self,
1827
+ *,
1828
+ project_id: typing.Optional[str] = None,
1829
+ organization_id: typing.Optional[str] = None,
1830
+ request: PipelineCreate,
1831
+ ) -> Pipeline:
1671
1832
  """
1672
1833
  Create a new pipeline for a project.
1673
1834
 
1674
1835
  Parameters:
1675
1836
  - project_id: typing.Optional[str].
1676
1837
 
1838
+ - organization_id: typing.Optional[str].
1839
+
1677
1840
  - request: PipelineCreate.
1841
+ ---
1842
+ from llama_cloud import PipelineCreate
1843
+ from llama_cloud.client import AsyncLlamaCloud
1844
+
1845
+ client = AsyncLlamaCloud(
1846
+ token="YOUR_TOKEN",
1847
+ )
1848
+ await client.pipelines.create_pipeline(
1849
+ request=PipelineCreate(
1850
+ name="name",
1851
+ ),
1852
+ )
1678
1853
  """
1679
1854
  _response = await self._client_wrapper.httpx_client.request(
1680
1855
  "POST",
1681
1856
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
1682
- params=remove_none_from_dict({"project_id": project_id}),
1857
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1683
1858
  json=jsonable_encoder(request),
1684
1859
  headers=self._client_wrapper.get_headers(),
1685
1860
  timeout=60,
@@ -1694,7 +1869,13 @@ class AsyncPipelinesClient:
1694
1869
  raise ApiError(status_code=_response.status_code, body=_response.text)
1695
1870
  raise ApiError(status_code=_response.status_code, body=_response_json)
1696
1871
 
1697
- async def upsert_pipeline(self, *, project_id: typing.Optional[str] = None, request: PipelineCreate) -> Pipeline:
1872
+ async def upsert_pipeline(
1873
+ self,
1874
+ *,
1875
+ project_id: typing.Optional[str] = None,
1876
+ organization_id: typing.Optional[str] = None,
1877
+ request: PipelineCreate,
1878
+ ) -> Pipeline:
1698
1879
  """
1699
1880
  Upsert a pipeline for a project.
1700
1881
  Updates if a pipeline with the same name and project_id already exists. Otherwise, creates a new pipeline.
@@ -1702,12 +1883,26 @@ class AsyncPipelinesClient:
1702
1883
  Parameters:
1703
1884
  - project_id: typing.Optional[str].
1704
1885
 
1886
+ - organization_id: typing.Optional[str].
1887
+
1705
1888
  - request: PipelineCreate.
1889
+ ---
1890
+ from llama_cloud import PipelineCreate
1891
+ from llama_cloud.client import AsyncLlamaCloud
1892
+
1893
+ client = AsyncLlamaCloud(
1894
+ token="YOUR_TOKEN",
1895
+ )
1896
+ await client.pipelines.upsert_pipeline(
1897
+ request=PipelineCreate(
1898
+ name="name",
1899
+ ),
1900
+ )
1706
1901
  """
1707
1902
  _response = await self._client_wrapper.httpx_client.request(
1708
1903
  "PUT",
1709
1904
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
1710
- params=remove_none_from_dict({"project_id": project_id}),
1905
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1711
1906
  json=jsonable_encoder(request),
1712
1907
  headers=self._client_wrapper.get_headers(),
1713
1908
  timeout=60,
@@ -1728,6 +1923,15 @@ class AsyncPipelinesClient:
1728
1923
 
1729
1924
  Parameters:
1730
1925
  - pipeline_id: str.
1926
+ ---
1927
+ from llama_cloud.client import AsyncLlamaCloud
1928
+
1929
+ client = AsyncLlamaCloud(
1930
+ token="YOUR_TOKEN",
1931
+ )
1932
+ await client.pipelines.get_pipeline(
1933
+ pipeline_id="pipeline_id",
1934
+ )
1731
1935
  """
1732
1936
  _response = await self._client_wrapper.httpx_client.request(
1733
1937
  "GET",
@@ -1753,6 +1957,7 @@ class AsyncPipelinesClient:
1753
1957
  transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
1754
1958
  configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
1755
1959
  data_sink_id: typing.Optional[str] = OMIT,
1960
+ embedding_model_config_id: typing.Optional[str] = OMIT,
1756
1961
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
1757
1962
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
1758
1963
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
@@ -1770,21 +1975,32 @@ class AsyncPipelinesClient:
1770
1975
 
1771
1976
  - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
1772
1977
 
1773
- - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
1978
+ - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline.
1979
+
1980
+ - data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.
1774
1981
 
1775
- - data_sink_id: typing.Optional[str].
1982
+ - embedding_model_config_id: typing.Optional[str]. Embedding model config ID. When provided instead of embedding_config, the embedding model config will be looked up by ID.
1776
1983
 
1777
- - data_sink: typing.Optional[DataSinkCreate].
1984
+ - data_sink: typing.Optional[DataSinkCreate]. Data sink. When provided instead of data_sink_id, the data sink will be created.
1778
1985
 
1779
- - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
1986
+ - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams]. Preset retrieval parameters for the pipeline.
1780
1987
 
1781
- - eval_parameters: typing.Optional[EvalExecutionParams].
1988
+ - eval_parameters: typing.Optional[EvalExecutionParams]. Eval parameters for the pipeline.
1782
1989
 
1783
- - llama_parse_parameters: typing.Optional[LlamaParseParameters].
1990
+ - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
1784
1991
 
1785
1992
  - name: typing.Optional[str].
1786
1993
 
1787
- - managed_pipeline_id: typing.Optional[str].
1994
+ - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
1995
+ ---
1996
+ from llama_cloud.client import AsyncLlamaCloud
1997
+
1998
+ client = AsyncLlamaCloud(
1999
+ token="YOUR_TOKEN",
2000
+ )
2001
+ await client.pipelines.update_existing_pipeline(
2002
+ pipeline_id="pipeline_id",
2003
+ )
1788
2004
  """
1789
2005
  _request: typing.Dict[str, typing.Any] = {}
1790
2006
  if embedding_config is not OMIT:
@@ -1795,6 +2011,8 @@ class AsyncPipelinesClient:
1795
2011
  _request["configured_transformations"] = configured_transformations
1796
2012
  if data_sink_id is not OMIT:
1797
2013
  _request["data_sink_id"] = data_sink_id
2014
+ if embedding_model_config_id is not OMIT:
2015
+ _request["embedding_model_config_id"] = embedding_model_config_id
1798
2016
  if data_sink is not OMIT:
1799
2017
  _request["data_sink"] = data_sink
1800
2018
  if preset_retrieval_parameters is not OMIT:
@@ -1837,7 +2055,7 @@ class AsyncPipelinesClient:
1837
2055
  token="YOUR_TOKEN",
1838
2056
  )
1839
2057
  await client.pipelines.delete_pipeline(
1840
- pipeline_id="string",
2058
+ pipeline_id="pipeline_id",
1841
2059
  )
1842
2060
  """
1843
2061
  _response = await self._client_wrapper.httpx_client.request(
@@ -1869,7 +2087,7 @@ class AsyncPipelinesClient:
1869
2087
  token="YOUR_TOKEN",
1870
2088
  )
1871
2089
  await client.pipelines.get_pipeline_status(
1872
- pipeline_id="string",
2090
+ pipeline_id="pipeline_id",
1873
2091
  )
1874
2092
  """
1875
2093
  _response = await self._client_wrapper.httpx_client.request(
@@ -1894,6 +2112,15 @@ class AsyncPipelinesClient:
 
  Parameters:
  - pipeline_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.sync_pipeline(
+ pipeline_id="pipeline_id",
+ )
  """
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
@@ -1917,6 +2144,15 @@ class AsyncPipelinesClient:
 
  Parameters:
  - pipeline_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.copy_pipeline(
+ pipeline_id="pipeline_id",
+ )
  """
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
@@ -1935,15 +2171,15 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
  async def get_eval_dataset_executions(
- self, pipeline_id: str, eval_dataset_id: str
+ self, eval_dataset_id: str, pipeline_id: str
  ) -> typing.List[EvalDatasetJobRecord]:
  """
  Get the status of an EvalDatasetExecution.
 
  Parameters:
- - pipeline_id: str.
-
  - eval_dataset_id: str.
+
+ - pipeline_id: str.
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
@@ -1951,8 +2187,8 @@ class AsyncPipelinesClient:
  token="YOUR_TOKEN",
  )
  await client.pipelines.get_eval_dataset_executions(
- pipeline_id="string",
- eval_dataset_id="string",
+ eval_dataset_id="eval_dataset_id",
+ pipeline_id="pipeline_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
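Note that this release flips the positional order of pipeline_id and eval_dataset_id here (and in the related eval-dataset methods below), so positional call sites can silently swap the two IDs. A small sketch of the keyword-argument style that is unaffected by the reordering; IDs are placeholders.

import asyncio

from llama_cloud.client import AsyncLlamaCloud

async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    # Keyword arguments are not affected by the positional reordering above.
    executions = await client.pipelines.get_eval_dataset_executions(
        pipeline_id="pipeline_id",
        eval_dataset_id="eval_dataset_id",
    )
    print(len(executions))

asyncio.run(main())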
@@ -1976,8 +2212,8 @@ class AsyncPipelinesClient:
1976
2212
 
1977
2213
  async def execute_eval_dataset(
1978
2214
  self,
1979
- pipeline_id: str,
1980
2215
  eval_dataset_id: str,
2216
+ pipeline_id: str,
1981
2217
  *,
1982
2218
  eval_question_ids: typing.List[str],
1983
2219
  params: typing.Optional[EvalExecutionParamsOverride] = OMIT,
@@ -1986,27 +2222,23 @@ class AsyncPipelinesClient:
1986
2222
  Execute a dataset.
1987
2223
 
1988
2224
  Parameters:
1989
- - pipeline_id: str.
1990
-
1991
2225
  - eval_dataset_id: str.
1992
2226
 
2227
+ - pipeline_id: str.
2228
+
1993
2229
  - eval_question_ids: typing.List[str].
1994
2230
 
1995
2231
  - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
1996
2232
  ---
1997
- from llama_cloud import EvalExecutionParamsOverride, SupportedLlmModelNames
1998
2233
  from llama_cloud.client import AsyncLlamaCloud
1999
2234
 
2000
2235
  client = AsyncLlamaCloud(
2001
2236
  token="YOUR_TOKEN",
2002
2237
  )
2003
2238
  await client.pipelines.execute_eval_dataset(
2004
- pipeline_id="string",
2005
- eval_dataset_id="string",
2006
- eval_question_ids=[],
2007
- params=EvalExecutionParamsOverride(
2008
- llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
2009
- ),
2239
+ eval_dataset_id="eval_dataset_id",
2240
+ pipeline_id="pipeline_id",
2241
+ eval_question_ids=["eval_question_ids"],
2010
2242
  )
2011
2243
  """
2012
2244
  _request: typing.Dict[str, typing.Any] = {"eval_question_ids": eval_question_ids}
@@ -2033,7 +2265,7 @@ class AsyncPipelinesClient:
2033
2265
  raise ApiError(status_code=_response.status_code, body=_response_json)
2034
2266
 
2035
2267
  async def get_eval_dataset_execution_result(
2036
- self, pipeline_id: str, eval_dataset_id: str
2268
+ self, eval_dataset_id: str, pipeline_id: str
2037
2269
  ) -> typing.List[EvalQuestionResult]:
2038
2270
  """
2039
2271
  Get the result of an EvalDatasetExecution.
@@ -2042,9 +2274,9 @@ class AsyncPipelinesClient:
2042
2274
  If any of the specified questions do not have a result, they will be ignored.
2043
2275
 
2044
2276
  Parameters:
2045
- - pipeline_id: str.
2046
-
2047
2277
  - eval_dataset_id: str.
2278
+
2279
+ - pipeline_id: str.
2048
2280
  ---
2049
2281
  from llama_cloud.client import AsyncLlamaCloud
2050
2282
 
@@ -2052,8 +2284,8 @@ class AsyncPipelinesClient:
2052
2284
  token="YOUR_TOKEN",
2053
2285
  )
2054
2286
  await client.pipelines.get_eval_dataset_execution_result(
2055
- pipeline_id="string",
2056
- eval_dataset_id="string",
2287
+ eval_dataset_id="eval_dataset_id",
2288
+ pipeline_id="pipeline_id",
2057
2289
  )
2058
2290
  """
2059
2291
  _response = await self._client_wrapper.httpx_client.request(
@@ -2076,17 +2308,17 @@ class AsyncPipelinesClient:
2076
2308
  raise ApiError(status_code=_response.status_code, body=_response_json)
2077
2309
 
2078
2310
  async def get_eval_dataset_execution(
2079
- self, pipeline_id: str, eval_dataset_id: str, eval_dataset_execution_id: str
2311
+ self, eval_dataset_id: str, eval_dataset_execution_id: str, pipeline_id: str
2080
2312
  ) -> EvalDatasetJobRecord:
2081
2313
  """
2082
2314
  Get the status of an EvalDatasetExecution.
2083
2315
 
2084
2316
  Parameters:
2085
- - pipeline_id: str.
2086
-
2087
2317
  - eval_dataset_id: str.
2088
2318
 
2089
2319
  - eval_dataset_execution_id: str.
2320
+
2321
+ - pipeline_id: str.
2090
2322
  ---
2091
2323
  from llama_cloud.client import AsyncLlamaCloud
2092
2324
 
@@ -2094,9 +2326,9 @@ class AsyncPipelinesClient:
2094
2326
  token="YOUR_TOKEN",
2095
2327
  )
2096
2328
  await client.pipelines.get_eval_dataset_execution(
2097
- pipeline_id="string",
2098
- eval_dataset_id="string",
2099
- eval_dataset_execution_id="string",
2329
+ eval_dataset_id="eval_dataset_id",
2330
+ eval_dataset_execution_id="eval_dataset_execution_id",
2331
+ pipeline_id="pipeline_id",
2100
2332
  )
2101
2333
  """
2102
2334
  _response = await self._client_wrapper.httpx_client.request(
@@ -2141,7 +2373,7 @@ class AsyncPipelinesClient:
2141
2373
  token="YOUR_TOKEN",
2142
2374
  )
2143
2375
  await client.pipelines.list_pipeline_files(
2144
- pipeline_id="string",
2376
+ pipeline_id="pipeline_id",
2145
2377
  )
2146
2378
  """
2147
2379
  _response = await self._client_wrapper.httpx_client.request(
@@ -2174,14 +2406,19 @@ class AsyncPipelinesClient:
 
  - request: typing.List[PipelineFileCreate].
  ---
+ from llama_cloud import PipelineFileCreate
  from llama_cloud.client import AsyncLlamaCloud
 
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
  await client.pipelines.add_files_to_pipeline(
- pipeline_id="string",
- request=[],
+ pipeline_id="pipeline_id",
+ request=[
+ PipelineFileCreate(
+ file_id="file_id",
+ )
+ ],
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -2201,14 +2438,70 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
+ async def list_pipeline_files_2(
+ self,
+ pipeline_id: str,
+ *,
+ data_source_id: typing.Optional[str] = None,
+ only_manually_uploaded: typing.Optional[bool] = None,
+ limit: typing.Optional[int] = None,
+ offset: typing.Optional[int] = None,
+ ) -> PaginatedListPipelineFilesResponse:
  """
- Get status of a file for a pipeline.
+ Get files for a pipeline.
 
  Parameters:
  - pipeline_id: str.
 
+ - data_source_id: typing.Optional[str].
+
+ - only_manually_uploaded: typing.Optional[bool].
+
+ - limit: typing.Optional[int].
+
+ - offset: typing.Optional[int].
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.list_pipeline_files_2(
+ pipeline_id="pipeline_id",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files2"),
+ params=remove_none_from_dict(
+ {
+ "data_source_id": data_source_id,
+ "only_manually_uploaded": only_manually_uploaded,
+ "limit": limit,
+ "offset": offset,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(PaginatedListPipelineFilesResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
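A hedged sketch of calling the new list_pipeline_files_2 endpoint with its pagination query parameters; the field layout of PaginatedListPipelineFilesResponse is not visible in this diff, so the sketch stops at printing the parsed page.

import asyncio

from llama_cloud.client import AsyncLlamaCloud

async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    # Fetch one page of manually uploaded files (IDs and page size are placeholders).
    page = await client.pipelines.list_pipeline_files_2(
        pipeline_id="pipeline_id",
        only_manually_uploaded=True,
        limit=50,
        offset=0,
    )
    print(page)  # PaginatedListPipelineFilesResponse; its fields are not shown in this diff

asyncio.run(main())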
+
+ async def get_pipeline_file_status(self, file_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
+ """
+ Get status of a file for a pipeline.
+
+ Parameters:
  - file_id: str.
+
+ - pipeline_id: str.
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
@@ -2216,8 +2509,8 @@ class AsyncPipelinesClient:
  token="YOUR_TOKEN",
  )
  await client.pipelines.get_pipeline_file_status(
- pipeline_id="string",
- file_id="string",
+ file_id="file_id",
+ pipeline_id="pipeline_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -2240,8 +2533,8 @@ class AsyncPipelinesClient:
2240
2533
 
2241
2534
  async def update_pipeline_file(
2242
2535
  self,
2243
- pipeline_id: str,
2244
2536
  file_id: str,
2537
+ pipeline_id: str,
2245
2538
  *,
2246
2539
  custom_metadata: typing.Optional[
2247
2540
  typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
@@ -2251,11 +2544,11 @@ class AsyncPipelinesClient:
2251
2544
  Update a file for a pipeline.
2252
2545
 
2253
2546
  Parameters:
2254
- - pipeline_id: str.
2255
-
2256
2547
  - file_id: str.
2257
2548
 
2258
- - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
2549
+ - pipeline_id: str.
2550
+
2551
+ - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]]. Custom metadata for the file
2259
2552
  ---
2260
2553
  from llama_cloud.client import AsyncLlamaCloud
2261
2554
 
@@ -2263,8 +2556,8 @@ class AsyncPipelinesClient:
2263
2556
  token="YOUR_TOKEN",
2264
2557
  )
2265
2558
  await client.pipelines.update_pipeline_file(
2266
- pipeline_id="string",
2267
- file_id="string",
2559
+ file_id="file_id",
2560
+ pipeline_id="pipeline_id",
2268
2561
  )
2269
2562
  """
2270
2563
  _request: typing.Dict[str, typing.Any] = {}
@@ -2289,14 +2582,14 @@ class AsyncPipelinesClient:
2289
2582
  raise ApiError(status_code=_response.status_code, body=_response.text)
2290
2583
  raise ApiError(status_code=_response.status_code, body=_response_json)
2291
2584
 
2292
- async def delete_pipeline_file(self, pipeline_id: str, file_id: str) -> None:
2585
+ async def delete_pipeline_file(self, file_id: str, pipeline_id: str) -> None:
2293
2586
  """
2294
2587
  Delete a file from a pipeline.
2295
2588
 
2296
2589
  Parameters:
2297
- - pipeline_id: str.
2298
-
2299
2590
  - file_id: str.
2591
+
2592
+ - pipeline_id: str.
2300
2593
  ---
2301
2594
  from llama_cloud.client import AsyncLlamaCloud
2302
2595
 
@@ -2304,8 +2597,8 @@ class AsyncPipelinesClient:
2304
2597
  token="YOUR_TOKEN",
2305
2598
  )
2306
2599
  await client.pipelines.delete_pipeline_file(
2307
- pipeline_id="string",
2308
- file_id="string",
2600
+ file_id="file_id",
2601
+ pipeline_id="pipeline_id",
2309
2602
  )
2310
2603
  """
2311
2604
  _response = await self._client_wrapper.httpx_client.request(
@@ -2334,6 +2627,15 @@ class AsyncPipelinesClient:
2334
2627
  - pipeline_id: str.
2335
2628
 
2336
2629
  - upload_file: typing.IO.
2630
+ ---
2631
+ from llama_cloud.client import AsyncLlamaCloud
2632
+
2633
+ client = AsyncLlamaCloud(
2634
+ token="YOUR_TOKEN",
2635
+ )
2636
+ await client.pipelines.import_pipeline_metadata(
2637
+ pipeline_id="pipeline_id",
2638
+ )
2337
2639
  """
2338
2640
  _response = await self._client_wrapper.httpx_client.request(
2339
2641
  "PUT",
@@ -2366,7 +2668,7 @@ class AsyncPipelinesClient:
2366
2668
  token="YOUR_TOKEN",
2367
2669
  )
2368
2670
  await client.pipelines.delete_pipeline_files_metadata(
2369
- pipeline_id="string",
2671
+ pipeline_id="pipeline_id",
2370
2672
  )
2371
2673
  """
2372
2674
  _response = await self._client_wrapper.httpx_client.request(
@@ -2398,7 +2700,7 @@ class AsyncPipelinesClient:
2398
2700
  token="YOUR_TOKEN",
2399
2701
  )
2400
2702
  await client.pipelines.list_pipeline_data_sources(
2401
- pipeline_id="string",
2703
+ pipeline_id="pipeline_id",
2402
2704
  )
2403
2705
  """
2404
2706
  _response = await self._client_wrapper.httpx_client.request(
@@ -2430,14 +2732,19 @@ class AsyncPipelinesClient:
2430
2732
 
2431
2733
  - request: typing.List[PipelineDataSourceCreate].
2432
2734
  ---
2735
+ from llama_cloud import PipelineDataSourceCreate
2433
2736
  from llama_cloud.client import AsyncLlamaCloud
2434
2737
 
2435
2738
  client = AsyncLlamaCloud(
2436
2739
  token="YOUR_TOKEN",
2437
2740
  )
2438
2741
  await client.pipelines.add_data_sources_to_pipeline(
2439
- pipeline_id="string",
2440
- request=[],
2742
+ pipeline_id="pipeline_id",
2743
+ request=[
2744
+ PipelineDataSourceCreate(
2745
+ data_source_id="data_source_id",
2746
+ )
2747
+ ],
2441
2748
  )
2442
2749
  """
2443
2750
  _response = await self._client_wrapper.httpx_client.request(
@@ -2460,17 +2767,17 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
  async def update_pipeline_data_source(
- self, pipeline_id: str, data_source_id: str, *, sync_interval: typing.Optional[float] = OMIT
+ self, data_source_id: str, pipeline_id: str, *, sync_interval: typing.Optional[float] = OMIT
  ) -> PipelineDataSource:
  """
  Update the configuration of a data source in a pipeline.
 
  Parameters:
- - pipeline_id: str.
-
  - data_source_id: str.
 
- - sync_interval: typing.Optional[float].
+ - pipeline_id: str.
+
+ - sync_interval: typing.Optional[float]. The interval at which the data source should be synced.
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
@@ -2478,8 +2785,8 @@ class AsyncPipelinesClient:
  token="YOUR_TOKEN",
  )
  await client.pipelines.update_pipeline_data_source(
- pipeline_id="string",
- data_source_id="string",
+ data_source_id="data_source_id",
+ pipeline_id="pipeline_id",
  )
  """
  _request: typing.Dict[str, typing.Any] = {}
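A short sketch of the documented sync_interval option; the unit of the interval is not stated in this hunk, so 3600.0 is purely illustrative, and the IDs are placeholders.

import asyncio

from llama_cloud.client import AsyncLlamaCloud

async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    # Re-sync this data source on a fixed interval; the unit of sync_interval
    # is not stated in this hunk, so the value below is only illustrative.
    await client.pipelines.update_pipeline_data_source(
        data_source_id="data_source_id",
        pipeline_id="pipeline_id",
        sync_interval=3600.0,
    )

asyncio.run(main())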
@@ -2505,14 +2812,14 @@ class AsyncPipelinesClient:
2505
2812
  raise ApiError(status_code=_response.status_code, body=_response.text)
2506
2813
  raise ApiError(status_code=_response.status_code, body=_response_json)
2507
2814
 
2508
- async def delete_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> None:
2815
+ async def delete_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> None:
2509
2816
  """
2510
2817
  Delete a data source from a pipeline.
2511
2818
 
2512
2819
  Parameters:
2513
- - pipeline_id: str.
2514
-
2515
2820
  - data_source_id: str.
2821
+
2822
+ - pipeline_id: str.
2516
2823
  ---
2517
2824
  from llama_cloud.client import AsyncLlamaCloud
2518
2825
 
@@ -2520,8 +2827,8 @@ class AsyncPipelinesClient:
2520
2827
  token="YOUR_TOKEN",
2521
2828
  )
2522
2829
  await client.pipelines.delete_pipeline_data_source(
2523
- pipeline_id="string",
2524
- data_source_id="string",
2830
+ data_source_id="data_source_id",
2831
+ pipeline_id="pipeline_id",
2525
2832
  )
2526
2833
  """
2527
2834
  _response = await self._client_wrapper.httpx_client.request(
@@ -2543,14 +2850,24 @@ class AsyncPipelinesClient:
2543
2850
  raise ApiError(status_code=_response.status_code, body=_response.text)
2544
2851
  raise ApiError(status_code=_response.status_code, body=_response_json)
2545
2852
 
2546
- async def sync_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> Pipeline:
2853
+ async def sync_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> Pipeline:
2547
2854
  """
2548
2855
  Run ingestion for the pipeline data source by incrementally updating the data-sink with upstream changes from data-source.
2549
2856
 
2550
2857
  Parameters:
2858
+ - data_source_id: str.
2859
+
2551
2860
  - pipeline_id: str.
2861
+ ---
2862
+ from llama_cloud.client import AsyncLlamaCloud
2552
2863
 
2553
- - data_source_id: str.
2864
+ client = AsyncLlamaCloud(
2865
+ token="YOUR_TOKEN",
2866
+ )
2867
+ await client.pipelines.sync_pipeline_data_source(
2868
+ data_source_id="data_source_id",
2869
+ pipeline_id="pipeline_id",
2870
+ )
2554
2871
  """
2555
2872
  _response = await self._client_wrapper.httpx_client.request(
2556
2873
  "POST",
@@ -2572,15 +2889,15 @@ class AsyncPipelinesClient:
2572
2889
  raise ApiError(status_code=_response.status_code, body=_response_json)
2573
2890
 
2574
2891
  async def get_pipeline_data_source_status(
2575
- self, pipeline_id: str, data_source_id: str
2892
+ self, data_source_id: str, pipeline_id: str
2576
2893
  ) -> ManagedIngestionStatusResponse:
2577
2894
  """
2578
2895
  Get the status of a data source for a pipeline.
2579
2896
 
2580
2897
  Parameters:
2581
- - pipeline_id: str.
2582
-
2583
2898
  - data_source_id: str.
2899
+
2900
+ - pipeline_id: str.
2584
2901
  ---
2585
2902
  from llama_cloud.client import AsyncLlamaCloud
2586
2903
 
@@ -2588,8 +2905,8 @@ class AsyncPipelinesClient:
2588
2905
  token="YOUR_TOKEN",
2589
2906
  )
2590
2907
  await client.pipelines.get_pipeline_data_source_status(
2591
- pipeline_id="string",
2592
- data_source_id="string",
2908
+ data_source_id="data_source_id",
2909
+ pipeline_id="pipeline_id",
2593
2910
  )
2594
2911
  """
2595
2912
  _response = await self._client_wrapper.httpx_client.request(
@@ -2616,6 +2933,7 @@ class AsyncPipelinesClient:
  pipeline_id: str,
  *,
  dense_similarity_top_k: typing.Optional[int] = OMIT,
+ dense_similarity_cutoff: typing.Optional[float] = OMIT,
  sparse_similarity_top_k: typing.Optional[int] = OMIT,
  enable_reranking: typing.Optional[bool] = OMIT,
  rerank_top_n: typing.Optional[int] = OMIT,
@@ -2633,19 +2951,21 @@ class AsyncPipelinesClient:
  Parameters:
  - pipeline_id: str.
 
- - dense_similarity_top_k: typing.Optional[int].
+ - dense_similarity_top_k: typing.Optional[int]. Number of nodes for dense retrieval.
+
+ - dense_similarity_cutoff: typing.Optional[float]. Minimum similarity score wrt query for retrieval
 
- - sparse_similarity_top_k: typing.Optional[int].
+ - sparse_similarity_top_k: typing.Optional[int]. Number of nodes for sparse retrieval.
 
- - enable_reranking: typing.Optional[bool].
+ - enable_reranking: typing.Optional[bool]. Enable reranking for retrieval
 
- - rerank_top_n: typing.Optional[int].
+ - rerank_top_n: typing.Optional[int]. Number of reranked nodes for returning.
 
- - alpha: typing.Optional[float].
+ - alpha: typing.Optional[float]. Alpha value for hybrid retrieval to determine the weights between dense and sparse retrieval. 0 is sparse retrieval and 1 is dense retrieval.
 
- - search_filters: typing.Optional[MetadataFilters].
+ - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.
 
- - files_top_k: typing.Optional[int].
+ - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
 
  - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
 
@@ -2655,25 +2975,21 @@ class AsyncPipelinesClient:
 
  - class_name: typing.Optional[str].
  ---
- from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
  from llama_cloud.client import AsyncLlamaCloud
 
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
  await client.pipelines.run_search(
- pipeline_id="string",
- search_filters=MetadataFilters(
- filters=[],
- condition=FilterCondition.AND,
- ),
- retrieval_mode=RetrievalMode.CHUNKS,
- query="string",
+ pipeline_id="pipeline_id",
+ query="query",
  )
  """
  _request: typing.Dict[str, typing.Any] = {"query": query}
  if dense_similarity_top_k is not OMIT:
  _request["dense_similarity_top_k"] = dense_similarity_top_k
+ if dense_similarity_cutoff is not OMIT:
+ _request["dense_similarity_cutoff"] = dense_similarity_cutoff
  if sparse_similarity_top_k is not OMIT:
  _request["sparse_similarity_top_k"] = sparse_similarity_top_k
  if enable_reranking is not OMIT:
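A hedged sketch combining the new dense_similarity_cutoff knob with the documented hybrid-retrieval alpha weight; the numeric values are illustrative, alpha is assumed to remain a run_search keyword as the docstring indicates, and the shape of the returned result is not part of this hunk.

import asyncio

from llama_cloud.client import AsyncLlamaCloud

async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    # Hybrid retrieval leaning dense (alpha=0.8), dropping weak matches below 0.4.
    results = await client.pipelines.run_search(
        pipeline_id="pipeline_id",
        query="query",
        dense_similarity_top_k=10,
        dense_similarity_cutoff=0.4,
        alpha=0.8,
    )
    print(results)

asyncio.run(main())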
@@ -2722,7 +3038,7 @@ class AsyncPipelinesClient:
2722
3038
  token="YOUR_TOKEN",
2723
3039
  )
2724
3040
  await client.pipelines.list_pipeline_jobs(
2725
- pipeline_id="string",
3041
+ pipeline_id="pipeline_id",
2726
3042
  )
2727
3043
  """
2728
3044
  _response = await self._client_wrapper.httpx_client.request(
@@ -2741,14 +3057,14 @@ class AsyncPipelinesClient:
2741
3057
  raise ApiError(status_code=_response.status_code, body=_response.text)
2742
3058
  raise ApiError(status_code=_response.status_code, body=_response_json)
2743
3059
 
2744
- async def get_pipeline_job(self, pipeline_id: str, job_id: str) -> PipelineDeployment:
3060
+ async def get_pipeline_job(self, job_id: str, pipeline_id: str) -> PipelineDeployment:
2745
3061
  """
2746
3062
  Get a job for a pipeline.
2747
3063
 
2748
3064
  Parameters:
2749
- - pipeline_id: str.
2750
-
2751
3065
  - job_id: str.
3066
+
3067
+ - pipeline_id: str.
2752
3068
  ---
2753
3069
  from llama_cloud.client import AsyncLlamaCloud
2754
3070
 
@@ -2756,8 +3072,8 @@ class AsyncPipelinesClient:
2756
3072
  token="YOUR_TOKEN",
2757
3073
  )
2758
3074
  await client.pipelines.get_pipeline_job(
2759
- pipeline_id="string",
2760
- job_id="string",
3075
+ job_id="job_id",
3076
+ pipeline_id="pipeline_id",
2761
3077
  )
2762
3078
  """
2763
3079
  _response = await self._client_wrapper.httpx_client.request(
@@ -2791,7 +3107,7 @@ class AsyncPipelinesClient:
2791
3107
  token="YOUR_TOKEN",
2792
3108
  )
2793
3109
  await client.pipelines.get_playground_session(
2794
- pipeline_id="string",
3110
+ pipeline_id="pipeline_id",
2795
3111
  )
2796
3112
  """
2797
3113
  _response = await self._client_wrapper.httpx_client.request(
@@ -2832,34 +3148,13 @@ class AsyncPipelinesClient:
2832
3148
 
2833
3149
  - class_name: typing.Optional[str].
2834
3150
  ---
2835
- from llama_cloud import (
2836
- ChatData,
2837
- FilterCondition,
2838
- LlmParameters,
2839
- MetadataFilters,
2840
- PresetRetrievalParams,
2841
- RetrievalMode,
2842
- SupportedLlmModelNames,
2843
- )
2844
3151
  from llama_cloud.client import AsyncLlamaCloud
2845
3152
 
2846
3153
  client = AsyncLlamaCloud(
2847
3154
  token="YOUR_TOKEN",
2848
3155
  )
2849
3156
  await client.pipelines.chat(
2850
- pipeline_id="string",
2851
- data=ChatData(
2852
- retrieval_parameters=PresetRetrievalParams(
2853
- search_filters=MetadataFilters(
2854
- filters=[],
2855
- condition=FilterCondition.AND,
2856
- ),
2857
- retrieval_mode=RetrievalMode.CHUNKS,
2858
- ),
2859
- llm_parameters=LlmParameters(
2860
- model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
2861
- ),
2862
- ),
3157
+ pipeline_id="pipeline_id",
2863
3158
  )
2864
3159
  """
2865
3160
  _request: typing.Dict[str, typing.Any] = {}
@@ -2915,7 +3210,7 @@ class AsyncPipelinesClient:
2915
3210
  token="YOUR_TOKEN",
2916
3211
  )
2917
3212
  await client.pipelines.list_pipeline_documents(
2918
- pipeline_id="string",
3213
+ pipeline_id="pipeline_id",
2919
3214
  )
2920
3215
  """
2921
3216
  _response = await self._client_wrapper.httpx_client.request(
@@ -2950,14 +3245,20 @@ class AsyncPipelinesClient:
 
  - request: typing.List[CloudDocumentCreate].
  ---
+ from llama_cloud import CloudDocumentCreate
  from llama_cloud.client import AsyncLlamaCloud
 
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
  await client.pipelines.create_batch_pipeline_documents(
- pipeline_id="string",
- request=[],
+ pipeline_id="pipeline_id",
+ request=[
+ CloudDocumentCreate(
+ text="text",
+ metadata={"key": "value"},
+ )
+ ],
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
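A small sketch of batching several CloudDocumentCreate payloads, assuming only the text and metadata fields shown in the example above; upsert_batch_pipeline_documents (in the next hunk) accepts the same request list.

import asyncio

from llama_cloud import CloudDocumentCreate
from llama_cloud.client import AsyncLlamaCloud

async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    docs = [
        CloudDocumentCreate(text=f"chunk {i}", metadata={"source": "example"})
        for i in range(3)
    ]
    # create inserts new documents; upsert_batch_pipeline_documents takes the same shape.
    await client.pipelines.create_batch_pipeline_documents(
        pipeline_id="pipeline_id",
        request=docs,
    )

asyncio.run(main())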
@@ -2990,14 +3291,20 @@ class AsyncPipelinesClient:
2990
3291
 
2991
3292
  - request: typing.List[CloudDocumentCreate].
2992
3293
  ---
3294
+ from llama_cloud import CloudDocumentCreate
2993
3295
  from llama_cloud.client import AsyncLlamaCloud
2994
3296
 
2995
3297
  client = AsyncLlamaCloud(
2996
3298
  token="YOUR_TOKEN",
2997
3299
  )
2998
3300
  await client.pipelines.upsert_batch_pipeline_documents(
2999
- pipeline_id="string",
3000
- request=[],
3301
+ pipeline_id="pipeline_id",
3302
+ request=[
3303
+ CloudDocumentCreate(
3304
+ text="text",
3305
+ metadata={"key": "value"},
3306
+ )
3307
+ ],
3001
3308
  )
3002
3309
  """
3003
3310
  _response = await self._client_wrapper.httpx_client.request(
@@ -3019,14 +3326,14 @@ class AsyncPipelinesClient:
3019
3326
  raise ApiError(status_code=_response.status_code, body=_response.text)
3020
3327
  raise ApiError(status_code=_response.status_code, body=_response_json)
3021
3328
 
3022
- async def get_pipeline_document(self, pipeline_id: str, document_id: str) -> CloudDocument:
3329
+ async def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
3023
3330
  """
3024
3331
  Return a single document for a pipeline.
3025
3332
 
3026
3333
  Parameters:
3027
- - pipeline_id: str.
3028
-
3029
3334
  - document_id: str.
3335
+
3336
+ - pipeline_id: str.
3030
3337
  ---
3031
3338
  from llama_cloud.client import AsyncLlamaCloud
3032
3339
 
@@ -3034,8 +3341,8 @@ class AsyncPipelinesClient:
3034
3341
  token="YOUR_TOKEN",
3035
3342
  )
3036
3343
  await client.pipelines.get_pipeline_document(
3037
- pipeline_id="string",
3038
- document_id="string",
3344
+ document_id="document_id",
3345
+ pipeline_id="pipeline_id",
3039
3346
  )
3040
3347
  """
3041
3348
  _response = await self._client_wrapper.httpx_client.request(
@@ -3056,14 +3363,14 @@ class AsyncPipelinesClient:
3056
3363
  raise ApiError(status_code=_response.status_code, body=_response.text)
3057
3364
  raise ApiError(status_code=_response.status_code, body=_response_json)
3058
3365
 
3059
- async def delete_pipeline_document(self, pipeline_id: str, document_id: str) -> None:
3366
+ async def delete_pipeline_document(self, document_id: str, pipeline_id: str) -> None:
3060
3367
  """
3061
3368
  Delete a document for a pipeline.
3062
3369
 
3063
3370
  Parameters:
3064
- - pipeline_id: str.
3065
-
3066
3371
  - document_id: str.
3372
+
3373
+ - pipeline_id: str.
3067
3374
  ---
3068
3375
  from llama_cloud.client import AsyncLlamaCloud
3069
3376
 
@@ -3071,8 +3378,8 @@ class AsyncPipelinesClient:
3071
3378
  token="YOUR_TOKEN",
3072
3379
  )
3073
3380
  await client.pipelines.delete_pipeline_document(
3074
- pipeline_id="string",
3075
- document_id="string",
3381
+ document_id="document_id",
3382
+ pipeline_id="pipeline_id",
3076
3383
  )
3077
3384
  """
3078
3385
  _response = await self._client_wrapper.httpx_client.request(
@@ -3093,14 +3400,14 @@ class AsyncPipelinesClient:
3093
3400
  raise ApiError(status_code=_response.status_code, body=_response.text)
3094
3401
  raise ApiError(status_code=_response.status_code, body=_response_json)
3095
3402
 
3096
- async def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatusResponse:
3403
+ async def get_pipeline_document_status(self, document_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
3097
3404
  """
3098
3405
  Return a single document for a pipeline.
3099
3406
 
3100
3407
  Parameters:
3101
- - pipeline_id: str.
3102
-
3103
3408
  - document_id: str.
3409
+
3410
+ - pipeline_id: str.
3104
3411
  ---
3105
3412
  from llama_cloud.client import AsyncLlamaCloud
3106
3413
 
@@ -3108,8 +3415,8 @@ class AsyncPipelinesClient:
3108
3415
  token="YOUR_TOKEN",
3109
3416
  )
3110
3417
  await client.pipelines.get_pipeline_document_status(
3111
- pipeline_id="string",
3112
- document_id="string",
3418
+ document_id="document_id",
3419
+ pipeline_id="pipeline_id",
3113
3420
  )
3114
3421
  """
3115
3422
  _response = await self._client_wrapper.httpx_client.request(
@@ -3131,14 +3438,14 @@ class AsyncPipelinesClient:
3131
3438
  raise ApiError(status_code=_response.status_code, body=_response.text)
3132
3439
  raise ApiError(status_code=_response.status_code, body=_response_json)
3133
3440
 
3134
- async def list_pipeline_document_chunks(self, pipeline_id: str, document_id: str) -> typing.List[TextNode]:
3441
+ async def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
3135
3442
  """
3136
3443
  Return a list of chunks for a pipeline document.
3137
3444
 
3138
3445
  Parameters:
3139
- - pipeline_id: str.
3140
-
3141
3446
  - document_id: str.
3447
+
3448
+ - pipeline_id: str.
3142
3449
  ---
3143
3450
  from llama_cloud.client import AsyncLlamaCloud
3144
3451
 
@@ -3146,8 +3453,8 @@ class AsyncPipelinesClient:
3146
3453
  token="YOUR_TOKEN",
3147
3454
  )
3148
3455
  await client.pipelines.list_pipeline_document_chunks(
3149
- pipeline_id="string",
3150
- document_id="string",
3456
+ document_id="document_id",
3457
+ pipeline_id="pipeline_id",
3151
3458
  )
3152
3459
  """
3153
3460
  _response = await self._client_wrapper.httpx_client.request(