llama-cloud 0.0.17__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of llama-cloud has been flagged as a potentially problematic release.
- llama_cloud/__init__.py +8 -36
- llama_cloud/client.py +0 -3
- llama_cloud/resources/__init__.py +2 -10
- llama_cloud/resources/data_sinks/__init__.py +2 -2
- llama_cloud/resources/data_sinks/client.py +8 -8
- llama_cloud/resources/data_sinks/types/__init__.py +1 -2
- llama_cloud/resources/data_sinks/types/data_sink_update_component.py +15 -2
- llama_cloud/resources/data_sources/__init__.py +2 -2
- llama_cloud/resources/data_sources/client.py +6 -6
- llama_cloud/resources/data_sources/types/__init__.py +1 -2
- llama_cloud/resources/data_sources/types/data_source_update_component.py +23 -2
- llama_cloud/resources/extraction/client.py +14 -14
- llama_cloud/resources/files/client.py +10 -10
- llama_cloud/resources/organizations/client.py +2 -2
- llama_cloud/resources/parsing/client.py +100 -60
- llama_cloud/resources/pipelines/__init__.py +0 -4
- llama_cloud/resources/pipelines/client.py +50 -340
- llama_cloud/resources/pipelines/types/__init__.py +1 -7
- llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +15 -15
- llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py +1 -24
- llama_cloud/types/__init__.py +9 -29
- llama_cloud/types/azure_open_ai_embedding.py +7 -39
- llama_cloud/types/base_prompt_template.py +3 -14
- llama_cloud/types/bedrock_embedding.py +7 -20
- llama_cloud/types/box_auth_mechanism.py +0 -4
- llama_cloud/types/character_splitter.py +3 -4
- llama_cloud/types/chat_data.py +0 -5
- llama_cloud/types/chat_message.py +1 -6
- llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -18
- llama_cloud/types/cloud_box_data_source.py +6 -16
- llama_cloud/types/cloud_confluence_data_source.py +6 -10
- llama_cloud/types/cloud_document.py +1 -3
- llama_cloud/types/cloud_document_create.py +1 -3
- llama_cloud/types/cloud_google_drive_data_source.py +0 -4
- llama_cloud/types/cloud_jira_data_source.py +4 -6
- llama_cloud/types/cloud_notion_page_data_source.py +2 -6
- llama_cloud/types/cloud_one_drive_data_source.py +2 -6
- llama_cloud/types/cloud_pinecone_vector_store.py +1 -1
- llama_cloud/types/cloud_postgres_vector_store.py +0 -4
- llama_cloud/types/cloud_s_3_data_source.py +4 -12
- llama_cloud/types/cloud_sharepoint_data_source.py +5 -9
- llama_cloud/types/cloud_slack_data_source.py +6 -10
- llama_cloud/types/code_splitter.py +2 -1
- llama_cloud/types/cohere_embedding.py +6 -15
- llama_cloud/types/configurable_data_sink_names.py +0 -12
- llama_cloud/types/configurable_data_source_names.py +0 -4
- llama_cloud/types/configurable_transformation_names.py +0 -32
- llama_cloud/types/configured_transformation_item_component.py +15 -2
- llama_cloud/types/data_sink.py +2 -2
- llama_cloud/types/data_sink_component.py +15 -2
- llama_cloud/types/data_sink_create_component.py +15 -2
- llama_cloud/types/data_source.py +3 -5
- llama_cloud/types/data_source_component.py +23 -2
- llama_cloud/types/data_source_create.py +1 -3
- llama_cloud/types/data_source_create_component.py +23 -2
- llama_cloud/types/eval_dataset.py +2 -2
- llama_cloud/types/eval_dataset_job_record.py +7 -13
- llama_cloud/types/eval_execution_params_override.py +2 -6
- llama_cloud/types/eval_metric.py +17 -0
- llama_cloud/types/eval_question.py +2 -6
- llama_cloud/types/extraction_result.py +5 -3
- llama_cloud/types/extraction_schema.py +3 -5
- llama_cloud/types/file.py +7 -11
- llama_cloud/types/gemini_embedding.py +7 -22
- llama_cloud/types/hugging_face_inference_api_embedding.py +9 -34
- llama_cloud/types/input_message.py +2 -4
- llama_cloud/types/llama_parse_parameters.py +5 -0
- llama_cloud/types/llama_parse_supported_file_extensions.py +0 -4
- llama_cloud/types/llm.py +9 -8
- llama_cloud/types/llm_parameters.py +2 -7
- llama_cloud/types/local_eval.py +8 -10
- llama_cloud/types/local_eval_results.py +1 -1
- llama_cloud/types/managed_ingestion_status_response.py +3 -5
- llama_cloud/types/markdown_element_node_parser.py +4 -5
- llama_cloud/types/markdown_node_parser.py +2 -1
- llama_cloud/types/message_annotation.py +1 -6
- llama_cloud/types/metric_result.py +3 -3
- llama_cloud/types/node_parser.py +2 -1
- llama_cloud/types/node_relationship.py +44 -0
- llama_cloud/types/object_type.py +0 -4
- llama_cloud/types/open_ai_embedding.py +7 -36
- llama_cloud/types/organization.py +2 -2
- llama_cloud/types/page_splitter_node_parser.py +3 -2
- llama_cloud/types/parsing_job_json_result.py +2 -2
- llama_cloud/types/parsing_job_markdown_result.py +1 -1
- llama_cloud/types/parsing_job_text_result.py +1 -1
- llama_cloud/types/partition_names.py +45 -0
- llama_cloud/types/pipeline.py +7 -17
- llama_cloud/types/pipeline_configuration_hashes.py +3 -3
- llama_cloud/types/pipeline_create.py +6 -18
- llama_cloud/types/pipeline_create_embedding_config.py +15 -15
- llama_cloud/types/pipeline_create_transform_config.py +1 -24
- llama_cloud/types/pipeline_data_source.py +5 -11
- llama_cloud/types/pipeline_data_source_component.py +23 -2
- llama_cloud/types/pipeline_data_source_create.py +1 -3
- llama_cloud/types/pipeline_deployment.py +4 -8
- llama_cloud/types/pipeline_embedding_config.py +15 -15
- llama_cloud/types/pipeline_file.py +10 -18
- llama_cloud/types/pipeline_file_create.py +1 -3
- llama_cloud/types/playground_session.py +2 -2
- llama_cloud/types/preset_retrieval_params.py +8 -11
- llama_cloud/types/presigned_url.py +1 -3
- llama_cloud/types/project.py +2 -2
- llama_cloud/types/prompt_mixin_prompts.py +1 -1
- llama_cloud/types/prompt_spec.py +2 -4
- llama_cloud/types/related_node_info.py +0 -4
- llama_cloud/types/retrieval_mode.py +0 -4
- llama_cloud/types/sentence_splitter.py +3 -4
- llama_cloud/types/supported_llm_model_names.py +0 -4
- llama_cloud/types/text_node.py +3 -9
- llama_cloud/types/token_text_splitter.py +2 -1
- llama_cloud/types/transformation_category_names.py +0 -4
- llama_cloud/types/user_organization.py +5 -9
- llama_cloud/types/user_organization_create.py +2 -2
- llama_cloud/types/user_organization_delete.py +2 -2
- llama_cloud/types/vertex_ai_embedding_config.py +2 -2
- llama_cloud/types/{extend_vertex_text_embedding.py → vertex_text_embedding.py} +10 -23
- {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.1.dist-info}/METADATA +1 -1
- llama_cloud-0.1.1.dist-info/RECORD +224 -0
- llama_cloud/resources/auth/__init__.py +0 -2
- llama_cloud/resources/auth/client.py +0 -124
- llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +0 -23
- llama_cloud/resources/data_sources/types/data_source_update_component_one.py +0 -27
- llama_cloud/types/cloud_chroma_vector_store.py +0 -43
- llama_cloud/types/cloud_weaviate_vector_store.py +0 -41
- llama_cloud/types/configured_transformation_item_component_one.py +0 -35
- llama_cloud/types/custom_claims.py +0 -58
- llama_cloud/types/data_sink_component_one.py +0 -23
- llama_cloud/types/data_sink_create_component_one.py +0 -23
- llama_cloud/types/data_source_component_one.py +0 -27
- llama_cloud/types/data_source_create_component_one.py +0 -27
- llama_cloud/types/pipeline_data_source_component_one.py +0 -27
- llama_cloud/types/user.py +0 -35
- llama_cloud-0.0.17.dist-info/RECORD +0 -235
- {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.1.dist-info}/LICENSE +0 -0
- {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.1.dist-info}/WHEEL +0 -0
llama_cloud/resources/pipelines/client.py

```diff
@@ -118,46 +118,6 @@ class PipelinesClient:
             - project_id: typing.Optional[str].
 
             - request: PipelineCreate.
-        ---
-        from llama_cloud import (
-            ConfigurableDataSinkNames,
-            DataSinkCreate,
-            EvalExecutionParams,
-            FilterCondition,
-            LlamaParseParameters,
-            MetadataFilters,
-            PipelineCreate,
-            PipelineType,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.create_pipeline(
-            request=PipelineCreate(
-                data_sink=DataSinkCreate(
-                    name="string",
-                    sink_type=ConfigurableDataSinkNames.CHROMA,
-                ),
-                preset_retrieval_parameters=PresetRetrievalParams(
-                    search_filters=MetadataFilters(
-                        filters=[],
-                        condition=FilterCondition.AND,
-                    ),
-                    retrieval_mode=RetrievalMode.CHUNKS,
-                ),
-                eval_parameters=EvalExecutionParams(
-                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
-                ),
-                llama_parse_parameters=LlamaParseParameters(),
-                name="string",
-                pipeline_type=PipelineType.PLAYGROUND,
-            ),
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
```
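The hunk above drops the generated usage example from the `create_pipeline` docstring; 0.1.1 removes these inline examples across the client. For reference, a minimal call that should still be valid against the new version, sketched only from the signatures visible in this diff (note that the Chroma data sink used in the old example is gone in 0.1.1: `cloud_chroma_vector_store.py` is deleted and `configurable_data_sink_names.py` loses 12 lines, likely including the `CHROMA` member):

```python
# Hedged sketch: create_pipeline with a bare PipelineCreate. Assumes
# PipelineCreate(name=...) is sufficient; every other field shown in the
# removed docstring example was optional there.
from llama_cloud import PipelineCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)
pipeline = client.pipelines.create_pipeline(
    request=PipelineCreate(name="my-pipeline"),
)
```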
```diff
@@ -186,46 +146,6 @@ class PipelinesClient:
             - project_id: typing.Optional[str].
 
             - request: PipelineCreate.
-        ---
-        from llama_cloud import (
-            ConfigurableDataSinkNames,
-            DataSinkCreate,
-            EvalExecutionParams,
-            FilterCondition,
-            LlamaParseParameters,
-            MetadataFilters,
-            PipelineCreate,
-            PipelineType,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.upsert_pipeline(
-            request=PipelineCreate(
-                data_sink=DataSinkCreate(
-                    name="string",
-                    sink_type=ConfigurableDataSinkNames.CHROMA,
-                ),
-                preset_retrieval_parameters=PresetRetrievalParams(
-                    search_filters=MetadataFilters(
-                        filters=[],
-                        condition=FilterCondition.AND,
-                    ),
-                    retrieval_mode=RetrievalMode.CHUNKS,
-                ),
-                eval_parameters=EvalExecutionParams(
-                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
-                ),
-                llama_parse_parameters=LlamaParseParameters(),
-                name="string",
-                pipeline_type=PipelineType.PLAYGROUND,
-            ),
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
@@ -251,15 +171,6 @@ class PipelinesClient:
 
         Parameters:
             - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.get_pipeline(
-            pipeline_id="string",
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -298,60 +209,25 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
 
-            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig].
+            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig].
 
             - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
 
-            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
+            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
 
-            - data_sink_id: typing.Optional[str].
+            - data_sink_id: typing.Optional[str].
 
-            - data_sink: typing.Optional[DataSinkCreate].
+            - data_sink: typing.Optional[DataSinkCreate].
 
-            - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
+            - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
 
-            - eval_parameters: typing.Optional[EvalExecutionParams].
+            - eval_parameters: typing.Optional[EvalExecutionParams].
 
-            - llama_parse_parameters: typing.Optional[LlamaParseParameters].
+            - llama_parse_parameters: typing.Optional[LlamaParseParameters].
 
             - name: typing.Optional[str].
 
-            - managed_pipeline_id: typing.Optional[str].
-        ---
-        from llama_cloud import (
-            ConfigurableDataSinkNames,
-            DataSinkCreate,
-            EvalExecutionParams,
-            FilterCondition,
-            LlamaParseParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.update_existing_pipeline(
-            pipeline_id="string",
-            data_sink=DataSinkCreate(
-                name="string",
-                sink_type=ConfigurableDataSinkNames.CHROMA,
-            ),
-            preset_retrieval_parameters=PresetRetrievalParams(
-                search_filters=MetadataFilters(
-                    filters=[],
-                    condition=FilterCondition.AND,
-                ),
-                retrieval_mode=RetrievalMode.CHUNKS,
-            ),
-            eval_parameters=EvalExecutionParams(
-                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
-            ),
-            llama_parse_parameters=LlamaParseParameters(),
-        )
+            - managed_pipeline_id: typing.Optional[str].
         """
         _request: typing.Dict[str, typing.Any] = {}
         if embedding_config is not OMIT:
```
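`update_existing_pipeline` keeps its keyword-only parameters, all defaulted to the `OMIT` sentinel, so a partial update sends only the fields that are explicitly passed. A hedged sketch using only parameters documented in the hunk above:

```python
# Partial update sketch: arguments left at the OMIT default are excluded
# from the request body, so only `name` changes here.
from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)
pipeline = client.pipelines.update_existing_pipeline(
    pipeline_id="YOUR_PIPELINE_ID",  # placeholder value
    name="renamed-pipeline",
)
```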
```diff
@@ -461,15 +337,6 @@ class PipelinesClient:
 
         Parameters:
             - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.sync_pipeline(
-            pipeline_id="string",
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
@@ -493,15 +360,6 @@ class PipelinesClient:
 
         Parameters:
             - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.copy_pipeline(
-            pipeline_id="string",
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
@@ -826,7 +684,9 @@ class PipelinesClient:
         pipeline_id: str,
         file_id: str,
         *,
-        custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]] = OMIT,
+        custom_metadata: typing.Optional[
+            typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
+        ] = OMIT,
     ) -> PipelineFile:
         """
         Update a file for a pipeline.
@@ -836,7 +696,7 @@ class PipelinesClient:
 
             - file_id: str.
 
-            - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]].
+            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
         ---
         from llama_cloud.client import LlamaCloud
 
```
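The two hunks above widen `custom_metadata` on the file-update method from `Dict[str, PipelineFileUpdateCustomMetadataValue]` to `Dict[str, Optional[PipelineFileUpdateCustomMetadataValue]]`, so individual metadata values may now be `None`. A sketch of what that permits; the method name `update_pipeline_file` is an assumption inferred from the "Update a file for a pipeline." docstring, since the `def` line sits outside the hunk:

```python
# Hedged sketch: None becomes a legal per-key metadata value in 0.1.1.
# update_pipeline_file is an assumed method name (not shown in the diff).
from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)
pipeline_file = client.pipelines.update_pipeline_file(
    pipeline_id="YOUR_PIPELINE_ID",  # placeholder value
    file_id="YOUR_FILE_ID",          # placeholder value
    custom_metadata={
        "category": "invoice",  # ordinary value, allowed in both versions
        "reviewed_by": None,    # newly allowed by the widened type
    },
)
```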
```diff
@@ -1051,7 +911,7 @@ class PipelinesClient:
 
             - data_source_id: str.
 
-            - sync_interval: typing.Optional[float].
+            - sync_interval: typing.Optional[float].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -1132,16 +992,6 @@ class PipelinesClient:
             - pipeline_id: str.
 
             - data_source_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.sync_pipeline_data_source(
-            pipeline_id="string",
-            data_source_id="string",
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
@@ -1214,6 +1064,7 @@ class PipelinesClient:
         retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         retrieve_image_nodes: typing.Optional[bool] = OMIT,
         query: str,
+        class_name: typing.Optional[str] = OMIT,
     ) -> RetrieveResults:
         """
         Get retrieval results for a managed pipeline and a query
@@ -1221,25 +1072,27 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.
 
-            - dense_similarity_top_k: typing.Optional[int].
+            - dense_similarity_top_k: typing.Optional[int].
 
-            - sparse_similarity_top_k: typing.Optional[int].
+            - sparse_similarity_top_k: typing.Optional[int].
 
-            - enable_reranking: typing.Optional[bool].
+            - enable_reranking: typing.Optional[bool].
 
-            - rerank_top_n: typing.Optional[int].
+            - rerank_top_n: typing.Optional[int].
 
-            - alpha: typing.Optional[float].
+            - alpha: typing.Optional[float].
 
-            - search_filters: typing.Optional[MetadataFilters].
+            - search_filters: typing.Optional[MetadataFilters].
 
-            - files_top_k: typing.Optional[int].
+            - files_top_k: typing.Optional[int].
 
             - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
 
             - retrieve_image_nodes: typing.Optional[bool]. Whether to retrieve image nodes.
 
             - query: str. The query to retrieve against.
+
+            - class_name: typing.Optional[str].
         ---
         from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
         from llama_cloud.client import LlamaCloud
@@ -1276,6 +1129,8 @@ class PipelinesClient:
             _request["retrieval_mode"] = retrieval_mode
         if retrieve_image_nodes is not OMIT:
             _request["retrieve_image_nodes"] = retrieve_image_nodes
+        if class_name is not OMIT:
+            _request["class_name"] = class_name
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
```
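Retrieval gains an optional `class_name` parameter: it is added to the signature, documented, and copied into the request body posted to `api/v1/pipelines/{pipeline_id}/retrieve`. A sketch of a call that exercises it; the method name `run_search` is an assumption, as the hunks show the endpoint but not the `def` line:

```python
# Hedged sketch of pipeline retrieval with the new class_name field.
# run_search is an assumed method name for the .../retrieve endpoint;
# the class_name value is purely illustrative.
from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)
results = client.pipelines.run_search(
    pipeline_id="YOUR_PIPELINE_ID",  # placeholder value
    query="What is the refund policy?",
    class_name="MyVectorStoreClass",  # omitted from the body when not passed
)
```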
```diff
@@ -1820,46 +1675,6 @@ class AsyncPipelinesClient:
             - project_id: typing.Optional[str].
 
             - request: PipelineCreate.
-        ---
-        from llama_cloud import (
-            ConfigurableDataSinkNames,
-            DataSinkCreate,
-            EvalExecutionParams,
-            FilterCondition,
-            LlamaParseParameters,
-            MetadataFilters,
-            PipelineCreate,
-            PipelineType,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.create_pipeline(
-            request=PipelineCreate(
-                data_sink=DataSinkCreate(
-                    name="string",
-                    sink_type=ConfigurableDataSinkNames.CHROMA,
-                ),
-                preset_retrieval_parameters=PresetRetrievalParams(
-                    search_filters=MetadataFilters(
-                        filters=[],
-                        condition=FilterCondition.AND,
-                    ),
-                    retrieval_mode=RetrievalMode.CHUNKS,
-                ),
-                eval_parameters=EvalExecutionParams(
-                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
-                ),
-                llama_parse_parameters=LlamaParseParameters(),
-                name="string",
-                pipeline_type=PipelineType.PLAYGROUND,
-            ),
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
@@ -1888,46 +1703,6 @@ class AsyncPipelinesClient:
             - project_id: typing.Optional[str].
 
             - request: PipelineCreate.
-        ---
-        from llama_cloud import (
-            ConfigurableDataSinkNames,
-            DataSinkCreate,
-            EvalExecutionParams,
-            FilterCondition,
-            LlamaParseParameters,
-            MetadataFilters,
-            PipelineCreate,
-            PipelineType,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.upsert_pipeline(
-            request=PipelineCreate(
-                data_sink=DataSinkCreate(
-                    name="string",
-                    sink_type=ConfigurableDataSinkNames.CHROMA,
-                ),
-                preset_retrieval_parameters=PresetRetrievalParams(
-                    search_filters=MetadataFilters(
-                        filters=[],
-                        condition=FilterCondition.AND,
-                    ),
-                    retrieval_mode=RetrievalMode.CHUNKS,
-                ),
-                eval_parameters=EvalExecutionParams(
-                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
-                ),
-                llama_parse_parameters=LlamaParseParameters(),
-                name="string",
-                pipeline_type=PipelineType.PLAYGROUND,
-            ),
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
@@ -1953,15 +1728,6 @@ class AsyncPipelinesClient:
 
         Parameters:
             - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_pipeline(
-            pipeline_id="string",
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
```
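`AsyncPipelinesClient` mirrors every change above; the removed docstring examples differ only in using `AsyncLlamaCloud` and `await`. A sketch of the async call shape, matching the `get_pipeline` example deleted in the hunk above:

```python
# Async sketch reconstructed from the removed AsyncLlamaCloud example.
import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(
        token="YOUR_TOKEN",
    )
    pipeline = await client.pipelines.get_pipeline(
        pipeline_id="YOUR_PIPELINE_ID",  # placeholder value
    )
    print(pipeline)


asyncio.run(main())
```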
```diff
@@ -2000,60 +1766,25 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
 
-            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig].
+            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig].
 
             - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
 
-            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
+            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
 
-            - data_sink_id: typing.Optional[str].
+            - data_sink_id: typing.Optional[str].
 
-            - data_sink: typing.Optional[DataSinkCreate].
+            - data_sink: typing.Optional[DataSinkCreate].
 
-            - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
+            - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
 
-            - eval_parameters: typing.Optional[EvalExecutionParams].
+            - eval_parameters: typing.Optional[EvalExecutionParams].
 
-            - llama_parse_parameters: typing.Optional[LlamaParseParameters].
+            - llama_parse_parameters: typing.Optional[LlamaParseParameters].
 
             - name: typing.Optional[str].
 
-            - managed_pipeline_id: typing.Optional[str].
-        ---
-        from llama_cloud import (
-            ConfigurableDataSinkNames,
-            DataSinkCreate,
-            EvalExecutionParams,
-            FilterCondition,
-            LlamaParseParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.update_existing_pipeline(
-            pipeline_id="string",
-            data_sink=DataSinkCreate(
-                name="string",
-                sink_type=ConfigurableDataSinkNames.CHROMA,
-            ),
-            preset_retrieval_parameters=PresetRetrievalParams(
-                search_filters=MetadataFilters(
-                    filters=[],
-                    condition=FilterCondition.AND,
-                ),
-                retrieval_mode=RetrievalMode.CHUNKS,
-            ),
-            eval_parameters=EvalExecutionParams(
-                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
-            ),
-            llama_parse_parameters=LlamaParseParameters(),
-        )
+            - managed_pipeline_id: typing.Optional[str].
         """
         _request: typing.Dict[str, typing.Any] = {}
         if embedding_config is not OMIT:
@@ -2163,15 +1894,6 @@ class AsyncPipelinesClient:
 
         Parameters:
             - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.sync_pipeline(
-            pipeline_id="string",
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
@@ -2195,15 +1917,6 @@ class AsyncPipelinesClient:
 
         Parameters:
             - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.copy_pipeline(
-            pipeline_id="string",
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
@@ -2530,7 +2243,9 @@ class AsyncPipelinesClient:
         pipeline_id: str,
         file_id: str,
         *,
-        custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]] = OMIT,
+        custom_metadata: typing.Optional[
+            typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
+        ] = OMIT,
     ) -> PipelineFile:
         """
         Update a file for a pipeline.
@@ -2540,7 +2255,7 @@ class AsyncPipelinesClient:
 
             - file_id: str.
 
-            - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]].
+            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2755,7 +2470,7 @@ class AsyncPipelinesClient:
 
             - data_source_id: str.
 
-            - sync_interval: typing.Optional[float].
+            - sync_interval: typing.Optional[float].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -2836,16 +2551,6 @@ class AsyncPipelinesClient:
             - pipeline_id: str.
 
             - data_source_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.sync_pipeline_data_source(
-            pipeline_id="string",
-            data_source_id="string",
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
@@ -2920,6 +2625,7 @@ class AsyncPipelinesClient:
         retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         retrieve_image_nodes: typing.Optional[bool] = OMIT,
         query: str,
+        class_name: typing.Optional[str] = OMIT,
     ) -> RetrieveResults:
         """
         Get retrieval results for a managed pipeline and a query
@@ -2927,25 +2633,27 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.
 
-            - dense_similarity_top_k: typing.Optional[int].
+            - dense_similarity_top_k: typing.Optional[int].
 
-            - sparse_similarity_top_k: typing.Optional[int].
+            - sparse_similarity_top_k: typing.Optional[int].
 
-            - enable_reranking: typing.Optional[bool].
+            - enable_reranking: typing.Optional[bool].
 
-            - rerank_top_n: typing.Optional[int].
+            - rerank_top_n: typing.Optional[int].
 
-            - alpha: typing.Optional[float].
+            - alpha: typing.Optional[float].
 
-            - search_filters: typing.Optional[MetadataFilters].
+            - search_filters: typing.Optional[MetadataFilters].
 
-            - files_top_k: typing.Optional[int].
+            - files_top_k: typing.Optional[int].
 
             - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
 
             - retrieve_image_nodes: typing.Optional[bool]. Whether to retrieve image nodes.
 
             - query: str. The query to retrieve against.
+
+            - class_name: typing.Optional[str].
         ---
         from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
         from llama_cloud.client import AsyncLlamaCloud
@@ -2982,6 +2690,8 @@ class AsyncPipelinesClient:
             _request["retrieval_mode"] = retrieval_mode
         if retrieve_image_nodes is not OMIT:
            _request["retrieve_image_nodes"] = retrieve_image_nodes
+        if class_name is not OMIT:
+            _request["class_name"] = class_name
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
```
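Throughout both clients, new optional fields are gated with `is not OMIT` rather than `is not None`. That distinction is what lets the widened `custom_metadata` type carry explicit `None` values: only an argument that was never passed is dropped from the request body. A self-contained sketch of the sentinel pattern (the names here are illustrative, not the library's internals):

```python
import typing

# Unique sentinel meaning "argument not given"; distinct from None.
OMIT = typing.cast(typing.Any, object())


def build_request(class_name: typing.Optional[str] = OMIT) -> typing.Dict[str, typing.Any]:
    request: typing.Dict[str, typing.Any] = {}
    if class_name is not OMIT:
        # An explicit None is still serialized; only an unpassed argument is dropped.
        request["class_name"] = class_name
    return request


assert build_request() == {}
assert build_request(class_name=None) == {"class_name": None}
assert build_request(class_name="MyClass") == {"class_name": "MyClass"}
```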
llama_cloud/resources/pipelines/types/__init__.py

```diff
@@ -11,11 +11,7 @@ from .pipeline_update_embedding_config import (
     PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
 )
-from .pipeline_update_transform_config import (
-    PipelineUpdateTransformConfig,
-    PipelineUpdateTransformConfig_Advanced,
-    PipelineUpdateTransformConfig_Auto,
-)
+from .pipeline_update_transform_config import PipelineUpdateTransformConfig
 
 __all__ = [
     "PipelineFileUpdateCustomMetadataValue",
@@ -28,6 +24,4 @@ __all__ = [
     "PipelineUpdateEmbeddingConfig_OpenaiEmbedding",
     "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
     "PipelineUpdateTransformConfig",
-    "PipelineUpdateTransformConfig_Advanced",
-    "PipelineUpdateTransformConfig_Auto",
 ]
```
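The transform-config union is collapsed in 0.1.1: `PipelineUpdateTransformConfig` remains exported, but the `PipelineUpdateTransformConfig_Auto` and `PipelineUpdateTransformConfig_Advanced` variant classes are not (the matching `+1 -24` changes to `pipeline_update_transform_config.py` and `pipeline_create_transform_config.py` appear in the file list above). Code that imported the variants needs updating when upgrading; a sketch, using the import path of the `types/__init__.py` shown above:

```python
# 0.0.17 exported the tagged-union variants:
# from llama_cloud.resources.pipelines.types import (
#     PipelineUpdateTransformConfig,
#     PipelineUpdateTransformConfig_Advanced,
#     PipelineUpdateTransformConfig_Auto,
# )

# 0.1.1 keeps only the union type itself:
from llama_cloud.resources.pipelines.types import PipelineUpdateTransformConfig
```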