llama-cloud 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +76 -14
- llama_cloud/resources/__init__.py +14 -0
- llama_cloud/resources/data_sources/types/data_source_update_component_one.py +2 -0
- llama_cloud/resources/evals/client.py +5 -5
- llama_cloud/resources/parsing/client.py +8 -0
- llama_cloud/resources/pipelines/__init__.py +14 -0
- llama_cloud/resources/pipelines/client.py +285 -66
- llama_cloud/resources/pipelines/types/__init__.py +16 -0
- llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +78 -0
- llama_cloud/types/__init__.py +68 -14
- llama_cloud/types/{embedding_config.py → azure_open_ai_embedding_config.py} +4 -6
- llama_cloud/types/bedrock_embedding_config.py +34 -0
- llama_cloud/types/box_auth_mechanism.py +21 -0
- llama_cloud/types/chat_data.py +1 -1
- llama_cloud/types/chat_message.py +14 -4
- llama_cloud/types/cloud_azure_ai_search_vector_store.py +3 -0
- llama_cloud/types/cloud_box_data_source.py +51 -0
- llama_cloud/types/cloud_document.py +3 -0
- llama_cloud/types/cloud_document_create.py +3 -0
- llama_cloud/types/cloud_sharepoint_data_source.py +2 -1
- llama_cloud/types/cohere_embedding_config.py +34 -0
- llama_cloud/types/configurable_data_source_names.py +4 -0
- llama_cloud/types/custom_claims.py +0 -3
- llama_cloud/types/data_source_component_one.py +2 -0
- llama_cloud/types/data_source_create_component_one.py +2 -0
- llama_cloud/types/eval_execution_params.py +2 -2
- llama_cloud/types/eval_execution_params_override.py +2 -2
- llama_cloud/types/filter_operator.py +4 -0
- llama_cloud/types/gemini_embedding_config.py +34 -0
- llama_cloud/types/hugging_face_inference_api_embedding_config.py +34 -0
- llama_cloud/types/input_message.py +42 -0
- llama_cloud/types/llama_parse_parameters.py +4 -1
- llama_cloud/types/{eval_llm_model_data.py → llm_model_data.py} +1 -1
- llama_cloud/types/llm_parameters.py +2 -2
- llama_cloud/types/{supported_eval_llm_model.py → message_annotation.py} +6 -6
- llama_cloud/types/metadata_filter.py +1 -1
- llama_cloud/types/open_ai_embedding_config.py +34 -0
- llama_cloud/types/page_segmentation_config.py +2 -0
- llama_cloud/types/pipeline.py +11 -1
- llama_cloud/types/pipeline_create.py +3 -3
- llama_cloud/types/pipeline_create_embedding_config.py +78 -0
- llama_cloud/types/pipeline_data_source.py +7 -0
- llama_cloud/types/pipeline_data_source_component_one.py +2 -0
- llama_cloud/types/pipeline_data_source_create.py +3 -0
- llama_cloud/types/pipeline_embedding_config.py +78 -0
- llama_cloud/types/pipeline_transform_config.py +31 -0
- llama_cloud/types/playground_session.py +51 -0
- llama_cloud/types/supported_llm_model.py +41 -0
- llama_cloud/types/{supported_eval_llm_model_names.py → supported_llm_model_names.py} +10 -6
- {llama_cloud-0.0.13.dist-info → llama_cloud-0.0.15.dist-info}/METADATA +1 -1
- {llama_cloud-0.0.13.dist-info → llama_cloud-0.0.15.dist-info}/RECORD +53 -42
- llama_cloud/types/embedding_config_component.py +0 -7
- llama_cloud/types/embedding_config_component_one.py +0 -19
- llama_cloud/types/embedding_config_type.py +0 -41
- {llama_cloud-0.0.13.dist-info → llama_cloud-0.0.15.dist-info}/LICENSE +0 -0
- {llama_cloud-0.0.13.dist-info → llama_cloud-0.0.15.dist-info}/WHEEL +0 -0
llama_cloud/resources/pipelines/client.py

@@ -10,17 +10,16 @@ from ...core.jsonable_encoder import jsonable_encoder
 from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.chat_data import ChatData
-from ...types.chat_message import ChatMessage
 from ...types.cloud_document import CloudDocument
 from ...types.cloud_document_create import CloudDocumentCreate
 from ...types.configured_transformation_item import ConfiguredTransformationItem
 from ...types.data_sink_create import DataSinkCreate
-from ...types.embedding_config import EmbeddingConfig
 from ...types.eval_dataset_job_record import EvalDatasetJobRecord
 from ...types.eval_execution_params import EvalExecutionParams
 from ...types.eval_execution_params_override import EvalExecutionParamsOverride
 from ...types.eval_question_result import EvalQuestionResult
 from ...types.http_validation_error import HttpValidationError
+from ...types.input_message import InputMessage
 from ...types.llama_parse_parameters import LlamaParseParameters
 from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
 from ...types.metadata_filters import MetadataFilters
@@ -32,11 +31,13 @@ from ...types.pipeline_deployment import PipelineDeployment
 from ...types.pipeline_file import PipelineFile
 from ...types.pipeline_file_create import PipelineFileCreate
 from ...types.pipeline_type import PipelineType
+from ...types.playground_session import PlaygroundSession
 from ...types.preset_retrieval_params import PresetRetrievalParams
 from ...types.retrieval_mode import RetrievalMode
 from ...types.retrieve_results import RetrieveResults
 from ...types.text_node import TextNode
 from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
+from .types.pipeline_update_embedding_config import PipelineUpdateEmbeddingConfig
 from .types.pipeline_update_transform_config import PipelineUpdateTransformConfig

 try:
@@ -121,8 +122,6 @@ class PipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
@@ -131,7 +130,7 @@ class PipelinesClient:
             PipelineType,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import LlamaCloud

@@ -140,9 +139,6 @@ class PipelinesClient:
         )
         client.pipelines.create_pipeline(
             request=PipelineCreate(
-                embedding_config=EmbeddingConfig(
-                    type=EmbeddingConfigType.OPENAI_EMBEDDING,
-                ),
                 data_sink=DataSinkCreate(
                     name="string",
                     sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -155,7 +151,7 @@ class PipelinesClient:
                     retrieval_mode=RetrievalMode.CHUNKS,
                 ),
                 eval_parameters=EvalExecutionParams(
-                    llm_model=
+                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
                 ),
                 llama_parse_parameters=LlamaParseParameters(),
                 name="string",
@@ -194,8 +190,6 @@ class PipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
@@ -204,7 +198,7 @@ class PipelinesClient:
             PipelineType,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import LlamaCloud

@@ -213,9 +207,6 @@ class PipelinesClient:
         )
         client.pipelines.upsert_pipeline(
             request=PipelineCreate(
-                embedding_config=EmbeddingConfig(
-                    type=EmbeddingConfigType.OPENAI_EMBEDDING,
-                ),
                 data_sink=DataSinkCreate(
                     name="string",
                     sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -228,7 +219,7 @@ class PipelinesClient:
                     retrieval_mode=RetrievalMode.CHUNKS,
                 ),
                 eval_parameters=EvalExecutionParams(
-                    llm_model=
+                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
                 ),
                 llama_parse_parameters=LlamaParseParameters(),
                 name="string",
@@ -290,7 +281,7 @@ class PipelinesClient:
         self,
         pipeline_id: str,
         *,
-        embedding_config: typing.Optional[
+        embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig] = OMIT,
         transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
         configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
         data_sink_id: typing.Optional[str] = OMIT,
@@ -307,11 +298,11 @@ class PipelinesClient:
         Parameters:
             - pipeline_id: str.

-            - embedding_config: typing.Optional[
+            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig]. Configuration for the embedding model.

             - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.

-            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
+            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline.

             - data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.

@@ -330,15 +321,13 @@ class PipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
             MetadataFilters,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import LlamaCloud

@@ -347,9 +336,6 @@ class PipelinesClient:
         )
         client.pipelines.update_existing_pipeline(
             pipeline_id="string",
-            embedding_config=EmbeddingConfig(
-                type=EmbeddingConfigType.OPENAI_EMBEDDING,
-            ),
             data_sink=DataSinkCreate(
                 name="string",
                 sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -362,7 +348,7 @@ class PipelinesClient:
                 retrieval_mode=RetrievalMode.CHUNKS,
             ),
             eval_parameters=EvalExecutionParams(
-                llm_model=
+                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
             llama_parse_parameters=LlamaParseParameters(),
         )
@@ -591,7 +577,7 @@ class PipelinesClient:

             - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
-        from llama_cloud import EvalExecutionParamsOverride,
+        from llama_cloud import EvalExecutionParamsOverride, SupportedLlmModelNames
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -602,7 +588,7 @@ class PipelinesClient:
             eval_dataset_id="string",
             eval_question_ids=[],
             params=EvalExecutionParamsOverride(
-                llm_model=
+                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
         )
         """
@@ -1054,6 +1040,52 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def update_pipeline_data_source(
+        self, pipeline_id: str, data_source_id: str, *, sync_interval: typing.Optional[float] = OMIT
+    ) -> PipelineDataSource:
+        """
+        Update the configuration of a data source in a pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+
+            - data_source_id: str.
+
+            - sync_interval: typing.Optional[float]. The interval at which the data source should be synced.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.update_pipeline_data_source(
+            pipeline_id="string",
+            data_source_id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if sync_interval is not OMIT:
+            _request["sync_interval"] = sync_interval
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}",
+            ),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PipelineDataSource, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def delete_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> None:
         """
         Delete a data source from a pipeline.
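For reference, a minimal sketch of calling the new endpoint with the sync-interval knob from the synchronous client. The 12-hour figure and the assumption that the interval is expressed in seconds are mine, not stated in this diff:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Re-sync the data source roughly twice a day. The parameter is a float per the
# signature above; treating it as seconds is an assumption, not documented here.
data_source = client.pipelines.update_pipeline_data_source(
    pipeline_id="<pipeline-id>",
    data_source_id="<data-source-id>",
    sync_interval=43200.0,
)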
@@ -1130,6 +1162,44 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_pipeline_data_source_status(self, pipeline_id: str, data_source_id: str) -> ManagedIngestionStatusResponse:
+        """
+        Get the status of a data source for a pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+
+            - data_source_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.get_pipeline_data_source_status(
+            pipeline_id="string",
+            data_source_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}/status",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def run_search(
         self,
         pipeline_id: str,
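A small, hedged sketch of polling that status endpoint. The fields of ManagedIngestionStatusResponse are not part of this diff, so the sketch only prints the parsed response rather than inspecting a specific attribute:

import time

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Poll the per-data-source status a few times; the response model's fields are
# not shown in this diff, so just print whatever comes back.
for _ in range(5):
    status = client.pipelines.get_pipeline_data_source_status(
        pipeline_id="<pipeline-id>",
        data_source_id="<data-source-id>",
    )
    print(status)
    time.sleep(10)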
@@ -1287,21 +1357,57 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_playground_session(self, pipeline_id: str) -> PlaygroundSession:
+        """
+        Get a playground session for a user and pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.get_playground_session(
+            pipeline_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/playground-session"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PlaygroundSession, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def chat(
         self,
         pipeline_id: str,
         *,
-        messages: typing.List[
-        data: ChatData,
+        messages: typing.Optional[typing.List[InputMessage]] = OMIT,
+        data: typing.Optional[ChatData] = OMIT,
         class_name: typing.Optional[str] = OMIT,
     ) -> typing.Any:
         """
+        Make a retrieval query + chat completion for a managed pipeline.
+
         Parameters:
             - pipeline_id: str.

-            - messages: typing.List[
+            - messages: typing.Optional[typing.List[InputMessage]].

-            - data: ChatData.
+            - data: typing.Optional[ChatData].

             - class_name: typing.Optional[str].
         ---
@@ -1320,7 +1426,6 @@ class PipelinesClient:
         )
         client.pipelines.chat(
             pipeline_id="string",
-            messages=[],
             data=ChatData(
                 retrieval_parameters=PresetRetrievalParams(
                     search_filters=MetadataFilters(
@@ -1333,7 +1438,11 @@ class PipelinesClient:
             ),
         )
         """
-        _request: typing.Dict[str, typing.Any] = {
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        if data is not OMIT:
+            _request["data"] = data
         if class_name is not OMIT:
             _request["class_name"] = class_name
         _response = self._client_wrapper.httpx_client.request(
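Since chat() now takes a list of InputMessage objects instead of ChatMessage, a hedged sketch of a call might look like the following. InputMessage's constructor is not shown in this diff, so the role/content fields (and role accepting a plain string) are assumptions:

from llama_cloud import InputMessage
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Both messages and data are now optional (OMIT-able) keyword arguments.
# The role/content fields below are assumed, not confirmed by this diff.
response = client.pipelines.chat(
    pipeline_id="<pipeline-id>",
    messages=[InputMessage(role="user", content="What changed in the last release?")],
)
print(response)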
@@ -1707,8 +1816,6 @@ class AsyncPipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
@@ -1717,7 +1824,7 @@ class AsyncPipelinesClient:
             PipelineType,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import AsyncLlamaCloud

@@ -1726,9 +1833,6 @@ class AsyncPipelinesClient:
         )
         await client.pipelines.create_pipeline(
             request=PipelineCreate(
-                embedding_config=EmbeddingConfig(
-                    type=EmbeddingConfigType.OPENAI_EMBEDDING,
-                ),
                 data_sink=DataSinkCreate(
                     name="string",
                     sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1741,7 +1845,7 @@ class AsyncPipelinesClient:
                     retrieval_mode=RetrievalMode.CHUNKS,
                 ),
                 eval_parameters=EvalExecutionParams(
-                    llm_model=
+                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
                 ),
                 llama_parse_parameters=LlamaParseParameters(),
                 name="string",
@@ -1780,8 +1884,6 @@ class AsyncPipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
@@ -1790,7 +1892,7 @@ class AsyncPipelinesClient:
             PipelineType,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
        )
         from llama_cloud.client import AsyncLlamaCloud

@@ -1799,9 +1901,6 @@ class AsyncPipelinesClient:
         )
         await client.pipelines.upsert_pipeline(
             request=PipelineCreate(
-                embedding_config=EmbeddingConfig(
-                    type=EmbeddingConfigType.OPENAI_EMBEDDING,
-                ),
                 data_sink=DataSinkCreate(
                     name="string",
                     sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1814,7 +1913,7 @@ class AsyncPipelinesClient:
                     retrieval_mode=RetrievalMode.CHUNKS,
                 ),
                 eval_parameters=EvalExecutionParams(
-                    llm_model=
+                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
                 ),
                 llama_parse_parameters=LlamaParseParameters(),
                 name="string",
@@ -1876,7 +1975,7 @@ class AsyncPipelinesClient:
         self,
         pipeline_id: str,
         *,
-        embedding_config: typing.Optional[
+        embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig] = OMIT,
         transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
         configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
         data_sink_id: typing.Optional[str] = OMIT,
@@ -1893,11 +1992,11 @@ class AsyncPipelinesClient:
         Parameters:
             - pipeline_id: str.

-            - embedding_config: typing.Optional[
+            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig]. Configuration for the embedding model.

             - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.

-            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
+            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline.

             - data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.

@@ -1916,15 +2015,13 @@ class AsyncPipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
             MetadataFilters,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import AsyncLlamaCloud

@@ -1933,9 +2030,6 @@ class AsyncPipelinesClient:
         )
         await client.pipelines.update_existing_pipeline(
             pipeline_id="string",
-            embedding_config=EmbeddingConfig(
-                type=EmbeddingConfigType.OPENAI_EMBEDDING,
-            ),
             data_sink=DataSinkCreate(
                 name="string",
                 sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1948,7 +2042,7 @@ class AsyncPipelinesClient:
                 retrieval_mode=RetrievalMode.CHUNKS,
             ),
             eval_parameters=EvalExecutionParams(
-                llm_model=
+                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
             llama_parse_parameters=LlamaParseParameters(),
         )
@@ -2179,7 +2273,7 @@ class AsyncPipelinesClient:

             - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
-        from llama_cloud import EvalExecutionParamsOverride,
+        from llama_cloud import EvalExecutionParamsOverride, SupportedLlmModelNames
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -2190,7 +2284,7 @@ class AsyncPipelinesClient:
             eval_dataset_id="string",
             eval_question_ids=[],
             params=EvalExecutionParamsOverride(
-                llm_model=
+                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
         )
         """
@@ -2642,6 +2736,52 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def update_pipeline_data_source(
+        self, pipeline_id: str, data_source_id: str, *, sync_interval: typing.Optional[float] = OMIT
+    ) -> PipelineDataSource:
+        """
+        Update the configuration of a data source in a pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+
+            - data_source_id: str.
+
+            - sync_interval: typing.Optional[float]. The interval at which the data source should be synced.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.update_pipeline_data_source(
+            pipeline_id="string",
+            data_source_id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if sync_interval is not OMIT:
+            _request["sync_interval"] = sync_interval
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}",
+            ),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PipelineDataSource, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def delete_pipeline_data_source(self, pipeline_id: str, data_source_id: str) -> None:
         """
         Delete a data source from a pipeline.
@@ -2718,6 +2858,46 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_pipeline_data_source_status(
+        self, pipeline_id: str, data_source_id: str
+    ) -> ManagedIngestionStatusResponse:
+        """
+        Get the status of a data source for a pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+
+            - data_source_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.get_pipeline_data_source_status(
+            pipeline_id="string",
+            data_source_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}/status",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def run_search(
         self,
         pipeline_id: str,
@@ -2875,21 +3055,57 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_playground_session(self, pipeline_id: str) -> PlaygroundSession:
+        """
+        Get a playground session for a user and pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.get_playground_session(
+            pipeline_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/playground-session"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PlaygroundSession, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def chat(
         self,
         pipeline_id: str,
         *,
-        messages: typing.List[
-        data: ChatData,
+        messages: typing.Optional[typing.List[InputMessage]] = OMIT,
+        data: typing.Optional[ChatData] = OMIT,
         class_name: typing.Optional[str] = OMIT,
     ) -> typing.Any:
         """
+        Make a retrieval query + chat completion for a managed pipeline.
+
         Parameters:
             - pipeline_id: str.

-            - messages: typing.List[
+            - messages: typing.Optional[typing.List[InputMessage]].

-            - data: ChatData.
+            - data: typing.Optional[ChatData].

             - class_name: typing.Optional[str].
         ---
@@ -2908,7 +3124,6 @@ class AsyncPipelinesClient:
         )
         await client.pipelines.chat(
             pipeline_id="string",
-            messages=[],
             data=ChatData(
                 retrieval_parameters=PresetRetrievalParams(
                     search_filters=MetadataFilters(
@@ -2921,7 +3136,11 @@ class AsyncPipelinesClient:
             ),
         )
         """
-        _request: typing.Dict[str, typing.Any] = {
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        if data is not OMIT:
+            _request["data"] = data
         if class_name is not OMIT:
             _request["class_name"] = class_name
         _response = await self._client_wrapper.httpx_client.request(
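The async client mirrors the synchronous surface one-to-one. A brief sketch of driving the new data-source endpoints with AsyncLlamaCloud; the identifiers are placeholders and the 3600-second interval assumes the float is interpreted as seconds, which this diff does not state:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    # Bump the sync interval, then read back the ingestion status for the same source.
    await client.pipelines.update_pipeline_data_source(
        pipeline_id="<pipeline-id>",
        data_source_id="<data-source-id>",
        sync_interval=3600.0,
    )
    status = await client.pipelines.get_pipeline_data_source_status(
        pipeline_id="<pipeline-id>",
        data_source_id="<data-source-id>",
    )
    print(status)


asyncio.run(main())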