llama-cloud 0.0.12__py3-none-any.whl → 0.0.14__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

This version of llama-cloud has been flagged as potentially problematic.

Files changed (56)
  1. llama_cloud/__init__.py +76 -14
  2. llama_cloud/resources/__init__.py +14 -0
  3. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +2 -0
  4. llama_cloud/resources/evals/client.py +5 -5
  5. llama_cloud/resources/parsing/client.py +8 -0
  6. llama_cloud/resources/pipelines/__init__.py +14 -0
  7. llama_cloud/resources/pipelines/client.py +115 -66
  8. llama_cloud/resources/pipelines/types/__init__.py +16 -0
  9. llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +78 -0
  10. llama_cloud/types/__init__.py +68 -14
  11. llama_cloud/types/{embedding_config.py → azure_open_ai_embedding_config.py} +4 -6
  12. llama_cloud/types/bedrock_embedding_config.py +34 -0
  13. llama_cloud/types/box_auth_mechanism.py +21 -0
  14. llama_cloud/types/chat_data.py +1 -1
  15. llama_cloud/types/chat_message.py +14 -4
  16. llama_cloud/types/cloud_azure_ai_search_vector_store.py +3 -0
  17. llama_cloud/types/cloud_box_data_source.py +51 -0
  18. llama_cloud/types/cloud_document.py +3 -0
  19. llama_cloud/types/cloud_document_create.py +3 -0
  20. llama_cloud/types/cloud_sharepoint_data_source.py +2 -1
  21. llama_cloud/types/cohere_embedding_config.py +34 -0
  22. llama_cloud/types/configurable_data_source_names.py +4 -0
  23. llama_cloud/types/custom_claims.py +0 -3
  24. llama_cloud/types/data_source_component_one.py +2 -0
  25. llama_cloud/types/data_source_create_component_one.py +2 -0
  26. llama_cloud/types/eval_execution_params.py +2 -2
  27. llama_cloud/types/eval_execution_params_override.py +2 -2
  28. llama_cloud/types/filter_operator.py +4 -0
  29. llama_cloud/types/gemini_embedding_config.py +34 -0
  30. llama_cloud/types/hugging_face_inference_api_embedding_config.py +34 -0
  31. llama_cloud/types/input_message.py +42 -0
  32. llama_cloud/types/llama_parse_parameters.py +4 -1
  33. llama_cloud/types/{eval_llm_model_data.py → llm_model_data.py} +1 -1
  34. llama_cloud/types/llm_parameters.py +2 -2
  35. llama_cloud/types/{supported_eval_llm_model.py → message_annotation.py} +6 -6
  36. llama_cloud/types/metadata_filter.py +1 -1
  37. llama_cloud/types/open_ai_embedding_config.py +34 -0
  38. llama_cloud/types/page_segmentation_config.py +2 -0
  39. llama_cloud/types/parsing_usage.py +1 -1
  40. llama_cloud/types/pipeline.py +11 -1
  41. llama_cloud/types/pipeline_create.py +3 -3
  42. llama_cloud/types/pipeline_create_embedding_config.py +78 -0
  43. llama_cloud/types/pipeline_data_source_component_one.py +2 -0
  44. llama_cloud/types/pipeline_embedding_config.py +78 -0
  45. llama_cloud/types/pipeline_transform_config.py +31 -0
  46. llama_cloud/types/playground_session.py +51 -0
  47. llama_cloud/types/supported_llm_model.py +41 -0
  48. llama_cloud/types/supported_llm_model_names.py +41 -0
  49. {llama_cloud-0.0.12.dist-info → llama_cloud-0.0.14.dist-info}/METADATA +1 -1
  50. {llama_cloud-0.0.12.dist-info → llama_cloud-0.0.14.dist-info}/RECORD +52 -41
  51. llama_cloud/types/embedding_config_component.py +0 -7
  52. llama_cloud/types/embedding_config_component_one.py +0 -19
  53. llama_cloud/types/embedding_config_type.py +0 -41
  54. llama_cloud/types/supported_eval_llm_model_names.py +0 -29
  55. {llama_cloud-0.0.12.dist-info → llama_cloud-0.0.14.dist-info}/LICENSE +0 -0
  56. {llama_cloud-0.0.12.dist-info → llama_cloud-0.0.14.dist-info}/WHEEL +0 -0
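
At a glance, the breaking changes in this release: the single EmbeddingConfig / EmbeddingConfigType pair is replaced by per-provider config types (OpenAI, Azure OpenAI, Bedrock, Cohere, Gemini, Hugging Face Inference API) joined into tagged unions (PipelineEmbeddingConfig, PipelineCreateEmbeddingConfig, PipelineUpdateEmbeddingConfig); SupportedEvalLlmModelNames is renamed to SupportedLlmModelNames; and the pipeline chat endpoint moves from a required list of ChatMessage to an optional list of InputMessage. A minimal migration sketch follows, hedged because the provider config fields are defined in files this view does not expand, so the constructor is assumed to require nothing beyond its "type" discriminator:

# 0.0.12 (removed):
#   from llama_cloud import EmbeddingConfig, EmbeddingConfigType, SupportedEvalLlmModelNames
#   config = EmbeddingConfig(type=EmbeddingConfigType.OPENAI_EMBEDDING)

# 0.0.14 (sketch):
from llama_cloud import SupportedLlmModelNames  # renamed from SupportedEvalLlmModelNames
from llama_cloud.resources.pipelines.types import PipelineUpdateEmbeddingConfig_OpenaiEmbedding

# Each union member pins a Literal "type" discriminator onto its provider
# base class; provider-specific fields (model, credentials, ...) live on the base.
config = PipelineUpdateEmbeddingConfig_OpenaiEmbedding(type="OPENAI_EMBEDDING")
llm_model = SupportedLlmModelNames.GPT_3_5_TURBO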
llama_cloud/resources/pipelines/client.py

@@ -10,17 +10,16 @@ from ...core.jsonable_encoder import jsonable_encoder
 from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.chat_data import ChatData
-from ...types.chat_message import ChatMessage
 from ...types.cloud_document import CloudDocument
 from ...types.cloud_document_create import CloudDocumentCreate
 from ...types.configured_transformation_item import ConfiguredTransformationItem
 from ...types.data_sink_create import DataSinkCreate
-from ...types.embedding_config import EmbeddingConfig
 from ...types.eval_dataset_job_record import EvalDatasetJobRecord
 from ...types.eval_execution_params import EvalExecutionParams
 from ...types.eval_execution_params_override import EvalExecutionParamsOverride
 from ...types.eval_question_result import EvalQuestionResult
 from ...types.http_validation_error import HttpValidationError
+from ...types.input_message import InputMessage
 from ...types.llama_parse_parameters import LlamaParseParameters
 from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
 from ...types.metadata_filters import MetadataFilters
@@ -32,11 +31,13 @@ from ...types.pipeline_deployment import PipelineDeployment
 from ...types.pipeline_file import PipelineFile
 from ...types.pipeline_file_create import PipelineFileCreate
 from ...types.pipeline_type import PipelineType
+from ...types.playground_session import PlaygroundSession
 from ...types.preset_retrieval_params import PresetRetrievalParams
 from ...types.retrieval_mode import RetrievalMode
 from ...types.retrieve_results import RetrieveResults
 from ...types.text_node import TextNode
 from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
+from .types.pipeline_update_embedding_config import PipelineUpdateEmbeddingConfig
 from .types.pipeline_update_transform_config import PipelineUpdateTransformConfig

 try:
@@ -121,8 +122,6 @@ class PipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
@@ -131,7 +130,7 @@ class PipelinesClient:
             PipelineType,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import LlamaCloud

@@ -140,9 +139,6 @@ class PipelinesClient:
         )
         client.pipelines.create_pipeline(
             request=PipelineCreate(
-                embedding_config=EmbeddingConfig(
-                    type=EmbeddingConfigType.OPENAI_EMBEDDING,
-                ),
                 data_sink=DataSinkCreate(
                     name="string",
                     sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -155,7 +151,7 @@ class PipelinesClient:
                     retrieval_mode=RetrievalMode.CHUNKS,
                 ),
                 eval_parameters=EvalExecutionParams(
-                    llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
+                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
                 ),
                 llama_parse_parameters=LlamaParseParameters(),
                 name="string",
@@ -194,8 +190,6 @@ class PipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
@@ -204,7 +198,7 @@ class PipelinesClient:
             PipelineType,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import LlamaCloud

@@ -213,9 +207,6 @@ class PipelinesClient:
         )
         client.pipelines.upsert_pipeline(
             request=PipelineCreate(
-                embedding_config=EmbeddingConfig(
-                    type=EmbeddingConfigType.OPENAI_EMBEDDING,
-                ),
                 data_sink=DataSinkCreate(
                     name="string",
                     sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -228,7 +219,7 @@ class PipelinesClient:
                     retrieval_mode=RetrievalMode.CHUNKS,
                 ),
                 eval_parameters=EvalExecutionParams(
-                    llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
+                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
                 ),
                 llama_parse_parameters=LlamaParseParameters(),
                 name="string",
@@ -290,7 +281,7 @@ class PipelinesClient:
         self,
         pipeline_id: str,
         *,
-        embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
+        embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig] = OMIT,
         transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
         configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
         data_sink_id: typing.Optional[str] = OMIT,
@@ -307,11 +298,11 @@ class PipelinesClient:
        Parameters:
            - pipeline_id: str.

-            - embedding_config: typing.Optional[EmbeddingConfig]. Configuration for the embedding model.
+            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig]. Configuration for the embedding model.

            - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.

-            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. List of configured transformations.
+            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline.

            - data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.

@@ -330,15 +321,13 @@ class PipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
             MetadataFilters,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import LlamaCloud

@@ -347,9 +336,6 @@ class PipelinesClient:
         )
         client.pipelines.update_existing_pipeline(
             pipeline_id="string",
-            embedding_config=EmbeddingConfig(
-                type=EmbeddingConfigType.OPENAI_EMBEDDING,
-            ),
             data_sink=DataSinkCreate(
                 name="string",
                 sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -362,7 +348,7 @@ class PipelinesClient:
                 retrieval_mode=RetrievalMode.CHUNKS,
             ),
             eval_parameters=EvalExecutionParams(
-                llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
+                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
             llama_parse_parameters=LlamaParseParameters(),
         )
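
With the new signature above, updating a pipeline's embedding model means passing one of the PipelineUpdateEmbeddingConfig union members instead of the removed EmbeddingConfig. A minimal sketch, again assuming the OpenAI variant requires nothing beyond its discriminator (its base-class fields are not shown in this diff):

from llama_cloud.client import LlamaCloud
from llama_cloud.resources.pipelines.types import PipelineUpdateEmbeddingConfig_OpenaiEmbedding

client = LlamaCloud(
    token="YOUR_TOKEN",
)
# embedding_config now takes a tagged-union member; any provider-specific
# fields would come from OpenAiEmbeddingConfig.
client.pipelines.update_existing_pipeline(
    pipeline_id="string",
    embedding_config=PipelineUpdateEmbeddingConfig_OpenaiEmbedding(
        type="OPENAI_EMBEDDING",
    ),
)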
@@ -591,7 +577,7 @@ class PipelinesClient:

            - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
        ---
-        from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
+        from llama_cloud import EvalExecutionParamsOverride, SupportedLlmModelNames
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
@@ -602,7 +588,7 @@ class PipelinesClient:
            eval_dataset_id="string",
            eval_question_ids=[],
            params=EvalExecutionParamsOverride(
-                llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
+                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
            ),
        )
        """
@@ -1287,21 +1273,57 @@ class PipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_playground_session(self, pipeline_id: str) -> PlaygroundSession:
+        """
+        Get a playground session for a user and pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.get_playground_session(
+            pipeline_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/playground-session"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PlaygroundSession, _response.json()) # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def chat(
         self,
         pipeline_id: str,
         *,
-        messages: typing.List[ChatMessage],
-        data: ChatData,
+        messages: typing.Optional[typing.List[InputMessage]] = OMIT,
+        data: typing.Optional[ChatData] = OMIT,
         class_name: typing.Optional[str] = OMIT,
     ) -> typing.Any:
         """
+        Make a retrieval query + chat completion for a managed pipeline.
+
         Parameters:
             - pipeline_id: str.

-            - messages: typing.List[ChatMessage].
+            - messages: typing.Optional[typing.List[InputMessage]].

-            - data: ChatData.
+            - data: typing.Optional[ChatData].

             - class_name: typing.Optional[str].
         ---
@@ -1320,7 +1342,6 @@ class PipelinesClient:
         )
         client.pipelines.chat(
             pipeline_id="string",
-            messages=[],
             data=ChatData(
                 retrieval_parameters=PresetRetrievalParams(
                     search_filters=MetadataFilters(
@@ -1333,7 +1354,11 @@ class PipelinesClient:
             ),
         )
         """
-        _request: typing.Dict[str, typing.Any] = {"messages": messages, "data": data}
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        if data is not OMIT:
+            _request["data"] = data
         if class_name is not OMIT:
             _request["class_name"] = class_name
         _response = self._client_wrapper.httpx_client.request(
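
Two caller-visible changes land in the hunks above: chat's messages and data parameters become optional and are omitted from the request body when not supplied, and a new GET playground-session endpoint is exposed. A usage sketch against the new surface (InputMessage's fields live in llama_cloud/types/input_message.py, which this view does not expand, so no message payload is shown):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)

# New in 0.0.14: fetch the playground session for the calling user and pipeline.
session = client.pipelines.get_playground_session(
    pipeline_id="string",
)

# messages (a list of InputMessage) and data may now be omitted entirely;
# only the keys actually provided are sent in the request body.
result = client.pipelines.chat(
    pipeline_id="string",
)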
@@ -1707,8 +1732,6 @@ class AsyncPipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
@@ -1717,7 +1740,7 @@ class AsyncPipelinesClient:
             PipelineType,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import AsyncLlamaCloud

@@ -1726,9 +1749,6 @@ class AsyncPipelinesClient:
         )
         await client.pipelines.create_pipeline(
             request=PipelineCreate(
-                embedding_config=EmbeddingConfig(
-                    type=EmbeddingConfigType.OPENAI_EMBEDDING,
-                ),
                 data_sink=DataSinkCreate(
                     name="string",
                     sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1741,7 +1761,7 @@ class AsyncPipelinesClient:
                     retrieval_mode=RetrievalMode.CHUNKS,
                 ),
                 eval_parameters=EvalExecutionParams(
-                    llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
+                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
                 ),
                 llama_parse_parameters=LlamaParseParameters(),
                 name="string",
@@ -1780,8 +1800,6 @@ class AsyncPipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
@@ -1790,7 +1808,7 @@ class AsyncPipelinesClient:
             PipelineType,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import AsyncLlamaCloud

@@ -1799,9 +1817,6 @@ class AsyncPipelinesClient:
         )
         await client.pipelines.upsert_pipeline(
             request=PipelineCreate(
-                embedding_config=EmbeddingConfig(
-                    type=EmbeddingConfigType.OPENAI_EMBEDDING,
-                ),
                 data_sink=DataSinkCreate(
                     name="string",
                     sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1814,7 +1829,7 @@ class AsyncPipelinesClient:
                     retrieval_mode=RetrievalMode.CHUNKS,
                 ),
                 eval_parameters=EvalExecutionParams(
-                    llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
+                    llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
                 ),
                 llama_parse_parameters=LlamaParseParameters(),
                 name="string",
@@ -1876,7 +1891,7 @@ class AsyncPipelinesClient:
         self,
         pipeline_id: str,
         *,
-        embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
+        embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig] = OMIT,
         transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
         configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
         data_sink_id: typing.Optional[str] = OMIT,
@@ -1893,11 +1908,11 @@ class AsyncPipelinesClient:
        Parameters:
            - pipeline_id: str.

-            - embedding_config: typing.Optional[EmbeddingConfig]. Configuration for the embedding model.
+            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig]. Configuration for the embedding model.

            - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.

-            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. List of configured transformations.
+            - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline.

            - data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.

@@ -1916,15 +1931,13 @@ class AsyncPipelinesClient:
         from llama_cloud import (
             ConfigurableDataSinkNames,
             DataSinkCreate,
-            EmbeddingConfig,
-            EmbeddingConfigType,
             EvalExecutionParams,
             FilterCondition,
             LlamaParseParameters,
             MetadataFilters,
             PresetRetrievalParams,
             RetrievalMode,
-            SupportedEvalLlmModelNames,
+            SupportedLlmModelNames,
         )
         from llama_cloud.client import AsyncLlamaCloud

@@ -1933,9 +1946,6 @@ class AsyncPipelinesClient:
         )
         await client.pipelines.update_existing_pipeline(
             pipeline_id="string",
-            embedding_config=EmbeddingConfig(
-                type=EmbeddingConfigType.OPENAI_EMBEDDING,
-            ),
             data_sink=DataSinkCreate(
                 name="string",
                 sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1948,7 +1958,7 @@ class AsyncPipelinesClient:
                 retrieval_mode=RetrievalMode.CHUNKS,
             ),
             eval_parameters=EvalExecutionParams(
-                llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
+                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
             llama_parse_parameters=LlamaParseParameters(),
         )
@@ -2179,7 +2189,7 @@ class AsyncPipelinesClient:

            - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
        ---
-        from llama_cloud import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
+        from llama_cloud import EvalExecutionParamsOverride, SupportedLlmModelNames
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
@@ -2190,7 +2200,7 @@ class AsyncPipelinesClient:
            eval_dataset_id="string",
            eval_question_ids=[],
            params=EvalExecutionParamsOverride(
-                llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
+                llm_model=SupportedLlmModelNames.GPT_3_5_TURBO,
            ),
        )
        """
@@ -2875,21 +2885,57 @@ class AsyncPipelinesClient:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_playground_session(self, pipeline_id: str) -> PlaygroundSession:
+        """
+        Get a playground session for a user and pipeline.
+
+        Parameters:
+            - pipeline_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.get_playground_session(
+            pipeline_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/playground-session"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PlaygroundSession, _response.json()) # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def chat(
         self,
         pipeline_id: str,
         *,
-        messages: typing.List[ChatMessage],
-        data: ChatData,
+        messages: typing.Optional[typing.List[InputMessage]] = OMIT,
+        data: typing.Optional[ChatData] = OMIT,
         class_name: typing.Optional[str] = OMIT,
     ) -> typing.Any:
         """
+        Make a retrieval query + chat completion for a managed pipeline.
+
         Parameters:
             - pipeline_id: str.

-            - messages: typing.List[ChatMessage].
+            - messages: typing.Optional[typing.List[InputMessage]].

-            - data: ChatData.
+            - data: typing.Optional[ChatData].

             - class_name: typing.Optional[str].
         ---
@@ -2908,7 +2954,6 @@ class AsyncPipelinesClient:
         )
         await client.pipelines.chat(
             pipeline_id="string",
-            messages=[],
             data=ChatData(
                 retrieval_parameters=PresetRetrievalParams(
                     search_filters=MetadataFilters(
@@ -2921,7 +2966,11 @@ class AsyncPipelinesClient:
             ),
         )
         """
-        _request: typing.Dict[str, typing.Any] = {"messages": messages, "data": data}
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        if data is not OMIT:
+            _request["data"] = data
         if class_name is not OMIT:
             _request["class_name"] = class_name
         _response = await self._client_wrapper.httpx_client.request(
llama_cloud/resources/pipelines/types/__init__.py

@@ -1,6 +1,15 @@
 # This file was auto-generated by Fern from our API Definition.

 from .pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
+from .pipeline_update_embedding_config import (
+    PipelineUpdateEmbeddingConfig,
+    PipelineUpdateEmbeddingConfig_AzureEmbedding,
+    PipelineUpdateEmbeddingConfig_BedrockEmbedding,
+    PipelineUpdateEmbeddingConfig_CohereEmbedding,
+    PipelineUpdateEmbeddingConfig_GeminiEmbedding,
+    PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
+)
 from .pipeline_update_transform_config import (
     PipelineUpdateTransformConfig,
     PipelineUpdateTransformConfig_Advanced,
@@ -9,6 +18,13 @@ from .pipeline_update_transform_config import (

 __all__ = [
     "PipelineFileUpdateCustomMetadataValue",
+    "PipelineUpdateEmbeddingConfig",
+    "PipelineUpdateEmbeddingConfig_AzureEmbedding",
+    "PipelineUpdateEmbeddingConfig_BedrockEmbedding",
+    "PipelineUpdateEmbeddingConfig_CohereEmbedding",
+    "PipelineUpdateEmbeddingConfig_GeminiEmbedding",
+    "PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding",
+    "PipelineUpdateEmbeddingConfig_OpenaiEmbedding",
     "PipelineUpdateTransformConfig",
     "PipelineUpdateTransformConfig_Advanced",
     "PipelineUpdateTransformConfig_Auto",
llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py (new file)

@@ -0,0 +1,78 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+from ....types.azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
+from ....types.bedrock_embedding_config import BedrockEmbeddingConfig
+from ....types.cohere_embedding_config import CohereEmbeddingConfig
+from ....types.gemini_embedding_config import GeminiEmbeddingConfig
+from ....types.hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
+from ....types.open_ai_embedding_config import OpenAiEmbeddingConfig
+
+
+class PipelineUpdateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["OPENAI_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PipelineUpdateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["AZURE_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmbeddingConfig):
+    type: typing_extensions.Literal["HUGGINGFACE_API_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PipelineUpdateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
+    type: typing_extensions.Literal["BEDROCK_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PipelineUpdateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
+    type: typing_extensions.Literal["GEMINI_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PipelineUpdateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
+    type: typing_extensions.Literal["COHERE_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+PipelineUpdateEmbeddingConfig = typing.Union[
+    PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
+    PipelineUpdateEmbeddingConfig_AzureEmbedding,
+    PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineUpdateEmbeddingConfig_BedrockEmbedding,
+    PipelineUpdateEmbeddingConfig_GeminiEmbedding,
+    PipelineUpdateEmbeddingConfig_CohereEmbedding,
+]
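
The union above follows the same pattern Fern already uses for PipelineUpdateTransformConfig: each member subclasses a provider config and pins a Literal "type" field so the discriminator survives (de)serialization. A hedged round-trip sketch, assuming the Gemini base config has no required fields beyond those shown here:

import pydantic  # assumes pydantic v1 semantics, matching the generated client

from llama_cloud.resources.pipelines.types import (
    PipelineUpdateEmbeddingConfig,
    PipelineUpdateEmbeddingConfig_GeminiEmbedding,
)

config = PipelineUpdateEmbeddingConfig_GeminiEmbedding(type="GEMINI_EMBEDDING")

# parse_obj_as tries each union member; the Literal "type" field rejects
# every branch except the matching one.
parsed = pydantic.parse_obj_as(PipelineUpdateEmbeddingConfig, config.dict())
assert parsed.type == "GEMINI_EMBEDDING"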