llama-cloud 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.
Files changed (51)
  1. llama_cloud/__init__.py +32 -22
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +14 -8
  4. llama_cloud/resources/chat_apps/client.py +99 -133
  5. llama_cloud/resources/files/client.py +34 -6
  6. llama_cloud/resources/llama_extract/__init__.py +16 -2
  7. llama_cloud/resources/llama_extract/client.py +238 -366
  8. llama_cloud/resources/llama_extract/types/__init__.py +14 -3
  9. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +9 -0
  10. llama_cloud/resources/llama_extract/types/{extract_agent_create_data_schema_value.py → extract_agent_create_data_schema_zero_value.py} +1 -1
  11. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +9 -0
  12. llama_cloud/resources/{extraction/types/extraction_schema_create_data_schema_value.py → llama_extract/types/extract_agent_update_data_schema_zero_value.py} +1 -1
  13. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +9 -0
  14. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +7 -0
  15. llama_cloud/resources/organizations/client.py +8 -12
  16. llama_cloud/resources/parsing/client.py +146 -18
  17. llama_cloud/resources/reports/client.py +30 -26
  18. llama_cloud/resources/retrievers/client.py +16 -4
  19. llama_cloud/types/__init__.py +20 -12
  20. llama_cloud/types/chat_app.py +11 -9
  21. llama_cloud/types/chat_app_response.py +12 -10
  22. llama_cloud/types/cloud_mongo_db_atlas_vector_search.py +1 -0
  23. llama_cloud/types/extract_job.py +3 -1
  24. llama_cloud/types/extract_job_create.py +4 -2
  25. llama_cloud/types/extract_job_create_data_schema_override.py +9 -0
  26. llama_cloud/{resources/extraction/types/extraction_schema_update_data_schema_value.py → types/extract_job_create_data_schema_override_zero_value.py} +1 -1
  27. llama_cloud/types/extract_resultset.py +2 -6
  28. llama_cloud/types/extract_run.py +5 -0
  29. llama_cloud/types/extract_run_data.py +11 -0
  30. llama_cloud/types/extract_run_data_item_value.py +5 -0
  31. llama_cloud/types/extract_run_data_zero_value.py +5 -0
  32. llama_cloud/{resources/llama_extract/types/extract_agent_update_data_schema_value.py → types/extract_run_extraction_metadata_value.py} +1 -1
  33. llama_cloud/types/{extraction_job.py → extract_schema_validate_response.py} +3 -6
  34. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +7 -0
  35. llama_cloud/types/extract_state.py +4 -4
  36. llama_cloud/types/llama_extract_settings.py +4 -0
  37. llama_cloud/types/llama_parse_parameters.py +11 -0
  38. llama_cloud/types/plan.py +4 -0
  39. llama_cloud/types/{extraction_result.py → preset_composite_retrieval_params.py} +5 -14
  40. llama_cloud/types/{extraction_schema.py → report_file_info.py} +5 -9
  41. llama_cloud/types/report_metadata.py +2 -1
  42. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.10.dist-info}/METADATA +2 -1
  43. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.10.dist-info}/RECORD +45 -42
  44. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.10.dist-info}/WHEEL +1 -1
  45. llama_cloud/resources/extraction/__init__.py +0 -5
  46. llama_cloud/resources/extraction/client.py +0 -756
  47. llama_cloud/resources/extraction/types/__init__.py +0 -6
  48. llama_cloud/types/extract_job_create_data_schema_override_value.py +0 -7
  49. llama_cloud/types/extraction_result_data_value.py +0 -5
  50. llama_cloud/types/extraction_schema_data_schema_value.py +0 -7
  51. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.10.dist-info}/LICENSE +0 -0
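
The two changes most likely to break callers are visible in the file list: the legacy extraction resource (files 45-47, deleted wholesale) gives way to llama_extract, and chat apps move from a single pipeline to a composite retriever. A rough before/after sketch of chat-app creation, assembled from the signatures in the diff below (all identifiers appear in the diff; the values are placeholders):

from llama_cloud import (
    CompositeRetrievalMode,
    LlmParameters,
    PresetCompositeRetrievalParams,
    SupportedLlmModelNames,
)
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# 0.1.8 took pipeline_id, retrieval_config=PresetRetrievalParams(...),
# and chat_app_create_project_id. 0.1.10 takes a retriever and a
# composite retrieval config instead:
chat_app = client.chat_apps.create_chat_app_api_v_1_apps_post(
    name="my-chat-app",
    retriever_id="retriever-id",
    llm_config=LlmParameters(model_name=SupportedLlmModelNames.GPT_3_5_TURBO),
    retrieval_config=PresetCompositeRetrievalParams(mode=CompositeRetrievalMode.ROUTING),
)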
llama_cloud/__init__.py CHANGED
@@ -103,7 +103,8 @@ from .types import (
     ExtractConfig,
     ExtractJob,
     ExtractJobCreate,
-    ExtractJobCreateDataSchemaOverrideValue,
+    ExtractJobCreateDataSchemaOverride,
+    ExtractJobCreateDataSchemaOverrideZeroValue,
     ExtractMode,
     ExtractResultset,
     ExtractResultsetData,
@@ -111,13 +112,14 @@ from .types import (
     ExtractResultsetDataZeroValue,
     ExtractResultsetExtractionMetadataValue,
     ExtractRun,
+    ExtractRunData,
+    ExtractRunDataItemValue,
     ExtractRunDataSchemaValue,
+    ExtractRunDataZeroValue,
+    ExtractRunExtractionMetadataValue,
+    ExtractSchemaValidateResponse,
+    ExtractSchemaValidateResponseDataSchemaValue,
     ExtractState,
-    ExtractionJob,
-    ExtractionResult,
-    ExtractionResultDataValue,
-    ExtractionSchema,
-    ExtractionSchemaDataSchemaValue,
     File,
     FilePermissionInfoValue,
     FileResourceInfoValue,
@@ -228,6 +230,7 @@ from .types import (
     Plan,
     PlaygroundSession,
     Pooling,
+    PresetCompositeRetrievalParams,
     PresetRetrievalParams,
     PresignedUrl,
     ProgressEvent,
@@ -249,6 +252,7 @@ from .types import (
     ReportEventItemEventData_ReportBlockUpdate,
     ReportEventItemEventData_ReportStateUpdate,
     ReportEventType,
+    ReportFileInfo,
     ReportMetadata,
     ReportPlan,
     ReportPlanBlock,
@@ -302,10 +306,12 @@ from .resources import (
     EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding,
     EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
     EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
-    ExtractAgentCreateDataSchemaValue,
-    ExtractAgentUpdateDataSchemaValue,
-    ExtractionSchemaCreateDataSchemaValue,
-    ExtractionSchemaUpdateDataSchemaValue,
+    ExtractAgentCreateDataSchema,
+    ExtractAgentCreateDataSchemaZeroValue,
+    ExtractAgentUpdateDataSchema,
+    ExtractAgentUpdateDataSchemaZeroValue,
+    ExtractSchemaValidateRequestDataSchema,
+    ExtractSchemaValidateRequestDataSchemaZeroValue,
     FileCreateFromUrlResourceInfoValue,
     FileCreatePermissionInfoValue,
     FileCreateResourceInfoValue,
@@ -326,7 +332,6 @@ from .resources import (
     data_sources,
     embedding_model_configs,
     evals,
-    extraction,
     files,
     jobs,
     llama_extract,
@@ -449,13 +454,16 @@ __all__ = [
     "EvalQuestionCreate",
     "EvalQuestionResult",
     "ExtractAgent",
-    "ExtractAgentCreateDataSchemaValue",
+    "ExtractAgentCreateDataSchema",
+    "ExtractAgentCreateDataSchemaZeroValue",
     "ExtractAgentDataSchemaValue",
-    "ExtractAgentUpdateDataSchemaValue",
+    "ExtractAgentUpdateDataSchema",
+    "ExtractAgentUpdateDataSchemaZeroValue",
     "ExtractConfig",
     "ExtractJob",
     "ExtractJobCreate",
-    "ExtractJobCreateDataSchemaOverrideValue",
+    "ExtractJobCreateDataSchemaOverride",
+    "ExtractJobCreateDataSchemaOverrideZeroValue",
     "ExtractMode",
     "ExtractResultset",
     "ExtractResultsetData",
@@ -463,15 +471,16 @@ __all__ = [
     "ExtractResultsetDataZeroValue",
     "ExtractResultsetExtractionMetadataValue",
     "ExtractRun",
+    "ExtractRunData",
+    "ExtractRunDataItemValue",
     "ExtractRunDataSchemaValue",
+    "ExtractRunDataZeroValue",
+    "ExtractRunExtractionMetadataValue",
+    "ExtractSchemaValidateRequestDataSchema",
+    "ExtractSchemaValidateRequestDataSchemaZeroValue",
+    "ExtractSchemaValidateResponse",
+    "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
-    "ExtractionJob",
-    "ExtractionResult",
-    "ExtractionResultDataValue",
-    "ExtractionSchema",
-    "ExtractionSchemaCreateDataSchemaValue",
-    "ExtractionSchemaDataSchemaValue",
-    "ExtractionSchemaUpdateDataSchemaValue",
     "File",
     "FileCreateFromUrlResourceInfoValue",
     "FileCreatePermissionInfoValue",
@@ -596,6 +605,7 @@ __all__ = [
     "Plan",
     "PlaygroundSession",
     "Pooling",
+    "PresetCompositeRetrievalParams",
     "PresetRetrievalParams",
     "PresignedUrl",
     "ProgressEvent",
@@ -617,6 +627,7 @@ __all__ = [
     "ReportEventItemEventData_ReportBlockUpdate",
     "ReportEventItemEventData_ReportStateUpdate",
     "ReportEventType",
+    "ReportFileInfo",
     "ReportMetadata",
     "ReportPlan",
     "ReportPlanBlock",
@@ -664,7 +675,6 @@ __all__ = [
     "data_sources",
     "embedding_model_configs",
     "evals",
-    "extraction",
     "files",
     "jobs",
     "llama_extract",
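
Net effect of the __init__.py changes: every Extraction* export is gone, and the old *DataSchemaValue names are replaced by paired *DataSchema and *DataSchemaZeroValue types. Code pinned to the old names now fails at import time. The 0.1.10 spellings, taken directly from the updated __all__ above:

# Old (0.1.8) names that no longer import: ExtractionJob, ExtractionResult,
# ExtractionSchema, ExtractJobCreateDataSchemaOverrideValue, ...
from llama_cloud import (
    ExtractJobCreateDataSchemaOverride,
    ExtractJobCreateDataSchemaOverrideZeroValue,
    ExtractRunData,
    ExtractSchemaValidateResponse,
    PresetCompositeRetrievalParams,
    ReportFileInfo,
)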
llama_cloud/client.py CHANGED
@@ -12,7 +12,6 @@ from .resources.data_sinks.client import AsyncDataSinksClient, DataSinksClient
 from .resources.data_sources.client import AsyncDataSourcesClient, DataSourcesClient
 from .resources.embedding_model_configs.client import AsyncEmbeddingModelConfigsClient, EmbeddingModelConfigsClient
 from .resources.evals.client import AsyncEvalsClient, EvalsClient
-from .resources.extraction.client import AsyncExtractionClient, ExtractionClient
 from .resources.files.client import AsyncFilesClient, FilesClient
 from .resources.jobs.client import AsyncJobsClient, JobsClient
 from .resources.llama_extract.client import AsyncLlamaExtractClient, LlamaExtractClient
@@ -52,7 +51,6 @@ class LlamaCloud:
         self.parsing = ParsingClient(client_wrapper=self._client_wrapper)
         self.component_definitions = ComponentDefinitionsClient(client_wrapper=self._client_wrapper)
         self.chat_apps = ChatAppsClient(client_wrapper=self._client_wrapper)
-        self.extraction = ExtractionClient(client_wrapper=self._client_wrapper)
         self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = ReportsClient(client_wrapper=self._client_wrapper)
 
@@ -85,7 +83,6 @@ class AsyncLlamaCloud:
         self.parsing = AsyncParsingClient(client_wrapper=self._client_wrapper)
         self.component_definitions = AsyncComponentDefinitionsClient(client_wrapper=self._client_wrapper)
         self.chat_apps = AsyncChatAppsClient(client_wrapper=self._client_wrapper)
-        self.extraction = AsyncExtractionClient(client_wrapper=self._client_wrapper)
         self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
 
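
Since both versions already construct self.llama_extract, the migration here is mechanical: only the duplicate extraction attribute disappears, on both the sync and async clients.

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

client.llama_extract  # present in 0.1.8 and 0.1.10
# client.extraction   # AttributeError in 0.1.10; ExtractionClient itself is deleted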
llama_cloud/resources/__init__.py CHANGED
@@ -7,7 +7,6 @@ from . import (
     data_sources,
     embedding_model_configs,
     evals,
-    extraction,
     files,
     jobs,
     llama_extract,
@@ -30,9 +29,15 @@ from .embedding_model_configs import (
     EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
     EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
 )
-from .extraction import ExtractionSchemaCreateDataSchemaValue, ExtractionSchemaUpdateDataSchemaValue
 from .files import FileCreateFromUrlResourceInfoValue, FileCreatePermissionInfoValue, FileCreateResourceInfoValue
-from .llama_extract import ExtractAgentCreateDataSchemaValue, ExtractAgentUpdateDataSchemaValue
+from .llama_extract import (
+    ExtractAgentCreateDataSchema,
+    ExtractAgentCreateDataSchemaZeroValue,
+    ExtractAgentUpdateDataSchema,
+    ExtractAgentUpdateDataSchemaZeroValue,
+    ExtractSchemaValidateRequestDataSchema,
+    ExtractSchemaValidateRequestDataSchemaZeroValue,
+)
 from .pipelines import (
     PipelineFileUpdateCustomMetadataValue,
     PipelineUpdateEmbeddingConfig,
@@ -59,10 +64,12 @@ __all__ = [
     "EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding",
     "EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding",
     "EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding",
-    "ExtractAgentCreateDataSchemaValue",
-    "ExtractAgentUpdateDataSchemaValue",
-    "ExtractionSchemaCreateDataSchemaValue",
-    "ExtractionSchemaUpdateDataSchemaValue",
+    "ExtractAgentCreateDataSchema",
+    "ExtractAgentCreateDataSchemaZeroValue",
+    "ExtractAgentUpdateDataSchema",
+    "ExtractAgentUpdateDataSchemaZeroValue",
+    "ExtractSchemaValidateRequestDataSchema",
+    "ExtractSchemaValidateRequestDataSchemaZeroValue",
     "FileCreateFromUrlResourceInfoValue",
     "FileCreatePermissionInfoValue",
     "FileCreateResourceInfoValue",
@@ -83,7 +90,6 @@ __all__ = [
     "data_sources",
     "embedding_model_configs",
     "evals",
-    "extraction",
     "files",
     "jobs",
     "llama_extract",
llama_cloud/resources/chat_apps/client.py CHANGED
@@ -14,7 +14,7 @@ from ...types.chat_app_response import ChatAppResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.input_message import InputMessage
 from ...types.llm_parameters import LlmParameters
-from ...types.preset_retrieval_params import PresetRetrievalParams
+from ...types.preset_composite_retrieval_params import PresetCompositeRetrievalParams
 
 try:
     import pydantic
@@ -71,10 +71,9 @@ class ChatAppsClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         name: str,
-        pipeline_id: str,
+        retriever_id: str,
         llm_config: LlmParameters,
-        retrieval_config: PresetRetrievalParams,
-        chat_app_create_project_id: str,
+        retrieval_config: PresetCompositeRetrievalParams,
     ) -> ChatApp:
         """
         Create a new chat app.
@@ -84,22 +83,18 @@ class ChatAppsClient:
 
         - organization_id: typing.Optional[str].
 
-        - name: str.
+        - name: str. Name of the chat app
 
-        - pipeline_id: str.
+        - retriever_id: str. ID of the retriever to use for the chat app
 
-        - llm_config: LlmParameters.
+        - llm_config: LlmParameters. Configuration for the LLM model to use for the chat app
 
-        - retrieval_config: PresetRetrievalParams.
-
-        - chat_app_create_project_id: str.
+        - retrieval_config: PresetCompositeRetrievalParams. Configuration for the retrieval model to use for the chat app
         ---
         from llama_cloud import (
-            FilterCondition,
+            CompositeRetrievalMode,
             LlmParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
+            PresetCompositeRetrievalParams,
             SupportedLlmModelNames,
         )
         from llama_cloud.client import LlamaCloud
@@ -109,18 +104,13 @@ class ChatAppsClient:
         )
         client.chat_apps.create_chat_app_api_v_1_apps_post(
             name="string",
-            pipeline_id="string",
+            retriever_id="string",
             llm_config=LlmParameters(
                 model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
-            retrieval_config=PresetRetrievalParams(
-                search_filters=MetadataFilters(
-                    filters=[],
-                    condition=FilterCondition.AND,
-                ),
-                retrieval_mode=RetrievalMode.CHUNKS,
+            retrieval_config=PresetCompositeRetrievalParams(
+                mode=CompositeRetrievalMode.ROUTING,
             ),
-            chat_app_create_project_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -130,10 +120,9 @@ class ChatAppsClient:
             json=jsonable_encoder(
                 {
                     "name": name,
-                    "pipeline_id": pipeline_id,
+                    "retriever_id": retriever_id,
                     "llm_config": llm_config,
                     "retrieval_config": retrieval_config,
-                    "project_id": chat_app_create_project_id,
                 }
             ),
             headers=self._client_wrapper.get_headers(),
@@ -181,36 +170,6 @@ class ChatAppsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_chat_app(self, id: str) -> typing.Any:
-        """
-        Parameters:
-        - id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.chat_apps.delete_chat_app(
-            id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     def update_chat_app(
         self,
         id: str,
@@ -219,7 +178,7 @@ class ChatAppsClient:
         organization_id: typing.Optional[str] = None,
         name: typing.Optional[str] = OMIT,
         llm_config: typing.Optional[LlmParameters] = OMIT,
-        retrieval_config: typing.Optional[PresetRetrievalParams] = OMIT,
+        retrieval_config: typing.Optional[PresetCompositeRetrievalParams] = OMIT,
     ) -> ChatApp:
         """
         Update a chat app.
@@ -235,14 +194,12 @@ class ChatAppsClient:
 
         - llm_config: typing.Optional[LlmParameters].
 
-        - retrieval_config: typing.Optional[PresetRetrievalParams].
+        - retrieval_config: typing.Optional[PresetCompositeRetrievalParams].
        ---
         from llama_cloud import (
-            FilterCondition,
+            CompositeRetrievalMode,
             LlmParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
+            PresetCompositeRetrievalParams,
             SupportedLlmModelNames,
         )
         from llama_cloud.client import LlamaCloud
@@ -255,12 +212,8 @@ class ChatAppsClient:
             llm_config=LlmParameters(
                 model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
-            retrieval_config=PresetRetrievalParams(
-                search_filters=MetadataFilters(
-                    filters=[],
-                    condition=FilterCondition.AND,
-                ),
-                retrieval_mode=RetrievalMode.CHUNKS,
+            retrieval_config=PresetCompositeRetrievalParams(
+                mode=CompositeRetrievalMode.ROUTING,
             ),
         )
         """
@@ -272,7 +225,7 @@ class ChatAppsClient:
         if retrieval_config is not OMIT:
             _request["retrieval_config"] = retrieval_config
         _response = self._client_wrapper.httpx_client.request(
-            "PATCH",
+            "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
@@ -289,6 +242,36 @@ class ChatAppsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def delete_chat_app(self, id: str) -> typing.Any:
+        """
+        Parameters:
+        - id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.delete_chat_app(
+            id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def chat_with_chat_app(self, id: str, *, messages: typing.Optional[typing.List[InputMessage]] = OMIT) -> typing.Any:
         """
         Chat with a chat app.
@@ -371,10 +354,9 @@ class AsyncChatAppsClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         name: str,
-        pipeline_id: str,
+        retriever_id: str,
         llm_config: LlmParameters,
-        retrieval_config: PresetRetrievalParams,
-        chat_app_create_project_id: str,
+        retrieval_config: PresetCompositeRetrievalParams,
     ) -> ChatApp:
         """
         Create a new chat app.
@@ -384,22 +366,18 @@ class AsyncChatAppsClient:
 
         - organization_id: typing.Optional[str].
 
-        - name: str.
+        - name: str. Name of the chat app
 
-        - pipeline_id: str.
+        - retriever_id: str. ID of the retriever to use for the chat app
 
-        - llm_config: LlmParameters.
+        - llm_config: LlmParameters. Configuration for the LLM model to use for the chat app
 
-        - retrieval_config: PresetRetrievalParams.
-
-        - chat_app_create_project_id: str.
+        - retrieval_config: PresetCompositeRetrievalParams. Configuration for the retrieval model to use for the chat app
         ---
         from llama_cloud import (
-            FilterCondition,
+            CompositeRetrievalMode,
             LlmParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
+            PresetCompositeRetrievalParams,
             SupportedLlmModelNames,
         )
         from llama_cloud.client import AsyncLlamaCloud
@@ -409,18 +387,13 @@ class AsyncChatAppsClient:
         )
         await client.chat_apps.create_chat_app_api_v_1_apps_post(
             name="string",
-            pipeline_id="string",
+            retriever_id="string",
             llm_config=LlmParameters(
                 model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
-            retrieval_config=PresetRetrievalParams(
-                search_filters=MetadataFilters(
-                    filters=[],
-                    condition=FilterCondition.AND,
-                ),
-                retrieval_mode=RetrievalMode.CHUNKS,
+            retrieval_config=PresetCompositeRetrievalParams(
+                mode=CompositeRetrievalMode.ROUTING,
             ),
-            chat_app_create_project_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -430,10 +403,9 @@ class AsyncChatAppsClient:
             json=jsonable_encoder(
                 {
                     "name": name,
-                    "pipeline_id": pipeline_id,
+                    "retriever_id": retriever_id,
                     "llm_config": llm_config,
                     "retrieval_config": retrieval_config,
-                    "project_id": chat_app_create_project_id,
                 }
             ),
             headers=self._client_wrapper.get_headers(),
@@ -481,36 +453,6 @@ class AsyncChatAppsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_chat_app(self, id: str) -> typing.Any:
-        """
-        Parameters:
-        - id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.chat_apps.delete_chat_app(
-            id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     async def update_chat_app(
         self,
         id: str,
@@ -519,7 +461,7 @@ class AsyncChatAppsClient:
         organization_id: typing.Optional[str] = None,
         name: typing.Optional[str] = OMIT,
         llm_config: typing.Optional[LlmParameters] = OMIT,
-        retrieval_config: typing.Optional[PresetRetrievalParams] = OMIT,
+        retrieval_config: typing.Optional[PresetCompositeRetrievalParams] = OMIT,
    ) -> ChatApp:
         """
         Update a chat app.
@@ -535,14 +477,12 @@ class AsyncChatAppsClient:
 
         - llm_config: typing.Optional[LlmParameters].
 
-        - retrieval_config: typing.Optional[PresetRetrievalParams].
+        - retrieval_config: typing.Optional[PresetCompositeRetrievalParams].
         ---
         from llama_cloud import (
-            FilterCondition,
+            CompositeRetrievalMode,
             LlmParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
+            PresetCompositeRetrievalParams,
             SupportedLlmModelNames,
         )
         from llama_cloud.client import AsyncLlamaCloud
@@ -555,12 +495,8 @@ class AsyncChatAppsClient:
             llm_config=LlmParameters(
                 model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
             ),
-            retrieval_config=PresetRetrievalParams(
-                search_filters=MetadataFilters(
-                    filters=[],
-                    condition=FilterCondition.AND,
-                ),
-                retrieval_mode=RetrievalMode.CHUNKS,
+            retrieval_config=PresetCompositeRetrievalParams(
+                mode=CompositeRetrievalMode.ROUTING,
             ),
         )
         """
@@ -572,7 +508,7 @@ class AsyncChatAppsClient:
         if retrieval_config is not OMIT:
             _request["retrieval_config"] = retrieval_config
         _response = await self._client_wrapper.httpx_client.request(
-            "PATCH",
+            "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
@@ -589,6 +525,36 @@ class AsyncChatAppsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def delete_chat_app(self, id: str) -> typing.Any:
+        """
+        Parameters:
+        - id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.delete_chat_app(
+            id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def chat_with_chat_app(
         self, id: str, *, messages: typing.Optional[typing.List[InputMessage]] = OMIT
     ) -> typing.Any:
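
Two behavioral changes in this file are easy to miss in the churn: update_chat_app now sends PUT instead of PATCH to api/v1/apps/{id}, and delete_chat_app is unchanged, merely relocated below update_chat_app. Error behavior is the same throughout: a 422 raises UnprocessableEntityError, any other non-2xx raises ApiError. A minimal handling sketch; the llama_cloud.errors import path and the .body attribute are assumptions, since the diff shows only the class names:

from llama_cloud.client import LlamaCloud
from llama_cloud.errors import UnprocessableEntityError  # assumed path; not shown in this diff

client = LlamaCloud(token="YOUR_TOKEN")
try:
    client.chat_apps.update_chat_app(id="app-id", name="renamed")  # now a PUT under the hood
except UnprocessableEntityError as err:
    # 422: body should carry the HttpValidationError payload from the API (assumed attribute)
    print(err.body)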