llama-cloud 0.1.5__py3-none-any.whl → 0.1.7a1__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as published to the supported public registries. It is provided for informational purposes only.

Files changed (129)
  1. llama_cloud/__init__.py +138 -2
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/resources/__init__.py +17 -1
  4. llama_cloud/resources/chat_apps/__init__.py +2 -0
  5. llama_cloud/resources/chat_apps/client.py +620 -0
  6. llama_cloud/resources/data_sinks/client.py +2 -2
  7. llama_cloud/resources/data_sources/client.py +2 -2
  8. llama_cloud/resources/embedding_model_configs/client.py +4 -4
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +21 -0
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +14 -14
  21. llama_cloud/resources/parsing/client.py +480 -229
  22. llama_cloud/resources/pipelines/client.py +182 -126
  23. llama_cloud/resources/projects/client.py +210 -102
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +124 -2
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +2 -2
  32. llama_cloud/types/chat_app.py +44 -0
  33. llama_cloud/types/chat_app_response.py +41 -0
  34. llama_cloud/types/cloud_az_storage_blob_data_source.py +1 -0
  35. llama_cloud/types/cloud_box_data_source.py +1 -0
  36. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  37. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  38. llama_cloud/types/cloud_jira_data_source.py +1 -0
  39. llama_cloud/types/cloud_notion_page_data_source.py +1 -0
  40. llama_cloud/types/cloud_one_drive_data_source.py +1 -0
  41. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  42. llama_cloud/types/cloud_s_3_data_source.py +1 -0
  43. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  44. llama_cloud/types/cloud_slack_data_source.py +1 -0
  45. llama_cloud/types/composite_retrieval_mode.py +21 -0
  46. llama_cloud/types/composite_retrieval_result.py +38 -0
  47. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  48. llama_cloud/types/data_sink.py +1 -1
  49. llama_cloud/types/data_sink_create.py +1 -1
  50. llama_cloud/types/data_source.py +1 -1
  51. llama_cloud/types/data_source_create.py +1 -1
  52. llama_cloud/types/edit_suggestion.py +39 -0
  53. llama_cloud/types/eval_dataset_job_record.py +1 -0
  54. llama_cloud/types/extract_agent.py +45 -0
  55. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  56. llama_cloud/types/extract_config.py +40 -0
  57. llama_cloud/types/extract_job.py +35 -0
  58. llama_cloud/types/extract_job_create.py +40 -0
  59. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  60. llama_cloud/types/extract_mode.py +17 -0
  61. llama_cloud/types/extract_resultset.py +46 -0
  62. llama_cloud/types/extract_resultset_data.py +11 -0
  63. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  64. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  65. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  66. llama_cloud/types/file.py +3 -0
  67. llama_cloud/types/file_permission_info_value.py +5 -0
  68. llama_cloud/types/filter_condition.py +9 -1
  69. llama_cloud/types/filter_operator.py +4 -0
  70. llama_cloud/types/image_block.py +35 -0
  71. llama_cloud/types/input_message.py +1 -1
  72. llama_cloud/types/job_name_mapping.py +4 -0
  73. llama_cloud/types/job_names.py +89 -0
  74. llama_cloud/types/job_record.py +57 -0
  75. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  76. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  77. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  78. llama_cloud/types/llama_parse_parameters.py +15 -0
  79. llama_cloud/types/llm.py +1 -0
  80. llama_cloud/types/llm_model_data.py +1 -0
  81. llama_cloud/types/llm_parameters.py +1 -0
  82. llama_cloud/types/managed_ingestion_status.py +4 -0
  83. llama_cloud/types/managed_ingestion_status_response.py +1 -0
  84. llama_cloud/types/object_type.py +4 -0
  85. llama_cloud/types/organization.py +5 -0
  86. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  87. llama_cloud/types/paginated_report_response.py +35 -0
  88. llama_cloud/types/parse_plan_level.py +21 -0
  89. llama_cloud/types/parsing_job_structured_result.py +32 -0
  90. llama_cloud/types/pipeline_create.py +3 -1
  91. llama_cloud/types/pipeline_data_source.py +1 -1
  92. llama_cloud/types/pipeline_file.py +3 -0
  93. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  94. llama_cloud/types/playground_session.py +2 -2
  95. llama_cloud/types/preset_retrieval_params.py +1 -0
  96. llama_cloud/types/progress_event.py +44 -0
  97. llama_cloud/types/progress_event_status.py +33 -0
  98. llama_cloud/types/prompt_spec.py +2 -2
  99. llama_cloud/types/related_node_info.py +2 -2
  100. llama_cloud/types/related_node_info_node_type.py +7 -0
  101. llama_cloud/types/report.py +33 -0
  102. llama_cloud/types/report_block.py +34 -0
  103. llama_cloud/types/report_block_dependency.py +29 -0
  104. llama_cloud/types/report_create_response.py +31 -0
  105. llama_cloud/types/report_event_item.py +40 -0
  106. llama_cloud/types/report_event_item_event_data.py +45 -0
  107. llama_cloud/types/report_event_type.py +37 -0
  108. llama_cloud/types/report_metadata.py +39 -0
  109. llama_cloud/types/report_plan.py +36 -0
  110. llama_cloud/types/report_plan_block.py +36 -0
  111. llama_cloud/types/report_query.py +33 -0
  112. llama_cloud/types/report_response.py +41 -0
  113. llama_cloud/types/report_state.py +37 -0
  114. llama_cloud/types/report_state_event.py +38 -0
  115. llama_cloud/types/report_update_event.py +38 -0
  116. llama_cloud/types/retrieve_results.py +1 -1
  117. llama_cloud/types/retriever.py +45 -0
  118. llama_cloud/types/retriever_create.py +37 -0
  119. llama_cloud/types/retriever_pipeline.py +37 -0
  120. llama_cloud/types/status_enum.py +4 -0
  121. llama_cloud/types/supported_llm_model_names.py +4 -0
  122. llama_cloud/types/text_block.py +31 -0
  123. llama_cloud/types/text_node.py +13 -6
  124. llama_cloud/types/usage_metric_response.py +34 -0
  125. llama_cloud/types/user_job_record.py +32 -0
  126. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/METADATA +3 -1
  127. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/RECORD +129 -59
  128. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/WHEEL +1 -1
  129. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/LICENSE +0 -0
llama_cloud/resources/chat_apps/client.py (new file)
@@ -0,0 +1,620 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.chat_app import ChatApp
+from ...types.chat_app_response import ChatAppResponse
+from ...types.http_validation_error import HttpValidationError
+from ...types.input_message import InputMessage
+from ...types.llm_parameters import LlmParameters
+from ...types.preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ChatAppsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def get_chat_apps_api_v_1_apps_get(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[ChatAppResponse]:
+        """
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.get_chat_apps_api_v_1_apps_get()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[ChatAppResponse], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def create_chat_app_api_v_1_apps_post(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        pipeline_id: str,
+        llm_config: LlmParameters,
+        retrieval_config: PresetRetrievalParams,
+        chat_app_create_project_id: str,
+    ) -> ChatApp:
+        """
+        Create a new chat app.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - pipeline_id: str.
+
+        - llm_config: LlmParameters.
+
+        - retrieval_config: PresetRetrievalParams.
+
+        - chat_app_create_project_id: str.
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.create_chat_app_api_v_1_apps_post(
+            pipeline_id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+            chat_app_create_project_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(
+                {
+                    "pipeline_id": pipeline_id,
+                    "llm_config": llm_config,
+                    "retrieval_config": retrieval_config,
+                    "project_id": chat_app_create_project_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_chat_app(self, id: str) -> ChatApp:
+        """
+        Get a chat app by ID.
+
+        Parameters:
+        - id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.get_chat_app(
+            id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def delete_chat_app(self, id: str) -> typing.Any:
+        """
+        Parameters:
+        - id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.delete_chat_app(
+            id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def update_chat_app(
+        self,
+        id: str,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        name: typing.Optional[str] = OMIT,
+        llm_config: typing.Optional[LlmParameters] = OMIT,
+        retrieval_config: typing.Optional[PresetRetrievalParams] = OMIT,
+    ) -> ChatApp:
+        """
+        Update a chat app.
+
+        Parameters:
+        - id: str.
+
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - name: typing.Optional[str].
+
+        - llm_config: typing.Optional[LlmParameters].
+
+        - retrieval_config: typing.Optional[PresetRetrievalParams].
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.update_chat_app(
+            id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if name is not OMIT:
+            _request["name"] = name
+        if llm_config is not OMIT:
+            _request["llm_config"] = llm_config
+        if retrieval_config is not OMIT:
+            _request["retrieval_config"] = retrieval_config
+        _response = self._client_wrapper.httpx_client.request(
+            "PATCH",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def chat_with_chat_app(self, id: str, *, messages: typing.Optional[typing.List[InputMessage]] = OMIT) -> typing.Any:
+        """
+        Chat with a chat app.
+
+        Parameters:
+        - id: str.
+
+        - messages: typing.Optional[typing.List[InputMessage]].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.chat_with_chat_app(
+            id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}/chat"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncChatAppsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def get_chat_apps_api_v_1_apps_get(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[ChatAppResponse]:
+        """
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.get_chat_apps_api_v_1_apps_get()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[ChatAppResponse], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def create_chat_app_api_v_1_apps_post(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        pipeline_id: str,
+        llm_config: LlmParameters,
+        retrieval_config: PresetRetrievalParams,
+        chat_app_create_project_id: str,
+    ) -> ChatApp:
+        """
+        Create a new chat app.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - pipeline_id: str.
+
+        - llm_config: LlmParameters.
+
+        - retrieval_config: PresetRetrievalParams.
+
+        - chat_app_create_project_id: str.
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.create_chat_app_api_v_1_apps_post(
+            pipeline_id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+            chat_app_create_project_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(
+                {
+                    "pipeline_id": pipeline_id,
+                    "llm_config": llm_config,
+                    "retrieval_config": retrieval_config,
+                    "project_id": chat_app_create_project_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_chat_app(self, id: str) -> ChatApp:
+        """
+        Get a chat app by ID.
+
+        Parameters:
+        - id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.get_chat_app(
+            id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def delete_chat_app(self, id: str) -> typing.Any:
+        """
+        Parameters:
+        - id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.delete_chat_app(
+            id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def update_chat_app(
+        self,
+        id: str,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        name: typing.Optional[str] = OMIT,
+        llm_config: typing.Optional[LlmParameters] = OMIT,
+        retrieval_config: typing.Optional[PresetRetrievalParams] = OMIT,
+    ) -> ChatApp:
+        """
+        Update a chat app.
+
+        Parameters:
+        - id: str.
+
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - name: typing.Optional[str].
+
+        - llm_config: typing.Optional[LlmParameters].
+
+        - retrieval_config: typing.Optional[PresetRetrievalParams].
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.update_chat_app(
+            id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if name is not OMIT:
+            _request["name"] = name
+        if llm_config is not OMIT:
+            _request["llm_config"] = llm_config
+        if retrieval_config is not OMIT:
+            _request["retrieval_config"] = retrieval_config
+        _response = await self._client_wrapper.httpx_client.request(
+            "PATCH",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def chat_with_chat_app(
+        self, id: str, *, messages: typing.Optional[typing.List[InputMessage]] = OMIT
+    ) -> typing.Any:
+        """
+        Chat with a chat app.
+
+        Parameters:
+        - id: str.
+
+        - messages: typing.Optional[typing.List[InputMessage]].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.chat_with_chat_app(
+            id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}/chat"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
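
The new chat apps client above is the largest addition in this release. As orientation, here is a minimal end-to-end usage sketch assembled from the docstring examples in the generated code. The token and IDs are placeholders; the InputMessage fields (role, content) and the app.id attribute are assumptions based on the type names, since their definitions are not shown in this diff. The async client mirrors the same surface: swap in AsyncLlamaCloud and await each call.

    from llama_cloud import (
        FilterCondition,
        InputMessage,
        LlmParameters,
        MetadataFilters,
        PresetRetrievalParams,
        RetrievalMode,
        SupportedLlmModelNames,
    )
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Create a chat app bound to an existing pipeline (IDs are placeholders).
    app = client.chat_apps.create_chat_app_api_v_1_apps_post(
        pipeline_id="YOUR_PIPELINE_ID",
        llm_config=LlmParameters(
            model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
        ),
        retrieval_config=PresetRetrievalParams(
            search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
            retrieval_mode=RetrievalMode.CHUNKS,
        ),
        chat_app_create_project_id="YOUR_PROJECT_ID",
    )

    # Send one message; the endpoint returns an untyped payload (typing.Any).
    # The role/content fields on InputMessage are assumed, not shown in the diff.
    reply = client.chat_apps.chat_with_chat_app(
        id=app.id,
        messages=[InputMessage(role="user", content="Summarize my documents.")],
    )
    print(reply)
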
llama_cloud/resources/data_sinks/client.py
@@ -213,7 +213,7 @@ class DataSinksClient:
 
         - sink_type: ConfigurableDataSinkNames.
 
-        - component: typing.Optional[DataSinkUpdateComponent].
+        - component: typing.Optional[DataSinkUpdateComponent]. Component that implements the data sink
         ---
         from llama_cloud import ConfigurableDataSinkNames
         from llama_cloud.client import LlamaCloud
@@ -467,7 +467,7 @@ class AsyncDataSinksClient:
 
         - sink_type: ConfigurableDataSinkNames.
 
-        - component: typing.Optional[DataSinkUpdateComponent].
+        - component: typing.Optional[DataSinkUpdateComponent]. Component that implements the data sink
         ---
         from llama_cloud import ConfigurableDataSinkNames
         from llama_cloud.client import AsyncLlamaCloud

llama_cloud/resources/data_sources/client.py
@@ -218,7 +218,7 @@ class DataSourcesClient:
 
         - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceUpdateCustomMetadataValue]]].
 
-        - component: typing.Optional[DataSourceUpdateComponent].
+        - component: typing.Optional[DataSourceUpdateComponent]. Component that implements the data source
         ---
         from llama_cloud import ConfigurableDataSourceNames
         from llama_cloud.client import LlamaCloud
@@ -478,7 +478,7 @@ class AsyncDataSourcesClient:
 
         - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceUpdateCustomMetadataValue]]].
 
-        - component: typing.Optional[DataSourceUpdateComponent].
+        - component: typing.Optional[DataSourceUpdateComponent]. Component that implements the data source
        ---
         from llama_cloud import ConfigurableDataSourceNames
         from llama_cloud.client import AsyncLlamaCloud

llama_cloud/resources/embedding_model_configs/client.py
@@ -63,7 +63,7 @@ class EmbeddingModelConfigsClient:
 
     def create_embedding_model_config(
         self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
-    ) -> typing.Any:
+    ) -> EmbeddingModelConfig:
         """
         Create a new embedding model configuration within a specified project.
 
@@ -83,7 +83,7 @@ class EmbeddingModelConfigsClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -230,7 +230,7 @@ class AsyncEmbeddingModelConfigsClient:
 
    async def create_embedding_model_config(
         self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
-    ) -> typing.Any:
+    ) -> EmbeddingModelConfig:
         """
         Create a new embedding model configuration within a specified project.
 
@@ -250,7 +250,7 @@ class AsyncEmbeddingModelConfigsClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
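
The four embedding model config hunks above change only the declared return type: create_embedding_model_config previously parsed the response as typing.Any and now parses it into a typed EmbeddingModelConfig. A minimal sketch of what that means for callers follows; the embedding_config payload shape and the EmbeddingModelConfig import path are illustrative assumptions, not taken from this diff.

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Hypothetical provider payload; the real EmbeddingModelConfigCreateEmbeddingConfig
    # union members are defined in llama_cloud.types and vary by embedding provider.
    embedding_config = {
        "type": "OPENAI_EMBEDDING",
        "component": {"model_name": "text-embedding-3-small"},
    }

    config = client.embedding_model_configs.create_embedding_model_config(
        project_id="YOUR_PROJECT_ID",
        name="my-embedding-config",
        embedding_config=embedding_config,  # type: ignore[arg-type]
    )

    # As of 0.1.7a1 the return value is a typed EmbeddingModelConfig pydantic model
    # rather than typing.Any, so IDEs and type checkers can see its fields.
    print(type(config).__name__)
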