llama-cloud 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of llama-cloud has been flagged as a potentially problematic release.

Files changed (129)
  1. llama_cloud/__init__.py +138 -2
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/resources/__init__.py +17 -1
  4. llama_cloud/resources/chat_apps/__init__.py +2 -0
  5. llama_cloud/resources/chat_apps/client.py +630 -0
  6. llama_cloud/resources/data_sinks/client.py +2 -2
  7. llama_cloud/resources/data_sources/client.py +2 -2
  8. llama_cloud/resources/embedding_model_configs/client.py +4 -4
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +21 -0
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +14 -14
  21. llama_cloud/resources/parsing/client.py +480 -229
  22. llama_cloud/resources/pipelines/client.py +182 -126
  23. llama_cloud/resources/projects/client.py +210 -102
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +124 -2
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +2 -2
  32. llama_cloud/types/chat_app.py +44 -0
  33. llama_cloud/types/chat_app_response.py +41 -0
  34. llama_cloud/types/cloud_az_storage_blob_data_source.py +1 -0
  35. llama_cloud/types/cloud_box_data_source.py +1 -0
  36. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  37. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  38. llama_cloud/types/cloud_jira_data_source.py +1 -0
  39. llama_cloud/types/cloud_notion_page_data_source.py +1 -0
  40. llama_cloud/types/cloud_one_drive_data_source.py +1 -0
  41. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  42. llama_cloud/types/cloud_s_3_data_source.py +1 -0
  43. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  44. llama_cloud/types/cloud_slack_data_source.py +1 -0
  45. llama_cloud/types/composite_retrieval_mode.py +21 -0
  46. llama_cloud/types/composite_retrieval_result.py +38 -0
  47. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  48. llama_cloud/types/data_sink.py +1 -1
  49. llama_cloud/types/data_sink_create.py +1 -1
  50. llama_cloud/types/data_source.py +1 -1
  51. llama_cloud/types/data_source_create.py +1 -1
  52. llama_cloud/types/edit_suggestion.py +39 -0
  53. llama_cloud/types/eval_dataset_job_record.py +1 -0
  54. llama_cloud/types/extract_agent.py +45 -0
  55. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  56. llama_cloud/types/extract_config.py +40 -0
  57. llama_cloud/types/extract_job.py +35 -0
  58. llama_cloud/types/extract_job_create.py +40 -0
  59. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  60. llama_cloud/types/extract_mode.py +17 -0
  61. llama_cloud/types/extract_resultset.py +46 -0
  62. llama_cloud/types/extract_resultset_data.py +11 -0
  63. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  64. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  65. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  66. llama_cloud/types/file.py +3 -0
  67. llama_cloud/types/file_permission_info_value.py +5 -0
  68. llama_cloud/types/filter_condition.py +9 -1
  69. llama_cloud/types/filter_operator.py +4 -0
  70. llama_cloud/types/image_block.py +35 -0
  71. llama_cloud/types/input_message.py +1 -1
  72. llama_cloud/types/job_name_mapping.py +4 -0
  73. llama_cloud/types/job_names.py +89 -0
  74. llama_cloud/types/job_record.py +57 -0
  75. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  76. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  77. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  78. llama_cloud/types/llama_parse_parameters.py +15 -0
  79. llama_cloud/types/llm.py +1 -0
  80. llama_cloud/types/llm_model_data.py +1 -0
  81. llama_cloud/types/llm_parameters.py +1 -0
  82. llama_cloud/types/managed_ingestion_status.py +4 -0
  83. llama_cloud/types/managed_ingestion_status_response.py +1 -0
  84. llama_cloud/types/object_type.py +4 -0
  85. llama_cloud/types/organization.py +5 -0
  86. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  87. llama_cloud/types/paginated_report_response.py +35 -0
  88. llama_cloud/types/parse_plan_level.py +21 -0
  89. llama_cloud/types/parsing_job_structured_result.py +32 -0
  90. llama_cloud/types/pipeline_create.py +3 -1
  91. llama_cloud/types/pipeline_data_source.py +1 -1
  92. llama_cloud/types/pipeline_file.py +3 -0
  93. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  94. llama_cloud/types/playground_session.py +2 -2
  95. llama_cloud/types/preset_retrieval_params.py +1 -0
  96. llama_cloud/types/progress_event.py +44 -0
  97. llama_cloud/types/progress_event_status.py +33 -0
  98. llama_cloud/types/prompt_spec.py +2 -2
  99. llama_cloud/types/related_node_info.py +2 -2
  100. llama_cloud/types/related_node_info_node_type.py +7 -0
  101. llama_cloud/types/report.py +33 -0
  102. llama_cloud/types/report_block.py +34 -0
  103. llama_cloud/types/report_block_dependency.py +29 -0
  104. llama_cloud/types/report_create_response.py +31 -0
  105. llama_cloud/types/report_event_item.py +40 -0
  106. llama_cloud/types/report_event_item_event_data.py +45 -0
  107. llama_cloud/types/report_event_type.py +37 -0
  108. llama_cloud/types/report_metadata.py +43 -0
  109. llama_cloud/types/report_plan.py +36 -0
  110. llama_cloud/types/report_plan_block.py +36 -0
  111. llama_cloud/types/report_query.py +33 -0
  112. llama_cloud/types/report_response.py +41 -0
  113. llama_cloud/types/report_state.py +37 -0
  114. llama_cloud/types/report_state_event.py +38 -0
  115. llama_cloud/types/report_update_event.py +38 -0
  116. llama_cloud/types/retrieve_results.py +1 -1
  117. llama_cloud/types/retriever.py +45 -0
  118. llama_cloud/types/retriever_create.py +37 -0
  119. llama_cloud/types/retriever_pipeline.py +37 -0
  120. llama_cloud/types/status_enum.py +4 -0
  121. llama_cloud/types/supported_llm_model_names.py +4 -0
  122. llama_cloud/types/text_block.py +31 -0
  123. llama_cloud/types/text_node.py +13 -6
  124. llama_cloud/types/usage_metric_response.py +34 -0
  125. llama_cloud/types/user_job_record.py +32 -0
  126. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7.dist-info}/METADATA +3 -1
  127. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7.dist-info}/RECORD +129 -59
  128. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7.dist-info}/WHEEL +1 -1
  129. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7.dist-info}/LICENSE +0 -0
llama_cloud/resources/chat_apps/client.py (new file)
@@ -0,0 +1,630 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.chat_app import ChatApp
+from ...types.chat_app_response import ChatAppResponse
+from ...types.http_validation_error import HttpValidationError
+from ...types.input_message import InputMessage
+from ...types.llm_parameters import LlmParameters
+from ...types.preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ChatAppsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def get_chat_apps_api_v_1_apps_get(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[ChatAppResponse]:
+        """
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.get_chat_apps_api_v_1_apps_get()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[ChatAppResponse], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def create_chat_app_api_v_1_apps_post(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        name: str,
+        pipeline_id: str,
+        llm_config: LlmParameters,
+        retrieval_config: PresetRetrievalParams,
+        chat_app_create_project_id: str,
+    ) -> ChatApp:
+        """
+        Create a new chat app.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - name: str.
+
+            - pipeline_id: str.
+
+            - llm_config: LlmParameters.
+
+            - retrieval_config: PresetRetrievalParams.
+
+            - chat_app_create_project_id: str.
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.create_chat_app_api_v_1_apps_post(
+            name="string",
+            pipeline_id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+            chat_app_create_project_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(
+                {
+                    "name": name,
+                    "pipeline_id": pipeline_id,
+                    "llm_config": llm_config,
+                    "retrieval_config": retrieval_config,
+                    "project_id": chat_app_create_project_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_chat_app(self, id: str) -> ChatApp:
+        """
+        Get a chat app by ID.
+
+        Parameters:
+            - id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.get_chat_app(
+            id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def delete_chat_app(self, id: str) -> typing.Any:
+        """
+        Parameters:
+            - id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.delete_chat_app(
+            id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def update_chat_app(
+        self,
+        id: str,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        name: typing.Optional[str] = OMIT,
+        llm_config: typing.Optional[LlmParameters] = OMIT,
+        retrieval_config: typing.Optional[PresetRetrievalParams] = OMIT,
+    ) -> ChatApp:
+        """
+        Update a chat app.
+
+        Parameters:
+            - id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - name: typing.Optional[str].
+
+            - llm_config: typing.Optional[LlmParameters].
+
+            - retrieval_config: typing.Optional[PresetRetrievalParams].
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.update_chat_app(
+            id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if name is not OMIT:
+            _request["name"] = name
+        if llm_config is not OMIT:
+            _request["llm_config"] = llm_config
+        if retrieval_config is not OMIT:
+            _request["retrieval_config"] = retrieval_config
+        _response = self._client_wrapper.httpx_client.request(
+            "PATCH",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def chat_with_chat_app(self, id: str, *, messages: typing.Optional[typing.List[InputMessage]] = OMIT) -> typing.Any:
+        """
+        Chat with a chat app.
+
+        Parameters:
+            - id: str.
+
+            - messages: typing.Optional[typing.List[InputMessage]].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.chat_with_chat_app(
+            id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}/chat"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncChatAppsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def get_chat_apps_api_v_1_apps_get(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[ChatAppResponse]:
+        """
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.get_chat_apps_api_v_1_apps_get()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[ChatAppResponse], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def create_chat_app_api_v_1_apps_post(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        name: str,
+        pipeline_id: str,
+        llm_config: LlmParameters,
+        retrieval_config: PresetRetrievalParams,
+        chat_app_create_project_id: str,
+    ) -> ChatApp:
+        """
+        Create a new chat app.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - name: str.
+
+            - pipeline_id: str.
+
+            - llm_config: LlmParameters.
+
+            - retrieval_config: PresetRetrievalParams.
+
+            - chat_app_create_project_id: str.
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.create_chat_app_api_v_1_apps_post(
+            name="string",
+            pipeline_id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+            chat_app_create_project_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(
+                {
+                    "name": name,
+                    "pipeline_id": pipeline_id,
+                    "llm_config": llm_config,
+                    "retrieval_config": retrieval_config,
+                    "project_id": chat_app_create_project_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_chat_app(self, id: str) -> ChatApp:
+        """
+        Get a chat app by ID.
+
+        Parameters:
+            - id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.get_chat_app(
+            id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def delete_chat_app(self, id: str) -> typing.Any:
+        """
+        Parameters:
+            - id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.delete_chat_app(
+            id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def update_chat_app(
+        self,
+        id: str,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        name: typing.Optional[str] = OMIT,
+        llm_config: typing.Optional[LlmParameters] = OMIT,
+        retrieval_config: typing.Optional[PresetRetrievalParams] = OMIT,
+    ) -> ChatApp:
+        """
+        Update a chat app.
+
+        Parameters:
+            - id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - name: typing.Optional[str].
+
+            - llm_config: typing.Optional[LlmParameters].
+
+            - retrieval_config: typing.Optional[PresetRetrievalParams].
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.update_chat_app(
+            id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if name is not OMIT:
+            _request["name"] = name
+        if llm_config is not OMIT:
+            _request["llm_config"] = llm_config
+        if retrieval_config is not OMIT:
+            _request["retrieval_config"] = retrieval_config
+        _response = await self._client_wrapper.httpx_client.request(
+            "PATCH",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def chat_with_chat_app(
+        self, id: str, *, messages: typing.Optional[typing.List[InputMessage]] = OMIT
+    ) -> typing.Any:
+        """
+        Chat with a chat app.
+
+        Parameters:
+            - id: str.
+
+            - messages: typing.Optional[typing.List[InputMessage]].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.chat_with_chat_app(
+            id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}/chat"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
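
The new file surfaces the /api/v1/apps endpoints as client.chat_apps. A minimal end-to-end sketch assembled from the docstring examples above (the token, name, and IDs are placeholders, and it assumes the returned ChatApp model exposes an id field):

from llama_cloud import (
    FilterCondition,
    LlmParameters,
    MetadataFilters,
    PresetRetrievalParams,
    RetrievalMode,
    SupportedLlmModelNames,
)
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token

# Create a chat app on top of an existing pipeline (IDs are placeholders).
app = client.chat_apps.create_chat_app_api_v_1_apps_post(
    name="my-chat-app",
    pipeline_id="PIPELINE_ID",
    llm_config=LlmParameters(model_name=SupportedLlmModelNames.GPT_3_5_TURBO),
    retrieval_config=PresetRetrievalParams(
        search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
        retrieval_mode=RetrievalMode.CHUNKS,
    ),
    chat_app_create_project_id="PROJECT_ID",
)

# `messages` defaults to OMIT, so a bare call is valid; the endpoint is
# typed as typing.Any and returns the raw JSON payload.
response = client.chat_apps.chat_with_chat_app(id=app.id)  # assumes ChatApp has an `id` field

# Clean up.
client.chat_apps.delete_chat_app(id=app.id)

The AsyncChatAppsClient mirrors this surface one-to-one; swap in AsyncLlamaCloud and await each call.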
llama_cloud/resources/data_sinks/client.py
@@ -213,7 +213,7 @@ class DataSinksClient:
 
             - sink_type: ConfigurableDataSinkNames.
 
-            - component: typing.Optional[DataSinkUpdateComponent].
+            - component: typing.Optional[DataSinkUpdateComponent]. Component that implements the data sink
         ---
         from llama_cloud import ConfigurableDataSinkNames
         from llama_cloud.client import LlamaCloud
@@ -467,7 +467,7 @@ class AsyncDataSinksClient:
 
             - sink_type: ConfigurableDataSinkNames.
 
-            - component: typing.Optional[DataSinkUpdateComponent].
+            - component: typing.Optional[DataSinkUpdateComponent]. Component that implements the data sink
         ---
         from llama_cloud import ConfigurableDataSinkNames
         from llama_cloud.client import AsyncLlamaCloud
llama_cloud/resources/data_sources/client.py
@@ -218,7 +218,7 @@ class DataSourcesClient:
 
             - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceUpdateCustomMetadataValue]]].
 
-            - component: typing.Optional[DataSourceUpdateComponent].
+            - component: typing.Optional[DataSourceUpdateComponent]. Component that implements the data source
         ---
         from llama_cloud import ConfigurableDataSourceNames
         from llama_cloud.client import LlamaCloud
@@ -478,7 +478,7 @@ class AsyncDataSourcesClient:
 
            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceUpdateCustomMetadataValue]]].
 
-            - component: typing.Optional[DataSourceUpdateComponent].
+            - component: typing.Optional[DataSourceUpdateComponent]. Component that implements the data source
         ---
         from llama_cloud import ConfigurableDataSourceNames
         from llama_cloud.client import AsyncLlamaCloud
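
These four hunks are docstring-only: they document that `component` on the update methods carries the object that implements the chosen sink or source type. A hedged sketch of such a call; the `update_data_sink` name and `PINECONE` enum member follow the Fern naming visible in the surrounding docstrings but are assumptions, not read from the wheel:

from llama_cloud import ConfigurableDataSinkNames
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder

# Hypothetical update: `component` is the sink-specific configuration; the
# request body goes through jsonable_encoder, so a plain dict should serialize.
client.data_sinks.update_data_sink(
    data_sink_id="DATA_SINK_ID",  # placeholder
    name="my-sink",
    sink_type=ConfigurableDataSinkNames.PINECONE,  # assumed enum member
    component={"index_name": "my-index"},  # assumed payload shape
)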
llama_cloud/resources/embedding_model_configs/client.py
@@ -63,7 +63,7 @@ class EmbeddingModelConfigsClient:
 
     def create_embedding_model_config(
         self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
-    ) -> typing.Any:
+    ) -> EmbeddingModelConfig:
         """
         Create a new embedding model configuration within a specified project.
 
@@ -83,7 +83,7 @@ class EmbeddingModelConfigsClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -230,7 +230,7 @@ class AsyncEmbeddingModelConfigsClient:
 
     async def create_embedding_model_config(
         self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
-    ) -> typing.Any:
+    ) -> EmbeddingModelConfig:
         """
         Create a new embedding model configuration within a specified project.
 
@@ -250,7 +250,7 @@ class AsyncEmbeddingModelConfigsClient:
             timeout=60,
        )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
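
These hunks tighten the return annotation of create_embedding_model_config from typing.Any to EmbeddingModelConfig in both the sync and async clients, so the response is parsed into a pydantic model instead of being handed back raw. A short consumer-side sketch; the token and project ID are placeholders, the embedding_config value is deliberately elided, and the `name` attribute at the end is an assumed field on EmbeddingModelConfig:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder

config = client.embedding_model_configs.create_embedding_model_config(
    project_id="PROJECT_ID",  # placeholder
    name="my-embedding-config",
    embedding_config=...,  # supply an EmbeddingModelConfigCreateEmbeddingConfig member here
)

# In 0.1.5 this returned typing.Any (an untyped payload); as of 0.1.7 it is
# parsed into EmbeddingModelConfig, so attribute access is model-checked.
print(config.name)  # assumed field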