llama-cloud 0.1.6__py3-none-any.whl → 0.1.7a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +140 -6
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/environment.py +1 -1
  4. llama_cloud/resources/__init__.py +15 -0
  5. llama_cloud/{types/token.py → resources/chat_apps/__init__.py} +0 -3
  6. llama_cloud/resources/chat_apps/client.py +620 -0
  7. llama_cloud/resources/data_sinks/client.py +12 -12
  8. llama_cloud/resources/data_sources/client.py +14 -14
  9. llama_cloud/resources/embedding_model_configs/client.py +20 -76
  10. llama_cloud/resources/evals/client.py +26 -36
  11. llama_cloud/resources/extraction/client.py +32 -32
  12. llama_cloud/resources/files/client.py +40 -44
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +66 -70
  21. llama_cloud/resources/parsing/client.py +448 -428
  22. llama_cloud/resources/pipelines/client.py +256 -344
  23. llama_cloud/resources/projects/client.py +34 -60
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +128 -6
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +3 -3
  32. llama_cloud/types/azure_open_ai_embedding.py +6 -12
  33. llama_cloud/types/base_prompt_template.py +2 -6
  34. llama_cloud/types/bedrock_embedding.py +6 -12
  35. llama_cloud/types/character_splitter.py +2 -4
  36. llama_cloud/types/chat_app.py +44 -0
  37. llama_cloud/types/chat_app_response.py +41 -0
  38. llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -15
  39. llama_cloud/types/cloud_box_data_source.py +6 -12
  40. llama_cloud/types/cloud_confluence_data_source.py +6 -6
  41. llama_cloud/types/cloud_document.py +1 -3
  42. llama_cloud/types/cloud_document_create.py +1 -3
  43. llama_cloud/types/cloud_jira_data_source.py +4 -6
  44. llama_cloud/types/cloud_notion_page_data_source.py +2 -2
  45. llama_cloud/types/cloud_one_drive_data_source.py +3 -5
  46. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  47. llama_cloud/types/cloud_s_3_data_source.py +4 -8
  48. llama_cloud/types/cloud_sharepoint_data_source.py +6 -8
  49. llama_cloud/types/cloud_slack_data_source.py +6 -6
  50. llama_cloud/types/code_splitter.py +1 -1
  51. llama_cloud/types/cohere_embedding.py +3 -7
  52. llama_cloud/types/composite_retrieval_mode.py +21 -0
  53. llama_cloud/types/composite_retrieval_result.py +38 -0
  54. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  55. llama_cloud/types/data_sink.py +4 -4
  56. llama_cloud/types/data_sink_component.py +20 -0
  57. llama_cloud/types/data_source.py +5 -7
  58. llama_cloud/types/data_source_component.py +28 -0
  59. llama_cloud/types/data_source_create.py +1 -3
  60. llama_cloud/types/edit_suggestion.py +39 -0
  61. llama_cloud/types/embedding_model_config.py +2 -2
  62. llama_cloud/types/embedding_model_config_update.py +2 -4
  63. llama_cloud/types/eval_dataset.py +2 -2
  64. llama_cloud/types/eval_dataset_job_record.py +8 -13
  65. llama_cloud/types/eval_execution_params_override.py +2 -6
  66. llama_cloud/types/eval_question.py +2 -2
  67. llama_cloud/types/extract_agent.py +45 -0
  68. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  69. llama_cloud/types/extract_config.py +40 -0
  70. llama_cloud/types/extract_job.py +35 -0
  71. llama_cloud/types/extract_job_create.py +40 -0
  72. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  73. llama_cloud/types/extract_mode.py +17 -0
  74. llama_cloud/types/extract_resultset.py +46 -0
  75. llama_cloud/types/extract_resultset_data.py +11 -0
  76. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  77. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  78. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  79. llama_cloud/types/extraction_result.py +2 -2
  80. llama_cloud/types/extraction_schema.py +3 -5
  81. llama_cloud/types/file.py +9 -14
  82. llama_cloud/types/filter_condition.py +9 -1
  83. llama_cloud/types/filter_operator.py +6 -2
  84. llama_cloud/types/gemini_embedding.py +6 -10
  85. llama_cloud/types/hugging_face_inference_api_embedding.py +11 -27
  86. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  87. llama_cloud/types/image_block.py +35 -0
  88. llama_cloud/types/input_message.py +2 -4
  89. llama_cloud/types/job_names.py +89 -0
  90. llama_cloud/types/job_record.py +57 -0
  91. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  92. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  93. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  94. llama_cloud/types/llama_parse_parameters.py +4 -0
  95. llama_cloud/types/llm.py +3 -4
  96. llama_cloud/types/llm_model_data.py +1 -0
  97. llama_cloud/types/llm_parameters.py +3 -5
  98. llama_cloud/types/local_eval.py +8 -10
  99. llama_cloud/types/local_eval_results.py +1 -1
  100. llama_cloud/types/managed_ingestion_status.py +4 -0
  101. llama_cloud/types/managed_ingestion_status_response.py +4 -5
  102. llama_cloud/types/markdown_element_node_parser.py +3 -5
  103. llama_cloud/types/markdown_node_parser.py +1 -1
  104. llama_cloud/types/metadata_filter.py +2 -2
  105. llama_cloud/types/metadata_filter_value.py +5 -0
  106. llama_cloud/types/metric_result.py +3 -3
  107. llama_cloud/types/node_parser.py +1 -1
  108. llama_cloud/types/object_type.py +4 -0
  109. llama_cloud/types/open_ai_embedding.py +6 -12
  110. llama_cloud/types/organization.py +7 -2
  111. llama_cloud/types/page_splitter_node_parser.py +2 -2
  112. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  113. llama_cloud/types/paginated_report_response.py +35 -0
  114. llama_cloud/types/parse_plan_level.py +21 -0
  115. llama_cloud/types/permission.py +3 -3
  116. llama_cloud/types/pipeline.py +7 -17
  117. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  118. llama_cloud/types/pipeline_create.py +8 -16
  119. llama_cloud/types/pipeline_data_source.py +7 -13
  120. llama_cloud/types/pipeline_data_source_component.py +28 -0
  121. llama_cloud/types/pipeline_data_source_create.py +1 -3
  122. llama_cloud/types/pipeline_deployment.py +4 -4
  123. llama_cloud/types/pipeline_file.py +13 -24
  124. llama_cloud/types/pipeline_file_create.py +1 -3
  125. llama_cloud/types/playground_session.py +4 -4
  126. llama_cloud/types/preset_retrieval_params.py +8 -14
  127. llama_cloud/types/presigned_url.py +1 -3
  128. llama_cloud/types/progress_event.py +44 -0
  129. llama_cloud/types/progress_event_status.py +33 -0
  130. llama_cloud/types/project.py +2 -2
  131. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  132. llama_cloud/types/prompt_spec.py +3 -5
  133. llama_cloud/types/related_node_info.py +2 -2
  134. llama_cloud/types/related_node_info_node_type.py +7 -0
  135. llama_cloud/types/report.py +33 -0
  136. llama_cloud/types/report_block.py +34 -0
  137. llama_cloud/types/report_block_dependency.py +29 -0
  138. llama_cloud/types/report_create_response.py +31 -0
  139. llama_cloud/types/report_event_item.py +40 -0
  140. llama_cloud/types/report_event_item_event_data.py +45 -0
  141. llama_cloud/types/report_event_type.py +37 -0
  142. llama_cloud/types/report_metadata.py +39 -0
  143. llama_cloud/types/report_plan.py +36 -0
  144. llama_cloud/types/report_plan_block.py +36 -0
  145. llama_cloud/types/report_query.py +33 -0
  146. llama_cloud/types/report_response.py +41 -0
  147. llama_cloud/types/report_state.py +37 -0
  148. llama_cloud/types/report_state_event.py +38 -0
  149. llama_cloud/types/report_update_event.py +38 -0
  150. llama_cloud/types/retrieve_results.py +1 -1
  151. llama_cloud/types/retriever.py +45 -0
  152. llama_cloud/types/retriever_create.py +37 -0
  153. llama_cloud/types/retriever_pipeline.py +37 -0
  154. llama_cloud/types/role.py +3 -3
  155. llama_cloud/types/sentence_splitter.py +2 -4
  156. llama_cloud/types/status_enum.py +4 -0
  157. llama_cloud/types/supported_llm_model_names.py +4 -0
  158. llama_cloud/types/text_block.py +31 -0
  159. llama_cloud/types/text_node.py +15 -8
  160. llama_cloud/types/token_text_splitter.py +1 -1
  161. llama_cloud/types/usage_metric_response.py +34 -0
  162. llama_cloud/types/user_job_record.py +32 -0
  163. llama_cloud/types/user_organization.py +5 -9
  164. llama_cloud/types/user_organization_create.py +4 -4
  165. llama_cloud/types/user_organization_delete.py +2 -2
  166. llama_cloud/types/user_organization_role.py +2 -2
  167. llama_cloud/types/vertex_text_embedding.py +5 -9
  168. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/METADATA +2 -1
  169. llama_cloud-0.1.7a1.dist-info/RECORD +310 -0
  170. llama_cloud/types/value.py +0 -5
  171. llama_cloud-0.1.6.dist-info/RECORD +0 -241
  172. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/LICENSE +0 -0
  173. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/WHEEL +0 -0
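
The bulk of this release is a set of new Fern-generated resource clients (chat_apps, jobs, llama_extract, reports, retrievers) plus the wiring in llama_cloud/client.py (+15). The hunk below reproduces the new chat_apps client in full. As a minimal sketch of what the new surface looks like from the top-level client — client.chat_apps is confirmed by the hunk below, while the other attribute names are inferred from the new resource modules and should be checked against the installed wheel:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Confirmed by the generated client shown in the hunk below.
    apps = client.chat_apps.get_chat_apps_api_v_1_apps_get()

    # Inferred, not shown in this hunk: the other new resources should hang
    # off the client the same way, e.g. client.retrievers, client.reports,
    # client.llama_extract, client.jobs.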
llama_cloud/resources/chat_apps/client.py
@@ -0,0 +1,620 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.chat_app import ChatApp
+from ...types.chat_app_response import ChatAppResponse
+from ...types.http_validation_error import HttpValidationError
+from ...types.input_message import InputMessage
+from ...types.llm_parameters import LlmParameters
+from ...types.preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ChatAppsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def get_chat_apps_api_v_1_apps_get(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[ChatAppResponse]:
+        """
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.get_chat_apps_api_v_1_apps_get()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[ChatAppResponse], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def create_chat_app_api_v_1_apps_post(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        pipeline_id: str,
+        llm_config: LlmParameters,
+        retrieval_config: PresetRetrievalParams,
+        chat_app_create_project_id: str,
+    ) -> ChatApp:
+        """
+        Create a new chat app.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - pipeline_id: str.
+
+            - llm_config: LlmParameters.
+
+            - retrieval_config: PresetRetrievalParams.
+
+            - chat_app_create_project_id: str.
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.create_chat_app_api_v_1_apps_post(
+            pipeline_id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+            chat_app_create_project_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(
+                {
+                    "pipeline_id": pipeline_id,
+                    "llm_config": llm_config,
+                    "retrieval_config": retrieval_config,
+                    "project_id": chat_app_create_project_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_chat_app(self, id: str) -> ChatApp:
+        """
+        Get a chat app by ID.
+
+        Parameters:
+            - id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.get_chat_app(
+            id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def delete_chat_app(self, id: str) -> typing.Any:
+        """
+        Parameters:
+            - id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.delete_chat_app(
+            id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def update_chat_app(
+        self,
+        id: str,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        name: typing.Optional[str] = OMIT,
+        llm_config: typing.Optional[LlmParameters] = OMIT,
+        retrieval_config: typing.Optional[PresetRetrievalParams] = OMIT,
+    ) -> ChatApp:
+        """
+        Update a chat app.
+
+        Parameters:
+            - id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - name: typing.Optional[str].
+
+            - llm_config: typing.Optional[LlmParameters].
+
+            - retrieval_config: typing.Optional[PresetRetrievalParams].
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.update_chat_app(
+            id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if name is not OMIT:
+            _request["name"] = name
+        if llm_config is not OMIT:
+            _request["llm_config"] = llm_config
+        if retrieval_config is not OMIT:
+            _request["retrieval_config"] = retrieval_config
+        _response = self._client_wrapper.httpx_client.request(
+            "PATCH",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def chat_with_chat_app(self, id: str, *, messages: typing.Optional[typing.List[InputMessage]] = OMIT) -> typing.Any:
+        """
+        Chat with a chat app.
+
+        Parameters:
+            - id: str.
+
+            - messages: typing.Optional[typing.List[InputMessage]].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.chat_apps.chat_with_chat_app(
+            id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}/chat"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncChatAppsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def get_chat_apps_api_v_1_apps_get(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[ChatAppResponse]:
+        """
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.get_chat_apps_api_v_1_apps_get()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[ChatAppResponse], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def create_chat_app_api_v_1_apps_post(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        pipeline_id: str,
+        llm_config: LlmParameters,
+        retrieval_config: PresetRetrievalParams,
+        chat_app_create_project_id: str,
+    ) -> ChatApp:
+        """
+        Create a new chat app.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - pipeline_id: str.
+
+            - llm_config: LlmParameters.
+
+            - retrieval_config: PresetRetrievalParams.
+
+            - chat_app_create_project_id: str.
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.create_chat_app_api_v_1_apps_post(
+            pipeline_id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+            chat_app_create_project_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(
+                {
+                    "pipeline_id": pipeline_id,
+                    "llm_config": llm_config,
+                    "retrieval_config": retrieval_config,
+                    "project_id": chat_app_create_project_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_chat_app(self, id: str) -> ChatApp:
+        """
+        Get a chat app by ID.
+
+        Parameters:
+            - id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.get_chat_app(
+            id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def delete_chat_app(self, id: str) -> typing.Any:
+        """
+        Parameters:
+            - id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.delete_chat_app(
+            id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def update_chat_app(
+        self,
+        id: str,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        name: typing.Optional[str] = OMIT,
+        llm_config: typing.Optional[LlmParameters] = OMIT,
+        retrieval_config: typing.Optional[PresetRetrievalParams] = OMIT,
+    ) -> ChatApp:
+        """
+        Update a chat app.
+
+        Parameters:
+            - id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - name: typing.Optional[str].
+
+            - llm_config: typing.Optional[LlmParameters].
+
+            - retrieval_config: typing.Optional[PresetRetrievalParams].
+        ---
+        from llama_cloud import (
+            FilterCondition,
+            LlmParameters,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+            SupportedLlmModelNames,
+        )
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.update_chat_app(
+            id="string",
+            llm_config=LlmParameters(
+                model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            ),
+            retrieval_config=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if name is not OMIT:
+            _request["name"] = name
+        if llm_config is not OMIT:
+            _request["llm_config"] = llm_config
+        if retrieval_config is not OMIT:
+            _request["retrieval_config"] = retrieval_config
+        _response = await self._client_wrapper.httpx_client.request(
+            "PATCH",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ChatApp, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def chat_with_chat_app(
+        self, id: str, *, messages: typing.Optional[typing.List[InputMessage]] = OMIT
+    ) -> typing.Any:
+        """
+        Chat with a chat app.
+
+        Parameters:
+            - id: str.
+
+            - messages: typing.Optional[typing.List[InputMessage]].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.chat_apps.chat_with_chat_app(
+            id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if messages is not OMIT:
+            _request["messages"] = messages
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}/chat"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
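
Taken together, the methods above give chat apps a create/read/update/delete surface plus a chat endpoint, in sync and async flavors. A minimal end-to-end sketch built only from the docstring examples above, with the error handling the generated code implies; the pipeline/project IDs are placeholders, and reading app.id assumes the ChatApp model exposes an id field (plausible given the new chat_app.py type, but not shown in this hunk):

    from llama_cloud import (
        FilterCondition,
        LlmParameters,
        MetadataFilters,
        PresetRetrievalParams,
        RetrievalMode,
        SupportedLlmModelNames,
    )
    from llama_cloud.client import LlamaCloud
    from llama_cloud.core.api_error import ApiError

    client = LlamaCloud(token="YOUR_TOKEN")

    # Create a chat app bound to an existing pipeline (placeholder IDs).
    app = client.chat_apps.create_chat_app_api_v_1_apps_post(
        pipeline_id="YOUR_PIPELINE_ID",
        llm_config=LlmParameters(model_name=SupportedLlmModelNames.GPT_3_5_TURBO),
        retrieval_config=PresetRetrievalParams(
            search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
            retrieval_mode=RetrievalMode.CHUNKS,
        ),
        chat_app_create_project_id="YOUR_PROJECT_ID",
    )

    try:
        # Per the generated code above: 422 responses raise
        # UnprocessableEntityError; other non-2xx responses raise ApiError
        # with status_code and body attached.
        answer = client.chat_apps.chat_with_chat_app(id=app.id)  # assumes ChatApp has .id
        print(answer)
    except ApiError as err:
        print(err.status_code, err.body)
    finally:
        client.chat_apps.delete_chat_app(id=app.id)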