llama-cloud 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud has been flagged as potentially problematic; consult the registry's advisory page for details.

Files changed (117)
  1. llama_cloud/__init__.py +76 -10
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/environment.py +1 -1
  4. llama_cloud/resources/__init__.py +23 -1
  5. llama_cloud/resources/data_sinks/client.py +26 -20
  6. llama_cloud/resources/data_sources/client.py +16 -16
  7. llama_cloud/resources/embedding_model_configs/__init__.py +23 -0
  8. llama_cloud/resources/embedding_model_configs/client.py +416 -0
  9. llama_cloud/resources/embedding_model_configs/types/__init__.py +23 -0
  10. llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py +89 -0
  11. llama_cloud/resources/evals/client.py +36 -26
  12. llama_cloud/resources/extraction/client.py +32 -32
  13. llama_cloud/resources/files/__init__.py +2 -2
  14. llama_cloud/resources/files/client.py +310 -54
  15. llama_cloud/resources/files/types/__init__.py +3 -1
  16. llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +7 -0
  17. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  18. llama_cloud/resources/organizations/client.py +125 -56
  19. llama_cloud/resources/parsing/client.py +652 -264
  20. llama_cloud/resources/pipelines/client.py +617 -310
  21. llama_cloud/resources/projects/client.py +341 -136
  22. llama_cloud/types/__init__.py +58 -10
  23. llama_cloud/types/azure_open_ai_embedding.py +12 -6
  24. llama_cloud/types/base_prompt_template.py +6 -2
  25. llama_cloud/types/bedrock_embedding.py +12 -6
  26. llama_cloud/types/character_splitter.py +4 -2
  27. llama_cloud/types/chat_message.py +1 -1
  28. llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
  29. llama_cloud/types/cloud_box_data_source.py +13 -6
  30. llama_cloud/types/cloud_confluence_data_source.py +7 -6
  31. llama_cloud/types/cloud_document.py +3 -1
  32. llama_cloud/types/cloud_document_create.py +3 -1
  33. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  34. llama_cloud/types/cloud_jira_data_source.py +7 -4
  35. llama_cloud/types/cloud_notion_page_data_source.py +3 -2
  36. llama_cloud/types/cloud_one_drive_data_source.py +6 -2
  37. llama_cloud/types/cloud_postgres_vector_store.py +1 -1
  38. llama_cloud/types/cloud_s_3_data_source.py +9 -4
  39. llama_cloud/types/cloud_sharepoint_data_source.py +9 -5
  40. llama_cloud/types/cloud_slack_data_source.py +7 -6
  41. llama_cloud/types/code_splitter.py +1 -1
  42. llama_cloud/types/cohere_embedding.py +7 -3
  43. llama_cloud/types/data_sink.py +4 -4
  44. llama_cloud/types/data_sink_create.py +1 -1
  45. llama_cloud/types/data_source.py +7 -5
  46. llama_cloud/types/data_source_create.py +4 -2
  47. llama_cloud/types/embedding_model_config.py +43 -0
  48. llama_cloud/types/embedding_model_config_embedding_config.py +89 -0
  49. llama_cloud/types/embedding_model_config_update.py +35 -0
  50. llama_cloud/types/embedding_model_config_update_embedding_config.py +89 -0
  51. llama_cloud/types/eval_dataset.py +2 -2
  52. llama_cloud/types/eval_dataset_job_record.py +13 -7
  53. llama_cloud/types/eval_execution_params_override.py +6 -2
  54. llama_cloud/types/eval_question.py +2 -2
  55. llama_cloud/types/extraction_result.py +2 -2
  56. llama_cloud/types/extraction_schema.py +5 -3
  57. llama_cloud/types/file.py +15 -7
  58. llama_cloud/types/file_permission_info_value.py +5 -0
  59. llama_cloud/types/filter_operator.py +2 -2
  60. llama_cloud/types/gemini_embedding.py +10 -6
  61. llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
  62. llama_cloud/types/input_message.py +3 -1
  63. llama_cloud/types/interval_usage_and_plan.py +36 -0
  64. llama_cloud/types/job_name_mapping.py +4 -0
  65. llama_cloud/types/llama_parse_parameters.py +21 -0
  66. llama_cloud/types/llm.py +4 -2
  67. llama_cloud/types/llm_parameters.py +5 -2
  68. llama_cloud/types/local_eval.py +10 -8
  69. llama_cloud/types/local_eval_results.py +1 -1
  70. llama_cloud/types/managed_ingestion_status_response.py +5 -3
  71. llama_cloud/types/markdown_element_node_parser.py +5 -3
  72. llama_cloud/types/markdown_node_parser.py +3 -2
  73. llama_cloud/types/metadata_filter.py +2 -2
  74. llama_cloud/types/metric_result.py +3 -3
  75. llama_cloud/types/node_parser.py +1 -1
  76. llama_cloud/types/open_ai_embedding.py +12 -6
  77. llama_cloud/types/organization.py +2 -2
  78. llama_cloud/types/page_splitter_node_parser.py +2 -2
  79. llama_cloud/types/paginated_list_pipeline_files_response.py +35 -0
  80. llama_cloud/types/parsing_job_structured_result.py +32 -0
  81. llama_cloud/types/permission.py +3 -3
  82. llama_cloud/types/pipeline.py +17 -6
  83. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  84. llama_cloud/types/pipeline_create.py +15 -4
  85. llama_cloud/types/pipeline_data_source.py +13 -7
  86. llama_cloud/types/pipeline_data_source_create.py +3 -1
  87. llama_cloud/types/pipeline_deployment.py +4 -4
  88. llama_cloud/types/pipeline_file.py +25 -10
  89. llama_cloud/types/pipeline_file_create.py +3 -1
  90. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  91. llama_cloud/types/plan.py +40 -0
  92. llama_cloud/types/playground_session.py +2 -2
  93. llama_cloud/types/preset_retrieval_params.py +14 -7
  94. llama_cloud/types/presigned_url.py +3 -1
  95. llama_cloud/types/project.py +2 -2
  96. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  97. llama_cloud/types/prompt_spec.py +4 -2
  98. llama_cloud/types/role.py +3 -3
  99. llama_cloud/types/sentence_splitter.py +4 -2
  100. llama_cloud/types/text_node.py +3 -3
  101. llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
  102. llama_cloud/types/token_text_splitter.py +1 -1
  103. llama_cloud/types/usage.py +41 -0
  104. llama_cloud/types/user_organization.py +9 -5
  105. llama_cloud/types/user_organization_create.py +4 -4
  106. llama_cloud/types/user_organization_delete.py +2 -2
  107. llama_cloud/types/user_organization_role.py +2 -2
  108. llama_cloud/types/value.py +5 -0
  109. llama_cloud/types/vertex_text_embedding.py +9 -5
  110. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +1 -1
  111. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +113 -99
  112. llama_cloud/types/data_sink_component.py +0 -20
  113. llama_cloud/types/data_source_component.py +0 -28
  114. llama_cloud/types/metadata_filter_value.py +0 -5
  115. llama_cloud/types/pipeline_data_source_component.py +0 -28
  116. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
  117. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +0 -0
@@ -0,0 +1,416 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+ import urllib.parse
5
+ from json.decoder import JSONDecodeError
6
+
7
+ from ...core.api_error import ApiError
8
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
9
+ from ...core.jsonable_encoder import jsonable_encoder
10
+ from ...core.remove_none_from_dict import remove_none_from_dict
11
+ from ...errors.unprocessable_entity_error import UnprocessableEntityError
12
+ from ...types.embedding_model_config import EmbeddingModelConfig
13
+ from ...types.embedding_model_config_update import EmbeddingModelConfigUpdate
14
+ from ...types.http_validation_error import HttpValidationError
15
+ from .types.embedding_model_config_create_embedding_config import EmbeddingModelConfigCreateEmbeddingConfig
16
+
17
+ try:
18
+ import pydantic
19
+ if pydantic.__version__.startswith("1."):
20
+ raise ImportError
21
+ import pydantic.v1 as pydantic # type: ignore
22
+ except ImportError:
23
+ import pydantic # type: ignore
24
+
25
+ # this is used as the default value for optional parameters
26
+ OMIT = typing.cast(typing.Any, ...)
27
+
28
+
29
class EmbeddingModelConfigsClient:
    """
    Synchronous client for the ``api/v1/embedding-model-configs`` endpoints.

    Every method follows the same response-handling pattern: any 2xx response
    is parsed (via pydantic) into the declared return type; a 422 response is
    raised as ``UnprocessableEntityError`` wrapping the server's validation
    payload; any other status is raised as ``ApiError`` carrying the JSON body,
    or the raw response text when the body is not valid JSON.
    """

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Wrapper providing the shared httpx client, base URL and auth headers.
        self._client_wrapper = client_wrapper

    def list_embedding_model_configs(self, *, project_id: str) -> typing.List[EmbeddingModelConfig]:
        """
        Parameters:
            - project_id: str.
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
            # remove_none_from_dict keeps optional params out of the query string.
            params=remove_none_from_dict({"project_id": project_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[EmbeddingModelConfig], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        # Fall through: unexpected status -> surface body as ApiError.
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def create_embedding_model_config(
        self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
    ) -> EmbeddingModelConfig:
        """
        Create a new embedding model configuration within a specified project.

        Parameters:
            - project_id: str.

            - name: str. The name of the embedding model config.

            - embedding_config: EmbeddingModelConfigCreateEmbeddingConfig. The embedding configuration for the embedding model config.
        ---
        from llama_cloud import (
            EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
        )
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.embedding_model_configs.create_embedding_model_config(
            project_id="project_id",
            name="name",
            embedding_config=EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding(
                type="VERTEXAI_EMBEDDING",
            ),
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
            params=remove_none_from_dict({"project_id": project_id}),
            json=jsonable_encoder({"name": name, "embedding_config": embedding_config}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def upsert_embedding_model_config(
        self,
        *,
        project_id: typing.Optional[str] = None,
        organization_id: typing.Optional[str] = None,
        request: EmbeddingModelConfigUpdate,
    ) -> EmbeddingModelConfig:
        """
        Upserts an embedding model config.
        Updates if an embedding model config with the same name and project_id already exists. Otherwise, creates a new embedding model config.

        Parameters:
            - project_id: typing.Optional[str].

            - organization_id: typing.Optional[str].

            - request: EmbeddingModelConfigUpdate.
        ---
        from llama_cloud import EmbeddingModelConfigUpdate
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.embedding_model_configs.upsert_embedding_model_config(
            request=EmbeddingModelConfigUpdate(),
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "PUT",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
            json=jsonable_encoder(request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def update_embedding_model_config(
        self, embedding_model_config_id: str, *, request: EmbeddingModelConfigUpdate
    ) -> EmbeddingModelConfig:
        """
        Update an embedding model config by ID.

        Parameters:
            - embedding_model_config_id: str.

            - request: EmbeddingModelConfigUpdate.
        ---
        from llama_cloud import EmbeddingModelConfigUpdate
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.embedding_model_configs.update_embedding_model_config(
            embedding_model_config_id="embedding_model_config_id",
            request=EmbeddingModelConfigUpdate(),
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "PUT",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
            ),
            json=jsonable_encoder(request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def delete_embedding_model_config(self, embedding_model_config_id: str) -> None:
        """
        Delete an embedding model config by ID.

        Parameters:
            - embedding_model_config_id: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.embedding_model_configs.delete_embedding_model_config(
            embedding_model_config_id="embedding_model_config_id",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "DELETE",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        # Delete returns no body on success.
        if 200 <= _response.status_code < 300:
            return
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
222
+
223
+
224
class AsyncEmbeddingModelConfigsClient:
    """
    Asynchronous counterpart of ``EmbeddingModelConfigsClient``.

    Identical endpoints and response handling, but every request is awaited
    on the wrapper's async httpx client: 2xx responses are parsed into the
    declared return type, 422 raises ``UnprocessableEntityError``, and any
    other status raises ``ApiError`` with the JSON body (or raw text when the
    body is not valid JSON).
    """

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # Wrapper providing the shared async httpx client, base URL and auth headers.
        self._client_wrapper = client_wrapper

    async def list_embedding_model_configs(self, *, project_id: str) -> typing.List[EmbeddingModelConfig]:
        """
        Parameters:
            - project_id: str.
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
            # remove_none_from_dict keeps optional params out of the query string.
            params=remove_none_from_dict({"project_id": project_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[EmbeddingModelConfig], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        # Fall through: unexpected status -> surface body as ApiError.
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def create_embedding_model_config(
        self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
    ) -> EmbeddingModelConfig:
        """
        Create a new embedding model configuration within a specified project.

        Parameters:
            - project_id: str.

            - name: str. The name of the embedding model config.

            - embedding_config: EmbeddingModelConfigCreateEmbeddingConfig. The embedding configuration for the embedding model config.
        ---
        from llama_cloud import (
            EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
        )
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.embedding_model_configs.create_embedding_model_config(
            project_id="project_id",
            name="name",
            embedding_config=EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding(
                type="VERTEXAI_EMBEDDING",
            ),
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
            params=remove_none_from_dict({"project_id": project_id}),
            json=jsonable_encoder({"name": name, "embedding_config": embedding_config}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def upsert_embedding_model_config(
        self,
        *,
        project_id: typing.Optional[str] = None,
        organization_id: typing.Optional[str] = None,
        request: EmbeddingModelConfigUpdate,
    ) -> EmbeddingModelConfig:
        """
        Upserts an embedding model config.
        Updates if an embedding model config with the same name and project_id already exists. Otherwise, creates a new embedding model config.

        Parameters:
            - project_id: typing.Optional[str].

            - organization_id: typing.Optional[str].

            - request: EmbeddingModelConfigUpdate.
        ---
        from llama_cloud import EmbeddingModelConfigUpdate
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.embedding_model_configs.upsert_embedding_model_config(
            request=EmbeddingModelConfigUpdate(),
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "PUT",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
            json=jsonable_encoder(request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def update_embedding_model_config(
        self, embedding_model_config_id: str, *, request: EmbeddingModelConfigUpdate
    ) -> EmbeddingModelConfig:
        """
        Update an embedding model config by ID.

        Parameters:
            - embedding_model_config_id: str.

            - request: EmbeddingModelConfigUpdate.
        ---
        from llama_cloud import EmbeddingModelConfigUpdate
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.embedding_model_configs.update_embedding_model_config(
            embedding_model_config_id="embedding_model_config_id",
            request=EmbeddingModelConfigUpdate(),
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "PUT",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
            ),
            json=jsonable_encoder(request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def delete_embedding_model_config(self, embedding_model_config_id: str) -> None:
        """
        Delete an embedding model config by ID.

        Parameters:
            - embedding_model_config_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.embedding_model_configs.delete_embedding_model_config(
            embedding_model_config_id="embedding_model_config_id",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "DELETE",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        # Delete returns no body on success.
        if 200 <= _response.status_code < 300:
            return
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
@@ -0,0 +1,23 @@
1
# This file was auto-generated by Fern from our API Definition.

# Re-export the create-time embedding-config union alias and each of its
# provider-specific members so callers can import them from this package
# directly; __all__ declares the public surface explicitly.
from .embedding_model_config_create_embedding_config import (
    EmbeddingModelConfigCreateEmbeddingConfig,
    EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
)

__all__ = [
    "EmbeddingModelConfigCreateEmbeddingConfig",
    "EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding",
    "EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding",
    "EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding",
    "EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding",
    "EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding",
    "EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding",
    "EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding",
]
@@ -0,0 +1,89 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from __future__ import annotations
4
+
5
+ import typing
6
+
7
+ import typing_extensions
8
+
9
+ from ....types.azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
10
+ from ....types.bedrock_embedding_config import BedrockEmbeddingConfig
11
+ from ....types.cohere_embedding_config import CohereEmbeddingConfig
12
+ from ....types.gemini_embedding_config import GeminiEmbeddingConfig
13
+ from ....types.hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
14
+ from ....types.open_ai_embedding_config import OpenAiEmbeddingConfig
15
+ from ....types.vertex_ai_embedding_config import VertexAiEmbeddingConfig
16
+
17
+
18
class EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
    """Azure OpenAI member of the create-time embedding-config union.

    The ``type`` literal is the tag that identifies this variant when the
    union is parsed.
    """

    type: typing_extensions.Literal["AZURE_EMBEDDING"]

    class Config:
        allow_population_by_field_name = True
        smart_union = True
        frozen = True
25
+
26
+
27
class EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
    """Bedrock member of the create-time embedding-config union.

    The ``type`` literal is the tag that identifies this variant when the
    union is parsed.
    """

    type: typing_extensions.Literal["BEDROCK_EMBEDDING"]

    class Config:
        allow_population_by_field_name = True
        smart_union = True
        frozen = True
34
+
35
+
36
class EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
    """Cohere member of the create-time embedding-config union.

    The ``type`` literal is the tag that identifies this variant when the
    union is parsed.
    """

    type: typing_extensions.Literal["COHERE_EMBEDDING"]

    class Config:
        allow_population_by_field_name = True
        smart_union = True
        frozen = True
43
+
44
+
45
class EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
    """Gemini member of the create-time embedding-config union.

    The ``type`` literal is the tag that identifies this variant when the
    union is parsed.
    """

    type: typing_extensions.Literal["GEMINI_EMBEDDING"]

    class Config:
        allow_population_by_field_name = True
        smart_union = True
        frozen = True
52
+
53
+
54
class EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmbeddingConfig):
    """Hugging Face Inference API member of the create-time embedding-config union.

    The ``type`` literal is the tag that identifies this variant when the
    union is parsed.
    """

    type: typing_extensions.Literal["HUGGINGFACE_API_EMBEDDING"]

    class Config:
        allow_population_by_field_name = True
        smart_union = True
        frozen = True
61
+
62
+
63
class EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
    """OpenAI member of the create-time embedding-config union.

    The ``type`` literal is the tag that identifies this variant when the
    union is parsed.
    """

    type: typing_extensions.Literal["OPENAI_EMBEDDING"]

    class Config:
        allow_population_by_field_name = True
        smart_union = True
        frozen = True
70
+
71
+
72
class EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding(VertexAiEmbeddingConfig):
    """Vertex AI member of the create-time embedding-config union.

    The ``type`` literal is the tag that identifies this variant when the
    union is parsed.
    """

    type: typing_extensions.Literal["VERTEXAI_EMBEDDING"]

    class Config:
        allow_population_by_field_name = True
        smart_union = True
        frozen = True
79
+
80
+
81
# Tagged union of every supported embedding-provider configuration accepted
# when creating an embedding model config; the ``type`` literal declared on
# each member class serves as the discriminant during parsing.
EmbeddingModelConfigCreateEmbeddingConfig = typing.Union[
    EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
    EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
]