llama-cloud 0.1.18__py3-none-any.whl → 0.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic; see the registry's advisory page for this release for more details.

Files changed (52)
  1. llama_cloud/__init__.py +36 -16
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/resources/__init__.py +20 -0
  4. llama_cloud/resources/beta/__init__.py +2 -0
  5. llama_cloud/resources/beta/client.py +371 -0
  6. llama_cloud/resources/embedding_model_configs/client.py +82 -22
  7. llama_cloud/resources/llama_extract/__init__.py +21 -0
  8. llama_cloud/resources/llama_extract/client.py +227 -114
  9. llama_cloud/resources/llama_extract/types/__init__.py +21 -0
  10. llama_cloud/resources/parsing/client.py +115 -4
  11. llama_cloud/resources/pipelines/client.py +105 -0
  12. llama_cloud/types/__init__.py +26 -24
  13. llama_cloud/types/{extract_schema_validate_request.py → audio_block.py} +5 -3
  14. llama_cloud/types/batch.py +47 -0
  15. llama_cloud/types/batch_item.py +40 -0
  16. llama_cloud/types/{extract_agent_update.py → batch_paginated_list.py} +6 -9
  17. llama_cloud/types/{extract_agent_create.py → batch_public_output.py} +7 -10
  18. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  19. llama_cloud/types/cloud_postgres_vector_store.py +2 -0
  20. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  21. llama_cloud/types/extract_config.py +2 -0
  22. llama_cloud/types/extract_job_create.py +1 -2
  23. llama_cloud/types/fail_page_mode.py +29 -0
  24. llama_cloud/types/{extract_job_create_batch.py → file_count_by_status_response.py} +7 -12
  25. llama_cloud/types/file_parse_public.py +36 -0
  26. llama_cloud/types/job_names.py +8 -12
  27. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +13 -1
  28. llama_cloud/types/llama_parse_parameters.py +7 -0
  29. llama_cloud/types/markdown_node_parser.py +4 -0
  30. llama_cloud/types/message_role.py +4 -0
  31. llama_cloud/types/pg_vector_distance_method.py +43 -0
  32. llama_cloud/types/pg_vector_hnsw_settings.py +45 -0
  33. llama_cloud/types/pg_vector_vector_type.py +35 -0
  34. llama_cloud/types/pipeline_create.py +1 -0
  35. llama_cloud/types/pipeline_data_source.py +3 -0
  36. llama_cloud/types/pipeline_data_source_status.py +33 -0
  37. llama_cloud/types/pipeline_file.py +1 -0
  38. llama_cloud/types/prompt_conf.py +3 -0
  39. llama_cloud/types/struct_parse_conf.py +4 -1
  40. llama_cloud/types/token_text_splitter.py +3 -0
  41. {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.19.dist-info}/METADATA +1 -1
  42. {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.19.dist-info}/RECORD +52 -41
  43. /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema.py +0 -0
  44. /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema_zero_value.py +0 -0
  45. /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema.py +0 -0
  46. /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema_zero_value.py +0 -0
  47. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override.py +0 -0
  48. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override_zero_value.py +0 -0
  49. /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema.py +0 -0
  50. /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
  51. {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.19.dist-info}/LICENSE +0 -0
  52. {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.19.dist-info}/WHEEL +0 -0
@@ -30,24 +30,26 @@ class EmbeddingModelConfigsClient:
30
30
  def __init__(self, *, client_wrapper: SyncClientWrapper):
31
31
  self._client_wrapper = client_wrapper
32
32
 
33
- def list_embedding_model_configs(self, *, project_id: str) -> typing.List[EmbeddingModelConfig]:
33
+ def list_embedding_model_configs(
34
+ self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
35
+ ) -> typing.List[EmbeddingModelConfig]:
34
36
  """
35
37
  Parameters:
36
- - project_id: str.
38
+ - project_id: typing.Optional[str].
39
+
40
+ - organization_id: typing.Optional[str].
37
41
  ---
38
42
  from llama_cloud.client import LlamaCloud
39
43
 
40
44
  client = LlamaCloud(
41
45
  token="YOUR_TOKEN",
42
46
  )
43
- client.embedding_model_configs.list_embedding_model_configs(
44
- project_id="string",
45
- )
47
+ client.embedding_model_configs.list_embedding_model_configs()
46
48
  """
47
49
  _response = self._client_wrapper.httpx_client.request(
48
50
  "GET",
49
51
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
50
- params=remove_none_from_dict({"project_id": project_id}),
52
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
51
53
  headers=self._client_wrapper.get_headers(),
52
54
  timeout=60,
53
55
  )
@@ -62,13 +64,20 @@ class EmbeddingModelConfigsClient:
62
64
  raise ApiError(status_code=_response.status_code, body=_response_json)
63
65
 
64
66
  def create_embedding_model_config(
65
- self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
67
+ self,
68
+ *,
69
+ project_id: typing.Optional[str] = None,
70
+ organization_id: typing.Optional[str] = None,
71
+ name: str,
72
+ embedding_config: EmbeddingModelConfigCreateEmbeddingConfig,
66
73
  ) -> EmbeddingModelConfig:
67
74
  """
68
75
  Create a new embedding model configuration within a specified project.
69
76
 
70
77
  Parameters:
71
- - project_id: str.
78
+ - project_id: typing.Optional[str].
79
+
80
+ - organization_id: typing.Optional[str].
72
81
 
73
82
  - name: str. The name of the embedding model config.
74
83
 
@@ -77,7 +86,7 @@ class EmbeddingModelConfigsClient:
77
86
  _response = self._client_wrapper.httpx_client.request(
78
87
  "POST",
79
88
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
80
- params=remove_none_from_dict({"project_id": project_id}),
89
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
81
90
  json=jsonable_encoder({"name": name, "embedding_config": embedding_config}),
82
91
  headers=self._client_wrapper.get_headers(),
83
92
  timeout=60,
@@ -129,7 +138,12 @@ class EmbeddingModelConfigsClient:
129
138
  raise ApiError(status_code=_response.status_code, body=_response_json)
130
139
 
131
140
  def update_embedding_model_config(
132
- self, embedding_model_config_id: str, *, request: EmbeddingModelConfigUpdate
141
+ self,
142
+ embedding_model_config_id: str,
143
+ *,
144
+ project_id: typing.Optional[str] = None,
145
+ organization_id: typing.Optional[str] = None,
146
+ request: EmbeddingModelConfigUpdate,
133
147
  ) -> EmbeddingModelConfig:
134
148
  """
135
149
  Update an embedding model config by ID.
@@ -137,6 +151,10 @@ class EmbeddingModelConfigsClient:
137
151
  Parameters:
138
152
  - embedding_model_config_id: str.
139
153
 
154
+ - project_id: typing.Optional[str].
155
+
156
+ - organization_id: typing.Optional[str].
157
+
140
158
  - request: EmbeddingModelConfigUpdate.
141
159
  """
142
160
  _response = self._client_wrapper.httpx_client.request(
@@ -144,6 +162,7 @@ class EmbeddingModelConfigsClient:
144
162
  urllib.parse.urljoin(
145
163
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
146
164
  ),
165
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
147
166
  json=jsonable_encoder(request),
148
167
  headers=self._client_wrapper.get_headers(),
149
168
  timeout=60,
@@ -158,12 +177,22 @@ class EmbeddingModelConfigsClient:
158
177
  raise ApiError(status_code=_response.status_code, body=_response.text)
159
178
  raise ApiError(status_code=_response.status_code, body=_response_json)
160
179
 
161
- def delete_embedding_model_config(self, embedding_model_config_id: str) -> None:
180
+ def delete_embedding_model_config(
181
+ self,
182
+ embedding_model_config_id: str,
183
+ *,
184
+ project_id: typing.Optional[str] = None,
185
+ organization_id: typing.Optional[str] = None,
186
+ ) -> None:
162
187
  """
163
188
  Delete an embedding model config by ID.
164
189
 
165
190
  Parameters:
166
191
  - embedding_model_config_id: str.
192
+
193
+ - project_id: typing.Optional[str].
194
+
195
+ - organization_id: typing.Optional[str].
167
196
  ---
168
197
  from llama_cloud.client import LlamaCloud
169
198
 
@@ -179,6 +208,7 @@ class EmbeddingModelConfigsClient:
179
208
  urllib.parse.urljoin(
180
209
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
181
210
  ),
211
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
182
212
  headers=self._client_wrapper.get_headers(),
183
213
  timeout=60,
184
214
  )
@@ -197,24 +227,26 @@ class AsyncEmbeddingModelConfigsClient:
197
227
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
198
228
  self._client_wrapper = client_wrapper
199
229
 
200
- async def list_embedding_model_configs(self, *, project_id: str) -> typing.List[EmbeddingModelConfig]:
230
+ async def list_embedding_model_configs(
231
+ self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
232
+ ) -> typing.List[EmbeddingModelConfig]:
201
233
  """
202
234
  Parameters:
203
- - project_id: str.
235
+ - project_id: typing.Optional[str].
236
+
237
+ - organization_id: typing.Optional[str].
204
238
  ---
205
239
  from llama_cloud.client import AsyncLlamaCloud
206
240
 
207
241
  client = AsyncLlamaCloud(
208
242
  token="YOUR_TOKEN",
209
243
  )
210
- await client.embedding_model_configs.list_embedding_model_configs(
211
- project_id="string",
212
- )
244
+ await client.embedding_model_configs.list_embedding_model_configs()
213
245
  """
214
246
  _response = await self._client_wrapper.httpx_client.request(
215
247
  "GET",
216
248
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
217
- params=remove_none_from_dict({"project_id": project_id}),
249
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
218
250
  headers=self._client_wrapper.get_headers(),
219
251
  timeout=60,
220
252
  )
@@ -229,13 +261,20 @@ class AsyncEmbeddingModelConfigsClient:
229
261
  raise ApiError(status_code=_response.status_code, body=_response_json)
230
262
 
231
263
  async def create_embedding_model_config(
232
- self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
264
+ self,
265
+ *,
266
+ project_id: typing.Optional[str] = None,
267
+ organization_id: typing.Optional[str] = None,
268
+ name: str,
269
+ embedding_config: EmbeddingModelConfigCreateEmbeddingConfig,
233
270
  ) -> EmbeddingModelConfig:
234
271
  """
235
272
  Create a new embedding model configuration within a specified project.
236
273
 
237
274
  Parameters:
238
- - project_id: str.
275
+ - project_id: typing.Optional[str].
276
+
277
+ - organization_id: typing.Optional[str].
239
278
 
240
279
  - name: str. The name of the embedding model config.
241
280
 
@@ -244,7 +283,7 @@ class AsyncEmbeddingModelConfigsClient:
244
283
  _response = await self._client_wrapper.httpx_client.request(
245
284
  "POST",
246
285
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
247
- params=remove_none_from_dict({"project_id": project_id}),
286
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
248
287
  json=jsonable_encoder({"name": name, "embedding_config": embedding_config}),
249
288
  headers=self._client_wrapper.get_headers(),
250
289
  timeout=60,
@@ -296,7 +335,12 @@ class AsyncEmbeddingModelConfigsClient:
296
335
  raise ApiError(status_code=_response.status_code, body=_response_json)
297
336
 
298
337
  async def update_embedding_model_config(
299
- self, embedding_model_config_id: str, *, request: EmbeddingModelConfigUpdate
338
+ self,
339
+ embedding_model_config_id: str,
340
+ *,
341
+ project_id: typing.Optional[str] = None,
342
+ organization_id: typing.Optional[str] = None,
343
+ request: EmbeddingModelConfigUpdate,
300
344
  ) -> EmbeddingModelConfig:
301
345
  """
302
346
  Update an embedding model config by ID.
@@ -304,6 +348,10 @@ class AsyncEmbeddingModelConfigsClient:
304
348
  Parameters:
305
349
  - embedding_model_config_id: str.
306
350
 
351
+ - project_id: typing.Optional[str].
352
+
353
+ - organization_id: typing.Optional[str].
354
+
307
355
  - request: EmbeddingModelConfigUpdate.
308
356
  """
309
357
  _response = await self._client_wrapper.httpx_client.request(
@@ -311,6 +359,7 @@ class AsyncEmbeddingModelConfigsClient:
311
359
  urllib.parse.urljoin(
312
360
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
313
361
  ),
362
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
314
363
  json=jsonable_encoder(request),
315
364
  headers=self._client_wrapper.get_headers(),
316
365
  timeout=60,
@@ -325,12 +374,22 @@ class AsyncEmbeddingModelConfigsClient:
325
374
  raise ApiError(status_code=_response.status_code, body=_response.text)
326
375
  raise ApiError(status_code=_response.status_code, body=_response_json)
327
376
 
328
- async def delete_embedding_model_config(self, embedding_model_config_id: str) -> None:
377
+ async def delete_embedding_model_config(
378
+ self,
379
+ embedding_model_config_id: str,
380
+ *,
381
+ project_id: typing.Optional[str] = None,
382
+ organization_id: typing.Optional[str] = None,
383
+ ) -> None:
329
384
  """
330
385
  Delete an embedding model config by ID.
331
386
 
332
387
  Parameters:
333
388
  - embedding_model_config_id: str.
389
+
390
+ - project_id: typing.Optional[str].
391
+
392
+ - organization_id: typing.Optional[str].
334
393
  ---
335
394
  from llama_cloud.client import AsyncLlamaCloud
336
395
 
@@ -346,6 +405,7 @@ class AsyncEmbeddingModelConfigsClient:
346
405
  urllib.parse.urljoin(
347
406
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
348
407
  ),
408
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
349
409
  headers=self._client_wrapper.get_headers(),
350
410
  timeout=60,
351
411
  )
@@ -1,2 +1,23 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
+ from .types import (
4
+ ExtractAgentCreateDataSchema,
5
+ ExtractAgentCreateDataSchemaZeroValue,
6
+ ExtractAgentUpdateDataSchema,
7
+ ExtractAgentUpdateDataSchemaZeroValue,
8
+ ExtractJobCreateBatchDataSchemaOverride,
9
+ ExtractJobCreateBatchDataSchemaOverrideZeroValue,
10
+ ExtractSchemaValidateRequestDataSchema,
11
+ ExtractSchemaValidateRequestDataSchemaZeroValue,
12
+ )
13
+
14
+ __all__ = [
15
+ "ExtractAgentCreateDataSchema",
16
+ "ExtractAgentCreateDataSchemaZeroValue",
17
+ "ExtractAgentUpdateDataSchema",
18
+ "ExtractAgentUpdateDataSchemaZeroValue",
19
+ "ExtractJobCreateBatchDataSchemaOverride",
20
+ "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
21
+ "ExtractSchemaValidateRequestDataSchema",
22
+ "ExtractSchemaValidateRequestDataSchemaZeroValue",
23
+ ]