llama-cloud 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of llama-cloud might be problematic; see the registry listing for more details.

Files changed (38)
  1. llama_cloud/__init__.py +64 -0
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/resources/__init__.py +22 -1
  4. llama_cloud/resources/data_sinks/client.py +12 -6
  5. llama_cloud/resources/embedding_model_configs/__init__.py +23 -0
  6. llama_cloud/resources/embedding_model_configs/client.py +360 -0
  7. llama_cloud/resources/embedding_model_configs/types/__init__.py +23 -0
  8. llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py +89 -0
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +265 -34
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +65 -0
  14. llama_cloud/resources/parsing/client.py +157 -0
  15. llama_cloud/resources/pipelines/client.py +177 -14
  16. llama_cloud/resources/projects/client.py +71 -0
  17. llama_cloud/types/__init__.py +48 -0
  18. llama_cloud/types/base.py +29 -0
  19. llama_cloud/types/cloud_one_drive_data_source.py +1 -0
  20. llama_cloud/types/cloud_postgres_vector_store.py +1 -1
  21. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  22. llama_cloud/types/embedding_model_config.py +43 -0
  23. llama_cloud/types/embedding_model_config_embedding_config.py +89 -0
  24. llama_cloud/types/embedding_model_config_update.py +33 -0
  25. llama_cloud/types/embedding_model_config_update_embedding_config.py +89 -0
  26. llama_cloud/types/interval_usage_and_plan.py +36 -0
  27. llama_cloud/types/llama_parse_parameters.py +10 -0
  28. llama_cloud/types/markdown_node_parser.py +2 -1
  29. llama_cloud/types/paginated_list_pipeline_files_response.py +35 -0
  30. llama_cloud/types/pipeline.py +1 -0
  31. llama_cloud/types/pipeline_create.py +1 -0
  32. llama_cloud/types/pipeline_file.py +1 -0
  33. llama_cloud/types/plan.py +40 -0
  34. llama_cloud/types/usage.py +41 -0
  35. {llama_cloud-0.1.3.dist-info → llama_cloud-0.1.5.dist-info}/METADATA +1 -2
  36. {llama_cloud-0.1.3.dist-info → llama_cloud-0.1.5.dist-info}/RECORD +38 -25
  37. {llama_cloud-0.1.3.dist-info → llama_cloud-0.1.5.dist-info}/WHEEL +1 -1
  38. {llama_cloud-0.1.3.dist-info → llama_cloud-0.1.5.dist-info}/LICENSE +0 -0
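
Taken together, the hunks below add an embedding_model_configs resource, organization_id parameters on the pipeline search/create/upsert methods, a paginated files2 listing endpoint, and a per-project usage endpoint. A minimal sketch of how the new 0.1.5 surface might be called, based only on the signatures shown in this diff (all IDs and the token are placeholders):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token

# search_pipelines, create_pipeline and upsert_pipeline now accept organization_id
pipelines = client.pipelines.search_pipelines(
    project_id="proj_123",        # hypothetical ID
    organization_id="org_456",    # hypothetical ID
)

# new paginated file listing backed by the files2 endpoint
page = client.pipelines.list_pipeline_files_2(pipeline_id="pipe_789", limit=50, offset=0)

# new per-project usage endpoint returning IntervalUsageAndPlan
usage = client.projects.get_project_usage("proj_123", organization_id="org_456")
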
@@ -23,6 +23,7 @@ from ...types.input_message import InputMessage
  from ...types.llama_parse_parameters import LlamaParseParameters
  from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
  from ...types.metadata_filters import MetadataFilters
+ from ...types.paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
  from ...types.pipeline import Pipeline
  from ...types.pipeline_create import PipelineCreate
  from ...types.pipeline_data_source import PipelineDataSource
@@ -59,22 +60,25 @@ class PipelinesClient:
  def search_pipelines(
  self,
  *,
+ project_id: typing.Optional[str] = None,
  project_name: typing.Optional[str] = None,
  pipeline_name: typing.Optional[str] = None,
  pipeline_type: typing.Optional[PipelineType] = None,
- project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
  ) -> typing.List[Pipeline]:
  """
  Search for pipelines by various parameters.

  Parameters:
+ - project_id: typing.Optional[str].
+
  - project_name: typing.Optional[str].

  - pipeline_name: typing.Optional[str].

  - pipeline_type: typing.Optional[PipelineType].

- - project_id: typing.Optional[str].
+ - organization_id: typing.Optional[str].
  ---
  from llama_cloud import PipelineType
  from llama_cloud.client import LlamaCloud
@@ -91,10 +95,11 @@ class PipelinesClient:
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
  params=remove_none_from_dict(
  {
+ "project_id": project_id,
  "project_name": project_name,
  "pipeline_name": pipeline_name,
  "pipeline_type": pipeline_type,
- "project_id": project_id,
+ "organization_id": organization_id,
  }
  ),
  headers=self._client_wrapper.get_headers(),
@@ -110,19 +115,27 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def create_pipeline(self, *, project_id: typing.Optional[str] = None, request: PipelineCreate) -> Pipeline:
+ def create_pipeline(
+ self,
+ *,
+ project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
+ request: PipelineCreate,
+ ) -> Pipeline:
  """
  Create a new pipeline for a project.

  Parameters:
  - project_id: typing.Optional[str].

+ - organization_id: typing.Optional[str].
+
  - request: PipelineCreate.
  """
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
- params=remove_none_from_dict({"project_id": project_id}),
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -137,7 +150,13 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def upsert_pipeline(self, *, project_id: typing.Optional[str] = None, request: PipelineCreate) -> Pipeline:
+ def upsert_pipeline(
+ self,
+ *,
+ project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
+ request: PipelineCreate,
+ ) -> Pipeline:
  """
  Upsert a pipeline for a project.
  Updates if a pipeline with the same name and project_id already exists. Otherwise, creates a new pipeline.
@@ -145,12 +164,14 @@ class PipelinesClient:
  Parameters:
  - project_id: typing.Optional[str].

+ - organization_id: typing.Optional[str].
+
  - request: PipelineCreate.
  """
  _response = self._client_wrapper.httpx_client.request(
  "PUT",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
- params=remove_none_from_dict({"project_id": project_id}),
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -196,6 +217,7 @@ class PipelinesClient:
  transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
  configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
  data_sink_id: typing.Optional[str] = OMIT,
+ embedding_model_config_id: typing.Optional[str] = OMIT,
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
@@ -217,6 +239,8 @@ class PipelinesClient:
  - data_sink_id: typing.Optional[str].

+ - embedding_model_config_id: typing.Optional[str].
+
  - data_sink: typing.Optional[DataSinkCreate].

  - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
@@ -238,6 +262,8 @@ class PipelinesClient:
  _request["configured_transformations"] = configured_transformations
  if data_sink_id is not OMIT:
  _request["data_sink_id"] = data_sink_id
+ if embedding_model_config_id is not OMIT:
+ _request["embedding_model_config_id"] = embedding_model_config_id
  if data_sink is not OMIT:
  _request["data_sink"] = data_sink
  if preset_retrieval_parameters is not OMIT:
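
The two added lines above follow the generated client's OMIT-sentinel convention: a field is written into the request body only when the caller actually supplied it, so passing None stays distinguishable from not passing the argument at all. A minimal standalone sketch of that pattern (the sentinel construction here is illustrative, not the SDK's internal definition):

import typing

OMIT = typing.cast(typing.Any, object())  # unique sentinel meaning "argument not given"

def build_update_body(
    data_sink_id: typing.Any = OMIT,
    embedding_model_config_id: typing.Any = OMIT,
) -> typing.Dict[str, typing.Any]:
    body: typing.Dict[str, typing.Any] = {}
    if data_sink_id is not OMIT:
        body["data_sink_id"] = data_sink_id
    if embedding_model_config_id is not OMIT:
        body["embedding_model_config_id"] = embedding_model_config_id
    return body

print(build_update_body())                                  # {}
print(build_update_body(embedding_model_config_id=None))    # {'embedding_model_config_id': None}
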
@@ -642,6 +668,62 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def list_pipeline_files_2(
+ self,
+ pipeline_id: str,
+ *,
+ data_source_id: typing.Optional[str] = None,
+ only_manually_uploaded: typing.Optional[bool] = None,
+ limit: typing.Optional[int] = None,
+ offset: typing.Optional[int] = None,
+ ) -> PaginatedListPipelineFilesResponse:
+ """
+ Get files for a pipeline.
+
+ Parameters:
+ - pipeline_id: str.
+
+ - data_source_id: typing.Optional[str].
+
+ - only_manually_uploaded: typing.Optional[bool].
+
+ - limit: typing.Optional[int].
+
+ - offset: typing.Optional[int].
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.list_pipeline_files_2(
+ pipeline_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files2"),
+ params=remove_none_from_dict(
+ {
+ "data_source_id": data_source_id,
+ "only_manually_uploaded": only_manually_uploaded,
+ "limit": limit,
+ "offset": offset,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(PaginatedListPipelineFilesResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
  """
  Get status of a file for a pipeline.
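
Since list_pipeline_files_2 exposes limit and offset, callers can walk a large pipeline page by page. A hedged sketch, assuming PaginatedListPipelineFilesResponse carries the page's files in a list attribute (the exact field names are not visible in this diff, so `items` is a guess guarded with getattr):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

offset, limit = 0, 100
while True:
    page = client.pipelines.list_pipeline_files_2(
        pipeline_id="pipe_789",        # hypothetical ID
        only_manually_uploaded=True,
        limit=limit,
        offset=offset,
    )
    files = getattr(page, "items", None) or []  # attribute name is an assumption
    for f in files:
        print(f)
    if len(files) < limit:
        break
    offset += limit
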
@@ -1616,22 +1698,25 @@ class AsyncPipelinesClient:
  async def search_pipelines(
  self,
  *,
+ project_id: typing.Optional[str] = None,
  project_name: typing.Optional[str] = None,
  pipeline_name: typing.Optional[str] = None,
  pipeline_type: typing.Optional[PipelineType] = None,
- project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
  ) -> typing.List[Pipeline]:
  """
  Search for pipelines by various parameters.

  Parameters:
+ - project_id: typing.Optional[str].
+
  - project_name: typing.Optional[str].

  - pipeline_name: typing.Optional[str].

  - pipeline_type: typing.Optional[PipelineType].

- - project_id: typing.Optional[str].
+ - organization_id: typing.Optional[str].
  ---
  from llama_cloud import PipelineType
  from llama_cloud.client import AsyncLlamaCloud
@@ -1648,10 +1733,11 @@ class AsyncPipelinesClient:
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
  params=remove_none_from_dict(
  {
+ "project_id": project_id,
  "project_name": project_name,
  "pipeline_name": pipeline_name,
  "pipeline_type": pipeline_type,
- "project_id": project_id,
+ "organization_id": organization_id,
  }
  ),
  headers=self._client_wrapper.get_headers(),
@@ -1667,19 +1753,27 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def create_pipeline(self, *, project_id: typing.Optional[str] = None, request: PipelineCreate) -> Pipeline:
+ async def create_pipeline(
+ self,
+ *,
+ project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
+ request: PipelineCreate,
+ ) -> Pipeline:
  """
  Create a new pipeline for a project.

  Parameters:
  - project_id: typing.Optional[str].

+ - organization_id: typing.Optional[str].
+
  - request: PipelineCreate.
  """
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
- params=remove_none_from_dict({"project_id": project_id}),
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1694,7 +1788,13 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def upsert_pipeline(self, *, project_id: typing.Optional[str] = None, request: PipelineCreate) -> Pipeline:
+ async def upsert_pipeline(
+ self,
+ *,
+ project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
+ request: PipelineCreate,
+ ) -> Pipeline:
  """
  Upsert a pipeline for a project.
  Updates if a pipeline with the same name and project_id already exists. Otherwise, creates a new pipeline.
@@ -1702,12 +1802,14 @@ class AsyncPipelinesClient:
  Parameters:
  - project_id: typing.Optional[str].

+ - organization_id: typing.Optional[str].
+
  - request: PipelineCreate.
  """
  _response = await self._client_wrapper.httpx_client.request(
  "PUT",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
- params=remove_none_from_dict({"project_id": project_id}),
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1753,6 +1855,7 @@ class AsyncPipelinesClient:
  transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
  configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
  data_sink_id: typing.Optional[str] = OMIT,
+ embedding_model_config_id: typing.Optional[str] = OMIT,
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
  preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
  eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
@@ -1774,6 +1877,8 @@ class AsyncPipelinesClient:
  - data_sink_id: typing.Optional[str].

+ - embedding_model_config_id: typing.Optional[str].
+
  - data_sink: typing.Optional[DataSinkCreate].

  - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
@@ -1795,6 +1900,8 @@ class AsyncPipelinesClient:
  _request["configured_transformations"] = configured_transformations
  if data_sink_id is not OMIT:
  _request["data_sink_id"] = data_sink_id
+ if embedding_model_config_id is not OMIT:
+ _request["embedding_model_config_id"] = embedding_model_config_id
  if data_sink is not OMIT:
  _request["data_sink"] = data_sink
  if preset_retrieval_parameters is not OMIT:
@@ -2201,6 +2308,62 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def list_pipeline_files_2(
+ self,
+ pipeline_id: str,
+ *,
+ data_source_id: typing.Optional[str] = None,
+ only_manually_uploaded: typing.Optional[bool] = None,
+ limit: typing.Optional[int] = None,
+ offset: typing.Optional[int] = None,
+ ) -> PaginatedListPipelineFilesResponse:
+ """
+ Get files for a pipeline.
+
+ Parameters:
+ - pipeline_id: str.
+
+ - data_source_id: typing.Optional[str].
+
+ - only_manually_uploaded: typing.Optional[bool].
+
+ - limit: typing.Optional[int].
+
+ - offset: typing.Optional[int].
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.list_pipeline_files_2(
+ pipeline_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files2"),
+ params=remove_none_from_dict(
+ {
+ "data_source_id": data_source_id,
+ "only_manually_uploaded": only_manually_uploaded,
+ "limit": limit,
+ "offset": offset,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(PaginatedListPipelineFilesResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
  """
  Get status of a file for a pipeline.
@@ -11,6 +11,7 @@ from ...core.remove_none_from_dict import remove_none_from_dict
  from ...errors.unprocessable_entity_error import UnprocessableEntityError
  from ...types.eval_dataset import EvalDataset
  from ...types.http_validation_error import HttpValidationError
+ from ...types.interval_usage_and_plan import IntervalUsageAndPlan
  from ...types.local_eval import LocalEval
  from ...types.local_eval_results import LocalEvalResults
  from ...types.local_eval_sets import LocalEvalSets
@@ -248,6 +249,41 @@ class ProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def get_project_usage(
+ self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+ ) -> IntervalUsageAndPlan:
+ """
+ Get usage for a project
+
+ Parameters:
+ - project_id: typing.Optional[str].
+
+ - organization_id: typing.Optional[str].
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.projects.get_project_usage()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/usage"),
+ params=remove_none_from_dict({"organization_id": organization_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(IntervalUsageAndPlan, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
  """
  List eval datasets for a project.
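
A brief sketch of calling the new usage endpoint from the synchronous client; the project and organization IDs are placeholders, and the shape of the returned IntervalUsageAndPlan (which ships in this release alongside the Plan and Usage types) is not shown in this diff:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

interval = client.projects.get_project_usage(
    "proj_123",                   # positional project_id, hypothetical
    organization_id="org_456",    # optional, hypothetical
)
print(interval.json())            # pydantic models expose .json() for quick inspection
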
@@ -844,6 +880,41 @@ class AsyncProjectsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def get_project_usage(
+ self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+ ) -> IntervalUsageAndPlan:
+ """
+ Get usage for a project
+
+ Parameters:
+ - project_id: typing.Optional[str].
+
+ - organization_id: typing.Optional[str].
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.projects.get_project_usage()
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/usage"),
+ params=remove_none_from_dict({"organization_id": organization_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(IntervalUsageAndPlan, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
  """
  List eval datasets for a project.
@@ -67,6 +67,28 @@ from .data_source_create_custom_metadata_value import DataSourceCreateCustomMeta
  from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
  from .data_source_definition import DataSourceDefinition
  from .element_segmentation_config import ElementSegmentationConfig
+ from .embedding_model_config import EmbeddingModelConfig
+ from .embedding_model_config_embedding_config import (
+ EmbeddingModelConfigEmbeddingConfig,
+ EmbeddingModelConfigEmbeddingConfig_AzureEmbedding,
+ EmbeddingModelConfigEmbeddingConfig_BedrockEmbedding,
+ EmbeddingModelConfigEmbeddingConfig_CohereEmbedding,
+ EmbeddingModelConfigEmbeddingConfig_GeminiEmbedding,
+ EmbeddingModelConfigEmbeddingConfig_HuggingfaceApiEmbedding,
+ EmbeddingModelConfigEmbeddingConfig_OpenaiEmbedding,
+ EmbeddingModelConfigEmbeddingConfig_VertexaiEmbedding,
+ )
+ from .embedding_model_config_update import EmbeddingModelConfigUpdate
+ from .embedding_model_config_update_embedding_config import (
+ EmbeddingModelConfigUpdateEmbeddingConfig,
+ EmbeddingModelConfigUpdateEmbeddingConfig_AzureEmbedding,
+ EmbeddingModelConfigUpdateEmbeddingConfig_BedrockEmbedding,
+ EmbeddingModelConfigUpdateEmbeddingConfig_CohereEmbedding,
+ EmbeddingModelConfigUpdateEmbeddingConfig_GeminiEmbedding,
+ EmbeddingModelConfigUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
+ EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding,
+ EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding,
+ )
  from .eval_dataset import EvalDataset
  from .eval_dataset_job_params import EvalDatasetJobParams
  from .eval_dataset_job_record import EvalDatasetJobRecord
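
Judging by the suffixed member names (EmbeddingModelConfigEmbeddingConfig_AzureEmbedding, _OpenaiEmbedding, and so on), the new embedding config types appear to follow the same provider-discriminated union pattern the SDK already uses for PipelineTransformConfig_Advanced / PipelineTransformConfig_Auto, with one variant per supported embedding backend (Azure, Bedrock, Cohere, Gemini, HuggingFace Inference API, OpenAI, Vertex AI).
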
@@ -93,6 +115,7 @@ from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApi
  from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
  from .ingestion_error_response import IngestionErrorResponse
  from .input_message import InputMessage
+ from .interval_usage_and_plan import IntervalUsageAndPlan
  from .job_name_mapping import JobNameMapping
  from .llama_parse_parameters import LlamaParseParameters
  from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
@@ -126,6 +149,7 @@ from .page_screenshot_metadata import PageScreenshotMetadata
  from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
  from .page_segmentation_config import PageSegmentationConfig
  from .page_splitter_node_parser import PageSplitterNodeParser
+ from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
  from .parser_languages import ParserLanguages
  from .parsing_history_item import ParsingHistoryItem
  from .parsing_job import ParsingJob
@@ -176,6 +200,7 @@ from .pipeline_transform_config import (
  PipelineTransformConfig_Auto,
  )
  from .pipeline_type import PipelineType
+ from .plan import Plan
  from .playground_session import PlaygroundSession
  from .pooling import Pooling
  from .preset_retrieval_params import PresetRetrievalParams
@@ -201,6 +226,7 @@ from .text_node_with_score import TextNodeWithScore
  from .token_chunking_config import TokenChunkingConfig
  from .token_text_splitter import TokenTextSplitter
  from .transformation_category_names import TransformationCategoryNames
+ from .usage import Usage
  from .user_organization import UserOrganization
  from .user_organization_create import UserOrganizationCreate
  from .user_organization_delete import UserOrganizationDelete
@@ -275,6 +301,24 @@ __all__ = [
  "DataSourceCustomMetadataValue",
  "DataSourceDefinition",
  "ElementSegmentationConfig",
+ "EmbeddingModelConfig",
+ "EmbeddingModelConfigEmbeddingConfig",
+ "EmbeddingModelConfigEmbeddingConfig_AzureEmbedding",
+ "EmbeddingModelConfigEmbeddingConfig_BedrockEmbedding",
+ "EmbeddingModelConfigEmbeddingConfig_CohereEmbedding",
+ "EmbeddingModelConfigEmbeddingConfig_GeminiEmbedding",
+ "EmbeddingModelConfigEmbeddingConfig_HuggingfaceApiEmbedding",
+ "EmbeddingModelConfigEmbeddingConfig_OpenaiEmbedding",
+ "EmbeddingModelConfigEmbeddingConfig_VertexaiEmbedding",
+ "EmbeddingModelConfigUpdate",
+ "EmbeddingModelConfigUpdateEmbeddingConfig",
+ "EmbeddingModelConfigUpdateEmbeddingConfig_AzureEmbedding",
+ "EmbeddingModelConfigUpdateEmbeddingConfig_BedrockEmbedding",
+ "EmbeddingModelConfigUpdateEmbeddingConfig_CohereEmbedding",
+ "EmbeddingModelConfigUpdateEmbeddingConfig_GeminiEmbedding",
+ "EmbeddingModelConfigUpdateEmbeddingConfig_HuggingfaceApiEmbedding",
+ "EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding",
+ "EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding",
  "EvalDataset",
  "EvalDatasetJobParams",
  "EvalDatasetJobRecord",
@@ -301,6 +345,7 @@ __all__ = [
  "HuggingFaceInferenceApiEmbeddingToken",
  "IngestionErrorResponse",
  "InputMessage",
+ "IntervalUsageAndPlan",
  "JobNameMapping",
  "LlamaParseParameters",
  "LlamaParseSupportedFileExtensions",
@@ -334,6 +379,7 @@ __all__ = [
  "PageScreenshotNodeWithScore",
  "PageSegmentationConfig",
  "PageSplitterNodeParser",
+ "PaginatedListPipelineFilesResponse",
  "ParserLanguages",
  "ParsingHistoryItem",
  "ParsingJob",
@@ -378,6 +424,7 @@ __all__ = [
  "PipelineTransformConfig_Advanced",
  "PipelineTransformConfig_Auto",
  "PipelineType",
+ "Plan",
  "PlaygroundSession",
  "Pooling",
  "PresetRetrievalParams",
@@ -403,6 +450,7 @@ __all__ = [
  "TokenChunkingConfig",
  "TokenTextSplitter",
  "TransformationCategoryNames",
+ "Usage",
  "UserOrganization",
  "UserOrganizationCreate",
  "UserOrganizationDelete",
llama_cloud/types/base.py CHANGED
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+ import pydantic
+ if pydantic.__version__.startswith("1."):
+ raise ImportError
+ import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+ import pydantic # type: ignore
+
+
+ class Base(pydantic.BaseModel):
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
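
The try/except shim at the top of base.py keeps the generated models on the pydantic v1-style API whichever major version is installed: on pydantic 2.x it rebinds the pydantic name to the bundled pydantic.v1 compatibility layer, and on 1.x it falls back to the normal import. A minimal sketch of the same idea (not the SDK's exact code):

import pydantic

if not pydantic.VERSION.startswith("1."):
    # pydantic 2.x ships a v1 compatibility layer under pydantic.v1
    import pydantic.v1 as pydantic  # type: ignore

print(pydantic.VERSION)  # the v1-style API is now bound to the name `pydantic`
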
@@ -21,6 +21,7 @@ class CloudOneDriveDataSource(pydantic.BaseModel):
  client_id: str = pydantic.Field(description="The client ID to use for authentication.")
  client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
  tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
+ required_exts: typing.Optional[typing.List[str]]
  class_name: typing.Optional[str]

  def json(self, **kwargs: typing.Any) -> str:
@@ -19,7 +19,7 @@ class CloudPostgresVectorStore(pydantic.BaseModel):
  database: str
  host: str
  password: str
- port: str
+ port: int
  user: str
  table_name: str
  schema_name: str
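
Note that this is a type narrowing rather than a pure addition: call sites that passed port as a string in 0.1.3 should now pass an integer (pydantic v1 will still coerce numeric strings at runtime, but type checkers and the declared field type now expect an int).
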
@@ -23,6 +23,7 @@ class CloudSharepointDataSource(pydantic.BaseModel):
  client_id: str = pydantic.Field(description="The client ID to use for authentication.")
  client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
  tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
+ required_exts: typing.Optional[typing.List[str]]
  class_name: typing.Optional[str]

  def json(self, **kwargs: typing.Any) -> str: