llama-cloud 0.1.12__py3-none-any.whl → 0.1.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic. See the package's registry listing for more details.

Files changed (46):
  1. llama_cloud/__init__.py +38 -12
  2. llama_cloud/resources/__init__.py +0 -14
  3. llama_cloud/resources/llama_extract/__init__.py +0 -17
  4. llama_cloud/resources/llama_extract/client.py +113 -314
  5. llama_cloud/resources/organizations/client.py +15 -5
  6. llama_cloud/resources/parsing/client.py +153 -86
  7. llama_cloud/resources/pipelines/client.py +145 -10
  8. llama_cloud/resources/projects/client.py +25 -9
  9. llama_cloud/resources/reports/client.py +16 -6
  10. llama_cloud/types/__init__.py +44 -6
  11. llama_cloud/types/{plan.py → base_plan.py} +16 -13
  12. llama_cloud/types/base_plan_metronome_plan_type.py +17 -0
  13. llama_cloud/types/base_plan_name.py +45 -0
  14. llama_cloud/types/base_plan_plan_frequency.py +25 -0
  15. llama_cloud/types/billing_period.py +32 -0
  16. llama_cloud/types/{base.py → credit_type.py} +4 -1
  17. llama_cloud/types/data_source.py +1 -0
  18. llama_cloud/types/eval_dataset_job_record.py +1 -2
  19. llama_cloud/types/extract_agent_create.py +39 -0
  20. llama_cloud/types/extract_agent_update.py +38 -0
  21. llama_cloud/types/extract_schema_validate_request.py +32 -0
  22. llama_cloud/types/free_credits_usage.py +34 -0
  23. llama_cloud/types/job_record.py +2 -3
  24. llama_cloud/types/llama_parse_parameters.py +9 -0
  25. llama_cloud/types/llm_parameters.py +1 -0
  26. llama_cloud/types/page_screenshot_metadata.py +1 -0
  27. llama_cloud/types/paginated_list_cloud_documents_response.py +35 -0
  28. llama_cloud/types/parsing_mode.py +37 -0
  29. llama_cloud/types/pipeline_data_source.py +1 -0
  30. llama_cloud/types/pipeline_file.py +1 -0
  31. llama_cloud/types/plan_limits.py +52 -0
  32. llama_cloud/types/recurring_credit_grant.py +44 -0
  33. llama_cloud/types/usage.py +5 -4
  34. llama_cloud/types/usage_active_alerts_item.py +25 -0
  35. llama_cloud/types/{interval_usage_and_plan.py → usage_and_plan.py} +4 -6
  36. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/METADATA +2 -1
  37. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/RECORD +45 -33
  38. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/WHEEL +1 -1
  39. llama_cloud/resources/llama_extract/types/__init__.py +0 -17
  40. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema.py +0 -0
  41. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema_zero_value.py +0 -0
  42. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema.py +0 -0
  43. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema_zero_value.py +0 -0
  44. /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema.py +0 -0
  45. /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
  46. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/LICENSE +0 -0
@@ -23,6 +23,7 @@ from ...types.input_message import InputMessage
23
23
  from ...types.llama_parse_parameters import LlamaParseParameters
24
24
  from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
25
25
  from ...types.metadata_filters import MetadataFilters
26
+ from ...types.paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
26
27
  from ...types.paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
27
28
  from ...types.pipeline import Pipeline
28
29
  from ...types.pipeline_create import PipelineCreate
@@ -64,7 +65,6 @@ class PipelinesClient:
64
65
  project_name: typing.Optional[str] = None,
65
66
  pipeline_name: typing.Optional[str] = None,
66
67
  pipeline_type: typing.Optional[PipelineType] = None,
67
- organization_id: typing.Optional[str] = None,
68
68
  ) -> typing.List[Pipeline]:
69
69
  """
70
70
  Search for pipelines by various parameters.
@@ -77,8 +77,6 @@ class PipelinesClient:
77
77
  - pipeline_name: typing.Optional[str].
78
78
 
79
79
  - pipeline_type: typing.Optional[PipelineType].
80
-
81
- - organization_id: typing.Optional[str].
82
80
  ---
83
81
  from llama_cloud import PipelineType
84
82
  from llama_cloud.client import LlamaCloud
@@ -99,7 +97,6 @@ class PipelinesClient:
99
97
  "project_name": project_name,
100
98
  "pipeline_name": pipeline_name,
101
99
  "pipeline_type": pipeline_type,
102
- "organization_id": organization_id,
103
100
  }
104
101
  ),
105
102
  headers=self._client_wrapper.get_headers(),
@@ -1443,6 +1440,7 @@ class PipelinesClient:
1443
1440
  limit: typing.Optional[int] = None,
1444
1441
  file_id: typing.Optional[str] = None,
1445
1442
  only_direct_upload: typing.Optional[bool] = None,
1443
+ only_api_data_source_documents: typing.Optional[bool] = None,
1446
1444
  ) -> typing.List[CloudDocument]:
1447
1445
  """
1448
1446
  Return a list of documents for a pipeline.
@@ -1457,6 +1455,8 @@ class PipelinesClient:
1457
1455
  - file_id: typing.Optional[str].
1458
1456
 
1459
1457
  - only_direct_upload: typing.Optional[bool].
1458
+
1459
+ - only_api_data_source_documents: typing.Optional[bool].
1460
1460
  ---
1461
1461
  from llama_cloud.client import LlamaCloud
1462
1462
 
@@ -1473,7 +1473,13 @@ class PipelinesClient:
1473
1473
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
1474
1474
  ),
1475
1475
  params=remove_none_from_dict(
1476
- {"skip": skip, "limit": limit, "file_id": file_id, "only_direct_upload": only_direct_upload}
1476
+ {
1477
+ "skip": skip,
1478
+ "limit": limit,
1479
+ "file_id": file_id,
1480
+ "only_direct_upload": only_direct_upload,
1481
+ "only_api_data_source_documents": only_api_data_source_documents,
1482
+ }
1477
1483
  ),
1478
1484
  headers=self._client_wrapper.get_headers(),
1479
1485
  timeout=60,
@@ -1568,6 +1574,68 @@ class PipelinesClient:
1568
1574
  raise ApiError(status_code=_response.status_code, body=_response.text)
1569
1575
  raise ApiError(status_code=_response.status_code, body=_response_json)
1570
1576
 
1577
+ def paginated_list_pipeline_documents(
1578
+ self,
1579
+ pipeline_id: str,
1580
+ *,
1581
+ skip: typing.Optional[int] = None,
1582
+ limit: typing.Optional[int] = None,
1583
+ file_id: typing.Optional[str] = None,
1584
+ only_direct_upload: typing.Optional[bool] = None,
1585
+ only_api_data_source_documents: typing.Optional[bool] = None,
1586
+ ) -> PaginatedListCloudDocumentsResponse:
1587
+ """
1588
+ Return a list of documents for a pipeline.
1589
+
1590
+ Parameters:
1591
+ - pipeline_id: str.
1592
+
1593
+ - skip: typing.Optional[int].
1594
+
1595
+ - limit: typing.Optional[int].
1596
+
1597
+ - file_id: typing.Optional[str].
1598
+
1599
+ - only_direct_upload: typing.Optional[bool].
1600
+
1601
+ - only_api_data_source_documents: typing.Optional[bool].
1602
+ ---
1603
+ from llama_cloud.client import LlamaCloud
1604
+
1605
+ client = LlamaCloud(
1606
+ token="YOUR_TOKEN",
1607
+ )
1608
+ client.pipelines.paginated_list_pipeline_documents(
1609
+ pipeline_id="string",
1610
+ )
1611
+ """
1612
+ _response = self._client_wrapper.httpx_client.request(
1613
+ "GET",
1614
+ urllib.parse.urljoin(
1615
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/paginated"
1616
+ ),
1617
+ params=remove_none_from_dict(
1618
+ {
1619
+ "skip": skip,
1620
+ "limit": limit,
1621
+ "file_id": file_id,
1622
+ "only_direct_upload": only_direct_upload,
1623
+ "only_api_data_source_documents": only_api_data_source_documents,
1624
+ }
1625
+ ),
1626
+ headers=self._client_wrapper.get_headers(),
1627
+ timeout=60,
1628
+ )
1629
+ if 200 <= _response.status_code < 300:
1630
+ return pydantic.parse_obj_as(PaginatedListCloudDocumentsResponse, _response.json()) # type: ignore
1631
+ if _response.status_code == 422:
1632
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
1633
+ try:
1634
+ _response_json = _response.json()
1635
+ except JSONDecodeError:
1636
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1637
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1638
+
1571
1639
  def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
1572
1640
  """
1573
1641
  Return a single document for a pipeline.
@@ -1730,7 +1798,6 @@ class AsyncPipelinesClient:
1730
1798
  project_name: typing.Optional[str] = None,
1731
1799
  pipeline_name: typing.Optional[str] = None,
1732
1800
  pipeline_type: typing.Optional[PipelineType] = None,
1733
- organization_id: typing.Optional[str] = None,
1734
1801
  ) -> typing.List[Pipeline]:
1735
1802
  """
1736
1803
  Search for pipelines by various parameters.
@@ -1743,8 +1810,6 @@ class AsyncPipelinesClient:
1743
1810
  - pipeline_name: typing.Optional[str].
1744
1811
 
1745
1812
  - pipeline_type: typing.Optional[PipelineType].
1746
-
1747
- - organization_id: typing.Optional[str].
1748
1813
  ---
1749
1814
  from llama_cloud import PipelineType
1750
1815
  from llama_cloud.client import AsyncLlamaCloud
@@ -1765,7 +1830,6 @@ class AsyncPipelinesClient:
1765
1830
  "project_name": project_name,
1766
1831
  "pipeline_name": pipeline_name,
1767
1832
  "pipeline_type": pipeline_type,
1768
- "organization_id": organization_id,
1769
1833
  }
1770
1834
  ),
1771
1835
  headers=self._client_wrapper.get_headers(),
@@ -3113,6 +3177,7 @@ class AsyncPipelinesClient:
3113
3177
  limit: typing.Optional[int] = None,
3114
3178
  file_id: typing.Optional[str] = None,
3115
3179
  only_direct_upload: typing.Optional[bool] = None,
3180
+ only_api_data_source_documents: typing.Optional[bool] = None,
3116
3181
  ) -> typing.List[CloudDocument]:
3117
3182
  """
3118
3183
  Return a list of documents for a pipeline.
@@ -3127,6 +3192,8 @@ class AsyncPipelinesClient:
3127
3192
  - file_id: typing.Optional[str].
3128
3193
 
3129
3194
  - only_direct_upload: typing.Optional[bool].
3195
+
3196
+ - only_api_data_source_documents: typing.Optional[bool].
3130
3197
  ---
3131
3198
  from llama_cloud.client import AsyncLlamaCloud
3132
3199
 
@@ -3143,7 +3210,13 @@ class AsyncPipelinesClient:
3143
3210
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
3144
3211
  ),
3145
3212
  params=remove_none_from_dict(
3146
- {"skip": skip, "limit": limit, "file_id": file_id, "only_direct_upload": only_direct_upload}
3213
+ {
3214
+ "skip": skip,
3215
+ "limit": limit,
3216
+ "file_id": file_id,
3217
+ "only_direct_upload": only_direct_upload,
3218
+ "only_api_data_source_documents": only_api_data_source_documents,
3219
+ }
3147
3220
  ),
3148
3221
  headers=self._client_wrapper.get_headers(),
3149
3222
  timeout=60,
@@ -3238,6 +3311,68 @@ class AsyncPipelinesClient:
3238
3311
  raise ApiError(status_code=_response.status_code, body=_response.text)
3239
3312
  raise ApiError(status_code=_response.status_code, body=_response_json)
3240
3313
 
3314
+ async def paginated_list_pipeline_documents(
3315
+ self,
3316
+ pipeline_id: str,
3317
+ *,
3318
+ skip: typing.Optional[int] = None,
3319
+ limit: typing.Optional[int] = None,
3320
+ file_id: typing.Optional[str] = None,
3321
+ only_direct_upload: typing.Optional[bool] = None,
3322
+ only_api_data_source_documents: typing.Optional[bool] = None,
3323
+ ) -> PaginatedListCloudDocumentsResponse:
3324
+ """
3325
+ Return a list of documents for a pipeline.
3326
+
3327
+ Parameters:
3328
+ - pipeline_id: str.
3329
+
3330
+ - skip: typing.Optional[int].
3331
+
3332
+ - limit: typing.Optional[int].
3333
+
3334
+ - file_id: typing.Optional[str].
3335
+
3336
+ - only_direct_upload: typing.Optional[bool].
3337
+
3338
+ - only_api_data_source_documents: typing.Optional[bool].
3339
+ ---
3340
+ from llama_cloud.client import AsyncLlamaCloud
3341
+
3342
+ client = AsyncLlamaCloud(
3343
+ token="YOUR_TOKEN",
3344
+ )
3345
+ await client.pipelines.paginated_list_pipeline_documents(
3346
+ pipeline_id="string",
3347
+ )
3348
+ """
3349
+ _response = await self._client_wrapper.httpx_client.request(
3350
+ "GET",
3351
+ urllib.parse.urljoin(
3352
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/paginated"
3353
+ ),
3354
+ params=remove_none_from_dict(
3355
+ {
3356
+ "skip": skip,
3357
+ "limit": limit,
3358
+ "file_id": file_id,
3359
+ "only_direct_upload": only_direct_upload,
3360
+ "only_api_data_source_documents": only_api_data_source_documents,
3361
+ }
3362
+ ),
3363
+ headers=self._client_wrapper.get_headers(),
3364
+ timeout=60,
3365
+ )
3366
+ if 200 <= _response.status_code < 300:
3367
+ return pydantic.parse_obj_as(PaginatedListCloudDocumentsResponse, _response.json()) # type: ignore
3368
+ if _response.status_code == 422:
3369
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
3370
+ try:
3371
+ _response_json = _response.json()
3372
+ except JSONDecodeError:
3373
+ raise ApiError(status_code=_response.status_code, body=_response.text)
3374
+ raise ApiError(status_code=_response.status_code, body=_response_json)
3375
+
3241
3376
  async def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
3242
3377
  """
3243
3378
  Return a single document for a pipeline.
@@ -11,13 +11,13 @@ from ...core.remove_none_from_dict import remove_none_from_dict
11
11
  from ...errors.unprocessable_entity_error import UnprocessableEntityError
12
12
  from ...types.eval_dataset import EvalDataset
13
13
  from ...types.http_validation_error import HttpValidationError
14
- from ...types.interval_usage_and_plan import IntervalUsageAndPlan
15
14
  from ...types.local_eval import LocalEval
16
15
  from ...types.local_eval_results import LocalEvalResults
17
16
  from ...types.local_eval_sets import LocalEvalSets
18
17
  from ...types.project import Project
19
18
  from ...types.project_create import ProjectCreate
20
19
  from ...types.prompt_mixin_prompts import PromptMixinPrompts
20
+ from ...types.usage_and_plan import UsageAndPlan
21
21
 
22
22
  try:
23
23
  import pydantic
@@ -256,14 +256,20 @@ class ProjectsClient:
256
256
  raise ApiError(status_code=_response.status_code, body=_response_json)
257
257
 
258
258
  def get_project_usage(
259
- self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
260
- ) -> IntervalUsageAndPlan:
259
+ self,
260
+ project_id: typing.Optional[str],
261
+ *,
262
+ get_current_invoice_total: typing.Optional[bool] = None,
263
+ organization_id: typing.Optional[str] = None,
264
+ ) -> UsageAndPlan:
261
265
  """
262
266
  Get usage for a project
263
267
 
264
268
  Parameters:
265
269
  - project_id: typing.Optional[str].
266
270
 
271
+ - get_current_invoice_total: typing.Optional[bool].
272
+
267
273
  - organization_id: typing.Optional[str].
268
274
  ---
269
275
  from llama_cloud.client import LlamaCloud
@@ -276,12 +282,14 @@ class ProjectsClient:
276
282
  _response = self._client_wrapper.httpx_client.request(
277
283
  "GET",
278
284
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/usage"),
279
- params=remove_none_from_dict({"organization_id": organization_id}),
285
+ params=remove_none_from_dict(
286
+ {"get_current_invoice_total": get_current_invoice_total, "organization_id": organization_id}
287
+ ),
280
288
  headers=self._client_wrapper.get_headers(),
281
289
  timeout=60,
282
290
  )
283
291
  if 200 <= _response.status_code < 300:
284
- return pydantic.parse_obj_as(IntervalUsageAndPlan, _response.json()) # type: ignore
292
+ return pydantic.parse_obj_as(UsageAndPlan, _response.json()) # type: ignore
285
293
  if _response.status_code == 422:
286
294
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
287
295
  try:
@@ -943,14 +951,20 @@ class AsyncProjectsClient:
943
951
  raise ApiError(status_code=_response.status_code, body=_response_json)
944
952
 
945
953
  async def get_project_usage(
946
- self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
947
- ) -> IntervalUsageAndPlan:
954
+ self,
955
+ project_id: typing.Optional[str],
956
+ *,
957
+ get_current_invoice_total: typing.Optional[bool] = None,
958
+ organization_id: typing.Optional[str] = None,
959
+ ) -> UsageAndPlan:
948
960
  """
949
961
  Get usage for a project
950
962
 
951
963
  Parameters:
952
964
  - project_id: typing.Optional[str].
953
965
 
966
+ - get_current_invoice_total: typing.Optional[bool].
967
+
954
968
  - organization_id: typing.Optional[str].
955
969
  ---
956
970
  from llama_cloud.client import AsyncLlamaCloud
@@ -963,12 +977,14 @@ class AsyncProjectsClient:
963
977
  _response = await self._client_wrapper.httpx_client.request(
964
978
  "GET",
965
979
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/usage"),
966
- params=remove_none_from_dict({"organization_id": organization_id}),
980
+ params=remove_none_from_dict(
981
+ {"get_current_invoice_total": get_current_invoice_total, "organization_id": organization_id}
982
+ ),
967
983
  headers=self._client_wrapper.get_headers(),
968
984
  timeout=60,
969
985
  )
970
986
  if 200 <= _response.status_code < 300:
971
- return pydantic.parse_obj_as(IntervalUsageAndPlan, _response.json()) # type: ignore
987
+ return pydantic.parse_obj_as(UsageAndPlan, _response.json()) # type: ignore
972
988
  if _response.status_code == 422:
973
989
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
974
990
  try:
@@ -1,11 +1,13 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
+ import datetime as dt
3
4
  import typing
4
5
  import urllib.parse
5
6
  from json.decoder import JSONDecodeError
6
7
 
7
8
  from ...core.api_error import ApiError
8
9
  from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
10
+ from ...core.datetime_utils import serialize_datetime
9
11
  from ...core.jsonable_encoder import jsonable_encoder
10
12
  from ...core.remove_none_from_dict import remove_none_from_dict
11
13
  from ...errors.unprocessable_entity_error import UnprocessableEntityError
@@ -451,7 +453,7 @@ class ReportsClient:
451
453
  self,
452
454
  report_id: str,
453
455
  *,
454
- last_sequence: typing.Optional[int] = None,
456
+ after: typing.Optional[dt.datetime] = None,
455
457
  project_id: typing.Optional[str] = None,
456
458
  organization_id: typing.Optional[str] = None,
457
459
  ) -> typing.List[ReportEventItem]:
@@ -461,7 +463,7 @@ class ReportsClient:
461
463
  Parameters:
462
464
  - report_id: str.
463
465
 
464
- - last_sequence: typing.Optional[int].
466
+ - after: typing.Optional[dt.datetime].
465
467
 
466
468
  - project_id: typing.Optional[str].
467
469
 
@@ -480,7 +482,11 @@ class ReportsClient:
480
482
  "GET",
481
483
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/reports/{report_id}/events"),
482
484
  params=remove_none_from_dict(
483
- {"last_sequence": last_sequence, "project_id": project_id, "organization_id": organization_id}
485
+ {
486
+ "after": serialize_datetime(after) if after is not None else None,
487
+ "project_id": project_id,
488
+ "organization_id": organization_id,
489
+ }
484
490
  ),
485
491
  headers=self._client_wrapper.get_headers(),
486
492
  timeout=60,
@@ -1043,7 +1049,7 @@ class AsyncReportsClient:
1043
1049
  self,
1044
1050
  report_id: str,
1045
1051
  *,
1046
- last_sequence: typing.Optional[int] = None,
1052
+ after: typing.Optional[dt.datetime] = None,
1047
1053
  project_id: typing.Optional[str] = None,
1048
1054
  organization_id: typing.Optional[str] = None,
1049
1055
  ) -> typing.List[ReportEventItem]:
@@ -1053,7 +1059,7 @@ class AsyncReportsClient:
1053
1059
  Parameters:
1054
1060
  - report_id: str.
1055
1061
 
1056
- - last_sequence: typing.Optional[int].
1062
+ - after: typing.Optional[dt.datetime].
1057
1063
 
1058
1064
  - project_id: typing.Optional[str].
1059
1065
 
@@ -1072,7 +1078,11 @@ class AsyncReportsClient:
1072
1078
  "GET",
1073
1079
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/reports/{report_id}/events"),
1074
1080
  params=remove_none_from_dict(
1075
- {"last_sequence": last_sequence, "project_id": project_id, "organization_id": organization_id}
1081
+ {
1082
+ "after": serialize_datetime(after) if after is not None else None,
1083
+ "project_id": project_id,
1084
+ "organization_id": organization_id,
1085
+ }
1076
1086
  ),
1077
1087
  headers=self._client_wrapper.get_headers(),
1078
1088
  timeout=60,
@@ -19,10 +19,14 @@ from .app_schema_chat_chat_message import AppSchemaChatChatMessage
19
19
  from .auto_transform_config import AutoTransformConfig
20
20
  from .azure_open_ai_embedding import AzureOpenAiEmbedding
21
21
  from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
22
- from .base import Base
22
+ from .base_plan import BasePlan
23
+ from .base_plan_metronome_plan_type import BasePlanMetronomePlanType
24
+ from .base_plan_name import BasePlanName
25
+ from .base_plan_plan_frequency import BasePlanPlanFrequency
23
26
  from .base_prompt_template import BasePromptTemplate
24
27
  from .bedrock_embedding import BedrockEmbedding
25
28
  from .bedrock_embedding_config import BedrockEmbeddingConfig
29
+ from .billing_period import BillingPeriod
26
30
  from .box_auth_mechanism import BoxAuthMechanism
27
31
  from .character_chunking_config import CharacterChunkingConfig
28
32
  from .character_splitter import CharacterSplitter
@@ -61,6 +65,7 @@ from .configurable_transformation_definition import ConfigurableTransformationDe
61
65
  from .configurable_transformation_names import ConfigurableTransformationNames
62
66
  from .configured_transformation_item import ConfiguredTransformationItem
63
67
  from .configured_transformation_item_component import ConfiguredTransformationItemComponent
68
+ from .credit_type import CreditType
64
69
  from .data_sink import DataSink
65
70
  from .data_sink_component import DataSinkComponent
66
71
  from .data_sink_create import DataSinkCreate
@@ -108,7 +113,13 @@ from .eval_question import EvalQuestion
108
113
  from .eval_question_create import EvalQuestionCreate
109
114
  from .eval_question_result import EvalQuestionResult
110
115
  from .extract_agent import ExtractAgent
116
+ from .extract_agent_create import ExtractAgentCreate
117
+ from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
118
+ from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
111
119
  from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
120
+ from .extract_agent_update import ExtractAgentUpdate
121
+ from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
122
+ from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
112
123
  from .extract_config import ExtractConfig
113
124
  from .extract_job import ExtractJob
114
125
  from .extract_job_create import ExtractJobCreate
@@ -126,6 +137,9 @@ from .extract_run_data_item_value import ExtractRunDataItemValue
126
137
  from .extract_run_data_schema_value import ExtractRunDataSchemaValue
127
138
  from .extract_run_data_zero_value import ExtractRunDataZeroValue
128
139
  from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
140
+ from .extract_schema_validate_request import ExtractSchemaValidateRequest
141
+ from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
142
+ from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
129
143
  from .extract_schema_validate_response import ExtractSchemaValidateResponse
130
144
  from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
131
145
  from .extract_state import ExtractState
@@ -135,6 +149,7 @@ from .file_permission_info_value import FilePermissionInfoValue
135
149
  from .file_resource_info_value import FileResourceInfoValue
136
150
  from .filter_condition import FilterCondition
137
151
  from .filter_operator import FilterOperator
152
+ from .free_credits_usage import FreeCreditsUsage
138
153
  from .gemini_embedding import GeminiEmbedding
139
154
  from .gemini_embedding_config import GeminiEmbeddingConfig
140
155
  from .http_validation_error import HttpValidationError
@@ -144,7 +159,6 @@ from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiE
144
159
  from .image_block import ImageBlock
145
160
  from .ingestion_error_response import IngestionErrorResponse
146
161
  from .input_message import InputMessage
147
- from .interval_usage_and_plan import IntervalUsageAndPlan
148
162
  from .job_name_mapping import JobNameMapping
149
163
  from .job_names import JobNames
150
164
  from .job_record import JobRecord
@@ -190,6 +204,7 @@ from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
190
204
  from .page_segmentation_config import PageSegmentationConfig
191
205
  from .page_splitter_node_parser import PageSplitterNodeParser
192
206
  from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
207
+ from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
193
208
  from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
194
209
  from .paginated_report_response import PaginatedReportResponse
195
210
  from .parse_plan_level import ParsePlanLevel
@@ -200,6 +215,7 @@ from .parsing_job_json_result import ParsingJobJsonResult
200
215
  from .parsing_job_markdown_result import ParsingJobMarkdownResult
201
216
  from .parsing_job_structured_result import ParsingJobStructuredResult
202
217
  from .parsing_job_text_result import ParsingJobTextResult
218
+ from .parsing_mode import ParsingMode
203
219
  from .parsing_usage import ParsingUsage
204
220
  from .partition_names import PartitionNames
205
221
  from .permission import Permission
@@ -245,7 +261,7 @@ from .pipeline_transform_config import (
245
261
  PipelineTransformConfig_Auto,
246
262
  )
247
263
  from .pipeline_type import PipelineType
248
- from .plan import Plan
264
+ from .plan_limits import PlanLimits
249
265
  from .playground_session import PlaygroundSession
250
266
  from .pooling import Pooling
251
267
  from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
@@ -259,6 +275,7 @@ from .prompt_conf import PromptConf
259
275
  from .prompt_mixin_prompts import PromptMixinPrompts
260
276
  from .prompt_spec import PromptSpec
261
277
  from .pydantic_program_mode import PydanticProgramMode
278
+ from .recurring_credit_grant import RecurringCreditGrant
262
279
  from .related_node_info import RelatedNodeInfo
263
280
  from .related_node_info_node_type import RelatedNodeInfoNodeType
264
281
  from .report import Report
@@ -304,6 +321,8 @@ from .token_chunking_config import TokenChunkingConfig
304
321
  from .token_text_splitter import TokenTextSplitter
305
322
  from .transformation_category_names import TransformationCategoryNames
306
323
  from .usage import Usage
324
+ from .usage_active_alerts_item import UsageActiveAlertsItem
325
+ from .usage_and_plan import UsageAndPlan
307
326
  from .usage_metric_response import UsageMetricResponse
308
327
  from .user_job_record import UserJobRecord
309
328
  from .user_organization import UserOrganization
@@ -332,10 +351,14 @@ __all__ = [
332
351
  "AutoTransformConfig",
333
352
  "AzureOpenAiEmbedding",
334
353
  "AzureOpenAiEmbeddingConfig",
335
- "Base",
354
+ "BasePlan",
355
+ "BasePlanMetronomePlanType",
356
+ "BasePlanName",
357
+ "BasePlanPlanFrequency",
336
358
  "BasePromptTemplate",
337
359
  "BedrockEmbedding",
338
360
  "BedrockEmbeddingConfig",
361
+ "BillingPeriod",
339
362
  "BoxAuthMechanism",
340
363
  "CharacterChunkingConfig",
341
364
  "CharacterSplitter",
@@ -374,6 +397,7 @@ __all__ = [
374
397
  "ConfigurableTransformationNames",
375
398
  "ConfiguredTransformationItem",
376
399
  "ConfiguredTransformationItemComponent",
400
+ "CreditType",
377
401
  "DataSink",
378
402
  "DataSinkComponent",
379
403
  "DataSinkCreate",
@@ -417,7 +441,13 @@ __all__ = [
417
441
  "EvalQuestionCreate",
418
442
  "EvalQuestionResult",
419
443
  "ExtractAgent",
444
+ "ExtractAgentCreate",
445
+ "ExtractAgentCreateDataSchema",
446
+ "ExtractAgentCreateDataSchemaZeroValue",
420
447
  "ExtractAgentDataSchemaValue",
448
+ "ExtractAgentUpdate",
449
+ "ExtractAgentUpdateDataSchema",
450
+ "ExtractAgentUpdateDataSchemaZeroValue",
421
451
  "ExtractConfig",
422
452
  "ExtractJob",
423
453
  "ExtractJobCreate",
@@ -435,6 +465,9 @@ __all__ = [
435
465
  "ExtractRunDataSchemaValue",
436
466
  "ExtractRunDataZeroValue",
437
467
  "ExtractRunExtractionMetadataValue",
468
+ "ExtractSchemaValidateRequest",
469
+ "ExtractSchemaValidateRequestDataSchema",
470
+ "ExtractSchemaValidateRequestDataSchemaZeroValue",
438
471
  "ExtractSchemaValidateResponse",
439
472
  "ExtractSchemaValidateResponseDataSchemaValue",
440
473
  "ExtractState",
@@ -444,6 +477,7 @@ __all__ = [
444
477
  "FileResourceInfoValue",
445
478
  "FilterCondition",
446
479
  "FilterOperator",
480
+ "FreeCreditsUsage",
447
481
  "GeminiEmbedding",
448
482
  "GeminiEmbeddingConfig",
449
483
  "HttpValidationError",
@@ -453,7 +487,6 @@ __all__ = [
453
487
  "ImageBlock",
454
488
  "IngestionErrorResponse",
455
489
  "InputMessage",
456
- "IntervalUsageAndPlan",
457
490
  "JobNameMapping",
458
491
  "JobNames",
459
492
  "JobRecord",
@@ -497,6 +530,7 @@ __all__ = [
497
530
  "PageSegmentationConfig",
498
531
  "PageSplitterNodeParser",
499
532
  "PaginatedJobsHistoryWithMetrics",
533
+ "PaginatedListCloudDocumentsResponse",
500
534
  "PaginatedListPipelineFilesResponse",
501
535
  "PaginatedReportResponse",
502
536
  "ParsePlanLevel",
@@ -507,6 +541,7 @@ __all__ = [
507
541
  "ParsingJobMarkdownResult",
508
542
  "ParsingJobStructuredResult",
509
543
  "ParsingJobTextResult",
544
+ "ParsingMode",
510
545
  "ParsingUsage",
511
546
  "PartitionNames",
512
547
  "Permission",
@@ -546,7 +581,7 @@ __all__ = [
546
581
  "PipelineTransformConfig_Advanced",
547
582
  "PipelineTransformConfig_Auto",
548
583
  "PipelineType",
549
- "Plan",
584
+ "PlanLimits",
550
585
  "PlaygroundSession",
551
586
  "Pooling",
552
587
  "PresetCompositeRetrievalParams",
@@ -560,6 +595,7 @@ __all__ = [
560
595
  "PromptMixinPrompts",
561
596
  "PromptSpec",
562
597
  "PydanticProgramMode",
598
+ "RecurringCreditGrant",
563
599
  "RelatedNodeInfo",
564
600
  "RelatedNodeInfoNodeType",
565
601
  "Report",
@@ -603,6 +639,8 @@ __all__ = [
603
639
  "TokenTextSplitter",
604
640
  "TransformationCategoryNames",
605
641
  "Usage",
642
+ "UsageActiveAlertsItem",
643
+ "UsageAndPlan",
606
644
  "UsageMetricResponse",
607
645
  "UserJobRecord",
608
646
  "UserOrganization",
@@ -4,6 +4,12 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
+ from .base_plan_metronome_plan_type import BasePlanMetronomePlanType
8
+ from .base_plan_name import BasePlanName
9
+ from .base_plan_plan_frequency import BasePlanPlanFrequency
10
+ from .billing_period import BillingPeriod
11
+ from .plan_limits import PlanLimits
12
+ from .recurring_credit_grant import RecurringCreditGrant
7
13
 
8
14
  try:
9
15
  import pydantic
@@ -14,21 +20,18 @@ except ImportError:
14
20
  import pydantic # type: ignore
15
21
 
16
22
 
17
- class Plan(pydantic.BaseModel):
18
- id: str = pydantic.Field(description="The ID of the plan")
19
- name: typing.Optional[str]
20
- total_users: typing.Optional[int]
21
- total_indexes: typing.Optional[int]
22
- total_indexed_pages: typing.Optional[int]
23
- credits: typing.Optional[int]
24
- has_payment_method: typing.Optional[bool]
25
- free: typing.Optional[bool] = pydantic.Field(description="If is a free plan")
26
- allowed_index: typing.Optional[bool] = pydantic.Field(description="If is allowed to use indexes")
27
- allowed_external_index: typing.Optional[bool] = pydantic.Field(
28
- description="If is allowed to use external data sources or sinks in indexes"
29
- )
23
+ class BasePlan(pydantic.BaseModel):
24
+ id: typing.Optional[str]
25
+ name: BasePlanName
26
+ metronome_plan_type: BasePlanMetronomePlanType
27
+ metronome_rate_card_alias: typing.Optional[str]
28
+ limits: PlanLimits
29
+ recurring_credits: typing.Optional[typing.List[RecurringCreditGrant]]
30
+ plan_frequency: BasePlanPlanFrequency
31
+ metronome_customer_id: typing.Optional[str]
30
32
  starting_on: typing.Optional[dt.datetime]
31
33
  ending_before: typing.Optional[dt.datetime]
34
+ current_billing_period: typing.Optional[BillingPeriod]
32
35
 
33
36
  def json(self, **kwargs: typing.Any) -> str:
34
37
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}