llama-cloud 0.1.12__py3-none-any.whl → 0.1.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic; consult the registry's advisory page for more details.

Files changed (46)
  1. llama_cloud/__init__.py +38 -12
  2. llama_cloud/resources/__init__.py +0 -14
  3. llama_cloud/resources/llama_extract/__init__.py +0 -17
  4. llama_cloud/resources/llama_extract/client.py +113 -314
  5. llama_cloud/resources/organizations/client.py +15 -5
  6. llama_cloud/resources/parsing/client.py +153 -86
  7. llama_cloud/resources/pipelines/client.py +145 -10
  8. llama_cloud/resources/projects/client.py +25 -9
  9. llama_cloud/resources/reports/client.py +16 -6
  10. llama_cloud/types/__init__.py +44 -6
  11. llama_cloud/types/{plan.py → base_plan.py} +16 -13
  12. llama_cloud/types/base_plan_metronome_plan_type.py +17 -0
  13. llama_cloud/types/base_plan_name.py +45 -0
  14. llama_cloud/types/base_plan_plan_frequency.py +25 -0
  15. llama_cloud/types/billing_period.py +32 -0
  16. llama_cloud/types/{base.py → credit_type.py} +4 -1
  17. llama_cloud/types/data_source.py +1 -0
  18. llama_cloud/types/eval_dataset_job_record.py +1 -2
  19. llama_cloud/types/extract_agent_create.py +39 -0
  20. llama_cloud/types/extract_agent_update.py +38 -0
  21. llama_cloud/types/extract_schema_validate_request.py +32 -0
  22. llama_cloud/types/free_credits_usage.py +34 -0
  23. llama_cloud/types/job_record.py +2 -3
  24. llama_cloud/types/llama_parse_parameters.py +9 -0
  25. llama_cloud/types/llm_parameters.py +1 -0
  26. llama_cloud/types/page_screenshot_metadata.py +1 -0
  27. llama_cloud/types/paginated_list_cloud_documents_response.py +35 -0
  28. llama_cloud/types/parsing_mode.py +37 -0
  29. llama_cloud/types/pipeline_data_source.py +1 -0
  30. llama_cloud/types/pipeline_file.py +1 -0
  31. llama_cloud/types/plan_limits.py +52 -0
  32. llama_cloud/types/recurring_credit_grant.py +44 -0
  33. llama_cloud/types/usage.py +5 -4
  34. llama_cloud/types/usage_active_alerts_item.py +25 -0
  35. llama_cloud/types/{interval_usage_and_plan.py → usage_and_plan.py} +4 -6
  36. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/METADATA +2 -1
  37. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/RECORD +45 -33
  38. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/WHEEL +1 -1
  39. llama_cloud/resources/llama_extract/types/__init__.py +0 -17
  40. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema.py +0 -0
  41. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema_zero_value.py +0 -0
  42. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema.py +0 -0
  43. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema_zero_value.py +0 -0
  44. /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema.py +0 -0
  45. /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
  46. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/LICENSE +0 -0
@@ -0,0 +1,17 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class BasePlanMetronomePlanType(str, enum.Enum):
10
+ PLAN = "plan"
11
+ CONTRACT = "contract"
12
+
13
+ def visit(self, plan: typing.Callable[[], T_Result], contract: typing.Callable[[], T_Result]) -> T_Result:
14
+ if self is BasePlanMetronomePlanType.PLAN:
15
+ return plan()
16
+ if self is BasePlanMetronomePlanType.CONTRACT:
17
+ return contract()
@@ -0,0 +1,45 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class BasePlanName(str, enum.Enum):
10
+ FREE = "free"
11
+ LLAMA_PARSE = "llama_parse"
12
+ ENTERPRISE = "enterprise"
13
+ UNKNOWN = "unknown"
14
+ FREE_CONTRACT = "free_contract"
15
+ PRO = "pro"
16
+ ENTERPRISE_CONTRACT = "enterprise_contract"
17
+ ENTERPRISE_POC = "enterprise_poc"
18
+
19
+ def visit(
20
+ self,
21
+ free: typing.Callable[[], T_Result],
22
+ llama_parse: typing.Callable[[], T_Result],
23
+ enterprise: typing.Callable[[], T_Result],
24
+ unknown: typing.Callable[[], T_Result],
25
+ free_contract: typing.Callable[[], T_Result],
26
+ pro: typing.Callable[[], T_Result],
27
+ enterprise_contract: typing.Callable[[], T_Result],
28
+ enterprise_poc: typing.Callable[[], T_Result],
29
+ ) -> T_Result:
30
+ if self is BasePlanName.FREE:
31
+ return free()
32
+ if self is BasePlanName.LLAMA_PARSE:
33
+ return llama_parse()
34
+ if self is BasePlanName.ENTERPRISE:
35
+ return enterprise()
36
+ if self is BasePlanName.UNKNOWN:
37
+ return unknown()
38
+ if self is BasePlanName.FREE_CONTRACT:
39
+ return free_contract()
40
+ if self is BasePlanName.PRO:
41
+ return pro()
42
+ if self is BasePlanName.ENTERPRISE_CONTRACT:
43
+ return enterprise_contract()
44
+ if self is BasePlanName.ENTERPRISE_POC:
45
+ return enterprise_poc()
@@ -0,0 +1,25 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class BasePlanPlanFrequency(str, enum.Enum):
10
+ MONTHLY = "MONTHLY"
11
+ QUARTERLY = "QUARTERLY"
12
+ ANNUAL = "ANNUAL"
13
+
14
+ def visit(
15
+ self,
16
+ monthly: typing.Callable[[], T_Result],
17
+ quarterly: typing.Callable[[], T_Result],
18
+ annual: typing.Callable[[], T_Result],
19
+ ) -> T_Result:
20
+ if self is BasePlanPlanFrequency.MONTHLY:
21
+ return monthly()
22
+ if self is BasePlanPlanFrequency.QUARTERLY:
23
+ return quarterly()
24
+ if self is BasePlanPlanFrequency.ANNUAL:
25
+ return annual()
@@ -0,0 +1,32 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+
8
+ try:
9
+ import pydantic
10
+ if pydantic.__version__.startswith("1."):
11
+ raise ImportError
12
+ import pydantic.v1 as pydantic # type: ignore
13
+ except ImportError:
14
+ import pydantic # type: ignore
15
+
16
+
17
+ class BillingPeriod(pydantic.BaseModel):
18
+ start_date: dt.datetime
19
+ end_date: dt.datetime
20
+
21
+ def json(self, **kwargs: typing.Any) -> str:
22
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
23
+ return super().json(**kwargs_with_defaults)
24
+
25
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
26
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
27
+ return super().dict(**kwargs_with_defaults)
28
+
29
+ class Config:
30
+ frozen = True
31
+ smart_union = True
32
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -14,7 +14,10 @@ except ImportError:
14
14
  import pydantic # type: ignore
15
15
 
16
16
 
17
- class Base(pydantic.BaseModel):
17
+ class CreditType(pydantic.BaseModel):
18
+ id: str
19
+ name: str
20
+
18
21
  def json(self, **kwargs: typing.Any) -> str:
19
22
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
20
23
  return super().json(**kwargs_with_defaults)
@@ -29,6 +29,7 @@ class DataSource(pydantic.BaseModel):
29
29
  source_type: ConfigurableDataSourceNames
30
30
  custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
31
31
  component: DataSourceComponent = pydantic.Field(description="Component that implements the data source")
32
+ version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
32
33
  project_id: str
33
34
 
34
35
  def json(self, **kwargs: typing.Any) -> str:
@@ -6,7 +6,6 @@ import typing
6
6
  import typing_extensions
7
7
 
8
8
  from ..core.datetime_utils import serialize_datetime
9
- from .base import Base
10
9
  from .eval_dataset_job_params import EvalDatasetJobParams
11
10
  from .status_enum import StatusEnum
12
11
 
@@ -43,7 +42,7 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
43
42
  started_at: typing.Optional[dt.datetime]
44
43
  ended_at: typing.Optional[dt.datetime]
45
44
  updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
46
- data: typing.Optional[Base]
45
+ data: typing.Optional[typing.Any]
47
46
 
48
47
  def json(self, **kwargs: typing.Any) -> str:
49
48
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -0,0 +1,39 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
8
+ from .extract_config import ExtractConfig
9
+
10
+ try:
11
+ import pydantic
12
+ if pydantic.__version__.startswith("1."):
13
+ raise ImportError
14
+ import pydantic.v1 as pydantic # type: ignore
15
+ except ImportError:
16
+ import pydantic # type: ignore
17
+
18
+
19
+ class ExtractAgentCreate(pydantic.BaseModel):
20
+ """
21
+ Settings for creating an extraction agent.
22
+ """
23
+
24
+ name: str = pydantic.Field(description="The name of the extraction schema")
25
+ data_schema: ExtractAgentCreateDataSchema = pydantic.Field(description="The schema of the data.")
26
+ config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
27
+
28
+ def json(self, **kwargs: typing.Any) -> str:
29
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
30
+ return super().json(**kwargs_with_defaults)
31
+
32
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
33
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
34
+ return super().dict(**kwargs_with_defaults)
35
+
36
+ class Config:
37
+ frozen = True
38
+ smart_union = True
39
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,38 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
8
+ from .extract_config import ExtractConfig
9
+
10
+ try:
11
+ import pydantic
12
+ if pydantic.__version__.startswith("1."):
13
+ raise ImportError
14
+ import pydantic.v1 as pydantic # type: ignore
15
+ except ImportError:
16
+ import pydantic # type: ignore
17
+
18
+
19
+ class ExtractAgentUpdate(pydantic.BaseModel):
20
+ """
21
+ Settings for updating an extraction schema.
22
+ """
23
+
24
+ data_schema: ExtractAgentUpdateDataSchema = pydantic.Field(description="The schema of the data")
25
+ config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
26
+
27
+ def json(self, **kwargs: typing.Any) -> str:
28
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
29
+ return super().json(**kwargs_with_defaults)
30
+
31
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
32
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
33
+ return super().dict(**kwargs_with_defaults)
34
+
35
+ class Config:
36
+ frozen = True
37
+ smart_union = True
38
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,32 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
8
+
9
+ try:
10
+ import pydantic
11
+ if pydantic.__version__.startswith("1."):
12
+ raise ImportError
13
+ import pydantic.v1 as pydantic # type: ignore
14
+ except ImportError:
15
+ import pydantic # type: ignore
16
+
17
+
18
+ class ExtractSchemaValidateRequest(pydantic.BaseModel):
19
+ data_schema: ExtractSchemaValidateRequestDataSchema
20
+
21
+ def json(self, **kwargs: typing.Any) -> str:
22
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
23
+ return super().json(**kwargs_with_defaults)
24
+
25
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
26
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
27
+ return super().dict(**kwargs_with_defaults)
28
+
29
+ class Config:
30
+ frozen = True
31
+ smart_union = True
32
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,34 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+
8
+ try:
9
+ import pydantic
10
+ if pydantic.__version__.startswith("1."):
11
+ raise ImportError
12
+ import pydantic.v1 as pydantic # type: ignore
13
+ except ImportError:
14
+ import pydantic # type: ignore
15
+
16
+
17
+ class FreeCreditsUsage(pydantic.BaseModel):
18
+ starting_balance: int
19
+ remaining_balance: int
20
+ grant_name: str
21
+ expires_at: dt.datetime
22
+
23
+ def json(self, **kwargs: typing.Any) -> str:
24
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
25
+ return super().json(**kwargs_with_defaults)
26
+
27
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
28
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
29
+ return super().dict(**kwargs_with_defaults)
30
+
31
+ class Config:
32
+ frozen = True
33
+ smart_union = True
34
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -4,7 +4,6 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
- from .base import Base
8
7
  from .job_names import JobNames
9
8
  from .status_enum import StatusEnum
10
9
 
@@ -26,7 +25,7 @@ class JobRecord(pydantic.BaseModel):
26
25
  partitions: typing.Dict[str, str] = pydantic.Field(
27
26
  description="The partitions for this execution. Used for determining where to save job output."
28
27
  )
29
- parameters: typing.Optional[Base]
28
+ parameters: typing.Optional[typing.Any]
30
29
  session_id: typing.Optional[str]
31
30
  correlation_id: typing.Optional[str]
32
31
  parent_job_execution_id: typing.Optional[str]
@@ -41,7 +40,7 @@ class JobRecord(pydantic.BaseModel):
41
40
  started_at: typing.Optional[dt.datetime]
42
41
  ended_at: typing.Optional[dt.datetime]
43
42
  updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
44
- data: typing.Optional[Base]
43
+ data: typing.Optional[typing.Any]
45
44
 
46
45
  def json(self, **kwargs: typing.Any) -> str:
47
46
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -5,6 +5,7 @@ import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
7
  from .parser_languages import ParserLanguages
8
+ from .parsing_mode import ParsingMode
8
9
 
9
10
  try:
10
11
  import pydantic
@@ -24,6 +25,8 @@ class LlamaParseParameters(pydantic.BaseModel):
24
25
  parsing_instruction: typing.Optional[str]
25
26
  disable_ocr: typing.Optional[bool]
26
27
  annotate_links: typing.Optional[bool]
28
+ adaptive_long_table: typing.Optional[bool]
29
+ compact_markdown_table: typing.Optional[bool]
27
30
  disable_reconstruction: typing.Optional[bool]
28
31
  disable_image_extraction: typing.Optional[bool]
29
32
  invalidate_cache: typing.Optional[bool]
@@ -31,6 +34,7 @@ class LlamaParseParameters(pydantic.BaseModel):
31
34
  do_not_cache: typing.Optional[bool]
32
35
  fast_mode: typing.Optional[bool]
33
36
  skip_diagonal_text: typing.Optional[bool]
37
+ preserve_layout_alignment_across_pages: typing.Optional[bool]
34
38
  gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
35
39
  gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
36
40
  do_not_unroll_columns: typing.Optional[bool]
@@ -48,6 +52,7 @@ class LlamaParseParameters(pydantic.BaseModel):
48
52
  target_pages: typing.Optional[str]
49
53
  use_vendor_multimodal_model: typing.Optional[bool]
50
54
  vendor_multimodal_model_name: typing.Optional[str]
55
+ model: typing.Optional[str]
51
56
  vendor_multimodal_api_key: typing.Optional[str]
52
57
  page_prefix: typing.Optional[str]
53
58
  page_suffix: typing.Optional[str]
@@ -91,6 +96,10 @@ class LlamaParseParameters(pydantic.BaseModel):
91
96
  ignore_document_elements_for_layout_detection: typing.Optional[bool]
92
97
  output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
93
98
  internal_is_screenshot_job: typing.Optional[bool]
99
+ parse_mode: typing.Optional[ParsingMode]
100
+ system_prompt: typing.Optional[str]
101
+ system_prompt_append: typing.Optional[str]
102
+ user_prompt: typing.Optional[str]
94
103
 
95
104
  def json(self, **kwargs: typing.Any) -> str:
96
105
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -22,6 +22,7 @@ class LlmParameters(pydantic.BaseModel):
22
22
  system_prompt: typing.Optional[str]
23
23
  temperature: typing.Optional[float]
24
24
  use_chain_of_thought_reasoning: typing.Optional[bool]
25
+ use_citation: typing.Optional[bool]
25
26
  class_name: typing.Optional[str]
26
27
 
27
28
  def json(self, **kwargs: typing.Any) -> str:
@@ -18,6 +18,7 @@ class PageScreenshotMetadata(pydantic.BaseModel):
18
18
  page_index: int = pydantic.Field(description="The index of the page for which the screenshot is taken (0-indexed)")
19
19
  file_id: str = pydantic.Field(description="The ID of the file that the page screenshot was taken from")
20
20
  image_size: int = pydantic.Field(description="The size of the image in bytes")
21
+ metadata: typing.Optional[typing.Dict[str, typing.Any]]
21
22
 
22
23
  def json(self, **kwargs: typing.Any) -> str:
23
24
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -0,0 +1,35 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .cloud_document import CloudDocument
8
+
9
+ try:
10
+ import pydantic
11
+ if pydantic.__version__.startswith("1."):
12
+ raise ImportError
13
+ import pydantic.v1 as pydantic # type: ignore
14
+ except ImportError:
15
+ import pydantic # type: ignore
16
+
17
+
18
+ class PaginatedListCloudDocumentsResponse(pydantic.BaseModel):
19
+ documents: typing.List[CloudDocument] = pydantic.Field(description="The documents to list")
20
+ limit: int = pydantic.Field(description="The limit of the documents")
21
+ offset: int = pydantic.Field(description="The offset of the documents")
22
+ total_count: int = pydantic.Field(description="The total number of documents")
23
+
24
+ def json(self, **kwargs: typing.Any) -> str:
25
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
26
+ return super().json(**kwargs_with_defaults)
27
+
28
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
29
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
30
+ return super().dict(**kwargs_with_defaults)
31
+
32
+ class Config:
33
+ frozen = True
34
+ smart_union = True
35
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,37 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class ParsingMode(str, enum.Enum):
10
+ """
11
+ Enum for representing the mode of parsing to be used
12
+ """
13
+
14
+ PARSE_PAGE_WITHOUT_LLM = "parse_page_without_llm"
15
+ PARSE_PAGE_WITH_LLM = "parse_page_with_llm"
16
+ PARSE_PAGE_WITH_LVM = "parse_page_with_lvm"
17
+ PARSE_PAGE_WITH_AGENT = "parse_page_with_agent"
18
+ PARSE_DOCUMENT_WITH_LLM = "parse_document_with_llm"
19
+
20
+ def visit(
21
+ self,
22
+ parse_page_without_llm: typing.Callable[[], T_Result],
23
+ parse_page_with_llm: typing.Callable[[], T_Result],
24
+ parse_page_with_lvm: typing.Callable[[], T_Result],
25
+ parse_page_with_agent: typing.Callable[[], T_Result],
26
+ parse_document_with_llm: typing.Callable[[], T_Result],
27
+ ) -> T_Result:
28
+ if self is ParsingMode.PARSE_PAGE_WITHOUT_LLM:
29
+ return parse_page_without_llm()
30
+ if self is ParsingMode.PARSE_PAGE_WITH_LLM:
31
+ return parse_page_with_llm()
32
+ if self is ParsingMode.PARSE_PAGE_WITH_LVM:
33
+ return parse_page_with_lvm()
34
+ if self is ParsingMode.PARSE_PAGE_WITH_AGENT:
35
+ return parse_page_with_agent()
36
+ if self is ParsingMode.PARSE_DOCUMENT_WITH_LLM:
37
+ return parse_document_with_llm()
@@ -29,6 +29,7 @@ class PipelineDataSource(pydantic.BaseModel):
29
29
  source_type: ConfigurableDataSourceNames
30
30
  custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
31
31
  component: PipelineDataSourceComponent = pydantic.Field(description="Component that implements the data source")
32
+ version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
32
33
  project_id: str
33
34
  data_source_id: str = pydantic.Field(description="The ID of the data source.")
34
35
  pipeline_id: str = pydantic.Field(description="The ID of the pipeline.")
@@ -40,6 +40,7 @@ class PipelineFile(pydantic.BaseModel):
40
40
  custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
41
41
  config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
42
42
  indexed_page_count: typing.Optional[int]
43
+ status: typing.Optional[str]
43
44
 
44
45
  def json(self, **kwargs: typing.Any) -> str:
45
46
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -0,0 +1,52 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+
8
+ try:
9
+ import pydantic
10
+ if pydantic.__version__.startswith("1."):
11
+ raise ImportError
12
+ import pydantic.v1 as pydantic # type: ignore
13
+ except ImportError:
14
+ import pydantic # type: ignore
15
+
16
+
17
+ class PlanLimits(pydantic.BaseModel):
18
+ allow_pay_as_you_go: bool = pydantic.Field(description="Whether usage is allowed after credit grants are exhausted")
19
+ subscription_cost_usd: int
20
+ max_monthly_invoice_total_usd: typing.Optional[int]
21
+ max_concurrent_parse_jobs_premium: typing.Optional[int]
22
+ max_concurrent_parse_jobs_other: typing.Optional[int]
23
+ max_extraction_agents: typing.Optional[int]
24
+ max_extraction_runs: typing.Optional[int]
25
+ max_extraction_jobs: typing.Optional[int]
26
+ max_pages_per_index: typing.Optional[int]
27
+ max_files_per_index: typing.Optional[int]
28
+ max_indexes: typing.Optional[int]
29
+ max_concurrent_index_jobs: typing.Optional[int]
30
+ max_data_sources: typing.Optional[int]
31
+ max_embedding_models: typing.Optional[int]
32
+ max_data_sinks: typing.Optional[int]
33
+ max_published_agents: typing.Optional[int]
34
+ max_report_agent_sessions: typing.Optional[int]
35
+ max_users: typing.Optional[int]
36
+ max_organizations: typing.Optional[int]
37
+ max_projects: typing.Optional[int]
38
+ mfa_enabled: bool
39
+ sso_enabled: bool
40
+
41
+ def json(self, **kwargs: typing.Any) -> str:
42
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
43
+ return super().json(**kwargs_with_defaults)
44
+
45
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
46
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
47
+ return super().dict(**kwargs_with_defaults)
48
+
49
+ class Config:
50
+ frozen = True
51
+ smart_union = True
52
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,44 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .credit_type import CreditType
8
+
9
+ try:
10
+ import pydantic
11
+ if pydantic.__version__.startswith("1."):
12
+ raise ImportError
13
+ import pydantic.v1 as pydantic # type: ignore
14
+ except ImportError:
15
+ import pydantic # type: ignore
16
+
17
+
18
+ class RecurringCreditGrant(pydantic.BaseModel):
19
+ name: str
20
+ credit_amount: int
21
+ credit_type: CreditType
22
+ product_id: str = pydantic.Field(
23
+ description="The ID of the product in Metronome used to represent the credit grant"
24
+ )
25
+ priority: float
26
+ rollover_fraction: float = pydantic.Field(
27
+ description="The fraction of the credit that will roll over to the next period, between 0 and 1"
28
+ )
29
+ periods_duration: typing.Optional[float] = pydantic.Field(
30
+ description="How many billing periods the credit grant will last for"
31
+ )
32
+
33
+ def json(self, **kwargs: typing.Any) -> str:
34
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
35
+ return super().json(**kwargs_with_defaults)
36
+
37
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
38
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
39
+ return super().dict(**kwargs_with_defaults)
40
+
41
+ class Config:
42
+ frozen = True
43
+ smart_union = True
44
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -4,6 +4,8 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
+ from .free_credits_usage import FreeCreditsUsage
8
+ from .usage_active_alerts_item import UsageActiveAlertsItem
7
9
 
8
10
  try:
9
11
  import pydantic
@@ -19,13 +21,12 @@ class Usage(pydantic.BaseModel):
19
21
  Response model; use UsageSubmission for tracking
20
22
  """
21
23
 
24
+ active_free_credits_usage: typing.Optional[typing.List[FreeCreditsUsage]]
22
25
  total_users: typing.Optional[int]
23
26
  total_indexes: typing.Optional[int]
24
27
  total_indexed_pages: typing.Optional[int]
25
- extract_pages: typing.Optional[int]
26
- parse_pages: typing.Optional[int]
27
- index_pages: typing.Optional[int]
28
- credits: typing.Optional[int]
28
+ active_alerts: typing.Optional[typing.List[UsageActiveAlertsItem]]
29
+ current_invoice_total_usd_cents: typing.Optional[int]
29
30
 
30
31
  def json(self, **kwargs: typing.Any) -> str:
31
32
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -0,0 +1,25 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class UsageActiveAlertsItem(str, enum.Enum):
10
+ PLAN_SPEND_LIMIT_EXCEEDED = "plan_spend_limit_exceeded"
11
+ CONFIGURED_SPEND_LIMIT_EXCEEDED = "configured_spend_limit_exceeded"
12
+ FREE_CREDITS_EXHAUSTED = "free_credits_exhausted"
13
+
14
+ def visit(
15
+ self,
16
+ plan_spend_limit_exceeded: typing.Callable[[], T_Result],
17
+ configured_spend_limit_exceeded: typing.Callable[[], T_Result],
18
+ free_credits_exhausted: typing.Callable[[], T_Result],
19
+ ) -> T_Result:
20
+ if self is UsageActiveAlertsItem.PLAN_SPEND_LIMIT_EXCEEDED:
21
+ return plan_spend_limit_exceeded()
22
+ if self is UsageActiveAlertsItem.CONFIGURED_SPEND_LIMIT_EXCEEDED:
23
+ return configured_spend_limit_exceeded()
24
+ if self is UsageActiveAlertsItem.FREE_CREDITS_EXHAUSTED:
25
+ return free_credits_exhausted()
@@ -4,7 +4,7 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
- from .plan import Plan
7
+ from .base_plan import BasePlan
8
8
  from .usage import Usage
9
9
 
10
10
  try:
@@ -16,11 +16,9 @@ except ImportError:
16
16
  import pydantic # type: ignore
17
17
 
18
18
 
19
- class IntervalUsageAndPlan(pydantic.BaseModel):
20
- start_window: typing.Optional[dt.datetime]
21
- end_window: typing.Optional[dt.datetime]
22
- plan: typing.Optional[Plan]
23
- usage: typing.Optional[Usage]
19
+ class UsageAndPlan(pydantic.BaseModel):
20
+ plan: BasePlan
21
+ usage: Usage
24
22
 
25
23
  def json(self, **kwargs: typing.Any) -> str:
26
24
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}