llama-cloud 0.1.13__py3-none-any.whl → 0.1.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (41)
  1. llama_cloud/__init__.py +36 -10
  2. llama_cloud/resources/__init__.py +0 -14
  3. llama_cloud/resources/llama_extract/__init__.py +0 -17
  4. llama_cloud/resources/llama_extract/client.py +105 -318
  5. llama_cloud/resources/organizations/client.py +15 -5
  6. llama_cloud/resources/parsing/client.py +24 -0
  7. llama_cloud/resources/pipelines/client.py +145 -10
  8. llama_cloud/resources/projects/client.py +25 -9
  9. llama_cloud/resources/reports/client.py +16 -6
  10. llama_cloud/types/__init__.py +42 -4
  11. llama_cloud/types/{plan.py → base_plan.py} +16 -13
  12. llama_cloud/types/base_plan_metronome_plan_type.py +17 -0
  13. llama_cloud/types/base_plan_name.py +45 -0
  14. llama_cloud/types/base_plan_plan_frequency.py +25 -0
  15. llama_cloud/types/billing_period.py +32 -0
  16. llama_cloud/types/credit_type.py +32 -0
  17. llama_cloud/types/data_source.py +1 -0
  18. llama_cloud/types/extract_agent_create.py +39 -0
  19. llama_cloud/types/extract_agent_update.py +38 -0
  20. llama_cloud/types/extract_schema_validate_request.py +32 -0
  21. llama_cloud/types/free_credits_usage.py +34 -0
  22. llama_cloud/types/llama_parse_parameters.py +3 -0
  23. llama_cloud/types/paginated_list_cloud_documents_response.py +35 -0
  24. llama_cloud/types/pipeline_data_source.py +1 -0
  25. llama_cloud/types/pipeline_file.py +1 -0
  26. llama_cloud/types/plan_limits.py +52 -0
  27. llama_cloud/types/recurring_credit_grant.py +44 -0
  28. llama_cloud/types/usage.py +5 -4
  29. llama_cloud/types/usage_active_alerts_item.py +25 -0
  30. llama_cloud/types/{interval_usage_and_plan.py → usage_and_plan.py} +4 -6
  31. {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.14.dist-info}/METADATA +3 -1
  32. {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.14.dist-info}/RECORD +40 -28
  33. {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.14.dist-info}/WHEEL +1 -1
  34. llama_cloud/resources/llama_extract/types/__init__.py +0 -17
  35. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema.py +0 -0
  36. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema_zero_value.py +0 -0
  37. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema.py +0 -0
  38. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema_zero_value.py +0 -0
  39. /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema.py +0 -0
  40. /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
  41. {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.14.dist-info}/LICENSE +0 -0

llama_cloud/types/__init__.py
@@ -19,9 +19,14 @@ from .app_schema_chat_chat_message import AppSchemaChatChatMessage
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding
 from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
+from .base_plan import BasePlan
+from .base_plan_metronome_plan_type import BasePlanMetronomePlanType
+from .base_plan_name import BasePlanName
+from .base_plan_plan_frequency import BasePlanPlanFrequency
 from .base_prompt_template import BasePromptTemplate
 from .bedrock_embedding import BedrockEmbedding
 from .bedrock_embedding_config import BedrockEmbeddingConfig
+from .billing_period import BillingPeriod
 from .box_auth_mechanism import BoxAuthMechanism
 from .character_chunking_config import CharacterChunkingConfig
 from .character_splitter import CharacterSplitter
@@ -60,6 +65,7 @@ from .configurable_transformation_definition import ConfigurableTransformationDe
 from .configurable_transformation_names import ConfigurableTransformationNames
 from .configured_transformation_item import ConfiguredTransformationItem
 from .configured_transformation_item_component import ConfiguredTransformationItemComponent
+from .credit_type import CreditType
 from .data_sink import DataSink
 from .data_sink_component import DataSinkComponent
 from .data_sink_create import DataSinkCreate
@@ -107,7 +113,13 @@ from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
 from .extract_agent import ExtractAgent
+from .extract_agent_create import ExtractAgentCreate
+from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
+from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
 from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
+from .extract_agent_update import ExtractAgentUpdate
+from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
+from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
 from .extract_config import ExtractConfig
 from .extract_job import ExtractJob
 from .extract_job_create import ExtractJobCreate
@@ -125,6 +137,9 @@ from .extract_run_data_item_value import ExtractRunDataItemValue
 from .extract_run_data_schema_value import ExtractRunDataSchemaValue
 from .extract_run_data_zero_value import ExtractRunDataZeroValue
 from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
+from .extract_schema_validate_request import ExtractSchemaValidateRequest
+from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
+from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
 from .extract_schema_validate_response import ExtractSchemaValidateResponse
 from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
 from .extract_state import ExtractState
@@ -134,6 +149,7 @@ from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
 from .filter_condition import FilterCondition
 from .filter_operator import FilterOperator
+from .free_credits_usage import FreeCreditsUsage
 from .gemini_embedding import GeminiEmbedding
 from .gemini_embedding_config import GeminiEmbeddingConfig
 from .http_validation_error import HttpValidationError
@@ -143,7 +159,6 @@ from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiE
 from .image_block import ImageBlock
 from .ingestion_error_response import IngestionErrorResponse
 from .input_message import InputMessage
-from .interval_usage_and_plan import IntervalUsageAndPlan
 from .job_name_mapping import JobNameMapping
 from .job_names import JobNames
 from .job_record import JobRecord
@@ -189,6 +204,7 @@ from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .page_segmentation_config import PageSegmentationConfig
 from .page_splitter_node_parser import PageSplitterNodeParser
 from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
+from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
 from .paginated_report_response import PaginatedReportResponse
 from .parse_plan_level import ParsePlanLevel
@@ -245,7 +261,7 @@ from .pipeline_transform_config import (
     PipelineTransformConfig_Auto,
 )
 from .pipeline_type import PipelineType
-from .plan import Plan
+from .plan_limits import PlanLimits
 from .playground_session import PlaygroundSession
 from .pooling import Pooling
 from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
@@ -259,6 +275,7 @@ from .prompt_conf import PromptConf
 from .prompt_mixin_prompts import PromptMixinPrompts
 from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
+from .recurring_credit_grant import RecurringCreditGrant
 from .related_node_info import RelatedNodeInfo
 from .related_node_info_node_type import RelatedNodeInfoNodeType
 from .report import Report
@@ -304,6 +321,8 @@ from .token_chunking_config import TokenChunkingConfig
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
 from .usage import Usage
+from .usage_active_alerts_item import UsageActiveAlertsItem
+from .usage_and_plan import UsageAndPlan
 from .usage_metric_response import UsageMetricResponse
 from .user_job_record import UserJobRecord
 from .user_organization import UserOrganization
@@ -332,9 +351,14 @@ __all__ = [
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
     "AzureOpenAiEmbeddingConfig",
+    "BasePlan",
+    "BasePlanMetronomePlanType",
+    "BasePlanName",
+    "BasePlanPlanFrequency",
     "BasePromptTemplate",
     "BedrockEmbedding",
     "BedrockEmbeddingConfig",
+    "BillingPeriod",
     "BoxAuthMechanism",
     "CharacterChunkingConfig",
     "CharacterSplitter",
@@ -373,6 +397,7 @@ __all__ = [
     "ConfigurableTransformationNames",
     "ConfiguredTransformationItem",
     "ConfiguredTransformationItemComponent",
+    "CreditType",
     "DataSink",
     "DataSinkComponent",
     "DataSinkCreate",
@@ -416,7 +441,13 @@ __all__ = [
     "EvalQuestionCreate",
     "EvalQuestionResult",
     "ExtractAgent",
+    "ExtractAgentCreate",
+    "ExtractAgentCreateDataSchema",
+    "ExtractAgentCreateDataSchemaZeroValue",
     "ExtractAgentDataSchemaValue",
+    "ExtractAgentUpdate",
+    "ExtractAgentUpdateDataSchema",
+    "ExtractAgentUpdateDataSchemaZeroValue",
     "ExtractConfig",
     "ExtractJob",
     "ExtractJobCreate",
@@ -434,6 +465,9 @@ __all__ = [
     "ExtractRunDataSchemaValue",
     "ExtractRunDataZeroValue",
     "ExtractRunExtractionMetadataValue",
+    "ExtractSchemaValidateRequest",
+    "ExtractSchemaValidateRequestDataSchema",
+    "ExtractSchemaValidateRequestDataSchemaZeroValue",
     "ExtractSchemaValidateResponse",
     "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
@@ -443,6 +477,7 @@ __all__ = [
     "FileResourceInfoValue",
     "FilterCondition",
     "FilterOperator",
+    "FreeCreditsUsage",
    "GeminiEmbedding",
     "GeminiEmbeddingConfig",
     "HttpValidationError",
@@ -452,7 +487,6 @@ __all__ = [
     "ImageBlock",
     "IngestionErrorResponse",
     "InputMessage",
-    "IntervalUsageAndPlan",
     "JobNameMapping",
     "JobNames",
     "JobRecord",
@@ -496,6 +530,7 @@ __all__ = [
     "PageSegmentationConfig",
     "PageSplitterNodeParser",
     "PaginatedJobsHistoryWithMetrics",
+    "PaginatedListCloudDocumentsResponse",
     "PaginatedListPipelineFilesResponse",
     "PaginatedReportResponse",
     "ParsePlanLevel",
@@ -546,7 +581,7 @@ __all__ = [
     "PipelineTransformConfig_Advanced",
     "PipelineTransformConfig_Auto",
     "PipelineType",
-    "Plan",
+    "PlanLimits",
     "PlaygroundSession",
     "Pooling",
     "PresetCompositeRetrievalParams",
@@ -560,6 +595,7 @@ __all__ = [
     "PromptMixinPrompts",
     "PromptSpec",
     "PydanticProgramMode",
+    "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
     "Report",
@@ -603,6 +639,8 @@ __all__ = [
     "TokenTextSplitter",
     "TransformationCategoryNames",
     "Usage",
+    "UsageActiveAlertsItem",
+    "UsageAndPlan",
     "UsageMetricResponse",
     "UserJobRecord",
     "UserOrganization",

llama_cloud/types/{plan.py → base_plan.py}
@@ -4,6 +4,12 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .base_plan_metronome_plan_type import BasePlanMetronomePlanType
+from .base_plan_name import BasePlanName
+from .base_plan_plan_frequency import BasePlanPlanFrequency
+from .billing_period import BillingPeriod
+from .plan_limits import PlanLimits
+from .recurring_credit_grant import RecurringCreditGrant
 
 try:
     import pydantic
@@ -14,21 +20,18 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class Plan(pydantic.BaseModel):
-    id: str = pydantic.Field(description="The ID of the plan")
-    name: typing.Optional[str]
-    total_users: typing.Optional[int]
-    total_indexes: typing.Optional[int]
-    total_indexed_pages: typing.Optional[int]
-    credits: typing.Optional[int]
-    has_payment_method: typing.Optional[bool]
-    free: typing.Optional[bool] = pydantic.Field(description="If is a free plan")
-    allowed_index: typing.Optional[bool] = pydantic.Field(description="If is allowed to use indexes")
-    allowed_external_index: typing.Optional[bool] = pydantic.Field(
-        description="If is allowed to use external data sources or sinks in indexes"
-    )
+class BasePlan(pydantic.BaseModel):
+    id: typing.Optional[str]
+    name: BasePlanName
+    metronome_plan_type: BasePlanMetronomePlanType
+    metronome_rate_card_alias: typing.Optional[str]
+    limits: PlanLimits
+    recurring_credits: typing.Optional[typing.List[RecurringCreditGrant]]
+    plan_frequency: BasePlanPlanFrequency
+    metronome_customer_id: typing.Optional[str]
     starting_on: typing.Optional[dt.datetime]
     ending_before: typing.Optional[dt.datetime]
+    current_billing_period: typing.Optional[BillingPeriod]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
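
For SDK consumers this rename is a breaking change: the flat quota fields on Plan (total_users, total_indexes, credits, and so on) are gone, name tightens from Optional[str] to the BasePlanName enum, and limits move onto the nested PlanLimits object. A minimal sketch of adapting read-only code, using only the fields visible in this hunk:

    from llama_cloud import BasePlan

    def describe_plan(plan: BasePlan) -> str:
        # 0.1.13: plan.name was Optional[str]; 0.1.14: a BasePlanName str-enum
        label = plan.name.value
        # quotas that used to sit directly on Plan now live under plan.limits
        if plan.limits.max_users is not None:
            label += f" (up to {plan.limits.max_users} users)"
        return label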

llama_cloud/types/base_plan_metronome_plan_type.py (new file)
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class BasePlanMetronomePlanType(str, enum.Enum):
+    PLAN = "plan"
+    CONTRACT = "contract"
+
+    def visit(self, plan: typing.Callable[[], T_Result], contract: typing.Callable[[], T_Result]) -> T_Result:
+        if self is BasePlanMetronomePlanType.PLAN:
+            return plan()
+        if self is BasePlanMetronomePlanType.CONTRACT:
+            return contract()
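
The generated enums pair each member with a visit() helper for exhaustive matching. A small usage sketch (the return strings are illustrative):

    from llama_cloud import BasePlanMetronomePlanType

    plan_type = BasePlanMetronomePlanType.CONTRACT
    backend = plan_type.visit(
        plan=lambda: "billed via a Metronome plan",
        contract=lambda: "billed via a Metronome contract",
    )
    assert backend == "billed via a Metronome contract"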

llama_cloud/types/base_plan_name.py (new file)
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class BasePlanName(str, enum.Enum):
+    FREE = "free"
+    LLAMA_PARSE = "llama_parse"
+    ENTERPRISE = "enterprise"
+    UNKNOWN = "unknown"
+    FREE_CONTRACT = "free_contract"
+    PRO = "pro"
+    ENTERPRISE_CONTRACT = "enterprise_contract"
+    ENTERPRISE_POC = "enterprise_poc"
+
+    def visit(
+        self,
+        free: typing.Callable[[], T_Result],
+        llama_parse: typing.Callable[[], T_Result],
+        enterprise: typing.Callable[[], T_Result],
+        unknown: typing.Callable[[], T_Result],
+        free_contract: typing.Callable[[], T_Result],
+        pro: typing.Callable[[], T_Result],
+        enterprise_contract: typing.Callable[[], T_Result],
+        enterprise_poc: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is BasePlanName.FREE:
+            return free()
+        if self is BasePlanName.LLAMA_PARSE:
+            return llama_parse()
+        if self is BasePlanName.ENTERPRISE:
+            return enterprise()
+        if self is BasePlanName.UNKNOWN:
+            return unknown()
+        if self is BasePlanName.FREE_CONTRACT:
+            return free_contract()
+        if self is BasePlanName.PRO:
+            return pro()
+        if self is BasePlanName.ENTERPRISE_CONTRACT:
+            return enterprise_contract()
+        if self is BasePlanName.ENTERPRISE_POC:
+            return enterprise_poc()

llama_cloud/types/base_plan_plan_frequency.py (new file)
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class BasePlanPlanFrequency(str, enum.Enum):
+    MONTHLY = "MONTHLY"
+    QUARTERLY = "QUARTERLY"
+    ANNUAL = "ANNUAL"
+
+    def visit(
+        self,
+        monthly: typing.Callable[[], T_Result],
+        quarterly: typing.Callable[[], T_Result],
+        annual: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is BasePlanPlanFrequency.MONTHLY:
+            return monthly()
+        if self is BasePlanPlanFrequency.QUARTERLY:
+            return quarterly()
+        if self is BasePlanPlanFrequency.ANNUAL:
+            return annual()

llama_cloud/types/billing_period.py (new file)
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class BillingPeriod(pydantic.BaseModel):
+    start_date: dt.datetime
+    end_date: dt.datetime
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
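
Like the other generated models, BillingPeriod is frozen and its json()/dict() overrides default to by_alias/exclude_unset. A quick construction sketch with illustrative dates:

    import datetime as dt
    from llama_cloud import BillingPeriod

    period = BillingPeriod(
        start_date=dt.datetime(2025, 1, 1, tzinfo=dt.timezone.utc),
        end_date=dt.datetime(2025, 2, 1, tzinfo=dt.timezone.utc),
    )
    print(period.json())  # datetimes are encoded via serialize_datetime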

llama_cloud/types/credit_type.py (new file)
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CreditType(pydantic.BaseModel):
+    id: str
+    name: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/data_source.py
@@ -29,6 +29,7 @@ class DataSource(pydantic.BaseModel):
     source_type: ConfigurableDataSourceNames
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
     component: DataSourceComponent = pydantic.Field(description="Component that implements the data source")
+    version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
     project_id: str
 
     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/extract_agent_create.py (new file)
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
+from .extract_config import ExtractConfig
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractAgentCreate(pydantic.BaseModel):
+    """
+    Settings for creating an extraction agent.
+    """
+
+    name: str = pydantic.Field(description="The name of the extraction schema")
+    data_schema: ExtractAgentCreateDataSchema = pydantic.Field(description="The schema of the data.")
+    config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
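
ExtractAgentCreate is the request body for the reworked llama_extract client. A sketch of building one; the JSON-schema payload is illustrative, and it assumes ExtractAgentCreateDataSchema accepts a plain mapping and that ExtractConfig can be constructed with defaults:

    from llama_cloud import ExtractAgentCreate, ExtractConfig

    request = ExtractAgentCreate(
        name="invoice-extractor",
        data_schema={  # assumption: a plain JSON-schema dict is accepted here
            "type": "object",
            "properties": {"vendor": {"type": "string"}},
        },
        config=ExtractConfig(),  # assumption: all config fields have defaults
    )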

llama_cloud/types/extract_agent_update.py (new file)
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
+from .extract_config import ExtractConfig
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractAgentUpdate(pydantic.BaseModel):
+    """
+    Settings for updating an extraction schema.
+    """
+
+    data_schema: ExtractAgentUpdateDataSchema = pydantic.Field(description="The schema of the data")
+    config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/extract_schema_validate_request.py (new file)
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractSchemaValidateRequest(pydantic.BaseModel):
+    data_schema: ExtractSchemaValidateRequestDataSchema
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/free_credits_usage.py (new file)
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FreeCreditsUsage(pydantic.BaseModel):
+    starting_balance: int
+    remaining_balance: int
+    grant_name: str
+    expires_at: dt.datetime
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/llama_parse_parameters.py
@@ -25,6 +25,8 @@ class LlamaParseParameters(pydantic.BaseModel):
     parsing_instruction: typing.Optional[str]
     disable_ocr: typing.Optional[bool]
     annotate_links: typing.Optional[bool]
+    adaptive_long_table: typing.Optional[bool]
+    compact_markdown_table: typing.Optional[bool]
     disable_reconstruction: typing.Optional[bool]
     disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
@@ -50,6 +52,7 @@
     target_pages: typing.Optional[str]
     use_vendor_multimodal_model: typing.Optional[bool]
     vendor_multimodal_model_name: typing.Optional[str]
+    model: typing.Optional[str]
     vendor_multimodal_api_key: typing.Optional[str]
     page_prefix: typing.Optional[str]
     page_suffix: typing.Optional[str]
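
All three additions are plain optional fields, so opting in is just a matter of passing them; a sketch (the model value is hypothetical):

    from llama_cloud import LlamaParseParameters

    params = LlamaParseParameters(
        adaptive_long_table=True,     # new in 0.1.14
        compact_markdown_table=True,  # new in 0.1.14
        model="some-vendor-model",    # new shorthand; value is a placeholder
    )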

llama_cloud/types/paginated_list_cloud_documents_response.py (new file)
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .cloud_document import CloudDocument
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PaginatedListCloudDocumentsResponse(pydantic.BaseModel):
+    documents: typing.List[CloudDocument] = pydantic.Field(description="The documents to list")
+    limit: int = pydantic.Field(description="The limit of the documents")
+    offset: int = pydantic.Field(description="The offset of the documents")
+    total_count: int = pydantic.Field(description="The total number of documents")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
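
The limit/offset/total_count trio supports standard offset pagination. A sketch against a hypothetical list-documents call on a configured client (the method name and its parameters are placeholders, not a confirmed 0.1.14 API):

    documents = []
    offset, page_size = 0, 100
    while True:
        # hypothetical call returning a PaginatedListCloudDocumentsResponse
        page = client.pipelines.list_pipeline_documents(
            pipeline_id, offset=offset, limit=page_size
        )
        documents.extend(page.documents)
        offset += len(page.documents)
        if not page.documents or offset >= page.total_count:
            break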

llama_cloud/types/pipeline_data_source.py
@@ -29,6 +29,7 @@ class PipelineDataSource(pydantic.BaseModel):
     source_type: ConfigurableDataSourceNames
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
     component: PipelineDataSourceComponent = pydantic.Field(description="Component that implements the data source")
+    version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
     project_id: str
     data_source_id: str = pydantic.Field(description="The ID of the data source.")
     pipeline_id: str = pydantic.Field(description="The ID of the pipeline.")

llama_cloud/types/pipeline_file.py
@@ -40,6 +40,7 @@ class PipelineFile(pydantic.BaseModel):
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
     config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
     indexed_page_count: typing.Optional[int]
+    status: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/plan_limits.py (new file)
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PlanLimits(pydantic.BaseModel):
+    allow_pay_as_you_go: bool = pydantic.Field(description="Whether usage is allowed after credit grants are exhausted")
+    subscription_cost_usd: int
+    max_monthly_invoice_total_usd: typing.Optional[int]
+    max_concurrent_parse_jobs_premium: typing.Optional[int]
+    max_concurrent_parse_jobs_other: typing.Optional[int]
+    max_extraction_agents: typing.Optional[int]
+    max_extraction_runs: typing.Optional[int]
+    max_extraction_jobs: typing.Optional[int]
+    max_pages_per_index: typing.Optional[int]
+    max_files_per_index: typing.Optional[int]
+    max_indexes: typing.Optional[int]
+    max_concurrent_index_jobs: typing.Optional[int]
+    max_data_sources: typing.Optional[int]
+    max_embedding_models: typing.Optional[int]
+    max_data_sinks: typing.Optional[int]
+    max_published_agents: typing.Optional[int]
+    max_report_agent_sessions: typing.Optional[int]
+    max_users: typing.Optional[int]
+    max_organizations: typing.Optional[int]
+    max_projects: typing.Optional[int]
+    mfa_enabled: bool
+    sso_enabled: bool
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
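
Most caps are Optional[int], where None reads naturally as "no limit". A small guard sketch over the fields above:

    from llama_cloud import BasePlan

    def can_create_index(plan: BasePlan, existing_indexes: int) -> bool:
        cap = plan.limits.max_indexes
        return cap is None or existing_indexes < cap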