llama-cloud 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of llama-cloud has been flagged as a potentially problematic release.
- llama_cloud/__init__.py +44 -10
- llama_cloud/resources/__init__.py +0 -14
- llama_cloud/resources/llama_extract/__init__.py +0 -17
- llama_cloud/resources/llama_extract/client.py +195 -246
- llama_cloud/resources/organizations/client.py +15 -5
- llama_cloud/resources/parsing/client.py +16 -0
- llama_cloud/resources/pipelines/client.py +145 -10
- llama_cloud/resources/projects/client.py +25 -9
- llama_cloud/resources/reports/client.py +16 -6
- llama_cloud/types/__init__.py +50 -4
- llama_cloud/types/{plan.py → base_plan.py} +16 -13
- llama_cloud/types/base_plan_metronome_plan_type.py +17 -0
- llama_cloud/types/base_plan_name.py +45 -0
- llama_cloud/types/base_plan_plan_frequency.py +25 -0
- llama_cloud/types/billing_period.py +32 -0
- llama_cloud/types/chunk_mode.py +4 -0
- llama_cloud/types/credit_type.py +32 -0
- llama_cloud/types/data_source.py +1 -0
- llama_cloud/types/extract_agent_create.py +39 -0
- llama_cloud/types/extract_agent_update.py +38 -0
- llama_cloud/types/extract_job_create_batch.py +42 -0
- llama_cloud/types/extract_job_create_batch_data_schema_override.py +9 -0
- llama_cloud/types/extract_job_create_batch_data_schema_override_zero_value.py +7 -0
- llama_cloud/types/extract_schema_validate_request.py +32 -0
- llama_cloud/types/free_credits_usage.py +34 -0
- llama_cloud/types/llama_parse_parameters.py +2 -0
- llama_cloud/types/paginated_extract_runs_response.py +39 -0
- llama_cloud/types/paginated_list_cloud_documents_response.py +35 -0
- llama_cloud/types/pipeline_data_source.py +1 -0
- llama_cloud/types/pipeline_file.py +1 -0
- llama_cloud/types/plan_limits.py +52 -0
- llama_cloud/types/recurring_credit_grant.py +44 -0
- llama_cloud/types/usage.py +7 -5
- llama_cloud/types/usage_active_alerts_item.py +25 -0
- llama_cloud/types/{interval_usage_and_plan.py → usage_and_plan.py} +4 -6
- {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.15.dist-info}/METADATA +3 -1
- {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.15.dist-info}/RECORD +45 -29
- {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.15.dist-info}/WHEEL +1 -1
- llama_cloud/resources/llama_extract/types/__init__.py +0 -17
- /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema_zero_value.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema_zero_value.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
- {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.15.dist-info}/LICENSE +0 -0
llama_cloud/types/__init__.py
CHANGED
@@ -19,9 +19,14 @@ from .app_schema_chat_chat_message import AppSchemaChatChatMessage
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding
 from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
+from .base_plan import BasePlan
+from .base_plan_metronome_plan_type import BasePlanMetronomePlanType
+from .base_plan_name import BasePlanName
+from .base_plan_plan_frequency import BasePlanPlanFrequency
 from .base_prompt_template import BasePromptTemplate
 from .bedrock_embedding import BedrockEmbedding
 from .bedrock_embedding_config import BedrockEmbeddingConfig
+from .billing_period import BillingPeriod
 from .box_auth_mechanism import BoxAuthMechanism
 from .character_chunking_config import CharacterChunkingConfig
 from .character_splitter import CharacterSplitter
@@ -60,6 +65,7 @@ from .configurable_transformation_definition import ConfigurableTransformationDefinition
 from .configurable_transformation_names import ConfigurableTransformationNames
 from .configured_transformation_item import ConfiguredTransformationItem
 from .configured_transformation_item_component import ConfiguredTransformationItemComponent
+from .credit_type import CreditType
 from .data_sink import DataSink
 from .data_sink_component import DataSinkComponent
 from .data_sink_create import DataSinkCreate
@@ -107,10 +113,19 @@ from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
 from .extract_agent import ExtractAgent
+from .extract_agent_create import ExtractAgentCreate
+from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
+from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
 from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
+from .extract_agent_update import ExtractAgentUpdate
+from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
+from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
 from .extract_config import ExtractConfig
 from .extract_job import ExtractJob
 from .extract_job_create import ExtractJobCreate
+from .extract_job_create_batch import ExtractJobCreateBatch
+from .extract_job_create_batch_data_schema_override import ExtractJobCreateBatchDataSchemaOverride
+from .extract_job_create_batch_data_schema_override_zero_value import ExtractJobCreateBatchDataSchemaOverrideZeroValue
 from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
 from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
 from .extract_mode import ExtractMode
@@ -125,6 +140,9 @@ from .extract_run_data_item_value import ExtractRunDataItemValue
 from .extract_run_data_schema_value import ExtractRunDataSchemaValue
 from .extract_run_data_zero_value import ExtractRunDataZeroValue
 from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
+from .extract_schema_validate_request import ExtractSchemaValidateRequest
+from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
+from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
 from .extract_schema_validate_response import ExtractSchemaValidateResponse
 from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
 from .extract_state import ExtractState
@@ -134,6 +152,7 @@ from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
 from .filter_condition import FilterCondition
 from .filter_operator import FilterOperator
+from .free_credits_usage import FreeCreditsUsage
 from .gemini_embedding import GeminiEmbedding
 from .gemini_embedding_config import GeminiEmbeddingConfig
 from .http_validation_error import HttpValidationError
@@ -143,7 +162,6 @@ from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
 from .image_block import ImageBlock
 from .ingestion_error_response import IngestionErrorResponse
 from .input_message import InputMessage
-from .interval_usage_and_plan import IntervalUsageAndPlan
 from .job_name_mapping import JobNameMapping
 from .job_names import JobNames
 from .job_record import JobRecord
@@ -188,7 +206,9 @@ from .page_screenshot_metadata import PageScreenshotMetadata
 from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .page_segmentation_config import PageSegmentationConfig
 from .page_splitter_node_parser import PageSplitterNodeParser
+from .paginated_extract_runs_response import PaginatedExtractRunsResponse
 from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
+from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
 from .paginated_report_response import PaginatedReportResponse
 from .parse_plan_level import ParsePlanLevel
@@ -245,7 +265,7 @@ from .pipeline_transform_config import (
     PipelineTransformConfig_Auto,
 )
 from .pipeline_type import PipelineType
-from .plan import Plan
+from .plan_limits import PlanLimits
 from .playground_session import PlaygroundSession
 from .pooling import Pooling
 from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
@@ -259,6 +279,7 @@ from .prompt_conf import PromptConf
 from .prompt_mixin_prompts import PromptMixinPrompts
 from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
+from .recurring_credit_grant import RecurringCreditGrant
 from .related_node_info import RelatedNodeInfo
 from .related_node_info_node_type import RelatedNodeInfoNodeType
 from .report import Report
@@ -304,6 +325,8 @@ from .token_chunking_config import TokenChunkingConfig
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
 from .usage import Usage
+from .usage_active_alerts_item import UsageActiveAlertsItem
+from .usage_and_plan import UsageAndPlan
 from .usage_metric_response import UsageMetricResponse
 from .user_job_record import UserJobRecord
 from .user_organization import UserOrganization
@@ -332,9 +355,14 @@ __all__ = [
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
     "AzureOpenAiEmbeddingConfig",
+    "BasePlan",
+    "BasePlanMetronomePlanType",
+    "BasePlanName",
+    "BasePlanPlanFrequency",
     "BasePromptTemplate",
     "BedrockEmbedding",
     "BedrockEmbeddingConfig",
+    "BillingPeriod",
     "BoxAuthMechanism",
     "CharacterChunkingConfig",
     "CharacterSplitter",
@@ -373,6 +401,7 @@ __all__ = [
     "ConfigurableTransformationNames",
     "ConfiguredTransformationItem",
     "ConfiguredTransformationItemComponent",
+    "CreditType",
     "DataSink",
     "DataSinkComponent",
     "DataSinkCreate",
@@ -416,10 +445,19 @@ __all__ = [
     "EvalQuestionCreate",
     "EvalQuestionResult",
     "ExtractAgent",
+    "ExtractAgentCreate",
+    "ExtractAgentCreateDataSchema",
+    "ExtractAgentCreateDataSchemaZeroValue",
     "ExtractAgentDataSchemaValue",
+    "ExtractAgentUpdate",
+    "ExtractAgentUpdateDataSchema",
+    "ExtractAgentUpdateDataSchemaZeroValue",
     "ExtractConfig",
     "ExtractJob",
     "ExtractJobCreate",
+    "ExtractJobCreateBatch",
+    "ExtractJobCreateBatchDataSchemaOverride",
+    "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
     "ExtractJobCreateDataSchemaOverride",
     "ExtractJobCreateDataSchemaOverrideZeroValue",
     "ExtractMode",
@@ -434,6 +472,9 @@ __all__ = [
     "ExtractRunDataSchemaValue",
     "ExtractRunDataZeroValue",
     "ExtractRunExtractionMetadataValue",
+    "ExtractSchemaValidateRequest",
+    "ExtractSchemaValidateRequestDataSchema",
+    "ExtractSchemaValidateRequestDataSchemaZeroValue",
     "ExtractSchemaValidateResponse",
     "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
@@ -443,6 +484,7 @@ __all__ = [
     "FileResourceInfoValue",
     "FilterCondition",
     "FilterOperator",
+    "FreeCreditsUsage",
     "GeminiEmbedding",
     "GeminiEmbeddingConfig",
     "HttpValidationError",
@@ -452,7 +494,6 @@ __all__ = [
     "ImageBlock",
     "IngestionErrorResponse",
     "InputMessage",
-    "IntervalUsageAndPlan",
    "JobNameMapping",
     "JobNames",
     "JobRecord",
@@ -495,7 +536,9 @@ __all__ = [
     "PageScreenshotNodeWithScore",
     "PageSegmentationConfig",
     "PageSplitterNodeParser",
+    "PaginatedExtractRunsResponse",
     "PaginatedJobsHistoryWithMetrics",
+    "PaginatedListCloudDocumentsResponse",
     "PaginatedListPipelineFilesResponse",
     "PaginatedReportResponse",
     "ParsePlanLevel",
@@ -546,7 +589,7 @@ __all__ = [
     "PipelineTransformConfig_Advanced",
     "PipelineTransformConfig_Auto",
     "PipelineType",
-    "Plan",
+    "PlanLimits",
     "PlaygroundSession",
     "Pooling",
     "PresetCompositeRetrievalParams",
@@ -560,6 +603,7 @@ __all__ = [
     "PromptMixinPrompts",
     "PromptSpec",
     "PydanticProgramMode",
+    "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
     "Report",
@@ -603,6 +647,8 @@ __all__ = [
     "TokenTextSplitter",
     "TransformationCategoryNames",
     "Usage",
+    "UsageActiveAlertsItem",
+    "UsageAndPlan",
     "UsageMetricResponse",
     "UserJobRecord",
     "UserOrganization",
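Taken together, the `__init__.py` changes drop the `Plan` and `IntervalUsageAndPlan` exports and add the new billing and extraction types. A quick sketch of consumer-side imports that should resolve against 0.1.15; every name below appears in the `__all__` additions above:

```python
# Sketch only: a sampling of the new public exports in 0.1.15.
from llama_cloud.types import (
    BasePlan,
    BillingPeriod,
    CreditType,
    ExtractAgentCreate,
    ExtractJobCreateBatch,
    PaginatedExtractRunsResponse,
    UsageAndPlan,
)
```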
llama_cloud/types/{plan.py → base_plan.py}
RENAMED

@@ -4,6 +4,12 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from .base_plan_metronome_plan_type import BasePlanMetronomePlanType
+from .base_plan_name import BasePlanName
+from .base_plan_plan_frequency import BasePlanPlanFrequency
+from .billing_period import BillingPeriod
+from .plan_limits import PlanLimits
+from .recurring_credit_grant import RecurringCreditGrant

 try:
     import pydantic
@@ -14,21 +20,18 @@ except ImportError:
     import pydantic  # type: ignore


-class Plan(pydantic.BaseModel):
-    id: str
-    name: …
-    …
-    allowed_index: typing.Optional[bool] = pydantic.Field(description="If is allowed to use indexes")
-    allowed_external_index: typing.Optional[bool] = pydantic.Field(
-        description="If is allowed to use external data sources or sinks in indexes"
-    )
+class BasePlan(pydantic.BaseModel):
+    id: typing.Optional[str]
+    name: BasePlanName
+    metronome_plan_type: BasePlanMetronomePlanType
+    metronome_rate_card_alias: typing.Optional[str]
+    limits: PlanLimits
+    recurring_credits: typing.Optional[typing.List[RecurringCreditGrant]]
+    plan_frequency: BasePlanPlanFrequency
+    metronome_customer_id: typing.Optional[str]
     starting_on: typing.Optional[dt.datetime]
     ending_before: typing.Optional[dt.datetime]
+    current_billing_period: typing.Optional[BillingPeriod]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
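Since `Plan` no longer exists, code that imported it needs to move to `BasePlan`, and `name` is now a `BasePlanName` enum rather than a plain string. A minimal migration sketch; the helper below is hypothetical:

```python
from llama_cloud.types import BasePlan, BasePlanName  # `Plan` was removed in 0.1.15

def is_enterprise(plan: BasePlan) -> bool:
    # plan.name is now an enum; compare against BasePlanName members
    # (the str-valued enum still compares equal to its raw string).
    return plan.name in (BasePlanName.ENTERPRISE, BasePlanName.ENTERPRISE_CONTRACT)
```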
llama_cloud/types/base_plan_metronome_plan_type.py
ADDED

@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class BasePlanMetronomePlanType(str, enum.Enum):
+    PLAN = "plan"
+    CONTRACT = "contract"
+
+    def visit(self, plan: typing.Callable[[], T_Result], contract: typing.Callable[[], T_Result]) -> T_Result:
+        if self is BasePlanMetronomePlanType.PLAN:
+            return plan()
+        if self is BasePlanMetronomePlanType.CONTRACT:
+            return contract()
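Like the other Fern-generated enums, this one carries an exhaustive `visit` dispatcher. A usage sketch:

```python
from llama_cloud.types import BasePlanMetronomePlanType

plan_type = BasePlanMetronomePlanType.CONTRACT
label = plan_type.visit(
    plan=lambda: "legacy Metronome plan",
    contract=lambda: "Metronome contract",
)
assert label == "Metronome contract"
```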
llama_cloud/types/base_plan_name.py
ADDED

@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class BasePlanName(str, enum.Enum):
+    FREE = "free"
+    LLAMA_PARSE = "llama_parse"
+    ENTERPRISE = "enterprise"
+    UNKNOWN = "unknown"
+    FREE_CONTRACT = "free_contract"
+    PRO = "pro"
+    ENTERPRISE_CONTRACT = "enterprise_contract"
+    ENTERPRISE_POC = "enterprise_poc"
+
+    def visit(
+        self,
+        free: typing.Callable[[], T_Result],
+        llama_parse: typing.Callable[[], T_Result],
+        enterprise: typing.Callable[[], T_Result],
+        unknown: typing.Callable[[], T_Result],
+        free_contract: typing.Callable[[], T_Result],
+        pro: typing.Callable[[], T_Result],
+        enterprise_contract: typing.Callable[[], T_Result],
+        enterprise_poc: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is BasePlanName.FREE:
+            return free()
+        if self is BasePlanName.LLAMA_PARSE:
+            return llama_parse()
+        if self is BasePlanName.ENTERPRISE:
+            return enterprise()
+        if self is BasePlanName.UNKNOWN:
+            return unknown()
+        if self is BasePlanName.FREE_CONTRACT:
+            return free_contract()
+        if self is BasePlanName.PRO:
+            return pro()
+        if self is BasePlanName.ENTERPRISE_CONTRACT:
+            return enterprise_contract()
+        if self is BasePlanName.ENTERPRISE_POC:
+            return enterprise_poc()
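Because the enum subclasses `str`, raw plan-name strings from API responses coerce cleanly. For example:

```python
from llama_cloud.types import BasePlanName

assert BasePlanName("enterprise_poc") is BasePlanName.ENTERPRISE_POC
assert BasePlanName.PRO == "pro"  # str-valued members compare equal to their raw value
```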
llama_cloud/types/base_plan_plan_frequency.py
ADDED

@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class BasePlanPlanFrequency(str, enum.Enum):
+    MONTHLY = "MONTHLY"
+    QUARTERLY = "QUARTERLY"
+    ANNUAL = "ANNUAL"
+
+    def visit(
+        self,
+        monthly: typing.Callable[[], T_Result],
+        quarterly: typing.Callable[[], T_Result],
+        annual: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is BasePlanPlanFrequency.MONTHLY:
+            return monthly()
+        if self is BasePlanPlanFrequency.QUARTERLY:
+            return quarterly()
+        if self is BasePlanPlanFrequency.ANNUAL:
+            return annual()
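Same `visit` pattern; the callbacks make it easy to map a frequency to, say, a month count. The mapping below is illustrative, not part of the SDK:

```python
from llama_cloud.types import BasePlanPlanFrequency

months = BasePlanPlanFrequency.QUARTERLY.visit(
    monthly=lambda: 1,
    quarterly=lambda: 3,
    annual=lambda: 12,
)
assert months == 3
```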
llama_cloud/types/billing_period.py
ADDED

@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class BillingPeriod(pydantic.BaseModel):
+    start_date: dt.datetime
+    end_date: dt.datetime
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
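The model overrides `json`/`dict` to serialize by alias and run datetimes through `serialize_datetime`. A construction sketch with made-up dates:

```python
import datetime as dt
from llama_cloud.types import BillingPeriod

period = BillingPeriod(
    start_date=dt.datetime(2025, 1, 1, tzinfo=dt.timezone.utc),  # illustrative dates
    end_date=dt.datetime(2025, 2, 1, tzinfo=dt.timezone.utc),
)
print(period.json())  # datetimes are encoded via the json_encoders hook
```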
llama_cloud/types/chunk_mode.py
CHANGED
@@ -10,12 +10,14 @@ class ChunkMode(str, enum.Enum):
     PAGE = "PAGE"
     DOCUMENT = "DOCUMENT"
     SECTION = "SECTION"
+    GROUPED_PAGES = "GROUPED_PAGES"

     def visit(
         self,
         page: typing.Callable[[], T_Result],
         document: typing.Callable[[], T_Result],
         section: typing.Callable[[], T_Result],
+        grouped_pages: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ChunkMode.PAGE:
             return page()
@@ -23,3 +25,5 @@ class ChunkMode(str, enum.Enum):
             return document()
         if self is ChunkMode.SECTION:
             return section()
+        if self is ChunkMode.GROUPED_PAGES:
+            return grouped_pages()
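Note that `grouped_pages` is a new required parameter of `visit`, so existing call sites must be updated. Sketch:

```python
from llama_cloud.types import ChunkMode

description = ChunkMode.GROUPED_PAGES.visit(
    page=lambda: "one chunk per page",
    document=lambda: "one chunk per document",
    section=lambda: "one chunk per section",
    grouped_pages=lambda: "pages grouped into chunks",  # new in 0.1.15
)
```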
llama_cloud/types/credit_type.py
ADDED

@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CreditType(pydantic.BaseModel):
+    id: str
+    name: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/data_source.py
CHANGED
@@ -29,6 +29,7 @@ class DataSource(pydantic.BaseModel):
     source_type: ConfigurableDataSourceNames
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
     component: DataSourceComponent = pydantic.Field(description="Component that implements the data source")
+    version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
     project_id: str

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/extract_agent_create.py
ADDED

@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
+from .extract_config import ExtractConfig
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractAgentCreate(pydantic.BaseModel):
+    """
+    Settings for creating an extraction agent.
+    """
+
+    name: str = pydantic.Field(description="The name of the extraction schema")
+    data_schema: ExtractAgentCreateDataSchema = pydantic.Field(description="The schema of the data.")
+    config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
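A request-body sketch. The agent name and schema contents are illustrative, and the assumption that a default-constructed `ExtractConfig` is sufficient is not confirmed by this diff:

```python
from llama_cloud.types import ExtractAgentCreate, ExtractConfig

payload = ExtractAgentCreate(
    name="invoice-extractor",  # illustrative agent name
    data_schema={              # illustrative JSON-schema dict
        "type": "object",
        "properties": {"total": {"type": "number"}},
    },
    config=ExtractConfig(),    # assumption: ExtractConfig defaults suffice here
)
```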
llama_cloud/types/extract_agent_update.py
ADDED

@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
+from .extract_config import ExtractConfig
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractAgentUpdate(pydantic.BaseModel):
+    """
+    Settings for updating an extraction schema.
+    """
+
+    data_schema: ExtractAgentUpdateDataSchema = pydantic.Field(description="The schema of the data")
+    config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/extract_job_create_batch.py
ADDED

@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_config import ExtractConfig
+from .extract_job_create_batch_data_schema_override import ExtractJobCreateBatchDataSchemaOverride
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractJobCreateBatch(pydantic.BaseModel):
+    """
+    Schema for creating extraction jobs in batch.
+    """
+
+    extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
+    file_ids: typing.List[str] = pydantic.Field(description="The ids of the files")
+    data_schema_override: typing.Optional[ExtractJobCreateBatchDataSchemaOverride] = pydantic.Field(
+        description="The data schema to override the extraction agent's data schema with"
+    )
+    config_override: typing.Optional[ExtractConfig]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
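A sketch of a batch payload. The ids are hypothetical, and the two override fields are optional (pydantic v1 defaults `Optional` fields to `None`), so they are simply left unset:

```python
from llama_cloud.types import ExtractJobCreateBatch

batch = ExtractJobCreateBatch(
    extraction_agent_id="agent_123",  # hypothetical agent id
    file_ids=["file_a", "file_b"],    # hypothetical file ids
    # data_schema_override / config_override omitted; both default to None
)
```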
llama_cloud/types/extract_job_create_batch_data_schema_override.py
ADDED

@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .extract_job_create_batch_data_schema_override_zero_value import ExtractJobCreateBatchDataSchemaOverrideZeroValue
+
+ExtractJobCreateBatchDataSchemaOverride = typing.Union[
+    typing.Dict[str, typing.Optional[ExtractJobCreateBatchDataSchemaOverrideZeroValue]], str
+]
llama_cloud/types/extract_schema_validate_request.py
ADDED

@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractSchemaValidateRequest(pydantic.BaseModel):
+    data_schema: ExtractSchemaValidateRequestDataSchema
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
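Sketch of a validation request body. The schema contents are illustrative, assuming the `ExtractSchemaValidateRequestDataSchema` union accepts a plain dict:

```python
from llama_cloud.types import ExtractSchemaValidateRequest

request = ExtractSchemaValidateRequest(
    data_schema={"type": "object", "properties": {"vendor": {"type": "string"}}}
)
```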
llama_cloud/types/free_credits_usage.py
ADDED

@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FreeCreditsUsage(pydantic.BaseModel):
+    starting_balance: int
+    remaining_balance: int
+    grant_name: str
+    expires_at: dt.datetime
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
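All four fields are required, so computing consumed credits is trivial. The helper below is illustrative, not part of the SDK:

```python
from llama_cloud.types import FreeCreditsUsage

def credits_used(usage: FreeCreditsUsage) -> int:
    # starting_balance and remaining_balance are required ints on the model
    return usage.starting_balance - usage.remaining_balance
```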
llama_cloud/types/llama_parse_parameters.py
CHANGED

@@ -25,6 +25,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     parsing_instruction: typing.Optional[str]
     disable_ocr: typing.Optional[bool]
     annotate_links: typing.Optional[bool]
+    adaptive_long_table: typing.Optional[bool]
     disable_reconstruction: typing.Optional[bool]
     disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
@@ -50,6 +51,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     target_pages: typing.Optional[str]
     use_vendor_multimodal_model: typing.Optional[bool]
     vendor_multimodal_model_name: typing.Optional[str]
+    model: typing.Optional[str]
     vendor_multimodal_api_key: typing.Optional[str]
     page_prefix: typing.Optional[str]
     page_suffix: typing.Optional[str]
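Both new fields are optional, so existing call sites keep working. A sketch exercising them; the model string is a placeholder, not a documented value:

```python
from llama_cloud.types import LlamaParseParameters

params = LlamaParseParameters(
    adaptive_long_table=True,  # new optional flag in 0.1.15
    model="example-model-id",  # new optional field; placeholder value
)
```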