llama-cloud 0.1.19__py3-none-any.whl → 0.1.21__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective registries; it is provided for informational purposes only.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +44 -26
- llama_cloud/resources/files/client.py +18 -4
- llama_cloud/resources/parsing/client.py +8 -0
- llama_cloud/resources/pipelines/client.py +25 -11
- llama_cloud/types/__init__.py +46 -26
- llama_cloud/types/{base_prompt_template.py → data_source_update_dispatcher_config.py} +9 -7
- llama_cloud/types/{node_parser.py → delete_params.py} +7 -9
- llama_cloud/types/document_ingestion_job_params.py +43 -0
- llama_cloud/types/extract_config.py +3 -0
- llama_cloud/types/job_record.py +2 -2
- llama_cloud/types/job_record_parameters.py +111 -0
- llama_cloud/types/{page_splitter_node_parser.py → l_lama_parse_transform_config.py} +5 -10
- llama_cloud/types/legacy_parse_job_config.py +189 -0
- llama_cloud/types/llama_parse_parameters.py +1 -0
- llama_cloud/types/load_files_job_config.py +35 -0
- llama_cloud/types/parse_job_config.py +134 -0
- llama_cloud/types/pipeline.py +4 -4
- llama_cloud/types/pipeline_create.py +2 -2
- llama_cloud/types/pipeline_file_update_dispatcher_config.py +38 -0
- llama_cloud/types/{configured_transformation_item.py → pipeline_file_updater_config.py} +13 -12
- llama_cloud/types/pipeline_managed_ingestion_job_params.py +37 -0
- llama_cloud/types/pipeline_metadata_config.py +36 -0
- llama_cloud/types/pipeline_status.py +17 -0
- llama_cloud/types/prompt_conf.py +1 -0
- llama_cloud/types/supported_llm_model.py +1 -2
- {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/METADATA +6 -2
- {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/RECORD +29 -29
- {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/WHEEL +1 -1
- llama_cloud/types/character_splitter.py +0 -46
- llama_cloud/types/code_splitter.py +0 -50
- llama_cloud/types/configured_transformation_item_component.py +0 -22
- llama_cloud/types/llm.py +0 -60
- llama_cloud/types/markdown_element_node_parser.py +0 -51
- llama_cloud/types/markdown_node_parser.py +0 -52
- llama_cloud/types/pydantic_program_mode.py +0 -41
- llama_cloud/types/sentence_splitter.py +0 -50
- llama_cloud/types/token_text_splitter.py +0 -50
- {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/LICENSE +0 -0
llama_cloud/types/__init__.py
CHANGED
@@ -24,7 +24,6 @@ from .base_plan import BasePlan
 from .base_plan_metronome_plan_type import BasePlanMetronomePlanType
 from .base_plan_name import BasePlanName
 from .base_plan_plan_frequency import BasePlanPlanFrequency
-from .base_prompt_template import BasePromptTemplate
 from .batch import Batch
 from .batch_item import BatchItem
 from .batch_paginated_list import BatchPaginatedList
@@ -34,7 +33,6 @@ from .bedrock_embedding_config import BedrockEmbeddingConfig
 from .billing_period import BillingPeriod
 from .box_auth_mechanism import BoxAuthMechanism
 from .character_chunking_config import CharacterChunkingConfig
-from .character_splitter import CharacterSplitter
 from .chat_app import ChatApp
 from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
@@ -57,7 +55,6 @@ from .cloud_qdrant_vector_store import CloudQdrantVectorStore
 from .cloud_s_3_data_source import CloudS3DataSource
 from .cloud_sharepoint_data_source import CloudSharepointDataSource
 from .cloud_slack_data_source import CloudSlackDataSource
-from .code_splitter import CodeSplitter
 from .cohere_embedding import CohereEmbedding
 from .cohere_embedding_config import CohereEmbeddingConfig
 from .composite_retrieval_mode import CompositeRetrievalMode
@@ -68,8 +65,6 @@ from .configurable_data_sink_names import ConfigurableDataSinkNames
 from .configurable_data_source_names import ConfigurableDataSourceNames
 from .configurable_transformation_definition import ConfigurableTransformationDefinition
 from .configurable_transformation_names import ConfigurableTransformationNames
-from .configured_transformation_item import ConfiguredTransformationItem
-from .configured_transformation_item_component import ConfiguredTransformationItemComponent
 from .credit_type import CreditType
 from .data_sink import DataSink
 from .data_sink_component import DataSinkComponent
@@ -83,6 +78,9 @@ from .data_source_create_component import DataSourceCreateComponent
 from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_definition import DataSourceDefinition
+from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
+from .delete_params import DeleteParams
+from .document_ingestion_job_params import DocumentIngestionJobParams
 from .edit_suggestion import EditSuggestion
 from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
 from .element_segmentation_config import ElementSegmentationConfig
@@ -153,7 +151,21 @@ from .input_message import InputMessage
 from .job_name_mapping import JobNameMapping
 from .job_names import JobNames
 from .job_record import JobRecord
+from .job_record_parameters import (
+    JobRecordParameters,
+    JobRecordParameters_DataSourceUpdateDispatcher,
+    JobRecordParameters_DocumentIngestion,
+    JobRecordParameters_LegacyParse,
+    JobRecordParameters_LlamaParseTransform,
+    JobRecordParameters_LoadFiles,
+    JobRecordParameters_Parse,
+    JobRecordParameters_PipelineFileUpdateDispatcher,
+    JobRecordParameters_PipelineFileUpdater,
+    JobRecordParameters_PipelineManagedIngestion,
+)
 from .job_record_with_usage_metrics import JobRecordWithUsageMetrics
+from .l_lama_parse_transform_config import LLamaParseTransformConfig
+from .legacy_parse_job_config import LegacyParseJobConfig
 from .llama_extract_settings import LlamaExtractSettings
 from .llama_index_core_base_llms_types_chat_message import LlamaIndexCoreBaseLlmsTypesChatMessage
 from .llama_index_core_base_llms_types_chat_message_blocks_item import (
@@ -164,20 +176,17 @@ from .llama_index_core_base_llms_types_chat_message_blocks_item import (
 )
 from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
-from .llm import Llm
 from .llm_model_data import LlmModelData
 from .llm_parameters import LlmParameters
+from .load_files_job_config import LoadFilesJobConfig
 from .managed_ingestion_status import ManagedIngestionStatus
 from .managed_ingestion_status_response import ManagedIngestionStatusResponse
-from .markdown_element_node_parser import MarkdownElementNodeParser
-from .markdown_node_parser import MarkdownNodeParser
 from .message_annotation import MessageAnnotation
 from .message_role import MessageRole
 from .metadata_filter import MetadataFilter
 from .metadata_filter_value import MetadataFilterValue
 from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
-from .node_parser import NodeParser
 from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
 from .none_segmentation_config import NoneSegmentationConfig
@@ -190,12 +199,12 @@ from .page_figure_metadata import PageFigureMetadata
 from .page_screenshot_metadata import PageScreenshotMetadata
 from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .page_segmentation_config import PageSegmentationConfig
-from .page_splitter_node_parser import PageSplitterNodeParser
 from .paginated_extract_runs_response import PaginatedExtractRunsResponse
 from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
 from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
 from .paginated_report_response import PaginatedReportResponse
+from .parse_job_config import ParseJobConfig
 from .parse_plan_level import ParsePlanLevel
 from .parser_languages import ParserLanguages
 from .parsing_history_item import ParsingHistoryItem
@@ -248,6 +257,11 @@ from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
 from .pipeline_file_status import PipelineFileStatus
+from .pipeline_file_update_dispatcher_config import PipelineFileUpdateDispatcherConfig
+from .pipeline_file_updater_config import PipelineFileUpdaterConfig
+from .pipeline_managed_ingestion_job_params import PipelineManagedIngestionJobParams
+from .pipeline_metadata_config import PipelineMetadataConfig
+from .pipeline_status import PipelineStatus
 from .pipeline_transform_config import (
     PipelineTransformConfig,
     PipelineTransformConfig_Advanced,
@@ -265,7 +279,6 @@ from .progress_event_status import ProgressEventStatus
 from .project import Project
 from .project_create import ProjectCreate
 from .prompt_conf import PromptConf
-from .pydantic_program_mode import PydanticProgramMode
 from .re_rank_config import ReRankConfig
 from .re_ranker_type import ReRankerType
 from .recurring_credit_grant import RecurringCreditGrant
@@ -300,7 +313,6 @@ from .role import Role
 from .schema_relax_mode import SchemaRelaxMode
 from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
-from .sentence_splitter import SentenceSplitter
 from .status_enum import StatusEnum
 from .struct_mode import StructMode
 from .struct_parse_conf import StructParseConf
@@ -311,7 +323,6 @@ from .text_node import TextNode
 from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
 from .token_chunking_config import TokenChunkingConfig
-from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
 from .usage_and_plan import UsageAndPlan
 from .usage_metric_response import UsageMetricResponse
@@ -349,7 +360,6 @@ __all__ = [
     "BasePlanMetronomePlanType",
     "BasePlanName",
     "BasePlanPlanFrequency",
-    "BasePromptTemplate",
     "Batch",
     "BatchItem",
     "BatchPaginatedList",
@@ -359,7 +369,6 @@ __all__ = [
     "BillingPeriod",
     "BoxAuthMechanism",
     "CharacterChunkingConfig",
-    "CharacterSplitter",
     "ChatApp",
     "ChatAppResponse",
     "ChatData",
@@ -382,7 +391,6 @@ __all__ = [
     "CloudS3DataSource",
     "CloudSharepointDataSource",
     "CloudSlackDataSource",
-    "CodeSplitter",
     "CohereEmbedding",
     "CohereEmbeddingConfig",
     "CompositeRetrievalMode",
@@ -393,8 +401,6 @@ __all__ = [
     "ConfigurableDataSourceNames",
     "ConfigurableTransformationDefinition",
     "ConfigurableTransformationNames",
-    "ConfiguredTransformationItem",
-    "ConfiguredTransformationItemComponent",
     "CreditType",
     "DataSink",
     "DataSinkComponent",
@@ -408,6 +414,9 @@ __all__ = [
     "DataSourceCreateCustomMetadataValue",
     "DataSourceCustomMetadataValue",
     "DataSourceDefinition",
+    "DataSourceUpdateDispatcherConfig",
+    "DeleteParams",
+    "DocumentIngestionJobParams",
     "EditSuggestion",
     "EditSuggestionBlocksItem",
     "ElementSegmentationConfig",
@@ -474,7 +483,19 @@ __all__ = [
     "JobNameMapping",
     "JobNames",
     "JobRecord",
+    "JobRecordParameters",
+    "JobRecordParameters_DataSourceUpdateDispatcher",
+    "JobRecordParameters_DocumentIngestion",
+    "JobRecordParameters_LegacyParse",
+    "JobRecordParameters_LlamaParseTransform",
+    "JobRecordParameters_LoadFiles",
+    "JobRecordParameters_Parse",
+    "JobRecordParameters_PipelineFileUpdateDispatcher",
+    "JobRecordParameters_PipelineFileUpdater",
+    "JobRecordParameters_PipelineManagedIngestion",
     "JobRecordWithUsageMetrics",
+    "LLamaParseTransformConfig",
+    "LegacyParseJobConfig",
     "LlamaExtractSettings",
     "LlamaIndexCoreBaseLlmsTypesChatMessage",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
@@ -483,20 +504,17 @@ __all__ = [
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
     "LlamaParseParameters",
    "LlamaParseSupportedFileExtensions",
-    "Llm",
     "LlmModelData",
     "LlmParameters",
+    "LoadFilesJobConfig",
     "ManagedIngestionStatus",
     "ManagedIngestionStatusResponse",
-    "MarkdownElementNodeParser",
-    "MarkdownNodeParser",
     "MessageAnnotation",
     "MessageRole",
     "MetadataFilter",
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
-    "NodeParser",
     "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",
@@ -509,12 +527,12 @@ __all__ = [
     "PageScreenshotMetadata",
     "PageScreenshotNodeWithScore",
     "PageSegmentationConfig",
-    "PageSplitterNodeParser",
     "PaginatedExtractRunsResponse",
     "PaginatedJobsHistoryWithMetrics",
     "PaginatedListCloudDocumentsResponse",
     "PaginatedListPipelineFilesResponse",
     "PaginatedReportResponse",
+    "ParseJobConfig",
     "ParsePlanLevel",
     "ParserLanguages",
     "ParsingHistoryItem",
@@ -563,6 +581,11 @@ __all__ = [
     "PipelineFilePermissionInfoValue",
     "PipelineFileResourceInfoValue",
     "PipelineFileStatus",
+    "PipelineFileUpdateDispatcherConfig",
+    "PipelineFileUpdaterConfig",
+    "PipelineManagedIngestionJobParams",
+    "PipelineMetadataConfig",
+    "PipelineStatus",
     "PipelineTransformConfig",
     "PipelineTransformConfig_Advanced",
     "PipelineTransformConfig_Auto",
@@ -578,7 +601,6 @@ __all__ = [
     "Project",
     "ProjectCreate",
     "PromptConf",
-    "PydanticProgramMode",
     "ReRankConfig",
     "ReRankerType",
     "RecurringCreditGrant",
@@ -611,7 +633,6 @@ __all__ = [
     "SchemaRelaxMode",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
-    "SentenceSplitter",
     "StatusEnum",
     "StructMode",
     "StructParseConf",
@@ -622,7 +643,6 @@ __all__ = [
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
     "TokenChunkingConfig",
-    "TokenTextSplitter",
     "TransformationCategoryNames",
     "UsageAndPlan",
     "UsageMetricResponse",
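Taken together, the export changes above mean that code importing the removed splitter and node-parser types breaks on upgrade, while the new job-parameter types become importable. A minimal sketch of the import-surface change, assuming llama-cloud 0.1.21 is installed; the module path `llama_cloud.types` is the one shown in this diff, and whether the top-level `llama_cloud` package re-exports the same names is not shown here:

    # New in 0.1.21, per the __all__ additions above.
    from llama_cloud.types import (
        DeleteParams,
        DocumentIngestionJobParams,
        JobRecordParameters,
        PipelineStatus,
    )

    # Removed in 0.1.21: this import succeeded on 0.1.19 but now fails.
    try:
        from llama_cloud.types import SentenceSplitter  # noqa: F401
    except ImportError:
        print("SentenceSplitter and the other splitter/node-parser types are gone")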
llama_cloud/types/{base_prompt_template.py → data_source_update_dispatcher_config.py}
RENAMED
@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .delete_params import DeleteParams
 
 try:
     import pydantic
@@ -14,13 +15,14 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class
-
-
-
-
-
-
+class DataSourceUpdateDispatcherConfig(pydantic.BaseModel):
+    """
+    Schema for the parameters of a data source dispatcher job.
+    """
+
+    should_delete: typing.Optional[bool]
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    delete_info: typing.Optional[DeleteParams]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
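The renamed module now carries the dispatcher job schema instead of BasePromptTemplate. A hedged usage sketch with illustrative values only; all three fields shown in the hunk are optional, and DeleteParams is defined in the next file below:

    from llama_cloud.types import DataSourceUpdateDispatcherConfig, DeleteParams

    params = DataSourceUpdateDispatcherConfig(
        should_delete=True,
        custom_metadata={"trigger": "scheduled-sync"},  # hypothetical metadata
        delete_info=DeleteParams(data_sources_ids_to_delete=["ds_123"]),
    )
    # json()/dict() on these generated models default to by_alias=True, exclude_unset=True.
    print(params.json())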
llama_cloud/types/{node_parser.py → delete_params.py}
RENAMED
@@ -14,18 +14,16 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class
+class DeleteParams(pydantic.BaseModel):
     """
-
+    Schema for the parameters of a delete job.
     """
 
-
-
-
-
-
-    id_func: typing.Optional[str]
-    class_name: typing.Optional[str]
+    document_ids_to_delete: typing.Optional[typing.List[str]]
+    files_ids_to_delete: typing.Optional[typing.List[str]]
+    data_sources_ids_to_delete: typing.Optional[typing.List[str]]
+    embed_collection_name: typing.Optional[str]
+    data_sink_id: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
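DeleteParams replaces the old node_parser module and groups the identifiers a delete job can target. A small sketch of how the exclude_unset default on json() interacts with these all-optional fields; the values are illustrative:

    from llama_cloud.types import DeleteParams

    delete_info = DeleteParams(
        document_ids_to_delete=["doc_1", "doc_2"],
        data_sink_id="sink_abc",  # hypothetical id
    )
    # Unset optional fields (files_ids_to_delete, etc.) are dropped from the payload
    # because json() defaults to exclude_unset=True.
    print(delete_info.json())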
llama_cloud/types/document_ingestion_job_params.py
ADDED
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .delete_params import DeleteParams
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class DocumentIngestionJobParams(pydantic.BaseModel):
+    """
+    Schema for the parameters of a document ingestion job.
+    """
+
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    resource_info: typing.Optional[typing.Dict[str, typing.Any]]
+    should_delete: typing.Optional[bool]
+    document_ids: typing.Optional[typing.List[str]]
+    pipeline_file_id: typing.Optional[str]
+    delete_info: typing.Optional[DeleteParams]
+    is_new_file: typing.Optional[bool] = pydantic.Field(description="Whether the file is new")
+    page_count: typing.Optional[int]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
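A usage sketch for the new model; the field names come straight from the added file, the values are made up, and the Config block makes instances immutable:

    from llama_cloud.types import DeleteParams, DocumentIngestionJobParams

    job_params = DocumentIngestionJobParams(
        pipeline_file_id="file_123",  # hypothetical id
        document_ids=["doc_1"],
        is_new_file=True,
        page_count=12,
        delete_info=DeleteParams(document_ids_to_delete=["stale_doc"]),
    )
    # Config.frozen = True: assigning to job_params.page_count raises an error
    # instead of mutating the instance.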
llama_cloud/types/extract_config.py
CHANGED
@@ -26,6 +26,9 @@ class ExtractConfig(pydantic.BaseModel):
     system_prompt: typing.Optional[str]
     use_reasoning: typing.Optional[bool] = pydantic.Field(description="Whether to use reasoning for the extraction.")
     cite_sources: typing.Optional[bool] = pydantic.Field(description="Whether to cite sources for the extraction.")
+    invalidate_cache: typing.Optional[bool] = pydantic.Field(
+        description="Whether to invalidate the cache for the extraction."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
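The only functional change to ExtractConfig is the new invalidate_cache flag. A hedged sketch: only the fields visible in this hunk are passed, and any other ExtractConfig fields not shown in the diff are assumed to be optional or defaulted:

    from llama_cloud.types import ExtractConfig

    config = ExtractConfig(
        system_prompt="Extract line items and totals from the invoice.",
        use_reasoning=True,
        cite_sources=True,
        invalidate_cache=True,  # new in 0.1.21: skip any cached extraction result
    )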
llama_cloud/types/job_record.py
CHANGED
@@ -5,6 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .job_names import JobNames
+from .job_record_parameters import JobRecordParameters
 from .status_enum import StatusEnum
 
 try:
@@ -25,7 +26,7 @@ class JobRecord(pydantic.BaseModel):
     partitions: typing.Dict[str, str] = pydantic.Field(
         description="The partitions for this execution. Used for determining where to save job output."
     )
-    parameters: typing.Optional[
+    parameters: typing.Optional[JobRecordParameters]
     session_id: typing.Optional[str]
     correlation_id: typing.Optional[str]
     parent_job_execution_id: typing.Optional[str]
@@ -40,7 +41,6 @@ class JobRecord(pydantic.BaseModel):
     started_at: typing.Optional[dt.datetime]
     ended_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
-    data: typing.Optional[typing.Any]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
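For JobRecord, the upshot is that `parameters` is now the typed JobRecordParameters union (defined in the new file below) and the untyped `data` field is gone. A sketch of code consuming an existing record, assuming every union member carries the `type` literal shown below so the attribute is safe to read on any variant:

    from llama_cloud.types import JobRecord

    def describe(job: JobRecord) -> str:
        # 0.1.19 code that read job.data must migrate; the field no longer exists.
        if job.parameters is None:
            return f"job with partitions {job.partitions} has no parameters"
        return f"job parameters variant: {job.parameters.type}"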
llama_cloud/types/job_record_parameters.py
ADDED
@@ -0,0 +1,111 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
+from .document_ingestion_job_params import DocumentIngestionJobParams
+from .l_lama_parse_transform_config import LLamaParseTransformConfig
+from .legacy_parse_job_config import LegacyParseJobConfig
+from .load_files_job_config import LoadFilesJobConfig
+from .parse_job_config import ParseJobConfig
+from .pipeline_file_update_dispatcher_config import PipelineFileUpdateDispatcherConfig
+from .pipeline_file_updater_config import PipelineFileUpdaterConfig
+from .pipeline_managed_ingestion_job_params import PipelineManagedIngestionJobParams
+
+
+class JobRecordParameters_DataSourceUpdateDispatcher(DataSourceUpdateDispatcherConfig):
+    type: typing_extensions.Literal["data_source_update_dispatcher"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_DocumentIngestion(DocumentIngestionJobParams):
+    type: typing_extensions.Literal["document_ingestion"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_LegacyParse(LegacyParseJobConfig):
+    type: typing_extensions.Literal["legacy_parse"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_LlamaParseTransform(LLamaParseTransformConfig):
+    type: typing_extensions.Literal["llama_parse_transform"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_LoadFiles(LoadFilesJobConfig):
+    type: typing_extensions.Literal["load_files"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_Parse(ParseJobConfig):
+    type: typing_extensions.Literal["parse"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_PipelineFileUpdateDispatcher(PipelineFileUpdateDispatcherConfig):
+    type: typing_extensions.Literal["pipeline_file_update_dispatcher"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_PipelineFileUpdater(PipelineFileUpdaterConfig):
+    type: typing_extensions.Literal["pipeline_file_updater"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_PipelineManagedIngestion(PipelineManagedIngestionJobParams):
+    type: typing_extensions.Literal["pipeline_managed_ingestion"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+JobRecordParameters = typing.Union[
+    JobRecordParameters_DataSourceUpdateDispatcher,
+    JobRecordParameters_DocumentIngestion,
+    JobRecordParameters_LegacyParse,
+    JobRecordParameters_LlamaParseTransform,
+    JobRecordParameters_LoadFiles,
+    JobRecordParameters_Parse,
+    JobRecordParameters_PipelineFileUpdateDispatcher,
+    JobRecordParameters_PipelineFileUpdater,
+    JobRecordParameters_PipelineManagedIngestion,
+]
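Because each wrapper subclass pins a Literal `type` and enables smart_union, callers can branch on the concrete variant with plain isinstance checks. A minimal sketch:

    from llama_cloud.types import (
        JobRecordParameters,
        JobRecordParameters_DocumentIngestion,
        JobRecordParameters_LoadFiles,
    )

    def summarize(params: JobRecordParameters) -> str:
        if isinstance(params, JobRecordParameters_DocumentIngestion):
            return f"ingestion of pipeline file {params.pipeline_file_id}"
        if isinstance(params, JobRecordParameters_LoadFiles):
            return "load-files job"
        # Fall back to the discriminator string for the remaining seven variants.
        return f"job of type {params.type}"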
llama_cloud/types/{page_splitter_node_parser.py → l_lama_parse_transform_config.py}
RENAMED
@@ -14,19 +14,14 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class
+class LLamaParseTransformConfig(pydantic.BaseModel):
     """
-
+    Schema for the parameters of llamaparse transform job.
     """
 
-
-
-    )
-    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
-    page_separator: typing.Optional[str]
-    class_name: typing.Optional[str]
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    resource_info: typing.Optional[typing.Dict[str, typing.Any]]
+    file_output: str = pydantic.Field(description="Whether to delete the files")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}