llama-cloud 0.1.18__py3-none-any.whl → 0.1.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_cloud/__init__.py +202 -42
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +61 -2
- llama_cloud/resources/beta/__init__.py +2 -0
- llama_cloud/resources/beta/client.py +371 -0
- llama_cloud/resources/data_sinks/__init__.py +18 -2
- llama_cloud/resources/data_sinks/client.py +2 -94
- llama_cloud/resources/data_sinks/types/__init__.py +18 -2
- llama_cloud/resources/data_sinks/types/data_sink_update_component.py +65 -7
- llama_cloud/resources/data_sources/__init__.py +30 -2
- llama_cloud/resources/data_sources/types/__init__.py +28 -1
- llama_cloud/resources/data_sources/types/data_source_update_component.py +2 -23
- llama_cloud/resources/data_sources/types/data_source_update_component_one.py +122 -0
- llama_cloud/resources/embedding_model_configs/client.py +82 -22
- llama_cloud/resources/files/client.py +18 -4
- llama_cloud/resources/llama_extract/__init__.py +21 -0
- llama_cloud/resources/llama_extract/client.py +227 -114
- llama_cloud/resources/llama_extract/types/__init__.py +21 -0
- llama_cloud/resources/parsing/client.py +123 -4
- llama_cloud/resources/pipelines/client.py +116 -11
- llama_cloud/types/__init__.py +172 -52
- llama_cloud/types/{extract_schema_validate_request.py → audio_block.py} +5 -3
- llama_cloud/types/batch.py +47 -0
- llama_cloud/types/batch_item.py +40 -0
- llama_cloud/types/batch_paginated_list.py +35 -0
- llama_cloud/types/{base_prompt_template.py → batch_public_output.py} +7 -7
- llama_cloud/types/cloud_confluence_data_source.py +1 -0
- llama_cloud/types/cloud_jira_data_source.py +0 -4
- llama_cloud/types/cloud_postgres_vector_store.py +2 -0
- llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
- llama_cloud/types/data_sink_component.py +65 -7
- llama_cloud/types/data_sink_create_component.py +65 -7
- llama_cloud/types/data_source_component.py +2 -23
- llama_cloud/types/data_source_component_one.py +122 -0
- llama_cloud/types/data_source_create_component.py +2 -23
- llama_cloud/types/data_source_create_component_one.py +122 -0
- llama_cloud/types/{extract_agent_update.py → data_source_update_dispatcher_config.py} +6 -6
- llama_cloud/types/{node_parser.py → delete_params.py} +7 -9
- llama_cloud/types/{extract_agent_create.py → document_ingestion_job_params.py} +11 -7
- llama_cloud/types/extract_config.py +2 -0
- llama_cloud/types/extract_job_create.py +1 -2
- llama_cloud/types/fail_page_mode.py +29 -0
- llama_cloud/types/file_count_by_status_response.py +37 -0
- llama_cloud/types/file_parse_public.py +36 -0
- llama_cloud/types/job_names.py +8 -12
- llama_cloud/types/job_record.py +2 -2
- llama_cloud/types/job_record_parameters.py +111 -0
- llama_cloud/types/l_lama_parse_transform_config.py +37 -0
- llama_cloud/types/legacy_parse_job_config.py +189 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +13 -1
- llama_cloud/types/llama_parse_parameters.py +8 -0
- llama_cloud/types/load_files_job_config.py +35 -0
- llama_cloud/types/message_role.py +4 -0
- llama_cloud/types/parse_job_config.py +134 -0
- llama_cloud/types/pg_vector_distance_method.py +43 -0
- llama_cloud/types/{extract_job_create_batch.py → pg_vector_hnsw_settings.py} +12 -9
- llama_cloud/types/pg_vector_vector_type.py +35 -0
- llama_cloud/types/pipeline.py +2 -4
- llama_cloud/types/pipeline_create.py +3 -2
- llama_cloud/types/pipeline_data_source.py +3 -0
- llama_cloud/types/pipeline_data_source_component.py +2 -23
- llama_cloud/types/pipeline_data_source_component_one.py +122 -0
- llama_cloud/types/pipeline_data_source_status.py +33 -0
- llama_cloud/types/pipeline_file.py +1 -0
- llama_cloud/types/pipeline_file_update_dispatcher_config.py +38 -0
- llama_cloud/types/{markdown_node_parser.py → pipeline_file_updater_config.py} +14 -15
- llama_cloud/types/pipeline_managed_ingestion_job_params.py +37 -0
- llama_cloud/types/pipeline_metadata_config.py +36 -0
- llama_cloud/types/prompt_conf.py +3 -0
- llama_cloud/types/struct_parse_conf.py +4 -1
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/METADATA +4 -2
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/RECORD +82 -68
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/WHEEL +1 -1
- llama_cloud/types/character_splitter.py +0 -46
- llama_cloud/types/code_splitter.py +0 -50
- llama_cloud/types/configured_transformation_item.py +0 -46
- llama_cloud/types/configured_transformation_item_component.py +0 -22
- llama_cloud/types/llm.py +0 -60
- llama_cloud/types/markdown_element_node_parser.py +0 -51
- llama_cloud/types/page_splitter_node_parser.py +0 -42
- llama_cloud/types/pydantic_program_mode.py +0 -41
- llama_cloud/types/sentence_splitter.py +0 -50
- llama_cloud/types/token_text_splitter.py +0 -47
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/LICENSE +0 -0
llama_cloud/types/fail_page_mode.py
ADDED

@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class FailPageMode(str, enum.Enum):
+    """
+    Enum for representing the different available page error handling modes
+    """
+
+    RAW_TEXT = "raw_text"
+    BLANK_PAGE = "blank_page"
+    ERROR_MESSAGE = "error_message"
+
+    def visit(
+        self,
+        raw_text: typing.Callable[[], T_Result],
+        blank_page: typing.Callable[[], T_Result],
+        error_message: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is FailPageMode.RAW_TEXT:
+            return raw_text()
+        if self is FailPageMode.BLANK_PAGE:
+            return blank_page()
+        if self is FailPageMode.ERROR_MESSAGE:
+            return error_message()
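The `visit` method turns the enum into an exhaustive, typed dispatch: the caller supplies one zero-argument callable per member and the matching one is invoked. A minimal sketch of how downstream code might pick replacement content for a failed page (the replacement strings are invented for illustration):

from llama_cloud.types.fail_page_mode import FailPageMode

def replacement_for_failed_page(mode: FailPageMode, error: str) -> str:
    # visit() calls exactly one of the three callables, keyed on the member.
    return mode.visit(
        raw_text=lambda: "<best-effort raw text>",  # invented placeholder
        blank_page=lambda: "",  # substitute an empty page
        error_message=lambda: f"[parse failed: {error}]",
    )

print(replacement_for_failed_page(FailPageMode.ERROR_MESSAGE, "timeout"))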
llama_cloud/types/file_count_by_status_response.py
ADDED

@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FileCountByStatusResponse(pydantic.BaseModel):
+    counts: typing.Dict[str, int] = pydantic.Field(description="The counts of files by status")
+    total_count: int = pydantic.Field(description="The total number of files")
+    pipeline_id: typing.Optional[str]
+    data_source_id: typing.Optional[str]
+    only_manually_uploaded: typing.Optional[bool] = pydantic.Field(
+        description="Whether to only count manually uploaded files"
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
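Note the overridden `json`/`dict`: both default to `by_alias=True, exclude_unset=True`, so optional fields that were never set are dropped from the serialized payload rather than emitted as null. A small sketch, with invented counts:

from llama_cloud.types.file_count_by_status_response import FileCountByStatusResponse

resp = FileCountByStatusResponse(
    counts={"SUCCESS": 12, "ERROR": 1},  # invented numbers
    total_count=13,
)
# pipeline_id, data_source_id and only_manually_uploaded were never set,
# so exclude_unset omits them from the output entirely.
print(resp.json())  # {"counts": {"SUCCESS": 12, "ERROR": 1}, "total_count": 13}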
llama_cloud/types/file_parse_public.py
ADDED

@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FileParsePublic(pydantic.BaseModel):
+    created_at: dt.datetime = pydantic.Field(description="The date and time when the file was parsed.")
+    status: str = pydantic.Field(description="The status of the parse task.")
+    started_at: typing.Optional[dt.datetime]
+    ended_at: typing.Optional[dt.datetime]
+    input_path: str = pydantic.Field(description="The path to the input file.")
+    data_path: str = pydantic.Field(description="The path to the data file.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/job_names.py
CHANGED

@@ -14,7 +14,6 @@ class JobNames(str, enum.Enum):
     LOAD_DOCUMENTS_JOB = "load_documents_job"
     LOAD_FILES_JOB = "load_files_job"
     PLAYGROUND_JOB = "playground_job"
-    EVAL_DATASET_JOB = "eval_dataset_job"
     PIPELINE_MANAGED_INGESTION_JOB = "pipeline_managed_ingestion_job"
     DATA_SOURCE_MANAGED_INGESTION_JOB = "data_source_managed_ingestion_job"
     DATA_SOURCE_UPDATE_DISPATCHER_JOB = "data_source_update_dispatcher_job"
@@ -22,20 +21,19 @@ class JobNames(str, enum.Enum):
     PIPELINE_FILE_UPDATER_JOB = "pipeline_file_updater_job"
     FILE_MANAGED_INGESTION_JOB = "file_managed_ingestion_job"
     DOCUMENT_INGESTION_JOB = "document_ingestion_job"
-    PARSE_RAW_FILE_JOB = "parse_raw_file_job"
-    LLAMA_PARSE_TRANSFORM_JOB = "llama_parse_transform_job"
     METADATA_UPDATE_JOB = "metadata_update_job"
     PARSE_RAW_FILE_JOB_CACHED = "parse_raw_file_job_cached"
     EXTRACTION_JOB = "extraction_job"
     EXTRACT_JOB = "extract_job"
     ASYNCIO_TEST_JOB = "asyncio_test_job"
+    PARSE_RAW_FILE_JOB = "parse_raw_file_job"
+    LLAMA_PARSE_TRANSFORM_JOB = "llama_parse_transform_job"

     def visit(
         self,
         load_documents_job: typing.Callable[[], T_Result],
         load_files_job: typing.Callable[[], T_Result],
         playground_job: typing.Callable[[], T_Result],
-        eval_dataset_job: typing.Callable[[], T_Result],
         pipeline_managed_ingestion_job: typing.Callable[[], T_Result],
         data_source_managed_ingestion_job: typing.Callable[[], T_Result],
         data_source_update_dispatcher_job: typing.Callable[[], T_Result],
@@ -43,13 +41,13 @@ class JobNames(str, enum.Enum):
         pipeline_file_updater_job: typing.Callable[[], T_Result],
         file_managed_ingestion_job: typing.Callable[[], T_Result],
         document_ingestion_job: typing.Callable[[], T_Result],
-        parse_raw_file_job: typing.Callable[[], T_Result],
-        llama_parse_transform_job: typing.Callable[[], T_Result],
         metadata_update_job: typing.Callable[[], T_Result],
         parse_raw_file_job_cached: typing.Callable[[], T_Result],
         extraction_job: typing.Callable[[], T_Result],
         extract_job: typing.Callable[[], T_Result],
         asyncio_test_job: typing.Callable[[], T_Result],
+        parse_raw_file_job: typing.Callable[[], T_Result],
+        llama_parse_transform_job: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is JobNames.LOAD_DOCUMENTS_JOB:
             return load_documents_job()
@@ -57,8 +55,6 @@ class JobNames(str, enum.Enum):
             return load_files_job()
         if self is JobNames.PLAYGROUND_JOB:
             return playground_job()
-        if self is JobNames.EVAL_DATASET_JOB:
-            return eval_dataset_job()
         if self is JobNames.PIPELINE_MANAGED_INGESTION_JOB:
             return pipeline_managed_ingestion_job()
         if self is JobNames.DATA_SOURCE_MANAGED_INGESTION_JOB:
@@ -73,10 +69,6 @@ class JobNames(str, enum.Enum):
             return file_managed_ingestion_job()
         if self is JobNames.DOCUMENT_INGESTION_JOB:
             return document_ingestion_job()
-        if self is JobNames.PARSE_RAW_FILE_JOB:
-            return parse_raw_file_job()
-        if self is JobNames.LLAMA_PARSE_TRANSFORM_JOB:
-            return llama_parse_transform_job()
         if self is JobNames.METADATA_UPDATE_JOB:
             return metadata_update_job()
         if self is JobNames.PARSE_RAW_FILE_JOB_CACHED:
@@ -87,3 +79,7 @@ class JobNames(str, enum.Enum):
             return extract_job()
         if self is JobNames.ASYNCIO_TEST_JOB:
             return asyncio_test_job()
+        if self is JobNames.PARSE_RAW_FILE_JOB:
+            return parse_raw_file_job()
+        if self is JobNames.LLAMA_PARSE_TRANSFORM_JOB:
+            return llama_parse_transform_job()
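Net effect of this hunk: `EVAL_DATASET_JOB` is removed and the two parse-job members move to the end of the enum. Because `JobNames` subclasses `str`, member values are unchanged and value lookups are order-independent; only code referencing the removed member breaks. A quick sketch, assuming 0.1.20 is installed:

from llama_cloud.types.job_names import JobNames

# Value-based lookup is unaffected by the reordering.
assert JobNames("parse_raw_file_job") is JobNames.PARSE_RAW_FILE_JOB
assert JobNames.PARSE_RAW_FILE_JOB == "parse_raw_file_job"

# The removed member no longer resolves.
try:
    JobNames("eval_dataset_job")
except ValueError:
    print("eval_dataset_job is no longer a valid JobNames value")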
llama_cloud/types/job_record.py
CHANGED

@@ -5,6 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .job_names import JobNames
+from .job_record_parameters import JobRecordParameters
 from .status_enum import StatusEnum

 try:
@@ -25,7 +26,7 @@ class JobRecord(pydantic.BaseModel):
     partitions: typing.Dict[str, str] = pydantic.Field(
         description="The partitions for this execution. Used for determining where to save job output."
     )
-    parameters: typing.Optional[
+    parameters: typing.Optional[JobRecordParameters]
     session_id: typing.Optional[str]
     correlation_id: typing.Optional[str]
     parent_job_execution_id: typing.Optional[str]
@@ -40,7 +41,6 @@ class JobRecord(pydantic.BaseModel):
     started_at: typing.Optional[dt.datetime]
     ended_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
-    data: typing.Optional[typing.Any]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/job_record_parameters.py
ADDED

@@ -0,0 +1,111 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
+from .document_ingestion_job_params import DocumentIngestionJobParams
+from .l_lama_parse_transform_config import LLamaParseTransformConfig
+from .legacy_parse_job_config import LegacyParseJobConfig
+from .load_files_job_config import LoadFilesJobConfig
+from .parse_job_config import ParseJobConfig
+from .pipeline_file_update_dispatcher_config import PipelineFileUpdateDispatcherConfig
+from .pipeline_file_updater_config import PipelineFileUpdaterConfig
+from .pipeline_managed_ingestion_job_params import PipelineManagedIngestionJobParams
+
+
+class JobRecordParameters_DataSourceUpdateDispatcher(DataSourceUpdateDispatcherConfig):
+    type: typing_extensions.Literal["data_source_update_dispatcher"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_DocumentIngestion(DocumentIngestionJobParams):
+    type: typing_extensions.Literal["document_ingestion"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_LegacyParse(LegacyParseJobConfig):
+    type: typing_extensions.Literal["legacy_parse"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_LlamaParseTransform(LLamaParseTransformConfig):
+    type: typing_extensions.Literal["llama_parse_transform"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_LoadFiles(LoadFilesJobConfig):
+    type: typing_extensions.Literal["load_files"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_Parse(ParseJobConfig):
+    type: typing_extensions.Literal["parse"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_PipelineFileUpdateDispatcher(PipelineFileUpdateDispatcherConfig):
+    type: typing_extensions.Literal["pipeline_file_update_dispatcher"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_PipelineFileUpdater(PipelineFileUpdaterConfig):
+    type: typing_extensions.Literal["pipeline_file_updater"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class JobRecordParameters_PipelineManagedIngestion(PipelineManagedIngestionJobParams):
+    type: typing_extensions.Literal["pipeline_managed_ingestion"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+JobRecordParameters = typing.Union[
+    JobRecordParameters_DataSourceUpdateDispatcher,
+    JobRecordParameters_DocumentIngestion,
+    JobRecordParameters_LegacyParse,
+    JobRecordParameters_LlamaParseTransform,
+    JobRecordParameters_LoadFiles,
+    JobRecordParameters_Parse,
+    JobRecordParameters_PipelineFileUpdateDispatcher,
+    JobRecordParameters_PipelineFileUpdater,
+    JobRecordParameters_PipelineManagedIngestion,
+]
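Each variant subclasses one of the job-config models and pins a `type` literal onto it, and `JobRecord.parameters` becomes a plain `typing.Union` of the variants; under pydantic v1 `smart_union`, the literal effectively selects which variant a payload deserializes into. A rough sketch of constructing one variant directly (the file ID is invented):

from llama_cloud.types.job_record_parameters import JobRecordParameters_LoadFiles

params = JobRecordParameters_LoadFiles(
    type="load_files",  # the Literal tag; any other string fails validation
    file_ids=["file-123"],  # invented ID
)
# dict() inherits the by_alias/exclude_unset defaults from LoadFilesJobConfig,
# so only the fields that were set appear in the output.
print(params.dict())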
llama_cloud/types/l_lama_parse_transform_config.py
ADDED

@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class LLamaParseTransformConfig(pydantic.BaseModel):
+    """
+    Schema for the parameters of llamaparse transform job.
+    """
+
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    resource_info: typing.Optional[typing.Dict[str, typing.Any]]
+    file_output: str = pydantic.Field(description="Whether to delete the files")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/legacy_parse_job_config.py
ADDED

@@ -0,0 +1,189 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class LegacyParseJobConfig(pydantic.BaseModel):
+    """
+    Configuration for llamaparse job
+    """
+
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    resource_info: typing.Optional[typing.Dict[str, typing.Any]]
+    user_id: str = pydantic.Field(alias="userId", description="The user ID.")
+    file_name: str = pydantic.Field(alias="fileName", description="The file name.")
+    original_file_name: str = pydantic.Field(alias="originalFileName", description="The original file name.")
+    file_key: str = pydantic.Field(alias="fileKey", description="The file key.")
+    input_url: typing.Optional[str] = pydantic.Field(alias="inputUrl")
+    http_proxy: typing.Optional[str] = pydantic.Field(alias="httpProxy")
+    fast_mode: typing.Optional[bool] = pydantic.Field(alias="fastMode")
+    lang: str = pydantic.Field(description="The language.")
+    template: typing.Optional[str] = pydantic.Field(description="The parsing instruction.")
+    pipeline_id: typing.Optional[str] = pydantic.Field(alias="pipelineId")
+    output_bucket: typing.Optional[str] = pydantic.Field(alias="outputBucket")
+    file_id: typing.Optional[str] = pydantic.Field(alias="fileId")
+    full_file_path: typing.Optional[str] = pydantic.Field(alias="fullFilePath")
+    from_l_lama_cloud: typing.Optional[bool] = pydantic.Field(
+        alias="fromLLamaCloud", description="Whether the file is from LLama cloud."
+    )
+    skip_diagonal_text: typing.Optional[bool] = pydantic.Field(
+        alias="skipDiagonalText", description="Whether to skip diagonal text."
+    )
+    preserve_layout_alignment_across_pages: typing.Optional[bool] = pydantic.Field(
+        alias="preserveLayoutAlignmentAcrossPages", description="Whether to preserve layout alignment across pages."
+    )
+    invalidate_cache: bool = pydantic.Field(alias="invalidateCache", description="Whether to invalidate the cache.")
+    output_pdf_of_document: typing.Optional[bool] = pydantic.Field(alias="outputPDFOfDocument")
+    save_images: typing.Optional[bool] = pydantic.Field(alias="saveImages")
+    gpt_4_o: typing.Optional[bool] = pydantic.Field(alias="gpt4o", description="Whether to use GPT4o.")
+    open_aiapi_key: str = pydantic.Field(alias="openAIAPIKey", description="The OpenAI API key.")
+    do_not_unroll_columns: typing.Optional[bool] = pydantic.Field(
+        alias="doNotUnrollColumns", description="Whether to unroll columns."
+    )
+    spread_sheet_extract_sub_tables: typing.Optional[bool] = pydantic.Field(alias="spreadSheetExtractSubTables")
+    extract_layout: typing.Optional[bool] = pydantic.Field(alias="extractLayout")
+    html_make_all_elements_visible: typing.Optional[bool] = pydantic.Field(alias="htmlMakeAllElementsVisible")
+    html_remove_fixed_elements: typing.Optional[bool] = pydantic.Field(alias="htmlRemoveFixedElements")
+    html_remove_navigation_elements: typing.Optional[bool] = pydantic.Field(alias="htmlRemoveNavigationElements")
+    guess_xlsx_sheet_name: typing.Optional[bool] = pydantic.Field(
+        alias="guessXLSXSheetName", description="Whether to guess the XLSX sheet name when generation output xlsx."
+    )
+    do_not_cache: typing.Optional[bool] = pydantic.Field(alias="doNotCache", description="Whether to cache.")
+    page_separator: typing.Optional[str] = pydantic.Field(alias="pageSeparator")
+    bounding_box: typing.Optional[str] = pydantic.Field(alias="boundingBox")
+    bbox_top: typing.Optional[float] = pydantic.Field(alias="bboxTop")
+    bbox_right: typing.Optional[float] = pydantic.Field(alias="bboxRight")
+    bbox_bottom: typing.Optional[float] = pydantic.Field(alias="bboxBottom")
+    bbox_left: typing.Optional[float] = pydantic.Field(alias="bboxLeft")
+    disable_reconstruction: typing.Optional[bool] = pydantic.Field(alias="disableReconstruction")
+    target_pages: typing.Optional[str] = pydantic.Field(alias="targetPages")
+    multimodal_pipeline: typing.Optional[bool] = pydantic.Field(alias="multimodalPipeline")
+    multimodal_model: typing.Optional[str] = pydantic.Field(alias="multimodalModel")
+    model: typing.Optional[str]
+    vendor_api_key: typing.Optional[str] = pydantic.Field(alias="vendorAPIKey")
+    page_prefix: typing.Optional[str] = pydantic.Field(alias="pagePrefix")
+    page_suffix: typing.Optional[str] = pydantic.Field(alias="pageSuffix")
+    webhook_url: typing.Optional[str] = pydantic.Field(alias="webhookUrl")
+    preset: typing.Optional[str]
+    take_screenshot: typing.Optional[bool] = pydantic.Field(
+        alias="takeScreenshot", description="Force to capture an image of each pages"
+    )
+    is_formatting_instruction: typing.Optional[bool] = pydantic.Field(
+        alias="isFormattingInstruction", description="Allow the parsing instruction to also format the output."
+    )
+    premium_mode: typing.Optional[bool] = pydantic.Field(
+        alias="premiumMode", description="Whether to use premiumMode pipeline."
+    )
+    continuous_mode: typing.Optional[bool] = pydantic.Field(
+        alias="continuousMode", description="Whether to use continuousMode pipeline."
+    )
+    disable_ocr: typing.Optional[bool] = pydantic.Field(
+        alias="disableOcr",
+        description="Disable the OCR on the document. LlamaParse will only extract the copyable text from the document",
+    )
+    disable_image_extraction: typing.Optional[bool] = pydantic.Field(
+        alias="disableImageExtraction",
+        description="Disable the image extraction from the document. LlamaParse will not extract any image from the document.",
+    )
+    annotate_links: typing.Optional[bool] = pydantic.Field(
+        alias="annotateLinks",
+        description="Annotate links in markdown. LlamaParse will try to add links from document into the markdown.",
+    )
+    adaptive_long_table: typing.Optional[bool] = pydantic.Field(
+        alias="adaptiveLongTable",
+        description="Adaptive long table. LlamaParse will try to detect long table and adapt the output.",
+    )
+    compact_markdown_table: typing.Optional[bool] = pydantic.Field(
+        alias="compactMarkdownTable",
+        description="Compact markdown table. LlamaParse will compact the markdown table to not include too many spaces.",
+    )
+    input_s_3_path: typing.Optional[str] = pydantic.Field(alias="inputS3Path")
+    input_s_3_region: typing.Optional[str] = pydantic.Field(alias="inputS3Region")
+    output_s_3_path_prefix: typing.Optional[str] = pydantic.Field(alias="outputS3PathPrefix")
+    output_s_3_region: typing.Optional[str] = pydantic.Field(alias="outputS3Region")
+    project_id: typing.Optional[str] = pydantic.Field(alias="projectId")
+    azure_open_ai_deployment_name: typing.Optional[str] = pydantic.Field(alias="azureOpenAiDeploymentName")
+    azure_open_ai_endpoint: typing.Optional[str] = pydantic.Field(alias="azureOpenAiEndpoint")
+    azure_open_ai_api_version: typing.Optional[str] = pydantic.Field(alias="azureOpenAiApiVersion")
+    azure_open_ai_key: typing.Optional[str] = pydantic.Field(alias="azureOpenAiKey")
+    auto_mode: typing.Optional[bool] = pydantic.Field(alias="autoMode", description="Whether to use auto mode.")
+    auto_mode_trigger_on_table_in_page: typing.Optional[bool] = pydantic.Field(
+        alias="autoModeTriggerOnTableInPage", description="Whether to trigger on table in page."
+    )
+    auto_mode_trigger_on_image_in_page: typing.Optional[bool] = pydantic.Field(
+        alias="autoModeTriggerOnImageInPage", description="Whether to trigger on image in page."
+    )
+    auto_mode_trigger_on_regexp_in_page: typing.Optional[str] = pydantic.Field(alias="autoModeTriggerOnRegexpInPage")
+    auto_mode_trigger_on_text_in_page: typing.Optional[str] = pydantic.Field(alias="autoModeTriggerOnTextInPage")
+    auto_mode_configuration_json: typing.Optional[str] = pydantic.Field(alias="autoModeConfigurationJSON")
+    structured_output: typing.Optional[bool] = pydantic.Field(
+        alias="structuredOutput", description="Whether to use structured output."
+    )
+    structured_output_json_schema: typing.Optional[str] = pydantic.Field(alias="structuredOutputJSONSchema")
+    structured_output_json_schema_name: typing.Optional[str] = pydantic.Field(alias="structuredOutputJSONSchemaName")
+    max_pages: typing.Optional[int] = pydantic.Field(alias="maxPages")
+    extract_charts: typing.Optional[bool] = pydantic.Field(
+        alias="extractCharts", description="Extract charts from the document."
+    )
+    formatting_instruction: typing.Optional[str] = pydantic.Field(alias="formattingInstruction")
+    complemental_formatting_instruction: typing.Optional[str] = pydantic.Field(
+        alias="complementalFormattingInstruction"
+    )
+    content_guideline_instruction: typing.Optional[str] = pydantic.Field(alias="contentGuidelineInstruction")
+    job_timeout_in_seconds: typing.Optional[float] = pydantic.Field(alias="jobTimeoutInSeconds")
+    job_timeout_extra_time_per_page_in_seconds: typing.Optional[float] = pydantic.Field(
+        alias="jobTimeoutExtraTimePerPageInSeconds"
+    )
+    strict_mode_image_extraction: typing.Optional[bool] = pydantic.Field(
+        alias="strictModeImageExtraction",
+        description="If true, the job will fail when we are not able to extract an image from a document.",
+    )
+    strict_mode_image_ocr: typing.Optional[bool] = pydantic.Field(
+        alias="strictModeImageOCR",
+        description="If true, the job will fail when we are not able to OCR an image from a document.",
+    )
+    strict_mode_reconstruction: typing.Optional[bool] = pydantic.Field(
+        alias="strictModeReconstruction",
+        description="If true, the job will fail when we are not able to transform a page to Markdown in a document.",
+    )
+    strict_mode_buggy_font: typing.Optional[bool] = pydantic.Field(
+        alias="strictModeBuggyFont",
+        description="If true, the job will fail when we are not able to extract a glyph from the document due to buggy font.",
+    )
+    ignore_document_elements_for_layout_detection: typing.Optional[bool] = pydantic.Field(
+        alias="ignoreDocumentElementsForLayoutDetection",
+        description="If true, the job will ignore document element for layout detection, and instead just rely on a visual model, only apply to layout detection.",
+    )
+    output_tables_as_html: typing.Optional[bool] = pydantic.Field(
+        alias="outputTablesAsHTML",
+        description="If true, the job will output tables as HTML in the markdown output, useful for merged cells.",
+    )
+    parse_mode: typing.Optional[str] = pydantic.Field(alias="parseMode")
+    system_prompt: typing.Optional[str] = pydantic.Field(alias="systemPrompt")
+    system_prompt_append: typing.Optional[str] = pydantic.Field(alias="systemPromptAppend")
+    user_prompt: typing.Optional[str] = pydantic.Field(alias="userPrompt")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        json_encoders = {dt.datetime: serialize_datetime}
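Because every field declares a camelCase alias and `Config.allow_population_by_field_name = True`, the model can be built with snake_case keyword arguments while `json()`/`dict()` emit the camelCase wire names by default. A brief sketch covering just the required fields (all values invented):

from llama_cloud.types.legacy_parse_job_config import LegacyParseJobConfig

cfg = LegacyParseJobConfig(
    user_id="user-1",  # populated by field name rather than alias
    file_name="report.pdf",
    original_file_name="report.pdf",
    file_key="uploads/report.pdf",
    lang="en",
    invalidate_cache=False,
    open_aiapi_key="sk-...",  # note the generated snake_case for alias "openAIAPIKey"
)
print(cfg.dict())  # keys come out as camelCase: userId, fileName, fileKey, ...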
llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py
CHANGED

@@ -6,10 +6,20 @@ import typing

 import typing_extensions

+from .audio_block import AudioBlock
 from .image_block import ImageBlock
 from .text_block import TextBlock


+class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio(AudioBlock):
+    block_type: typing_extensions.Literal["audio"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
 class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image(ImageBlock):
     block_type: typing_extensions.Literal["image"]

@@ -29,5 +39,7 @@ class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text(TextBlock):


 LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem = typing.Union[
-
+    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
+    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
+    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
 ]
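With the `audio` variant added, a chat message's `blocks` union now discriminates between audio, image, and text on the `block_type` literal. Since `AudioBlock`'s own fields are not shown in this diff, a hedged sketch is limited to type-level handling:

from llama_cloud.types.llama_index_core_base_llms_types_chat_message_blocks_item import (
    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
)

def is_audio_block(block: LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem) -> bool:
    # Parsed payloads land in the variant whose block_type literal matched,
    # so an isinstance check is enough to branch on the block kind.
    return isinstance(block, LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio)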
llama_cloud/types/llama_parse_parameters.py
CHANGED

@@ -4,6 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from .fail_page_mode import FailPageMode
 from .parser_languages import ParserLanguages
 from .parsing_mode import ParsingMode

@@ -78,6 +79,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     auto_mode_trigger_on_text_in_page: typing.Optional[str]
     auto_mode_trigger_on_table_in_page: typing.Optional[bool]
     auto_mode_trigger_on_image_in_page: typing.Optional[bool]
+    auto_mode_configuration_json: typing.Optional[str]
     structured_output: typing.Optional[bool]
     structured_output_json_schema: typing.Optional[str]
     structured_output_json_schema_name: typing.Optional[str]
@@ -94,6 +96,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     strict_mode_image_ocr: typing.Optional[bool]
     strict_mode_reconstruction: typing.Optional[bool]
     strict_mode_buggy_font: typing.Optional[bool]
+    save_images: typing.Optional[bool]
     ignore_document_elements_for_layout_detection: typing.Optional[bool]
     output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
     internal_is_screenshot_job: typing.Optional[bool]
@@ -101,6 +104,11 @@ class LlamaParseParameters(pydantic.BaseModel):
     system_prompt: typing.Optional[str]
     system_prompt_append: typing.Optional[str]
     user_prompt: typing.Optional[str]
+    page_error_tolerance: typing.Optional[float]
+    replace_failed_page_mode: typing.Optional[FailPageMode]
+    replace_failed_page_with_error_message_prefix: typing.Optional[str]
+    replace_failed_page_with_error_message_suffix: typing.Optional[str]
+    markdown_table_multiline_header_separator: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
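The five new fields tie `LlamaParseParameters` to the `FailPageMode` enum added above: `page_error_tolerance` bounds how many pages may fail, and the `replace_failed_page_*` fields control what a failed page is replaced with. A sketch assuming the model's other fields stay optional, as they appear to be here (values are illustrative):

from llama_cloud.types.fail_page_mode import FailPageMode
from llama_cloud.types.llama_parse_parameters import LlamaParseParameters

params = LlamaParseParameters(
    page_error_tolerance=0.1,  # illustrative: tolerate up to 10% failed pages
    replace_failed_page_mode=FailPageMode.ERROR_MESSAGE,
    replace_failed_page_with_error_message_prefix="[[PAGE FAILED: ",
    replace_failed_page_with_error_message_suffix="]]",
)
print(params.json())  # only the set fields are emitted, per exclude_unset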
llama_cloud/types/load_files_job_config.py
ADDED

@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class LoadFilesJobConfig(pydantic.BaseModel):
+    """
+    Schema for the parameters of a load files job.
+    """
+
+    file_ids: typing.Optional[typing.List[str]]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/message_role.py
CHANGED

@@ -12,6 +12,7 @@ class MessageRole(str, enum.Enum):
     """

     SYSTEM = "system"
+    DEVELOPER = "developer"
     USER = "user"
     ASSISTANT = "assistant"
     FUNCTION = "function"
@@ -22,6 +23,7 @@ class MessageRole(str, enum.Enum):
     def visit(
         self,
         system: typing.Callable[[], T_Result],
+        developer: typing.Callable[[], T_Result],
         user: typing.Callable[[], T_Result],
         assistant: typing.Callable[[], T_Result],
         function: typing.Callable[[], T_Result],
@@ -31,6 +33,8 @@ class MessageRole(str, enum.Enum):
     ) -> T_Result:
         if self is MessageRole.SYSTEM:
             return system()
+        if self is MessageRole.DEVELOPER:
+            return developer()
         if self is MessageRole.USER:
             return user()
         if self is MessageRole.ASSISTANT: