llama-cloud 0.0.17__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of llama-cloud has been flagged as potentially problematic.
- llama_cloud/__init__.py +8 -36
- llama_cloud/client.py +0 -3
- llama_cloud/resources/__init__.py +2 -10
- llama_cloud/resources/data_sinks/__init__.py +2 -2
- llama_cloud/resources/data_sinks/client.py +8 -8
- llama_cloud/resources/data_sinks/types/__init__.py +1 -2
- llama_cloud/resources/data_sinks/types/data_sink_update_component.py +15 -2
- llama_cloud/resources/data_sources/__init__.py +2 -2
- llama_cloud/resources/data_sources/client.py +6 -6
- llama_cloud/resources/data_sources/types/__init__.py +1 -2
- llama_cloud/resources/data_sources/types/data_source_update_component.py +23 -2
- llama_cloud/resources/extraction/client.py +14 -14
- llama_cloud/resources/files/client.py +10 -10
- llama_cloud/resources/organizations/client.py +2 -2
- llama_cloud/resources/parsing/client.py +100 -60
- llama_cloud/resources/pipelines/__init__.py +0 -4
- llama_cloud/resources/pipelines/client.py +50 -340
- llama_cloud/resources/pipelines/types/__init__.py +1 -7
- llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +15 -15
- llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py +1 -24
- llama_cloud/types/__init__.py +9 -29
- llama_cloud/types/azure_open_ai_embedding.py +7 -39
- llama_cloud/types/base_prompt_template.py +3 -14
- llama_cloud/types/bedrock_embedding.py +7 -20
- llama_cloud/types/box_auth_mechanism.py +0 -4
- llama_cloud/types/character_splitter.py +3 -4
- llama_cloud/types/chat_data.py +0 -5
- llama_cloud/types/chat_message.py +1 -6
- llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -18
- llama_cloud/types/cloud_box_data_source.py +6 -16
- llama_cloud/types/cloud_confluence_data_source.py +6 -10
- llama_cloud/types/cloud_document.py +1 -3
- llama_cloud/types/cloud_document_create.py +1 -3
- llama_cloud/types/cloud_google_drive_data_source.py +0 -4
- llama_cloud/types/cloud_jira_data_source.py +4 -6
- llama_cloud/types/cloud_notion_page_data_source.py +2 -6
- llama_cloud/types/cloud_one_drive_data_source.py +2 -6
- llama_cloud/types/cloud_pinecone_vector_store.py +1 -1
- llama_cloud/types/cloud_postgres_vector_store.py +0 -4
- llama_cloud/types/cloud_s_3_data_source.py +4 -12
- llama_cloud/types/cloud_sharepoint_data_source.py +5 -9
- llama_cloud/types/cloud_slack_data_source.py +6 -10
- llama_cloud/types/code_splitter.py +2 -1
- llama_cloud/types/cohere_embedding.py +6 -15
- llama_cloud/types/configurable_data_sink_names.py +0 -12
- llama_cloud/types/configurable_data_source_names.py +0 -4
- llama_cloud/types/configurable_transformation_names.py +0 -32
- llama_cloud/types/configured_transformation_item_component.py +15 -2
- llama_cloud/types/data_sink.py +2 -2
- llama_cloud/types/data_sink_component.py +15 -2
- llama_cloud/types/data_sink_create_component.py +15 -2
- llama_cloud/types/data_source.py +3 -5
- llama_cloud/types/data_source_component.py +23 -2
- llama_cloud/types/data_source_create.py +1 -3
- llama_cloud/types/data_source_create_component.py +23 -2
- llama_cloud/types/eval_dataset.py +2 -2
- llama_cloud/types/eval_dataset_job_record.py +7 -13
- llama_cloud/types/eval_execution_params_override.py +2 -6
- llama_cloud/types/eval_metric.py +17 -0
- llama_cloud/types/eval_question.py +2 -6
- llama_cloud/types/extraction_result.py +5 -3
- llama_cloud/types/extraction_schema.py +3 -5
- llama_cloud/types/file.py +7 -11
- llama_cloud/types/gemini_embedding.py +7 -22
- llama_cloud/types/hugging_face_inference_api_embedding.py +9 -34
- llama_cloud/types/input_message.py +2 -4
- llama_cloud/types/llama_parse_parameters.py +5 -0
- llama_cloud/types/llama_parse_supported_file_extensions.py +0 -4
- llama_cloud/types/llm.py +9 -8
- llama_cloud/types/llm_parameters.py +2 -7
- llama_cloud/types/local_eval.py +8 -10
- llama_cloud/types/local_eval_results.py +1 -1
- llama_cloud/types/managed_ingestion_status_response.py +3 -5
- llama_cloud/types/markdown_element_node_parser.py +4 -5
- llama_cloud/types/markdown_node_parser.py +2 -1
- llama_cloud/types/message_annotation.py +1 -6
- llama_cloud/types/metric_result.py +3 -3
- llama_cloud/types/node_parser.py +2 -1
- llama_cloud/types/node_relationship.py +44 -0
- llama_cloud/types/object_type.py +0 -4
- llama_cloud/types/open_ai_embedding.py +7 -36
- llama_cloud/types/organization.py +2 -2
- llama_cloud/types/page_splitter_node_parser.py +3 -2
- llama_cloud/types/parsing_job_json_result.py +2 -2
- llama_cloud/types/parsing_job_markdown_result.py +1 -1
- llama_cloud/types/parsing_job_text_result.py +1 -1
- llama_cloud/types/partition_names.py +45 -0
- llama_cloud/types/pipeline.py +7 -17
- llama_cloud/types/pipeline_configuration_hashes.py +3 -3
- llama_cloud/types/pipeline_create.py +6 -18
- llama_cloud/types/pipeline_create_embedding_config.py +15 -15
- llama_cloud/types/pipeline_create_transform_config.py +1 -24
- llama_cloud/types/pipeline_data_source.py +5 -11
- llama_cloud/types/pipeline_data_source_component.py +23 -2
- llama_cloud/types/pipeline_data_source_create.py +1 -3
- llama_cloud/types/pipeline_deployment.py +4 -8
- llama_cloud/types/pipeline_embedding_config.py +15 -15
- llama_cloud/types/pipeline_file.py +10 -18
- llama_cloud/types/pipeline_file_create.py +1 -3
- llama_cloud/types/playground_session.py +2 -2
- llama_cloud/types/preset_retrieval_params.py +8 -11
- llama_cloud/types/presigned_url.py +1 -3
- llama_cloud/types/project.py +2 -2
- llama_cloud/types/prompt_mixin_prompts.py +1 -1
- llama_cloud/types/prompt_spec.py +2 -4
- llama_cloud/types/related_node_info.py +0 -4
- llama_cloud/types/retrieval_mode.py +0 -4
- llama_cloud/types/sentence_splitter.py +3 -4
- llama_cloud/types/supported_llm_model_names.py +0 -4
- llama_cloud/types/text_node.py +3 -9
- llama_cloud/types/token_text_splitter.py +2 -1
- llama_cloud/types/transformation_category_names.py +0 -4
- llama_cloud/types/user_organization.py +5 -9
- llama_cloud/types/user_organization_create.py +2 -2
- llama_cloud/types/user_organization_delete.py +2 -2
- llama_cloud/types/vertex_ai_embedding_config.py +2 -2
- llama_cloud/types/{extend_vertex_text_embedding.py → vertex_text_embedding.py} +10 -23
- {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.1.dist-info}/METADATA +1 -1
- llama_cloud-0.1.1.dist-info/RECORD +224 -0
- llama_cloud/resources/auth/__init__.py +0 -2
- llama_cloud/resources/auth/client.py +0 -124
- llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +0 -23
- llama_cloud/resources/data_sources/types/data_source_update_component_one.py +0 -27
- llama_cloud/types/cloud_chroma_vector_store.py +0 -43
- llama_cloud/types/cloud_weaviate_vector_store.py +0 -41
- llama_cloud/types/configured_transformation_item_component_one.py +0 -35
- llama_cloud/types/custom_claims.py +0 -58
- llama_cloud/types/data_sink_component_one.py +0 -23
- llama_cloud/types/data_sink_create_component_one.py +0 -23
- llama_cloud/types/data_source_component_one.py +0 -27
- llama_cloud/types/data_source_create_component_one.py +0 -27
- llama_cloud/types/pipeline_data_source_component_one.py +0 -27
- llama_cloud/types/user.py +0 -35
- llama_cloud-0.0.17.dist-info/RECORD +0 -235
- {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.1.dist-info}/LICENSE +0 -0
- {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.1.dist-info}/WHEEL +0 -0
llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py
CHANGED

@@ -15,8 +15,8 @@ from ....types.open_ai_embedding_config import OpenAiEmbeddingConfig
 from ....types.vertex_ai_embedding_config import VertexAiEmbeddingConfig


-class PipelineUpdateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
-    type: typing_extensions.Literal["OPENAI_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["AZURE_EMBEDDING"]

     class Config:
         frozen = True
@@ -24,8 +24,8 @@ class PipelineUpdateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
-    type: typing_extensions.Literal["AZURE_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
+    type: typing_extensions.Literal["BEDROCK_EMBEDDING"]

     class Config:
         frozen = True
@@ -33,8 +33,8 @@ class PipelineUpdateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmbeddingConfig):
-    type: typing_extensions.Literal["HUGGINGFACE_API_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
+    type: typing_extensions.Literal["COHERE_EMBEDDING"]

     class Config:
         frozen = True
@@ -42,8 +42,8 @@ class PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInference
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
-    type: typing_extensions.Literal["BEDROCK_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
+    type: typing_extensions.Literal["GEMINI_EMBEDDING"]

     class Config:
         frozen = True
@@ -51,8 +51,8 @@ class PipelineUpdateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
-    type: typing_extensions.Literal["GEMINI_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmbeddingConfig):
+    type: typing_extensions.Literal["HUGGINGFACE_API_EMBEDDING"]

     class Config:
         frozen = True
@@ -60,8 +60,8 @@ class PipelineUpdateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
-    type: typing_extensions.Literal["COHERE_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["OPENAI_EMBEDDING"]

     class Config:
         frozen = True
@@ -79,11 +79,11 @@ class PipelineUpdateEmbeddingConfig_VertexaiEmbedding(VertexAiEmbeddingConfig):


 PipelineUpdateEmbeddingConfig = typing.Union[
-    PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_AzureEmbedding,
-    PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
     PipelineUpdateEmbeddingConfig_BedrockEmbedding,
-    PipelineUpdateEmbeddingConfig_GeminiEmbedding,
     PipelineUpdateEmbeddingConfig_CohereEmbedding,
+    PipelineUpdateEmbeddingConfig_GeminiEmbedding,
+    PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
 ]
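The net effect of these hunks is a purely alphabetical reordering of the variant classes; the discriminated-union shape itself is unchanged. A minimal sketch of how a caller can branch on the discriminator (the import path is inferred from the file layout, not shown in the diff):

# Sketch only: import path assumed from the package layout above.
from llama_cloud.resources.pipelines.types.pipeline_update_embedding_config import (
    PipelineUpdateEmbeddingConfig,
)

def embedding_kind(config: PipelineUpdateEmbeddingConfig) -> str:
    # Every variant carries a `type` literal such as "AZURE_EMBEDDING" or
    # "OPENAI_EMBEDDING", so reading the discriminator identifies the variant.
    return config.type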
llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py
CHANGED

@@ -1,31 +1,8 @@
 # This file was auto-generated by Fern from our API Definition.

-from __future__ import annotations
-
 import typing

-import typing_extensions
-
 from ....types.advanced_mode_transform_config import AdvancedModeTransformConfig
 from ....types.auto_transform_config import AutoTransformConfig

-
-class PipelineUpdateTransformConfig_Auto(AutoTransformConfig):
-    mode: typing_extensions.Literal["auto"]
-
-    class Config:
-        frozen = True
-        smart_union = True
-        allow_population_by_field_name = True
-
-
-class PipelineUpdateTransformConfig_Advanced(AdvancedModeTransformConfig):
-    mode: typing_extensions.Literal["advanced"]
-
-    class Config:
-        frozen = True
-        smart_union = True
-        allow_population_by_field_name = True
-
-
-PipelineUpdateTransformConfig = typing.Union[PipelineUpdateTransformConfig_Auto, PipelineUpdateTransformConfig_Advanced]
+PipelineUpdateTransformConfig = typing.Union[AutoTransformConfig, AdvancedModeTransformConfig]
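This flattening removes the `mode` discriminator wrappers, so 0.0.17 code that constructed `PipelineUpdateTransformConfig_Auto(mode="auto", ...)` no longer works. A hedged migration sketch (constructor arguments are an assumption; the diff does not show AutoTransformConfig's fields):

# Sketch only: under 0.1.1 the union members are the config models themselves.
from llama_cloud.types.auto_transform_config import AutoTransformConfig

# 0.0.17: PipelineUpdateTransformConfig_Auto(mode="auto", ...)
# 0.1.1:  pass the AutoTransformConfig directly; no mode literal is needed.
transform_config = AutoTransformConfig()  # assumes its fields are optional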
llama_cloud/types/__init__.py
CHANGED

@@ -30,7 +30,6 @@ from .chat_message import ChatMessage
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_box_data_source import CloudBoxDataSource
-from .cloud_chroma_vector_store import CloudChromaVectorStore
 from .cloud_confluence_data_source import CloudConfluenceDataSource
 from .cloud_document import CloudDocument
 from .cloud_document_create import CloudDocumentCreate
@@ -46,7 +45,6 @@ from .cloud_qdrant_vector_store import CloudQdrantVectorStore
 from .cloud_s_3_data_source import CloudS3DataSource
 from .cloud_sharepoint_data_source import CloudSharepointDataSource
 from .cloud_slack_data_source import CloudSlackDataSource
-from .cloud_weaviate_vector_store import CloudWeaviateVectorStore
 from .code_splitter import CodeSplitter
 from .cohere_embedding import CohereEmbedding
 from .cohere_embedding_config import CohereEmbeddingConfig
@@ -56,21 +54,15 @@ from .configurable_transformation_definition import ConfigurableTransformationDe
 from .configurable_transformation_names import ConfigurableTransformationNames
 from .configured_transformation_item import ConfiguredTransformationItem
 from .configured_transformation_item_component import ConfiguredTransformationItemComponent
-from .configured_transformation_item_component_one import ConfiguredTransformationItemComponentOne
-from .custom_claims import CustomClaims
 from .data_sink import DataSink
 from .data_sink_component import DataSinkComponent
-from .data_sink_component_one import DataSinkComponentOne
 from .data_sink_create import DataSinkCreate
 from .data_sink_create_component import DataSinkCreateComponent
-from .data_sink_create_component_one import DataSinkCreateComponentOne
 from .data_sink_definition import DataSinkDefinition
 from .data_source import DataSource
 from .data_source_component import DataSourceComponent
-from .data_source_component_one import DataSourceComponentOne
 from .data_source_create import DataSourceCreate
 from .data_source_create_component import DataSourceCreateComponent
-from .data_source_create_component_one import DataSourceCreateComponentOne
 from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_definition import DataSourceDefinition
@@ -80,10 +72,10 @@ from .eval_dataset_job_params import EvalDatasetJobParams
 from .eval_dataset_job_record import EvalDatasetJobRecord
 from .eval_execution_params import EvalExecutionParams
 from .eval_execution_params_override import EvalExecutionParamsOverride
+from .eval_metric import EvalMetric
 from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
-from .extend_vertex_text_embedding import ExtendVertexTextEmbedding
 from .extraction_job import ExtractionJob
 from .extraction_result import ExtractionResult
 from .extraction_result_data_value import ExtractionResultDataValue
@@ -122,6 +114,7 @@ from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
 from .metric_result import MetricResult
 from .node_parser import NodeParser
+from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
 from .none_segmentation_config import NoneSegmentationConfig
 from .object_type import ObjectType
@@ -140,6 +133,7 @@ from .parsing_job_json_result import ParsingJobJsonResult
 from .parsing_job_markdown_result import ParsingJobMarkdownResult
 from .parsing_job_text_result import ParsingJobTextResult
 from .parsing_usage import ParsingUsage
+from .partition_names import PartitionNames
 from .pipeline import Pipeline
 from .pipeline_configuration_hashes import PipelineConfigurationHashes
 from .pipeline_create import PipelineCreate
@@ -153,14 +147,9 @@ from .pipeline_create_embedding_config import (
     PipelineCreateEmbeddingConfig_OpenaiEmbedding,
     PipelineCreateEmbeddingConfig_VertexaiEmbedding,
 )
-from .pipeline_create_transform_config import (
-    PipelineCreateTransformConfig,
-    PipelineCreateTransformConfig_Advanced,
-    PipelineCreateTransformConfig_Auto,
-)
+from .pipeline_create_transform_config import PipelineCreateTransformConfig
 from .pipeline_data_source import PipelineDataSource
 from .pipeline_data_source_component import PipelineDataSourceComponent
-from .pipeline_data_source_component_one import PipelineDataSourceComponentOne
 from .pipeline_data_source_create import PipelineDataSourceCreate
 from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 from .pipeline_deployment import PipelineDeployment
@@ -210,7 +199,6 @@ from .text_node_with_score import TextNodeWithScore
 from .token_chunking_config import TokenChunkingConfig
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
-from .user import User
 from .user_organization import UserOrganization
 from .user_organization_create import UserOrganizationCreate
 from .user_organization_delete import UserOrganizationDelete
@@ -218,6 +206,7 @@ from .validation_error import ValidationError
 from .validation_error_loc_item import ValidationErrorLocItem
 from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
 from .vertex_embedding_mode import VertexEmbeddingMode
+from .vertex_text_embedding import VertexTextEmbedding

 __all__ = [
     "AdvancedModeTransformConfig",
@@ -246,7 +235,6 @@ __all__ = [
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
-    "CloudChromaVectorStore",
     "CloudConfluenceDataSource",
     "CloudDocument",
     "CloudDocumentCreate",
@@ -262,7 +250,6 @@ __all__ = [
     "CloudS3DataSource",
     "CloudSharepointDataSource",
     "CloudSlackDataSource",
-    "CloudWeaviateVectorStore",
     "CodeSplitter",
     "CohereEmbedding",
     "CohereEmbeddingConfig",
@@ -272,21 +259,15 @@ __all__ = [
     "ConfigurableTransformationNames",
     "ConfiguredTransformationItem",
     "ConfiguredTransformationItemComponent",
-    "ConfiguredTransformationItemComponentOne",
-    "CustomClaims",
     "DataSink",
     "DataSinkComponent",
-    "DataSinkComponentOne",
     "DataSinkCreate",
     "DataSinkCreateComponent",
-    "DataSinkCreateComponentOne",
     "DataSinkDefinition",
     "DataSource",
     "DataSourceComponent",
-    "DataSourceComponentOne",
     "DataSourceCreate",
     "DataSourceCreateComponent",
-    "DataSourceCreateComponentOne",
     "DataSourceCreateCustomMetadataValue",
     "DataSourceCustomMetadataValue",
     "DataSourceDefinition",
@@ -296,10 +277,10 @@ __all__ = [
     "EvalDatasetJobRecord",
     "EvalExecutionParams",
     "EvalExecutionParamsOverride",
+    "EvalMetric",
     "EvalQuestion",
     "EvalQuestionCreate",
     "EvalQuestionResult",
-    "ExtendVertexTextEmbedding",
     "ExtractionJob",
     "ExtractionResult",
     "ExtractionResultDataValue",
@@ -338,6 +319,7 @@ __all__ = [
     "MetadataFiltersFiltersItem",
     "MetricResult",
     "NodeParser",
+    "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",
     "ObjectType",
@@ -356,6 +338,7 @@ __all__ = [
     "ParsingJobMarkdownResult",
     "ParsingJobTextResult",
     "ParsingUsage",
+    "PartitionNames",
     "Pipeline",
     "PipelineConfigurationHashes",
     "PipelineCreate",
@@ -368,11 +351,8 @@ __all__ = [
     "PipelineCreateEmbeddingConfig_OpenaiEmbedding",
     "PipelineCreateEmbeddingConfig_VertexaiEmbedding",
     "PipelineCreateTransformConfig",
-    "PipelineCreateTransformConfig_Advanced",
-    "PipelineCreateTransformConfig_Auto",
     "PipelineDataSource",
     "PipelineDataSourceComponent",
-    "PipelineDataSourceComponentOne",
     "PipelineDataSourceCreate",
     "PipelineDataSourceCustomMetadataValue",
     "PipelineDeployment",
@@ -418,7 +398,6 @@ __all__ = [
     "TokenChunkingConfig",
     "TokenTextSplitter",
     "TransformationCategoryNames",
-    "User",
     "UserOrganization",
     "UserOrganizationCreate",
     "UserOrganizationDelete",
@@ -426,4 +405,5 @@ __all__ = [
     "ValidationErrorLocItem",
     "VertexAiEmbeddingConfig",
     "VertexEmbeddingMode",
+    "VertexTextEmbedding",
 ]
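For consumers importing from `llama_cloud.types` (or the package root, if it re-exports these symbols), the removals and the `ExtendVertexTextEmbedding` → `VertexTextEmbedding` rename are the breaking part of this hunk. A hedged sketch of the import fallout:

# Sketch only: assumes llama_cloud re-exports llama_cloud.types at the root.
# Removed in 0.1.1 (these now raise ImportError):
#   from llama_cloud import User, CustomClaims, CloudChromaVectorStore
# Renamed:
from llama_cloud import VertexTextEmbedding   # was ExtendVertexTextEmbedding
# New in 0.1.1:
from llama_cloud import EvalMetric, NodeRelationship, PartitionNames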
llama_cloud/types/azure_open_ai_embedding.py
CHANGED

@@ -15,56 +15,24 @@ except ImportError:


 class AzureOpenAiEmbedding(pydantic.BaseModel):
-    """
-    OpenAI class for embeddings.
-
-    Args:
-        mode (str): Mode for embedding.
-            Defaults to OpenAIEmbeddingMode.TEXT_SEARCH_MODE.
-            Options are:
-
-            - OpenAIEmbeddingMode.SIMILARITY_MODE
-            - OpenAIEmbeddingMode.TEXT_SEARCH_MODE
-
-        model (str): Model for embedding.
-            Defaults to OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002.
-            Options are:
-
-            - OpenAIEmbeddingModelType.DAVINCI
-            - OpenAIEmbeddingModelType.CURIE
-            - OpenAIEmbeddingModelType.BABBAGE
-            - OpenAIEmbeddingModelType.ADA
-            - OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002
-    """
-
-    model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
+    model_name: typing.Optional[str] = pydantic.Field(description="The name of the OpenAI embedding model.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
+    num_workers: typing.Optional[int]
     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="Additional kwargs for the OpenAI API."
     )
-    api_key: str
+    api_key: typing.Optional[str]
     api_base: typing.Optional[str] = pydantic.Field(description="The base URL for Azure deployment.")
     api_version: typing.Optional[str] = pydantic.Field(description="The version for Azure OpenAI API.")
     max_retries: typing.Optional[int] = pydantic.Field(description="Maximum number of retries.")
     timeout: typing.Optional[float] = pydantic.Field(description="Timeout for each request.")
-    default_headers: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
-        description="The default headers for API requests."
-    )
+    default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
     reuse_client: typing.Optional[bool] = pydantic.Field(
         description="Reuse the OpenAI client between requests. When doing anything with large volumes of async API calls, setting this to false can improve stability."
     )
-    dimensions: typing.Optional[int]
-
-
-    azure_endpoint: typing.Optional[str] = pydantic.Field(description="The Azure endpoint to use.")
-    azure_deployment: typing.Optional[str] = pydantic.Field(description="The Azure deployment to use.")
-    use_azure_ad: bool = pydantic.Field(
-        description="Indicates if Microsoft Entra ID (former Azure AD) is used for token authentication"
-    )
+    dimensions: typing.Optional[int]
+    azure_endpoint: typing.Optional[str]
+    azure_deployment: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
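Besides dropping the inherited OpenAI docstring, the substantive changes are that `api_key` loosens from a required `str` to `Optional[str]` and `use_azure_ad` disappears. A hedged construction sketch (assumes the model stays exported from the package root; values are hypothetical):

# Sketch only: every field shown in the 0.1.1 version of this model is
# Optional, so a construction without api_key should now validate.
from llama_cloud import AzureOpenAiEmbedding

embedding = AzureOpenAiEmbedding(
    azure_endpoint="https://example.openai.azure.com",  # hypothetical value
    azure_deployment="text-embedding-ada-002",          # hypothetical value
)
print(embedding.api_key)  # None is now allowed; api_key was required in 0.0.17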
llama_cloud/types/base_prompt_template.py
CHANGED

@@ -15,23 +15,12 @@ except ImportError:


 class BasePromptTemplate(pydantic.BaseModel):
-    """
-    Chainable mixin.
-
-    A module that can produce a `QueryComponent` from a set of inputs through
-    `as_query_component`.
-
-    If plugged in directly into a `QueryPipeline`, the `ChainableMixin` will be
-    converted into a `QueryComponent` with default parameters.
-    """
-
     metadata: typing.Dict[str, typing.Any]
     template_vars: typing.List[str]
     kwargs: typing.Dict[str, str]
-    output_parser: typing.
-    template_var_mappings: typing.Optional[typing.Dict[str, typing.Any]]
-
-    )
+    output_parser: typing.Any
+    template_var_mappings: typing.Optional[typing.Dict[str, typing.Any]]
+    function_mappings: typing.Optional[typing.Dict[str, typing.Optional[str]]]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/bedrock_embedding.py
CHANGED

@@ -15,27 +15,14 @@ except ImportError:


 class BedrockEmbedding(pydantic.BaseModel):
-    """
-    Base class for embeddings.
-    """
-
-    model_name: str = pydantic.Field(description="The modelId of the Bedrock model to use.")
+    model_name: typing.Optional[str] = pydantic.Field(description="The modelId of the Bedrock model to use.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-
-
-
-
-
-
-    )
-    aws_access_key_id: typing.Optional[str] = pydantic.Field(description="AWS Access Key ID to use")
-    aws_secret_access_key: typing.Optional[str] = pydantic.Field(description="AWS Secret Access Key to use")
-    aws_session_token: typing.Optional[str] = pydantic.Field(description="AWS Session Token to use")
-    region_name: typing.Optional[str] = pydantic.Field(
-        description="AWS region name to use. Uses region configured in AWS CLI if not passed"
-    )
-    botocore_session: typing.Optional[typing.Any]
-    botocore_config: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
+    profile_name: typing.Optional[str]
+    aws_access_key_id: typing.Optional[str]
+    aws_secret_access_key: typing.Optional[str]
+    aws_session_token: typing.Optional[str]
+    region_name: typing.Optional[str]
     max_retries: typing.Optional[int] = pydantic.Field(description="The maximum number of API retries.")
     timeout: typing.Optional[float] = pydantic.Field(
         description="The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts."
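`BedrockEmbedding` drops the `botocore_session`/`botocore_config` passthroughs, gains `profile_name` and `num_workers`, and loosens `model_name` to Optional. A hedged construction sketch (assumes the model stays exported from the package root; values are hypothetical):

# Sketch only: field values below are illustrative, not from the diff.
from llama_cloud import BedrockEmbedding

embedding = BedrockEmbedding(
    model_name="amazon.titan-embed-text-v1",  # hypothetical Bedrock modelId
    profile_name="default",   # new in 0.1.1; replaces passing a botocore session
    region_name="us-east-1",  # hypothetical region
)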
llama_cloud/types/character_splitter.py
CHANGED

@@ -23,14 +23,13 @@ class CharacterSplitter(pydantic.BaseModel):
         description="Whether or not to consider metadata when splitting."
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.
+    callback_manager: typing.Optional[typing.Any]
+    id_func: typing.Optional[str]
     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
     paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
-    secondary_chunking_regex: typing.Optional[str] = pydantic.Field(
-        description="Backup regex for splitting into sentences."
-    )
+    secondary_chunking_regex: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/chat_data.py
CHANGED

@@ -17,11 +17,6 @@ except ImportError:


 class ChatData(pydantic.BaseModel):
-    """
-    Base schema model for BaseComponent classes used in the platform.
-    Comes with special serialization logic for types used commonly in platform codebase.
-    """
-
     retrieval_parameters: typing.Optional[PresetRetrievalParams]
     llm_parameters: typing.Optional[LlmParameters]
     class_name: typing.Optional[str]
llama_cloud/types/chat_message.py
CHANGED

@@ -17,18 +17,13 @@ except ImportError:


 class ChatMessage(pydantic.BaseModel):
-    """
-    Base schema model for BaseComponent classes used in the platform.
-    Comes with special serialization logic for types used commonly in platform codebase.
-    """
-
     id: str
     index: int = pydantic.Field(description="The index of the message in the chat.")
     annotations: typing.Optional[typing.List[MessageAnnotation]] = pydantic.Field(
         description="Retrieval annotations for the message."
     )
     role: MessageRole
-    content: typing.Optional[str]
+    content: typing.Optional[str]
     additional_kwargs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
         description="Additional arguments passed to the model"
     )
llama_cloud/types/cloud_az_storage_blob_data_source.py
CHANGED

@@ -15,26 +15,15 @@ except ImportError:


 class CloudAzStorageBlobDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     container_name: str = pydantic.Field(description="The name of the Azure Storage Blob container to read from.")
     account_url: str = pydantic.Field(description="The Azure Storage Blob account URL to use for authentication.")
-
-
-
-
-
-
-        description="The Azure Storage Blob account key to use for authentication."
-    )
-    tenant_id: typing.Optional[str] = pydantic.Field(description="The Azure AD tenant ID to use for authentication.")
-    client_id: typing.Optional[str] = pydantic.Field(description="The Azure AD client ID to use for authentication.")
-    client_secret: typing.Optional[str] = pydantic.Field(
-        description="The Azure AD client secret to use for authentication."
-    )
+    blob: typing.Optional[str]
+    prefix: typing.Optional[str]
+    account_name: typing.Optional[str]
+    account_key: typing.Optional[str]
+    tenant_id: typing.Optional[str]
+    client_id: typing.Optional[str]
+    client_secret: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
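`CloudAzStorageBlobDataSource` keeps `container_name` and `account_url` as its only required fields and adds `blob`, `prefix`, `account_name`, and `account_key`. A hedged sketch (assumes the model stays exported from the package root; values are hypothetical):

# Sketch only: values below are illustrative, not from the diff.
from llama_cloud import CloudAzStorageBlobDataSource

source = CloudAzStorageBlobDataSource(
    container_name="my-container",                          # required
    account_url="https://myaccount.blob.core.windows.net",  # required
    prefix="reports/2024/",  # new in 0.1.1 (presumably a blob-path filter)
)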
llama_cloud/types/cloud_box_data_source.py
CHANGED

@@ -16,25 +16,15 @@ except ImportError:


 class CloudBoxDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
-    folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Box folder to read from.")
+    folder_id: typing.Optional[str]
     authentication_mechanism: BoxAuthMechanism = pydantic.Field(
         description="The type of authentication to use (Developer Token or CCG)"
     )
-    developer_token: typing.Optional[str]
-
-
-
-
-    )
-    client_secret: typing.Optional[str] = pydantic.Field(description="Box API secret used for making auth requests.")
-    user_id: typing.Optional[str] = pydantic.Field(description="Box User ID, if provided authenticates as user.")
-    enterprise_id: typing.Optional[str] = pydantic.Field(
-        description="Box Enterprise ID, if provided authenticates as service."
-    )
+    developer_token: typing.Optional[str]
+    client_id: typing.Optional[str]
+    client_secret: typing.Optional[str]
+    user_id: typing.Optional[str]
+    enterprise_id: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/cloud_confluence_data_source.py
CHANGED

@@ -15,20 +15,16 @@ except ImportError:


 class CloudConfluenceDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     server_url: str = pydantic.Field(description="The server URL of the Confluence instance.")
     authentication_mechanism: str = pydantic.Field(
         description="Type of Authentication for connecting to Confluence APIs."
     )
-    user_name: typing.Optional[str]
-    api_token: typing.Optional[str]
-    space_key: typing.Optional[str]
-    page_ids: typing.Optional[str]
-    cql: typing.Optional[str]
-    label: typing.Optional[str]
+    user_name: typing.Optional[str]
+    api_token: typing.Optional[str]
+    space_key: typing.Optional[str]
+    page_ids: typing.Optional[str]
+    cql: typing.Optional[str]
+    label: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/cloud_document.py
CHANGED

@@ -23,9 +23,7 @@ class CloudDocument(pydantic.BaseModel):
     metadata: typing.Dict[str, typing.Any]
     excluded_embed_metadata_keys: typing.Optional[typing.List[str]]
     excluded_llm_metadata_keys: typing.Optional[typing.List[str]]
-    page_positions: typing.Optional[typing.List[int]] = pydantic.Field(
-        description="indices in the CloudDocument.text where a new page begins. e.g. Second page starts at index specified by page_positions[1]."
-    )
+    page_positions: typing.Optional[typing.List[int]]
     id: str

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/cloud_document_create.py
CHANGED

@@ -23,9 +23,7 @@ class CloudDocumentCreate(pydantic.BaseModel):
     metadata: typing.Dict[str, typing.Any]
     excluded_embed_metadata_keys: typing.Optional[typing.List[str]]
     excluded_llm_metadata_keys: typing.Optional[typing.List[str]]
-    page_positions: typing.Optional[typing.List[int]] = pydantic.Field(
-        description="indices in the CloudDocument.text where a new page begins. e.g. Second page starts at index specified by page_positions[1]."
-    )
+    page_positions: typing.Optional[typing.List[int]]
     id: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/cloud_google_drive_data_source.py
CHANGED

@@ -15,10 +15,6 @@ except ImportError:


 class CloudGoogleDriveDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     folder_id: str = pydantic.Field(description="The ID of the Google Drive folder to read from.")
     service_account_key: typing.Dict[str, typing.Any] = pydantic.Field(
         description="The service account key JSON to use for authentication."
llama_cloud/types/cloud_jira_data_source.py
CHANGED

@@ -19,12 +19,10 @@ class CloudJiraDataSource(pydantic.BaseModel):
     Cloud Jira Data Source integrating JiraReader.
     """

-    email: typing.Optional[str]
-    api_token: typing.Optional[str]
-
-
-    server_url: typing.Optional[str] = pydantic.Field(description="The server url for Jira Cloud.")
-    cloud_id: typing.Optional[str] = pydantic.Field(description="The cloud ID, used in case of OAuth2.")
+    email: typing.Optional[str]
+    api_token: typing.Optional[str]
+    server_url: typing.Optional[str]
+    cloud_id: typing.Optional[str]
     authentication_mechanism: str = pydantic.Field(description="Type of Authentication for connecting to Jira APIs.")
     query: str = pydantic.Field(description="JQL (Jira Query Language) query to search.")
     class_name: typing.Optional[str]
llama_cloud/types/cloud_notion_page_data_source.py
CHANGED

@@ -15,13 +15,9 @@ except ImportError:


 class CloudNotionPageDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     integration_token: str = pydantic.Field(description="The integration token to use for authentication.")
-    database_ids: typing.Optional[str]
-    page_ids: typing.Optional[str]
+    database_ids: typing.Optional[str]
+    page_ids: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str: