llama-cloud 0.0.9__py3-none-any.whl → 0.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of llama-cloud might be problematic.

Files changed (35)
  1. llama_cloud/__init__.py +22 -2
  2. llama_cloud/resources/__init__.py +2 -1
  3. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +2 -0
  4. llama_cloud/resources/extraction/__init__.py +2 -2
  5. llama_cloud/resources/extraction/client.py +172 -48
  6. llama_cloud/resources/extraction/types/__init__.py +2 -1
  7. llama_cloud/resources/extraction/types/extraction_schema_create_data_schema_value.py +7 -0
  8. llama_cloud/resources/organizations/client.py +81 -0
  9. llama_cloud/resources/parsing/client.py +104 -0
  10. llama_cloud/resources/pipelines/client.py +315 -43
  11. llama_cloud/types/__init__.py +20 -2
  12. llama_cloud/types/auto_transform_config.py +32 -0
  13. llama_cloud/types/{chat_params.py → chat_data.py} +3 -3
  14. llama_cloud/types/cloud_azure_ai_search_vector_store.py +1 -1
  15. llama_cloud/types/cloud_confluence_data_source.py +45 -0
  16. llama_cloud/types/configurable_data_source_names.py +4 -0
  17. llama_cloud/types/data_source_component_one.py +2 -0
  18. llama_cloud/types/data_source_create_component_one.py +2 -0
  19. llama_cloud/types/embedding_config.py +36 -0
  20. llama_cloud/types/embedding_config_component.py +19 -0
  21. llama_cloud/types/embedding_config_type.py +41 -0
  22. llama_cloud/types/extraction_job.py +35 -0
  23. llama_cloud/types/extraction_schema.py +1 -1
  24. llama_cloud/types/llama_parse_parameters.py +5 -0
  25. llama_cloud/types/pipeline.py +0 -3
  26. llama_cloud/types/pipeline_create.py +8 -3
  27. llama_cloud/types/pipeline_data_source_component_one.py +2 -0
  28. llama_cloud/types/transform_config.py +36 -0
  29. llama_cloud/types/transform_config_mode.py +21 -0
  30. llama_cloud/types/user_organization.py +10 -1
  31. llama_cloud/types/user_organization_delete.py +36 -0
  32. {llama_cloud-0.0.9.dist-info → llama_cloud-0.0.11.dist-info}/METADATA +1 -1
  33. {llama_cloud-0.0.9.dist-info → llama_cloud-0.0.11.dist-info}/RECORD +35 -25
  34. {llama_cloud-0.0.9.dist-info → llama_cloud-0.0.11.dist-info}/WHEEL +1 -1
  35. {llama_cloud-0.0.9.dist-info → llama_cloud-0.0.11.dist-info}/LICENSE +0 -0

llama_cloud/types/__init__.py
@@ -1,14 +1,16 @@
  # This file was auto-generated by Fern from our API Definition.

+ from .auto_transform_config import AutoTransformConfig
  from .azure_open_ai_embedding import AzureOpenAiEmbedding
  from .base import Base
  from .base_prompt_template import BasePromptTemplate
  from .bedrock_embedding import BedrockEmbedding
+ from .chat_data import ChatData
  from .chat_message import ChatMessage
- from .chat_params import ChatParams
  from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
  from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
  from .cloud_chroma_vector_store import CloudChromaVectorStore
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
  from .cloud_document import CloudDocument
  from .cloud_document_create import CloudDocumentCreate
  from .cloud_jira_data_source import CloudJiraDataSource
@@ -46,6 +48,9 @@ from .data_source_create_component_one import DataSourceCreateComponentOne
  from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
  from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
  from .data_source_definition import DataSourceDefinition
+ from .embedding_config import EmbeddingConfig
+ from .embedding_config_component import EmbeddingConfigComponent
+ from .embedding_config_type import EmbeddingConfigType
  from .eval_dataset import EvalDataset
  from .eval_dataset_job_params import EvalDatasetJobParams
  from .eval_dataset_job_record import EvalDatasetJobRecord
@@ -55,6 +60,7 @@ from .eval_llm_model_data import EvalLlmModelData
  from .eval_question import EvalQuestion
  from .eval_question_create import EvalQuestionCreate
  from .eval_question_result import EvalQuestionResult
+ from .extraction_job import ExtractionJob
  from .extraction_result import ExtractionResult
  from .extraction_result_data_value import ExtractionResultDataValue
  from .extraction_schema import ExtractionSchema
@@ -131,22 +137,27 @@ from .text_node import TextNode
  from .text_node_relationships_value import TextNodeRelationshipsValue
  from .text_node_with_score import TextNodeWithScore
  from .token_text_splitter import TokenTextSplitter
+ from .transform_config import TransformConfig
+ from .transform_config_mode import TransformConfigMode
  from .transformation_category_names import TransformationCategoryNames
  from .user_organization import UserOrganization
  from .user_organization_create import UserOrganizationCreate
+ from .user_organization_delete import UserOrganizationDelete
  from .validation_error import ValidationError
  from .validation_error_loc_item import ValidationErrorLocItem

  __all__ = [
+     "AutoTransformConfig",
      "AzureOpenAiEmbedding",
      "Base",
      "BasePromptTemplate",
      "BedrockEmbedding",
+     "ChatData",
      "ChatMessage",
-     "ChatParams",
      "CloudAzStorageBlobDataSource",
      "CloudAzureAiSearchVectorStore",
      "CloudChromaVectorStore",
+     "CloudConfluenceDataSource",
      "CloudDocument",
      "CloudDocumentCreate",
      "CloudJiraDataSource",
@@ -184,6 +195,9 @@ __all__ = [
      "DataSourceCreateCustomMetadataValue",
      "DataSourceCustomMetadataValue",
      "DataSourceDefinition",
+     "EmbeddingConfig",
+     "EmbeddingConfigComponent",
+     "EmbeddingConfigType",
      "EvalDataset",
      "EvalDatasetJobParams",
      "EvalDatasetJobRecord",
@@ -193,6 +207,7 @@ __all__ = [
      "EvalQuestion",
      "EvalQuestionCreate",
      "EvalQuestionResult",
+     "ExtractionJob",
      "ExtractionResult",
      "ExtractionResultDataValue",
      "ExtractionSchema",
@@ -269,9 +284,12 @@ __all__ = [
      "TextNodeRelationshipsValue",
      "TextNodeWithScore",
      "TokenTextSplitter",
+     "TransformConfig",
+     "TransformConfigMode",
      "TransformationCategoryNames",
      "UserOrganization",
      "UserOrganizationCreate",
+     "UserOrganizationDelete",
      "ValidationError",
      "ValidationErrorLocItem",
  ]

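For consumers, the net effect of these export changes is that the new configuration and data-source models become importable from llama_cloud.types, while ChatParams is gone (replaced by ChatData, below). A minimal import sketch, assuming llama-cloud 0.0.11 is installed:

    # Names taken from the updated __all__ above; this only verifies the new surface imports cleanly.
    from llama_cloud.types import (
        AutoTransformConfig,
        ChatData,
        CloudConfluenceDataSource,
        EmbeddingConfig,
        EmbeddingConfigComponent,
        EmbeddingConfigType,
        ExtractionJob,
        TransformConfig,
        TransformConfigMode,
        UserOrganizationDelete,
    )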

llama_cloud/types/auto_transform_config.py (new file)
@@ -0,0 +1,32 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class AutoTransformConfig(pydantic.BaseModel):
+     chunk_size: typing.Optional[int] = pydantic.Field(description="Chunk size for the transformation.")
+     chunk_overlap: typing.Optional[int] = pydantic.Field(description="Chunk overlap for the transformation.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}

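Both fields are optional integers, so the model can be constructed directly. A short usage sketch with illustrative values:

    from llama_cloud.types import AutoTransformConfig

    # Illustrative chunking values; both fields are optional per the model above.
    chunking = AutoTransformConfig(chunk_size=1024, chunk_overlap=200)
    print(chunking.json())  # serialized with by_alias=True and exclude_unset=True, per the overrides above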

llama_cloud/types/chat_params.py → llama_cloud/types/chat_data.py
@@ -4,7 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .chat_message import ChatMessage
+ from .preset_retrieval_params import PresetRetrievalParams

  try:
      import pydantic
@@ -15,13 +15,13 @@ except ImportError:
      import pydantic  # type: ignore


- class ChatParams(pydantic.BaseModel):
+ class ChatData(pydantic.BaseModel):
      """
      Base schema model for BaseComponent classes used in the platform.
      Comes with special serialization logic for types used commonly in platform codebase.
      """

-     messages: typing.List[ChatMessage]
+     retrieval_parameters: PresetRetrievalParams
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:

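This is a rename plus a payload change: the model no longer carries a list of ChatMessage objects but a single PresetRetrievalParams. Code built against 0.0.9's ChatParams(messages=[...]) would migrate along these lines (PresetRetrievalParams' own fields are outside this diff, so the bare constructor here is an assumption):

    from llama_cloud.types import ChatData, PresetRetrievalParams

    # Hypothetical migration target: ChatParams no longer exists in 0.0.11.
    # PresetRetrievalParams' fields are not shown in this diff; defaults are assumed here.
    chat_data = ChatData(retrieval_parameters=PresetRetrievalParams())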

llama_cloud/types/cloud_azure_ai_search_vector_store.py
@@ -24,7 +24,7 @@ class CloudAzureAiSearchVectorStore(pydantic.BaseModel):
      search_service_endpoint: str
      search_service_api_version: typing.Optional[str]
      index_name: typing.Optional[str]
-     filterable_metadata_field_keys: typing.Optional[typing.List[str]]
+     filterable_metadata_field_keys: typing.Optional[typing.Dict[str, typing.Any]]
      embedding_dimension: typing.Optional[int]
      class_name: typing.Optional[str]


llama_cloud/types/cloud_confluence_data_source.py (new file)
@@ -0,0 +1,45 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudConfluenceDataSource(pydantic.BaseModel):
+     """
+     Base component object to capture class names.
+     """
+
+     server_url: str = pydantic.Field(description="The server URL of the Confluence instance.")
+     authentication_mechanism: str = pydantic.Field(
+         description="Type of Authentication for connecting to Confluence APIs."
+     )
+     user_name: typing.Optional[str] = pydantic.Field(description="The username to use for authentication.")
+     api_token: typing.Optional[str] = pydantic.Field(description="The API token to use for authentication.")
+     space_key: typing.Optional[str] = pydantic.Field(description="The space key to read from.")
+     page_ids: typing.Optional[str] = pydantic.Field(description="The page IDs of the Confluence to read from.")
+     cql: typing.Optional[str] = pydantic.Field(description="The CQL query to use for fetching pages.")
+     label: typing.Optional[str] = pydantic.Field(description="The label to use for fetching pages.")
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}

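A construction sketch based only on the fields above; the accepted values for authentication_mechanism are not enumerated in this diff, so the string used here is an assumption:

    from llama_cloud.types import CloudConfluenceDataSource

    confluence_source = CloudConfluenceDataSource(
        server_url="https://example.atlassian.net/wiki",  # illustrative URL
        authentication_mechanism="api_token",             # assumed value; not enumerated in this diff
        user_name="user@example.com",
        api_token="<confluence-api-token>",
        space_key="DOCS",                                  # optional: restrict ingestion to one space
    )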

llama_cloud/types/configurable_data_source_names.py
@@ -17,6 +17,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
      MICROSOFT_SHAREPOINT = "MICROSOFT_SHAREPOINT"
      SLACK = "SLACK"
      NOTION_PAGE = "NOTION_PAGE"
+     CONFLUENCE = "CONFLUENCE"
      JIRA = "JIRA"

      def visit(
@@ -27,6 +28,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
          microsoft_sharepoint: typing.Callable[[], T_Result],
          slack: typing.Callable[[], T_Result],
          notion_page: typing.Callable[[], T_Result],
+         confluence: typing.Callable[[], T_Result],
          jira: typing.Callable[[], T_Result],
      ) -> T_Result:
          if self is ConfigurableDataSourceNames.S_3:
@@ -41,5 +43,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
              return slack()
          if self is ConfigurableDataSourceNames.NOTION_PAGE:
              return notion_page()
+         if self is ConfigurableDataSourceNames.CONFLUENCE:
+             return confluence()
          if self is ConfigurableDataSourceNames.JIRA:
              return jira()

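Because visit() takes one callback per enum member, any existing caller of ConfigurableDataSourceNames.visit() must now also pass a confluence callback. A sketch of the updated call; the callback names for the members outside this hunk (s_3, azure_storage_blob, microsoft_onedrive) are assumed from the enum values and may differ:

    from llama_cloud.types import ConfigurableDataSourceNames

    # confluence= is the newly required callback; the first three parameter names are assumptions.
    label = ConfigurableDataSourceNames.CONFLUENCE.visit(
        s_3=lambda: "Amazon S3",
        azure_storage_blob=lambda: "Azure Blob Storage",
        microsoft_onedrive=lambda: "OneDrive",
        microsoft_sharepoint=lambda: "SharePoint",
        slack=lambda: "Slack",
        notion_page=lambda: "Notion",
        confluence=lambda: "Confluence",
        jira=lambda: "Jira",
    )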

llama_cloud/types/data_source_component_one.py
@@ -3,6 +3,7 @@
  import typing

  from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
  from .cloud_jira_data_source import CloudJiraDataSource
  from .cloud_notion_page_data_source import CloudNotionPageDataSource
  from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,5 +18,6 @@ DataSourceComponentOne = typing.Union[
      CloudSharepointDataSource,
      CloudSlackDataSource,
      CloudNotionPageDataSource,
+     CloudConfluenceDataSource,
      CloudJiraDataSource,
  ]


llama_cloud/types/data_source_create_component_one.py
@@ -3,6 +3,7 @@
  import typing

  from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
  from .cloud_jira_data_source import CloudJiraDataSource
  from .cloud_notion_page_data_source import CloudNotionPageDataSource
  from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,5 +18,6 @@ DataSourceCreateComponentOne = typing.Union[
      CloudSharepointDataSource,
      CloudSlackDataSource,
      CloudNotionPageDataSource,
+     CloudConfluenceDataSource,
      CloudJiraDataSource,
  ]


llama_cloud/types/embedding_config.py (new file)
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .embedding_config_component import EmbeddingConfigComponent
+ from .embedding_config_type import EmbeddingConfigType
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class EmbeddingConfig(pydantic.BaseModel):
+     type: typing.Optional[EmbeddingConfigType] = pydantic.Field(description="Type of the embedding model.")
+     component: typing.Optional[EmbeddingConfigComponent] = pydantic.Field(
+         description="Configuration for the transformation."
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}

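Both fields are optional: type selects the embedding provider (see the EmbeddingConfigType enum added below) and component carries the provider-specific embedding settings. A minimal sketch that only pins the provider; the constructor arguments of the individual embedding components are not part of this diff:

    from llama_cloud.types import EmbeddingConfig, EmbeddingConfigType

    # Minimal form: choose the provider. A fuller config would also pass component=OpenAiEmbedding(...),
    # whose fields are outside this diff.
    embedding_config = EmbeddingConfig(type=EmbeddingConfigType.OPENAI_EMBEDDING)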

llama_cloud/types/embedding_config_component.py (new file)
@@ -0,0 +1,19 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .azure_open_ai_embedding import AzureOpenAiEmbedding
+ from .bedrock_embedding import BedrockEmbedding
+ from .cohere_embedding import CohereEmbedding
+ from .gemini_embedding import GeminiEmbedding
+ from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
+ from .open_ai_embedding import OpenAiEmbedding
+
+ EmbeddingConfigComponent = typing.Union[
+     OpenAiEmbedding,
+     AzureOpenAiEmbedding,
+     BedrockEmbedding,
+     CohereEmbedding,
+     GeminiEmbedding,
+     HuggingFaceInferenceApiEmbedding,
+ ]


llama_cloud/types/embedding_config_type.py (new file)
@@ -0,0 +1,41 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class EmbeddingConfigType(str, enum.Enum):
+     """
+     An enumeration.
+     """
+
+     OPENAI_EMBEDDING = "OPENAI_EMBEDDING"
+     AZURE_EMBEDDING = "AZURE_EMBEDDING"
+     BEDROCK_EMBEDDING = "BEDROCK_EMBEDDING"
+     COHERE_EMBEDDING = "COHERE_EMBEDDING"
+     GEMINI_EMBEDDING = "GEMINI_EMBEDDING"
+     HUGGINGFACE_API_EMBEDDING = "HUGGINGFACE_API_EMBEDDING"
+
+     def visit(
+         self,
+         openai_embedding: typing.Callable[[], T_Result],
+         azure_embedding: typing.Callable[[], T_Result],
+         bedrock_embedding: typing.Callable[[], T_Result],
+         cohere_embedding: typing.Callable[[], T_Result],
+         gemini_embedding: typing.Callable[[], T_Result],
+         huggingface_api_embedding: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is EmbeddingConfigType.OPENAI_EMBEDDING:
+             return openai_embedding()
+         if self is EmbeddingConfigType.AZURE_EMBEDDING:
+             return azure_embedding()
+         if self is EmbeddingConfigType.BEDROCK_EMBEDDING:
+             return bedrock_embedding()
+         if self is EmbeddingConfigType.COHERE_EMBEDDING:
+             return cohere_embedding()
+         if self is EmbeddingConfigType.GEMINI_EMBEDDING:
+             return gemini_embedding()
+         if self is EmbeddingConfigType.HUGGINGFACE_API_EMBEDDING:
+             return huggingface_api_embedding()


llama_cloud/types/extraction_job.py (new file)
@@ -0,0 +1,35 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .file import File
+ from .status_enum import StatusEnum
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class ExtractionJob(pydantic.BaseModel):
+     id: str = pydantic.Field(description="The id of the extraction job")
+     status: StatusEnum = pydantic.Field(description="The status of the extraction job")
+     file: File = pydantic.Field(description="The file that the extract was extracted from")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}


llama_cloud/types/extraction_schema.py
@@ -25,7 +25,7 @@ class ExtractionSchema(pydantic.BaseModel):
      updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
      name: str = pydantic.Field(description="The name of the extraction schema")
      project_id: str = pydantic.Field(description="The ID of the project that the extraction schema belongs to")
-     data_schema: typing.Dict[str, ExtractionSchemaDataSchemaValue] = pydantic.Field(
+     data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaDataSchemaValue]] = pydantic.Field(
          description="The schema of the data"
      )


llama_cloud/types/llama_parse_parameters.py
@@ -33,6 +33,11 @@ class LlamaParseParameters(pydantic.BaseModel):
      page_separator: typing.Optional[str]
      bounding_box: typing.Optional[str]
      target_pages: typing.Optional[str]
+     use_vendor_multimodal_model: typing.Optional[str]
+     vendor_multimodal_model_name: typing.Optional[str]
+     vendor_multimodal_api_key: typing.Optional[str]
+     page_prefix: typing.Optional[str]
+     page_suffix: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

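All five additions are optional strings alongside the existing parsing options. A hedged configuration sketch; the model name and the string encoding of use_vendor_multimodal_model are assumptions, since the diff only shows the field names and types:

    from llama_cloud.types import LlamaParseParameters

    # Illustrative values only; note the generated model types every new field as Optional[str].
    parse_params = LlamaParseParameters(
        use_vendor_multimodal_model="true",         # assumed encoding; typed as a string here
        vendor_multimodal_model_name="gpt-4o",      # assumed model identifier
        vendor_multimodal_api_key="<api-key>",
        page_prefix="--- page start ---\n",
        page_suffix="\n--- page end ---",
    )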

llama_cloud/types/pipeline.py
@@ -43,9 +43,6 @@ class Pipeline(pydantic.BaseModel):
      eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
          description="Eval parameters for the pipeline."
      )
-     llama_parse_enabled: typing.Optional[bool] = pydantic.Field(
-         description="Whether to use LlamaParse during pipeline execution."
-     )
      llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
          description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
      )


llama_cloud/types/pipeline_create.py
@@ -6,10 +6,12 @@ import typing
  from ..core.datetime_utils import serialize_datetime
  from .configured_transformation_item import ConfiguredTransformationItem
  from .data_sink_create import DataSinkCreate
+ from .embedding_config import EmbeddingConfig
  from .eval_execution_params import EvalExecutionParams
  from .llama_parse_parameters import LlamaParseParameters
  from .pipeline_type import PipelineType
  from .preset_retrieval_params import PresetRetrievalParams
+ from .transform_config import TransformConfig

  try:
      import pydantic
@@ -25,6 +27,12 @@ class PipelineCreate(pydantic.BaseModel):
      Schema for creating a pipeline.
      """

+     embedding_config: typing.Optional[EmbeddingConfig] = pydantic.Field(
+         description="Configuration for the embedding model."
+     )
+     transform_config: typing.Optional[TransformConfig] = pydantic.Field(
+         description="Configuration for the transformation."
+     )
      configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
          description="List of configured transformations."
      )
@@ -40,9 +48,6 @@ class PipelineCreate(pydantic.BaseModel):
      eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
          description="Eval parameters for the pipeline."
      )
-     llama_parse_enabled: typing.Optional[bool] = pydantic.Field(
-         description="Whether to use LlamaParse during pipeline execution."
-     )
      llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
          description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
      )


llama_cloud/types/pipeline_data_source_component_one.py
@@ -3,6 +3,7 @@
  import typing

  from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
  from .cloud_jira_data_source import CloudJiraDataSource
  from .cloud_notion_page_data_source import CloudNotionPageDataSource
  from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,5 +18,6 @@ PipelineDataSourceComponentOne = typing.Union[
      CloudSharepointDataSource,
      CloudSlackDataSource,
      CloudNotionPageDataSource,
+     CloudConfluenceDataSource,
      CloudJiraDataSource,
  ]


llama_cloud/types/transform_config.py (new file)
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .auto_transform_config import AutoTransformConfig
+ from .transform_config_mode import TransformConfigMode
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class TransformConfig(pydantic.BaseModel):
+     mode: typing.Optional[TransformConfigMode] = pydantic.Field(
+         description="Mode for the transformation configuration."
+     )
+     config: typing.Optional[AutoTransformConfig] = pydantic.Field(description="Configuration for the transformation.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}


llama_cloud/types/transform_config_mode.py (new file)
@@ -0,0 +1,21 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class TransformConfigMode(str, enum.Enum):
+     """
+     An enumeration.
+     """
+
+     AUTO = "AUTO"
+     ADVANCED = "ADVANCED"
+
+     def visit(self, auto: typing.Callable[[], T_Result], advanced: typing.Callable[[], T_Result]) -> T_Result:
+         if self is TransformConfigMode.AUTO:
+             return auto()
+         if self is TransformConfigMode.ADVANCED:
+             return advanced()

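TransformConfig ties this mode enum to the AutoTransformConfig payload added earlier, and is what PipelineCreate's new transform_config field expects. A sketch under those assumptions:

    from llama_cloud.types import AutoTransformConfig, TransformConfig, TransformConfigMode

    # AUTO mode with illustrative chunking values; what ADVANCED mode expects in `config`
    # is not spelled out in this diff.
    transform_config = TransformConfig(
        mode=TransformConfigMode.AUTO,
        config=AutoTransformConfig(chunk_size=1024, chunk_overlap=200),
    )
    # Passed to the pipeline as PipelineCreate(transform_config=transform_config, ...),
    # typically alongside the embedding_config sketched earlier; llama_parse_enabled is gone in 0.0.11.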

llama_cloud/types/user_organization.py
@@ -23,8 +23,17 @@ class UserOrganization(pydantic.BaseModel):
      created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
      updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
      email: str = pydantic.Field(description="The user's email address.")
-     user_id: str = pydantic.Field(description="The user's ID.")
+     user_id: typing.Optional[str] = pydantic.Field(description="The user's ID.")
      organization_id: str = pydantic.Field(description="The organization's ID.")
+     pending: typing.Optional[bool] = pydantic.Field(
+         description="Whether the user's membership is pending account signup."
+     )
+     invited_by_user_id: typing.Optional[str] = pydantic.Field(
+         description="The user ID of the user who added the user to the organization."
+     )
+     invited_by_user_email: typing.Optional[str] = pydantic.Field(
+         description="The email address of the user who added the user to the organization."
+     )

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}


llama_cloud/types/user_organization_delete.py (new file)
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class UserOrganizationDelete(pydantic.BaseModel):
+     """
+     Schema for deleting a user's membership to an organization.
+     """
+
+     user_id: typing.Optional[str] = pydantic.Field(description="The user's ID.")
+     email: typing.Optional[str] = pydantic.Field(description="The user's email address.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}

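Both identifiers are optional, so a member can be targeted by user ID or by email. A short sketch; the organizations client method that consumes this payload lives in the +81 lines of organizations/client.py, which this diff does not expand:

    from llama_cloud.types import UserOrganizationDelete

    # Target a pending or existing member by email; user_id is the alternative identifier.
    removal = UserOrganizationDelete(email="user@example.com")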

{llama_cloud-0.0.9.dist-info → llama_cloud-0.0.11.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: llama-cloud
- Version: 0.0.9
+ Version: 0.0.11
  Summary:
  Author: Logan Markewich
  Author-email: logan@runllama.ai