llama-cloud 0.0.17__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (134)
  1. llama_cloud/__init__.py +6 -30
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +2 -10
  4. llama_cloud/resources/data_sinks/__init__.py +2 -2
  5. llama_cloud/resources/data_sinks/client.py +2 -2
  6. llama_cloud/resources/data_sinks/types/__init__.py +1 -2
  7. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +19 -2
  8. llama_cloud/resources/data_sources/__init__.py +2 -2
  9. llama_cloud/resources/data_sources/client.py +6 -6
  10. llama_cloud/resources/data_sources/types/__init__.py +1 -2
  11. llama_cloud/resources/data_sources/types/data_source_update_component.py +23 -2
  12. llama_cloud/resources/extraction/client.py +14 -14
  13. llama_cloud/resources/files/client.py +10 -10
  14. llama_cloud/resources/organizations/client.py +2 -2
  15. llama_cloud/resources/parsing/client.py +68 -60
  16. llama_cloud/resources/pipelines/__init__.py +0 -4
  17. llama_cloud/resources/pipelines/client.py +50 -340
  18. llama_cloud/resources/pipelines/types/__init__.py +1 -7
  19. llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +15 -15
  20. llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py +1 -24
  21. llama_cloud/types/__init__.py +7 -23
  22. llama_cloud/types/azure_open_ai_embedding.py +7 -34
  23. llama_cloud/types/base_prompt_template.py +3 -14
  24. llama_cloud/types/bedrock_embedding.py +7 -17
  25. llama_cloud/types/box_auth_mechanism.py +0 -4
  26. llama_cloud/types/character_splitter.py +3 -4
  27. llama_cloud/types/chat_data.py +0 -5
  28. llama_cloud/types/chat_message.py +1 -6
  29. llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -18
  30. llama_cloud/types/cloud_box_data_source.py +6 -16
  31. llama_cloud/types/cloud_chroma_vector_store.py +1 -5
  32. llama_cloud/types/cloud_confluence_data_source.py +6 -10
  33. llama_cloud/types/cloud_document.py +1 -3
  34. llama_cloud/types/cloud_document_create.py +1 -3
  35. llama_cloud/types/cloud_google_drive_data_source.py +0 -4
  36. llama_cloud/types/cloud_jira_data_source.py +4 -6
  37. llama_cloud/types/cloud_notion_page_data_source.py +2 -6
  38. llama_cloud/types/cloud_one_drive_data_source.py +2 -6
  39. llama_cloud/types/cloud_postgres_vector_store.py +0 -4
  40. llama_cloud/types/cloud_s_3_data_source.py +4 -12
  41. llama_cloud/types/cloud_sharepoint_data_source.py +5 -9
  42. llama_cloud/types/cloud_slack_data_source.py +6 -10
  43. llama_cloud/types/cloud_weaviate_vector_store.py +0 -4
  44. llama_cloud/types/code_splitter.py +2 -1
  45. llama_cloud/types/cohere_embedding.py +3 -7
  46. llama_cloud/types/configurable_data_sink_names.py +0 -4
  47. llama_cloud/types/configurable_data_source_names.py +0 -4
  48. llama_cloud/types/configurable_transformation_names.py +0 -4
  49. llama_cloud/types/configured_transformation_item_component.py +29 -2
  50. llama_cloud/types/data_sink.py +2 -2
  51. llama_cloud/types/data_sink_component.py +19 -2
  52. llama_cloud/types/data_sink_create_component.py +19 -2
  53. llama_cloud/types/data_source.py +3 -5
  54. llama_cloud/types/data_source_component.py +23 -2
  55. llama_cloud/types/data_source_create.py +1 -3
  56. llama_cloud/types/data_source_create_component.py +23 -2
  57. llama_cloud/types/eval_dataset.py +2 -2
  58. llama_cloud/types/eval_dataset_job_record.py +7 -13
  59. llama_cloud/types/eval_execution_params_override.py +2 -6
  60. llama_cloud/types/eval_metric.py +17 -0
  61. llama_cloud/types/eval_question.py +2 -6
  62. llama_cloud/types/extend_vertex_text_embedding.py +6 -18
  63. llama_cloud/types/extraction_result.py +5 -3
  64. llama_cloud/types/extraction_schema.py +3 -5
  65. llama_cloud/types/file.py +7 -11
  66. llama_cloud/types/gemini_embedding.py +5 -9
  67. llama_cloud/types/hugging_face_inference_api_embedding.py +10 -26
  68. llama_cloud/types/input_message.py +2 -4
  69. llama_cloud/types/llama_parse_parameters.py +1 -0
  70. llama_cloud/types/llama_parse_supported_file_extensions.py +0 -4
  71. llama_cloud/types/llm.py +9 -8
  72. llama_cloud/types/llm_parameters.py +2 -7
  73. llama_cloud/types/local_eval.py +8 -10
  74. llama_cloud/types/local_eval_results.py +1 -1
  75. llama_cloud/types/managed_ingestion_status_response.py +3 -5
  76. llama_cloud/types/markdown_element_node_parser.py +4 -5
  77. llama_cloud/types/markdown_node_parser.py +2 -1
  78. llama_cloud/types/message_annotation.py +1 -6
  79. llama_cloud/types/metric_result.py +3 -3
  80. llama_cloud/types/node_parser.py +2 -1
  81. llama_cloud/types/node_relationship.py +44 -0
  82. llama_cloud/types/object_type.py +0 -4
  83. llama_cloud/types/open_ai_embedding.py +6 -12
  84. llama_cloud/types/organization.py +2 -2
  85. llama_cloud/types/page_splitter_node_parser.py +3 -2
  86. llama_cloud/types/parsing_job_json_result.py +2 -2
  87. llama_cloud/types/parsing_job_markdown_result.py +1 -1
  88. llama_cloud/types/parsing_job_text_result.py +1 -1
  89. llama_cloud/types/partition_names.py +45 -0
  90. llama_cloud/types/pipeline.py +7 -17
  91. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  92. llama_cloud/types/pipeline_create.py +6 -18
  93. llama_cloud/types/pipeline_create_embedding_config.py +15 -15
  94. llama_cloud/types/pipeline_create_transform_config.py +1 -24
  95. llama_cloud/types/pipeline_data_source.py +5 -11
  96. llama_cloud/types/pipeline_data_source_component.py +23 -2
  97. llama_cloud/types/pipeline_data_source_create.py +1 -3
  98. llama_cloud/types/pipeline_deployment.py +4 -8
  99. llama_cloud/types/pipeline_embedding_config.py +15 -15
  100. llama_cloud/types/pipeline_file.py +10 -18
  101. llama_cloud/types/pipeline_file_create.py +1 -3
  102. llama_cloud/types/playground_session.py +2 -2
  103. llama_cloud/types/preset_retrieval_params.py +8 -11
  104. llama_cloud/types/presigned_url.py +1 -3
  105. llama_cloud/types/project.py +2 -2
  106. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  107. llama_cloud/types/prompt_spec.py +2 -4
  108. llama_cloud/types/related_node_info.py +0 -4
  109. llama_cloud/types/retrieval_mode.py +0 -4
  110. llama_cloud/types/sentence_splitter.py +3 -4
  111. llama_cloud/types/supported_llm_model_names.py +0 -4
  112. llama_cloud/types/text_node.py +3 -9
  113. llama_cloud/types/token_text_splitter.py +2 -1
  114. llama_cloud/types/transformation_category_names.py +0 -4
  115. llama_cloud/types/user_organization.py +5 -9
  116. llama_cloud/types/user_organization_create.py +2 -2
  117. llama_cloud/types/user_organization_delete.py +2 -2
  118. {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.0.dist-info}/METADATA +1 -1
  119. llama_cloud-0.1.0.dist-info/RECORD +226 -0
  120. llama_cloud/resources/auth/__init__.py +0 -2
  121. llama_cloud/resources/auth/client.py +0 -124
  122. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +0 -23
  123. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +0 -27
  124. llama_cloud/types/configured_transformation_item_component_one.py +0 -35
  125. llama_cloud/types/custom_claims.py +0 -58
  126. llama_cloud/types/data_sink_component_one.py +0 -23
  127. llama_cloud/types/data_sink_create_component_one.py +0 -23
  128. llama_cloud/types/data_source_component_one.py +0 -27
  129. llama_cloud/types/data_source_create_component_one.py +0 -27
  130. llama_cloud/types/pipeline_data_source_component_one.py +0 -27
  131. llama_cloud/types/user.py +0 -35
  132. llama_cloud-0.0.17.dist-info/RECORD +0 -235
  133. {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.0.dist-info}/LICENSE +0 -0
  134. {llama_cloud-0.0.17.dist-info → llama_cloud-0.1.0.dist-info}/WHEEL +0 -0
llama_cloud/types/cloud_s_3_data_source.py CHANGED
@@ -15,19 +15,11 @@ except ImportError:
 
 
 class CloudS3DataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     bucket: str = pydantic.Field(description="The name of the S3 bucket to read from.")
-    prefix: typing.Optional[str] = pydantic.Field(description="The prefix of the S3 objects to read from.")
-    aws_access_id: typing.Optional[str] = pydantic.Field(description="The AWS access ID to use for authentication.")
-    aws_access_secret: typing.Optional[str] = pydantic.Field(
-        description="The AWS access secret to use for authentication."
-    )
-    s_3_endpoint_url: typing.Optional[str] = pydantic.Field(
-        alias="s3_endpoint_url", description="The S3 endpoint URL to use for authentication."
-    )
+    prefix: typing.Optional[str]
+    aws_access_id: typing.Optional[str]
+    aws_access_secret: typing.Optional[str]
+    s_3_endpoint_url: typing.Optional[str] = pydantic.Field(alias="s3_endpoint_url")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
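For SDK consumers the model is constructed the same way in 0.1.0; only the per-field descriptions and the inherited docstring were dropped. A minimal sketch, assuming the import path shown in the file list, with illustrative bucket and prefix values:

from llama_cloud.types.cloud_s_3_data_source import CloudS3DataSource

# bucket is the only required field; the credential fields are plain Optionals now.
source = CloudS3DataSource(
    bucket="my-bucket",                          # illustrative
    prefix="raw/reports/",                       # illustrative
    s3_endpoint_url="https://s3.example.com",    # populated via the "s3_endpoint_url" alias
)
print(source.json())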
llama_cloud/types/cloud_sharepoint_data_source.py CHANGED
@@ -15,15 +15,11 @@ except ImportError:
 
 
 class CloudSharepointDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
-    site_name: typing.Optional[str] = pydantic.Field(description="The name of the SharePoint site to download from.")
-    site_id: typing.Optional[str] = pydantic.Field(description="The ID of the SharePoint site to download from.")
-    folder_path: typing.Optional[str] = pydantic.Field(description="The path of the Sharepoint folder to read from.")
-    folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Sharepoint folder to read from.")
-    drive_name: typing.Optional[str] = pydantic.Field(description="The name of the Sharepoint drive to read from.")
+    site_name: typing.Optional[str]
+    site_id: typing.Optional[str]
+    folder_path: typing.Optional[str]
+    folder_id: typing.Optional[str]
+    drive_name: typing.Optional[str]
     client_id: str = pydantic.Field(description="The client ID to use for authentication.")
     client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
     tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
llama_cloud/types/cloud_slack_data_source.py CHANGED
@@ -15,17 +15,13 @@ except ImportError:
 
 
 class CloudSlackDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     slack_token: str = pydantic.Field(description="Slack Bot Token.")
-    channel_ids: typing.Optional[str] = pydantic.Field(description="Slack Channel.")
-    latest_date: typing.Optional[str] = pydantic.Field(description="Latest date.")
-    earliest_date: typing.Optional[str] = pydantic.Field(description="Earliest date.")
-    earliest_date_timestamp: typing.Optional[float] = pydantic.Field(description="Earliest date timestamp.")
-    latest_date_timestamp: typing.Optional[float] = pydantic.Field(description="Latest date timestamp.")
-    channel_patterns: typing.Optional[str] = pydantic.Field(description="Slack Channel name pattern.")
+    channel_ids: typing.Optional[str]
+    latest_date: typing.Optional[str]
+    earliest_date: typing.Optional[str]
+    earliest_date_timestamp: typing.Optional[float]
+    latest_date_timestamp: typing.Optional[float]
+    channel_patterns: typing.Optional[str]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/cloud_weaviate_vector_store.py CHANGED
@@ -15,10 +15,6 @@ except ImportError:
 
 
 class CloudWeaviateVectorStore(pydantic.BaseModel):
-    """
-    Base class for cloud vector stores.
-    """
-
     supports_nested_metadata_filters: typing.Optional[bool]
     index_name: str
     url: typing.Optional[str]
llama_cloud/types/code_splitter.py CHANGED
@@ -26,7 +26,8 @@ class CodeSplitter(pydantic.BaseModel):
         description="Whether or not to consider metadata when splitting."
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+    callback_manager: typing.Optional[typing.Any]
+    id_func: typing.Optional[str]
     language: str = pydantic.Field(description="The programming language of the code being split.")
     chunk_lines: typing.Optional[int] = pydantic.Field(description="The number of lines to include in each chunk.")
     chunk_lines_overlap: typing.Optional[int] = pydantic.Field(
llama_cloud/types/cohere_embedding.py CHANGED
@@ -21,15 +21,11 @@ class CohereEmbedding(pydantic.BaseModel):
 
     model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
+    callback_manager: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
     api_key: str = pydantic.Field(description="The Cohere API key.")
     truncate: str = pydantic.Field(description="Truncation type - START/ END/ NONE")
-    input_type: typing.Optional[str] = pydantic.Field(
-        description="Model Input type. If not provided, search_document and search_query are used when needed."
-    )
+    input_type: typing.Optional[str]
     embedding_type: str = pydantic.Field(
        description="Embedding type. If not provided float embedding_type is used when needed."
     )
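The Cohere model keeps its required fields; only the optional ones lost their Field descriptions. A minimal construction sketch, assuming the 0.1.0 import path, with placeholder key and model name:

from llama_cloud.types.cohere_embedding import CohereEmbedding

embedding = CohereEmbedding(
    api_key="<cohere-api-key>",       # required
    truncate="END",                   # required: START / END / NONE
    embedding_type="float",           # required; "float" is the behaviour described in the old docstring
    model_name="embed-english-v3.0",  # optional, illustrative
)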
llama_cloud/types/configurable_data_sink_names.py CHANGED
@@ -7,10 +7,6 @@ T_Result = typing.TypeVar("T_Result")
 
 
 class ConfigurableDataSinkNames(str, enum.Enum):
-    """
-    An enumeration.
-    """
-
     CHROMA = "CHROMA"
     PINECONE = "PINECONE"
     POSTGRES = "POSTGRES"
llama_cloud/types/configurable_data_source_names.py CHANGED
@@ -7,10 +7,6 @@ T_Result = typing.TypeVar("T_Result")
 
 
 class ConfigurableDataSourceNames(str, enum.Enum):
-    """
-    An enumeration.
-    """
-
     S_3 = "S3"
     AZURE_STORAGE_BLOB = "AZURE_STORAGE_BLOB"
     GOOGLE_DRIVE = "GOOGLE_DRIVE"
llama_cloud/types/configurable_transformation_names.py CHANGED
@@ -7,10 +7,6 @@ T_Result = typing.TypeVar("T_Result")
 
 
 class ConfigurableTransformationNames(str, enum.Enum):
-    """
-    An enumeration.
-    """
-
     CHARACTER_SPLITTER = "CHARACTER_SPLITTER"
     PAGE_SPLITTER_NODE_PARSER = "PAGE_SPLITTER_NODE_PARSER"
     CODE_NODE_PARSER = "CODE_NODE_PARSER"
llama_cloud/types/configured_transformation_item_component.py CHANGED
@@ -2,8 +2,35 @@
 
 import typing
 
-from .configured_transformation_item_component_one import ConfiguredTransformationItemComponentOne
+from .azure_open_ai_embedding import AzureOpenAiEmbedding
+from .bedrock_embedding import BedrockEmbedding
+from .character_splitter import CharacterSplitter
+from .code_splitter import CodeSplitter
+from .cohere_embedding import CohereEmbedding
+from .extend_vertex_text_embedding import ExtendVertexTextEmbedding
+from .gemini_embedding import GeminiEmbedding
+from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
+from .markdown_element_node_parser import MarkdownElementNodeParser
+from .markdown_node_parser import MarkdownNodeParser
+from .open_ai_embedding import OpenAiEmbedding
+from .page_splitter_node_parser import PageSplitterNodeParser
+from .sentence_splitter import SentenceSplitter
+from .token_text_splitter import TokenTextSplitter
 
 ConfiguredTransformationItemComponent = typing.Union[
-    typing.Dict[str, typing.Any], ConfiguredTransformationItemComponentOne
+    typing.Dict[str, typing.Any],
+    CharacterSplitter,
+    PageSplitterNodeParser,
+    CodeSplitter,
+    SentenceSplitter,
+    TokenTextSplitter,
+    MarkdownNodeParser,
+    MarkdownElementNodeParser,
+    OpenAiEmbedding,
+    AzureOpenAiEmbedding,
+    CohereEmbedding,
+    BedrockEmbedding,
+    HuggingFaceInferenceApiEmbedding,
+    GeminiEmbedding,
+    ExtendVertexTextEmbedding,
 ]
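The practical effect is that a configured transformation can now be typed directly against the concrete splitter and embedding models instead of the removed ConfiguredTransformationItemComponentOne wrapper. A minimal sketch, assuming language is the only required CodeSplitter field (the rest of that model is not visible in this diff):

from llama_cloud.types.code_splitter import CodeSplitter
from llama_cloud.types.configured_transformation_item_component import (
    ConfiguredTransformationItemComponent,
)

# A plain dict is still a valid union member, but a concrete model is clearer.
component: ConfiguredTransformationItemComponent = CodeSplitter(
    language="python",
    chunk_lines=40,          # illustrative values
    chunk_lines_overlap=15,
)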
llama_cloud/types/data_sink.py CHANGED
@@ -22,8 +22,8 @@ class DataSink(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="The name of the data sink.")
     sink_type: ConfigurableDataSinkNames
     component: DataSinkComponent
llama_cloud/types/data_sink_component.py CHANGED
@@ -2,6 +2,23 @@
 
 import typing
 
-from .data_sink_component_one import DataSinkComponentOne
+from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
+from .cloud_chroma_vector_store import CloudChromaVectorStore
+from .cloud_milvus_vector_store import CloudMilvusVectorStore
+from .cloud_mongo_db_atlas_vector_search import CloudMongoDbAtlasVectorSearch
+from .cloud_pinecone_vector_store import CloudPineconeVectorStore
+from .cloud_postgres_vector_store import CloudPostgresVectorStore
+from .cloud_qdrant_vector_store import CloudQdrantVectorStore
+from .cloud_weaviate_vector_store import CloudWeaviateVectorStore
 
-DataSinkComponent = typing.Union[typing.Dict[str, typing.Any], DataSinkComponentOne]
+DataSinkComponent = typing.Union[
+    typing.Dict[str, typing.Any],
+    CloudChromaVectorStore,
+    CloudPineconeVectorStore,
+    CloudPostgresVectorStore,
+    CloudQdrantVectorStore,
+    CloudWeaviateVectorStore,
+    CloudAzureAiSearchVectorStore,
+    CloudMongoDbAtlasVectorSearch,
+    CloudMilvusVectorStore,
+]
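Similarly, a data sink component can now be expressed as one of the concrete vector-store models. A minimal sketch with CloudWeaviateVectorStore, assuming index_name is the only required field (only three of its fields appear in this diff):

from llama_cloud.types.cloud_weaviate_vector_store import CloudWeaviateVectorStore
from llama_cloud.types.data_sink_component import DataSinkComponent

component: DataSinkComponent = CloudWeaviateVectorStore(
    index_name="LlamaCloudDocs",                 # illustrative
    url="https://my-cluster.weaviate.network",   # optional, illustrative
)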
llama_cloud/types/data_sink_create_component.py CHANGED
@@ -2,6 +2,23 @@
 
 import typing
 
-from .data_sink_create_component_one import DataSinkCreateComponentOne
+from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
+from .cloud_chroma_vector_store import CloudChromaVectorStore
+from .cloud_milvus_vector_store import CloudMilvusVectorStore
+from .cloud_mongo_db_atlas_vector_search import CloudMongoDbAtlasVectorSearch
+from .cloud_pinecone_vector_store import CloudPineconeVectorStore
+from .cloud_postgres_vector_store import CloudPostgresVectorStore
+from .cloud_qdrant_vector_store import CloudQdrantVectorStore
+from .cloud_weaviate_vector_store import CloudWeaviateVectorStore
 
-DataSinkCreateComponent = typing.Union[typing.Dict[str, typing.Any], DataSinkCreateComponentOne]
+DataSinkCreateComponent = typing.Union[
+    typing.Dict[str, typing.Any],
+    CloudChromaVectorStore,
+    CloudPineconeVectorStore,
+    CloudPostgresVectorStore,
+    CloudQdrantVectorStore,
+    CloudWeaviateVectorStore,
+    CloudAzureAiSearchVectorStore,
+    CloudMongoDbAtlasVectorSearch,
+    CloudMilvusVectorStore,
+]
llama_cloud/types/data_source.py CHANGED
@@ -23,13 +23,11 @@ class DataSource(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
-    custom_metadata: typing.Optional[typing.Dict[str, DataSourceCustomMetadataValue]] = pydantic.Field(
-        description="Custom metadata that will be present on all data loaded from the data source"
-    )
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
     component: DataSourceComponent
     project_id: str
 
llama_cloud/types/data_source_component.py CHANGED
@@ -2,6 +2,27 @@
 
 import typing
 
-from .data_source_component_one import DataSourceComponentOne
+from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_box_data_source import CloudBoxDataSource
+from .cloud_confluence_data_source import CloudConfluenceDataSource
+from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+from .cloud_jira_data_source import CloudJiraDataSource
+from .cloud_notion_page_data_source import CloudNotionPageDataSource
+from .cloud_one_drive_data_source import CloudOneDriveDataSource
+from .cloud_s_3_data_source import CloudS3DataSource
+from .cloud_sharepoint_data_source import CloudSharepointDataSource
+from .cloud_slack_data_source import CloudSlackDataSource
 
-DataSourceComponent = typing.Union[typing.Dict[str, typing.Any], DataSourceComponentOne]
+DataSourceComponent = typing.Union[
+    typing.Dict[str, typing.Any],
+    CloudS3DataSource,
+    CloudAzStorageBlobDataSource,
+    CloudGoogleDriveDataSource,
+    CloudOneDriveDataSource,
+    CloudSharepointDataSource,
+    CloudSlackDataSource,
+    CloudNotionPageDataSource,
+    CloudConfluenceDataSource,
+    CloudJiraDataSource,
+    CloudBoxDataSource,
+]
llama_cloud/types/data_source_create.py CHANGED
@@ -24,9 +24,7 @@ class DataSourceCreate(pydantic.BaseModel):
 
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
-    custom_metadata: typing.Optional[typing.Dict[str, DataSourceCreateCustomMetadataValue]] = pydantic.Field(
-        description="Custom metadata that will be present on all data loaded from the data source"
-    )
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCreateCustomMetadataValue]]]
     component: DataSourceCreateComponent
 
     def json(self, **kwargs: typing.Any) -> str:
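Putting the pieces together, a DataSourceCreate payload takes one of the concrete data-source models as its component. A minimal sketch using only fields and enum members visible in this diff (the bucket name is illustrative):

from llama_cloud.types.cloud_s_3_data_source import CloudS3DataSource
from llama_cloud.types.configurable_data_source_names import ConfigurableDataSourceNames
from llama_cloud.types.data_source_create import DataSourceCreate

request = DataSourceCreate(
    name="s3-invoices",
    source_type=ConfigurableDataSourceNames.S_3,
    component=CloudS3DataSource(bucket="my-bucket"),
    # custom_metadata is optional, and its values may now be None.
)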
llama_cloud/types/data_source_create_component.py CHANGED
@@ -2,6 +2,27 @@
 
 import typing
 
-from .data_source_create_component_one import DataSourceCreateComponentOne
+from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_box_data_source import CloudBoxDataSource
+from .cloud_confluence_data_source import CloudConfluenceDataSource
+from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+from .cloud_jira_data_source import CloudJiraDataSource
+from .cloud_notion_page_data_source import CloudNotionPageDataSource
+from .cloud_one_drive_data_source import CloudOneDriveDataSource
+from .cloud_s_3_data_source import CloudS3DataSource
+from .cloud_sharepoint_data_source import CloudSharepointDataSource
+from .cloud_slack_data_source import CloudSlackDataSource
 
-DataSourceCreateComponent = typing.Union[typing.Dict[str, typing.Any], DataSourceCreateComponentOne]
+DataSourceCreateComponent = typing.Union[
+    typing.Dict[str, typing.Any],
+    CloudS3DataSource,
+    CloudAzStorageBlobDataSource,
+    CloudGoogleDriveDataSource,
+    CloudOneDriveDataSource,
+    CloudSharepointDataSource,
+    CloudSlackDataSource,
+    CloudNotionPageDataSource,
+    CloudConfluenceDataSource,
+    CloudJiraDataSource,
+    CloudBoxDataSource,
+]
llama_cloud/types/eval_dataset.py CHANGED
@@ -21,8 +21,8 @@ class EvalDataset(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="The name of the EvalDataset.")
     project_id: str
 
llama_cloud/types/eval_dataset_job_record.py CHANGED
@@ -28,27 +28,21 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
     partitions: typing.Dict[str, str] = pydantic.Field(
         description="The partitions for this execution. Used for determining where to save job output."
     )
-    parameters: typing.Optional[EvalDatasetJobParams] = pydantic.Field(
-        description="Additional input parameters for the eval execution."
-    )
-    session_id: typing.Optional[str] = pydantic.Field(
-        description="The upstream request ID that created this job. Used for tracking the job across services."
-    )
-    correlation_id: typing.Optional[str] = pydantic.Field(
-        description="The correlation ID for this job. Used for tracking the job across services."
-    )
-    parent_job_execution_id: typing.Optional[str] = pydantic.Field(description="The ID of the parent job execution.")
-    user_id: typing.Optional[str] = pydantic.Field(description="The ID of the user that created this job")
+    parameters: typing.Optional[EvalDatasetJobParams]
+    session_id: typing.Optional[str]
+    correlation_id: typing.Optional[str]
+    parent_job_execution_id: typing.Optional[str]
+    user_id: typing.Optional[str]
     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
     status: StatusEnum
     error_code: typing.Optional[str]
     error_message: typing.Optional[str]
-    attempts: typing.Optional[int] = pydantic.Field(description="The number of times this job has been attempted")
+    attempts: typing.Optional[int]
     started_at: typing.Optional[dt.datetime]
     ended_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
-    data: typing.Optional[Base] = pydantic.Field(description="Additional metadata for the job execution.")
+    data: typing.Optional[Base]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/eval_execution_params_override.py CHANGED
@@ -20,12 +20,8 @@ class EvalExecutionParamsOverride(pydantic.BaseModel):
     Schema for the params override for an eval execution.
     """
 
-    llm_model: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
-        description="The LLM model to use within eval execution."
-    )
-    qa_prompt_tmpl: typing.Optional[str] = pydantic.Field(
-        description="The template to use for the question answering prompt."
-    )
+    llm_model: typing.Optional[SupportedLlmModelNames]
+    qa_prompt_tmpl: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/eval_metric.py ADDED
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class EvalMetric(str, enum.Enum):
+    RELEVANCY = "RELEVANCY"
+    FAITHFULNESS = "FAITHFULNESS"
+
+    def visit(self, relevancy: typing.Callable[[], T_Result], faithfulness: typing.Callable[[], T_Result]) -> T_Result:
+        if self is EvalMetric.RELEVANCY:
+            return relevancy()
+        if self is EvalMetric.FAITHFULNESS:
+            return faithfulness()
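Like the other Fern-generated enums in this package, the new EvalMetric exposes a visit helper that dispatches on the member. A small usage sketch (the handler strings are illustrative):

from llama_cloud.types.eval_metric import EvalMetric

metric = EvalMetric.FAITHFULNESS

# visit() invokes exactly one handler, chosen by the enum member.
label = metric.visit(
    relevancy=lambda: "response answers the query",
    faithfulness=lambda: "response is grounded in the retrieved context",
)
print(label)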
llama_cloud/types/eval_question.py CHANGED
@@ -15,13 +15,9 @@ except ImportError:
 
 
 class EvalQuestion(pydantic.BaseModel):
-    """
-    Base schema model containing common database fields.
-    """
-
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     content: str = pydantic.Field(description="The content of the question.")
     eval_dataset_id: str
     eval_dataset_index: int = pydantic.Field(
llama_cloud/types/extend_vertex_text_embedding.py CHANGED
@@ -16,30 +16,18 @@ except ImportError:
 
 
 class ExtendVertexTextEmbedding(pydantic.BaseModel):
-    """
-    Base class for embeddings.
-    """
-
     model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
+    callback_manager: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
     embed_mode: VertexEmbeddingMode = pydantic.Field(description="The embedding mode to use.")
     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="Additional kwargs for the Vertex."
     )
-    client_email: typing.Optional[str] = pydantic.Field(
-        description="The client email to use when making Vertex API calls."
-    )
-    token_uri: typing.Optional[str] = pydantic.Field(description="The token uri to use when making Vertex API calls.")
-    private_key_id: typing.Optional[str] = pydantic.Field(
-        description="The private key id to use when making Vertex API calls."
-    )
-    private_key: typing.Optional[str] = pydantic.Field(
-        description="The private key to use when making Vertex API calls."
-    )
+    client_email: typing.Optional[str]
+    token_uri: typing.Optional[str]
+    private_key_id: typing.Optional[str]
+    private_key: typing.Optional[str]
     project: str = pydantic.Field(description="The default GCP project to use when making Vertex API calls.")
     location: str = pydantic.Field(description="The default location to use when making API calls.")
     class_name: typing.Optional[str]
llama_cloud/types/extraction_result.py CHANGED
@@ -22,10 +22,12 @@ class ExtractionResult(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     schema_id: str = pydantic.Field(description="The id of the schema")
-    data: typing.Dict[str, ExtractionResultDataValue] = pydantic.Field(description="The data extracted from the file")
+    data: typing.Dict[str, typing.Optional[ExtractionResultDataValue]] = pydantic.Field(
+        description="The data extracted from the file"
+    )
     file: File = pydantic.Field(description="The file that the extract was extracted from")
 
     def json(self, **kwargs: typing.Any) -> str:
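Because data is now typed as Dict[str, Optional[ExtractionResultDataValue]], callers should expect None values for schema fields the extractor could not fill. A hedged sketch, where result stands for an ExtractionResult returned by the extraction resource client:

# result: ExtractionResult, e.g. obtained from the extraction client in 0.1.0
for field_name, value in result.data.items():
    if value is None:
        # The schema field exists, but nothing was extracted for it.
        continue
    print(field_name, value)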
llama_cloud/types/extraction_schema.py CHANGED
@@ -21,13 +21,11 @@ class ExtractionSchema(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="The name of the extraction schema")
     project_id: str = pydantic.Field(description="The ID of the project that the extraction schema belongs to")
-    data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaDataSchemaValue]] = pydantic.Field(
-        description="The schema of the data"
-    )
+    data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaDataSchemaValue]]]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/file.py CHANGED
@@ -21,19 +21,15 @@ class File(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     name: str
-    file_size: typing.Optional[int] = pydantic.Field(description="Size of the file in bytes")
-    file_type: typing.Optional[str] = pydantic.Field(description="File type (e.g. pdf, docx, etc.)")
+    file_size: typing.Optional[int]
+    file_type: typing.Optional[str]
     project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
-    last_modified_at: typing.Optional[dt.datetime] = pydantic.Field(description="The last modified time of the file")
-    resource_info: typing.Optional[typing.Dict[str, FileResourceInfoValue]] = pydantic.Field(
-        description="Resource information for the file"
-    )
-    data_source_id: typing.Optional[str] = pydantic.Field(
-        description="The ID of the data source that the file belongs to"
-    )
+    last_modified_at: typing.Optional[dt.datetime]
+    resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileResourceInfoValue]]]
+    data_source_id: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/gemini_embedding.py CHANGED
@@ -29,15 +29,11 @@ class GeminiEmbedding(pydantic.BaseModel):
 
     model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
-    title: typing.Optional[str] = pydantic.Field(
-        description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid."
-    )
-    task_type: typing.Optional[str] = pydantic.Field(description="The task for embedding model.")
-    api_key: typing.Optional[str] = pydantic.Field(description="API key to access the model. Defaults to None.")
+    callback_manager: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
+    title: typing.Optional[str]
+    task_type: typing.Optional[str]
+    api_key: typing.Optional[str]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/hugging_face_inference_api_embedding.py CHANGED
@@ -25,36 +25,20 @@ class HuggingFaceInferenceApiEmbedding(pydantic.BaseModel):
     - Uses the feature extraction task: https://huggingface.co/tasks/feature-extraction
     """
 
-    model_name: typing.Optional[str] = pydantic.Field(
-        description="Hugging Face model name. If None, the task will be used."
-    )
+    model_name: typing.Optional[str]
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
-    pooling: typing.Optional[Pooling] = pydantic.Field(
-        description="Pooling strategy. If None, the model's default pooling is used."
-    )
-    query_instruction: typing.Optional[str] = pydantic.Field(
-        description="Instruction to prepend during query embedding."
-    )
-    text_instruction: typing.Optional[str] = pydantic.Field(description="Instruction to prepend during text embedding.")
+    callback_manager: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
+    pooling: typing.Optional[Pooling]
+    query_instruction: typing.Optional[str]
+    text_instruction: typing.Optional[str]
     token: typing.Optional[HuggingFaceInferenceApiEmbeddingToken] = pydantic.Field(
         description="Hugging Face token. Will default to the locally saved token. Pass token=False if you don’t want to send your token to the server."
     )
-    timeout: typing.Optional[float] = pydantic.Field(
-        description="The maximum number of seconds to wait for a response from the server. Loading a new model in Inference API can take up to several minutes. Defaults to None, meaning it will loop until the server is available."
-    )
-    headers: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
-        description="Additional headers to send to the server. By default only the authorization and user-agent headers are sent. Values in this dictionary will override the default values."
-    )
-    cookies: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
-        description="Additional cookies to send to the server."
-    )
-    task: typing.Optional[str] = pydantic.Field(
-        description="Optional task to pick Hugging Face's recommended model, used when model_name is left as default of None."
-    )
+    timeout: typing.Optional[float]
+    headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    cookies: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    task: typing.Optional[str]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/input_message.py CHANGED
@@ -20,12 +20,10 @@ class InputMessage(pydantic.BaseModel):
     This is distinct from a ChatMessage because this schema is enforced by the AI Chat library used in the frontend
     """
 
-    id: typing.Optional[str] = pydantic.Field(description="ID of the message, if any. Not necessarily a UUID.")
+    id: typing.Optional[str]
    role: MessageRole
     content: str
-    data: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
-        description="Additional data to be stored with the message."
-    )
+    data: typing.Optional[typing.Dict[str, typing.Any]]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
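InputMessage keeps the same shape; role and content remain the required fields. A minimal sketch, where the MessageRole import path and member name are assumptions not confirmed by this diff:

from llama_cloud.types.input_message import InputMessage
from llama_cloud.types.message_role import MessageRole  # assumed import path

msg = InputMessage(
    role=MessageRole.USER,  # assumed member name
    content="Summarize the retrieved documents.",
)
print(msg.json())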