llama-cloud 0.0.15__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

This version of llama-cloud might be problematic.

Files changed (33)
  1. llama_cloud/__init__.py +20 -0
  2. llama_cloud/resources/__init__.py +2 -0
  3. llama_cloud/resources/files/client.py +159 -0
  4. llama_cloud/resources/parsing/client.py +40 -0
  5. llama_cloud/resources/pipelines/__init__.py +2 -0
  6. llama_cloud/resources/pipelines/client.py +18 -2
  7. llama_cloud/resources/pipelines/types/__init__.py +2 -0
  8. llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +11 -0
  9. llama_cloud/types/__init__.py +18 -0
  10. llama_cloud/types/cloud_az_storage_blob_data_source.py +1 -2
  11. llama_cloud/types/cloud_postgres_vector_store.py +6 -8
  12. llama_cloud/types/configurable_transformation_names.py +4 -0
  13. llama_cloud/types/configured_transformation_item_component_one.py +2 -0
  14. llama_cloud/types/extend_vertex_text_embedding.py +58 -0
  15. llama_cloud/types/llama_parse_parameters.py +3 -1
  16. llama_cloud/types/llm_model_data.py +1 -0
  17. llama_cloud/types/llm_parameters.py +4 -1
  18. llama_cloud/types/page_screenshot_metadata.py +33 -0
  19. llama_cloud/types/page_screenshot_node_with_score.py +38 -0
  20. llama_cloud/types/pipeline.py +4 -0
  21. llama_cloud/types/pipeline_configuration_hashes.py +37 -0
  22. llama_cloud/types/pipeline_create_embedding_config.py +11 -0
  23. llama_cloud/types/pipeline_embedding_config.py +11 -0
  24. llama_cloud/types/pipeline_file.py +4 -0
  25. llama_cloud/types/pipeline_file_config_hash_value.py +5 -0
  26. llama_cloud/types/preset_retrieval_params.py +1 -0
  27. llama_cloud/types/retrieve_results.py +4 -0
  28. llama_cloud/types/vertex_ai_embedding_config.py +34 -0
  29. llama_cloud/types/vertex_embedding_mode.py +45 -0
  30. {llama_cloud-0.0.15.dist-info → llama_cloud-0.0.16.dist-info}/METADATA +1 -1
  31. {llama_cloud-0.0.15.dist-info → llama_cloud-0.0.16.dist-info}/RECORD +33 -26
  32. {llama_cloud-0.0.15.dist-info → llama_cloud-0.0.16.dist-info}/LICENSE +0 -0
  33. {llama_cloud-0.0.15.dist-info → llama_cloud-0.0.16.dist-info}/WHEEL +0 -0
llama_cloud/types/configurable_transformation_names.py

@@ -24,6 +24,7 @@ class ConfigurableTransformationNames(str, enum.Enum):
     BEDROCK_EMBEDDING = "BEDROCK_EMBEDDING"
     HUGGINGFACE_API_EMBEDDING = "HUGGINGFACE_API_EMBEDDING"
     GEMINI_EMBEDDING = "GEMINI_EMBEDDING"
+    VERTEXAI_EMBEDDING = "VERTEXAI_EMBEDDING"
 
     def visit(
         self,
@@ -40,6 +41,7 @@ class ConfigurableTransformationNames(str, enum.Enum):
         bedrock_embedding: typing.Callable[[], T_Result],
         huggingface_api_embedding: typing.Callable[[], T_Result],
         gemini_embedding: typing.Callable[[], T_Result],
+        vertexai_embedding: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ConfigurableTransformationNames.CHARACTER_SPLITTER:
             return character_splitter()
@@ -67,3 +69,5 @@ class ConfigurableTransformationNames(str, enum.Enum):
             return huggingface_api_embedding()
         if self is ConfigurableTransformationNames.GEMINI_EMBEDDING:
             return gemini_embedding()
+        if self is ConfigurableTransformationNames.VERTEXAI_EMBEDDING:
+            return vertexai_embedding()
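
A new VERTEXAI_EMBEDDING member is added, and the generated visit() gains a required vertexai_embedding callback, so exhaustive call sites must now pass one more argument. Because the enum subclasses str, the new member round-trips cleanly from wire values; a quick sketch (the top-level import assumes the package's usual re-exports):

    from llama_cloud import ConfigurableTransformationNames

    name = ConfigurableTransformationNames("VERTEXAI_EMBEDDING")
    assert name is ConfigurableTransformationNames.VERTEXAI_EMBEDDING
    assert name == "VERTEXAI_EMBEDDING"  # str-enum members equal their wire values
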
llama_cloud/types/configured_transformation_item_component_one.py

@@ -7,6 +7,7 @@ from .bedrock_embedding import BedrockEmbedding
 from .character_splitter import CharacterSplitter
 from .code_splitter import CodeSplitter
 from .cohere_embedding import CohereEmbedding
+from .extend_vertex_text_embedding import ExtendVertexTextEmbedding
 from .gemini_embedding import GeminiEmbedding
 from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
 from .markdown_element_node_parser import MarkdownElementNodeParser
@@ -30,4 +31,5 @@ ConfiguredTransformationItemComponentOne = typing.Union[
     BedrockEmbedding,
     HuggingFaceInferenceApiEmbedding,
     GeminiEmbedding,
+    ExtendVertexTextEmbedding,
 ]
llama_cloud/types/extend_vertex_text_embedding.py (new file)

@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .vertex_embedding_mode import VertexEmbeddingMode
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtendVertexTextEmbedding(pydantic.BaseModel):
+    """
+    Base class for embeddings.
+    """
+
+    model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
+    embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
+    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+    num_workers: typing.Optional[int] = pydantic.Field(
+        description="The number of workers to use for async embedding calls."
+    )
+    embed_mode: VertexEmbeddingMode = pydantic.Field(description="The embedding mode to use.")
+    additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+        description="Additional kwargs for the Vertex."
+    )
+    client_email: typing.Optional[str] = pydantic.Field(
+        description="The client email to use when making Vertex API calls."
+    )
+    token_uri: typing.Optional[str] = pydantic.Field(description="The token uri to use when making Vertex API calls.")
+    private_key_id: typing.Optional[str] = pydantic.Field(
+        description="The private key id to use when making Vertex API calls."
+    )
+    private_key: typing.Optional[str] = pydantic.Field(
+        description="The private key to use when making Vertex API calls."
+    )
+    project: str = pydantic.Field(description="The default GCP project to use when making Vertex API calls.")
+    location: str = pydantic.Field(description="The default location to use when making API calls.")
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
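
Only embed_mode, project, and location are required; the other fields are optional, and Config.frozen makes instances immutable. A minimal construction sketch (the model name, project, and location values are placeholders, and the import path assumes the usual llama_cloud.types re-exports):

    from llama_cloud.types import ExtendVertexTextEmbedding, VertexEmbeddingMode

    embedding = ExtendVertexTextEmbedding(
        model_name="textembedding-gecko",  # placeholder model name
        embed_mode=VertexEmbeddingMode.RETRIEVAL,
        project="my-gcp-project",          # placeholder GCP project
        location="us-central1",            # placeholder region
    )
    print(embedding.json())  # serializes by_alias and drops unset fields
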
llama_cloud/types/llama_parse_parameters.py

@@ -23,6 +23,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     languages: typing.Optional[typing.List[ParserLanguages]]
     parsing_instruction: typing.Optional[str]
     disable_ocr: typing.Optional[bool]
+    disable_reconstruction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
     do_not_cache: typing.Optional[bool]
     fast_mode: typing.Optional[bool]
@@ -33,11 +34,12 @@ class LlamaParseParameters(pydantic.BaseModel):
     page_separator: typing.Optional[str]
     bounding_box: typing.Optional[str]
     target_pages: typing.Optional[str]
-    use_vendor_multimodal_model: typing.Optional[str]
+    use_vendor_multimodal_model: typing.Optional[bool]
     vendor_multimodal_model_name: typing.Optional[str]
     vendor_multimodal_api_key: typing.Optional[str]
     page_prefix: typing.Optional[str]
     page_suffix: typing.Optional[str]
+    webhook_url: typing.Optional[str]
     take_screenshot: typing.Optional[bool]
     s_3_input_path: typing.Optional[str] = pydantic.Field(alias="s3_input_path")
     s_3_output_path_prefix: typing.Optional[str] = pydantic.Field(alias="s3_output_path_prefix")
llama_cloud/types/llm_model_data.py

@@ -21,6 +21,7 @@ class LlmModelData(pydantic.BaseModel):
 
     name: str = pydantic.Field(description="The name of the LLM model.")
     description: str = pydantic.Field(description="The description of the LLM model.")
+    multi_modal: bool = pydantic.Field(description="Whether the model supports multi-modal image input")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
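
The new required multi_modal flag makes it straightforward to filter models by image support; a small sketch (where the model list comes from, e.g. an API call, is outside this diff):

    import typing
    from llama_cloud.types import LlmModelData

    def image_capable(models: typing.List[LlmModelData]) -> typing.List[LlmModelData]:
        # Keep only models that accept multi-modal image input.
        return [m for m in models if m.multi_modal]
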
llama_cloud/types/llm_parameters.py

@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .supported_llm_model_names import SupportedLlmModelNames
 
 try:
     import pydantic
@@ -20,7 +21,9 @@ class LlmParameters(pydantic.BaseModel):
     Comes with special serialization logic for types used commonly in platform codebase.
     """
 
-    model_name: typing.Optional[str] = pydantic.Field(description="The name of the model to use for LLM completions.")
+    model_name: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
+        description="The name of the model to use for LLM completions."
+    )
     system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the completion.")
     temperature: typing.Optional[float] = pydantic.Field(description="The temperature value for the model.")
     class_name: typing.Optional[str]
llama_cloud/types/page_screenshot_metadata.py (new file)

@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PageScreenshotMetadata(pydantic.BaseModel):
+    page_index: int = pydantic.Field(description="The index of the page for which the screenshot is taken (0-indexed)")
+    file_id: str = pydantic.Field(description="The ID of the file that the page screenshot was taken from")
+    image_size: int = pydantic.Field(description="The size of the image in bytes")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/page_screenshot_node_with_score.py (new file)

@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .page_screenshot_metadata import PageScreenshotMetadata
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PageScreenshotNodeWithScore(pydantic.BaseModel):
+    """
+    Page screenshot metadata with score
+    """
+
+    node: PageScreenshotMetadata
+    score: float = pydantic.Field(description="The score of the screenshot node")
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pipeline.py

@@ -8,6 +8,7 @@ from .configured_transformation_item import ConfiguredTransformationItem
 from .data_sink import DataSink
 from .eval_execution_params import EvalExecutionParams
 from .llama_parse_parameters import LlamaParseParameters
+from .pipeline_configuration_hashes import PipelineConfigurationHashes
 from .pipeline_embedding_config import PipelineEmbeddingConfig
 from .pipeline_transform_config import PipelineTransformConfig
 from .pipeline_type import PipelineType
@@ -44,6 +45,9 @@ class Pipeline(pydantic.BaseModel):
     configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
         description="Deprecated don't use it, List of configured transformations."
     )
+    config_hash: typing.Optional[PipelineConfigurationHashes] = pydantic.Field(
+        description="Hashes for the configuration of the pipeline."
+    )
     transform_config: typing.Optional[PipelineTransformConfig] = pydantic.Field(
         description="Configuration for the transformation."
     )
llama_cloud/types/pipeline_configuration_hashes.py (new file)

@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PipelineConfigurationHashes(pydantic.BaseModel):
+    """
+    Hashes for the configuration of a pipeline.
+    """
+
+    embedding_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the embedding config.")
+    parsing_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the llama parse parameters.")
+    transform_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the transform config.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pipeline_create_embedding_config.py

@@ -12,6 +12,7 @@ from .cohere_embedding_config import CohereEmbeddingConfig
 from .gemini_embedding_config import GeminiEmbeddingConfig
 from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
 from .open_ai_embedding_config import OpenAiEmbeddingConfig
+from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
 
 
 class PipelineCreateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
@@ -68,6 +69,15 @@ class PipelineCreateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
         allow_population_by_field_name = True
 
 
+class PipelineCreateEmbeddingConfig_VertexaiEmbedding(VertexAiEmbeddingConfig):
+    type: typing_extensions.Literal["VERTEXAI_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
 PipelineCreateEmbeddingConfig = typing.Union[
     PipelineCreateEmbeddingConfig_OpenaiEmbedding,
     PipelineCreateEmbeddingConfig_AzureEmbedding,
@@ -75,4 +85,5 @@ PipelineCreateEmbeddingConfig = typing.Union[
     PipelineCreateEmbeddingConfig_BedrockEmbedding,
     PipelineCreateEmbeddingConfig_GeminiEmbedding,
     PipelineCreateEmbeddingConfig_CohereEmbedding,
+    PipelineCreateEmbeddingConfig_VertexaiEmbedding,
 ]
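
Each variant is discriminated by its literal type field, so the new Vertex AI option is selected by passing type="VERTEXAI_EMBEDDING". A construction sketch (placeholder values; imports assume the usual llama_cloud.types re-exports):

    from llama_cloud.types import (
        ExtendVertexTextEmbedding,
        PipelineCreateEmbeddingConfig_VertexaiEmbedding,
        VertexEmbeddingMode,
    )

    config = PipelineCreateEmbeddingConfig_VertexaiEmbedding(
        type="VERTEXAI_EMBEDDING",
        component=ExtendVertexTextEmbedding(
            embed_mode=VertexEmbeddingMode.RETRIEVAL,
            project="my-gcp-project",  # placeholder
            location="us-central1",    # placeholder
        ),
    )
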
llama_cloud/types/pipeline_embedding_config.py

@@ -12,6 +12,7 @@ from .cohere_embedding_config import CohereEmbeddingConfig
 from .gemini_embedding_config import GeminiEmbeddingConfig
 from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
 from .open_ai_embedding_config import OpenAiEmbeddingConfig
+from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
 
 
 class PipelineEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
@@ -68,6 +69,15 @@ class PipelineEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
         allow_population_by_field_name = True
 
 
+class PipelineEmbeddingConfig_VertexaiEmbedding(VertexAiEmbeddingConfig):
+    type: typing_extensions.Literal["VERTEXAI_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
 PipelineEmbeddingConfig = typing.Union[
     PipelineEmbeddingConfig_OpenaiEmbedding,
     PipelineEmbeddingConfig_AzureEmbedding,
@@ -75,4 +85,5 @@ PipelineEmbeddingConfig = typing.Union[
     PipelineEmbeddingConfig_BedrockEmbedding,
     PipelineEmbeddingConfig_GeminiEmbedding,
     PipelineEmbeddingConfig_CohereEmbedding,
+    PipelineEmbeddingConfig_VertexaiEmbedding,
 ]
llama_cloud/types/pipeline_file.py

@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
 
@@ -40,6 +41,9 @@ class PipelineFile(pydantic.BaseModel):
     custom_metadata: typing.Optional[typing.Dict[str, PipelineFileCustomMetadataValue]] = pydantic.Field(
         description="Custom metadata for the file"
     )
+    config_hash: typing.Optional[typing.Dict[str, PipelineFileConfigHashValue]] = pydantic.Field(
+        description="Hashes for the configuration of the pipeline."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_file_config_hash_value.py (new file)

@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PipelineFileConfigHashValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
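
Taken together, Pipeline.config_hash and PipelineFile.config_hash suggest a way to detect files ingested under a stale configuration. A hedged sketch (the assumption that the file-level dict keys mirror the PipelineConfigurationHashes field names is an inference, not documented in this diff):

    from llama_cloud.types import Pipeline, PipelineFile

    def needs_reingest(pipeline: Pipeline, file: PipelineFile) -> bool:
        # Without hashes on both sides we cannot tell, so assume up to date.
        if pipeline.config_hash is None or file.config_hash is None:
            return False
        current = pipeline.config_hash.dict()  # excludes unset fields
        return any(file.config_hash.get(key) != value for key, value in current.items())
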
llama_cloud/types/preset_retrieval_params.py

@@ -33,6 +33,7 @@ class PresetRetrievalParams(pydantic.BaseModel):
         description="Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content)."
     )
     retrieval_mode: typing.Optional[RetrievalMode] = pydantic.Field(description="The retrieval mode for the query.")
+    retrieve_image_nodes: typing.Optional[bool] = pydantic.Field(description="Whether to retrieve image nodes.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/retrieve_results.py

@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .text_node_with_score import TextNodeWithScore
 
 try:
@@ -24,6 +25,9 @@ class RetrieveResults(pydantic.BaseModel):
     retrieval_nodes: typing.List[TextNodeWithScore] = pydantic.Field(
         description="The nodes retrieved by the pipeline for the given query."
     )
+    image_nodes: typing.Optional[typing.List[PageScreenshotNodeWithScore]] = pydantic.Field(
+        description="The image nodes retrieved by the pipeline for the given query."
+    )
     retrieval_latency: typing.Dict[str, float] = pydantic.Field(
         description="The end-to-end latency for retrieval and reranking."
     )
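
With retrieve_image_nodes enabled on the retrieval params, results can now carry page screenshots alongside text nodes. A consumption sketch (how the RetrieveResults instance is obtained is outside this diff):

    from llama_cloud.types import RetrieveResults

    def print_image_hits(results: RetrieveResults) -> None:
        # image_nodes is Optional; older servers may omit it entirely.
        for hit in results.image_nodes or []:
            meta = hit.node
            print(f"file={meta.file_id} page={meta.page_index} "
                  f"size={meta.image_size}B score={hit.score:.3f}")
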
llama_cloud/types/vertex_ai_embedding_config.py (new file)

@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extend_vertex_text_embedding import ExtendVertexTextEmbedding
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class VertexAiEmbeddingConfig(pydantic.BaseModel):
+    component: typing.Optional[ExtendVertexTextEmbedding] = pydantic.Field(
+        description="Configuration for the VertexAI embedding model."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/vertex_embedding_mode.py (new file)

@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class VertexEmbeddingMode(str, enum.Enum):
+    """
+    VertexAI embedding mode.
+
+    Attributes:
+        DEFAULT_MODE (str): The default embedding mode, for older models before August 2023,
+            that does not support task_type
+        CLASSIFICATION_MODE (str): Optimizes embeddings for classification tasks.
+        CLUSTERING_MODE (str): Optimizes embeddings for clustering tasks.
+        SEMANTIC_SIMILARITY_MODE (str): Optimizes embeddings for tasks that require assessments of semantic similarity.
+        RETRIEVAL_MODE (str): Optimizes embeddings for retrieval tasks, including search and document retrieval.
+    """
+
+    DEFAULT = "default"
+    CLASSIFICATION = "classification"
+    CLUSTERING = "clustering"
+    SIMILARITY = "similarity"
+    RETRIEVAL = "retrieval"
+
+    def visit(
+        self,
+        default: typing.Callable[[], T_Result],
+        classification: typing.Callable[[], T_Result],
+        clustering: typing.Callable[[], T_Result],
+        similarity: typing.Callable[[], T_Result],
+        retrieval: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is VertexEmbeddingMode.DEFAULT:
+            return default()
+        if self is VertexEmbeddingMode.CLASSIFICATION:
+            return classification()
+        if self is VertexEmbeddingMode.CLUSTERING:
+            return clustering()
+        if self is VertexEmbeddingMode.SIMILARITY:
+            return similarity()
+        if self is VertexEmbeddingMode.RETRIEVAL:
+            return retrieval()
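
Since VertexEmbeddingMode subclasses str and visit() takes exactly one callback per member, the visitor can be exercised directly (the labels below are illustrative):

    from llama_cloud.types import VertexEmbeddingMode

    mode = VertexEmbeddingMode.RETRIEVAL
    assert mode == "retrieval"  # str-enum: members equal their wire values

    # visit() forces exhaustive handling of every mode.
    label = mode.visit(
        default=lambda: "no task_type (pre-August-2023 models)",
        classification=lambda: "classification",
        clustering=lambda: "clustering",
        similarity=lambda: "semantic similarity",
        retrieval=lambda: "search / document retrieval",
    )
    assert label == "search / document retrieval"
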
{llama_cloud-0.0.15.dist-info → llama_cloud-0.0.16.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.0.15
+Version: 0.0.16
 Summary:
 Author: Logan Markewich
 Author-email: logan@runllama.ai