llama-cloud 0.1.22__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

This version of llama-cloud might be problematic.

Files changed (87)
  1. llama_cloud/__init__.py +6 -66
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +1 -18
  4. llama_cloud/resources/data_sources/__init__.py +2 -2
  5. llama_cloud/resources/data_sources/client.py +5 -5
  6. llama_cloud/resources/data_sources/types/__init__.py +1 -2
  7. llama_cloud/resources/files/__init__.py +0 -3
  8. llama_cloud/resources/files/client.py +18 -19
  9. llama_cloud/resources/jobs/client.py +8 -0
  10. llama_cloud/resources/llama_extract/__init__.py +0 -8
  11. llama_cloud/resources/llama_extract/client.py +92 -24
  12. llama_cloud/resources/llama_extract/types/__init__.py +0 -8
  13. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +2 -4
  14. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +2 -4
  15. llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override.py +2 -4
  16. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +2 -4
  17. llama_cloud/resources/organizations/client.py +14 -4
  18. llama_cloud/resources/parsing/client.py +8 -0
  19. llama_cloud/resources/pipelines/__init__.py +0 -2
  20. llama_cloud/resources/pipelines/client.py +43 -9
  21. llama_cloud/resources/pipelines/types/__init__.py +0 -2
  22. llama_cloud/types/__init__.py +6 -46
  23. llama_cloud/types/composite_retrieval_result.py +5 -1
  24. llama_cloud/types/data_source.py +2 -2
  25. llama_cloud/types/data_source_create.py +2 -2
  26. llama_cloud/types/extract_agent.py +2 -4
  27. llama_cloud/types/extract_job_create_data_schema_override.py +2 -4
  28. llama_cloud/types/extract_models.py +20 -8
  29. llama_cloud/types/extract_resultset.py +2 -2
  30. llama_cloud/types/extract_resultset_data.py +2 -4
  31. llama_cloud/types/extract_run.py +3 -4
  32. llama_cloud/types/extract_run_data.py +2 -4
  33. llama_cloud/types/extract_schema_validate_response.py +2 -2
  34. llama_cloud/types/file.py +3 -4
  35. llama_cloud/types/{llm_config_result.py → file_id_presigned_url.py} +9 -6
  36. llama_cloud/types/json_type.py +9 -0
  37. llama_cloud/types/legacy_parse_job_config.py +1 -0
  38. llama_cloud/types/llama_extract_settings.py +3 -1
  39. llama_cloud/types/llama_parse_parameters.py +1 -0
  40. llama_cloud/types/page_figure_metadata.py +1 -0
  41. llama_cloud/types/{llm_configs_response.py → page_figure_node_with_score.py} +9 -4
  42. llama_cloud/types/parse_job_config.py +1 -0
  43. llama_cloud/types/pipeline_data_source.py +2 -2
  44. llama_cloud/types/pipeline_file.py +5 -8
  45. llama_cloud/types/pipeline_file_create.py +2 -2
  46. llama_cloud/types/preset_retrieval_params.py +8 -0
  47. llama_cloud/types/retrieve_results.py +7 -1
  48. llama_cloud/types/supported_llm_model_names.py +20 -12
  49. llama_cloud/types/user_organization.py +1 -1
  50. llama_cloud/types/user_organization_delete.py +1 -0
  51. {llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/METADATA +2 -3
  52. {llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/RECORD +54 -86
  53. {llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/WHEEL +1 -1
  54. llama_cloud/resources/admin/__init__.py +0 -2
  55. llama_cloud/resources/admin/client.py +0 -78
  56. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +0 -7
  57. llama_cloud/resources/files/types/__init__.py +0 -7
  58. llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +0 -7
  59. llama_cloud/resources/files/types/file_create_permission_info_value.py +0 -7
  60. llama_cloud/resources/files/types/file_create_resource_info_value.py +0 -5
  61. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py +0 -7
  62. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_zero_value.py +0 -7
  63. llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override_zero_value.py +0 -7
  64. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +0 -7
  65. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +0 -7
  66. llama_cloud/types/data_source_create_custom_metadata_value.py +0 -7
  67. llama_cloud/types/data_source_custom_metadata_value.py +0 -7
  68. llama_cloud/types/extract_agent_data_schema_value.py +0 -5
  69. llama_cloud/types/extract_job_create_data_schema_override_zero_value.py +0 -7
  70. llama_cloud/types/extract_resultset_data_item_value.py +0 -7
  71. llama_cloud/types/extract_resultset_data_zero_value.py +0 -7
  72. llama_cloud/types/extract_resultset_extraction_metadata_value.py +0 -7
  73. llama_cloud/types/extract_run_data_item_value.py +0 -5
  74. llama_cloud/types/extract_run_data_schema_value.py +0 -5
  75. llama_cloud/types/extract_run_data_zero_value.py +0 -5
  76. llama_cloud/types/extract_run_extraction_metadata_value.py +0 -7
  77. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +0 -7
  78. llama_cloud/types/file_permission_info_value.py +0 -5
  79. llama_cloud/types/file_resource_info_value.py +0 -5
  80. llama_cloud/types/llm_config_result_llm_type.py +0 -33
  81. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +0 -7
  82. llama_cloud/types/pipeline_file_config_hash_value.py +0 -5
  83. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +0 -7
  84. llama_cloud/types/pipeline_file_custom_metadata_value.py +0 -7
  85. llama_cloud/types/pipeline_file_permission_info_value.py +0 -7
  86. llama_cloud/types/pipeline_file_resource_info_value.py +0 -7
  87. {llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/LICENSE +0 -0
llama_cloud/types/{llm_config_result.py → file_id_presigned_url.py}

@@ -4,7 +4,6 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .llm_config_result_llm_type import LlmConfigResultLlmType
 
 try:
     import pydantic
@@ -15,11 +14,15 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class LlmConfigResult(pydantic.BaseModel):
-    llm_type: LlmConfigResultLlmType
-    is_enabled: bool
-    valid: bool
-    error_message: typing.Optional[str]
+class FileIdPresignedUrl(pydantic.BaseModel):
+    """
+    Schema for a presigned URL with a file ID.
+    """
+
+    url: str = pydantic.Field(description="A presigned URL for IO operations against a private file")
+    expires_at: dt.datetime = pydantic.Field(description="The time at which the presigned URL expires")
+    form_fields: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    file_id: str = pydantic.Field(description="The ID of the file associated with the presigned URL")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
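
The old LlmConfigResult model gives way to FileIdPresignedUrl in this release. A minimal usage sketch, assuming the model is importable from the generated module path shown above; the URL, form fields, and file ID are placeholders:

import datetime as dt

from llama_cloud.types.file_id_presigned_url import FileIdPresignedUrl

presigned = FileIdPresignedUrl(
    url="https://example.com/upload",  # placeholder presigned URL
    expires_at=dt.datetime.now(dt.timezone.utc) + dt.timedelta(minutes=15),
    form_fields={"key": "uploads/abc"},  # optional fields for multipart POST uploads
    file_id="file_123",  # placeholder file ID
)

# Reject the URL client-side once it has expired.
if presigned.expires_at <= dt.datetime.now(dt.timezone.utc):
    raise RuntimeError("presigned URL has expired; request a fresh one")
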
llama_cloud/types/json_type.py (added)

@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+JsonType = typing.Union[
+    str, int, float, bool, typing.List[typing.Optional[JsonType]], typing.Dict[str, typing.Optional[JsonType]]
+]
llama_cloud/types/legacy_parse_job_config.py

@@ -45,6 +45,7 @@ class LegacyParseJobConfig(pydantic.BaseModel):
     )
     invalidate_cache: bool = pydantic.Field(alias="invalidateCache", description="Whether to invalidate the cache.")
     output_pdf_of_document: typing.Optional[bool] = pydantic.Field(alias="outputPDFOfDocument")
+    outlined_table_extraction: typing.Optional[bool] = pydantic.Field(alias="outlinedTableExtraction")
     save_images: typing.Optional[bool] = pydantic.Field(alias="saveImages")
     gpt_4_o: typing.Optional[bool] = pydantic.Field(alias="gpt4o", description="Whether to use GPT4o.")
     open_aiapi_key: str = pydantic.Field(alias="openAIAPIKey", description="The OpenAI API key.")
llama_cloud/types/llama_extract_settings.py

@@ -39,7 +39,9 @@ class LlamaExtractSettings(pydantic.BaseModel):
     extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
         description="The configuration for the extraction agent."
     )
-    use_multimodal_extraction: typing.Optional[bool]
+    use_pixel_extraction: typing.Optional[bool] = pydantic.Field(
+        description="Whether to use extraction over pixels for multimodal mode."
+    )
     llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="LlamaParse related settings."
    )
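
This is a breaking rename: callers that set use_multimodal_extraction in 0.1.22 must switch to use_pixel_extraction. A hedged migration sketch; the top-level import and the assumption that the model's other fields stay optional both follow the usual Fern-generated layout rather than anything this diff confirms:

from llama_cloud import LlamaExtractSettings

# 0.1.22: LlamaExtractSettings(use_multimodal_extraction=True)
settings = LlamaExtractSettings(use_pixel_extraction=True)
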
llama_cloud/types/llama_parse_parameters.py

@@ -31,6 +31,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     disable_reconstruction: typing.Optional[bool]
     disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
+    outlined_table_extraction: typing.Optional[bool]
     output_pdf_of_document: typing.Optional[bool]
     do_not_cache: typing.Optional[bool]
     fast_mode: typing.Optional[bool]
llama_cloud/types/page_figure_metadata.py

@@ -21,6 +21,7 @@ class PageFigureMetadata(pydantic.BaseModel):
     figure_size: int = pydantic.Field(description="The size of the figure in bytes")
     is_likely_noise: typing.Optional[bool] = pydantic.Field(description="Whether the figure is likely to be noise")
     confidence: float = pydantic.Field(description="The confidence of the figure")
+    metadata: typing.Optional[typing.Dict[str, typing.Any]]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/{llm_configs_response.py → page_figure_node_with_score.py}

@@ -4,7 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .llm_config_result import LlmConfigResult
+from .page_figure_metadata import PageFigureMetadata
 
 try:
     import pydantic
@@ -15,9 +15,14 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class LlmConfigsResponse(pydantic.BaseModel):
-    llm_configs: typing.List[LlmConfigResult]
-    last_validated_at: str
+class PageFigureNodeWithScore(pydantic.BaseModel):
+    """
+    Page figure metadata with score
+    """
+
+    node: PageFigureMetadata
+    score: float = pydantic.Field(description="The score of the figure node")
+    class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
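
A short sketch of consuming the new scored figure nodes, for example when post-filtering retrieval output; the top_figures helper is hypothetical:

import typing

from llama_cloud import PageFigureNodeWithScore

def top_figures(
    nodes: typing.List[PageFigureNodeWithScore], k: int = 3
) -> typing.List[PageFigureNodeWithScore]:
    # Rank figure nodes by retrieval score, highest first, and keep k.
    # Each node's .node carries PageFigureMetadata, including the new
    # optional `metadata` dict added above.
    return sorted(nodes, key=lambda n: n.score, reverse=True)[:k]
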
llama_cloud/types/parse_job_config.py

@@ -33,6 +33,7 @@ class ParseJobConfig(pydantic.BaseModel):
     disable_reconstruction: typing.Optional[bool]
     disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
+    outlined_table_extraction: typing.Optional[bool]
     output_pdf_of_document: typing.Optional[bool]
     do_not_cache: typing.Optional[bool]
     fast_mode: typing.Optional[bool]
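
The same outlined_table_extraction toggle lands on LegacyParseJobConfig, LlamaParseParameters, and ParseJobConfig above. A sketch of enabling it; the top-level import is assumed from the usual Fern re-exports, and the surrounding fields are optional per the hunks above:

from llama_cloud import LlamaParseParameters

parse_params = LlamaParseParameters(
    outlined_table_extraction=True,  # new flag in this release
    invalidate_cache=True,           # existing flag, shown for context
)
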
llama_cloud/types/pipeline_data_source.py

@@ -5,8 +5,8 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .configurable_data_source_names import ConfigurableDataSourceNames
+from .json_type import JsonType
 from .pipeline_data_source_component import PipelineDataSourceComponent
-from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 from .pipeline_data_source_status import PipelineDataSourceStatus
 
 try:
@@ -28,7 +28,7 @@ class PipelineDataSource(pydantic.BaseModel):
     updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
     component: PipelineDataSourceComponent = pydantic.Field(description="Component that implements the data source")
     version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
     project_id: str
llama_cloud/types/pipeline_file.py

@@ -4,10 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
-from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
-from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
-from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+from .json_type import JsonType
 from .pipeline_file_status import PipelineFileStatus
 
 try:
@@ -33,13 +30,13 @@ class PipelineFile(pydantic.BaseModel):
     file_type: typing.Optional[str]
     project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
     last_modified_at: typing.Optional[dt.datetime]
-    resource_info: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileResourceInfoValue]]]
-    permission_info: typing.Optional[typing.Dict[str, typing.Optional[PipelineFilePermissionInfoValue]]]
+    resource_info: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
+    permission_info: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
     data_source_id: typing.Optional[str]
     file_id: typing.Optional[str]
     pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the file is associated with")
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
-    config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
+    config_hash: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
     indexed_page_count: typing.Optional[int]
     status: typing.Optional[PipelineFileStatus]
     status_updated_at: typing.Optional[dt.datetime]
llama_cloud/types/pipeline_file_create.py

@@ -4,7 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
+from .json_type import JsonType
 
 try:
     import pydantic
@@ -21,7 +21,7 @@ class PipelineFileCreate(pydantic.BaseModel):
     """
 
     file_id: str = pydantic.Field(description="The ID of the file")
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCreateCustomMetadataValue]]]
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
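
Across PipelineDataSource, PipelineFile, and PipelineFileCreate, the per-model *_value scalar types are replaced by the shared JsonType alias, so metadata dicts now accept nested JSON. A hedged sketch; the top-level import is assumed and the IDs are placeholders:

from llama_cloud import PipelineFileCreate

pf = PipelineFileCreate(
    file_id="file_123",  # placeholder file ID
    custom_metadata={
        "source": "s3",
        "tags": ["contract", "2024"],              # lists are now valid values
        "review": {"approved": True, "round": 2},  # and so are nested objects
    },
)
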
llama_cloud/types/preset_retrieval_params.py

@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .json_type import JsonType
 from .metadata_filters import MetadataFilters
 from .retrieval_mode import RetrievalMode
 
@@ -28,9 +29,16 @@ class PresetRetrievalParams(pydantic.BaseModel):
     rerank_top_n: typing.Optional[int]
     alpha: typing.Optional[float]
     search_filters: typing.Optional[MetadataFilters]
+    search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
     files_top_k: typing.Optional[int]
     retrieval_mode: typing.Optional[RetrievalMode] = pydantic.Field(description="The retrieval mode for the query.")
     retrieve_image_nodes: typing.Optional[bool] = pydantic.Field(description="Whether to retrieve image nodes.")
+    retrieve_page_screenshot_nodes: typing.Optional[bool] = pydantic.Field(
+        description="Whether to retrieve page screenshot nodes."
+    )
+    retrieve_page_figure_nodes: typing.Optional[bool] = pydantic.Field(
+        description="Whether to retrieve page figure nodes."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
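
A sketch of the two new retrieval toggles together with the new inference-schema field. The import path follows the usual Fern re-exports, the schema payload is purely illustrative (this diff does not document its expected shape), and the remaining fields are optional per the hunk above:

from llama_cloud import PresetRetrievalParams

params = PresetRetrievalParams(
    retrieve_page_screenshot_nodes=True,  # new toggle
    retrieve_page_figure_nodes=True,      # new toggle
    # New field: JSON-shaped schema, presumably guiding server-side filter
    # inference (pairs with RetrieveResults.inferred_search_filters below).
    search_filters_inference_schema={"department": {"type": "string"}},
)
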
llama_cloud/types/retrieve_results.py

@@ -4,6 +4,8 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .metadata_filters import MetadataFilters
+from .page_figure_node_with_score import PageFigureNodeWithScore
 from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .text_node_with_score import TextNodeWithScore
 
@@ -26,7 +28,10 @@ class RetrieveResults(pydantic.BaseModel):
         description="The nodes retrieved by the pipeline for the given query."
     )
     image_nodes: typing.Optional[typing.List[PageScreenshotNodeWithScore]] = pydantic.Field(
-        description="The image nodes retrieved by the pipeline for the given query."
+        description="The image nodes retrieved by the pipeline for the given query. Deprecated - will soon be replaced with 'page_screenshot_nodes'."
+    )
+    page_figure_nodes: typing.Optional[typing.List[PageFigureNodeWithScore]] = pydantic.Field(
+        description="The page figure nodes retrieved by the pipeline for the given query."
     )
     retrieval_latency: typing.Optional[typing.Dict[str, float]] = pydantic.Field(
         description="The end-to-end latency for retrieval and reranking."
@@ -34,6 +39,7 @@ class RetrieveResults(pydantic.BaseModel):
     metadata: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
         description="Metadata associated with the retrieval execution"
     )
+    inferred_search_filters: typing.Optional[MetadataFilters]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
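
On the result side, page_figure_nodes and inferred_search_filters are new, and image_nodes is documented as deprecated in favor of a future page_screenshot_nodes field. A reading sketch; the summarize helper is hypothetical:

from llama_cloud import RetrieveResults

def summarize(results: RetrieveResults) -> None:
    # Prefer the new field; image_nodes is marked deprecated above.
    for fig in results.page_figure_nodes or []:
        print(f"figure score={fig.score:.3f}, size={fig.node.figure_size} bytes")
    if results.inferred_search_filters is not None:
        print("filters inferred by the server:", results.inferred_search_filters)
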
llama_cloud/types/supported_llm_model_names.py

@@ -9,41 +9,49 @@ T_Result = typing.TypeVar("T_Result")
 class SupportedLlmModelNames(str, enum.Enum):
     GPT_4_O = "GPT_4O"
     GPT_4_O_MINI = "GPT_4O_MINI"
-    AZURE_OPENAI_GPT_3_5_TURBO = "AZURE_OPENAI_GPT_3_5_TURBO"
+    GPT_4_1 = "GPT_4_1"
+    GPT_4_1_NANO = "GPT_4_1_NANO"
+    GPT_4_1_MINI = "GPT_4_1_MINI"
     AZURE_OPENAI_GPT_4_O = "AZURE_OPENAI_GPT_4O"
     AZURE_OPENAI_GPT_4_O_MINI = "AZURE_OPENAI_GPT_4O_MINI"
-    AZURE_OPENAI_GPT_4 = "AZURE_OPENAI_GPT_4"
     CLAUDE_3_5_SONNET = "CLAUDE_3_5_SONNET"
-    BEDROCK_CLAUDE_3_5_SONNET = "BEDROCK_CLAUDE_3_5_SONNET"
+    BEDROCK_CLAUDE_3_5_SONNET_V_1 = "BEDROCK_CLAUDE_3_5_SONNET_V1"
+    BEDROCK_CLAUDE_3_5_SONNET_V_2 = "BEDROCK_CLAUDE_3_5_SONNET_V2"
     VERTEX_AI_CLAUDE_3_5_SONNET_V_2 = "VERTEX_AI_CLAUDE_3_5_SONNET_V2"
 
     def visit(
         self,
         gpt_4_o: typing.Callable[[], T_Result],
         gpt_4_o_mini: typing.Callable[[], T_Result],
-        azure_openai_gpt_3_5_turbo: typing.Callable[[], T_Result],
+        gpt_4_1: typing.Callable[[], T_Result],
+        gpt_4_1_nano: typing.Callable[[], T_Result],
+        gpt_4_1_mini: typing.Callable[[], T_Result],
         azure_openai_gpt_4_o: typing.Callable[[], T_Result],
         azure_openai_gpt_4_o_mini: typing.Callable[[], T_Result],
-        azure_openai_gpt_4: typing.Callable[[], T_Result],
         claude_3_5_sonnet: typing.Callable[[], T_Result],
-        bedrock_claude_3_5_sonnet: typing.Callable[[], T_Result],
+        bedrock_claude_3_5_sonnet_v_1: typing.Callable[[], T_Result],
+        bedrock_claude_3_5_sonnet_v_2: typing.Callable[[], T_Result],
         vertex_ai_claude_3_5_sonnet_v_2: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is SupportedLlmModelNames.GPT_4_O:
             return gpt_4_o()
         if self is SupportedLlmModelNames.GPT_4_O_MINI:
             return gpt_4_o_mini()
-        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_3_5_TURBO:
-            return azure_openai_gpt_3_5_turbo()
+        if self is SupportedLlmModelNames.GPT_4_1:
+            return gpt_4_1()
+        if self is SupportedLlmModelNames.GPT_4_1_NANO:
+            return gpt_4_1_nano()
+        if self is SupportedLlmModelNames.GPT_4_1_MINI:
+            return gpt_4_1_mini()
         if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O:
             return azure_openai_gpt_4_o()
         if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O_MINI:
             return azure_openai_gpt_4_o_mini()
-        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4:
-            return azure_openai_gpt_4()
         if self is SupportedLlmModelNames.CLAUDE_3_5_SONNET:
             return claude_3_5_sonnet()
-        if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET:
-            return bedrock_claude_3_5_sonnet()
+        if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET_V_1:
+            return bedrock_claude_3_5_sonnet_v_1()
+        if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET_V_2:
+            return bedrock_claude_3_5_sonnet_v_2()
         if self is SupportedLlmModelNames.VERTEX_AI_CLAUDE_3_5_SONNET_V_2:
             return vertex_ai_claude_3_5_sonnet_v_2()
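
Because visit() takes one keyword callback per enum member, any caller still passing azure_openai_gpt_3_5_turbo, azure_openai_gpt_4, or bedrock_claude_3_5_sonnet fails with a TypeError after upgrading. An exhaustive-dispatch sketch with illustrative display strings:

from llama_cloud import SupportedLlmModelNames

def display_name(model: SupportedLlmModelNames) -> str:
    # One callback per member; a missing or stale keyword raises TypeError.
    return model.visit(
        gpt_4_o=lambda: "GPT-4o",
        gpt_4_o_mini=lambda: "GPT-4o mini",
        gpt_4_1=lambda: "GPT-4.1",
        gpt_4_1_nano=lambda: "GPT-4.1 nano",
        gpt_4_1_mini=lambda: "GPT-4.1 mini",
        azure_openai_gpt_4_o=lambda: "Azure GPT-4o",
        azure_openai_gpt_4_o_mini=lambda: "Azure GPT-4o mini",
        claude_3_5_sonnet=lambda: "Claude 3.5 Sonnet",
        bedrock_claude_3_5_sonnet_v_1=lambda: "Claude 3.5 Sonnet (Bedrock v1)",
        bedrock_claude_3_5_sonnet_v_2=lambda: "Claude 3.5 Sonnet (Bedrock v2)",
        vertex_ai_claude_3_5_sonnet_v_2=lambda: "Claude 3.5 Sonnet (Vertex AI v2)",
    )
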
llama_cloud/types/user_organization.py

@@ -23,7 +23,7 @@ class UserOrganization(pydantic.BaseModel):
     id: str = pydantic.Field(description="Unique identifier")
     created_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime]
-    email: str = pydantic.Field(description="The user's email address.")
+    email: typing.Optional[str]
     user_id: typing.Optional[str]
     organization_id: str = pydantic.Field(description="The organization's ID.")
     pending: typing.Optional[bool] = pydantic.Field(
llama_cloud/types/user_organization_delete.py

@@ -21,6 +21,7 @@ class UserOrganizationDelete(pydantic.BaseModel):
 
     user_id: typing.Optional[str]
     email: typing.Optional[str]
+    project_id_list: typing.Optional[typing.List[str]]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
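
UserOrganization.email is now optional, so downstream code should not assume it is set. A defensive-access sketch; the contact_label helper is hypothetical:

from llama_cloud import UserOrganization

def contact_label(member: UserOrganization) -> str:
    # email may now be None; fall back to the user ID.
    return member.email or f"user:{member.user_id or 'unknown'}"
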
{llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/METADATA

@@ -1,6 +1,6 @@
-Metadata-Version: 2.3
+Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.1.22
+Version: 0.1.24
 Summary:
 License: MIT
 Author: Logan Markewich
@@ -13,7 +13,6 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: certifi (>=2024.7.4)
 Requires-Dist: httpx (>=0.20.0)
 Requires-Dist: pydantic (>=1.10)