llama-cloud 0.1.21__py3-none-any.whl → 0.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic; see the registry's advisory page for more details.

Files changed (34)
  1. llama_cloud/__init__.py +16 -12
  2. llama_cloud/client.py +3 -3
  3. llama_cloud/resources/__init__.py +2 -2
  4. llama_cloud/resources/admin/client.py +78 -0
  5. llama_cloud/resources/jobs/client.py +10 -2
  6. llama_cloud/resources/llama_extract/client.py +50 -6
  7. llama_cloud/resources/organizations/client.py +12 -2
  8. llama_cloud/resources/parsing/client.py +30 -0
  9. llama_cloud/resources/pipelines/client.py +8 -0
  10. llama_cloud/resources/retrievers/client.py +14 -0
  11. llama_cloud/types/__init__.py +14 -10
  12. llama_cloud/types/cloud_s_3_data_source.py +1 -0
  13. llama_cloud/types/{data_sink_definition.py → document_block.py} +6 -15
  14. llama_cloud/types/document_chunk_mode.py +17 -0
  15. llama_cloud/types/extract_config.py +4 -0
  16. llama_cloud/types/extract_mode.py +4 -0
  17. llama_cloud/types/extract_models.py +33 -0
  18. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +11 -0
  19. llama_cloud/types/{data_source_definition.py → llm_config_result.py} +6 -15
  20. llama_cloud/types/llm_config_result_llm_type.py +33 -0
  21. llama_cloud/types/llm_configs_response.py +33 -0
  22. llama_cloud/types/pipeline_create.py +1 -3
  23. llama_cloud/types/struct_parse_conf.py +2 -1
  24. llama_cloud/types/supported_llm_model_names.py +4 -4
  25. llama_cloud/types/user_organization_role.py +1 -0
  26. {llama_cloud-0.1.21.dist-info → llama_cloud-0.1.22.dist-info}/METADATA +1 -1
  27. {llama_cloud-0.1.21.dist-info → llama_cloud-0.1.22.dist-info}/RECORD +30 -29
  28. {llama_cloud-0.1.21.dist-info → llama_cloud-0.1.22.dist-info}/WHEEL +1 -1
  29. llama_cloud/resources/component_definitions/client.py +0 -189
  30. llama_cloud/types/configurable_transformation_definition.py +0 -48
  31. llama_cloud/types/configurable_transformation_names.py +0 -41
  32. llama_cloud/types/transformation_category_names.py +0 -17
  33. /llama_cloud/resources/{component_definitions → admin}/__init__.py +0 -0
  34. {llama_cloud-0.1.21.dist-info → llama_cloud-0.1.22.dist-info}/LICENSE +0 -0
@@ -295,6 +295,8 @@ class RetrieversClient:
295
295
  self,
296
296
  retriever_id: str,
297
297
  *,
298
+ project_id: typing.Optional[str] = None,
299
+ organization_id: typing.Optional[str] = None,
298
300
  mode: typing.Optional[CompositeRetrievalMode] = OMIT,
299
301
  rerank_top_n: typing.Optional[int] = OMIT,
300
302
  rerank_config: typing.Optional[ReRankConfig] = OMIT,
@@ -306,6 +308,10 @@ class RetrieversClient:
306
308
  Parameters:
307
309
  - retriever_id: str.
308
310
 
311
+ - project_id: typing.Optional[str].
312
+
313
+ - organization_id: typing.Optional[str].
314
+
309
315
  - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
310
316
 
311
317
  - rerank_top_n: typing.Optional[int].
@@ -341,6 +347,7 @@ class RetrieversClient:
341
347
  urllib.parse.urljoin(
342
348
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/retrievers/{retriever_id}/retrieve"
343
349
  ),
350
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
344
351
  json=jsonable_encoder(_request),
345
352
  headers=self._client_wrapper.get_headers(),
346
353
  timeout=60,
@@ -692,6 +699,8 @@ class AsyncRetrieversClient:
692
699
  self,
693
700
  retriever_id: str,
694
701
  *,
702
+ project_id: typing.Optional[str] = None,
703
+ organization_id: typing.Optional[str] = None,
695
704
  mode: typing.Optional[CompositeRetrievalMode] = OMIT,
696
705
  rerank_top_n: typing.Optional[int] = OMIT,
697
706
  rerank_config: typing.Optional[ReRankConfig] = OMIT,
@@ -703,6 +712,10 @@ class AsyncRetrieversClient:
703
712
  Parameters:
704
713
  - retriever_id: str.
705
714
 
715
+ - project_id: typing.Optional[str].
716
+
717
+ - organization_id: typing.Optional[str].
718
+
706
719
  - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
707
720
 
708
721
  - rerank_top_n: typing.Optional[int].
@@ -738,6 +751,7 @@ class AsyncRetrieversClient:
738
751
  urllib.parse.urljoin(
739
752
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/retrievers/{retriever_id}/retrieve"
740
753
  ),
754
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
741
755
  json=jsonable_encoder(_request),
742
756
  headers=self._client_wrapper.get_headers(),
743
757
  timeout=60,
@@ -63,23 +63,21 @@ from .composite_retrieved_text_node import CompositeRetrievedTextNode
63
63
  from .composite_retrieved_text_node_with_score import CompositeRetrievedTextNodeWithScore
64
64
  from .configurable_data_sink_names import ConfigurableDataSinkNames
65
65
  from .configurable_data_source_names import ConfigurableDataSourceNames
66
- from .configurable_transformation_definition import ConfigurableTransformationDefinition
67
- from .configurable_transformation_names import ConfigurableTransformationNames
68
66
  from .credit_type import CreditType
69
67
  from .data_sink import DataSink
70
68
  from .data_sink_component import DataSinkComponent
71
69
  from .data_sink_create import DataSinkCreate
72
70
  from .data_sink_create_component import DataSinkCreateComponent
73
- from .data_sink_definition import DataSinkDefinition
74
71
  from .data_source import DataSource
75
72
  from .data_source_component import DataSourceComponent
76
73
  from .data_source_create import DataSourceCreate
77
74
  from .data_source_create_component import DataSourceCreateComponent
78
75
  from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
79
76
  from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
80
- from .data_source_definition import DataSourceDefinition
81
77
  from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
82
78
  from .delete_params import DeleteParams
79
+ from .document_block import DocumentBlock
80
+ from .document_chunk_mode import DocumentChunkMode
83
81
  from .document_ingestion_job_params import DocumentIngestionJobParams
84
82
  from .edit_suggestion import EditSuggestion
85
83
  from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
@@ -115,6 +113,7 @@ from .extract_job_create import ExtractJobCreate
115
113
  from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
116
114
  from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
117
115
  from .extract_mode import ExtractMode
116
+ from .extract_models import ExtractModels
118
117
  from .extract_resultset import ExtractResultset
119
118
  from .extract_resultset_data import ExtractResultsetData
120
119
  from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
@@ -171,11 +170,15 @@ from .llama_index_core_base_llms_types_chat_message import LlamaIndexCoreBaseLlm
171
170
  from .llama_index_core_base_llms_types_chat_message_blocks_item import (
172
171
  LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
173
172
  LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
173
+ LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
174
174
  LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
175
175
  LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
176
176
  )
177
177
  from .llama_parse_parameters import LlamaParseParameters
178
178
  from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
179
+ from .llm_config_result import LlmConfigResult
180
+ from .llm_config_result_llm_type import LlmConfigResultLlmType
181
+ from .llm_configs_response import LlmConfigsResponse
179
182
  from .llm_model_data import LlmModelData
180
183
  from .llm_parameters import LlmParameters
181
184
  from .load_files_job_config import LoadFilesJobConfig
@@ -323,7 +326,6 @@ from .text_node import TextNode
323
326
  from .text_node_relationships_value import TextNodeRelationshipsValue
324
327
  from .text_node_with_score import TextNodeWithScore
325
328
  from .token_chunking_config import TokenChunkingConfig
326
- from .transformation_category_names import TransformationCategoryNames
327
329
  from .usage_and_plan import UsageAndPlan
328
330
  from .usage_metric_response import UsageMetricResponse
329
331
  from .usage_response import UsageResponse
@@ -399,23 +401,21 @@ __all__ = [
399
401
  "CompositeRetrievedTextNodeWithScore",
400
402
  "ConfigurableDataSinkNames",
401
403
  "ConfigurableDataSourceNames",
402
- "ConfigurableTransformationDefinition",
403
- "ConfigurableTransformationNames",
404
404
  "CreditType",
405
405
  "DataSink",
406
406
  "DataSinkComponent",
407
407
  "DataSinkCreate",
408
408
  "DataSinkCreateComponent",
409
- "DataSinkDefinition",
410
409
  "DataSource",
411
410
  "DataSourceComponent",
412
411
  "DataSourceCreate",
413
412
  "DataSourceCreateComponent",
414
413
  "DataSourceCreateCustomMetadataValue",
415
414
  "DataSourceCustomMetadataValue",
416
- "DataSourceDefinition",
417
415
  "DataSourceUpdateDispatcherConfig",
418
416
  "DeleteParams",
417
+ "DocumentBlock",
418
+ "DocumentChunkMode",
419
419
  "DocumentIngestionJobParams",
420
420
  "EditSuggestion",
421
421
  "EditSuggestionBlocksItem",
@@ -447,6 +447,7 @@ __all__ = [
447
447
  "ExtractJobCreateDataSchemaOverride",
448
448
  "ExtractJobCreateDataSchemaOverrideZeroValue",
449
449
  "ExtractMode",
450
+ "ExtractModels",
450
451
  "ExtractResultset",
451
452
  "ExtractResultsetData",
452
453
  "ExtractResultsetDataItemValue",
@@ -500,10 +501,14 @@ __all__ = [
500
501
  "LlamaIndexCoreBaseLlmsTypesChatMessage",
501
502
  "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
502
503
  "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio",
504
+ "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
503
505
  "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
504
506
  "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
505
507
  "LlamaParseParameters",
506
508
  "LlamaParseSupportedFileExtensions",
509
+ "LlmConfigResult",
510
+ "LlmConfigResultLlmType",
511
+ "LlmConfigsResponse",
507
512
  "LlmModelData",
508
513
  "LlmParameters",
509
514
  "LoadFilesJobConfig",
@@ -643,7 +648,6 @@ __all__ = [
643
648
  "TextNodeRelationshipsValue",
644
649
  "TextNodeWithScore",
645
650
  "TokenChunkingConfig",
646
- "TransformationCategoryNames",
647
651
  "UsageAndPlan",
648
652
  "UsageMetricResponse",
649
653
  "UsageResponse",
@@ -18,6 +18,7 @@ class CloudS3DataSource(pydantic.BaseModel):
18
18
  supports_access_control: typing.Optional[bool]
19
19
  bucket: str = pydantic.Field(description="The name of the S3 bucket to read from.")
20
20
  prefix: typing.Optional[str]
21
+ regex_pattern: typing.Optional[str]
21
22
  aws_access_id: typing.Optional[str]
22
23
  aws_access_secret: typing.Optional[str]
23
24
  s_3_endpoint_url: typing.Optional[str] = pydantic.Field(alias="s3_endpoint_url")
@@ -4,7 +4,6 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
- from .configurable_data_sink_names import ConfigurableDataSinkNames
8
7
 
9
8
  try:
10
9
  import pydantic
@@ -15,20 +14,12 @@ except ImportError:
15
14
  import pydantic # type: ignore
16
15
 
17
16
 
18
- class DataSinkDefinition(pydantic.BaseModel):
19
- """
20
- Schema for a data sink definition.
21
- """
22
-
23
- label: str = pydantic.Field(
24
- description="The label field will be used to display the name of the component in the UI"
25
- )
26
- json_schema: typing.Dict[str, typing.Any] = pydantic.Field(
27
- description="The json_schema field can be used by clients to determine how to construct the component"
28
- )
29
- sink_type: ConfigurableDataSinkNames = pydantic.Field(
30
- description="The name field will act as the unique identifier of DataSinkDefinition objects"
31
- )
17
+ class DocumentBlock(pydantic.BaseModel):
18
+ data: typing.Optional[str]
19
+ path: typing.Optional[str]
20
+ url: typing.Optional[str]
21
+ title: typing.Optional[str]
22
+ document_mimetype: typing.Optional[str]
32
23
 
33
24
  def json(self, **kwargs: typing.Any) -> str:
34
25
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -0,0 +1,17 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class DocumentChunkMode(str, enum.Enum):
10
+ PAGE = "PAGE"
11
+ SECTION = "SECTION"
12
+
13
+ def visit(self, page: typing.Callable[[], T_Result], section: typing.Callable[[], T_Result]) -> T_Result:
14
+ if self is DocumentChunkMode.PAGE:
15
+ return page()
16
+ if self is DocumentChunkMode.SECTION:
17
+ return section()
@@ -4,6 +4,7 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
+ from .document_chunk_mode import DocumentChunkMode
7
8
  from .extract_mode import ExtractMode
8
9
  from .extract_target import ExtractTarget
9
10
 
@@ -26,6 +27,9 @@ class ExtractConfig(pydantic.BaseModel):
26
27
  system_prompt: typing.Optional[str]
27
28
  use_reasoning: typing.Optional[bool] = pydantic.Field(description="Whether to use reasoning for the extraction.")
28
29
  cite_sources: typing.Optional[bool] = pydantic.Field(description="Whether to cite sources for the extraction.")
30
+ chunk_mode: typing.Optional[DocumentChunkMode] = pydantic.Field(
31
+ description="The mode to use for chunking the document."
32
+ )
29
33
  invalidate_cache: typing.Optional[bool] = pydantic.Field(
30
34
  description="Whether to invalidate the cache for the extraction."
31
35
  )
@@ -9,6 +9,7 @@ T_Result = typing.TypeVar("T_Result")
9
9
  class ExtractMode(str, enum.Enum):
10
10
  FAST = "FAST"
11
11
  BALANCED = "BALANCED"
12
+ PREMIUM = "PREMIUM"
12
13
  MULTIMODAL = "MULTIMODAL"
13
14
  ACCURATE = "ACCURATE"
14
15
 
@@ -16,6 +17,7 @@ class ExtractMode(str, enum.Enum):
16
17
  self,
17
18
  fast: typing.Callable[[], T_Result],
18
19
  balanced: typing.Callable[[], T_Result],
20
+ premium: typing.Callable[[], T_Result],
19
21
  multimodal: typing.Callable[[], T_Result],
20
22
  accurate: typing.Callable[[], T_Result],
21
23
  ) -> T_Result:
@@ -23,6 +25,8 @@ class ExtractMode(str, enum.Enum):
23
25
  return fast()
24
26
  if self is ExtractMode.BALANCED:
25
27
  return balanced()
28
+ if self is ExtractMode.PREMIUM:
29
+ return premium()
26
30
  if self is ExtractMode.MULTIMODAL:
27
31
  return multimodal()
28
32
  if self is ExtractMode.ACCURATE:
@@ -0,0 +1,33 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class ExtractModels(str, enum.Enum):
10
+ GPT_4_O = "gpt-4o"
11
+ GPT_4_O_MINI = "gpt-4o-mini"
12
+ GPT_41 = "gpt-4.1"
13
+ GPT_41_MINI = "gpt-4.1-mini"
14
+ O_3_MINI = "o3-mini"
15
+
16
+ def visit(
17
+ self,
18
+ gpt_4_o: typing.Callable[[], T_Result],
19
+ gpt_4_o_mini: typing.Callable[[], T_Result],
20
+ gpt_41: typing.Callable[[], T_Result],
21
+ gpt_41_mini: typing.Callable[[], T_Result],
22
+ o_3_mini: typing.Callable[[], T_Result],
23
+ ) -> T_Result:
24
+ if self is ExtractModels.GPT_4_O:
25
+ return gpt_4_o()
26
+ if self is ExtractModels.GPT_4_O_MINI:
27
+ return gpt_4_o_mini()
28
+ if self is ExtractModels.GPT_41:
29
+ return gpt_41()
30
+ if self is ExtractModels.GPT_41_MINI:
31
+ return gpt_41_mini()
32
+ if self is ExtractModels.O_3_MINI:
33
+ return o_3_mini()
@@ -7,6 +7,7 @@ import typing
7
7
  import typing_extensions
8
8
 
9
9
  from .audio_block import AudioBlock
10
+ from .document_block import DocumentBlock
10
11
  from .image_block import ImageBlock
11
12
  from .text_block import TextBlock
12
13
 
@@ -20,6 +21,15 @@ class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio(AudioBlock):
20
21
  allow_population_by_field_name = True
21
22
 
22
23
 
24
+ class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document(DocumentBlock):
25
+ block_type: typing_extensions.Literal["document"]
26
+
27
+ class Config:
28
+ frozen = True
29
+ smart_union = True
30
+ allow_population_by_field_name = True
31
+
32
+
23
33
  class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image(ImageBlock):
24
34
  block_type: typing_extensions.Literal["image"]
25
35
 
@@ -40,6 +50,7 @@ class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text(TextBlock):
40
50
 
41
51
  LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem = typing.Union[
42
52
  LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
53
+ LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
43
54
  LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
44
55
  LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
45
56
  ]
@@ -4,7 +4,7 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
- from .configurable_data_source_names import ConfigurableDataSourceNames
7
+ from .llm_config_result_llm_type import LlmConfigResultLlmType
8
8
 
9
9
  try:
10
10
  import pydantic
@@ -15,20 +15,11 @@ except ImportError:
15
15
  import pydantic # type: ignore
16
16
 
17
17
 
18
- class DataSourceDefinition(pydantic.BaseModel):
19
- """
20
- Schema for a data source definition.
21
- """
22
-
23
- label: str = pydantic.Field(
24
- description="The label field will be used to display the name of the component in the UI"
25
- )
26
- json_schema: typing.Dict[str, typing.Any] = pydantic.Field(
27
- description="The json_schema field can be used by clients to determine how to construct the component"
28
- )
29
- source_type: ConfigurableDataSourceNames = pydantic.Field(
30
- description="The name field will act as the unique identifier of DataSourceDefinition objects"
31
- )
18
+ class LlmConfigResult(pydantic.BaseModel):
19
+ llm_type: LlmConfigResultLlmType
20
+ is_enabled: bool
21
+ valid: bool
22
+ error_message: typing.Optional[str]
32
23
 
33
24
  def json(self, **kwargs: typing.Any) -> str:
34
25
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -0,0 +1,33 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class LlmConfigResultLlmType(str, enum.Enum):
10
+ OPENAI = "openai"
11
+ ANTHROPIC = "anthropic"
12
+ GEMINI = "gemini"
13
+ AWS_BEDROCK = "aws_bedrock"
14
+ AZURE_OPENAI = "azure_openai"
15
+
16
+ def visit(
17
+ self,
18
+ openai: typing.Callable[[], T_Result],
19
+ anthropic: typing.Callable[[], T_Result],
20
+ gemini: typing.Callable[[], T_Result],
21
+ aws_bedrock: typing.Callable[[], T_Result],
22
+ azure_openai: typing.Callable[[], T_Result],
23
+ ) -> T_Result:
24
+ if self is LlmConfigResultLlmType.OPENAI:
25
+ return openai()
26
+ if self is LlmConfigResultLlmType.ANTHROPIC:
27
+ return anthropic()
28
+ if self is LlmConfigResultLlmType.GEMINI:
29
+ return gemini()
30
+ if self is LlmConfigResultLlmType.AWS_BEDROCK:
31
+ return aws_bedrock()
32
+ if self is LlmConfigResultLlmType.AZURE_OPENAI:
33
+ return azure_openai()
@@ -0,0 +1,33 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .llm_config_result import LlmConfigResult
8
+
9
+ try:
10
+ import pydantic
11
+ if pydantic.__version__.startswith("1."):
12
+ raise ImportError
13
+ import pydantic.v1 as pydantic # type: ignore
14
+ except ImportError:
15
+ import pydantic # type: ignore
16
+
17
+
18
+ class LlmConfigsResponse(pydantic.BaseModel):
19
+ llm_configs: typing.List[LlmConfigResult]
20
+ last_validated_at: str
21
+
22
+ def json(self, **kwargs: typing.Any) -> str:
23
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
24
+ return super().json(**kwargs_with_defaults)
25
+
26
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
27
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
28
+ return super().dict(**kwargs_with_defaults)
29
+
30
+ class Config:
31
+ frozen = True
32
+ smart_union = True
33
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -40,9 +40,7 @@ class PipelineCreate(pydantic.BaseModel):
40
40
  eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
41
41
  description="Eval parameters for the pipeline."
42
42
  )
43
- llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
44
- description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
45
- )
43
+ llama_parse_parameters: typing.Optional[LlamaParseParameters]
46
44
  status: typing.Optional[str]
47
45
  metadata_config: typing.Optional[PipelineMetadataConfig]
48
46
  name: str
@@ -4,6 +4,7 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
+ from .extract_models import ExtractModels
7
8
  from .prompt_conf import PromptConf
8
9
  from .schema_relax_mode import SchemaRelaxMode
9
10
  from .struct_mode import StructMode
@@ -22,7 +23,7 @@ class StructParseConf(pydantic.BaseModel):
22
23
  Configuration for the structured parsing agent.
23
24
  """
24
25
 
25
- model: typing.Optional[str] = pydantic.Field(description="The model to use for the structured parsing.")
26
+ model: typing.Optional[ExtractModels] = pydantic.Field(description="The model to use for the structured parsing.")
26
27
  temperature: typing.Optional[float] = pydantic.Field(
27
28
  description="The temperature to use for the structured parsing."
28
29
  )
@@ -15,7 +15,7 @@ class SupportedLlmModelNames(str, enum.Enum):
15
15
  AZURE_OPENAI_GPT_4 = "AZURE_OPENAI_GPT_4"
16
16
  CLAUDE_3_5_SONNET = "CLAUDE_3_5_SONNET"
17
17
  BEDROCK_CLAUDE_3_5_SONNET = "BEDROCK_CLAUDE_3_5_SONNET"
18
- VERTEX_AI_CLAUDE_3_5_SONNET = "VERTEX_AI_CLAUDE_3_5_SONNET"
18
+ VERTEX_AI_CLAUDE_3_5_SONNET_V_2 = "VERTEX_AI_CLAUDE_3_5_SONNET_V2"
19
19
 
20
20
  def visit(
21
21
  self,
@@ -27,7 +27,7 @@ class SupportedLlmModelNames(str, enum.Enum):
27
27
  azure_openai_gpt_4: typing.Callable[[], T_Result],
28
28
  claude_3_5_sonnet: typing.Callable[[], T_Result],
29
29
  bedrock_claude_3_5_sonnet: typing.Callable[[], T_Result],
30
- vertex_ai_claude_3_5_sonnet: typing.Callable[[], T_Result],
30
+ vertex_ai_claude_3_5_sonnet_v_2: typing.Callable[[], T_Result],
31
31
  ) -> T_Result:
32
32
  if self is SupportedLlmModelNames.GPT_4_O:
33
33
  return gpt_4_o()
@@ -45,5 +45,5 @@ class SupportedLlmModelNames(str, enum.Enum):
45
45
  return claude_3_5_sonnet()
46
46
  if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET:
47
47
  return bedrock_claude_3_5_sonnet()
48
- if self is SupportedLlmModelNames.VERTEX_AI_CLAUDE_3_5_SONNET:
49
- return vertex_ai_claude_3_5_sonnet()
48
+ if self is SupportedLlmModelNames.VERTEX_AI_CLAUDE_3_5_SONNET_V_2:
49
+ return vertex_ai_claude_3_5_sonnet_v_2()
@@ -25,6 +25,7 @@ class UserOrganizationRole(pydantic.BaseModel):
25
25
  updated_at: typing.Optional[dt.datetime]
26
26
  user_id: str = pydantic.Field(description="The user's ID.")
27
27
  organization_id: str = pydantic.Field(description="The organization's ID.")
28
+ project_ids: typing.Optional[typing.List[str]]
28
29
  role_id: str = pydantic.Field(description="The role's ID.")
29
30
  role: Role = pydantic.Field(description="The role.")
30
31
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: llama-cloud
3
- Version: 0.1.21
3
+ Version: 0.1.22
4
4
  Summary:
5
5
  License: MIT
6
6
  Author: Logan Markewich