llama-cloud 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

This version of llama-cloud has been flagged as potentially problematic.

Files changed (105)
  1. llama_cloud/__init__.py +12 -10
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/__init__.py +2 -1
  4. llama_cloud/resources/data_sinks/client.py +14 -14
  5. llama_cloud/resources/data_sources/client.py +16 -16
  6. llama_cloud/resources/embedding_model_configs/client.py +80 -24
  7. llama_cloud/resources/evals/client.py +36 -26
  8. llama_cloud/resources/extraction/client.py +32 -32
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +53 -28
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +60 -56
  14. llama_cloud/resources/parsing/client.py +555 -324
  15. llama_cloud/resources/pipelines/client.py +446 -302
  16. llama_cloud/resources/projects/client.py +270 -136
  17. llama_cloud/types/__init__.py +10 -10
  18. llama_cloud/types/azure_open_ai_embedding.py +12 -6
  19. llama_cloud/types/base_prompt_template.py +6 -2
  20. llama_cloud/types/bedrock_embedding.py +12 -6
  21. llama_cloud/types/character_splitter.py +4 -2
  22. llama_cloud/types/chat_message.py +1 -1
  23. llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
  24. llama_cloud/types/cloud_box_data_source.py +13 -6
  25. llama_cloud/types/cloud_confluence_data_source.py +7 -6
  26. llama_cloud/types/cloud_document.py +3 -1
  27. llama_cloud/types/cloud_document_create.py +3 -1
  28. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  29. llama_cloud/types/cloud_jira_data_source.py +7 -4
  30. llama_cloud/types/cloud_notion_page_data_source.py +3 -2
  31. llama_cloud/types/cloud_one_drive_data_source.py +6 -3
  32. llama_cloud/types/cloud_s_3_data_source.py +9 -4
  33. llama_cloud/types/cloud_sharepoint_data_source.py +9 -6
  34. llama_cloud/types/cloud_slack_data_source.py +7 -6
  35. llama_cloud/types/code_splitter.py +1 -1
  36. llama_cloud/types/cohere_embedding.py +7 -3
  37. llama_cloud/types/data_sink.py +4 -4
  38. llama_cloud/types/data_sink_create.py +1 -1
  39. llama_cloud/types/data_source.py +7 -5
  40. llama_cloud/types/data_source_create.py +4 -2
  41. llama_cloud/types/embedding_model_config.py +2 -2
  42. llama_cloud/types/embedding_model_config_update.py +4 -2
  43. llama_cloud/types/eval_dataset.py +2 -2
  44. llama_cloud/types/eval_dataset_job_record.py +13 -7
  45. llama_cloud/types/eval_execution_params_override.py +6 -2
  46. llama_cloud/types/eval_question.py +2 -2
  47. llama_cloud/types/extraction_result.py +2 -2
  48. llama_cloud/types/extraction_schema.py +5 -3
  49. llama_cloud/types/file.py +15 -7
  50. llama_cloud/types/file_permission_info_value.py +5 -0
  51. llama_cloud/types/filter_operator.py +2 -2
  52. llama_cloud/types/gemini_embedding.py +10 -6
  53. llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
  54. llama_cloud/types/input_message.py +3 -1
  55. llama_cloud/types/job_name_mapping.py +4 -0
  56. llama_cloud/types/llama_parse_parameters.py +11 -0
  57. llama_cloud/types/llm.py +4 -2
  58. llama_cloud/types/llm_parameters.py +5 -2
  59. llama_cloud/types/local_eval.py +10 -8
  60. llama_cloud/types/local_eval_results.py +1 -1
  61. llama_cloud/types/managed_ingestion_status_response.py +5 -3
  62. llama_cloud/types/markdown_element_node_parser.py +5 -3
  63. llama_cloud/types/markdown_node_parser.py +1 -1
  64. llama_cloud/types/metadata_filter.py +2 -2
  65. llama_cloud/types/metric_result.py +3 -3
  66. llama_cloud/types/node_parser.py +1 -1
  67. llama_cloud/types/open_ai_embedding.py +12 -6
  68. llama_cloud/types/organization.py +2 -2
  69. llama_cloud/types/page_splitter_node_parser.py +2 -2
  70. llama_cloud/types/parsing_job_structured_result.py +32 -0
  71. llama_cloud/types/permission.py +3 -3
  72. llama_cloud/types/pipeline.py +17 -7
  73. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  74. llama_cloud/types/pipeline_create.py +15 -5
  75. llama_cloud/types/pipeline_data_source.py +13 -7
  76. llama_cloud/types/pipeline_data_source_create.py +3 -1
  77. llama_cloud/types/pipeline_deployment.py +4 -4
  78. llama_cloud/types/pipeline_file.py +25 -11
  79. llama_cloud/types/pipeline_file_create.py +3 -1
  80. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  81. llama_cloud/types/playground_session.py +2 -2
  82. llama_cloud/types/preset_retrieval_params.py +14 -7
  83. llama_cloud/types/presigned_url.py +3 -1
  84. llama_cloud/types/project.py +2 -2
  85. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  86. llama_cloud/types/prompt_spec.py +4 -2
  87. llama_cloud/types/role.py +3 -3
  88. llama_cloud/types/sentence_splitter.py +4 -2
  89. llama_cloud/types/text_node.py +3 -3
  90. llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
  91. llama_cloud/types/token_text_splitter.py +1 -1
  92. llama_cloud/types/user_organization.py +9 -5
  93. llama_cloud/types/user_organization_create.py +4 -4
  94. llama_cloud/types/user_organization_delete.py +2 -2
  95. llama_cloud/types/user_organization_role.py +2 -2
  96. llama_cloud/types/value.py +5 -0
  97. llama_cloud/types/vertex_text_embedding.py +9 -5
  98. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +2 -1
  99. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +101 -100
  100. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +1 -1
  101. llama_cloud/types/data_sink_component.py +0 -20
  102. llama_cloud/types/data_source_component.py +0 -28
  103. llama_cloud/types/metadata_filter_value.py +0 -5
  104. llama_cloud/types/pipeline_data_source_component.py +0 -28
  105. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
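Most of the hunks below follow one pattern: optional fields on the Fern-generated pydantic models gain an explicit pydantic.Field(description=...), which surfaces the API documentation in each model's JSON schema without changing runtime behavior. A minimal before/after sketch (ExampleModel is illustrative, not a package type):

import datetime as dt
import typing

try:
    import pydantic.v1 as pydantic  # pydantic v2 installs expose the v1 API here
except ImportError:
    import pydantic  # plain pydantic v1

class ExampleModel(pydantic.BaseModel):
    # 0.1.5 style: created_at: typing.Optional[dt.datetime]
    # 0.1.6 style: the description lands in the generated schema
    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")

print(ExampleModel.schema()["properties"]["created_at"]["description"])  # Creation datetime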
llama_cloud/types/pipeline.py CHANGED
@@ -29,20 +29,26 @@ class Pipeline(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str
     project_id: str
-    embedding_model_config_id: typing.Optional[str]
+    embedding_model_config_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the EmbeddingModelConfig this pipeline is using."
+    )
     pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
         description="Type of pipeline. Either PLAYGROUND or MANAGED."
     )
-    managed_pipeline_id: typing.Optional[str]
+    managed_pipeline_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the ManagedPipeline this playground pipeline is linked to."
+    )
     embedding_config: PipelineEmbeddingConfig
     configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
         description="Deprecated don't use it, List of configured transformations."
     )
-    config_hash: typing.Optional[PipelineConfigurationHashes]
+    config_hash: typing.Optional[PipelineConfigurationHashes] = pydantic.Field(
+        description="Hashes for the configuration of the pipeline."
+    )
     transform_config: typing.Optional[PipelineTransformConfig] = pydantic.Field(
         description="Configuration for the transformation."
     )
@@ -52,8 +58,12 @@ class Pipeline(pydantic.BaseModel):
     eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
         description="Eval parameters for the pipeline."
     )
-    llama_parse_parameters: typing.Optional[LlamaParseParameters]
-    data_sink: typing.Optional[DataSink]
+    llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
+        description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
+    )
+    data_sink: typing.Optional[DataSink] = pydantic.Field(
+        description="The data sink for the pipeline. If None, the pipeline will use the fully managed data sink."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
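The json() override kept in the context lines above is why these annotations are additive rather than breaking: serialization still defaults to by_alias=True and exclude_unset=True, so optional fields that were never set stay out of payloads. A small self-contained sketch of that behavior (Demo is illustrative, not a package type):

import typing

try:
    import pydantic.v1 as pydantic  # pydantic v2 installs
except ImportError:
    import pydantic  # pydantic v1

class Demo(pydantic.BaseModel):
    id: str
    managed_pipeline_id: typing.Optional[str] = pydantic.Field(description="Linked ManagedPipeline ID.")

    def json(self, **kwargs: typing.Any) -> str:
        # Same pattern as the generated models above.
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

print(Demo(id="p_123").json())  # -> {"id": "p_123"}; the unset optional field is omitted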
llama_cloud/types/pipeline_configuration_hashes.py CHANGED
@@ -19,9 +19,9 @@ class PipelineConfigurationHashes(pydantic.BaseModel):
     Hashes for the configuration of a pipeline.
     """
 
-    embedding_config_hash: typing.Optional[str]
-    parsing_config_hash: typing.Optional[str]
-    transform_config_hash: typing.Optional[str]
+    embedding_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the embedding config.")
+    parsing_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the llama parse parameters.")
+    transform_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the transform config.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_create.py CHANGED
@@ -31,10 +31,18 @@ class PipelineCreate(pydantic.BaseModel):
     transform_config: typing.Optional[PipelineCreateTransformConfig] = pydantic.Field(
         description="Configuration for the transformation."
     )
-    configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]
-    data_sink_id: typing.Optional[str]
-    embedding_model_config_id: typing.Optional[str]
-    data_sink: typing.Optional[DataSinkCreate]
+    configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
+        description="Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline."
+    )
+    data_sink_id: typing.Optional[str] = pydantic.Field(
+        description="Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID."
+    )
+    embedding_model_config_id: typing.Optional[str] = pydantic.Field(
+        description="Embedding model config ID. When provided instead of embedding_config, the embedding model config will be looked up by ID."
+    )
+    data_sink: typing.Optional[DataSinkCreate] = pydantic.Field(
+        description="Data sink. When provided instead of data_sink_id, the data sink will be created."
+    )
     preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = pydantic.Field(
         description="Preset retrieval parameters for the pipeline."
     )
@@ -46,7 +54,9 @@ class PipelineCreate(pydantic.BaseModel):
     pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
         description="Type of pipeline. Either PLAYGROUND or MANAGED."
     )
-    managed_pipeline_id: typing.Optional[str]
+    managed_pipeline_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the ManagedPipeline this playground pipeline is linked to."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
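The new descriptions on PipelineCreate spell out the precedence between the paired fields: data_sink_id reuses an existing sink looked up by ID, while data_sink asks for one to be created from a DataSinkCreate payload (and likewise embedding_model_config_id versus embedding_config). A hedged sketch, assuming PipelineCreate is re-exported at the package root as Fern SDKs usually do and that name is the only other required field:

from llama_cloud import PipelineCreate

# Reuse an existing sink (looked up by ID, per the description above);
# pass data_sink=DataSinkCreate(...) instead to have one created.
request = PipelineCreate(name="docs-pipeline", data_sink_id="sink_123")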
llama_cloud/types/pipeline_data_source.py CHANGED
@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .configurable_data_source_names import ConfigurableDataSourceNames
-from .pipeline_data_source_component import PipelineDataSourceComponent
+from .data_source_create_component import DataSourceCreateComponent
 from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 
 try:
@@ -23,18 +23,24 @@ class PipelineDataSource(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
-    component: PipelineDataSourceComponent
+    custom_metadata: typing.Optional[
+        typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]
+    ] = pydantic.Field(description="Custom metadata that will be present on all data loaded from the data source")
+    component: DataSourceCreateComponent = pydantic.Field(description="Component that implements the data source")
     project_id: str
     data_source_id: str = pydantic.Field(description="The ID of the data source.")
     pipeline_id: str = pydantic.Field(description="The ID of the pipeline.")
     last_synced_at: dt.datetime = pydantic.Field(description="The last time the data source was automatically synced.")
-    sync_interval: typing.Optional[float]
-    sync_schedule_set_by: typing.Optional[str]
+    sync_interval: typing.Optional[float] = pydantic.Field(
+        description="The interval at which the data source should be synced."
+    )
+    sync_schedule_set_by: typing.Optional[str] = pydantic.Field(
+        description="The id of the user who set the sync schedule."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_data_source_create.py CHANGED
@@ -20,7 +20,9 @@ class PipelineDataSourceCreate(pydantic.BaseModel):
     """
 
     data_source_id: str = pydantic.Field(description="The ID of the data source.")
-    sync_interval: typing.Optional[float]
+    sync_interval: typing.Optional[float] = pydantic.Field(
+        description="The interval at which the data source should be synced."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_deployment.py CHANGED
@@ -17,11 +17,11 @@ except ImportError:
 
 class PipelineDeployment(pydantic.BaseModel):
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     status: ManagedIngestionStatus = pydantic.Field(description="Status of the pipeline deployment.")
-    started_at: typing.Optional[dt.datetime]
-    ended_at: typing.Optional[dt.datetime]
+    started_at: typing.Optional[dt.datetime] = pydantic.Field(description="Time the pipeline deployment started.")
+    ended_at: typing.Optional[dt.datetime] = pydantic.Field(description="Time the pipeline deployment finished.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_file.py CHANGED
@@ -6,6 +6,7 @@ import typing
 from ..core.datetime_utils import serialize_datetime
 from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
+from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
 
 try:
@@ -23,20 +24,33 @@ class PipelineFile(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: typing.Optional[str]
-    file_size: typing.Optional[int]
-    file_type: typing.Optional[str]
+    file_size: typing.Optional[int] = pydantic.Field(description="Size of the file in bytes")
+    file_type: typing.Optional[str] = pydantic.Field(description="File type (e.g. pdf, docx, etc.)")
     project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
-    last_modified_at: typing.Optional[dt.datetime]
-    resource_info: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileResourceInfoValue]]]
-    data_source_id: typing.Optional[str]
-    file_id: typing.Optional[str]
+    last_modified_at: typing.Optional[dt.datetime] = pydantic.Field(description="The last modified time of the file")
+    resource_info: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileResourceInfoValue]]] = pydantic.Field(
+        description="Resource information for the file"
+    )
+    permission_info: typing.Optional[
+        typing.Dict[str, typing.Optional[PipelineFilePermissionInfoValue]]
+    ] = pydantic.Field(description="Permission information for the file")
+    data_source_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the data source that the file belongs to"
+    )
+    file_id: typing.Optional[str] = pydantic.Field(description="The ID of the file")
     pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the file is associated with")
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
-    config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
-    indexed_page_count: typing.Optional[int]
+    custom_metadata: typing.Optional[
+        typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]
+    ] = pydantic.Field(description="Custom metadata for the file")
+    config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]] = pydantic.Field(
+        description="Hashes for the configuration of the pipeline."
+    )
+    indexed_page_count: typing.Optional[int] = pydantic.Field(
+        description="The number of pages that have been indexed for this file"
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_file_create.py CHANGED
@@ -21,7 +21,9 @@ class PipelineFileCreate(pydantic.BaseModel):
     """
 
     file_id: str = pydantic.Field(description="The ID of the file")
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCreateCustomMetadataValue]]]
+    custom_metadata: typing.Optional[
+        typing.Dict[str, typing.Optional[PipelineFileCreateCustomMetadataValue]]
+    ] = pydantic.Field(description="Custom metadata for the file")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_file_permission_info_value.py ADDED
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PipelineFilePermissionInfoValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
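The new PipelineFilePermissionInfoValue union admits any JSON-style value, so a file's permission_info map can carry strings, numbers, booleans, lists, or nested objects. An illustrative (non-package) example of the shape such a map may take:

import typing

PipelineFilePermissionInfoValue = typing.Union[
    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
]

permission_info: typing.Dict[str, typing.Optional[PipelineFilePermissionInfoValue]] = {
    "owner": "user_123",
    "groups": ["eng", "ml"],
    "inherited": False,
}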
llama_cloud/types/playground_session.py CHANGED
@@ -23,8 +23,8 @@ class PlaygroundSession(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     pipeline_id: str
     user_id: str
     llm_params_id: str
llama_cloud/types/preset_retrieval_params.py CHANGED
@@ -21,13 +21,20 @@ class PresetRetrievalParams(pydantic.BaseModel):
     Schema for the search params for an retrieval execution that can be preset for a pipeline.
     """
 
-    dense_similarity_top_k: typing.Optional[int]
-    sparse_similarity_top_k: typing.Optional[int]
-    enable_reranking: typing.Optional[bool]
-    rerank_top_n: typing.Optional[int]
-    alpha: typing.Optional[float]
-    search_filters: typing.Optional[MetadataFilters]
-    files_top_k: typing.Optional[int]
+    dense_similarity_top_k: typing.Optional[int] = pydantic.Field(description="Number of nodes for dense retrieval.")
+    dense_similarity_cutoff: typing.Optional[float] = pydantic.Field(
+        description="Minimum similarity score wrt query for retrieval"
+    )
+    sparse_similarity_top_k: typing.Optional[int] = pydantic.Field(description="Number of nodes for sparse retrieval.")
+    enable_reranking: typing.Optional[bool] = pydantic.Field(description="Enable reranking for retrieval")
+    rerank_top_n: typing.Optional[int] = pydantic.Field(description="Number of reranked nodes for returning.")
+    alpha: typing.Optional[float] = pydantic.Field(
+        description="Alpha value for hybrid retrieval to determine the weights between dense and sparse retrieval. 0 is sparse retrieval and 1 is dense retrieval."
+    )
+    search_filters: typing.Optional[MetadataFilters] = pydantic.Field(description="Search filters for retrieval.")
+    files_top_k: typing.Optional[int] = pydantic.Field(
+        description="Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content)."
+    )
     retrieval_mode: typing.Optional[RetrievalMode] = pydantic.Field(description="The retrieval mode for the query.")
     retrieve_image_nodes: typing.Optional[bool] = pydantic.Field(description="Whether to retrieve image nodes.")
     class_name: typing.Optional[str]
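Beyond the descriptions, this hunk adds one field, dense_similarity_cutoff, and pins down the hybrid-retrieval semantics of alpha: 0 is pure sparse retrieval, 1 is pure dense, and values in between blend the two score sets. A hedged construction sketch, assuming the type is re-exported at the package root; every field used here is optional, so partial construction validates:

from llama_cloud import PresetRetrievalParams

params = PresetRetrievalParams(
    dense_similarity_top_k=10,
    sparse_similarity_top_k=10,
    alpha=0.5,  # equal weight to dense and sparse scores
    dense_similarity_cutoff=0.7,  # drop dense hits scoring below 0.7 (new in 0.1.6)
    enable_reranking=True,
    rerank_top_n=5,
)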
llama_cloud/types/presigned_url.py CHANGED
@@ -21,7 +21,9 @@ class PresignedUrl(pydantic.BaseModel):
 
     url: str = pydantic.Field(description="A presigned URL for IO operations against a private file")
     expires_at: dt.datetime = pydantic.Field(description="The time at which the presigned URL expires")
-    form_fields: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    form_fields: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
+        description="Form fields for a presigned POST request"
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
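form_fields exists because S3-style presigned POST uploads require the signed form fields to travel alongside the file. A hedged helper using httpx (already a dependency of this package); the function and upload flow are illustrative, not part of the SDK:

import typing

import httpx

def upload_via_presigned_post(
    url: str,
    form_fields: typing.Optional[typing.Dict[str, typing.Optional[str]]],
    path: str,
) -> None:
    # The signed fields must be sent as form data alongside the file part.
    fields = {k: v for k, v in (form_fields or {}).items() if v is not None}
    with open(path, "rb") as f:
        httpx.post(url, data=fields, files={"file": f}).raise_for_status()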
llama_cloud/types/project.py CHANGED
@@ -21,8 +21,8 @@ class Project(pydantic.BaseModel):
 
     name: str
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     ad_hoc_eval_dataset_id: typing.Optional[str]
     organization_id: str = pydantic.Field(description="The Organization ID the project is under.")
     is_default: typing.Optional[bool] = pydantic.Field(
llama_cloud/types/prompt_mixin_prompts.py CHANGED
@@ -21,7 +21,7 @@ class PromptMixinPrompts(pydantic.BaseModel):
     """
 
     project_id: str = pydantic.Field(description="The ID of the project.")
-    id: typing.Optional[str]
+    id: typing.Optional[str] = pydantic.Field(description="The ID of the prompt set.")
     name: str = pydantic.Field(description="The name of the prompt set.")
     prompts: typing.List[PromptSpec] = pydantic.Field(description="The prompts.")
 
llama_cloud/types/prompt_spec.py CHANGED
@@ -19,8 +19,10 @@ class PromptSpec(pydantic.BaseModel):
     prompt_key: str = pydantic.Field(description="The key of the prompt in the PromptMixin.")
     prompt_class: str = pydantic.Field(description="The class of the prompt (PromptTemplate or ChatPromptTemplate).")
     prompt_type: str = pydantic.Field(description="The type of prompt.")
-    template: typing.Optional[str]
-    message_templates: typing.Optional[typing.List[ChatMessage]]
+    template: typing.Optional[str] = pydantic.Field(description="The template of the prompt.")
+    message_templates: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(
+        description="The chat message templates of the prompt."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/role.py CHANGED
@@ -21,10 +21,10 @@ class Role(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="A name for the role.")
-    organization_id: typing.Optional[str]
+    organization_id: typing.Optional[str] = pydantic.Field(description="The organization's ID.")
     permissions: typing.List[Permission] = pydantic.Field(description="The actual permissions of the role.")
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/sentence_splitter.py CHANGED
@@ -28,12 +28,14 @@ class SentenceSplitter(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
     paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
-    secondary_chunking_regex: typing.Optional[str]
+    secondary_chunking_regex: typing.Optional[str] = pydantic.Field(
+        description="Backup regex for splitting into sentences."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/text_node.py CHANGED
@@ -17,7 +17,7 @@ except ImportError:
 
 class TextNode(pydantic.BaseModel):
     id: typing.Optional[str] = pydantic.Field(alias="id_", description="Unique ID of the node.")
-    embedding: typing.Optional[typing.List[float]]
+    embedding: typing.Optional[typing.List[float]] = pydantic.Field(description="Embedding of the node.")
     extra_info: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="A flat dictionary of metadata fields"
     )
@@ -32,8 +32,8 @@ class TextNode(pydantic.BaseModel):
     )
     text: typing.Optional[str] = pydantic.Field(description="Text content of the node.")
     mimetype: typing.Optional[str] = pydantic.Field(description="MIME type of the node content.")
-    start_char_idx: typing.Optional[int]
-    end_char_idx: typing.Optional[int]
+    start_char_idx: typing.Optional[int] = pydantic.Field(description="Start char index of the node.")
+    end_char_idx: typing.Optional[int] = pydantic.Field(description="End char index of the node.")
     text_template: typing.Optional[str] = pydantic.Field(
         description="Template for how text is formatted, with {content} and {metadata_str} placeholders."
     )
llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} RENAMED
@@ -2,4 +2,4 @@
 
 import typing
 
-HuggingFaceInferenceApiEmbeddingToken = typing.Union[str, bool]
+Token = typing.Union[str, bool]
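This rename is the one change in this batch that can break imports: the alias HuggingFaceInferenceApiEmbeddingToken is now Token, and its module moved with it (entry 90 in the file list). On the consumer side the fix is a one-line import change:

# 0.1.5:
# from llama_cloud.types.hugging_face_inference_api_embedding_token import (
#     HuggingFaceInferenceApiEmbeddingToken,
# )
# 0.1.6:
from llama_cloud.types.token import Token  # typing.Union[str, bool]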
llama_cloud/types/token_text_splitter.py CHANGED
@@ -24,7 +24,7 @@ class TokenTextSplitter(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
llama_cloud/types/user_organization.py CHANGED
@@ -21,16 +21,20 @@ class UserOrganization(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     email: str = pydantic.Field(description="The user's email address.")
-    user_id: typing.Optional[str]
+    user_id: typing.Optional[str] = pydantic.Field(description="The user's ID.")
     organization_id: str = pydantic.Field(description="The organization's ID.")
     pending: typing.Optional[bool] = pydantic.Field(
         description="Whether the user's membership is pending account signup."
     )
-    invited_by_user_id: typing.Optional[str]
-    invited_by_user_email: typing.Optional[str]
+    invited_by_user_id: typing.Optional[str] = pydantic.Field(
+        description="The user ID of the user who added the user to the organization."
+    )
+    invited_by_user_email: typing.Optional[str] = pydantic.Field(
+        description="The email address of the user who added the user to the organization."
+    )
     roles: typing.List[UserOrganizationRole] = pydantic.Field(description="The roles of the user in the organization.")
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/user_organization_create.py CHANGED
@@ -19,10 +19,10 @@ class UserOrganizationCreate(pydantic.BaseModel):
     Schema for creating a user's membership to an organization.
     """
 
-    user_id: typing.Optional[str]
-    email: typing.Optional[str]
-    project_ids: typing.Optional[typing.List[str]]
-    role_id: typing.Optional[str]
+    user_id: typing.Optional[str] = pydantic.Field(description="The user's ID.")
+    email: typing.Optional[str] = pydantic.Field(description="The user's email address.")
+    project_ids: typing.Optional[typing.List[str]] = pydantic.Field(description="The project IDs to add the user to.")
+    role_id: typing.Optional[str] = pydantic.Field(description="The role ID to assign to the user.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/user_organization_delete.py CHANGED
@@ -19,8 +19,8 @@ class UserOrganizationDelete(pydantic.BaseModel):
     Schema for deleting a user's membership to an organization.
     """
 
-    user_id: typing.Optional[str]
-    email: typing.Optional[str]
+    user_id: typing.Optional[str] = pydantic.Field(description="The user's ID.")
+    email: typing.Optional[str] = pydantic.Field(description="The user's email address.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/user_organization_role.py CHANGED
@@ -21,8 +21,8 @@ class UserOrganizationRole(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     user_id: str = pydantic.Field(description="The user's ID.")
     organization_id: str = pydantic.Field(description="The organization's ID.")
     role_id: str = pydantic.Field(description="The role's ID.")
llama_cloud/types/value.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+Value = typing.Union[int, float, str, typing.List[str], typing.List[float], typing.List[int]]
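value.py arrives in the same release that removes metadata_filter_value.py (entries 96 and 103 in the file list), so Value appears to be its generalized replacement for metadata filter values. An illustrative (non-package) look at what the union admits:

import typing

Value = typing.Union[int, float, str, typing.List[str], typing.List[float], typing.List[int]]

year_filter: Value = 2024
tag_filter: Value = ["internal", "draft"]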
llama_cloud/types/vertex_text_embedding.py CHANGED
@@ -18,17 +18,21 @@ except ImportError:
 class VertexTextEmbedding(pydantic.BaseModel):
     model_name: typing.Optional[str] = pydantic.Field(description="The modelId of the VertexAI model to use.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    num_workers: typing.Optional[int]
+    num_workers: typing.Optional[int] = pydantic.Field(
+        description="The number of workers to use for async embedding calls."
+    )
     location: str = pydantic.Field(description="The default location to use when making API calls.")
     project: str = pydantic.Field(description="The default GCP project to use when making Vertex API calls.")
     embed_mode: typing.Optional[VertexEmbeddingMode] = pydantic.Field(description="The embedding mode to use.")
     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="Additional kwargs for the Vertex."
     )
-    client_email: typing.Optional[str]
-    token_uri: typing.Optional[str]
-    private_key_id: typing.Optional[str]
-    private_key: typing.Optional[str]
+    client_email: typing.Optional[str] = pydantic.Field(description="The client email for the VertexAI credentials.")
+    token_uri: typing.Optional[str] = pydantic.Field(description="The token URI for the VertexAI credentials.")
+    private_key_id: typing.Optional[str] = pydantic.Field(
+        description="The private key ID for the VertexAI credentials."
+    )
+    private_key: typing.Optional[str] = pydantic.Field(description="The private key for the VertexAI credentials.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
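The newly described optional fields on VertexTextEmbedding map onto a GCP service-account key: client_email, token_uri, private_key_id, and private_key. A hedged construction sketch with placeholder values, assuming the type is re-exported at the package root; project and location are the only required fields shown in this hunk:

from llama_cloud import VertexTextEmbedding

embedding = VertexTextEmbedding(
    project="my-gcp-project",
    location="us-central1",
    client_email="svc@my-gcp-project.iam.gserviceaccount.com",
    token_uri="https://oauth2.googleapis.com/token",
    private_key_id="abc123",
    private_key="-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
)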
{llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.1.5
+Version: 0.1.6
 Summary:
 License: MIT
 Author: Logan Markewich
@@ -12,6 +12,7 @@ Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: httpx (>=0.20.0)
 Requires-Dist: pydantic (>=1.10)
 Description-Content-Type: text/markdown