llama-cloud 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (105)
  1. llama_cloud/__init__.py +12 -10
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/__init__.py +2 -1
  4. llama_cloud/resources/data_sinks/client.py +14 -14
  5. llama_cloud/resources/data_sources/client.py +16 -16
  6. llama_cloud/resources/embedding_model_configs/client.py +80 -24
  7. llama_cloud/resources/evals/client.py +36 -26
  8. llama_cloud/resources/extraction/client.py +32 -32
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +53 -28
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +60 -56
  14. llama_cloud/resources/parsing/client.py +555 -324
  15. llama_cloud/resources/pipelines/client.py +446 -302
  16. llama_cloud/resources/projects/client.py +270 -136
  17. llama_cloud/types/__init__.py +10 -10
  18. llama_cloud/types/azure_open_ai_embedding.py +12 -6
  19. llama_cloud/types/base_prompt_template.py +6 -2
  20. llama_cloud/types/bedrock_embedding.py +12 -6
  21. llama_cloud/types/character_splitter.py +4 -2
  22. llama_cloud/types/chat_message.py +1 -1
  23. llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
  24. llama_cloud/types/cloud_box_data_source.py +13 -6
  25. llama_cloud/types/cloud_confluence_data_source.py +7 -6
  26. llama_cloud/types/cloud_document.py +3 -1
  27. llama_cloud/types/cloud_document_create.py +3 -1
  28. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  29. llama_cloud/types/cloud_jira_data_source.py +7 -4
  30. llama_cloud/types/cloud_notion_page_data_source.py +3 -2
  31. llama_cloud/types/cloud_one_drive_data_source.py +6 -3
  32. llama_cloud/types/cloud_s_3_data_source.py +9 -4
  33. llama_cloud/types/cloud_sharepoint_data_source.py +9 -6
  34. llama_cloud/types/cloud_slack_data_source.py +7 -6
  35. llama_cloud/types/code_splitter.py +1 -1
  36. llama_cloud/types/cohere_embedding.py +7 -3
  37. llama_cloud/types/data_sink.py +4 -4
  38. llama_cloud/types/data_sink_create.py +1 -1
  39. llama_cloud/types/data_source.py +7 -5
  40. llama_cloud/types/data_source_create.py +4 -2
  41. llama_cloud/types/embedding_model_config.py +2 -2
  42. llama_cloud/types/embedding_model_config_update.py +4 -2
  43. llama_cloud/types/eval_dataset.py +2 -2
  44. llama_cloud/types/eval_dataset_job_record.py +13 -7
  45. llama_cloud/types/eval_execution_params_override.py +6 -2
  46. llama_cloud/types/eval_question.py +2 -2
  47. llama_cloud/types/extraction_result.py +2 -2
  48. llama_cloud/types/extraction_schema.py +5 -3
  49. llama_cloud/types/file.py +15 -7
  50. llama_cloud/types/file_permission_info_value.py +5 -0
  51. llama_cloud/types/filter_operator.py +2 -2
  52. llama_cloud/types/gemini_embedding.py +10 -6
  53. llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
  54. llama_cloud/types/input_message.py +3 -1
  55. llama_cloud/types/job_name_mapping.py +4 -0
  56. llama_cloud/types/llama_parse_parameters.py +11 -0
  57. llama_cloud/types/llm.py +4 -2
  58. llama_cloud/types/llm_parameters.py +5 -2
  59. llama_cloud/types/local_eval.py +10 -8
  60. llama_cloud/types/local_eval_results.py +1 -1
  61. llama_cloud/types/managed_ingestion_status_response.py +5 -3
  62. llama_cloud/types/markdown_element_node_parser.py +5 -3
  63. llama_cloud/types/markdown_node_parser.py +1 -1
  64. llama_cloud/types/metadata_filter.py +2 -2
  65. llama_cloud/types/metric_result.py +3 -3
  66. llama_cloud/types/node_parser.py +1 -1
  67. llama_cloud/types/open_ai_embedding.py +12 -6
  68. llama_cloud/types/organization.py +2 -2
  69. llama_cloud/types/page_splitter_node_parser.py +2 -2
  70. llama_cloud/types/parsing_job_structured_result.py +32 -0
  71. llama_cloud/types/permission.py +3 -3
  72. llama_cloud/types/pipeline.py +17 -7
  73. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  74. llama_cloud/types/pipeline_create.py +15 -5
  75. llama_cloud/types/pipeline_data_source.py +13 -7
  76. llama_cloud/types/pipeline_data_source_create.py +3 -1
  77. llama_cloud/types/pipeline_deployment.py +4 -4
  78. llama_cloud/types/pipeline_file.py +25 -11
  79. llama_cloud/types/pipeline_file_create.py +3 -1
  80. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  81. llama_cloud/types/playground_session.py +2 -2
  82. llama_cloud/types/preset_retrieval_params.py +14 -7
  83. llama_cloud/types/presigned_url.py +3 -1
  84. llama_cloud/types/project.py +2 -2
  85. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  86. llama_cloud/types/prompt_spec.py +4 -2
  87. llama_cloud/types/role.py +3 -3
  88. llama_cloud/types/sentence_splitter.py +4 -2
  89. llama_cloud/types/text_node.py +3 -3
  90. llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
  91. llama_cloud/types/token_text_splitter.py +1 -1
  92. llama_cloud/types/user_organization.py +9 -5
  93. llama_cloud/types/user_organization_create.py +4 -4
  94. llama_cloud/types/user_organization_delete.py +2 -2
  95. llama_cloud/types/user_organization_role.py +2 -2
  96. llama_cloud/types/value.py +5 -0
  97. llama_cloud/types/vertex_text_embedding.py +9 -5
  98. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +2 -1
  99. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +101 -100
  100. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +1 -1
  101. llama_cloud/types/data_sink_component.py +0 -20
  102. llama_cloud/types/data_source_component.py +0 -28
  103. llama_cloud/types/metadata_filter_value.py +0 -5
  104. llama_cloud/types/pipeline_data_source_component.py +0 -28
  105. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
llama_cloud/types/eval_dataset.py CHANGED
```diff
@@ -21,8 +21,8 @@ class EvalDataset(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="The name of the EvalDataset.")
     project_id: str
 
```
llama_cloud/types/eval_dataset_job_record.py CHANGED
```diff
@@ -28,21 +28,27 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
     partitions: typing.Dict[str, str] = pydantic.Field(
         description="The partitions for this execution. Used for determining where to save job output."
     )
-    parameters: typing.Optional[EvalDatasetJobParams]
-    session_id: typing.Optional[str]
-    correlation_id: typing.Optional[str]
-    parent_job_execution_id: typing.Optional[str]
-    user_id: typing.Optional[str]
+    parameters: typing.Optional[EvalDatasetJobParams] = pydantic.Field(
+        description="Additional input parameters for the eval execution."
+    )
+    session_id: typing.Optional[str] = pydantic.Field(
+        description="The upstream request ID that created this job. Used for tracking the job across services."
+    )
+    correlation_id: typing.Optional[str] = pydantic.Field(
+        description="The correlation ID for this job. Used for tracking the job across services."
+    )
+    parent_job_execution_id: typing.Optional[str] = pydantic.Field(description="The ID of the parent job execution.")
+    user_id: typing.Optional[str] = pydantic.Field(description="The ID of the user that created this job")
     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
     status: StatusEnum
     error_code: typing.Optional[str]
     error_message: typing.Optional[str]
-    attempts: typing.Optional[int]
+    attempts: typing.Optional[int] = pydantic.Field(description="The number of times this job has been attempted")
     started_at: typing.Optional[dt.datetime]
     ended_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
-    data: typing.Optional[Base]
+    data: typing.Optional[Base] = pydantic.Field(description="Additional metadata for the job execution.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
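All of these generated models share the `json()`/`dict()` overrides visible at the bottom of the hunk, which default to `by_alias=True` and `exclude_unset=True`. A minimal sketch of what that means for the newly documented optional fields; the top-level import path and the `"SUCCESS"` status value are assumptions, not confirmed by this diff:

```python
# Sketch: with exclude_unset=True, optional fields that were never set are
# omitted from the serialized payload rather than emitted as null.
from llama_cloud import EvalDatasetJobRecord  # assumed re-export path

record = EvalDatasetJobRecord.parse_obj(
    {"partitions": {}, "status": "SUCCESS"}  # "SUCCESS" assumed to be a StatusEnum value
)
print(record.json())  # session_id, correlation_id, user_id, etc. do not appear
```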
llama_cloud/types/eval_execution_params_override.py CHANGED
```diff
@@ -20,8 +20,12 @@ class EvalExecutionParamsOverride(pydantic.BaseModel):
     Schema for the params override for an eval execution.
     """
 
-    llm_model: typing.Optional[SupportedLlmModelNames]
-    qa_prompt_tmpl: typing.Optional[str]
+    llm_model: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
+        description="The LLM model to use within eval execution."
+    )
+    qa_prompt_tmpl: typing.Optional[str] = pydantic.Field(
+        description="The template to use for the question answering prompt."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/eval_question.py CHANGED
```diff
@@ -16,8 +16,8 @@ except ImportError:
 
 class EvalQuestion(pydantic.BaseModel):
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     content: str = pydantic.Field(description="The content of the question.")
     eval_dataset_id: str
     eval_dataset_index: int = pydantic.Field(
```
llama_cloud/types/extraction_result.py CHANGED
```diff
@@ -22,8 +22,8 @@ class ExtractionResult(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     schema_id: str = pydantic.Field(description="The id of the schema")
     data: typing.Dict[str, typing.Optional[ExtractionResultDataValue]] = pydantic.Field(
         description="The data extracted from the file"
```
llama_cloud/types/extraction_schema.py CHANGED
```diff
@@ -21,11 +21,13 @@ class ExtractionSchema(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="The name of the extraction schema")
     project_id: str = pydantic.Field(description="The ID of the project that the extraction schema belongs to")
-    data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaDataSchemaValue]]]
+    data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaDataSchemaValue]]] = pydantic.Field(
+        description="The schema of the data"
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/file.py CHANGED
```diff
@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
 
 try:
@@ -21,15 +22,22 @@ class File(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str
-    file_size: typing.Optional[int]
-    file_type: typing.Optional[str]
+    file_size: typing.Optional[int] = pydantic.Field(description="Size of the file in bytes")
+    file_type: typing.Optional[str] = pydantic.Field(description="File type (e.g. pdf, docx, etc.)")
     project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
-    last_modified_at: typing.Optional[dt.datetime]
-    resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileResourceInfoValue]]]
-    data_source_id: typing.Optional[str]
+    last_modified_at: typing.Optional[dt.datetime] = pydantic.Field(description="The last modified time of the file")
+    resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileResourceInfoValue]]] = pydantic.Field(
+        description="Resource information for the file"
+    )
+    permission_info: typing.Optional[typing.Dict[str, typing.Optional[FilePermissionInfoValue]]] = pydantic.Field(
+        description="Permission information for the file"
+    )
+    data_source_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the data source that the file belongs to"
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/file_permission_info_value.py ADDED
```diff
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+FilePermissionInfoValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
```
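`FilePermissionInfoValue` is a loose JSON-style union, so each entry of the new `permission_info` field on `File` can hold any scalar, list, or object. A hypothetical illustration; the key names below are invented, since the diff does not specify a shape:

```python
import typing

# The same union the generated file declares.
FilePermissionInfoValue = typing.Union[
    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
]

# Invented keys, purely to show the permitted value shapes.
permission_info: typing.Dict[str, typing.Optional[FilePermissionInfoValue]] = {
    "owner": "user@example.com",          # str
    "groups": ["engineering", "admins"],  # list
    "inherited": False,                   # bool
}
```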
llama_cloud/types/filter_operator.py CHANGED
```diff
@@ -36,7 +36,7 @@ class FilterOperator(str, enum.Enum):
         in_: typing.Callable[[], T_Result],
         nin: typing.Callable[[], T_Result],
         any: typing.Callable[[], T_Result],
-        all: typing.Callable[[], T_Result],
+        all_: typing.Callable[[], T_Result],
         text_match: typing.Callable[[], T_Result],
         contains: typing.Callable[[], T_Result],
         is_empty: typing.Callable[[], T_Result],
@@ -60,7 +60,7 @@ class FilterOperator(str, enum.Enum):
         if self is FilterOperator.ANY:
             return any()
         if self is FilterOperator.ALL:
-            return all()
+            return all_()
         if self is FilterOperator.TEXT_MATCH:
             return text_match()
         if self is FilterOperator.CONTAINS:
```
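The rename from `all` to `all_` matters because the old parameter shadowed Python's builtin `all()` inside `visit()`. A standalone illustration of the hazard (not llama_cloud code):

```python
# With a trailing underscore, the callback and the builtin coexist in one scope.
def dispatch(all_):
    assert all([True, True])  # the builtin is still reachable here
    return all_()             # the callback is invoked explicitly

print(dispatch(lambda: "ALL handled"))  # -> "ALL handled"
```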
llama_cloud/types/gemini_embedding.py CHANGED
```diff
@@ -17,12 +17,16 @@ except ImportError:
 class GeminiEmbedding(pydantic.BaseModel):
     model_name: typing.Optional[str] = pydantic.Field(description="The modelId of the Gemini model to use.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    num_workers: typing.Optional[int]
-    title: typing.Optional[str]
-    task_type: typing.Optional[str]
-    api_key: typing.Optional[str]
-    api_base: typing.Optional[str]
-    transport: typing.Optional[str]
+    num_workers: typing.Optional[int] = pydantic.Field(
+        description="The number of workers to use for async embedding calls."
+    )
+    title: typing.Optional[str] = pydantic.Field(
+        description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid."
+    )
+    task_type: typing.Optional[str] = pydantic.Field(description="The task for embedding model.")
+    api_key: typing.Optional[str] = pydantic.Field(description="API key to access the model. Defaults to None.")
+    api_base: typing.Optional[str] = pydantic.Field(description="API base to access the model. Defaults to None.")
+    transport: typing.Optional[str] = pydantic.Field(description="Transport to access the model. Defaults to None.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/hugging_face_inference_api_embedding.py CHANGED
```diff
@@ -4,8 +4,8 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
 from .pooling import Pooling
+from .token import Token
 
 try:
     import pydantic
@@ -17,19 +17,35 @@ except ImportError:
 
 
 class HuggingFaceInferenceApiEmbedding(pydantic.BaseModel):
-    model_name: typing.Optional[str]
+    model_name: typing.Optional[str] = pydantic.Field(
+        description="Hugging Face model name. If None, the task will be used."
+    )
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    num_workers: typing.Optional[int]
-    pooling: typing.Optional[Pooling]
-    query_instruction: typing.Optional[str]
-    text_instruction: typing.Optional[str]
-    token: typing.Optional[HuggingFaceInferenceApiEmbeddingToken] = pydantic.Field(
+    num_workers: typing.Optional[int] = pydantic.Field(
+        description="The number of workers to use for async embedding calls."
+    )
+    pooling: typing.Optional[Pooling] = pydantic.Field(
+        description="Pooling strategy. If None, the model's default pooling is used."
+    )
+    query_instruction: typing.Optional[str] = pydantic.Field(
+        description="Instruction to prepend during query embedding."
+    )
+    text_instruction: typing.Optional[str] = pydantic.Field(description="Instruction to prepend during text embedding.")
+    token: typing.Optional[Token] = pydantic.Field(
         description="Hugging Face token. Will default to the locally saved token. Pass token=False if you don’t want to send your token to the server."
     )
-    timeout: typing.Optional[float]
-    headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
-    cookies: typing.Optional[typing.Dict[str, typing.Optional[str]]]
-    task: typing.Optional[str]
+    timeout: typing.Optional[float] = pydantic.Field(
+        description="The maximum number of seconds to wait for a response from the server. Loading a new model in Inference API can take up to several minutes. Defaults to None, meaning it will loop until the server is available."
+    )
+    headers: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
+        description="Additional headers to send to the server. By default only the authorization and user-agent headers are sent. Values in this dictionary will override the default values."
+    )
+    cookies: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
+        description="Additional cookies to send to the server."
+    )
+    task: typing.Optional[str] = pydantic.Field(
+        description="Optional task to pick Hugging Face's recommended model, used when model_name is left as default of None."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
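With the type moved to the shared `Token` alias (the file rename in the list above), construction is unchanged; per the field description, `token=False` opts out of sending a locally saved token. A hedged sketch, assuming the top-level re-export and that the `Token` union accepts a boolean as the description implies:

```python
from llama_cloud import HuggingFaceInferenceApiEmbedding  # assumed re-export path

embedding = HuggingFaceInferenceApiEmbedding(
    model_name="BAAI/bge-small-en-v1.5",  # illustrative model, not a package default
    token=False,                          # do not send a token to the server
)
```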
llama_cloud/types/input_message.py CHANGED
```diff
@@ -23,7 +23,9 @@ class InputMessage(pydantic.BaseModel):
     id: str = pydantic.Field(description="ID of the message, if any. a UUID.")
     role: MessageRole
     content: str
-    data: typing.Optional[typing.Dict[str, typing.Any]]
+    data: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+        description="Additional data to be stored with the message."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/job_name_mapping.py CHANGED
```diff
@@ -18,6 +18,7 @@ class JobNameMapping(str, enum.Enum):
     PARSE = "PARSE"
     TRANSFORM = "TRANSFORM"
     INGESTION = "INGESTION"
+    METADATA_UPDATE = "METADATA_UPDATE"
 
     def visit(
         self,
@@ -28,6 +29,7 @@ class JobNameMapping(str, enum.Enum):
         parse: typing.Callable[[], T_Result],
         transform: typing.Callable[[], T_Result],
         ingestion: typing.Callable[[], T_Result],
+        metadata_update: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is JobNameMapping.MANAGED_INGESTION:
             return managed_ingestion()
@@ -43,3 +45,5 @@ class JobNameMapping(str, enum.Enum):
             return transform()
         if self is JobNameMapping.INGESTION:
             return ingestion()
+        if self is JobNameMapping.METADATA_UPDATE:
+            return metadata_update()
```
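Because `visit()` now takes a required `metadata_update` callback, the dispatch stays exhaustive: callers upgrading to 0.1.6 must handle the new member explicitly or `visit()` raises a `TypeError` for the missing argument. A small membership check, assuming the top-level re-export:

```python
from llama_cloud import JobNameMapping  # assumed re-export path

job = JobNameMapping.METADATA_UPDATE
print(job.value)                                 # "METADATA_UPDATE"
print(JobNameMapping("METADATA_UPDATE") is job)  # True: str-enum lookup by value
```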
llama_cloud/types/llama_parse_parameters.py CHANGED
```diff
@@ -27,15 +27,22 @@ class LlamaParseParameters(pydantic.BaseModel):
     disable_reconstruction: typing.Optional[bool]
     disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
+    output_pdf_of_document: typing.Optional[bool]
     do_not_cache: typing.Optional[bool]
     fast_mode: typing.Optional[bool]
     skip_diagonal_text: typing.Optional[bool]
     gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
     gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
     do_not_unroll_columns: typing.Optional[bool]
+    html_make_all_elements_visible: typing.Optional[bool]
+    html_remove_fixed_elements: typing.Optional[bool]
     guess_xlsx_sheet_name: typing.Optional[bool]
     page_separator: typing.Optional[str]
     bounding_box: typing.Optional[str]
+    bbox_top: typing.Optional[float]
+    bbox_right: typing.Optional[float]
+    bbox_bottom: typing.Optional[float]
+    bbox_left: typing.Optional[float]
     target_pages: typing.Optional[str]
     use_vendor_multimodal_model: typing.Optional[bool]
     vendor_multimodal_model_name: typing.Optional[str]
@@ -61,6 +68,10 @@ class LlamaParseParameters(pydantic.BaseModel):
     auto_mode_trigger_on_text_in_page: typing.Optional[str]
     auto_mode_trigger_on_table_in_page: typing.Optional[bool]
     auto_mode_trigger_on_image_in_page: typing.Optional[bool]
+    structured_output: typing.Optional[bool]
+    structured_output_json_schema: typing.Optional[str]
+    structured_output_json_schema_name: typing.Optional[str]
+    max_pages: typing.Optional[int]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
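The new `structured_output*` fields are all optional, so existing callers are unaffected. A hedged sketch of opting in; the schema contents and name are invented, and whether the backend accepts this exact shape is not confirmed by the diff:

```python
import json

from llama_cloud import LlamaParseParameters  # assumed re-export path

schema = {"type": "object", "properties": {"title": {"type": "string"}}}
params = LlamaParseParameters(
    structured_output=True,
    structured_output_json_schema=json.dumps(schema),  # passed as a string per the field type
    structured_output_json_schema_name="invoice",      # hypothetical name
    max_pages=10,                                      # also new in this release
)
```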
llama_cloud/types/llm.py CHANGED
```diff
@@ -34,7 +34,7 @@ class Llm(pydantic.BaseModel):
     """
 
     callback_manager: typing.Optional[typing.Any]
-    system_prompt: typing.Optional[str]
+    system_prompt: typing.Optional[str] = pydantic.Field(description="System prompt for LLM calls.")
     messages_to_prompt: typing.Optional[str] = pydantic.Field(
         description="Function to convert a list of messages to an LLM prompt."
     )
@@ -43,7 +43,9 @@ class Llm(pydantic.BaseModel):
     )
     output_parser: typing.Optional[typing.Any]
     pydantic_program_mode: typing.Optional[PydanticProgramMode]
-    query_wrapper_prompt: typing.Optional[BasePromptTemplate]
+    query_wrapper_prompt: typing.Optional[BasePromptTemplate] = pydantic.Field(
+        description="Query wrapper prompt for LLM calls."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/llm_parameters.py CHANGED
```diff
@@ -19,8 +19,11 @@ class LlmParameters(pydantic.BaseModel):
     model_name: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
         description="The name of the model to use for LLM completions."
     )
-    system_prompt: typing.Optional[str]
-    temperature: typing.Optional[float]
+    system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the completion.")
+    temperature: typing.Optional[float] = pydantic.Field(description="The temperature value for the model.")
+    use_chain_of_thought_reasoning: typing.Optional[bool] = pydantic.Field(
+        description="Whether to use chain of thought reasoning."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
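A short sketch of the new flag; the prompt and temperature values are illustrative only, and the import path is assumed:

```python
from llama_cloud import LlmParameters  # assumed re-export path

llm_params = LlmParameters(
    system_prompt="Answer concisely.",
    temperature=0.2,
    use_chain_of_thought_reasoning=True,  # new in 0.1.6
)
```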
llama_cloud/types/local_eval.py CHANGED
```diff
@@ -21,17 +21,19 @@ class LocalEval(pydantic.BaseModel):
     Output of an BaseEvaluator.
     """
 
-    query: typing.Optional[str]
-    contexts: typing.Optional[typing.List[str]]
-    response: typing.Optional[str]
-    passing: typing.Optional[bool]
-    feedback: typing.Optional[str]
-    score: typing.Optional[float]
-    pairwise_source: typing.Optional[str]
+    query: typing.Optional[str] = pydantic.Field(description="Query string")
+    contexts: typing.Optional[typing.List[str]] = pydantic.Field(description="Context strings")
+    response: typing.Optional[str] = pydantic.Field(description="Response string")
+    passing: typing.Optional[bool] = pydantic.Field(description="Binary evaluation result (passing or not)")
+    feedback: typing.Optional[str] = pydantic.Field(description="Feedback or reasoning for the response")
+    score: typing.Optional[float] = pydantic.Field(description="Score for the response")
+    pairwise_source: typing.Optional[str] = pydantic.Field(
+        description="Used only for pairwise and specifies whether it is from original order of presented answers or flipped order"
+    )
     invalid_result: typing.Optional[bool] = pydantic.Field(
         description="Whether the evaluation result is an invalid one."
     )
-    invalid_reason: typing.Optional[str]
+    invalid_reason: typing.Optional[str] = pydantic.Field(description="Reason for invalid evaluation.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/local_eval_results.py CHANGED
```diff
@@ -21,7 +21,7 @@ class LocalEvalResults(pydantic.BaseModel):
     """
 
     project_id: str = pydantic.Field(description="The ID of the project.")
-    eval_set_id: typing.Optional[str]
+    eval_set_id: typing.Optional[str] = pydantic.Field(description="The ID of the local eval result set.")
     app_name: str = pydantic.Field(description="The name of the app.")
     eval_name: str = pydantic.Field(description="The name of the eval.")
     result: LocalEval = pydantic.Field(description="The eval results.")
```
llama_cloud/types/managed_ingestion_status_response.py CHANGED
```diff
@@ -17,10 +17,12 @@ except ImportError:
 
 
 class ManagedIngestionStatusResponse(pydantic.BaseModel):
-    job_id: typing.Optional[str]
-    deployment_date: typing.Optional[dt.datetime]
+    job_id: typing.Optional[str] = pydantic.Field(description="ID of the latest job.")
+    deployment_date: typing.Optional[dt.datetime] = pydantic.Field(description="Date of the deployment.")
     status: ManagedIngestionStatus = pydantic.Field(description="Status of the ingestion.")
-    error: typing.Optional[typing.List[IngestionErrorResponse]]
+    error: typing.Optional[typing.List[IngestionErrorResponse]] = pydantic.Field(
+        description="List of errors that occurred during ingestion."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/markdown_element_node_parser.py CHANGED
```diff
@@ -29,12 +29,14 @@ class MarkdownElementNodeParser(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
-    llm: typing.Optional[Llm]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
+    llm: typing.Optional[Llm] = pydantic.Field(description="LLM model to use for summarization.")
     summary_query_str: typing.Optional[str] = pydantic.Field(description="Query string to use for summarization.")
     num_workers: typing.Optional[int] = pydantic.Field(description="Num of workers for async jobs.")
     show_progress: typing.Optional[bool] = pydantic.Field(description="Whether to show progress.")
-    nested_node_parser: typing.Optional[NodeParser]
+    nested_node_parser: typing.Optional[NodeParser] = pydantic.Field(
+        description="Other types of node parsers to handle some types of nodes."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/markdown_node_parser.py CHANGED
```diff
@@ -31,7 +31,7 @@ class MarkdownNodeParser(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/metadata_filter.py CHANGED
```diff
@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .filter_operator import FilterOperator
-from .metadata_filter_value import MetadataFilterValue
+from .value import Value
 
 try:
     import pydantic
@@ -27,7 +27,7 @@ class MetadataFilter(pydantic.BaseModel):
     """
 
     key: str
-    value: typing.Optional[MetadataFilterValue]
+    value: typing.Optional[Value]
    operator: typing.Optional[FilterOperator]
 
     def json(self, **kwargs: typing.Any) -> str:
```
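Only the import and annotation change here; filters are built the same way against the consolidated `Value` alias (added as `llama_cloud/types/value.py`, +5 lines, per the file list above). A sketch, assuming the top-level re-exports and that `EQ` is a `FilterOperator` member:

```python
from llama_cloud import FilterOperator, MetadataFilter  # assumed re-export paths

flt = MetadataFilter(key="author", value="jane", operator=FilterOperator.EQ)
print(flt.json())  # e.g. {"key": "author", "value": "jane", "operator": "=="} or similar
```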
llama_cloud/types/metric_result.py CHANGED
```diff
@@ -15,9 +15,9 @@ except ImportError:
 
 
 class MetricResult(pydantic.BaseModel):
-    passing: typing.Optional[bool]
-    score: typing.Optional[float]
-    feedback: typing.Optional[str]
+    passing: typing.Optional[bool] = pydantic.Field(description="Whether the metric passed or not.")
+    score: typing.Optional[float] = pydantic.Field(description="The score for the metric.")
+    feedback: typing.Optional[str] = pydantic.Field(description="The reasoning for the metric.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/node_parser.py CHANGED
```diff
@@ -24,7 +24,7 @@ class NodeParser(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/open_ai_embedding.py CHANGED
```diff
@@ -17,20 +17,26 @@ except ImportError:
 class OpenAiEmbedding(pydantic.BaseModel):
     model_name: typing.Optional[str] = pydantic.Field(description="The name of the OpenAI embedding model.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    num_workers: typing.Optional[int]
+    num_workers: typing.Optional[int] = pydantic.Field(
+        description="The number of workers to use for async embedding calls."
+    )
     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="Additional kwargs for the OpenAI API."
     )
-    api_key: typing.Optional[str]
-    api_base: typing.Optional[str]
-    api_version: typing.Optional[str]
+    api_key: typing.Optional[str] = pydantic.Field(description="The OpenAI API key.")
+    api_base: typing.Optional[str] = pydantic.Field(description="The base URL for OpenAI API.")
+    api_version: typing.Optional[str] = pydantic.Field(description="The version for OpenAI API.")
     max_retries: typing.Optional[int] = pydantic.Field(description="Maximum number of retries.")
     timeout: typing.Optional[float] = pydantic.Field(description="Timeout for each request.")
-    default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
+        description="The default headers for API requests."
+    )
     reuse_client: typing.Optional[bool] = pydantic.Field(
         description="Reuse the OpenAI client between requests. When doing anything with large volumes of async API calls, setting this to false can improve stability."
     )
-    dimensions: typing.Optional[int]
+    dimensions: typing.Optional[int] = pydantic.Field(
+        description="The number of dimensions on the output embedding vectors. Works only with v3 embedding models."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
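Per the new `dimensions` description, that knob only applies to v3 embedding models. A hedged sketch; the model name is a real OpenAI v3 model used for illustration, not a package default:

```python
from llama_cloud import OpenAiEmbedding  # assumed re-export path

embed_config = OpenAiEmbedding(
    model_name="text-embedding-3-small",
    dimensions=256,  # shorter output vectors; not supported by pre-v3 models
)
```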
llama_cloud/types/organization.py CHANGED
```diff
@@ -20,8 +20,8 @@ class Organization(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="A name for the organization.")
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/page_splitter_node_parser.py CHANGED
```diff
@@ -24,8 +24,8 @@ class PageSplitterNodeParser(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
-    page_separator: typing.Optional[str]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
+    page_separator: typing.Optional[str] = pydantic.Field(description="Separator to split text into pages.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/parsing_job_structured_result.py ADDED
```diff
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ParsingJobStructuredResult(pydantic.BaseModel):
+    structured: typing.Any
+    job_metadata: typing.Any
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
```
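The try/except block at the top of the new file is Fern's pydantic compatibility shim: under pydantic v2 it rebinds the bundled v1 namespace (`pydantic.v1`) as `pydantic`, while under v1 the raised `ImportError` falls through to the top-level package, so the v1-style model works under either major version. Since both fields are typed `typing.Any`, the payload shape below is an assumption for illustration:

```python
from llama_cloud.types.parsing_job_structured_result import ParsingJobStructuredResult

result = ParsingJobStructuredResult.parse_obj(
    {"structured": {"title": "Q3 report"}, "job_metadata": {"credits_used": 1}}
)
print(result.structured["title"])  # "Q3 report"
# Config.frozen = True makes instances immutable; build a new one to "modify".
```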
llama_cloud/types/permission.py CHANGED
```diff
@@ -20,10 +20,10 @@ class Permission(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="A name for the permission.")
-    description: typing.Optional[str]
+    description: typing.Optional[str] = pydantic.Field(description="A description for the permission.")
     access: bool = pydantic.Field(description="Whether the permission is granted or not.")
 
     def json(self, **kwargs: typing.Any) -> str:
```