llama-cloud 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +140 -6
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/environment.py +1 -1
  4. llama_cloud/resources/__init__.py +15 -0
  5. llama_cloud/{types/token.py → resources/chat_apps/__init__.py} +0 -3
  6. llama_cloud/resources/chat_apps/client.py +630 -0
  7. llama_cloud/resources/data_sinks/client.py +12 -12
  8. llama_cloud/resources/data_sources/client.py +14 -14
  9. llama_cloud/resources/embedding_model_configs/client.py +20 -76
  10. llama_cloud/resources/evals/client.py +26 -36
  11. llama_cloud/resources/extraction/client.py +32 -32
  12. llama_cloud/resources/files/client.py +40 -44
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +66 -70
  21. llama_cloud/resources/parsing/client.py +448 -428
  22. llama_cloud/resources/pipelines/client.py +256 -344
  23. llama_cloud/resources/projects/client.py +34 -60
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +128 -6
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +3 -3
  32. llama_cloud/types/azure_open_ai_embedding.py +6 -12
  33. llama_cloud/types/base_prompt_template.py +2 -6
  34. llama_cloud/types/bedrock_embedding.py +6 -12
  35. llama_cloud/types/character_splitter.py +2 -4
  36. llama_cloud/types/chat_app.py +44 -0
  37. llama_cloud/types/chat_app_response.py +41 -0
  38. llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -15
  39. llama_cloud/types/cloud_box_data_source.py +6 -12
  40. llama_cloud/types/cloud_confluence_data_source.py +6 -6
  41. llama_cloud/types/cloud_document.py +1 -3
  42. llama_cloud/types/cloud_document_create.py +1 -3
  43. llama_cloud/types/cloud_jira_data_source.py +4 -6
  44. llama_cloud/types/cloud_notion_page_data_source.py +2 -2
  45. llama_cloud/types/cloud_one_drive_data_source.py +3 -5
  46. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  47. llama_cloud/types/cloud_s_3_data_source.py +4 -8
  48. llama_cloud/types/cloud_sharepoint_data_source.py +6 -8
  49. llama_cloud/types/cloud_slack_data_source.py +6 -6
  50. llama_cloud/types/code_splitter.py +1 -1
  51. llama_cloud/types/cohere_embedding.py +3 -7
  52. llama_cloud/types/composite_retrieval_mode.py +21 -0
  53. llama_cloud/types/composite_retrieval_result.py +38 -0
  54. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  55. llama_cloud/types/data_sink.py +4 -4
  56. llama_cloud/types/data_sink_component.py +20 -0
  57. llama_cloud/types/data_source.py +5 -7
  58. llama_cloud/types/data_source_component.py +28 -0
  59. llama_cloud/types/data_source_create.py +1 -3
  60. llama_cloud/types/edit_suggestion.py +39 -0
  61. llama_cloud/types/embedding_model_config.py +2 -2
  62. llama_cloud/types/embedding_model_config_update.py +2 -4
  63. llama_cloud/types/eval_dataset.py +2 -2
  64. llama_cloud/types/eval_dataset_job_record.py +8 -13
  65. llama_cloud/types/eval_execution_params_override.py +2 -6
  66. llama_cloud/types/eval_question.py +2 -2
  67. llama_cloud/types/extract_agent.py +45 -0
  68. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  69. llama_cloud/types/extract_config.py +40 -0
  70. llama_cloud/types/extract_job.py +35 -0
  71. llama_cloud/types/extract_job_create.py +40 -0
  72. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  73. llama_cloud/types/extract_mode.py +17 -0
  74. llama_cloud/types/extract_resultset.py +46 -0
  75. llama_cloud/types/extract_resultset_data.py +11 -0
  76. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  77. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  78. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  79. llama_cloud/types/extraction_result.py +2 -2
  80. llama_cloud/types/extraction_schema.py +3 -5
  81. llama_cloud/types/file.py +9 -14
  82. llama_cloud/types/filter_condition.py +9 -1
  83. llama_cloud/types/filter_operator.py +6 -2
  84. llama_cloud/types/gemini_embedding.py +6 -10
  85. llama_cloud/types/hugging_face_inference_api_embedding.py +11 -27
  86. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  87. llama_cloud/types/image_block.py +35 -0
  88. llama_cloud/types/input_message.py +2 -4
  89. llama_cloud/types/job_names.py +89 -0
  90. llama_cloud/types/job_record.py +57 -0
  91. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  92. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  93. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  94. llama_cloud/types/llama_parse_parameters.py +4 -0
  95. llama_cloud/types/llm.py +3 -4
  96. llama_cloud/types/llm_model_data.py +1 -0
  97. llama_cloud/types/llm_parameters.py +3 -5
  98. llama_cloud/types/local_eval.py +8 -10
  99. llama_cloud/types/local_eval_results.py +1 -1
  100. llama_cloud/types/managed_ingestion_status.py +4 -0
  101. llama_cloud/types/managed_ingestion_status_response.py +4 -5
  102. llama_cloud/types/markdown_element_node_parser.py +3 -5
  103. llama_cloud/types/markdown_node_parser.py +1 -1
  104. llama_cloud/types/metadata_filter.py +2 -2
  105. llama_cloud/types/metadata_filter_value.py +5 -0
  106. llama_cloud/types/metric_result.py +3 -3
  107. llama_cloud/types/node_parser.py +1 -1
  108. llama_cloud/types/object_type.py +4 -0
  109. llama_cloud/types/open_ai_embedding.py +6 -12
  110. llama_cloud/types/organization.py +7 -2
  111. llama_cloud/types/page_splitter_node_parser.py +2 -2
  112. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  113. llama_cloud/types/paginated_report_response.py +35 -0
  114. llama_cloud/types/parse_plan_level.py +21 -0
  115. llama_cloud/types/permission.py +3 -3
  116. llama_cloud/types/pipeline.py +7 -17
  117. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  118. llama_cloud/types/pipeline_create.py +8 -16
  119. llama_cloud/types/pipeline_data_source.py +7 -13
  120. llama_cloud/types/pipeline_data_source_component.py +28 -0
  121. llama_cloud/types/pipeline_data_source_create.py +1 -3
  122. llama_cloud/types/pipeline_deployment.py +4 -4
  123. llama_cloud/types/pipeline_file.py +13 -24
  124. llama_cloud/types/pipeline_file_create.py +1 -3
  125. llama_cloud/types/playground_session.py +4 -4
  126. llama_cloud/types/preset_retrieval_params.py +8 -14
  127. llama_cloud/types/presigned_url.py +1 -3
  128. llama_cloud/types/progress_event.py +44 -0
  129. llama_cloud/types/progress_event_status.py +33 -0
  130. llama_cloud/types/project.py +2 -2
  131. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  132. llama_cloud/types/prompt_spec.py +3 -5
  133. llama_cloud/types/related_node_info.py +2 -2
  134. llama_cloud/types/related_node_info_node_type.py +7 -0
  135. llama_cloud/types/report.py +33 -0
  136. llama_cloud/types/report_block.py +34 -0
  137. llama_cloud/types/report_block_dependency.py +29 -0
  138. llama_cloud/types/report_create_response.py +31 -0
  139. llama_cloud/types/report_event_item.py +40 -0
  140. llama_cloud/types/report_event_item_event_data.py +45 -0
  141. llama_cloud/types/report_event_type.py +37 -0
  142. llama_cloud/types/report_metadata.py +43 -0
  143. llama_cloud/types/report_plan.py +36 -0
  144. llama_cloud/types/report_plan_block.py +36 -0
  145. llama_cloud/types/report_query.py +33 -0
  146. llama_cloud/types/report_response.py +41 -0
  147. llama_cloud/types/report_state.py +37 -0
  148. llama_cloud/types/report_state_event.py +38 -0
  149. llama_cloud/types/report_update_event.py +38 -0
  150. llama_cloud/types/retrieve_results.py +1 -1
  151. llama_cloud/types/retriever.py +45 -0
  152. llama_cloud/types/retriever_create.py +37 -0
  153. llama_cloud/types/retriever_pipeline.py +37 -0
  154. llama_cloud/types/role.py +3 -3
  155. llama_cloud/types/sentence_splitter.py +2 -4
  156. llama_cloud/types/status_enum.py +4 -0
  157. llama_cloud/types/supported_llm_model_names.py +4 -0
  158. llama_cloud/types/text_block.py +31 -0
  159. llama_cloud/types/text_node.py +15 -8
  160. llama_cloud/types/token_text_splitter.py +1 -1
  161. llama_cloud/types/usage_metric_response.py +34 -0
  162. llama_cloud/types/user_job_record.py +32 -0
  163. llama_cloud/types/user_organization.py +5 -9
  164. llama_cloud/types/user_organization_create.py +4 -4
  165. llama_cloud/types/user_organization_delete.py +2 -2
  166. llama_cloud/types/user_organization_role.py +2 -2
  167. llama_cloud/types/vertex_text_embedding.py +5 -9
  168. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/METADATA +2 -1
  169. llama_cloud-0.1.7.dist-info/RECORD +310 -0
  170. llama_cloud/types/value.py +0 -5
  171. llama_cloud-0.1.6.dist-info/RECORD +0 -241
  172. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/LICENSE +0 -0
  173. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/WHEEL +0 -0
llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py ADDED
@@ -0,0 +1,33 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from __future__ import annotations
+
+ import typing
+
+ import typing_extensions
+
+ from .image_block import ImageBlock
+ from .text_block import TextBlock
+
+
+ class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image(ImageBlock):
+     block_type: typing_extensions.Literal["image"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
+ class LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text(TextBlock):
+     block_type: typing_extensions.Literal["text"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
+ LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem = typing.Union[
+     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image, LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text
+ ]
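
This union mirrors llama-index-core's content-block chat messages: block_type is the discriminator, and smart_union picks the matching variant. A minimal sketch of constructing the text variant; the text payload field is an assumption based on the new text_block.py, and the re-export from llama_cloud.types follows the usual Fern layout:

from llama_cloud.types import LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text

# "text" is the confirmed discriminator value; the text= field is assumed from text_block.py.
block = LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text(block_type="text", text="Summarize this report.")
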
llama_cloud/types/llama_parse_parameters.py CHANGED
@@ -34,7 +34,9 @@ class LlamaParseParameters(pydantic.BaseModel):
      gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
      gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
      do_not_unroll_columns: typing.Optional[bool]
+     extract_layout: typing.Optional[bool]
      html_make_all_elements_visible: typing.Optional[bool]
+     html_remove_navigation_elements: typing.Optional[bool]
      html_remove_fixed_elements: typing.Optional[bool]
      guess_xlsx_sheet_name: typing.Optional[bool]
      page_separator: typing.Optional[str]
@@ -72,6 +74,8 @@ class LlamaParseParameters(pydantic.BaseModel):
      structured_output_json_schema: typing.Optional[str]
      structured_output_json_schema_name: typing.Optional[str]
      max_pages: typing.Optional[int]
+     max_pages_enforced: typing.Optional[int]
+     extract_charts: typing.Optional[bool]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
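
Four new optional parsing flags land in this model. A sketch of setting them; the field names are taken from the diff above, while their exact server-side semantics are not documented here:

from llama_cloud.types.llama_parse_parameters import LlamaParseParameters

params = LlamaParseParameters(
    extract_layout=True,                   # new in 0.1.7
    extract_charts=True,                   # new in 0.1.7
    html_remove_navigation_elements=True,  # new in 0.1.7
    max_pages=50,
    max_pages_enforced=60,                 # new in 0.1.7; relationship to max_pages assumed, not documented here
)
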
llama_cloud/types/llm.py CHANGED
@@ -34,7 +34,7 @@ class Llm(pydantic.BaseModel):
      """
 
      callback_manager: typing.Optional[typing.Any]
-     system_prompt: typing.Optional[str] = pydantic.Field(description="System prompt for LLM calls.")
+     system_prompt: typing.Optional[str]
      messages_to_prompt: typing.Optional[str] = pydantic.Field(
          description="Function to convert a list of messages to an LLM prompt."
      )
@@ -43,9 +43,8 @@ class Llm(pydantic.BaseModel):
      )
      output_parser: typing.Optional[typing.Any]
      pydantic_program_mode: typing.Optional[PydanticProgramMode]
-     query_wrapper_prompt: typing.Optional[BasePromptTemplate] = pydantic.Field(
-         description="Query wrapper prompt for LLM calls."
-     )
+     query_wrapper_prompt: typing.Optional[BasePromptTemplate]
+     class_name: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llm_model_data.py CHANGED
@@ -22,6 +22,7 @@ class LlmModelData(pydantic.BaseModel):
      name: str = pydantic.Field(description="The name of the LLM model.")
      description: str = pydantic.Field(description="The description of the LLM model.")
      multi_modal: bool = pydantic.Field(description="Whether the model supports multi-modal image input")
+     model_name: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llm_parameters.py CHANGED
@@ -19,11 +19,9 @@ class LlmParameters(pydantic.BaseModel):
      model_name: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
          description="The name of the model to use for LLM completions."
      )
-     system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the completion.")
-     temperature: typing.Optional[float] = pydantic.Field(description="The temperature value for the model.")
-     use_chain_of_thought_reasoning: typing.Optional[bool] = pydantic.Field(
-         description="Whether to use chain of thought reasoning."
-     )
+     system_prompt: typing.Optional[str]
+     temperature: typing.Optional[float]
+     use_chain_of_thought_reasoning: typing.Optional[bool]
      class_name: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/local_eval.py CHANGED
@@ -21,19 +21,17 @@ class LocalEval(pydantic.BaseModel):
      Output of an BaseEvaluator.
      """
 
-     query: typing.Optional[str] = pydantic.Field(description="Query string")
-     contexts: typing.Optional[typing.List[str]] = pydantic.Field(description="Context strings")
-     response: typing.Optional[str] = pydantic.Field(description="Response string")
-     passing: typing.Optional[bool] = pydantic.Field(description="Binary evaluation result (passing or not)")
-     feedback: typing.Optional[str] = pydantic.Field(description="Feedback or reasoning for the response")
-     score: typing.Optional[float] = pydantic.Field(description="Score for the response")
-     pairwise_source: typing.Optional[str] = pydantic.Field(
-         description="Used only for pairwise and specifies whether it is from original order of presented answers or flipped order"
-     )
+     query: typing.Optional[str]
+     contexts: typing.Optional[typing.List[str]]
+     response: typing.Optional[str]
+     passing: typing.Optional[bool]
+     feedback: typing.Optional[str]
+     score: typing.Optional[float]
+     pairwise_source: typing.Optional[str]
      invalid_result: typing.Optional[bool] = pydantic.Field(
          description="Whether the evaluation result is an invalid one."
      )
-     invalid_reason: typing.Optional[str] = pydantic.Field(description="Reason for invalid evaluation.")
+     invalid_reason: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/local_eval_results.py CHANGED
@@ -21,7 +21,7 @@ class LocalEvalResults(pydantic.BaseModel):
      """
 
      project_id: str = pydantic.Field(description="The ID of the project.")
-     eval_set_id: typing.Optional[str] = pydantic.Field(description="The ID of the local eval result set.")
+     eval_set_id: typing.Optional[str]
      app_name: str = pydantic.Field(description="The name of the app.")
      eval_name: str = pydantic.Field(description="The name of the eval.")
      result: LocalEval = pydantic.Field(description="The eval results.")
llama_cloud/types/managed_ingestion_status.py CHANGED
@@ -16,6 +16,7 @@ class ManagedIngestionStatus(str, enum.Enum):
      SUCCESS = "SUCCESS"
      ERROR = "ERROR"
      PARTIAL_SUCCESS = "PARTIAL_SUCCESS"
+     CANCELLED = "CANCELLED"
 
      def visit(
          self,
@@ -24,6 +25,7 @@ class ManagedIngestionStatus(str, enum.Enum):
          success: typing.Callable[[], T_Result],
          error: typing.Callable[[], T_Result],
          partial_success: typing.Callable[[], T_Result],
+         cancelled: typing.Callable[[], T_Result],
      ) -> T_Result:
          if self is ManagedIngestionStatus.NOT_STARTED:
              return not_started()
@@ -35,3 +37,5 @@ class ManagedIngestionStatus(str, enum.Enum):
              return error()
          if self is ManagedIngestionStatus.PARTIAL_SUCCESS:
              return partial_success()
+         if self is ManagedIngestionStatus.CANCELLED:
+             return cancelled()
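
Because visit() gains a required cancelled callback, every exhaustive call site must add a branch when upgrading. A sketch; the members not shown in this hunk (e.g. IN_PROGRESS) are assumed to carry over from 0.1.6:

from llama_cloud.types.managed_ingestion_status import ManagedIngestionStatus

label = ManagedIngestionStatus.CANCELLED.visit(
    not_started=lambda: "queued",
    in_progress=lambda: "running",   # member assumed from 0.1.6
    success=lambda: "done",
    error=lambda: "failed",
    partial_success=lambda: "partially done",
    cancelled=lambda: "cancelled",   # new required branch in 0.1.7
)
assert label == "cancelled"
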
llama_cloud/types/managed_ingestion_status_response.py CHANGED
@@ -17,12 +17,11 @@ except ImportError:
 
 
  class ManagedIngestionStatusResponse(pydantic.BaseModel):
-     job_id: typing.Optional[str] = pydantic.Field(description="ID of the latest job.")
-     deployment_date: typing.Optional[dt.datetime] = pydantic.Field(description="Date of the deployment.")
+     job_id: typing.Optional[str]
+     deployment_date: typing.Optional[dt.datetime]
      status: ManagedIngestionStatus = pydantic.Field(description="Status of the ingestion.")
-     error: typing.Optional[typing.List[IngestionErrorResponse]] = pydantic.Field(
-         description="List of errors that occurred during ingestion."
-     )
+     error: typing.Optional[typing.List[IngestionErrorResponse]]
+     effective_at: typing.Optional[dt.datetime]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/markdown_element_node_parser.py CHANGED
@@ -29,14 +29,12 @@ class MarkdownElementNodeParser(pydantic.BaseModel):
      )
      include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
      callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
-     llm: typing.Optional[Llm] = pydantic.Field(description="LLM model to use for summarization.")
+     id_func: typing.Optional[str]
+     llm: typing.Optional[Llm]
      summary_query_str: typing.Optional[str] = pydantic.Field(description="Query string to use for summarization.")
      num_workers: typing.Optional[int] = pydantic.Field(description="Num of workers for async jobs.")
      show_progress: typing.Optional[bool] = pydantic.Field(description="Whether to show progress.")
-     nested_node_parser: typing.Optional[NodeParser] = pydantic.Field(
-         description="Other types of node parsers to handle some types of nodes."
-     )
+     nested_node_parser: typing.Optional[NodeParser]
      class_name: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/markdown_node_parser.py CHANGED
@@ -31,7 +31,7 @@ class MarkdownNodeParser(pydantic.BaseModel):
      )
      include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
      callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
+     id_func: typing.Optional[str]
      class_name: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/metadata_filter.py CHANGED
@@ -5,7 +5,7 @@ import typing
 
  from ..core.datetime_utils import serialize_datetime
  from .filter_operator import FilterOperator
- from .value import Value
+ from .metadata_filter_value import MetadataFilterValue
 
  try:
      import pydantic
@@ -27,7 +27,7 @@ class MetadataFilter(pydantic.BaseModel):
      """
 
      key: str
-     value: typing.Optional[Value]
+     value: typing.Optional[MetadataFilterValue]
      operator: typing.Optional[FilterOperator]
 
      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/metadata_filter_value.py ADDED
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ MetadataFilterValue = typing.Union[int, float, str, typing.List[str], typing.List[float], typing.List[int]]
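
MetadataFilter.value (changed above) now points at this union, so a filter accepts a scalar or a homogeneous list. A sketch; FilterOperator.EQ and FilterOperator.IN are assumed to exist as in 0.1.6's filter_operator.py:

from llama_cloud.types.filter_operator import FilterOperator
from llama_cloud.types.metadata_filter import MetadataFilter

# A scalar value satisfies the union...
year_filter = MetadataFilter(key="year", value=2024, operator=FilterOperator.EQ)
# ...and so does a list of strings.
author_filter = MetadataFilter(key="author", value=["alice", "bob"], operator=FilterOperator.IN)
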
llama_cloud/types/metric_result.py CHANGED
@@ -15,9 +15,9 @@ except ImportError:
 
 
  class MetricResult(pydantic.BaseModel):
-     passing: typing.Optional[bool] = pydantic.Field(description="Whether the metric passed or not.")
-     score: typing.Optional[float] = pydantic.Field(description="The score for the metric.")
-     feedback: typing.Optional[str] = pydantic.Field(description="The reasoning for the metric.")
+     passing: typing.Optional[bool]
+     score: typing.Optional[float]
+     feedback: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/node_parser.py CHANGED
@@ -24,7 +24,7 @@ class NodeParser(pydantic.BaseModel):
      )
      include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
      callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
+     id_func: typing.Optional[str]
      class_name: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/object_type.py CHANGED
@@ -11,6 +11,7 @@ class ObjectType(str, enum.Enum):
      TWO = "2"
      THREE = "3"
      FOUR = "4"
+     FIVE = "5"
 
      def visit(
          self,
@@ -18,6 +19,7 @@ class ObjectType(str, enum.Enum):
          two: typing.Callable[[], T_Result],
          three: typing.Callable[[], T_Result],
          four: typing.Callable[[], T_Result],
+         five: typing.Callable[[], T_Result],
      ) -> T_Result:
          if self is ObjectType.ONE:
              return one()
@@ -27,3 +29,5 @@ class ObjectType(str, enum.Enum):
              return three()
          if self is ObjectType.FOUR:
              return four()
+         if self is ObjectType.FIVE:
+             return five()
llama_cloud/types/open_ai_embedding.py CHANGED
@@ -17,26 +17,20 @@
  class OpenAiEmbedding(pydantic.BaseModel):
      model_name: typing.Optional[str] = pydantic.Field(description="The name of the OpenAI embedding model.")
      embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-     num_workers: typing.Optional[int] = pydantic.Field(
-         description="The number of workers to use for async embedding calls."
-     )
+     num_workers: typing.Optional[int]
      additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
          description="Additional kwargs for the OpenAI API."
      )
-     api_key: typing.Optional[str] = pydantic.Field(description="The OpenAI API key.")
-     api_base: typing.Optional[str] = pydantic.Field(description="The base URL for OpenAI API.")
-     api_version: typing.Optional[str] = pydantic.Field(description="The version for OpenAI API.")
+     api_key: typing.Optional[str]
+     api_base: typing.Optional[str]
+     api_version: typing.Optional[str]
      max_retries: typing.Optional[int] = pydantic.Field(description="Maximum number of retries.")
      timeout: typing.Optional[float] = pydantic.Field(description="Timeout for each request.")
-     default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
-         description="The default headers for API requests."
-     )
+     default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
      reuse_client: typing.Optional[bool] = pydantic.Field(
          description="Reuse the OpenAI client between requests. When doing anything with large volumes of async API calls, setting this to false can improve stability."
      )
-     dimensions: typing.Optional[int] = pydantic.Field(
-         description="The number of dimensions on the output embedding vectors. Works only with v3 embedding models."
-     )
+     dimensions: typing.Optional[int]
      class_name: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/organization.py CHANGED
@@ -4,6 +4,7 @@ import datetime as dt
  import typing
 
  from ..core.datetime_utils import serialize_datetime
+ from .parse_plan_level import ParsePlanLevel
 
  try:
      import pydantic
@@ -20,9 +21,13 @@ class Organization(pydantic.BaseModel):
      """
 
      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="A name for the organization.")
+     stripe_customer_id: typing.Optional[str]
+     parse_plan_level: typing.Optional[ParsePlanLevel] = pydantic.Field(
+         description="Whether the organization is a Parse Premium customer."
+     )
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/page_splitter_node_parser.py CHANGED
@@ -24,8 +24,8 @@ class PageSplitterNodeParser(pydantic.BaseModel):
      )
      include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
      callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
-     page_separator: typing.Optional[str] = pydantic.Field(description="Separator to split text into pages.")
+     id_func: typing.Optional[str]
+     page_separator: typing.Optional[str]
      class_name: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/paginated_jobs_history_with_metrics.py ADDED
@@ -0,0 +1,35 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .job_record_with_usage_metrics import JobRecordWithUsageMetrics
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class PaginatedJobsHistoryWithMetrics(pydantic.BaseModel):
+     jobs: typing.List[JobRecordWithUsageMetrics]
+     total_count: int
+     limit: int
+     offset: int
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
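
Both paginated models in this release share the limit/offset/total_count shape, so one offset-paging loop covers them. A sketch; fetch_page is a hypothetical stand-in for whichever jobs-client method returns this model, since the new jobs client's method names are not shown in this diff:

import typing

from llama_cloud.types.job_record_with_usage_metrics import JobRecordWithUsageMetrics
from llama_cloud.types.paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics

def iter_jobs(
    fetch_page: typing.Callable[[int, int], PaginatedJobsHistoryWithMetrics],  # hypothetical: (limit, offset) -> page
    page_size: int = 50,
) -> typing.Iterator[JobRecordWithUsageMetrics]:
    # Walk offset-based pages until total_count is exhausted.
    offset = 0
    while True:
        page = fetch_page(page_size, offset)
        yield from page.jobs
        offset += len(page.jobs)
        if not page.jobs or offset >= page.total_count:
            break
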
llama_cloud/types/paginated_report_response.py ADDED
@@ -0,0 +1,35 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .report_response import ReportResponse
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class PaginatedReportResponse(pydantic.BaseModel):
+     report_responses: typing.List[ReportResponse]
+     limit: int
+     offset: int
+     total_count: int
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/parse_plan_level.py ADDED
@@ -0,0 +1,21 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class ParsePlanLevel(str, enum.Enum):
+     """
+     Enum for the Parse plan level.
+     """
+
+     DEFAULT = "DEFAULT"
+     PREMIUM = "PREMIUM"
+
+     def visit(self, default: typing.Callable[[], T_Result], premium: typing.Callable[[], T_Result]) -> T_Result:
+         if self is ParsePlanLevel.DEFAULT:
+             return default()
+         if self is ParsePlanLevel.PREMIUM:
+             return premium()
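
This enum backs the new Organization.parse_plan_level field added above. A sketch of branching on it; treating an unset value as DEFAULT is an assumption, not documented in this diff:

from llama_cloud.types.organization import Organization
from llama_cloud.types.parse_plan_level import ParsePlanLevel

def parse_tier(org: Organization) -> str:
    level = org.parse_plan_level or ParsePlanLevel.DEFAULT  # fallback assumed
    return level.visit(
        default=lambda: "standard parsing",
        premium=lambda: "Parse Premium",
    )
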
llama_cloud/types/permission.py CHANGED
@@ -20,10 +20,10 @@ class Permission(pydantic.BaseModel):
      """
 
      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="A name for the permission.")
-     description: typing.Optional[str] = pydantic.Field(description="A description for the permission.")
+     description: typing.Optional[str]
      access: bool = pydantic.Field(description="Whether the permission is granted or not.")
 
      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/pipeline.py CHANGED
@@ -29,26 +29,20 @@ class Pipeline(pydantic.BaseModel):
      """
 
      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str
      project_id: str
-     embedding_model_config_id: typing.Optional[str] = pydantic.Field(
-         description="The ID of the EmbeddingModelConfig this pipeline is using."
-     )
+     embedding_model_config_id: typing.Optional[str]
      pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
          description="Type of pipeline. Either PLAYGROUND or MANAGED."
      )
-     managed_pipeline_id: typing.Optional[str] = pydantic.Field(
-         description="The ID of the ManagedPipeline this playground pipeline is linked to."
-     )
+     managed_pipeline_id: typing.Optional[str]
      embedding_config: PipelineEmbeddingConfig
      configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
          description="Deprecated don't use it, List of configured transformations."
      )
-     config_hash: typing.Optional[PipelineConfigurationHashes] = pydantic.Field(
-         description="Hashes for the configuration of the pipeline."
-     )
+     config_hash: typing.Optional[PipelineConfigurationHashes]
      transform_config: typing.Optional[PipelineTransformConfig] = pydantic.Field(
          description="Configuration for the transformation."
      )
@@ -58,12 +52,8 @@ class Pipeline(pydantic.BaseModel):
      eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
          description="Eval parameters for the pipeline."
      )
-     llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
-         description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
-     )
-     data_sink: typing.Optional[DataSink] = pydantic.Field(
-         description="The data sink for the pipeline. If None, the pipeline will use the fully managed data sink."
-     )
+     llama_parse_parameters: typing.Optional[LlamaParseParameters]
+     data_sink: typing.Optional[DataSink]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_configuration_hashes.py CHANGED
@@ -19,9 +19,9 @@ class PipelineConfigurationHashes(pydantic.BaseModel):
      Hashes for the configuration of a pipeline.
      """
 
-     embedding_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the embedding config.")
-     parsing_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the llama parse parameters.")
-     transform_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the transform config.")
+     embedding_config_hash: typing.Optional[str]
+     parsing_config_hash: typing.Optional[str]
+     transform_config_hash: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_create.py CHANGED
@@ -31,32 +31,24 @@ class PipelineCreate(pydantic.BaseModel):
      transform_config: typing.Optional[PipelineCreateTransformConfig] = pydantic.Field(
          description="Configuration for the transformation."
      )
-     configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
-         description="Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline."
-     )
-     data_sink_id: typing.Optional[str] = pydantic.Field(
-         description="Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID."
-     )
-     embedding_model_config_id: typing.Optional[str] = pydantic.Field(
-         description="Embedding model config ID. When provided instead of embedding_config, the embedding model config will be looked up by ID."
-     )
-     data_sink: typing.Optional[DataSinkCreate] = pydantic.Field(
-         description="Data sink. When provided instead of data_sink_id, the data sink will be created."
-     )
+     configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]
+     data_sink_id: typing.Optional[str]
+     embedding_model_config_id: typing.Optional[str]
+     data_sink: typing.Optional[DataSinkCreate]
      preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = pydantic.Field(
          description="Preset retrieval parameters for the pipeline."
      )
      eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
          description="Eval parameters for the pipeline."
      )
-     llama_parse_parameters: typing.Optional[LlamaParseParameters]
+     llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
+         description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
+     )
      name: str
      pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
          description="Type of pipeline. Either PLAYGROUND or MANAGED."
      )
-     managed_pipeline_id: typing.Optional[str] = pydantic.Field(
-         description="The ID of the ManagedPipeline this playground pipeline is linked to."
-     )
+     managed_pipeline_id: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_data_source.py CHANGED
@@ -5,7 +5,7 @@ import typing
 
  from ..core.datetime_utils import serialize_datetime
  from .configurable_data_source_names import ConfigurableDataSourceNames
- from .data_source_create_component import DataSourceCreateComponent
+ from .pipeline_data_source_component import PipelineDataSourceComponent
  from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 
  try:
@@ -23,24 +23,18 @@ class PipelineDataSource(pydantic.BaseModel):
      """
 
      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="The name of the data source.")
      source_type: ConfigurableDataSourceNames
-     custom_metadata: typing.Optional[
-         typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]
-     ] = pydantic.Field(description="Custom metadata that will be present on all data loaded from the data source")
-     component: DataSourceCreateComponent = pydantic.Field(description="Component that implements the data source")
+     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
+     component: PipelineDataSourceComponent = pydantic.Field(description="Component that implements the data source")
      project_id: str
      data_source_id: str = pydantic.Field(description="The ID of the data source.")
      pipeline_id: str = pydantic.Field(description="The ID of the pipeline.")
      last_synced_at: dt.datetime = pydantic.Field(description="The last time the data source was automatically synced.")
-     sync_interval: typing.Optional[float] = pydantic.Field(
-         description="The interval at which the data source should be synced."
-     )
-     sync_schedule_set_by: typing.Optional[str] = pydantic.Field(
-         description="The id of the user who set the sync schedule."
-     )
+     sync_interval: typing.Optional[float]
+     sync_schedule_set_by: typing.Optional[str]
 
      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_data_source_component.py ADDED
@@ -0,0 +1,28 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+ from .cloud_box_data_source import CloudBoxDataSource
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
+ from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+ from .cloud_jira_data_source import CloudJiraDataSource
+ from .cloud_notion_page_data_source import CloudNotionPageDataSource
+ from .cloud_one_drive_data_source import CloudOneDriveDataSource
+ from .cloud_s_3_data_source import CloudS3DataSource
+ from .cloud_sharepoint_data_source import CloudSharepointDataSource
+ from .cloud_slack_data_source import CloudSlackDataSource
+
+ PipelineDataSourceComponent = typing.Union[
+     typing.Dict[str, typing.Any],
+     CloudS3DataSource,
+     CloudAzStorageBlobDataSource,
+     CloudGoogleDriveDataSource,
+     CloudOneDriveDataSource,
+     CloudSharepointDataSource,
+     CloudSlackDataSource,
+     CloudNotionPageDataSource,
+     CloudConfluenceDataSource,
+     CloudJiraDataSource,
+     CloudBoxDataSource,
+ ]
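
Because the union's first member is typing.Dict[str, typing.Any], PipelineDataSource.component (changed above) validates against either a typed data-source model or a raw dict. A sketch; the bucket and prefix fields on CloudS3DataSource are assumptions carried over from 0.1.6 and are not shown in this diff:

from llama_cloud.types.cloud_s_3_data_source import CloudS3DataSource

typed_component = CloudS3DataSource(bucket="my-bucket")        # typed variant; field name assumed
raw_component = {"bucket": "my-bucket", "prefix": "reports/"}  # dict fallback accepted by the union
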