llama-cloud 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +140 -6
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/environment.py +1 -1
  4. llama_cloud/resources/__init__.py +15 -0
  5. llama_cloud/{types/token.py → resources/chat_apps/__init__.py} +0 -3
  6. llama_cloud/resources/chat_apps/client.py +630 -0
  7. llama_cloud/resources/data_sinks/client.py +12 -12
  8. llama_cloud/resources/data_sources/client.py +14 -14
  9. llama_cloud/resources/embedding_model_configs/client.py +20 -76
  10. llama_cloud/resources/evals/client.py +26 -36
  11. llama_cloud/resources/extraction/client.py +32 -32
  12. llama_cloud/resources/files/client.py +40 -44
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +66 -70
  21. llama_cloud/resources/parsing/client.py +448 -428
  22. llama_cloud/resources/pipelines/client.py +256 -344
  23. llama_cloud/resources/projects/client.py +34 -60
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +128 -6
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +3 -3
  32. llama_cloud/types/azure_open_ai_embedding.py +6 -12
  33. llama_cloud/types/base_prompt_template.py +2 -6
  34. llama_cloud/types/bedrock_embedding.py +6 -12
  35. llama_cloud/types/character_splitter.py +2 -4
  36. llama_cloud/types/chat_app.py +44 -0
  37. llama_cloud/types/chat_app_response.py +41 -0
  38. llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -15
  39. llama_cloud/types/cloud_box_data_source.py +6 -12
  40. llama_cloud/types/cloud_confluence_data_source.py +6 -6
  41. llama_cloud/types/cloud_document.py +1 -3
  42. llama_cloud/types/cloud_document_create.py +1 -3
  43. llama_cloud/types/cloud_jira_data_source.py +4 -6
  44. llama_cloud/types/cloud_notion_page_data_source.py +2 -2
  45. llama_cloud/types/cloud_one_drive_data_source.py +3 -5
  46. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  47. llama_cloud/types/cloud_s_3_data_source.py +4 -8
  48. llama_cloud/types/cloud_sharepoint_data_source.py +6 -8
  49. llama_cloud/types/cloud_slack_data_source.py +6 -6
  50. llama_cloud/types/code_splitter.py +1 -1
  51. llama_cloud/types/cohere_embedding.py +3 -7
  52. llama_cloud/types/composite_retrieval_mode.py +21 -0
  53. llama_cloud/types/composite_retrieval_result.py +38 -0
  54. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  55. llama_cloud/types/data_sink.py +4 -4
  56. llama_cloud/types/data_sink_component.py +20 -0
  57. llama_cloud/types/data_source.py +5 -7
  58. llama_cloud/types/data_source_component.py +28 -0
  59. llama_cloud/types/data_source_create.py +1 -3
  60. llama_cloud/types/edit_suggestion.py +39 -0
  61. llama_cloud/types/embedding_model_config.py +2 -2
  62. llama_cloud/types/embedding_model_config_update.py +2 -4
  63. llama_cloud/types/eval_dataset.py +2 -2
  64. llama_cloud/types/eval_dataset_job_record.py +8 -13
  65. llama_cloud/types/eval_execution_params_override.py +2 -6
  66. llama_cloud/types/eval_question.py +2 -2
  67. llama_cloud/types/extract_agent.py +45 -0
  68. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  69. llama_cloud/types/extract_config.py +40 -0
  70. llama_cloud/types/extract_job.py +35 -0
  71. llama_cloud/types/extract_job_create.py +40 -0
  72. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  73. llama_cloud/types/extract_mode.py +17 -0
  74. llama_cloud/types/extract_resultset.py +46 -0
  75. llama_cloud/types/extract_resultset_data.py +11 -0
  76. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  77. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  78. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  79. llama_cloud/types/extraction_result.py +2 -2
  80. llama_cloud/types/extraction_schema.py +3 -5
  81. llama_cloud/types/file.py +9 -14
  82. llama_cloud/types/filter_condition.py +9 -1
  83. llama_cloud/types/filter_operator.py +6 -2
  84. llama_cloud/types/gemini_embedding.py +6 -10
  85. llama_cloud/types/hugging_face_inference_api_embedding.py +11 -27
  86. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  87. llama_cloud/types/image_block.py +35 -0
  88. llama_cloud/types/input_message.py +2 -4
  89. llama_cloud/types/job_names.py +89 -0
  90. llama_cloud/types/job_record.py +57 -0
  91. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  92. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  93. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  94. llama_cloud/types/llama_parse_parameters.py +4 -0
  95. llama_cloud/types/llm.py +3 -4
  96. llama_cloud/types/llm_model_data.py +1 -0
  97. llama_cloud/types/llm_parameters.py +3 -5
  98. llama_cloud/types/local_eval.py +8 -10
  99. llama_cloud/types/local_eval_results.py +1 -1
  100. llama_cloud/types/managed_ingestion_status.py +4 -0
  101. llama_cloud/types/managed_ingestion_status_response.py +4 -5
  102. llama_cloud/types/markdown_element_node_parser.py +3 -5
  103. llama_cloud/types/markdown_node_parser.py +1 -1
  104. llama_cloud/types/metadata_filter.py +2 -2
  105. llama_cloud/types/metadata_filter_value.py +5 -0
  106. llama_cloud/types/metric_result.py +3 -3
  107. llama_cloud/types/node_parser.py +1 -1
  108. llama_cloud/types/object_type.py +4 -0
  109. llama_cloud/types/open_ai_embedding.py +6 -12
  110. llama_cloud/types/organization.py +7 -2
  111. llama_cloud/types/page_splitter_node_parser.py +2 -2
  112. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  113. llama_cloud/types/paginated_report_response.py +35 -0
  114. llama_cloud/types/parse_plan_level.py +21 -0
  115. llama_cloud/types/permission.py +3 -3
  116. llama_cloud/types/pipeline.py +7 -17
  117. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  118. llama_cloud/types/pipeline_create.py +8 -16
  119. llama_cloud/types/pipeline_data_source.py +7 -13
  120. llama_cloud/types/pipeline_data_source_component.py +28 -0
  121. llama_cloud/types/pipeline_data_source_create.py +1 -3
  122. llama_cloud/types/pipeline_deployment.py +4 -4
  123. llama_cloud/types/pipeline_file.py +13 -24
  124. llama_cloud/types/pipeline_file_create.py +1 -3
  125. llama_cloud/types/playground_session.py +4 -4
  126. llama_cloud/types/preset_retrieval_params.py +8 -14
  127. llama_cloud/types/presigned_url.py +1 -3
  128. llama_cloud/types/progress_event.py +44 -0
  129. llama_cloud/types/progress_event_status.py +33 -0
  130. llama_cloud/types/project.py +2 -2
  131. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  132. llama_cloud/types/prompt_spec.py +3 -5
  133. llama_cloud/types/related_node_info.py +2 -2
  134. llama_cloud/types/related_node_info_node_type.py +7 -0
  135. llama_cloud/types/report.py +33 -0
  136. llama_cloud/types/report_block.py +34 -0
  137. llama_cloud/types/report_block_dependency.py +29 -0
  138. llama_cloud/types/report_create_response.py +31 -0
  139. llama_cloud/types/report_event_item.py +40 -0
  140. llama_cloud/types/report_event_item_event_data.py +45 -0
  141. llama_cloud/types/report_event_type.py +37 -0
  142. llama_cloud/types/report_metadata.py +43 -0
  143. llama_cloud/types/report_plan.py +36 -0
  144. llama_cloud/types/report_plan_block.py +36 -0
  145. llama_cloud/types/report_query.py +33 -0
  146. llama_cloud/types/report_response.py +41 -0
  147. llama_cloud/types/report_state.py +37 -0
  148. llama_cloud/types/report_state_event.py +38 -0
  149. llama_cloud/types/report_update_event.py +38 -0
  150. llama_cloud/types/retrieve_results.py +1 -1
  151. llama_cloud/types/retriever.py +45 -0
  152. llama_cloud/types/retriever_create.py +37 -0
  153. llama_cloud/types/retriever_pipeline.py +37 -0
  154. llama_cloud/types/role.py +3 -3
  155. llama_cloud/types/sentence_splitter.py +2 -4
  156. llama_cloud/types/status_enum.py +4 -0
  157. llama_cloud/types/supported_llm_model_names.py +4 -0
  158. llama_cloud/types/text_block.py +31 -0
  159. llama_cloud/types/text_node.py +15 -8
  160. llama_cloud/types/token_text_splitter.py +1 -1
  161. llama_cloud/types/usage_metric_response.py +34 -0
  162. llama_cloud/types/user_job_record.py +32 -0
  163. llama_cloud/types/user_organization.py +5 -9
  164. llama_cloud/types/user_organization_create.py +4 -4
  165. llama_cloud/types/user_organization_delete.py +2 -2
  166. llama_cloud/types/user_organization_role.py +2 -2
  167. llama_cloud/types/vertex_text_embedding.py +5 -9
  168. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/METADATA +2 -1
  169. llama_cloud-0.1.7.dist-info/RECORD +310 -0
  170. llama_cloud/types/value.py +0 -5
  171. llama_cloud-0.1.6.dist-info/RECORD +0 -241
  172. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/LICENSE +0 -0
  173. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/WHEEL +0 -0
@@ -20,12 +20,10 @@ class CloudJiraDataSource(pydantic.BaseModel):
      """

      supports_access_control: typing.Optional[bool]
-     email: typing.Optional[str] = pydantic.Field(description="The email address to use for authentication.")
-     api_token: typing.Optional[str] = pydantic.Field(
-         description="The API/ Access Token used for Basic, PAT and OAuth2 authentication."
-     )
-     server_url: typing.Optional[str] = pydantic.Field(description="The server url for Jira Cloud.")
-     cloud_id: typing.Optional[str] = pydantic.Field(description="The cloud ID, used in case of OAuth2.")
+     email: typing.Optional[str]
+     api_token: typing.Optional[str]
+     server_url: typing.Optional[str]
+     cloud_id: typing.Optional[str]
      authentication_mechanism: str = pydantic.Field(description="Type of Authentication for connecting to Jira APIs.")
      query: str = pydantic.Field(description="JQL (Jira Query Language) query to search.")
      class_name: typing.Optional[str]
@@ -17,8 +17,8 @@ except ImportError:
  class CloudNotionPageDataSource(pydantic.BaseModel):
      supports_access_control: typing.Optional[bool]
      integration_token: str = pydantic.Field(description="The integration token to use for authentication.")
-     database_ids: typing.Optional[str] = pydantic.Field(description="The Notion Database Id to read content from.")
-     page_ids: typing.Optional[str] = pydantic.Field(description="The Page ID's of the Notion to read from.")
+     database_ids: typing.Optional[str]
+     page_ids: typing.Optional[str]
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
@@ -17,14 +17,12 @@ except ImportError:
  class CloudOneDriveDataSource(pydantic.BaseModel):
      supports_access_control: typing.Optional[bool]
      user_principal_name: str = pydantic.Field(description="The user principal name to use for authentication.")
-     folder_path: typing.Optional[str] = pydantic.Field(description="The path of the OneDrive folder to read from.")
-     folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the OneDrive folder to read from.")
+     folder_path: typing.Optional[str]
+     folder_id: typing.Optional[str]
      client_id: str = pydantic.Field(description="The client ID to use for authentication.")
      client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
      tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
-     required_exts: typing.Optional[typing.List[str]] = pydantic.Field(
-         description="The list of required file extensions."
-     )
+     required_exts: typing.Optional[typing.List[str]]
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
@@ -25,6 +25,7 @@ class CloudPostgresVectorStore(pydantic.BaseModel):
      schema_name: str
      embed_dim: int
      hybrid_search: typing.Optional[bool]
+     perform_setup: typing.Optional[bool]
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
@@ -17,14 +17,10 @@ except ImportError:
  class CloudS3DataSource(pydantic.BaseModel):
      supports_access_control: typing.Optional[bool]
      bucket: str = pydantic.Field(description="The name of the S3 bucket to read from.")
-     prefix: typing.Optional[str] = pydantic.Field(description="The prefix of the S3 objects to read from.")
-     aws_access_id: typing.Optional[str] = pydantic.Field(description="The AWS access ID to use for authentication.")
-     aws_access_secret: typing.Optional[str] = pydantic.Field(
-         description="The AWS access secret to use for authentication."
-     )
-     s_3_endpoint_url: typing.Optional[str] = pydantic.Field(
-         alias="s3_endpoint_url", description="The S3 endpoint URL to use for authentication."
-     )
+     prefix: typing.Optional[str]
+     aws_access_id: typing.Optional[str]
+     aws_access_secret: typing.Optional[str]
+     s_3_endpoint_url: typing.Optional[str] = pydantic.Field(alias="s3_endpoint_url")
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
@@ -16,17 +16,15 @@ except ImportError:

  class CloudSharepointDataSource(pydantic.BaseModel):
      supports_access_control: typing.Optional[bool]
-     site_name: typing.Optional[str] = pydantic.Field(description="The name of the SharePoint site to download from.")
-     site_id: typing.Optional[str] = pydantic.Field(description="The ID of the SharePoint site to download from.")
-     folder_path: typing.Optional[str] = pydantic.Field(description="The path of the Sharepoint folder to read from.")
-     folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Sharepoint folder to read from.")
-     drive_name: typing.Optional[str] = pydantic.Field(description="The name of the Sharepoint drive to read from.")
+     site_name: typing.Optional[str]
+     site_id: typing.Optional[str]
+     folder_path: typing.Optional[str]
+     folder_id: typing.Optional[str]
+     drive_name: typing.Optional[str]
      client_id: str = pydantic.Field(description="The client ID to use for authentication.")
      client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
      tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
-     required_exts: typing.Optional[typing.List[str]] = pydantic.Field(
-         description="The list of required file extensions."
-     )
+     required_exts: typing.Optional[typing.List[str]]
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
@@ -17,12 +17,12 @@ except ImportError:
  class CloudSlackDataSource(pydantic.BaseModel):
      supports_access_control: typing.Optional[bool]
      slack_token: str = pydantic.Field(description="Slack Bot Token.")
-     channel_ids: typing.Optional[str] = pydantic.Field(description="Slack Channel.")
-     latest_date: typing.Optional[str] = pydantic.Field(description="Latest date.")
-     earliest_date: typing.Optional[str] = pydantic.Field(description="Earliest date.")
-     earliest_date_timestamp: typing.Optional[float] = pydantic.Field(description="Earliest date timestamp.")
-     latest_date_timestamp: typing.Optional[float] = pydantic.Field(description="Latest date timestamp.")
-     channel_patterns: typing.Optional[str] = pydantic.Field(description="Slack Channel name pattern.")
+     channel_ids: typing.Optional[str]
+     latest_date: typing.Optional[str]
+     earliest_date: typing.Optional[str]
+     earliest_date_timestamp: typing.Optional[float]
+     latest_date_timestamp: typing.Optional[float]
+     channel_patterns: typing.Optional[str]
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
@@ -27,7 +27,7 @@ class CodeSplitter(pydantic.BaseModel):
      )
      include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
      callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
+     id_func: typing.Optional[str]
      language: str = pydantic.Field(description="The programming language of the code being split.")
      chunk_lines: typing.Optional[int] = pydantic.Field(description="The number of lines to include in each chunk.")
      chunk_lines_overlap: typing.Optional[int] = pydantic.Field(
@@ -17,14 +17,10 @@ except ImportError:
  class CohereEmbedding(pydantic.BaseModel):
      model_name: typing.Optional[str] = pydantic.Field(description="The modelId of the Cohere model to use.")
      embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-     num_workers: typing.Optional[int] = pydantic.Field(
-         description="The number of workers to use for async embedding calls."
-     )
-     api_key: typing.Optional[str] = pydantic.Field(description="The Cohere API key.")
+     num_workers: typing.Optional[int]
+     api_key: typing.Optional[str]
      truncate: typing.Optional[str] = pydantic.Field(description="Truncation type - START/ END/ NONE")
-     input_type: typing.Optional[str] = pydantic.Field(
-         description="Model Input type. If not provided, search_document and search_query are used when needed."
-     )
+     input_type: typing.Optional[str]
      embedding_type: typing.Optional[str] = pydantic.Field(
          description="Embedding type. If not provided float embedding_type is used when needed."
      )
@@ -0,0 +1,21 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class CompositeRetrievalMode(str, enum.Enum):
+     """
+     Enum for the mode of composite retrieval.
+     """
+
+     ROUTING = "routing"
+     FULL = "full"
+
+     def visit(self, routing: typing.Callable[[], T_Result], full: typing.Callable[[], T_Result]) -> T_Result:
+         if self is CompositeRetrievalMode.ROUTING:
+             return routing()
+         if self is CompositeRetrievalMode.FULL:
+             return full()
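
Note: the visit() helper on this new enum dispatches to exactly one callback per member. A minimal usage sketch (the handler lambdas and printed strings below are illustrative only, not part of the package):

from llama_cloud.types.composite_retrieval_mode import CompositeRetrievalMode

mode = CompositeRetrievalMode.ROUTING

# visit() invokes exactly one of the callables, based on the enum member.
label = mode.visit(
    routing=lambda: "routing mode selected",
    full=lambda: "full mode selected",
)
print(label)  # -> "routing mode selected"
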
@@ -0,0 +1,38 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .composite_retrieved_text_node import CompositeRetrievedTextNode
+ from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class CompositeRetrievalResult(pydantic.BaseModel):
+     nodes: typing.Optional[typing.List[CompositeRetrievedTextNode]] = pydantic.Field(
+         description="The retrieved nodes from the composite retrieval."
+     )
+     image_nodes: typing.Optional[typing.List[PageScreenshotNodeWithScore]] = pydantic.Field(
+         description="The image nodes retrieved by the pipeline for the given query."
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,42 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class CompositeRetrievedTextNode(pydantic.BaseModel):
+     id: str = pydantic.Field(description="The ID of the retrieved node.")
+     retriever_id: str = pydantic.Field(description="The ID of the retriever this node was retrieved from.")
+     retriever_pipeline_name: str = pydantic.Field(
+         description="The name of the retrieval pipeline this node was retrieved from."
+     )
+     pipeline_id: str = pydantic.Field(description="The ID of the pipeline this node was retrieved from.")
+     metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+         description="Metadata associated with the retrieved node."
+     )
+     text: str = pydantic.Field(description="The text of the retrieved node.")
+     start_char_idx: typing.Optional[int]
+     end_char_idx: typing.Optional[int]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
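
Note: a hedged construction sketch showing the two new composite retrieval models together; all IDs and text below are placeholder values, and the import paths follow the module layout in the file list above:

from llama_cloud.types.composite_retrieval_result import CompositeRetrievalResult
from llama_cloud.types.composite_retrieved_text_node import CompositeRetrievedTextNode

# Placeholder values; real instances are returned by the retrievers API.
node = CompositeRetrievedTextNode(
    id="node-1",
    retriever_id="retriever-abc",
    retriever_pipeline_name="docs-pipeline",
    pipeline_id="pipeline-xyz",
    metadata={"file_name": "example.pdf"},
    text="Example retrieved text.",
)
result = CompositeRetrievalResult(nodes=[node])

# json()/dict() apply by_alias=True and exclude_unset=True, so unset optional
# fields (image_nodes, start_char_idx, end_char_idx) are omitted from output.
print(result.json())
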
@@ -5,7 +5,7 @@ import typing

  from ..core.datetime_utils import serialize_datetime
  from .configurable_data_sink_names import ConfigurableDataSinkNames
- from .data_sink_create_component import DataSinkCreateComponent
+ from .data_sink_component import DataSinkComponent

  try:
      import pydantic
@@ -22,11 +22,11 @@ class DataSink(pydantic.BaseModel):
      """

      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="The name of the data sink.")
      sink_type: ConfigurableDataSinkNames
-     component: DataSinkCreateComponent = pydantic.Field(description="Component that implements the data sink")
+     component: DataSinkComponent = pydantic.Field(description="Component that implements the data sink")
      project_id: str

      def json(self, **kwargs: typing.Any) -> str:
@@ -0,0 +1,20 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
+ from .cloud_milvus_vector_store import CloudMilvusVectorStore
+ from .cloud_mongo_db_atlas_vector_search import CloudMongoDbAtlasVectorSearch
+ from .cloud_pinecone_vector_store import CloudPineconeVectorStore
+ from .cloud_postgres_vector_store import CloudPostgresVectorStore
+ from .cloud_qdrant_vector_store import CloudQdrantVectorStore
+
+ DataSinkComponent = typing.Union[
+     typing.Dict[str, typing.Any],
+     CloudPineconeVectorStore,
+     CloudPostgresVectorStore,
+     CloudQdrantVectorStore,
+     CloudAzureAiSearchVectorStore,
+     CloudMongoDbAtlasVectorSearch,
+     CloudMilvusVectorStore,
+ ]
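
Note: DataSinkComponent is a plain typing.Union alias, so a raw dict type-checks alongside the typed vector-store models. A small sketch of handling both branches (the placeholder dict keys are illustrative only):

from llama_cloud.types.data_sink_component import DataSinkComponent


def describe_sink_component(component: DataSinkComponent) -> str:
    # Raw dicts pass through untyped; the Cloud*VectorStore models are validated.
    if isinstance(component, dict):
        return f"untyped sink config with keys: {sorted(component)}"
    return f"typed sink config: {type(component).__name__}"


print(describe_sink_component({"table_name": "placeholder", "embed_dim": 1536}))
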
@@ -5,7 +5,7 @@ import typing

  from ..core.datetime_utils import serialize_datetime
  from .configurable_data_source_names import ConfigurableDataSourceNames
- from .data_source_create_component import DataSourceCreateComponent
+ from .data_source_component import DataSourceComponent
  from .data_source_custom_metadata_value import DataSourceCustomMetadataValue

  try:
@@ -23,14 +23,12 @@ class DataSource(pydantic.BaseModel):
      """

      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="The name of the data source.")
      source_type: ConfigurableDataSourceNames
-     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]] = pydantic.Field(
-         description="Custom metadata that will be present on all data loaded from the data source"
-     )
-     component: DataSourceCreateComponent = pydantic.Field(description="Component that implements the data source")
+     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
+     component: DataSourceComponent = pydantic.Field(description="Component that implements the data source")
      project_id: str

      def json(self, **kwargs: typing.Any) -> str:
@@ -0,0 +1,28 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+ from .cloud_box_data_source import CloudBoxDataSource
+ from .cloud_confluence_data_source import CloudConfluenceDataSource
+ from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+ from .cloud_jira_data_source import CloudJiraDataSource
+ from .cloud_notion_page_data_source import CloudNotionPageDataSource
+ from .cloud_one_drive_data_source import CloudOneDriveDataSource
+ from .cloud_s_3_data_source import CloudS3DataSource
+ from .cloud_sharepoint_data_source import CloudSharepointDataSource
+ from .cloud_slack_data_source import CloudSlackDataSource
+
+ DataSourceComponent = typing.Union[
+     typing.Dict[str, typing.Any],
+     CloudS3DataSource,
+     CloudAzStorageBlobDataSource,
+     CloudGoogleDriveDataSource,
+     CloudOneDriveDataSource,
+     CloudSharepointDataSource,
+     CloudSlackDataSource,
+     CloudNotionPageDataSource,
+     CloudConfluenceDataSource,
+     CloudJiraDataSource,
+     CloudBoxDataSource,
+ ]
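
Note: similarly, DataSourceComponent accepts either a raw dict or one of the typed data-source models. A hedged sketch using CloudS3DataSource, assuming bucket is its only required field (as the CloudS3DataSource hunk earlier in this diff suggests):

from llama_cloud.types.cloud_s_3_data_source import CloudS3DataSource
from llama_cloud.types.data_source_component import DataSourceComponent

# Assumption: `bucket` is the only required field; credentials stay unset here.
s3_source = CloudS3DataSource(bucket="my-bucket")
component: DataSourceComponent = s3_source  # any member of the union is accepted

# by_alias=True means s_3_endpoint_url would serialize under its API-facing
# alias "s3_endpoint_url" if it were set; unset optional fields are omitted.
print(s3_source.json())
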
@@ -24,9 +24,7 @@ class DataSourceCreate(pydantic.BaseModel):

      name: str = pydantic.Field(description="The name of the data source.")
      source_type: ConfigurableDataSourceNames
-     custom_metadata: typing.Optional[
-         typing.Dict[str, typing.Optional[DataSourceCreateCustomMetadataValue]]
-     ] = pydantic.Field(description="Custom metadata that will be present on all data loaded from the data source")
+     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCreateCustomMetadataValue]]]
      component: DataSourceCreateComponent = pydantic.Field(description="Component that implements the data source")

      def json(self, **kwargs: typing.Any) -> str:
@@ -0,0 +1,39 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .report_block import ReportBlock
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class EditSuggestion(pydantic.BaseModel):
+     """
+     A suggestion for an edit to a report.
+     """
+
+     justification: str
+     start_line: int
+     end_line: int
+     blocks: typing.List[ReportBlock]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
@@ -21,8 +21,8 @@ class EmbeddingModelConfig(pydantic.BaseModel):
      """

      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="The name of the embedding model config.")
      embedding_config: EmbeddingModelConfigEmbeddingConfig = pydantic.Field(
          description="The embedding configuration for the embedding model config."
@@ -16,10 +16,8 @@ except ImportError:


  class EmbeddingModelConfigUpdate(pydantic.BaseModel):
-     name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model config.")
-     embedding_config: typing.Optional[EmbeddingModelConfigUpdateEmbeddingConfig] = pydantic.Field(
-         description="The embedding configuration for the embedding model config."
-     )
+     name: typing.Optional[str]
+     embedding_config: typing.Optional[EmbeddingModelConfigUpdateEmbeddingConfig]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -21,8 +21,8 @@ class EvalDataset(pydantic.BaseModel):
      """

      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="The name of the EvalDataset.")
      project_id: str

@@ -28,27 +28,22 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
      partitions: typing.Dict[str, str] = pydantic.Field(
          description="The partitions for this execution. Used for determining where to save job output."
      )
-     parameters: typing.Optional[EvalDatasetJobParams] = pydantic.Field(
-         description="Additional input parameters for the eval execution."
-     )
-     session_id: typing.Optional[str] = pydantic.Field(
-         description="The upstream request ID that created this job. Used for tracking the job across services."
-     )
-     correlation_id: typing.Optional[str] = pydantic.Field(
-         description="The correlation ID for this job. Used for tracking the job across services."
-     )
-     parent_job_execution_id: typing.Optional[str] = pydantic.Field(description="The ID of the parent job execution.")
-     user_id: typing.Optional[str] = pydantic.Field(description="The ID of the user that created this job")
+     parameters: typing.Optional[EvalDatasetJobParams]
+     session_id: typing.Optional[str]
+     correlation_id: typing.Optional[str]
+     parent_job_execution_id: typing.Optional[str]
+     user_id: typing.Optional[str]
      created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+     project_id: typing.Optional[str]
      id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
      status: StatusEnum
      error_code: typing.Optional[str]
      error_message: typing.Optional[str]
-     attempts: typing.Optional[int] = pydantic.Field(description="The number of times this job has been attempted")
+     attempts: typing.Optional[int]
      started_at: typing.Optional[dt.datetime]
      ended_at: typing.Optional[dt.datetime]
      updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
-     data: typing.Optional[Base] = pydantic.Field(description="Additional metadata for the job execution.")
+     data: typing.Optional[Base]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -20,12 +20,8 @@ class EvalExecutionParamsOverride(pydantic.BaseModel):
      Schema for the params override for an eval execution.
      """

-     llm_model: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
-         description="The LLM model to use within eval execution."
-     )
-     qa_prompt_tmpl: typing.Optional[str] = pydantic.Field(
-         description="The template to use for the question answering prompt."
-     )
+     llm_model: typing.Optional[SupportedLlmModelNames]
+     qa_prompt_tmpl: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -16,8 +16,8 @@ except ImportError:

  class EvalQuestion(pydantic.BaseModel):
      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      content: str = pydantic.Field(description="The content of the question.")
      eval_dataset_id: str
      eval_dataset_index: int = pydantic.Field(
@@ -0,0 +1,45 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
+ from .extract_config import ExtractConfig
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class ExtractAgent(pydantic.BaseModel):
+     """
+     Schema and configuration for creating an extraction agent.
+     """
+
+     id: str = pydantic.Field(description="The id of the extraction agent.")
+     name: str = pydantic.Field(description="The name of the extraction agent.")
+     project_id: str = pydantic.Field(description="The ID of the project that the extraction agent belongs to.")
+     data_schema: typing.Dict[str, typing.Optional[ExtractAgentDataSchemaValue]] = pydantic.Field(
+         description="The schema of the data."
+     )
+     config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
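
Note: a hedged construction sketch for the new extraction models; the IDs below are placeholders (the service assigns real ones), and the data_schema shown is just one example of the dict/list/str/int/float/bool values permitted by ExtractAgentDataSchemaValue:

from llama_cloud.types.extract_agent import ExtractAgent
from llama_cloud.types.extract_config import ExtractConfig

agent = ExtractAgent(
    id="agent-123",            # placeholder
    name="invoice-extractor",
    project_id="project-456",  # placeholder
    data_schema={
        "type": "object",
        "properties": {"invoice_total": {"type": "number"}},
    },
    # ExtractConfig fields are all optional; extraction_mode and handle_missing
    # stay unset (None) when omitted.
    config=ExtractConfig(system_prompt="Extract the invoice total."),
)
print(agent.json())
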
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExtractAgentDataSchemaValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
@@ -0,0 +1,40 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .extract_mode import ExtractMode
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class ExtractConfig(pydantic.BaseModel):
+     """
+     Additional parameters for the extraction agent.
+     """
+
+     extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(description="The extraction mode specified.")
+     handle_missing: typing.Optional[bool] = pydantic.Field(
+         description="Whether to handle missing fields in the schema."
+     )
+     system_prompt: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,35 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .file import File
+ from .status_enum import StatusEnum
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+     import pydantic # type: ignore
+
+
+ class ExtractJob(pydantic.BaseModel):
+     id: str = pydantic.Field(description="The id of the extraction job")
+     status: StatusEnum = pydantic.Field(description="The status of the extraction job")
+     file: File = pydantic.Field(description="The file that the extract was extracted from")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}