llama-cloud 0.1.6__py3-none-any.whl → 0.1.7a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +140 -6
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/environment.py +1 -1
  4. llama_cloud/resources/__init__.py +15 -0
  5. llama_cloud/{types/token.py → resources/chat_apps/__init__.py} +0 -3
  6. llama_cloud/resources/chat_apps/client.py +620 -0
  7. llama_cloud/resources/data_sinks/client.py +12 -12
  8. llama_cloud/resources/data_sources/client.py +14 -14
  9. llama_cloud/resources/embedding_model_configs/client.py +20 -76
  10. llama_cloud/resources/evals/client.py +26 -36
  11. llama_cloud/resources/extraction/client.py +32 -32
  12. llama_cloud/resources/files/client.py +40 -44
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +66 -70
  21. llama_cloud/resources/parsing/client.py +448 -428
  22. llama_cloud/resources/pipelines/client.py +256 -344
  23. llama_cloud/resources/projects/client.py +34 -60
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +128 -6
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +3 -3
  32. llama_cloud/types/azure_open_ai_embedding.py +6 -12
  33. llama_cloud/types/base_prompt_template.py +2 -6
  34. llama_cloud/types/bedrock_embedding.py +6 -12
  35. llama_cloud/types/character_splitter.py +2 -4
  36. llama_cloud/types/chat_app.py +44 -0
  37. llama_cloud/types/chat_app_response.py +41 -0
  38. llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -15
  39. llama_cloud/types/cloud_box_data_source.py +6 -12
  40. llama_cloud/types/cloud_confluence_data_source.py +6 -6
  41. llama_cloud/types/cloud_document.py +1 -3
  42. llama_cloud/types/cloud_document_create.py +1 -3
  43. llama_cloud/types/cloud_jira_data_source.py +4 -6
  44. llama_cloud/types/cloud_notion_page_data_source.py +2 -2
  45. llama_cloud/types/cloud_one_drive_data_source.py +3 -5
  46. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  47. llama_cloud/types/cloud_s_3_data_source.py +4 -8
  48. llama_cloud/types/cloud_sharepoint_data_source.py +6 -8
  49. llama_cloud/types/cloud_slack_data_source.py +6 -6
  50. llama_cloud/types/code_splitter.py +1 -1
  51. llama_cloud/types/cohere_embedding.py +3 -7
  52. llama_cloud/types/composite_retrieval_mode.py +21 -0
  53. llama_cloud/types/composite_retrieval_result.py +38 -0
  54. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  55. llama_cloud/types/data_sink.py +4 -4
  56. llama_cloud/types/data_sink_component.py +20 -0
  57. llama_cloud/types/data_source.py +5 -7
  58. llama_cloud/types/data_source_component.py +28 -0
  59. llama_cloud/types/data_source_create.py +1 -3
  60. llama_cloud/types/edit_suggestion.py +39 -0
  61. llama_cloud/types/embedding_model_config.py +2 -2
  62. llama_cloud/types/embedding_model_config_update.py +2 -4
  63. llama_cloud/types/eval_dataset.py +2 -2
  64. llama_cloud/types/eval_dataset_job_record.py +8 -13
  65. llama_cloud/types/eval_execution_params_override.py +2 -6
  66. llama_cloud/types/eval_question.py +2 -2
  67. llama_cloud/types/extract_agent.py +45 -0
  68. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  69. llama_cloud/types/extract_config.py +40 -0
  70. llama_cloud/types/extract_job.py +35 -0
  71. llama_cloud/types/extract_job_create.py +40 -0
  72. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  73. llama_cloud/types/extract_mode.py +17 -0
  74. llama_cloud/types/extract_resultset.py +46 -0
  75. llama_cloud/types/extract_resultset_data.py +11 -0
  76. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  77. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  78. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  79. llama_cloud/types/extraction_result.py +2 -2
  80. llama_cloud/types/extraction_schema.py +3 -5
  81. llama_cloud/types/file.py +9 -14
  82. llama_cloud/types/filter_condition.py +9 -1
  83. llama_cloud/types/filter_operator.py +6 -2
  84. llama_cloud/types/gemini_embedding.py +6 -10
  85. llama_cloud/types/hugging_face_inference_api_embedding.py +11 -27
  86. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  87. llama_cloud/types/image_block.py +35 -0
  88. llama_cloud/types/input_message.py +2 -4
  89. llama_cloud/types/job_names.py +89 -0
  90. llama_cloud/types/job_record.py +57 -0
  91. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  92. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  93. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  94. llama_cloud/types/llama_parse_parameters.py +4 -0
  95. llama_cloud/types/llm.py +3 -4
  96. llama_cloud/types/llm_model_data.py +1 -0
  97. llama_cloud/types/llm_parameters.py +3 -5
  98. llama_cloud/types/local_eval.py +8 -10
  99. llama_cloud/types/local_eval_results.py +1 -1
  100. llama_cloud/types/managed_ingestion_status.py +4 -0
  101. llama_cloud/types/managed_ingestion_status_response.py +4 -5
  102. llama_cloud/types/markdown_element_node_parser.py +3 -5
  103. llama_cloud/types/markdown_node_parser.py +1 -1
  104. llama_cloud/types/metadata_filter.py +2 -2
  105. llama_cloud/types/metadata_filter_value.py +5 -0
  106. llama_cloud/types/metric_result.py +3 -3
  107. llama_cloud/types/node_parser.py +1 -1
  108. llama_cloud/types/object_type.py +4 -0
  109. llama_cloud/types/open_ai_embedding.py +6 -12
  110. llama_cloud/types/organization.py +7 -2
  111. llama_cloud/types/page_splitter_node_parser.py +2 -2
  112. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  113. llama_cloud/types/paginated_report_response.py +35 -0
  114. llama_cloud/types/parse_plan_level.py +21 -0
  115. llama_cloud/types/permission.py +3 -3
  116. llama_cloud/types/pipeline.py +7 -17
  117. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  118. llama_cloud/types/pipeline_create.py +8 -16
  119. llama_cloud/types/pipeline_data_source.py +7 -13
  120. llama_cloud/types/pipeline_data_source_component.py +28 -0
  121. llama_cloud/types/pipeline_data_source_create.py +1 -3
  122. llama_cloud/types/pipeline_deployment.py +4 -4
  123. llama_cloud/types/pipeline_file.py +13 -24
  124. llama_cloud/types/pipeline_file_create.py +1 -3
  125. llama_cloud/types/playground_session.py +4 -4
  126. llama_cloud/types/preset_retrieval_params.py +8 -14
  127. llama_cloud/types/presigned_url.py +1 -3
  128. llama_cloud/types/progress_event.py +44 -0
  129. llama_cloud/types/progress_event_status.py +33 -0
  130. llama_cloud/types/project.py +2 -2
  131. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  132. llama_cloud/types/prompt_spec.py +3 -5
  133. llama_cloud/types/related_node_info.py +2 -2
  134. llama_cloud/types/related_node_info_node_type.py +7 -0
  135. llama_cloud/types/report.py +33 -0
  136. llama_cloud/types/report_block.py +34 -0
  137. llama_cloud/types/report_block_dependency.py +29 -0
  138. llama_cloud/types/report_create_response.py +31 -0
  139. llama_cloud/types/report_event_item.py +40 -0
  140. llama_cloud/types/report_event_item_event_data.py +45 -0
  141. llama_cloud/types/report_event_type.py +37 -0
  142. llama_cloud/types/report_metadata.py +39 -0
  143. llama_cloud/types/report_plan.py +36 -0
  144. llama_cloud/types/report_plan_block.py +36 -0
  145. llama_cloud/types/report_query.py +33 -0
  146. llama_cloud/types/report_response.py +41 -0
  147. llama_cloud/types/report_state.py +37 -0
  148. llama_cloud/types/report_state_event.py +38 -0
  149. llama_cloud/types/report_update_event.py +38 -0
  150. llama_cloud/types/retrieve_results.py +1 -1
  151. llama_cloud/types/retriever.py +45 -0
  152. llama_cloud/types/retriever_create.py +37 -0
  153. llama_cloud/types/retriever_pipeline.py +37 -0
  154. llama_cloud/types/role.py +3 -3
  155. llama_cloud/types/sentence_splitter.py +2 -4
  156. llama_cloud/types/status_enum.py +4 -0
  157. llama_cloud/types/supported_llm_model_names.py +4 -0
  158. llama_cloud/types/text_block.py +31 -0
  159. llama_cloud/types/text_node.py +15 -8
  160. llama_cloud/types/token_text_splitter.py +1 -1
  161. llama_cloud/types/usage_metric_response.py +34 -0
  162. llama_cloud/types/user_job_record.py +32 -0
  163. llama_cloud/types/user_organization.py +5 -9
  164. llama_cloud/types/user_organization_create.py +4 -4
  165. llama_cloud/types/user_organization_delete.py +2 -2
  166. llama_cloud/types/user_organization_role.py +2 -2
  167. llama_cloud/types/vertex_text_embedding.py +5 -9
  168. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/METADATA +2 -1
  169. llama_cloud-0.1.7a1.dist-info/RECORD +310 -0
  170. llama_cloud/types/value.py +0 -5
  171. llama_cloud-0.1.6.dist-info/RECORD +0 -241
  172. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/LICENSE +0 -0
  173. {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/WHEEL +0 -0
llama_cloud/types/extract_job_create.py ADDED
@@ -0,0 +1,40 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .extract_config import ExtractConfig
+ from .extract_job_create_data_schema_override_value import ExtractJobCreateDataSchemaOverrideValue
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class ExtractJobCreate(pydantic.BaseModel):
+     """
+     Schema for creating an extraction job.
+     """
+
+     extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
+     file_id: str = pydantic.Field(description="The id of the file")
+     data_schema_override: typing.Optional[typing.Dict[str, typing.Optional[ExtractJobCreateDataSchemaOverrideValue]]]
+     config_override: typing.Optional[ExtractConfig]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
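Usage note: a minimal sketch of constructing the new ExtractJobCreate model. The module path follows the file list above, the IDs are placeholders, and config_override is omitted since it is optional.

from llama_cloud.types.extract_job_create import ExtractJobCreate

# Placeholder IDs for illustration only.
job = ExtractJobCreate(
    extraction_agent_id="agent-123",
    file_id="file-456",
    # Any JSON-like value is accepted per ExtractJobCreateDataSchemaOverrideValue.
    data_schema_override={"invoice_total": {"type": "number"}},
)
print(job.json())  # by_alias / exclude_unset defaults applied by the overridden json()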
llama_cloud/types/extract_job_create_data_schema_override_value.py ADDED
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExtractJobCreateDataSchemaOverrideValue = typing.Union[
+     typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+ ]
llama_cloud/types/extract_mode.py ADDED
@@ -0,0 +1,17 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class ExtractMode(str, enum.Enum):
+     PER_DOC = "PER_DOC"
+     PER_PAGE = "PER_PAGE"
+
+     def visit(self, per_doc: typing.Callable[[], T_Result], per_page: typing.Callable[[], T_Result]) -> T_Result:
+         if self is ExtractMode.PER_DOC:
+             return per_doc()
+         if self is ExtractMode.PER_PAGE:
+             return per_page()
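Usage note: the generated visit() helper dispatches exhaustively over the two modes; the names below come directly from the file above.

from llama_cloud.types.extract_mode import ExtractMode

mode = ExtractMode.PER_PAGE
label = mode.visit(
    per_doc=lambda: "one extraction per document",
    per_page=lambda: "one extraction per page",
)
print(label)  # -> "one extraction per page"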
llama_cloud/types/extract_resultset.py ADDED
@@ -0,0 +1,46 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .extract_resultset_data import ExtractResultsetData
+ from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
+ from .file import File
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class ExtractResultset(pydantic.BaseModel):
+     """
+     Schema for an extraction result.
+     """
+
+     id: str = pydantic.Field(description="Unique identifier")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
+     extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
+     data: typing.Optional[ExtractResultsetData] = pydantic.Field(description="The data extracted from the file")
+     extraction_metadata: typing.Dict[str, typing.Optional[ExtractResultsetExtractionMetadataValue]] = pydantic.Field(
+         description="The metadata extracted from the file"
+     )
+     file: File = pydantic.Field(description="The file that the extract was extracted from")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/extract_resultset_data.py ADDED
@@ -0,0 +1,11 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
+ from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
+
+ ExtractResultsetData = typing.Union[
+     typing.Dict[str, typing.Optional[ExtractResultsetDataZeroValue]],
+     typing.List[typing.Dict[str, typing.Optional[ExtractResultsetDataItemValue]]],
+ ]
llama_cloud/types/extract_resultset_data_item_value.py ADDED
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExtractResultsetDataItemValue = typing.Union[
+     typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+ ]
llama_cloud/types/extract_resultset_data_zero_value.py ADDED
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExtractResultsetDataZeroValue = typing.Union[
+     typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+ ]
llama_cloud/types/extract_resultset_extraction_metadata_value.py ADDED
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExtractResultsetExtractionMetadataValue = typing.Union[
+     typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+ ]
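Usage note: ExtractResultsetData is a union of a single dict or a list of dicts (presumably matching the PER_DOC and PER_PAGE modes above), so callers generally branch on the runtime shape. A hedged normalization sketch:

import typing

def iter_rows(
    data: typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Dict[str, typing.Any]], None]
) -> typing.List[typing.Dict[str, typing.Any]]:
    # Normalize the ExtractResultsetData union: None -> [], dict -> [dict], list -> list.
    if data is None:
        return []
    if isinstance(data, dict):
        return [data]
    return list(data)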
llama_cloud/types/extraction_result.py CHANGED
@@ -22,8 +22,8 @@ class ExtractionResult(pydantic.BaseModel):
      """

      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      schema_id: str = pydantic.Field(description="The id of the schema")
      data: typing.Dict[str, typing.Optional[ExtractionResultDataValue]] = pydantic.Field(
          description="The data extracted from the file"
llama_cloud/types/extraction_schema.py CHANGED
@@ -21,13 +21,11 @@ class ExtractionSchema(pydantic.BaseModel):
      """

      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="The name of the extraction schema")
      project_id: str = pydantic.Field(description="The ID of the project that the extraction schema belongs to")
-     data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaDataSchemaValue]]] = pydantic.Field(
-         description="The schema of the data"
-     )
+     data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaDataSchemaValue]]]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/file.py CHANGED
@@ -22,22 +22,17 @@ class File(pydantic.BaseModel):
      """

      id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     created_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime]
      name: str
-     file_size: typing.Optional[int] = pydantic.Field(description="Size of the file in bytes")
-     file_type: typing.Optional[str] = pydantic.Field(description="File type (e.g. pdf, docx, etc.)")
+     external_file_id: str = pydantic.Field(description="The ID of the file in the external system")
+     file_size: typing.Optional[int]
+     file_type: typing.Optional[str]
      project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
-     last_modified_at: typing.Optional[dt.datetime] = pydantic.Field(description="The last modified time of the file")
-     resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileResourceInfoValue]]] = pydantic.Field(
-         description="Resource information for the file"
-     )
-     permission_info: typing.Optional[typing.Dict[str, typing.Optional[FilePermissionInfoValue]]] = pydantic.Field(
-         description="Permission information for the file"
-     )
-     data_source_id: typing.Optional[str] = pydantic.Field(
-         description="The ID of the data source that the file belongs to"
-     )
+     last_modified_at: typing.Optional[dt.datetime]
+     resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileResourceInfoValue]]]
+     permission_info: typing.Optional[typing.Dict[str, typing.Optional[FilePermissionInfoValue]]]
+     data_source_id: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/filter_condition.py CHANGED
@@ -13,9 +13,17 @@ class FilterCondition(str, enum.Enum):

      AND = "and"
      OR = "or"
+     NOT = "not"

-     def visit(self, and_: typing.Callable[[], T_Result], or_: typing.Callable[[], T_Result]) -> T_Result:
+     def visit(
+         self,
+         and_: typing.Callable[[], T_Result],
+         or_: typing.Callable[[], T_Result],
+         not_: typing.Callable[[], T_Result],
+     ) -> T_Result:
          if self is FilterCondition.AND:
              return and_()
          if self is FilterCondition.OR:
              return or_()
+         if self is FilterCondition.NOT:
+             return not_()
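Usage note: FilterCondition.visit() now requires a third callback, so existing exhaustive dispatches must add a not_ handler. A hedged sketch:

from llama_cloud.types.filter_condition import FilterCondition

condition = FilterCondition.NOT
keyword = condition.visit(
    and_=lambda: "AND",
    or_=lambda: "OR",
    not_=lambda: "NOT",  # new handler required by the widened signature
)
print(keyword)  # -> "NOT"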
llama_cloud/types/filter_operator.py CHANGED
@@ -22,6 +22,7 @@ class FilterOperator(str, enum.Enum):
      ANY = "any"
      ALL = "all"
      TEXT_MATCH = "text_match"
+     TEXT_MATCH_INSENSITIVE = "text_match_insensitive"
      CONTAINS = "contains"
      IS_EMPTY = "is_empty"

@@ -36,8 +37,9 @@
          in_: typing.Callable[[], T_Result],
          nin: typing.Callable[[], T_Result],
          any: typing.Callable[[], T_Result],
-         all_: typing.Callable[[], T_Result],
+         all: typing.Callable[[], T_Result],
          text_match: typing.Callable[[], T_Result],
+         text_match_insensitive: typing.Callable[[], T_Result],
          contains: typing.Callable[[], T_Result],
          is_empty: typing.Callable[[], T_Result],
      ) -> T_Result:
@@ -60,9 +62,11 @@
          if self is FilterOperator.ANY:
              return any()
          if self is FilterOperator.ALL:
-             return all_()
+             return all()
          if self is FilterOperator.TEXT_MATCH:
              return text_match()
+         if self is FilterOperator.TEXT_MATCH_INSENSITIVE:
+             return text_match_insensitive()
          if self is FilterOperator.CONTAINS:
              return contains()
          if self is FilterOperator.IS_EMPTY:
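Usage note: the new operator round-trips from its wire value; a small hedged check with no client call involved:

from llama_cloud.types.filter_operator import FilterOperator

op = FilterOperator("text_match_insensitive")  # parse the wire value back into the enum
assert op is FilterOperator.TEXT_MATCH_INSENSITIVE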
llama_cloud/types/gemini_embedding.py CHANGED
@@ -17,16 +17,12 @@ except ImportError:
  class GeminiEmbedding(pydantic.BaseModel):
      model_name: typing.Optional[str] = pydantic.Field(description="The modelId of the Gemini model to use.")
      embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-     num_workers: typing.Optional[int] = pydantic.Field(
-         description="The number of workers to use for async embedding calls."
-     )
-     title: typing.Optional[str] = pydantic.Field(
-         description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid."
-     )
-     task_type: typing.Optional[str] = pydantic.Field(description="The task for embedding model.")
-     api_key: typing.Optional[str] = pydantic.Field(description="API key to access the model. Defaults to None.")
-     api_base: typing.Optional[str] = pydantic.Field(description="API base to access the model. Defaults to None.")
-     transport: typing.Optional[str] = pydantic.Field(description="Transport to access the model. Defaults to None.")
+     num_workers: typing.Optional[int]
+     title: typing.Optional[str]
+     task_type: typing.Optional[str]
+     api_key: typing.Optional[str]
+     api_base: typing.Optional[str]
+     transport: typing.Optional[str]
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/hugging_face_inference_api_embedding.py CHANGED
@@ -4,8 +4,8 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
+ from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
  from .pooling import Pooling
- from .token import Token

  try:
      import pydantic
@@ -17,35 +17,19 @@ except ImportError:


  class HuggingFaceInferenceApiEmbedding(pydantic.BaseModel):
-     model_name: typing.Optional[str] = pydantic.Field(
-         description="Hugging Face model name. If None, the task will be used."
-     )
+     model_name: typing.Optional[str]
      embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-     num_workers: typing.Optional[int] = pydantic.Field(
-         description="The number of workers to use for async embedding calls."
-     )
-     pooling: typing.Optional[Pooling] = pydantic.Field(
-         description="Pooling strategy. If None, the model's default pooling is used."
-     )
-     query_instruction: typing.Optional[str] = pydantic.Field(
-         description="Instruction to prepend during query embedding."
-     )
-     text_instruction: typing.Optional[str] = pydantic.Field(description="Instruction to prepend during text embedding.")
-     token: typing.Optional[Token] = pydantic.Field(
+     num_workers: typing.Optional[int]
+     pooling: typing.Optional[Pooling]
+     query_instruction: typing.Optional[str]
+     text_instruction: typing.Optional[str]
+     token: typing.Optional[HuggingFaceInferenceApiEmbeddingToken] = pydantic.Field(
          description="Hugging Face token. Will default to the locally saved token. Pass token=False if you don’t want to send your token to the server."
      )
-     timeout: typing.Optional[float] = pydantic.Field(
-         description="The maximum number of seconds to wait for a response from the server. Loading a new model in Inference API can take up to several minutes. Defaults to None, meaning it will loop until the server is available."
-     )
-     headers: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
-         description="Additional headers to send to the server. By default only the authorization and user-agent headers are sent. Values in this dictionary will override the default values."
-     )
-     cookies: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
-         description="Additional cookies to send to the server."
-     )
-     task: typing.Optional[str] = pydantic.Field(
-         description="Optional task to pick Hugging Face's recommended model, used when model_name is left as default of None."
-     )
+     timeout: typing.Optional[float]
+     headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+     cookies: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+     task: typing.Optional[str]
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/hugging_face_inference_api_embedding_token.py ADDED
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ HuggingFaceInferenceApiEmbeddingToken = typing.Union[str, bool]
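Usage note: the token field now takes this str/bool union instead of the removed Token type. A hedged construction sketch; every field on the model is optional, so the rest can be left unset.

from llama_cloud.types.hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding

# Pass token=False to avoid sending a locally saved Hugging Face token to the server.
embedding_config = HuggingFaceInferenceApiEmbedding(token=False)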
llama_cloud/types/image_block.py ADDED
@@ -0,0 +1,35 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class ImageBlock(pydantic.BaseModel):
+     image: typing.Optional[str]
+     path: typing.Optional[str]
+     url: typing.Optional[str]
+     image_mimetype: typing.Optional[str]
+     detail: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
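Usage note: all ImageBlock fields are optional, so a block can carry just a URL and a MIME type. A hedged sketch with a placeholder URL:

from llama_cloud.types.image_block import ImageBlock

block = ImageBlock(url="https://example.com/figure.png", image_mimetype="image/png")
print(block.json())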
llama_cloud/types/input_message.py CHANGED
@@ -20,12 +20,10 @@ class InputMessage(pydantic.BaseModel):
      This is distinct from a ChatMessage because this schema is enforced by the AI Chat library used in the frontend
      """

-     id: str = pydantic.Field(description="ID of the message, if any. a UUID.")
+     id: typing.Optional[str] = pydantic.Field(description="ID of the message, if any. a UUID.")
      role: MessageRole
      content: str
-     data: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
-         description="Additional data to be stored with the message."
-     )
+     data: typing.Optional[typing.Dict[str, typing.Any]]
      class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/job_names.py ADDED
@@ -0,0 +1,89 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class JobNames(str, enum.Enum):
+     """
+     Enum for executable pipeline job names.
+     """
+
+     LOAD_DOCUMENTS_JOB = "load_documents_job"
+     LOAD_FILES_JOB = "load_files_job"
+     PLAYGROUND_JOB = "playground_job"
+     EVAL_DATASET_JOB = "eval_dataset_job"
+     PIPELINE_MANAGED_INGESTION_JOB = "pipeline_managed_ingestion_job"
+     DATA_SOURCE_MANAGED_INGESTION_JOB = "data_source_managed_ingestion_job"
+     DATA_SOURCE_UPDATE_DISPATCHER_JOB = "data_source_update_dispatcher_job"
+     PIPELINE_FILE_UPDATE_DISPATCHER_JOB = "pipeline_file_update_dispatcher_job"
+     PIPELINE_FILE_UPDATER_JOB = "pipeline_file_updater_job"
+     FILE_MANAGED_INGESTION_JOB = "file_managed_ingestion_job"
+     DOCUMENT_INGESTION_JOB = "document_ingestion_job"
+     PARSE_RAW_FILE_JOB = "parse_raw_file_job"
+     LLAMA_PARSE_TRANSFORM_JOB = "llama_parse_transform_job"
+     METADATA_UPDATE_JOB = "metadata_update_job"
+     PARSE_RAW_FILE_JOB_CACHED = "parse_raw_file_job_cached"
+     EXTRACTION_JOB = "extraction_job"
+     EXTRACT_JOB = "extract_job"
+     ASYNCIO_TEST_JOB = "asyncio_test_job"
+
+     def visit(
+         self,
+         load_documents_job: typing.Callable[[], T_Result],
+         load_files_job: typing.Callable[[], T_Result],
+         playground_job: typing.Callable[[], T_Result],
+         eval_dataset_job: typing.Callable[[], T_Result],
+         pipeline_managed_ingestion_job: typing.Callable[[], T_Result],
+         data_source_managed_ingestion_job: typing.Callable[[], T_Result],
+         data_source_update_dispatcher_job: typing.Callable[[], T_Result],
+         pipeline_file_update_dispatcher_job: typing.Callable[[], T_Result],
+         pipeline_file_updater_job: typing.Callable[[], T_Result],
+         file_managed_ingestion_job: typing.Callable[[], T_Result],
+         document_ingestion_job: typing.Callable[[], T_Result],
+         parse_raw_file_job: typing.Callable[[], T_Result],
+         llama_parse_transform_job: typing.Callable[[], T_Result],
+         metadata_update_job: typing.Callable[[], T_Result],
+         parse_raw_file_job_cached: typing.Callable[[], T_Result],
+         extraction_job: typing.Callable[[], T_Result],
+         extract_job: typing.Callable[[], T_Result],
+         asyncio_test_job: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is JobNames.LOAD_DOCUMENTS_JOB:
+             return load_documents_job()
+         if self is JobNames.LOAD_FILES_JOB:
+             return load_files_job()
+         if self is JobNames.PLAYGROUND_JOB:
+             return playground_job()
+         if self is JobNames.EVAL_DATASET_JOB:
+             return eval_dataset_job()
+         if self is JobNames.PIPELINE_MANAGED_INGESTION_JOB:
+             return pipeline_managed_ingestion_job()
+         if self is JobNames.DATA_SOURCE_MANAGED_INGESTION_JOB:
+             return data_source_managed_ingestion_job()
+         if self is JobNames.DATA_SOURCE_UPDATE_DISPATCHER_JOB:
+             return data_source_update_dispatcher_job()
+         if self is JobNames.PIPELINE_FILE_UPDATE_DISPATCHER_JOB:
+             return pipeline_file_update_dispatcher_job()
+         if self is JobNames.PIPELINE_FILE_UPDATER_JOB:
+             return pipeline_file_updater_job()
+         if self is JobNames.FILE_MANAGED_INGESTION_JOB:
+             return file_managed_ingestion_job()
+         if self is JobNames.DOCUMENT_INGESTION_JOB:
+             return document_ingestion_job()
+         if self is JobNames.PARSE_RAW_FILE_JOB:
+             return parse_raw_file_job()
+         if self is JobNames.LLAMA_PARSE_TRANSFORM_JOB:
+             return llama_parse_transform_job()
+         if self is JobNames.METADATA_UPDATE_JOB:
+             return metadata_update_job()
+         if self is JobNames.PARSE_RAW_FILE_JOB_CACHED:
+             return parse_raw_file_job_cached()
+         if self is JobNames.EXTRACTION_JOB:
+             return extraction_job()
+         if self is JobNames.EXTRACT_JOB:
+             return extract_job()
+         if self is JobNames.ASYNCIO_TEST_JOB:
+             return asyncio_test_job()
llama_cloud/types/job_record.py ADDED
@@ -0,0 +1,57 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .base import Base
+ from .job_names import JobNames
+ from .status_enum import StatusEnum
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class JobRecord(pydantic.BaseModel):
+     """
+     Schema for a job's metadata.
+     """
+
+     job_name: JobNames = pydantic.Field(description="The name of the job.")
+     partitions: typing.Dict[str, str] = pydantic.Field(
+         description="The partitions for this execution. Used for determining where to save job output."
+     )
+     parameters: typing.Optional[Base]
+     session_id: typing.Optional[str]
+     correlation_id: typing.Optional[str]
+     parent_job_execution_id: typing.Optional[str]
+     user_id: typing.Optional[str]
+     created_at: dt.datetime = pydantic.Field(description="Creation datetime")
+     project_id: typing.Optional[str]
+     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
+     status: StatusEnum
+     error_code: typing.Optional[str]
+     error_message: typing.Optional[str]
+     attempts: typing.Optional[int]
+     started_at: typing.Optional[dt.datetime]
+     ended_at: typing.Optional[dt.datetime]
+     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     data: typing.Optional[Base]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/job_record_with_usage_metrics.py ADDED
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .job_record import JobRecord
+ from .usage_metric_response import UsageMetricResponse
+ from .user_job_record import UserJobRecord
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class JobRecordWithUsageMetrics(pydantic.BaseModel):
+     job_record: JobRecord
+     usage_metrics: typing.Optional[UsageMetricResponse]
+     user: UserJobRecord
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/llama_index_core_base_llms_types_chat_message.py ADDED
@@ -0,0 +1,39 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .llama_index_core_base_llms_types_chat_message_blocks_item import LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem
+ from .message_role import MessageRole
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class LlamaIndexCoreBaseLlmsTypesChatMessage(pydantic.BaseModel):
+     """
+     Chat message.
+     """
+
+     role: typing.Optional[MessageRole]
+     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
+     blocks: typing.Optional[typing.List[LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem]]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
+ json_encoders = {dt.datetime: serialize_datetime}