llama-cloud 0.1.5__py3-none-any.whl → 0.1.7a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (129)
  1. llama_cloud/__init__.py +138 -2
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/resources/__init__.py +17 -1
  4. llama_cloud/resources/chat_apps/__init__.py +2 -0
  5. llama_cloud/resources/chat_apps/client.py +620 -0
  6. llama_cloud/resources/data_sinks/client.py +2 -2
  7. llama_cloud/resources/data_sources/client.py +2 -2
  8. llama_cloud/resources/embedding_model_configs/client.py +4 -4
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +21 -0
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +14 -14
  21. llama_cloud/resources/parsing/client.py +480 -229
  22. llama_cloud/resources/pipelines/client.py +182 -126
  23. llama_cloud/resources/projects/client.py +210 -102
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +124 -2
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +2 -2
  32. llama_cloud/types/chat_app.py +44 -0
  33. llama_cloud/types/chat_app_response.py +41 -0
  34. llama_cloud/types/cloud_az_storage_blob_data_source.py +1 -0
  35. llama_cloud/types/cloud_box_data_source.py +1 -0
  36. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  37. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  38. llama_cloud/types/cloud_jira_data_source.py +1 -0
  39. llama_cloud/types/cloud_notion_page_data_source.py +1 -0
  40. llama_cloud/types/cloud_one_drive_data_source.py +1 -0
  41. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  42. llama_cloud/types/cloud_s_3_data_source.py +1 -0
  43. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  44. llama_cloud/types/cloud_slack_data_source.py +1 -0
  45. llama_cloud/types/composite_retrieval_mode.py +21 -0
  46. llama_cloud/types/composite_retrieval_result.py +38 -0
  47. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  48. llama_cloud/types/data_sink.py +1 -1
  49. llama_cloud/types/data_sink_create.py +1 -1
  50. llama_cloud/types/data_source.py +1 -1
  51. llama_cloud/types/data_source_create.py +1 -1
  52. llama_cloud/types/edit_suggestion.py +39 -0
  53. llama_cloud/types/eval_dataset_job_record.py +1 -0
  54. llama_cloud/types/extract_agent.py +45 -0
  55. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  56. llama_cloud/types/extract_config.py +40 -0
  57. llama_cloud/types/extract_job.py +35 -0
  58. llama_cloud/types/extract_job_create.py +40 -0
  59. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  60. llama_cloud/types/extract_mode.py +17 -0
  61. llama_cloud/types/extract_resultset.py +46 -0
  62. llama_cloud/types/extract_resultset_data.py +11 -0
  63. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  64. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  65. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  66. llama_cloud/types/file.py +3 -0
  67. llama_cloud/types/file_permission_info_value.py +5 -0
  68. llama_cloud/types/filter_condition.py +9 -1
  69. llama_cloud/types/filter_operator.py +4 -0
  70. llama_cloud/types/image_block.py +35 -0
  71. llama_cloud/types/input_message.py +1 -1
  72. llama_cloud/types/job_name_mapping.py +4 -0
  73. llama_cloud/types/job_names.py +89 -0
  74. llama_cloud/types/job_record.py +57 -0
  75. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  76. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  77. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  78. llama_cloud/types/llama_parse_parameters.py +15 -0
  79. llama_cloud/types/llm.py +1 -0
  80. llama_cloud/types/llm_model_data.py +1 -0
  81. llama_cloud/types/llm_parameters.py +1 -0
  82. llama_cloud/types/managed_ingestion_status.py +4 -0
  83. llama_cloud/types/managed_ingestion_status_response.py +1 -0
  84. llama_cloud/types/object_type.py +4 -0
  85. llama_cloud/types/organization.py +5 -0
  86. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  87. llama_cloud/types/paginated_report_response.py +35 -0
  88. llama_cloud/types/parse_plan_level.py +21 -0
  89. llama_cloud/types/parsing_job_structured_result.py +32 -0
  90. llama_cloud/types/pipeline_create.py +3 -1
  91. llama_cloud/types/pipeline_data_source.py +1 -1
  92. llama_cloud/types/pipeline_file.py +3 -0
  93. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  94. llama_cloud/types/playground_session.py +2 -2
  95. llama_cloud/types/preset_retrieval_params.py +1 -0
  96. llama_cloud/types/progress_event.py +44 -0
  97. llama_cloud/types/progress_event_status.py +33 -0
  98. llama_cloud/types/prompt_spec.py +2 -2
  99. llama_cloud/types/related_node_info.py +2 -2
  100. llama_cloud/types/related_node_info_node_type.py +7 -0
  101. llama_cloud/types/report.py +33 -0
  102. llama_cloud/types/report_block.py +34 -0
  103. llama_cloud/types/report_block_dependency.py +29 -0
  104. llama_cloud/types/report_create_response.py +31 -0
  105. llama_cloud/types/report_event_item.py +40 -0
  106. llama_cloud/types/report_event_item_event_data.py +45 -0
  107. llama_cloud/types/report_event_type.py +37 -0
  108. llama_cloud/types/report_metadata.py +39 -0
  109. llama_cloud/types/report_plan.py +36 -0
  110. llama_cloud/types/report_plan_block.py +36 -0
  111. llama_cloud/types/report_query.py +33 -0
  112. llama_cloud/types/report_response.py +41 -0
  113. llama_cloud/types/report_state.py +37 -0
  114. llama_cloud/types/report_state_event.py +38 -0
  115. llama_cloud/types/report_update_event.py +38 -0
  116. llama_cloud/types/retrieve_results.py +1 -1
  117. llama_cloud/types/retriever.py +45 -0
  118. llama_cloud/types/retriever_create.py +37 -0
  119. llama_cloud/types/retriever_pipeline.py +37 -0
  120. llama_cloud/types/status_enum.py +4 -0
  121. llama_cloud/types/supported_llm_model_names.py +4 -0
  122. llama_cloud/types/text_block.py +31 -0
  123. llama_cloud/types/text_node.py +13 -6
  124. llama_cloud/types/usage_metric_response.py +34 -0
  125. llama_cloud/types/user_job_record.py +32 -0
  126. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/METADATA +3 -1
  127. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/RECORD +129 -59
  128. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/WHEEL +1 -1
  129. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7a1.dist-info}/LICENSE +0 -0
llama_cloud/types/edit_suggestion.py ADDED
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .report_block import ReportBlock
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class EditSuggestion(pydantic.BaseModel):
+    """
+    A suggestion for an edit to a report.
+    """
+
+    justification: str
+    start_line: int
+    end_line: int
+    blocks: typing.List[ReportBlock]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
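
Every generated model in this release relies on the same import shim shown above: on pydantic v2 the deliberate ImportError rebinds pydantic to the pydantic.v1 compatibility namespace, while on v1 the except branch keeps the plain import, so the v1-style Config/json()/dict() API works under either major version. A minimal sketch of the pattern, using a throwaway Point model that is illustrative rather than part of the SDK:

    # Same shim as the generated code: end up with a v1-style `pydantic` name
    # whether pydantic 1.x or 2.x is installed.
    try:
        import pydantic
        if pydantic.__version__.startswith("1."):
            raise ImportError  # already v1; fall through to the plain import
        import pydantic.v1 as pydantic  # type: ignore  # v2 installed; use its v1 compat layer
    except ImportError:
        import pydantic  # type: ignore

    class Point(pydantic.BaseModel):
        x: int
        y: int

        class Config:
            frozen = True  # like the generated models: immutable and hashable

    print(Point(x=1, y=2).json())  # {"x": 1, "y": 2}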
llama_cloud/types/eval_dataset_job_record.py CHANGED
@@ -34,6 +34,7 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
     parent_job_execution_id: typing.Optional[str]
     user_id: typing.Optional[str]
     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    project_id: typing.Optional[str]
     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
     status: StatusEnum
     error_code: typing.Optional[str]
llama_cloud/types/extract_agent.py ADDED
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
+from .extract_config import ExtractConfig
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractAgent(pydantic.BaseModel):
+    """
+    Schema and configuration for creating an extraction agent.
+    """
+
+    id: str = pydantic.Field(description="The id of the extraction agent.")
+    name: str = pydantic.Field(description="The name of the extraction agent.")
+    project_id: str = pydantic.Field(description="The ID of the project that the extraction agent belongs to.")
+    data_schema: typing.Dict[str, typing.Optional[ExtractAgentDataSchemaValue]] = pydantic.Field(
+        description="The schema of the data."
+    )
+    config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
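
Because json() and dict() inject by_alias=True and exclude_unset=True, optional fields that were never set are dropped from the serialized payload. A hedged construction sketch (ids and schema below are invented placeholders):

    from llama_cloud.types.extract_agent import ExtractAgent
    from llama_cloud.types.extract_config import ExtractConfig

    agent = ExtractAgent(
        id="agent-123",          # placeholder
        name="invoice-extractor",
        project_id="proj-456",   # placeholder
        data_schema={"type": "object", "properties": {"total": {"type": "number"}}},
        config=ExtractConfig(),  # every ExtractConfig field is optional
    )
    print(agent.json())  # created_at/updated_at were never set, so they are omitted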
llama_cloud/types/extract_agent_data_schema_value.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractAgentDataSchemaValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
llama_cloud/types/extract_config.py ADDED
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_mode import ExtractMode
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractConfig(pydantic.BaseModel):
+    """
+    Additional parameters for the extraction agent.
+    """
+
+    extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(description="The extraction mode specified.")
+    handle_missing: typing.Optional[bool] = pydantic.Field(
+        description="Whether to handle missing fields in the schema."
+    )
+    system_prompt: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/extract_job.py ADDED
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .file import File
+from .status_enum import StatusEnum
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractJob(pydantic.BaseModel):
+    id: str = pydantic.Field(description="The id of the extraction job")
+    status: StatusEnum = pydantic.Field(description="The status of the extraction job")
+    file: File = pydantic.Field(description="The file that the extract was extracted from")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/extract_job_create.py ADDED
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_config import ExtractConfig
+from .extract_job_create_data_schema_override_value import ExtractJobCreateDataSchemaOverrideValue
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractJobCreate(pydantic.BaseModel):
+    """
+    Schema for creating an extraction job.
+    """
+
+    extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
+    file_id: str = pydantic.Field(description="The id of the file")
+    data_schema_override: typing.Optional[typing.Dict[str, typing.Optional[ExtractJobCreateDataSchemaOverrideValue]]]
+    config_override: typing.Optional[ExtractConfig]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
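
ExtractJobCreate carries optional per-run overrides, so a single job can swap in a narrower schema or a different config without mutating the agent itself. A sketch under the same placeholder ids as above:

    from llama_cloud.types.extract_config import ExtractConfig
    from llama_cloud.types.extract_job_create import ExtractJobCreate
    from llama_cloud.types.extract_mode import ExtractMode

    job = ExtractJobCreate(
        extraction_agent_id="agent-123",  # placeholder
        file_id="file-789",               # placeholder
        # This run only: extract a single field and switch to per-page mode.
        data_schema_override={"type": "object", "properties": {"total": {"type": "number"}}},
        config_override=ExtractConfig(extraction_mode=ExtractMode.PER_PAGE),
    )
    print(job.json())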
llama_cloud/types/extract_job_create_data_schema_override_value.py ADDED
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractJobCreateDataSchemaOverrideValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/extract_mode.py ADDED
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ExtractMode(str, enum.Enum):
+    PER_DOC = "PER_DOC"
+    PER_PAGE = "PER_PAGE"
+
+    def visit(self, per_doc: typing.Callable[[], T_Result], per_page: typing.Callable[[], T_Result]) -> T_Result:
+        if self is ExtractMode.PER_DOC:
+            return per_doc()
+        if self is ExtractMode.PER_PAGE:
+            return per_page()
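
Like the other generated enums, ExtractMode pairs its members with a visit() dispatcher that takes one keyword callable per member, so adding a member later turns silent fall-through into a loud failure at every call site. A small sketch:

    from llama_cloud.types.extract_mode import ExtractMode

    def describe(mode: ExtractMode) -> str:
        # One handler per member; a new member would add a required argument
        # and break this call until it is handled.
        return mode.visit(
            per_doc=lambda: "one result per document",
            per_page=lambda: "one result per page",
        )

    print(describe(ExtractMode.PER_DOC))  # one result per document
    print(ExtractMode.PER_PAGE.value)     # "PER_PAGE" -- it is also a plain str enum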
llama_cloud/types/extract_resultset.py ADDED
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_resultset_data import ExtractResultsetData
+from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
+from .file import File
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractResultset(pydantic.BaseModel):
+    """
+    Schema for an extraction result.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
+    data: typing.Optional[ExtractResultsetData] = pydantic.Field(description="The data extracted from the file")
+    extraction_metadata: typing.Dict[str, typing.Optional[ExtractResultsetExtractionMetadataValue]] = pydantic.Field(
+        description="The metadata extracted from the file"
+    )
+    file: File = pydantic.Field(description="The file that the extract was extracted from")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/extract_resultset_data.py ADDED
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
+from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
+
+ExtractResultsetData = typing.Union[
+    typing.Dict[str, typing.Optional[ExtractResultsetDataZeroValue]],
+    typing.List[typing.Dict[str, typing.Optional[ExtractResultsetDataItemValue]]],
+]
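
ExtractResultsetData is a union of one mapping or a list of mappings, which presumably lines up with the two extraction modes (one dict for PER_DOC, a list of per-page dicts for PER_PAGE; that pairing is inferred from the names, not stated in the generated code), and smart_union = True on the models lets pydantic pick the best-matching branch instead of coercing to the first one. A sketch that normalizes both shapes:

    import typing

    def rows(data: typing.Union[dict, typing.List[dict]]) -> typing.List[dict]:
        # Collapse the union: a lone mapping becomes a one-element list.
        if isinstance(data, dict):
            return [data]
        return list(data)

    print(rows({"total": 42}))               # [{'total': 42}]
    print(rows([{"page": 1}, {"page": 2}]))  # [{'page': 1}, {'page': 2}]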
llama_cloud/types/extract_resultset_data_item_value.py ADDED
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractResultsetDataItemValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/extract_resultset_data_zero_value.py ADDED
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractResultsetDataZeroValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/extract_resultset_extraction_metadata_value.py ADDED
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractResultsetExtractionMetadataValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/file.py CHANGED
@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
 
 try:
@@ -24,11 +25,13 @@ class File(pydantic.BaseModel):
     created_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime]
     name: str
+    external_file_id: str = pydantic.Field(description="The ID of the file in the external system")
     file_size: typing.Optional[int]
     file_type: typing.Optional[str]
     project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
     last_modified_at: typing.Optional[dt.datetime]
     resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileResourceInfoValue]]]
+    permission_info: typing.Optional[typing.Dict[str, typing.Optional[FilePermissionInfoValue]]]
     data_source_id: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
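
The new permission_info field mirrors resource_info as an optional free-form mapping, so callers should guard before reading it. A hedged sketch (the "owner" key is an invented example, not a documented shape):

    from llama_cloud.types.file import File

    def describe_permissions(f: File) -> str:
        info = f.permission_info or {}          # Optional: may be absent entirely
        owner = info.get("owner", "<unknown>")  # "owner" is a hypothetical key
        return f"{f.name} (external id {f.external_file_id}): owner={owner}"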
llama_cloud/types/file_permission_info_value.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+FilePermissionInfoValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
llama_cloud/types/filter_condition.py CHANGED
@@ -13,9 +13,17 @@ class FilterCondition(str, enum.Enum):
 
     AND = "and"
     OR = "or"
+    NOT = "not"
 
-    def visit(self, and_: typing.Callable[[], T_Result], or_: typing.Callable[[], T_Result]) -> T_Result:
+    def visit(
+        self,
+        and_: typing.Callable[[], T_Result],
+        or_: typing.Callable[[], T_Result],
+        not_: typing.Callable[[], T_Result],
+    ) -> T_Result:
         if self is FilterCondition.AND:
             return and_()
         if self is FilterCondition.OR:
             return or_()
+        if self is FilterCondition.NOT:
+            return not_()
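
Because visit() grew a required not_ parameter alongside the new NOT member, any exhaustive dispatch written against 0.1.5 fails loudly here until the new case is handled. A sketch of an illustrative (non-SDK) mapping:

    from llama_cloud.types.filter_condition import FilterCondition

    def to_keyword(cond: FilterCondition) -> str:
        return cond.visit(
            and_=lambda: "AND",
            or_=lambda: "OR",
            not_=lambda: "NOT",  # newly required handler in 0.1.7a1
        )

    print(to_keyword(FilterCondition.NOT))  # NOT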
llama_cloud/types/filter_operator.py CHANGED
@@ -22,6 +22,7 @@ class FilterOperator(str, enum.Enum):
     ANY = "any"
     ALL = "all"
     TEXT_MATCH = "text_match"
+    TEXT_MATCH_INSENSITIVE = "text_match_insensitive"
     CONTAINS = "contains"
     IS_EMPTY = "is_empty"
 
@@ -38,6 +39,7 @@ class FilterOperator(str, enum.Enum):
         any: typing.Callable[[], T_Result],
         all: typing.Callable[[], T_Result],
         text_match: typing.Callable[[], T_Result],
+        text_match_insensitive: typing.Callable[[], T_Result],
         contains: typing.Callable[[], T_Result],
         is_empty: typing.Callable[[], T_Result],
     ) -> T_Result:
@@ -63,6 +65,8 @@ class FilterOperator(str, enum.Enum):
             return all()
         if self is FilterOperator.TEXT_MATCH:
             return text_match()
+        if self is FilterOperator.TEXT_MATCH_INSENSITIVE:
+            return text_match_insensitive()
         if self is FilterOperator.CONTAINS:
             return contains()
         if self is FilterOperator.IS_EMPTY:
llama_cloud/types/image_block.py ADDED
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ImageBlock(pydantic.BaseModel):
+    image: typing.Optional[str]
+    path: typing.Optional[str]
+    url: typing.Optional[str]
+    image_mimetype: typing.Optional[str]
+    detail: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
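
Every ImageBlock field is optional, so a block can point at an image by url or path, or carry inline data in image (presumably base64, by llama-index convention; that interpretation is not stated here). A sketch with a placeholder URL:

    from llama_cloud.types.image_block import ImageBlock

    block = ImageBlock(url="https://example.com/chart.png", image_mimetype="image/png")
    print(block.json())  # exclude_unset=True: only url and image_mimetype appear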
llama_cloud/types/input_message.py CHANGED
@@ -20,7 +20,7 @@ class InputMessage(pydantic.BaseModel):
     This is distinct from a ChatMessage because this schema is enforced by the AI Chat library used in the frontend
     """
 
-    id: str = pydantic.Field(description="ID of the message, if any. a UUID.")
+    id: typing.Optional[str] = pydantic.Field(description="ID of the message, if any. a UUID.")
     role: MessageRole
     content: str
     data: typing.Optional[typing.Dict[str, typing.Any]]
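
Loosening id from str to Optional[str] means a message can now be submitted before a UUID has been assigned. A sketch, assuming MessageRole.USER and the message_role module path from the usual Fern naming (neither is shown in this diff):

    from llama_cloud.types.input_message import InputMessage
    from llama_cloud.types.message_role import MessageRole  # assumed module path

    # No id: valid in 0.1.7a1, rejected by 0.1.5.
    msg = InputMessage(role=MessageRole.USER, content="Summarize the report.")
    print(msg.json())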
llama_cloud/types/job_name_mapping.py CHANGED
@@ -18,6 +18,7 @@ class JobNameMapping(str, enum.Enum):
     PARSE = "PARSE"
     TRANSFORM = "TRANSFORM"
     INGESTION = "INGESTION"
+    METADATA_UPDATE = "METADATA_UPDATE"
 
     def visit(
         self,
@@ -28,6 +29,7 @@ class JobNameMapping(str, enum.Enum):
         parse: typing.Callable[[], T_Result],
         transform: typing.Callable[[], T_Result],
         ingestion: typing.Callable[[], T_Result],
+        metadata_update: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is JobNameMapping.MANAGED_INGESTION:
             return managed_ingestion()
@@ -43,3 +45,5 @@ class JobNameMapping(str, enum.Enum):
             return transform()
         if self is JobNameMapping.INGESTION:
             return ingestion()
+        if self is JobNameMapping.METADATA_UPDATE:
+            return metadata_update()
llama_cloud/types/job_names.py ADDED
@@ -0,0 +1,89 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class JobNames(str, enum.Enum):
+    """
+    Enum for executable pipeline job names.
+    """
+
+    LOAD_DOCUMENTS_JOB = "load_documents_job"
+    LOAD_FILES_JOB = "load_files_job"
+    PLAYGROUND_JOB = "playground_job"
+    EVAL_DATASET_JOB = "eval_dataset_job"
+    PIPELINE_MANAGED_INGESTION_JOB = "pipeline_managed_ingestion_job"
+    DATA_SOURCE_MANAGED_INGESTION_JOB = "data_source_managed_ingestion_job"
+    DATA_SOURCE_UPDATE_DISPATCHER_JOB = "data_source_update_dispatcher_job"
+    PIPELINE_FILE_UPDATE_DISPATCHER_JOB = "pipeline_file_update_dispatcher_job"
+    PIPELINE_FILE_UPDATER_JOB = "pipeline_file_updater_job"
+    FILE_MANAGED_INGESTION_JOB = "file_managed_ingestion_job"
+    DOCUMENT_INGESTION_JOB = "document_ingestion_job"
+    PARSE_RAW_FILE_JOB = "parse_raw_file_job"
+    LLAMA_PARSE_TRANSFORM_JOB = "llama_parse_transform_job"
+    METADATA_UPDATE_JOB = "metadata_update_job"
+    PARSE_RAW_FILE_JOB_CACHED = "parse_raw_file_job_cached"
+    EXTRACTION_JOB = "extraction_job"
+    EXTRACT_JOB = "extract_job"
+    ASYNCIO_TEST_JOB = "asyncio_test_job"
+
+    def visit(
+        self,
+        load_documents_job: typing.Callable[[], T_Result],
+        load_files_job: typing.Callable[[], T_Result],
+        playground_job: typing.Callable[[], T_Result],
+        eval_dataset_job: typing.Callable[[], T_Result],
+        pipeline_managed_ingestion_job: typing.Callable[[], T_Result],
+        data_source_managed_ingestion_job: typing.Callable[[], T_Result],
+        data_source_update_dispatcher_job: typing.Callable[[], T_Result],
+        pipeline_file_update_dispatcher_job: typing.Callable[[], T_Result],
+        pipeline_file_updater_job: typing.Callable[[], T_Result],
+        file_managed_ingestion_job: typing.Callable[[], T_Result],
+        document_ingestion_job: typing.Callable[[], T_Result],
+        parse_raw_file_job: typing.Callable[[], T_Result],
+        llama_parse_transform_job: typing.Callable[[], T_Result],
+        metadata_update_job: typing.Callable[[], T_Result],
+        parse_raw_file_job_cached: typing.Callable[[], T_Result],
+        extraction_job: typing.Callable[[], T_Result],
+        extract_job: typing.Callable[[], T_Result],
+        asyncio_test_job: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is JobNames.LOAD_DOCUMENTS_JOB:
+            return load_documents_job()
+        if self is JobNames.LOAD_FILES_JOB:
+            return load_files_job()
+        if self is JobNames.PLAYGROUND_JOB:
+            return playground_job()
+        if self is JobNames.EVAL_DATASET_JOB:
+            return eval_dataset_job()
+        if self is JobNames.PIPELINE_MANAGED_INGESTION_JOB:
+            return pipeline_managed_ingestion_job()
+        if self is JobNames.DATA_SOURCE_MANAGED_INGESTION_JOB:
+            return data_source_managed_ingestion_job()
+        if self is JobNames.DATA_SOURCE_UPDATE_DISPATCHER_JOB:
+            return data_source_update_dispatcher_job()
+        if self is JobNames.PIPELINE_FILE_UPDATE_DISPATCHER_JOB:
+            return pipeline_file_update_dispatcher_job()
+        if self is JobNames.PIPELINE_FILE_UPDATER_JOB:
+            return pipeline_file_updater_job()
+        if self is JobNames.FILE_MANAGED_INGESTION_JOB:
+            return file_managed_ingestion_job()
+        if self is JobNames.DOCUMENT_INGESTION_JOB:
+            return document_ingestion_job()
+        if self is JobNames.PARSE_RAW_FILE_JOB:
+            return parse_raw_file_job()
+        if self is JobNames.LLAMA_PARSE_TRANSFORM_JOB:
+            return llama_parse_transform_job()
+        if self is JobNames.METADATA_UPDATE_JOB:
+            return metadata_update_job()
+        if self is JobNames.PARSE_RAW_FILE_JOB_CACHED:
+            return parse_raw_file_job_cached()
+        if self is JobNames.EXTRACTION_JOB:
+            return extraction_job()
+        if self is JobNames.EXTRACT_JOB:
+            return extract_job()
+        if self is JobNames.ASYNCIO_TEST_JOB:
+            return asyncio_test_job()
llama_cloud/types/job_record.py ADDED
@@ -0,0 +1,57 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .base import Base
+from .job_names import JobNames
+from .status_enum import StatusEnum
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class JobRecord(pydantic.BaseModel):
+    """
+    Schema for a job's metadata.
+    """
+
+    job_name: JobNames = pydantic.Field(description="The name of the job.")
+    partitions: typing.Dict[str, str] = pydantic.Field(
+        description="The partitions for this execution. Used for determining where to save job output."
+    )
+    parameters: typing.Optional[Base]
+    session_id: typing.Optional[str]
+    correlation_id: typing.Optional[str]
+    parent_job_execution_id: typing.Optional[str]
+    user_id: typing.Optional[str]
+    created_at: dt.datetime = pydantic.Field(description="Creation datetime")
+    project_id: typing.Optional[str]
+    id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
+    status: StatusEnum
+    error_code: typing.Optional[str]
+    error_message: typing.Optional[str]
+    attempts: typing.Optional[int]
+    started_at: typing.Optional[dt.datetime]
+    ended_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    data: typing.Optional[Base]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
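
started_at and ended_at are both optional on JobRecord, so a duration only exists once a job has actually started and ended. A small consumer-side sketch:

    import typing

    from llama_cloud.types.job_record import JobRecord

    def duration_seconds(job: JobRecord) -> typing.Optional[float]:
        # Pending or still-running jobs have no duration yet.
        if job.started_at is None or job.ended_at is None:
            return None
        return (job.ended_at - job.started_at).total_seconds()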
llama_cloud/types/job_record_with_usage_metrics.py ADDED
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .job_record import JobRecord
+from .usage_metric_response import UsageMetricResponse
+from .user_job_record import UserJobRecord
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class JobRecordWithUsageMetrics(pydantic.BaseModel):
+    job_record: JobRecord
+    usage_metrics: typing.Optional[UsageMetricResponse]
+    user: UserJobRecord
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
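
usage_metrics is optional here, so aggregation code has to tolerate jobs with no recorded usage. The sketch below filters rather than reading UsageMetricResponse fields, since that model's shape is not shown in this section:

    import typing

    from llama_cloud.types.job_record_with_usage_metrics import JobRecordWithUsageMetrics

    def with_usage(
        records: typing.Iterable[JobRecordWithUsageMetrics],
    ) -> typing.List[JobRecordWithUsageMetrics]:
        # Keep only entries that actually carry usage metrics.
        return [r for r in records if r.usage_metrics is not None]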