llama_cloud-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +295 -0
  2. llama_cloud/client.py +72 -0
  3. llama_cloud/core/__init__.py +17 -0
  4. llama_cloud/core/api_error.py +15 -0
  5. llama_cloud/core/client_wrapper.py +51 -0
  6. llama_cloud/core/datetime_utils.py +28 -0
  7. llama_cloud/core/jsonable_encoder.py +103 -0
  8. llama_cloud/core/remove_none_from_dict.py +11 -0
  9. llama_cloud/errors/__init__.py +5 -0
  10. llama_cloud/errors/unprocessable_entity_error.py +9 -0
  11. llama_cloud/resources/__init__.py +40 -0
  12. llama_cloud/resources/api_keys/__init__.py +2 -0
  13. llama_cloud/resources/api_keys/client.py +302 -0
  14. llama_cloud/resources/billing/__init__.py +2 -0
  15. llama_cloud/resources/billing/client.py +234 -0
  16. llama_cloud/resources/component_definitions/__init__.py +2 -0
  17. llama_cloud/resources/component_definitions/client.py +192 -0
  18. llama_cloud/resources/data_sinks/__init__.py +5 -0
  19. llama_cloud/resources/data_sinks/client.py +506 -0
  20. llama_cloud/resources/data_sinks/types/__init__.py +6 -0
  21. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +7 -0
  22. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +17 -0
  23. llama_cloud/resources/data_sources/__init__.py +5 -0
  24. llama_cloud/resources/data_sources/client.py +521 -0
  25. llama_cloud/resources/data_sources/types/__init__.py +7 -0
  26. llama_cloud/resources/data_sources/types/data_source_update_component.py +7 -0
  27. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +19 -0
  28. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +7 -0
  29. llama_cloud/resources/deprecated/__init__.py +2 -0
  30. llama_cloud/resources/deprecated/client.py +982 -0
  31. llama_cloud/resources/evals/__init__.py +2 -0
  32. llama_cloud/resources/evals/client.py +745 -0
  33. llama_cloud/resources/files/__init__.py +5 -0
  34. llama_cloud/resources/files/client.py +560 -0
  35. llama_cloud/resources/files/types/__init__.py +5 -0
  36. llama_cloud/resources/files/types/file_create_resource_info_value.py +5 -0
  37. llama_cloud/resources/parsing/__init__.py +2 -0
  38. llama_cloud/resources/parsing/client.py +982 -0
  39. llama_cloud/resources/pipelines/__init__.py +5 -0
  40. llama_cloud/resources/pipelines/client.py +2599 -0
  41. llama_cloud/resources/pipelines/types/__init__.py +5 -0
  42. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +7 -0
  43. llama_cloud/resources/projects/__init__.py +2 -0
  44. llama_cloud/resources/projects/client.py +1231 -0
  45. llama_cloud/types/__init__.py +253 -0
  46. llama_cloud/types/api_key.py +37 -0
  47. llama_cloud/types/azure_open_ai_embedding.py +75 -0
  48. llama_cloud/types/base.py +26 -0
  49. llama_cloud/types/base_prompt_template.py +44 -0
  50. llama_cloud/types/bedrock_embedding.py +56 -0
  51. llama_cloud/types/chat_message.py +35 -0
  52. llama_cloud/types/cloud_az_storage_blob_data_source.py +40 -0
  53. llama_cloud/types/cloud_chroma_vector_store.py +40 -0
  54. llama_cloud/types/cloud_document.py +36 -0
  55. llama_cloud/types/cloud_document_create.py +36 -0
  56. llama_cloud/types/cloud_gcs_data_source.py +37 -0
  57. llama_cloud/types/cloud_google_drive_data_source.py +36 -0
  58. llama_cloud/types/cloud_one_drive_data_source.py +38 -0
  59. llama_cloud/types/cloud_pinecone_vector_store.py +46 -0
  60. llama_cloud/types/cloud_postgres_vector_store.py +44 -0
  61. llama_cloud/types/cloud_qdrant_vector_store.py +48 -0
  62. llama_cloud/types/cloud_s_3_data_source.py +42 -0
  63. llama_cloud/types/cloud_sharepoint_data_source.py +38 -0
  64. llama_cloud/types/cloud_weaviate_vector_store.py +38 -0
  65. llama_cloud/types/code_splitter.py +46 -0
  66. llama_cloud/types/cohere_embedding.py +46 -0
  67. llama_cloud/types/configurable_data_sink_names.py +37 -0
  68. llama_cloud/types/configurable_data_source_names.py +41 -0
  69. llama_cloud/types/configurable_transformation_definition.py +45 -0
  70. llama_cloud/types/configurable_transformation_names.py +73 -0
  71. llama_cloud/types/configured_transformation_item.py +43 -0
  72. llama_cloud/types/configured_transformation_item_component.py +9 -0
  73. llama_cloud/types/configured_transformation_item_component_one.py +35 -0
  74. llama_cloud/types/data_sink.py +40 -0
  75. llama_cloud/types/data_sink_component.py +7 -0
  76. llama_cloud/types/data_sink_component_one.py +17 -0
  77. llama_cloud/types/data_sink_create.py +36 -0
  78. llama_cloud/types/data_sink_create_component.py +7 -0
  79. llama_cloud/types/data_sink_create_component_one.py +17 -0
  80. llama_cloud/types/data_sink_definition.py +41 -0
  81. llama_cloud/types/data_source.py +44 -0
  82. llama_cloud/types/data_source_component.py +7 -0
  83. llama_cloud/types/data_source_component_one.py +19 -0
  84. llama_cloud/types/data_source_create.py +40 -0
  85. llama_cloud/types/data_source_create_component.py +7 -0
  86. llama_cloud/types/data_source_create_component_one.py +19 -0
  87. llama_cloud/types/data_source_create_custom_metadata_value.py +7 -0
  88. llama_cloud/types/data_source_custom_metadata_value.py +7 -0
  89. llama_cloud/types/data_source_definition.py +41 -0
  90. llama_cloud/types/eval_dataset.py +37 -0
  91. llama_cloud/types/eval_dataset_job_params.py +36 -0
  92. llama_cloud/types/eval_dataset_job_record.py +59 -0
  93. llama_cloud/types/eval_execution_params.py +38 -0
  94. llama_cloud/types/eval_execution_params_override.py +38 -0
  95. llama_cloud/types/eval_llm_model_data.py +33 -0
  96. llama_cloud/types/eval_question.py +39 -0
  97. llama_cloud/types/eval_question_create.py +28 -0
  98. llama_cloud/types/eval_question_result.py +49 -0
  99. llama_cloud/types/file.py +46 -0
  100. llama_cloud/types/file_resource_info_value.py +5 -0
  101. llama_cloud/types/filter_condition.py +21 -0
  102. llama_cloud/types/filter_operator.py +65 -0
  103. llama_cloud/types/gemini_embedding.py +51 -0
  104. llama_cloud/types/html_node_parser.py +44 -0
  105. llama_cloud/types/http_validation_error.py +29 -0
  106. llama_cloud/types/hugging_face_inference_api_embedding.py +68 -0
  107. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  108. llama_cloud/types/json_node_parser.py +43 -0
  109. llama_cloud/types/llama_parse_supported_file_extensions.py +161 -0
  110. llama_cloud/types/llm.py +55 -0
  111. llama_cloud/types/local_eval.py +46 -0
  112. llama_cloud/types/local_eval_results.py +37 -0
  113. llama_cloud/types/local_eval_sets.py +30 -0
  114. llama_cloud/types/managed_ingestion_status.py +37 -0
  115. llama_cloud/types/markdown_element_node_parser.py +49 -0
  116. llama_cloud/types/markdown_node_parser.py +43 -0
  117. llama_cloud/types/message_role.py +45 -0
  118. llama_cloud/types/metadata_filter.py +41 -0
  119. llama_cloud/types/metadata_filter_value.py +5 -0
  120. llama_cloud/types/metadata_filters.py +41 -0
  121. llama_cloud/types/metadata_filters_filters_item.py +8 -0
  122. llama_cloud/types/metric_result.py +30 -0
  123. llama_cloud/types/node_parser.py +37 -0
  124. llama_cloud/types/object_type.py +33 -0
  125. llama_cloud/types/open_ai_embedding.py +73 -0
  126. llama_cloud/types/parser_languages.py +361 -0
  127. llama_cloud/types/parsing_history_item.py +36 -0
  128. llama_cloud/types/parsing_job.py +30 -0
  129. llama_cloud/types/parsing_job_json_result.py +29 -0
  130. llama_cloud/types/parsing_job_markdown_result.py +29 -0
  131. llama_cloud/types/parsing_job_text_result.py +29 -0
  132. llama_cloud/types/parsing_usage.py +29 -0
  133. llama_cloud/types/pipeline.py +64 -0
  134. llama_cloud/types/pipeline_create.py +61 -0
  135. llama_cloud/types/pipeline_data_source.py +46 -0
  136. llama_cloud/types/pipeline_data_source_component.py +7 -0
  137. llama_cloud/types/pipeline_data_source_component_one.py +19 -0
  138. llama_cloud/types/pipeline_data_source_create.py +32 -0
  139. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +7 -0
  140. llama_cloud/types/pipeline_deployment.py +38 -0
  141. llama_cloud/types/pipeline_file.py +52 -0
  142. llama_cloud/types/pipeline_file_create.py +36 -0
  143. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +7 -0
  144. llama_cloud/types/pipeline_file_custom_metadata_value.py +7 -0
  145. llama_cloud/types/pipeline_file_resource_info_value.py +7 -0
  146. llama_cloud/types/pipeline_file_status_response.py +35 -0
  147. llama_cloud/types/pipeline_type.py +21 -0
  148. llama_cloud/types/pooling.py +29 -0
  149. llama_cloud/types/preset_retrieval_params.py +40 -0
  150. llama_cloud/types/presigned_url.py +36 -0
  151. llama_cloud/types/project.py +42 -0
  152. llama_cloud/types/project_create.py +32 -0
  153. llama_cloud/types/prompt_mixin_prompts.py +36 -0
  154. llama_cloud/types/prompt_spec.py +35 -0
  155. llama_cloud/types/pydantic_program_mode.py +41 -0
  156. llama_cloud/types/related_node_info.py +37 -0
  157. llama_cloud/types/retrieve_results.py +40 -0
  158. llama_cloud/types/sentence_splitter.py +48 -0
  159. llama_cloud/types/simple_file_node_parser.py +44 -0
  160. llama_cloud/types/status_enum.py +33 -0
  161. llama_cloud/types/supported_eval_llm_model.py +35 -0
  162. llama_cloud/types/supported_eval_llm_model_names.py +29 -0
  163. llama_cloud/types/text_node.py +62 -0
  164. llama_cloud/types/text_node_relationships_value.py +7 -0
  165. llama_cloud/types/text_node_with_score.py +36 -0
  166. llama_cloud/types/token_text_splitter.py +43 -0
  167. llama_cloud/types/transformation_category_names.py +21 -0
  168. llama_cloud/types/validation_error.py +31 -0
  169. llama_cloud/types/validation_error_loc_item.py +5 -0
  170. llama_cloud-0.0.1.dist-info/LICENSE +21 -0
  171. llama_cloud-0.0.1.dist-info/METADATA +25 -0
  172. llama_cloud-0.0.1.dist-info/RECORD +173 -0
  173. llama_cloud-0.0.1.dist-info/WHEEL +4 -0
llama_cloud/types/pipeline.py
@@ -0,0 +1,64 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .configured_transformation_item import ConfiguredTransformationItem
+from .data_sink import DataSink
+from .eval_execution_params import EvalExecutionParams
+from .managed_ingestion_status import ManagedIngestionStatus
+from .pipeline_type import PipelineType
+from .preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class Pipeline(pydantic.BaseModel):
+    """
+    Schema for a pipeline.
+    """
+
+    configured_transformations: typing.List[ConfiguredTransformationItem]
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    name: str
+    project_id: str
+    pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
+        description="Type of pipeline. Either PLAYGROUND or MANAGED."
+    )
+    managed_pipeline_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the ManagedPipeline this playground pipeline is linked to."
+    )
+    preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = pydantic.Field(
+        description="Preset retrieval parameters for the pipeline."
+    )
+    eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
+        description="Eval parameters for the pipeline."
+    )
+    llama_parse_enabled: typing.Optional[bool] = pydantic.Field(
+        description="Whether to use LlamaParse during pipeline execution."
+    )
+    managed_ingestion_status: typing.Optional[ManagedIngestionStatus] = pydantic.Field(
+        description="Status of Managed Ingestion."
+    )
+    data_sink: typing.Optional[DataSink] = pydantic.Field(
+        description="The data sink for the pipeline. If None, the pipeline will use the fully managed data sink."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
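These generated models prefer the pydantic.v1 compatibility layer when pydantic 2.x is installed and fall back to plain pydantic on 1.x; json() and dict() are overridden so serialization defaults to by_alias=True and exclude_unset=True. A minimal usage sketch, not taken from the package docs; the ID values are invented and only fields visible in the hunk above are used:

    from llama_cloud.types.pipeline import Pipeline

    # Optional fields default to None under the pydantic v1 Field(...) semantics used here,
    # so only the required fields are supplied.
    pipeline = Pipeline(
        id="pl_123",  # invented identifier
        name="demo-pipeline",
        project_id="proj_456",  # invented identifier
        configured_transformations=[],
    )
    print(pipeline.json())  # serialized with by_alias=True, exclude_unset=True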
llama_cloud/types/pipeline_create.py
@@ -0,0 +1,61 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .configured_transformation_item import ConfiguredTransformationItem
+from .data_sink_create import DataSinkCreate
+from .eval_execution_params import EvalExecutionParams
+from .pipeline_type import PipelineType
+from .preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PipelineCreate(pydantic.BaseModel):
+    """
+    Schema for creating a pipeline.
+    """
+
+    configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
+        description="List of configured transformations."
+    )
+    data_sink_id: typing.Optional[str] = pydantic.Field(
+        description="Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID."
+    )
+    data_sink: typing.Optional[DataSinkCreate] = pydantic.Field(
+        description="Data sink. When provided instead of data_sink_id, the data sink will be created."
+    )
+    preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = pydantic.Field(
+        description="Preset retrieval parameters for the pipeline."
+    )
+    eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
+        description="Eval parameters for the pipeline."
+    )
+    llama_parse_enabled: typing.Optional[bool] = pydantic.Field(
+        description="Whether to use LlamaParse during pipeline execution."
+    )
+    name: str
+    pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
+        description="Type of pipeline. Either PLAYGROUND or MANAGED."
+    )
+    managed_pipeline_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the ManagedPipeline this playground pipeline is linked to."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
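As the field descriptions state, data_sink_id and data_sink are alternatives: supply the ID of an existing sink, or an inline DataSinkCreate to have one created with the pipeline. A hedged sketch of the first path (the ID value is invented):

    from llama_cloud.types.pipeline_create import PipelineCreate

    request = PipelineCreate(
        name="my-pipeline",
        data_sink_id="ds_789",  # invented ID; reuses an existing data sink instead of creating one inline
    )
    # exclude_unset=True keeps the unspecified optional fields out of the serialized request body
    print(request.json())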
llama_cloud/types/pipeline_data_source.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .configurable_data_source_names import ConfigurableDataSourceNames
+from .pipeline_data_source_component import PipelineDataSourceComponent
+from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PipelineDataSource(pydantic.BaseModel):
+    """
+    Schema for a data source in a pipeline.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    name: str = pydantic.Field(description="The name of the data source.")
+    source_type: ConfigurableDataSourceNames
+    custom_metadata: typing.Optional[typing.Dict[str, PipelineDataSourceCustomMetadataValue]] = pydantic.Field(
+        description="Custom metadata that will be present on all data loaded from the data source"
+    )
+    component: PipelineDataSourceComponent
+    project_id: str
+    data_source_id: str = pydantic.Field(description="The ID of the data source.")
+    pipeline_id: str = pydantic.Field(description="The ID of the pipeline.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pipeline_data_source_component.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .pipeline_data_source_component_one import PipelineDataSourceComponentOne
+
+PipelineDataSourceComponent = typing.Union[typing.Dict[str, typing.Any], PipelineDataSourceComponentOne]
llama_cloud/types/pipeline_data_source_component_one.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_gcs_data_source import CloudGcsDataSource
+from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+from .cloud_one_drive_data_source import CloudOneDriveDataSource
+from .cloud_s_3_data_source import CloudS3DataSource
+from .cloud_sharepoint_data_source import CloudSharepointDataSource
+
+PipelineDataSourceComponentOne = typing.Union[
+    CloudS3DataSource,
+    CloudAzStorageBlobDataSource,
+    CloudGcsDataSource,
+    CloudGoogleDriveDataSource,
+    CloudOneDriveDataSource,
+    CloudSharepointDataSource,
+]
llama_cloud/types/pipeline_data_source_create.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PipelineDataSourceCreate(pydantic.BaseModel):
+    """
+    Schema for creating an association between a data source and a pipeline.
+    """
+
+    data_source_id: str = pydantic.Field(description="The ID of the data source.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pipeline_data_source_custom_metadata_value.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PipelineDataSourceCustomMetadataValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/pipeline_deployment.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .managed_ingestion_status import ManagedIngestionStatus
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PipelineDeployment(pydantic.BaseModel):
+    """
+    Base schema model containing common database fields.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    status: ManagedIngestionStatus = pydantic.Field(description="Status of the pipeline deployment.")
+    started_at: typing.Optional[dt.datetime] = pydantic.Field(description="Time the pipeline deployment started.")
+    ended_at: typing.Optional[dt.datetime] = pydantic.Field(description="Time the pipeline deployment finished.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pipeline_file.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
+from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PipelineFile(pydantic.BaseModel):
+    """
+    Schema for a file that is associated with a pipeline.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    name: typing.Optional[str]
+    file_size: typing.Optional[int] = pydantic.Field(description="Size of the file in bytes")
+    file_type: typing.Optional[str] = pydantic.Field(description="File type (e.g. pdf, docx, etc.)")
+    project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
+    last_modified_at: typing.Optional[dt.datetime] = pydantic.Field(description="The last modified time of the file")
+    resource_info: typing.Optional[typing.Dict[str, PipelineFileResourceInfoValue]] = pydantic.Field(
+        description="Resource information for the file"
+    )
+    data_source_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the data source that the file belongs to"
+    )
+    file_id: typing.Optional[str] = pydantic.Field(description="The ID of the file")
+    pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the file is associated with")
+    custom_metadata: typing.Optional[typing.Dict[str, PipelineFileCustomMetadataValue]] = pydantic.Field(
+        description="Custom metadata for the file"
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pipeline_file_create.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PipelineFileCreate(pydantic.BaseModel):
+    """
+    Schema for creating a file that is associated with a pipeline.
+    """
+
+    file_id: str = pydantic.Field(description="The ID of the file")
+    custom_metadata: typing.Optional[typing.Dict[str, PipelineFileCreateCustomMetadataValue]] = pydantic.Field(
+        description="Custom metadata for the file"
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pipeline_file_create_custom_metadata_value.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PipelineFileCreateCustomMetadataValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/pipeline_file_custom_metadata_value.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PipelineFileCustomMetadataValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/pipeline_file_resource_info_value.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PipelineFileResourceInfoValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/pipeline_file_status_response.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .managed_ingestion_status import ManagedIngestionStatus
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PipelineFileStatusResponse(pydantic.BaseModel):
+    """
+    Schema for the status of a pipeline file.
+    """
+
+    file_id: str = pydantic.Field(description="The ID of the file")
+    pipeline_id: str = pydantic.Field(description="The ID of the pipeline")
+    status: ManagedIngestionStatus = pydantic.Field(description="The status of the pipeline file")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pipeline_type.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class PipelineType(str, enum.Enum):
+    """
+    Enum for representing the type of a pipeline
+    """
+
+    PLAYGROUND = "PLAYGROUND"
+    MANAGED = "MANAGED"
+
+    def visit(self, playground: typing.Callable[[], T_Result], managed: typing.Callable[[], T_Result]) -> T_Result:
+        if self is PipelineType.PLAYGROUND:
+            return playground()
+        if self is PipelineType.MANAGED:
+            return managed()
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class Pooling(str, enum.Enum):
10
+ """
11
+ Enum of possible pooling choices with pooling behaviors.
12
+ """
13
+
14
+ CLS = "cls"
15
+ MEAN = "mean"
16
+ LAST = "last"
17
+
18
+ def visit(
19
+ self,
20
+ cls: typing.Callable[[], T_Result],
21
+ mean: typing.Callable[[], T_Result],
22
+ last: typing.Callable[[], T_Result],
23
+ ) -> T_Result:
24
+ if self is Pooling.CLS:
25
+ return cls()
26
+ if self is Pooling.MEAN:
27
+ return mean()
28
+ if self is Pooling.LAST:
29
+ return last()
llama_cloud/types/preset_retrieval_params.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .metadata_filters import MetadataFilters
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PresetRetrievalParams(pydantic.BaseModel):
+    """
+    Schema for the search params for an retrieval execution that can be preset for a pipeline.
+    """
+
+    dense_similarity_top_k: typing.Optional[int] = pydantic.Field(description="Number of nodes for dense retrieval.")
+    sparse_similarity_top_k: typing.Optional[int] = pydantic.Field(description="Number of nodes for sparse retrieval.")
+    enable_reranking: typing.Optional[bool] = pydantic.Field(description="Enable reranking for retrieval")
+    rerank_top_n: typing.Optional[int] = pydantic.Field(description="Number of reranked nodes for returning.")
+    alpha: typing.Optional[float] = pydantic.Field(
+        description="Alpha value for hybrid retrieval to determine the weights between dense and sparse retrieval. 0 is sparse retrieval and 1 is dense retrieval."
+    )
+    search_filters: typing.Optional[MetadataFilters] = pydantic.Field(description="Search filters for retrieval.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
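Per the alpha description, hybrid retrieval is weighted between sparse (0.0) and dense (1.0). A sketch with invented values that presets balanced hybrid retrieval plus reranking (every field here is optional):

    from llama_cloud.types.preset_retrieval_params import PresetRetrievalParams

    params = PresetRetrievalParams(
        dense_similarity_top_k=5,
        sparse_similarity_top_k=10,
        enable_reranking=True,
        rerank_top_n=3,
        alpha=0.5,  # 0.0 = sparse only, 1.0 = dense only
    )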
llama_cloud/types/presigned_url.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PresignedUrl(pydantic.BaseModel):
+    """
+    Schema for a presigned URL.
+    """
+
+    url: str = pydantic.Field(description="A presigned URL for IO operations against a private file")
+    expires_at: dt.datetime = pydantic.Field(description="The time at which the presigned URL expires")
+    form_fields: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
+        description="Form fields for a presigned POST request"
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/project.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .pipeline import Pipeline
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class Project(pydantic.BaseModel):
+    """
+    Schema for a project.
+    """
+
+    name: str
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    pipelines: typing.List[Pipeline]
+    ad_hoc_eval_dataset_id: typing.Optional[str]
+    user_id: str = pydantic.Field(description="The user ID of the project owner.")
+    is_default: typing.Optional[bool] = pydantic.Field(
+        description="Whether this project is the default project for the user."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/project_create.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ProjectCreate(pydantic.BaseModel):
+    """
+    Schema for creating a project.
+    """
+
+    name: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}