llama-cloud 0.0.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +295 -0
  2. llama_cloud/client.py +72 -0
  3. llama_cloud/core/__init__.py +17 -0
  4. llama_cloud/core/api_error.py +15 -0
  5. llama_cloud/core/client_wrapper.py +51 -0
  6. llama_cloud/core/datetime_utils.py +28 -0
  7. llama_cloud/core/jsonable_encoder.py +103 -0
  8. llama_cloud/core/remove_none_from_dict.py +11 -0
  9. llama_cloud/errors/__init__.py +5 -0
  10. llama_cloud/errors/unprocessable_entity_error.py +9 -0
  11. llama_cloud/resources/__init__.py +40 -0
  12. llama_cloud/resources/api_keys/__init__.py +2 -0
  13. llama_cloud/resources/api_keys/client.py +302 -0
  14. llama_cloud/resources/billing/__init__.py +2 -0
  15. llama_cloud/resources/billing/client.py +234 -0
  16. llama_cloud/resources/component_definitions/__init__.py +2 -0
  17. llama_cloud/resources/component_definitions/client.py +192 -0
  18. llama_cloud/resources/data_sinks/__init__.py +5 -0
  19. llama_cloud/resources/data_sinks/client.py +506 -0
  20. llama_cloud/resources/data_sinks/types/__init__.py +6 -0
  21. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +7 -0
  22. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +17 -0
  23. llama_cloud/resources/data_sources/__init__.py +5 -0
  24. llama_cloud/resources/data_sources/client.py +521 -0
  25. llama_cloud/resources/data_sources/types/__init__.py +7 -0
  26. llama_cloud/resources/data_sources/types/data_source_update_component.py +7 -0
  27. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +19 -0
  28. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +7 -0
  29. llama_cloud/resources/deprecated/__init__.py +2 -0
  30. llama_cloud/resources/deprecated/client.py +982 -0
  31. llama_cloud/resources/evals/__init__.py +2 -0
  32. llama_cloud/resources/evals/client.py +745 -0
  33. llama_cloud/resources/files/__init__.py +5 -0
  34. llama_cloud/resources/files/client.py +560 -0
  35. llama_cloud/resources/files/types/__init__.py +5 -0
  36. llama_cloud/resources/files/types/file_create_resource_info_value.py +5 -0
  37. llama_cloud/resources/parsing/__init__.py +2 -0
  38. llama_cloud/resources/parsing/client.py +982 -0
  39. llama_cloud/resources/pipelines/__init__.py +5 -0
  40. llama_cloud/resources/pipelines/client.py +2599 -0
  41. llama_cloud/resources/pipelines/types/__init__.py +5 -0
  42. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +7 -0
  43. llama_cloud/resources/projects/__init__.py +2 -0
  44. llama_cloud/resources/projects/client.py +1231 -0
  45. llama_cloud/types/__init__.py +253 -0
  46. llama_cloud/types/api_key.py +37 -0
  47. llama_cloud/types/azure_open_ai_embedding.py +75 -0
  48. llama_cloud/types/base.py +26 -0
  49. llama_cloud/types/base_prompt_template.py +44 -0
  50. llama_cloud/types/bedrock_embedding.py +56 -0
  51. llama_cloud/types/chat_message.py +35 -0
  52. llama_cloud/types/cloud_az_storage_blob_data_source.py +40 -0
  53. llama_cloud/types/cloud_chroma_vector_store.py +40 -0
  54. llama_cloud/types/cloud_document.py +36 -0
  55. llama_cloud/types/cloud_document_create.py +36 -0
  56. llama_cloud/types/cloud_gcs_data_source.py +37 -0
  57. llama_cloud/types/cloud_google_drive_data_source.py +36 -0
  58. llama_cloud/types/cloud_one_drive_data_source.py +38 -0
  59. llama_cloud/types/cloud_pinecone_vector_store.py +46 -0
  60. llama_cloud/types/cloud_postgres_vector_store.py +44 -0
  61. llama_cloud/types/cloud_qdrant_vector_store.py +48 -0
  62. llama_cloud/types/cloud_s_3_data_source.py +42 -0
  63. llama_cloud/types/cloud_sharepoint_data_source.py +38 -0
  64. llama_cloud/types/cloud_weaviate_vector_store.py +38 -0
  65. llama_cloud/types/code_splitter.py +46 -0
  66. llama_cloud/types/cohere_embedding.py +46 -0
  67. llama_cloud/types/configurable_data_sink_names.py +37 -0
  68. llama_cloud/types/configurable_data_source_names.py +41 -0
  69. llama_cloud/types/configurable_transformation_definition.py +45 -0
  70. llama_cloud/types/configurable_transformation_names.py +73 -0
  71. llama_cloud/types/configured_transformation_item.py +43 -0
  72. llama_cloud/types/configured_transformation_item_component.py +9 -0
  73. llama_cloud/types/configured_transformation_item_component_one.py +35 -0
  74. llama_cloud/types/data_sink.py +40 -0
  75. llama_cloud/types/data_sink_component.py +7 -0
  76. llama_cloud/types/data_sink_component_one.py +17 -0
  77. llama_cloud/types/data_sink_create.py +36 -0
  78. llama_cloud/types/data_sink_create_component.py +7 -0
  79. llama_cloud/types/data_sink_create_component_one.py +17 -0
  80. llama_cloud/types/data_sink_definition.py +41 -0
  81. llama_cloud/types/data_source.py +44 -0
  82. llama_cloud/types/data_source_component.py +7 -0
  83. llama_cloud/types/data_source_component_one.py +19 -0
  84. llama_cloud/types/data_source_create.py +40 -0
  85. llama_cloud/types/data_source_create_component.py +7 -0
  86. llama_cloud/types/data_source_create_component_one.py +19 -0
  87. llama_cloud/types/data_source_create_custom_metadata_value.py +7 -0
  88. llama_cloud/types/data_source_custom_metadata_value.py +7 -0
  89. llama_cloud/types/data_source_definition.py +41 -0
  90. llama_cloud/types/eval_dataset.py +37 -0
  91. llama_cloud/types/eval_dataset_job_params.py +36 -0
  92. llama_cloud/types/eval_dataset_job_record.py +59 -0
  93. llama_cloud/types/eval_execution_params.py +38 -0
  94. llama_cloud/types/eval_execution_params_override.py +38 -0
  95. llama_cloud/types/eval_llm_model_data.py +33 -0
  96. llama_cloud/types/eval_question.py +39 -0
  97. llama_cloud/types/eval_question_create.py +28 -0
  98. llama_cloud/types/eval_question_result.py +49 -0
  99. llama_cloud/types/file.py +46 -0
  100. llama_cloud/types/file_resource_info_value.py +5 -0
  101. llama_cloud/types/filter_condition.py +21 -0
  102. llama_cloud/types/filter_operator.py +65 -0
  103. llama_cloud/types/gemini_embedding.py +51 -0
  104. llama_cloud/types/html_node_parser.py +44 -0
  105. llama_cloud/types/http_validation_error.py +29 -0
  106. llama_cloud/types/hugging_face_inference_api_embedding.py +68 -0
  107. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  108. llama_cloud/types/json_node_parser.py +43 -0
  109. llama_cloud/types/llama_parse_supported_file_extensions.py +161 -0
  110. llama_cloud/types/llm.py +55 -0
  111. llama_cloud/types/local_eval.py +46 -0
  112. llama_cloud/types/local_eval_results.py +37 -0
  113. llama_cloud/types/local_eval_sets.py +30 -0
  114. llama_cloud/types/managed_ingestion_status.py +37 -0
  115. llama_cloud/types/markdown_element_node_parser.py +49 -0
  116. llama_cloud/types/markdown_node_parser.py +43 -0
  117. llama_cloud/types/message_role.py +45 -0
  118. llama_cloud/types/metadata_filter.py +41 -0
  119. llama_cloud/types/metadata_filter_value.py +5 -0
  120. llama_cloud/types/metadata_filters.py +41 -0
  121. llama_cloud/types/metadata_filters_filters_item.py +8 -0
  122. llama_cloud/types/metric_result.py +30 -0
  123. llama_cloud/types/node_parser.py +37 -0
  124. llama_cloud/types/object_type.py +33 -0
  125. llama_cloud/types/open_ai_embedding.py +73 -0
  126. llama_cloud/types/parser_languages.py +361 -0
  127. llama_cloud/types/parsing_history_item.py +36 -0
  128. llama_cloud/types/parsing_job.py +30 -0
  129. llama_cloud/types/parsing_job_json_result.py +29 -0
  130. llama_cloud/types/parsing_job_markdown_result.py +29 -0
  131. llama_cloud/types/parsing_job_text_result.py +29 -0
  132. llama_cloud/types/parsing_usage.py +29 -0
  133. llama_cloud/types/pipeline.py +64 -0
  134. llama_cloud/types/pipeline_create.py +61 -0
  135. llama_cloud/types/pipeline_data_source.py +46 -0
  136. llama_cloud/types/pipeline_data_source_component.py +7 -0
  137. llama_cloud/types/pipeline_data_source_component_one.py +19 -0
  138. llama_cloud/types/pipeline_data_source_create.py +32 -0
  139. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +7 -0
  140. llama_cloud/types/pipeline_deployment.py +38 -0
  141. llama_cloud/types/pipeline_file.py +52 -0
  142. llama_cloud/types/pipeline_file_create.py +36 -0
  143. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +7 -0
  144. llama_cloud/types/pipeline_file_custom_metadata_value.py +7 -0
  145. llama_cloud/types/pipeline_file_resource_info_value.py +7 -0
  146. llama_cloud/types/pipeline_file_status_response.py +35 -0
  147. llama_cloud/types/pipeline_type.py +21 -0
  148. llama_cloud/types/pooling.py +29 -0
  149. llama_cloud/types/preset_retrieval_params.py +40 -0
  150. llama_cloud/types/presigned_url.py +36 -0
  151. llama_cloud/types/project.py +42 -0
  152. llama_cloud/types/project_create.py +32 -0
  153. llama_cloud/types/prompt_mixin_prompts.py +36 -0
  154. llama_cloud/types/prompt_spec.py +35 -0
  155. llama_cloud/types/pydantic_program_mode.py +41 -0
  156. llama_cloud/types/related_node_info.py +37 -0
  157. llama_cloud/types/retrieve_results.py +40 -0
  158. llama_cloud/types/sentence_splitter.py +48 -0
  159. llama_cloud/types/simple_file_node_parser.py +44 -0
  160. llama_cloud/types/status_enum.py +33 -0
  161. llama_cloud/types/supported_eval_llm_model.py +35 -0
  162. llama_cloud/types/supported_eval_llm_model_names.py +29 -0
  163. llama_cloud/types/text_node.py +62 -0
  164. llama_cloud/types/text_node_relationships_value.py +7 -0
  165. llama_cloud/types/text_node_with_score.py +36 -0
  166. llama_cloud/types/token_text_splitter.py +43 -0
  167. llama_cloud/types/transformation_category_names.py +21 -0
  168. llama_cloud/types/validation_error.py +31 -0
  169. llama_cloud/types/validation_error_loc_item.py +5 -0
  170. llama_cloud-0.0.1.dist-info/LICENSE +21 -0
  171. llama_cloud-0.0.1.dist-info/METADATA +25 -0
  172. llama_cloud-0.0.1.dist-info/RECORD +173 -0
  173. llama_cloud-0.0.1.dist-info/WHEEL +4 -0
llama_cloud/types/eval_dataset_job_record.py
@@ -0,0 +1,59 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ import typing_extensions
+
+ from ..core.datetime_utils import serialize_datetime
+ from .base import Base
+ from .eval_dataset_job_params import EvalDatasetJobParams
+ from .status_enum import StatusEnum
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class EvalDatasetJobRecord(pydantic.BaseModel):
+     """
+     Schema for job that evaluates an EvalDataset against a pipeline.
+     """
+
+     job_name: typing_extensions.Literal["eval_dataset_job"]
+     partitions: typing.Dict[str, str] = pydantic.Field(
+         description="The partitions for this execution. Used for determining where to save job output."
+     )
+     parameters: typing.Optional[EvalDatasetJobParams] = pydantic.Field(
+         description="Additional input parameters for the eval execution."
+     )
+     session_id: typing.Optional[str] = pydantic.Field(
+         description="The upstream request ID that created this job. Used for tracking the job across services."
+     )
+     correlation_id: typing.Optional[str] = pydantic.Field(
+         description="The correlation ID for this job. Used for tracking the job across services."
+     )
+     parent_job_execution_id: typing.Optional[str] = pydantic.Field(description="The ID of the parent job execution.")
+     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
+     status: StatusEnum
+     error_message: typing.Optional[str]
+     attempts: typing.Optional[int] = pydantic.Field(description="The number of times this job has been attempted")
+     started_at: typing.Optional[dt.datetime]
+     ended_at: typing.Optional[dt.datetime]
+     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     data: typing.Optional[Base] = pydantic.Field(description="Additional metadata for the job execution.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_execution_params.py
@@ -0,0 +1,38 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .supported_eval_llm_model_names import SupportedEvalLlmModelNames
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class EvalExecutionParams(pydantic.BaseModel):
+     """
+     Schema for the params for an eval execution.
+     """
+
+     llm_model: typing.Optional[SupportedEvalLlmModelNames] = pydantic.Field(
+         description="The LLM model to use within eval execution."
+     )
+     qa_prompt_tmpl: typing.Optional[str] = pydantic.Field(
+         description="The template to use for the question answering prompt."
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_execution_params_override.py
@@ -0,0 +1,38 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .supported_eval_llm_model_names import SupportedEvalLlmModelNames
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class EvalExecutionParamsOverride(pydantic.BaseModel):
+     """
+     Schema for the params override for an eval execution.
+     """
+
+     llm_model: typing.Optional[SupportedEvalLlmModelNames] = pydantic.Field(
+         description="The LLM model to use within eval execution."
+     )
+     qa_prompt_tmpl: typing.Optional[str] = pydantic.Field(
+         description="The template to use for the question answering prompt."
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_llm_model_data.py
@@ -0,0 +1,33 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class EvalLlmModelData(pydantic.BaseModel):
+     """
+     Schema for an eval LLM model.
+     """
+
+     name: str = pydantic.Field(description="The name of the LLM model.")
+     description: str = pydantic.Field(description="The description of the LLM model.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_question.py
@@ -0,0 +1,39 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class EvalQuestion(pydantic.BaseModel):
+     """
+     Base schema model containing common database fields.
+     """
+
+     id: str = pydantic.Field(description="Unique identifier")
+     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     content: str = pydantic.Field(description="The content of the question.")
+     eval_dataset_id: str
+     eval_dataset_index: int = pydantic.Field(
+         description="The index at which this question is positioned relative to the other questions in the linked EvalDataset. Client is responsible for setting this correctly."
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_question_create.py
@@ -0,0 +1,28 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class EvalQuestionCreate(pydantic.BaseModel):
+     content: str = pydantic.Field(description="The content of the question.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
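
Every generated model overrides json() and dict() to default to by_alias=True and exclude_unset=True, so fields that were never set are dropped from serialized output. A minimal usage sketch, assuming the package is installed and importable via the module path shown above (the question text and printed output are illustrative, not taken from the package):

    from llama_cloud.types.eval_question_create import EvalQuestionCreate

    # Only "content" is set; exclude_unset=True drops everything else from the payload.
    question = EvalQuestionCreate(content="What does the retrieval pipeline return for acronyms?")
    print(question.json())
    # expected output (approximately): {"content": "What does the retrieval pipeline return for acronyms?"}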
llama_cloud/types/eval_question_result.py
@@ -0,0 +1,49 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .eval_execution_params import EvalExecutionParams
+ from .metric_result import MetricResult
+ from .text_node import TextNode
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class EvalQuestionResult(pydantic.BaseModel):
+     """
+     Schema for the result of an eval question job.
+     """
+
+     eval_question_id: str = pydantic.Field(description="The ID of the question that was executed.")
+     pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the question was executed against.")
+     source_nodes: typing.List[TextNode] = pydantic.Field(
+         description="The nodes retrieved by the pipeline for the given question."
+     )
+     answer: str = pydantic.Field(description="The answer to the question.")
+     eval_metrics: typing.Dict[str, MetricResult] = pydantic.Field(description="The eval metrics for the question.")
+     eval_dataset_execution_id: str = pydantic.Field(
+         description="The ID of the EvalDatasetJobRecord that this result was generated from."
+     )
+     eval_dataset_execution_params: EvalExecutionParams = pydantic.Field(
+         description="The EvalExecutionParams that were used when this result was generated."
+     )
+     eval_finished_at: dt.datetime = pydantic.Field(description="The timestamp when the eval finished.")
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/file.py
@@ -0,0 +1,46 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .file_resource_info_value import FileResourceInfoValue
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class File(pydantic.BaseModel):
+     """
+     Schema for a file.
+     """
+
+     id: str = pydantic.Field(description="Unique identifier")
+     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+     name: str
+     file_size: typing.Optional[int] = pydantic.Field(description="Size of the file in bytes")
+     file_type: typing.Optional[str] = pydantic.Field(description="File type (e.g. pdf, docx, etc.)")
+     project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
+     last_modified_at: typing.Optional[dt.datetime] = pydantic.Field(description="The last modified time of the file")
+     resource_info: typing.Optional[typing.Dict[str, FileResourceInfoValue]] = pydantic.Field(
+         description="Resource information for the file"
+     )
+     data_source_id: typing.Optional[str] = pydantic.Field(
+         description="The ID of the data source that the file belongs to"
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/file_resource_info_value.py
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ FileResourceInfoValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
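
FileResourceInfoValue is a loose union, so File.resource_info accepts heterogeneous per-key values (nested dicts, lists, or scalars). A minimal sketch assuming the import paths above; the IDs and metadata values are placeholders invented for illustration:

    from llama_cloud.types.file import File

    f = File(
        id="file_123",                      # placeholder ID
        name="quarterly_report.pdf",
        project_id="proj_456",              # placeholder ID
        resource_info={"source": "upload", "size_bytes": 204800, "tags": ["finance", "2024"]},
    )
    print(f.dict()["resource_info"]["size_bytes"])  # 204800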
llama_cloud/types/filter_condition.py
@@ -0,0 +1,21 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class FilterCondition(str, enum.Enum):
+     """
+     Vector store filter conditions to combine different filters.
+     """
+
+     AND = "and"
+     OR = "or"
+
+     def visit(self, and_: typing.Callable[[], T_Result], or_: typing.Callable[[], T_Result]) -> T_Result:
+         if self is FilterCondition.AND:
+             return and_()
+         if self is FilterCondition.OR:
+             return or_()
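
The generated enums expose an exhaustive visit() dispatcher rather than leaving callers to write ad-hoc if/else chains. A small sketch of how FilterCondition.visit might be used to map a condition onto another representation (the SQL keywords here are illustrative, not something the package provides):

    from llama_cloud.types.filter_condition import FilterCondition

    def to_sql_keyword(condition: FilterCondition) -> str:
        # visit() requires one callable per enum member, so adding a member later
        # surfaces as a missing-argument error rather than a silent fallthrough.
        return condition.visit(and_=lambda: "AND", or_=lambda: "OR")

    print(to_sql_keyword(FilterCondition.AND))  # AND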
llama_cloud/types/filter_operator.py
@@ -0,0 +1,65 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class FilterOperator(str, enum.Enum):
+     """
+     Vector store filter operator.
+     """
+
+     EQUAL_TO = "=="
+     GREATER_THAN = ">"
+     LESS_THAN = "<"
+     NOT_EQUALS = "!="
+     GREATER_THAN_OR_EQUAL_TO = ">="
+     LESS_THAN_OR_EQUAL_TO = "<="
+     IN = "in"
+     NIN = "nin"
+     ANY = "any"
+     ALL = "all"
+     TEXT_MATCH = "text_match"
+     CONTAINS = "contains"
+
+     def visit(
+         self,
+         equal_to: typing.Callable[[], T_Result],
+         greater_than: typing.Callable[[], T_Result],
+         less_than: typing.Callable[[], T_Result],
+         not_equals: typing.Callable[[], T_Result],
+         greater_than_or_equal_to: typing.Callable[[], T_Result],
+         less_than_or_equal_to: typing.Callable[[], T_Result],
+         in_: typing.Callable[[], T_Result],
+         nin: typing.Callable[[], T_Result],
+         any: typing.Callable[[], T_Result],
+         all: typing.Callable[[], T_Result],
+         text_match: typing.Callable[[], T_Result],
+         contains: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is FilterOperator.EQUAL_TO:
+             return equal_to()
+         if self is FilterOperator.GREATER_THAN:
+             return greater_than()
+         if self is FilterOperator.LESS_THAN:
+             return less_than()
+         if self is FilterOperator.NOT_EQUALS:
+             return not_equals()
+         if self is FilterOperator.GREATER_THAN_OR_EQUAL_TO:
+             return greater_than_or_equal_to()
+         if self is FilterOperator.LESS_THAN_OR_EQUAL_TO:
+             return less_than_or_equal_to()
+         if self is FilterOperator.IN:
+             return in_()
+         if self is FilterOperator.NIN:
+             return nin()
+         if self is FilterOperator.ANY:
+             return any()
+         if self is FilterOperator.ALL:
+             return all()
+         if self is FilterOperator.TEXT_MATCH:
+             return text_match()
+         if self is FilterOperator.CONTAINS:
+             return contains()
llama_cloud/types/gemini_embedding.py
@@ -0,0 +1,51 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class GeminiEmbedding(pydantic.BaseModel):
+     """
+     Google Gemini embeddings.
+
+     Args:
+         model_name (str): Model for embedding.
+             Defaults to "models/embedding-001".
+
+         api_key (Optional[str]): API key to access the model. Defaults to None.
+         api_base (Optional[str]): API base to access the model. Defaults to Official Base.
+         transport (Optional[str]): Transport to access the model.
+     """
+
+     model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
+     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     num_workers: typing.Optional[int] = pydantic.Field(
+         description="The number of workers to use for async embedding calls."
+     )
+     title: typing.Optional[str] = pydantic.Field(
+         description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid."
+     )
+     task_type: typing.Optional[str] = pydantic.Field(description="The task for embedding model.")
+     api_key: typing.Optional[str] = pydantic.Field(description="API key to access the model. Defaults to None.")
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
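
All fields on the embedding config models are optional, so a GeminiEmbedding payload can be built from only the parameters you want to set. A sketch assuming the import path above; the API key is a placeholder and the task_type value is a hypothetical example, not taken from the package:

    from llama_cloud.types.gemini_embedding import GeminiEmbedding

    embedding = GeminiEmbedding(
        model_name="models/embedding-001",   # default model named in the docstring above
        api_key="YOUR_GOOGLE_API_KEY",       # placeholder
        task_type="retrieval_document",      # hypothetical task value
    )
    # exclude_unset=True keeps the serialized payload limited to the three fields set above.
    print(embedding.json())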
llama_cloud/types/html_node_parser.py
@@ -0,0 +1,44 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class HtmlNodeParser(pydantic.BaseModel):
+     """
+     HTML node parser.
+
+     Splits a document into Nodes using custom HTML splitting logic.
+
+     Args:
+         include_metadata (bool): whether to include metadata in nodes
+         include_prev_next_rel (bool): whether to include prev/next relationships
+     """
+
+     include_metadata: typing.Optional[bool] = pydantic.Field(
+         description="Whether or not to consider metadata when splitting."
+     )
+     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     tags: typing.Optional[typing.List[str]] = pydantic.Field(description="HTML tags to extract text from.")
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/http_validation_error.py
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .validation_error import ValidationError
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class HttpValidationError(pydantic.BaseModel):
+     detail: typing.Optional[typing.List[ValidationError]]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/hugging_face_inference_api_embedding.py
@@ -0,0 +1,68 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
+ from .pooling import Pooling
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class HuggingFaceInferenceApiEmbedding(pydantic.BaseModel):
+     """
+     Wrapper on the Hugging Face's Inference API for embeddings.
+
+     Overview of the design:
+
+     - Uses the feature extraction task: https://huggingface.co/tasks/feature-extraction
+     """
+
+     model_name: typing.Optional[str] = pydantic.Field(
+         description="Hugging Face model name. If None, the task will be used."
+     )
+     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     num_workers: typing.Optional[int] = pydantic.Field(
+         description="The number of workers to use for async embedding calls."
+     )
+     pooling: typing.Optional[Pooling] = pydantic.Field(
+         description="Pooling strategy. If None, the model's default pooling is used."
+     )
+     query_instruction: typing.Optional[str] = pydantic.Field(
+         description="Instruction to prepend during query embedding."
+     )
+     text_instruction: typing.Optional[str] = pydantic.Field(description="Instruction to prepend during text embedding.")
+     token: typing.Optional[HuggingFaceInferenceApiEmbeddingToken] = pydantic.Field(
+         description="Hugging Face token. Will default to the locally saved token. Pass token=False if you don’t want to send your token to the server."
+     )
+     timeout: typing.Optional[float] = pydantic.Field(
+         description="The maximum number of seconds to wait for a response from the server. Loading a new model in Inference API can take up to several minutes. Defaults to None, meaning it will loop until the server is available."
+     )
+     headers: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
+         description="Additional headers to send to the server. By default only the authorization and user-agent headers are sent. Values in this dictionary will override the default values."
+     )
+     cookies: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
+         description="Additional cookies to send to the server."
+     )
+     task: typing.Optional[str] = pydantic.Field(
+         description="Optional task to pick Hugging Face's recommended model, used when model_name is left as default of None."
+     )
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/hugging_face_inference_api_embedding_token.py
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ HuggingFaceInferenceApiEmbeddingToken = typing.Union[str, bool]
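
The token alias is Union[str, bool], which matches the docstring on HuggingFaceInferenceApiEmbedding above: pass an API token string, or token=False to avoid sending the locally saved token. A sketch under those assumptions; the model name and token value are illustrative placeholders:

    from llama_cloud.types.hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding

    # Either form validates against HuggingFaceInferenceApiEmbeddingToken (str | bool).
    with_token = HuggingFaceInferenceApiEmbedding(model_name="BAAI/bge-small-en-v1.5", token="hf_xxx")
    without_token = HuggingFaceInferenceApiEmbedding(task="feature-extraction", token=False)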