llama_cloud-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +295 -0
  2. llama_cloud/client.py +72 -0
  3. llama_cloud/core/__init__.py +17 -0
  4. llama_cloud/core/api_error.py +15 -0
  5. llama_cloud/core/client_wrapper.py +51 -0
  6. llama_cloud/core/datetime_utils.py +28 -0
  7. llama_cloud/core/jsonable_encoder.py +103 -0
  8. llama_cloud/core/remove_none_from_dict.py +11 -0
  9. llama_cloud/errors/__init__.py +5 -0
  10. llama_cloud/errors/unprocessable_entity_error.py +9 -0
  11. llama_cloud/resources/__init__.py +40 -0
  12. llama_cloud/resources/api_keys/__init__.py +2 -0
  13. llama_cloud/resources/api_keys/client.py +302 -0
  14. llama_cloud/resources/billing/__init__.py +2 -0
  15. llama_cloud/resources/billing/client.py +234 -0
  16. llama_cloud/resources/component_definitions/__init__.py +2 -0
  17. llama_cloud/resources/component_definitions/client.py +192 -0
  18. llama_cloud/resources/data_sinks/__init__.py +5 -0
  19. llama_cloud/resources/data_sinks/client.py +506 -0
  20. llama_cloud/resources/data_sinks/types/__init__.py +6 -0
  21. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +7 -0
  22. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +17 -0
  23. llama_cloud/resources/data_sources/__init__.py +5 -0
  24. llama_cloud/resources/data_sources/client.py +521 -0
  25. llama_cloud/resources/data_sources/types/__init__.py +7 -0
  26. llama_cloud/resources/data_sources/types/data_source_update_component.py +7 -0
  27. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +19 -0
  28. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +7 -0
  29. llama_cloud/resources/deprecated/__init__.py +2 -0
  30. llama_cloud/resources/deprecated/client.py +982 -0
  31. llama_cloud/resources/evals/__init__.py +2 -0
  32. llama_cloud/resources/evals/client.py +745 -0
  33. llama_cloud/resources/files/__init__.py +5 -0
  34. llama_cloud/resources/files/client.py +560 -0
  35. llama_cloud/resources/files/types/__init__.py +5 -0
  36. llama_cloud/resources/files/types/file_create_resource_info_value.py +5 -0
  37. llama_cloud/resources/parsing/__init__.py +2 -0
  38. llama_cloud/resources/parsing/client.py +982 -0
  39. llama_cloud/resources/pipelines/__init__.py +5 -0
  40. llama_cloud/resources/pipelines/client.py +2599 -0
  41. llama_cloud/resources/pipelines/types/__init__.py +5 -0
  42. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +7 -0
  43. llama_cloud/resources/projects/__init__.py +2 -0
  44. llama_cloud/resources/projects/client.py +1231 -0
  45. llama_cloud/types/__init__.py +253 -0
  46. llama_cloud/types/api_key.py +37 -0
  47. llama_cloud/types/azure_open_ai_embedding.py +75 -0
  48. llama_cloud/types/base.py +26 -0
  49. llama_cloud/types/base_prompt_template.py +44 -0
  50. llama_cloud/types/bedrock_embedding.py +56 -0
  51. llama_cloud/types/chat_message.py +35 -0
  52. llama_cloud/types/cloud_az_storage_blob_data_source.py +40 -0
  53. llama_cloud/types/cloud_chroma_vector_store.py +40 -0
  54. llama_cloud/types/cloud_document.py +36 -0
  55. llama_cloud/types/cloud_document_create.py +36 -0
  56. llama_cloud/types/cloud_gcs_data_source.py +37 -0
  57. llama_cloud/types/cloud_google_drive_data_source.py +36 -0
  58. llama_cloud/types/cloud_one_drive_data_source.py +38 -0
  59. llama_cloud/types/cloud_pinecone_vector_store.py +46 -0
  60. llama_cloud/types/cloud_postgres_vector_store.py +44 -0
  61. llama_cloud/types/cloud_qdrant_vector_store.py +48 -0
  62. llama_cloud/types/cloud_s_3_data_source.py +42 -0
  63. llama_cloud/types/cloud_sharepoint_data_source.py +38 -0
  64. llama_cloud/types/cloud_weaviate_vector_store.py +38 -0
  65. llama_cloud/types/code_splitter.py +46 -0
  66. llama_cloud/types/cohere_embedding.py +46 -0
  67. llama_cloud/types/configurable_data_sink_names.py +37 -0
  68. llama_cloud/types/configurable_data_source_names.py +41 -0
  69. llama_cloud/types/configurable_transformation_definition.py +45 -0
  70. llama_cloud/types/configurable_transformation_names.py +73 -0
  71. llama_cloud/types/configured_transformation_item.py +43 -0
  72. llama_cloud/types/configured_transformation_item_component.py +9 -0
  73. llama_cloud/types/configured_transformation_item_component_one.py +35 -0
  74. llama_cloud/types/data_sink.py +40 -0
  75. llama_cloud/types/data_sink_component.py +7 -0
  76. llama_cloud/types/data_sink_component_one.py +17 -0
  77. llama_cloud/types/data_sink_create.py +36 -0
  78. llama_cloud/types/data_sink_create_component.py +7 -0
  79. llama_cloud/types/data_sink_create_component_one.py +17 -0
  80. llama_cloud/types/data_sink_definition.py +41 -0
  81. llama_cloud/types/data_source.py +44 -0
  82. llama_cloud/types/data_source_component.py +7 -0
  83. llama_cloud/types/data_source_component_one.py +19 -0
  84. llama_cloud/types/data_source_create.py +40 -0
  85. llama_cloud/types/data_source_create_component.py +7 -0
  86. llama_cloud/types/data_source_create_component_one.py +19 -0
  87. llama_cloud/types/data_source_create_custom_metadata_value.py +7 -0
  88. llama_cloud/types/data_source_custom_metadata_value.py +7 -0
  89. llama_cloud/types/data_source_definition.py +41 -0
  90. llama_cloud/types/eval_dataset.py +37 -0
  91. llama_cloud/types/eval_dataset_job_params.py +36 -0
  92. llama_cloud/types/eval_dataset_job_record.py +59 -0
  93. llama_cloud/types/eval_execution_params.py +38 -0
  94. llama_cloud/types/eval_execution_params_override.py +38 -0
  95. llama_cloud/types/eval_llm_model_data.py +33 -0
  96. llama_cloud/types/eval_question.py +39 -0
  97. llama_cloud/types/eval_question_create.py +28 -0
  98. llama_cloud/types/eval_question_result.py +49 -0
  99. llama_cloud/types/file.py +46 -0
  100. llama_cloud/types/file_resource_info_value.py +5 -0
  101. llama_cloud/types/filter_condition.py +21 -0
  102. llama_cloud/types/filter_operator.py +65 -0
  103. llama_cloud/types/gemini_embedding.py +51 -0
  104. llama_cloud/types/html_node_parser.py +44 -0
  105. llama_cloud/types/http_validation_error.py +29 -0
  106. llama_cloud/types/hugging_face_inference_api_embedding.py +68 -0
  107. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  108. llama_cloud/types/json_node_parser.py +43 -0
  109. llama_cloud/types/llama_parse_supported_file_extensions.py +161 -0
  110. llama_cloud/types/llm.py +55 -0
  111. llama_cloud/types/local_eval.py +46 -0
  112. llama_cloud/types/local_eval_results.py +37 -0
  113. llama_cloud/types/local_eval_sets.py +30 -0
  114. llama_cloud/types/managed_ingestion_status.py +37 -0
  115. llama_cloud/types/markdown_element_node_parser.py +49 -0
  116. llama_cloud/types/markdown_node_parser.py +43 -0
  117. llama_cloud/types/message_role.py +45 -0
  118. llama_cloud/types/metadata_filter.py +41 -0
  119. llama_cloud/types/metadata_filter_value.py +5 -0
  120. llama_cloud/types/metadata_filters.py +41 -0
  121. llama_cloud/types/metadata_filters_filters_item.py +8 -0
  122. llama_cloud/types/metric_result.py +30 -0
  123. llama_cloud/types/node_parser.py +37 -0
  124. llama_cloud/types/object_type.py +33 -0
  125. llama_cloud/types/open_ai_embedding.py +73 -0
  126. llama_cloud/types/parser_languages.py +361 -0
  127. llama_cloud/types/parsing_history_item.py +36 -0
  128. llama_cloud/types/parsing_job.py +30 -0
  129. llama_cloud/types/parsing_job_json_result.py +29 -0
  130. llama_cloud/types/parsing_job_markdown_result.py +29 -0
  131. llama_cloud/types/parsing_job_text_result.py +29 -0
  132. llama_cloud/types/parsing_usage.py +29 -0
  133. llama_cloud/types/pipeline.py +64 -0
  134. llama_cloud/types/pipeline_create.py +61 -0
  135. llama_cloud/types/pipeline_data_source.py +46 -0
  136. llama_cloud/types/pipeline_data_source_component.py +7 -0
  137. llama_cloud/types/pipeline_data_source_component_one.py +19 -0
  138. llama_cloud/types/pipeline_data_source_create.py +32 -0
  139. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +7 -0
  140. llama_cloud/types/pipeline_deployment.py +38 -0
  141. llama_cloud/types/pipeline_file.py +52 -0
  142. llama_cloud/types/pipeline_file_create.py +36 -0
  143. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +7 -0
  144. llama_cloud/types/pipeline_file_custom_metadata_value.py +7 -0
  145. llama_cloud/types/pipeline_file_resource_info_value.py +7 -0
  146. llama_cloud/types/pipeline_file_status_response.py +35 -0
  147. llama_cloud/types/pipeline_type.py +21 -0
  148. llama_cloud/types/pooling.py +29 -0
  149. llama_cloud/types/preset_retrieval_params.py +40 -0
  150. llama_cloud/types/presigned_url.py +36 -0
  151. llama_cloud/types/project.py +42 -0
  152. llama_cloud/types/project_create.py +32 -0
  153. llama_cloud/types/prompt_mixin_prompts.py +36 -0
  154. llama_cloud/types/prompt_spec.py +35 -0
  155. llama_cloud/types/pydantic_program_mode.py +41 -0
  156. llama_cloud/types/related_node_info.py +37 -0
  157. llama_cloud/types/retrieve_results.py +40 -0
  158. llama_cloud/types/sentence_splitter.py +48 -0
  159. llama_cloud/types/simple_file_node_parser.py +44 -0
  160. llama_cloud/types/status_enum.py +33 -0
  161. llama_cloud/types/supported_eval_llm_model.py +35 -0
  162. llama_cloud/types/supported_eval_llm_model_names.py +29 -0
  163. llama_cloud/types/text_node.py +62 -0
  164. llama_cloud/types/text_node_relationships_value.py +7 -0
  165. llama_cloud/types/text_node_with_score.py +36 -0
  166. llama_cloud/types/token_text_splitter.py +43 -0
  167. llama_cloud/types/transformation_category_names.py +21 -0
  168. llama_cloud/types/validation_error.py +31 -0
  169. llama_cloud/types/validation_error_loc_item.py +5 -0
  170. llama_cloud-0.0.1.dist-info/LICENSE +21 -0
  171. llama_cloud-0.0.1.dist-info/METADATA +25 -0
  172. llama_cloud-0.0.1.dist-info/RECORD +173 -0
  173. llama_cloud-0.0.1.dist-info/WHEEL +4 -0
llama_cloud/types/json_node_parser.py
@@ -0,0 +1,43 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class JsonNodeParser(pydantic.BaseModel):
+     """
+     JSON node parser.
+
+     Splits a document into Nodes using custom JSON splitting logic.
+
+     Args:
+         include_metadata (bool): whether to include metadata in nodes
+         include_prev_next_rel (bool): whether to include prev/next relationships
+     """
+
+     include_metadata: typing.Optional[bool] = pydantic.Field(
+         description="Whether or not to consider metadata when splitting."
+     )
+     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
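For orientation, a usage sketch of the model above (the import path comes from the file list; values are illustrative):

# Illustrative sketch, not taken from the package's own documentation.
from llama_cloud.types.json_node_parser import JsonNodeParser

parser = JsonNodeParser(include_metadata=True, include_prev_next_rel=False)

# The overridden json()/dict() inject by_alias=True and exclude_unset=True, so
# optional fields that were never set (callback_manager, class_name) are omitted.
print(parser.json())  # {"include_metadata": true, "include_prev_next_rel": false}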
llama_cloud/types/llama_parse_supported_file_extensions.py
@@ -0,0 +1,161 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class LlamaParseSupportedFileExtensions(str, enum.Enum):
+     """
+     An enumeration.
+     """
+
+     PDF = ".pdf"
+     DOC = ".doc"
+     DOCX = ".docx"
+     DOCM = ".docm"
+     DOT = ".dot"
+     DOTX = ".dotx"
+     DOTM = ".dotm"
+     RTF = ".rtf"
+     WPS = ".wps"
+     WPD = ".wpd"
+     SXW = ".sxw"
+     STW = ".stw"
+     SXG = ".sxg"
+     PAGES = ".pages"
+     MW = ".mw"
+     MCW = ".mcw"
+     UOT = ".uot"
+     UOF = ".uof"
+     UOS = ".uos"
+     UOP = ".uop"
+     PPT = ".ppt"
+     PPTX = ".pptx"
+     POT = ".pot"
+     PPTM = ".pptm"
+     POTX = ".potx"
+     POTM = ".potm"
+     KEY = ".key"
+     ODP = ".odp"
+     ODG = ".odg"
+     OTP = ".otp"
+     FOPD = ".fopd"
+     SXI = ".sxi"
+     STI = ".sti"
+     EPUB = ".epub"
+     HTML = ".html"
+     HTM = ".htm"
+
+     def visit(
+         self,
+         pdf: typing.Callable[[], T_Result],
+         doc: typing.Callable[[], T_Result],
+         docx: typing.Callable[[], T_Result],
+         docm: typing.Callable[[], T_Result],
+         dot: typing.Callable[[], T_Result],
+         dotx: typing.Callable[[], T_Result],
+         dotm: typing.Callable[[], T_Result],
+         rtf: typing.Callable[[], T_Result],
+         wps: typing.Callable[[], T_Result],
+         wpd: typing.Callable[[], T_Result],
+         sxw: typing.Callable[[], T_Result],
+         stw: typing.Callable[[], T_Result],
+         sxg: typing.Callable[[], T_Result],
+         pages: typing.Callable[[], T_Result],
+         mw: typing.Callable[[], T_Result],
+         mcw: typing.Callable[[], T_Result],
+         uot: typing.Callable[[], T_Result],
+         uof: typing.Callable[[], T_Result],
+         uos: typing.Callable[[], T_Result],
+         uop: typing.Callable[[], T_Result],
+         ppt: typing.Callable[[], T_Result],
+         pptx: typing.Callable[[], T_Result],
+         pot: typing.Callable[[], T_Result],
+         pptm: typing.Callable[[], T_Result],
+         potx: typing.Callable[[], T_Result],
+         potm: typing.Callable[[], T_Result],
+         key: typing.Callable[[], T_Result],
+         odp: typing.Callable[[], T_Result],
+         odg: typing.Callable[[], T_Result],
+         otp: typing.Callable[[], T_Result],
+         fopd: typing.Callable[[], T_Result],
+         sxi: typing.Callable[[], T_Result],
+         sti: typing.Callable[[], T_Result],
+         epub: typing.Callable[[], T_Result],
+         html: typing.Callable[[], T_Result],
+         htm: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is LlamaParseSupportedFileExtensions.PDF:
+             return pdf()
+         if self is LlamaParseSupportedFileExtensions.DOC:
+             return doc()
+         if self is LlamaParseSupportedFileExtensions.DOCX:
+             return docx()
+         if self is LlamaParseSupportedFileExtensions.DOCM:
+             return docm()
+         if self is LlamaParseSupportedFileExtensions.DOT:
+             return dot()
+         if self is LlamaParseSupportedFileExtensions.DOTX:
+             return dotx()
+         if self is LlamaParseSupportedFileExtensions.DOTM:
+             return dotm()
+         if self is LlamaParseSupportedFileExtensions.RTF:
+             return rtf()
+         if self is LlamaParseSupportedFileExtensions.WPS:
+             return wps()
+         if self is LlamaParseSupportedFileExtensions.WPD:
+             return wpd()
+         if self is LlamaParseSupportedFileExtensions.SXW:
+             return sxw()
+         if self is LlamaParseSupportedFileExtensions.STW:
+             return stw()
+         if self is LlamaParseSupportedFileExtensions.SXG:
+             return sxg()
+         if self is LlamaParseSupportedFileExtensions.PAGES:
+             return pages()
+         if self is LlamaParseSupportedFileExtensions.MW:
+             return mw()
+         if self is LlamaParseSupportedFileExtensions.MCW:
+             return mcw()
+         if self is LlamaParseSupportedFileExtensions.UOT:
+             return uot()
+         if self is LlamaParseSupportedFileExtensions.UOF:
+             return uof()
+         if self is LlamaParseSupportedFileExtensions.UOS:
+             return uos()
+         if self is LlamaParseSupportedFileExtensions.UOP:
+             return uop()
+         if self is LlamaParseSupportedFileExtensions.PPT:
+             return ppt()
+         if self is LlamaParseSupportedFileExtensions.PPTX:
+             return pptx()
+         if self is LlamaParseSupportedFileExtensions.POT:
+             return pot()
+         if self is LlamaParseSupportedFileExtensions.PPTM:
+             return pptm()
+         if self is LlamaParseSupportedFileExtensions.POTX:
+             return potx()
+         if self is LlamaParseSupportedFileExtensions.POTM:
+             return potm()
+         if self is LlamaParseSupportedFileExtensions.KEY:
+             return key()
+         if self is LlamaParseSupportedFileExtensions.ODP:
+             return odp()
+         if self is LlamaParseSupportedFileExtensions.ODG:
+             return odg()
+         if self is LlamaParseSupportedFileExtensions.OTP:
+             return otp()
+         if self is LlamaParseSupportedFileExtensions.FOPD:
+             return fopd()
+         if self is LlamaParseSupportedFileExtensions.SXI:
+             return sxi()
+         if self is LlamaParseSupportedFileExtensions.STI:
+             return sti()
+         if self is LlamaParseSupportedFileExtensions.EPUB:
+             return epub()
+         if self is LlamaParseSupportedFileExtensions.HTML:
+             return html()
+         if self is LlamaParseSupportedFileExtensions.HTM:
+             return htm()
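A quick sketch of how this string-valued enum behaves (illustrative, not from the package docs):

from llama_cloud.types.llama_parse_supported_file_extensions import (
    LlamaParseSupportedFileExtensions,
)

ext = LlamaParseSupportedFileExtensions(".docx")  # look a member up by its value
assert ext is LlamaParseSupportedFileExtensions.DOCX
assert ext == ".docx"  # str subclass, so members compare equal to plain strings

# visit() is the generated exhaustive-match helper: it takes one zero-argument
# callback per member and dispatches on enum identity, as the code above shows.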
llama_cloud/types/llm.py
@@ -0,0 +1,55 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .base_prompt_template import BasePromptTemplate
+ from .pydantic_program_mode import PydanticProgramMode
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class Llm(pydantic.BaseModel):
+     """
+     The LLM class is the main class for interacting with language models.
+
+     Attributes:
+         system_prompt (Optional[str]):
+             System prompt for LLM calls.
+         messages_to_prompt (Callable):
+             Function to convert a list of messages to an LLM prompt.
+         completion_to_prompt (Callable):
+             Function to convert a completion to an LLM prompt.
+         output_parser (Optional[BaseOutputParser]):
+             Output parser to parse, validate, and correct errors programmatically.
+         pydantic_program_mode (PydanticProgramMode):
+             Pydantic program mode to use for structured prediction.
+     """
+
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     system_prompt: typing.Optional[str] = pydantic.Field(description="System prompt for LLM calls.")
+     output_parser: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+         description="Output parser to parse, validate, and correct errors programmatically."
+     )
+     pydantic_program_mode: typing.Optional[PydanticProgramMode]
+     query_wrapper_prompt: typing.Optional[BasePromptTemplate] = pydantic.Field(
+         description="Query wrapper prompt for LLM calls."
+     )
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/local_eval.py
@@ -0,0 +1,46 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class LocalEval(pydantic.BaseModel):
+     """
+     Evaluation result, EvaluationResult from llama_index.
+
+     Output of a BaseEvaluator.
+     """
+
+     query: typing.Optional[str] = pydantic.Field(description="Query string")
+     contexts: typing.Optional[typing.List[str]] = pydantic.Field(description="Context strings")
+     response: typing.Optional[str] = pydantic.Field(description="Response string")
+     passing: typing.Optional[bool] = pydantic.Field(description="Binary evaluation result (passing or not)")
+     feedback: typing.Optional[str] = pydantic.Field(description="Feedback or reasoning for the response")
+     score: typing.Optional[float] = pydantic.Field(description="Score for the response")
+     pairwise_source: typing.Optional[str] = pydantic.Field(
+         description="Used only for pairwise and specifies whether it is from original order of presented answers or flipped order"
+     )
+     invalid_result: typing.Optional[bool] = pydantic.Field(
+         description="Whether the evaluation result is an invalid one."
+     )
+     invalid_reason: typing.Optional[str] = pydantic.Field(description="Reason for invalid evaluation.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/local_eval_results.py
@@ -0,0 +1,37 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .local_eval import LocalEval
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class LocalEvalResults(pydantic.BaseModel):
+     """
+     Schema for the result of a local evaluation.
+     """
+
+     project_id: str = pydantic.Field(description="The ID of the project.")
+     eval_set_id: typing.Optional[str] = pydantic.Field(description="The ID of the local eval result set.")
+     app_name: str = pydantic.Field(description="The name of the app.")
+     eval_name: str = pydantic.Field(description="The name of the eval.")
+     result: LocalEval = pydantic.Field(description="The eval results.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
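A construction sketch for the two eval models above (IDs and strings are made up for illustration):

from llama_cloud.types.local_eval import LocalEval
from llama_cloud.types.local_eval_results import LocalEvalResults

result = LocalEvalResults(
    project_id="proj-123",  # illustrative ID, not a real one
    app_name="my-rag-app",
    eval_name="faithfulness",
    result=LocalEval(
        query="What does the contract say about renewal?",
        response="It auto-renews annually unless cancelled in writing.",
        passing=True,
        score=1.0,
    ),
)
print(result.json())  # eval_set_id is optional and omitted because it was never set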
llama_cloud/types/local_eval_sets.py
@@ -0,0 +1,30 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class LocalEvalSets(pydantic.BaseModel):
+     eval_set_id: str = pydantic.Field(description="The ID of the eval set.")
+     app_name: str = pydantic.Field(description="The name of the app.")
+     upload_time: dt.datetime = pydantic.Field(description="The time of the upload.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/managed_ingestion_status.py
@@ -0,0 +1,37 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class ManagedIngestionStatus(str, enum.Enum):
+     """
+     Status of managed ingestion with partial Updates.
+     """
+
+     NOT_STARTED = "NOT_STARTED"
+     IN_PROGRESS = "IN_PROGRESS"
+     SUCCESS = "SUCCESS"
+     ERROR = "ERROR"
+     PARTIAL_SUCCESS = "PARTIAL_SUCCESS"
+
+     def visit(
+         self,
+         not_started: typing.Callable[[], T_Result],
+         in_progress: typing.Callable[[], T_Result],
+         success: typing.Callable[[], T_Result],
+         error: typing.Callable[[], T_Result],
+         partial_success: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is ManagedIngestionStatus.NOT_STARTED:
+             return not_started()
+         if self is ManagedIngestionStatus.IN_PROGRESS:
+             return in_progress()
+         if self is ManagedIngestionStatus.SUCCESS:
+             return success()
+         if self is ManagedIngestionStatus.ERROR:
+             return error()
+         if self is ManagedIngestionStatus.PARTIAL_SUCCESS:
+             return partial_success()
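Illustrative use of the generated visit() helper on this status enum:

from llama_cloud.types.managed_ingestion_status import ManagedIngestionStatus

status = ManagedIngestionStatus.PARTIAL_SUCCESS

# One callback per member; the helper dispatches on identity, as defined above.
message = status.visit(
    not_started=lambda: "queued",
    in_progress=lambda: "running",
    success=lambda: "done",
    error=lambda: "failed",
    partial_success=lambda: "finished with some document-level errors",
)
print(message)  # -> finished with some document-level errors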
llama_cloud/types/markdown_element_node_parser.py
@@ -0,0 +1,49 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .llm import Llm
+ from .node_parser import NodeParser
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class MarkdownElementNodeParser(pydantic.BaseModel):
+     """
+     Markdown element node parser.
+
+     Splits a markdown document into Text Nodes and Index Nodes corresponding to embedded objects
+     (e.g. tables).
+     """
+
+     include_metadata: typing.Optional[bool] = pydantic.Field(
+         description="Whether or not to consider metadata when splitting."
+     )
+     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     llm: typing.Optional[Llm] = pydantic.Field(description="LLM model to use for summarization.")
+     summary_query_str: typing.Optional[str] = pydantic.Field(description="Query string to use for summarization.")
+     num_workers: typing.Optional[int] = pydantic.Field(description="Num of workers for async jobs.")
+     show_progress: typing.Optional[bool] = pydantic.Field(description="Whether to show progress.")
+     nested_node_parser: typing.Optional[NodeParser] = pydantic.Field(
+         description="Other types of node parsers to handle some types of nodes."
+     )
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
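A sketch of composing the models above; every value shown is illustrative, not a recommended setting:

from llama_cloud.types.llm import Llm
from llama_cloud.types.markdown_element_node_parser import MarkdownElementNodeParser

parser = MarkdownElementNodeParser(
    llm=Llm(system_prompt="Summarize each table in one sentence."),
    summary_query_str="What is this table about?",
    num_workers=4,
    show_progress=False,
)

# exclude_unset=True keeps the serialized config minimal: only the fields set above appear.
print(parser.json())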
llama_cloud/types/markdown_node_parser.py
@@ -0,0 +1,43 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class MarkdownNodeParser(pydantic.BaseModel):
+     """
+     Markdown node parser.
+
+     Splits a document into Nodes using custom Markdown splitting logic.
+
+     Args:
+         include_metadata (bool): whether to include metadata in nodes
+         include_prev_next_rel (bool): whether to include prev/next relationships
+     """
+
+     include_metadata: typing.Optional[bool] = pydantic.Field(
+         description="Whether or not to consider metadata when splitting."
+     )
+     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/message_role.py
@@ -0,0 +1,45 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class MessageRole(str, enum.Enum):
+     """
+     Message role.
+     """
+
+     SYSTEM = "system"
+     USER = "user"
+     ASSISTANT = "assistant"
+     FUNCTION = "function"
+     TOOL = "tool"
+     CHATBOT = "chatbot"
+     MODEL = "model"
+
+     def visit(
+         self,
+         system: typing.Callable[[], T_Result],
+         user: typing.Callable[[], T_Result],
+         assistant: typing.Callable[[], T_Result],
+         function: typing.Callable[[], T_Result],
+         tool: typing.Callable[[], T_Result],
+         chatbot: typing.Callable[[], T_Result],
+         model: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is MessageRole.SYSTEM:
+             return system()
+         if self is MessageRole.USER:
+             return user()
+         if self is MessageRole.ASSISTANT:
+             return assistant()
+         if self is MessageRole.FUNCTION:
+             return function()
+         if self is MessageRole.TOOL:
+             return tool()
+         if self is MessageRole.CHATBOT:
+             return chatbot()
+         if self is MessageRole.MODEL:
+             return model()
llama_cloud/types/metadata_filter.py
@@ -0,0 +1,41 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .filter_operator import FilterOperator
+ from .metadata_filter_value import MetadataFilterValue
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class MetadataFilter(pydantic.BaseModel):
+     """
+     Comprehensive metadata filter for vector stores to support more operators.
+
+     Value uses Strict* types, as int, float and str are compatible types and were all
+     converted to string before.
+
+     See: https://docs.pydantic.dev/latest/usage/types/#strict-types
+     """
+
+     key: str
+     value: MetadataFilterValue
+     operator: typing.Optional[FilterOperator]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/metadata_filter_value.py
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ MetadataFilterValue = typing.Union[int, float, str, typing.List[str], typing.List[float], typing.List[int]]
llama_cloud/types/metadata_filters.py
@@ -0,0 +1,41 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from __future__ import annotations
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .filter_condition import FilterCondition
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class MetadataFilters(pydantic.BaseModel):
+     """
+     Metadata filters for vector stores.
+     """
+
+     filters: typing.List[MetadataFiltersFiltersItem]
+     condition: typing.Optional[FilterCondition]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
+
+
+ from .metadata_filters_filters_item import MetadataFiltersFiltersItem  # noqa: E402
+
+ MetadataFilters.update_forward_refs()
llama_cloud/types/metadata_filters_filters_item.py
@@ -0,0 +1,8 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .metadata_filter import MetadataFilter
+ from .metadata_filters import MetadataFilters
+
+ MetadataFiltersFiltersItem = typing.Union[MetadataFilter, MetadataFilters]
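Because the union above accepts either a leaf MetadataFilter or another MetadataFilters group, filters nest recursively. A small illustrative sketch:

from llama_cloud.types.metadata_filter import MetadataFilter
from llama_cloud.types.metadata_filters import MetadataFilters

filters = MetadataFilters(
    filters=[
        MetadataFilter(key="author", value="jane"),
        MetadataFilters(  # nested group, allowed by MetadataFiltersFiltersItem
            filters=[
                MetadataFilter(key="year", value=2023),
                MetadataFilter(key="year", value=2024),
            ],
        ),
    ],
    # condition (a FilterCondition, defined elsewhere under types/) is optional and left unset here.
)
print(filters.json())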
llama_cloud/types/metric_result.py
@@ -0,0 +1,30 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class MetricResult(pydantic.BaseModel):
+     passing: typing.Optional[bool] = pydantic.Field(description="Whether the metric passed or not.")
+     score: typing.Optional[float] = pydantic.Field(description="The score for the metric.")
+     feedback: typing.Optional[str] = pydantic.Field(description="The reasoning for the metric.")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
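Every generated model shares the same Config block, so the behavior sketched below for MetricResult applies across the package (values are illustrative; the TypeError on mutation is pydantic v1's frozen-model behavior):

from llama_cloud.types.metric_result import MetricResult

m = MetricResult(passing=True, score=0.92, feedback="Grounded in the retrieved context.")

try:
    m.score = 0.5  # frozen = True forbids mutation after construction
except TypeError as exc:
    print(exc)

seen = {m}  # frozen models are hashable, so they can live in sets and as dict keys
print(m.dict())  # {'passing': True, 'score': 0.92, 'feedback': 'Grounded in the retrieved context.'}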