llama_cloud-0.0.1-py3-none-any.whl

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +295 -0
  2. llama_cloud/client.py +72 -0
  3. llama_cloud/core/__init__.py +17 -0
  4. llama_cloud/core/api_error.py +15 -0
  5. llama_cloud/core/client_wrapper.py +51 -0
  6. llama_cloud/core/datetime_utils.py +28 -0
  7. llama_cloud/core/jsonable_encoder.py +103 -0
  8. llama_cloud/core/remove_none_from_dict.py +11 -0
  9. llama_cloud/errors/__init__.py +5 -0
  10. llama_cloud/errors/unprocessable_entity_error.py +9 -0
  11. llama_cloud/resources/__init__.py +40 -0
  12. llama_cloud/resources/api_keys/__init__.py +2 -0
  13. llama_cloud/resources/api_keys/client.py +302 -0
  14. llama_cloud/resources/billing/__init__.py +2 -0
  15. llama_cloud/resources/billing/client.py +234 -0
  16. llama_cloud/resources/component_definitions/__init__.py +2 -0
  17. llama_cloud/resources/component_definitions/client.py +192 -0
  18. llama_cloud/resources/data_sinks/__init__.py +5 -0
  19. llama_cloud/resources/data_sinks/client.py +506 -0
  20. llama_cloud/resources/data_sinks/types/__init__.py +6 -0
  21. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +7 -0
  22. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +17 -0
  23. llama_cloud/resources/data_sources/__init__.py +5 -0
  24. llama_cloud/resources/data_sources/client.py +521 -0
  25. llama_cloud/resources/data_sources/types/__init__.py +7 -0
  26. llama_cloud/resources/data_sources/types/data_source_update_component.py +7 -0
  27. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +19 -0
  28. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +7 -0
  29. llama_cloud/resources/deprecated/__init__.py +2 -0
  30. llama_cloud/resources/deprecated/client.py +982 -0
  31. llama_cloud/resources/evals/__init__.py +2 -0
  32. llama_cloud/resources/evals/client.py +745 -0
  33. llama_cloud/resources/files/__init__.py +5 -0
  34. llama_cloud/resources/files/client.py +560 -0
  35. llama_cloud/resources/files/types/__init__.py +5 -0
  36. llama_cloud/resources/files/types/file_create_resource_info_value.py +5 -0
  37. llama_cloud/resources/parsing/__init__.py +2 -0
  38. llama_cloud/resources/parsing/client.py +982 -0
  39. llama_cloud/resources/pipelines/__init__.py +5 -0
  40. llama_cloud/resources/pipelines/client.py +2599 -0
  41. llama_cloud/resources/pipelines/types/__init__.py +5 -0
  42. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +7 -0
  43. llama_cloud/resources/projects/__init__.py +2 -0
  44. llama_cloud/resources/projects/client.py +1231 -0
  45. llama_cloud/types/__init__.py +253 -0
  46. llama_cloud/types/api_key.py +37 -0
  47. llama_cloud/types/azure_open_ai_embedding.py +75 -0
  48. llama_cloud/types/base.py +26 -0
  49. llama_cloud/types/base_prompt_template.py +44 -0
  50. llama_cloud/types/bedrock_embedding.py +56 -0
  51. llama_cloud/types/chat_message.py +35 -0
  52. llama_cloud/types/cloud_az_storage_blob_data_source.py +40 -0
  53. llama_cloud/types/cloud_chroma_vector_store.py +40 -0
  54. llama_cloud/types/cloud_document.py +36 -0
  55. llama_cloud/types/cloud_document_create.py +36 -0
  56. llama_cloud/types/cloud_gcs_data_source.py +37 -0
  57. llama_cloud/types/cloud_google_drive_data_source.py +36 -0
  58. llama_cloud/types/cloud_one_drive_data_source.py +38 -0
  59. llama_cloud/types/cloud_pinecone_vector_store.py +46 -0
  60. llama_cloud/types/cloud_postgres_vector_store.py +44 -0
  61. llama_cloud/types/cloud_qdrant_vector_store.py +48 -0
  62. llama_cloud/types/cloud_s_3_data_source.py +42 -0
  63. llama_cloud/types/cloud_sharepoint_data_source.py +38 -0
  64. llama_cloud/types/cloud_weaviate_vector_store.py +38 -0
  65. llama_cloud/types/code_splitter.py +46 -0
  66. llama_cloud/types/cohere_embedding.py +46 -0
  67. llama_cloud/types/configurable_data_sink_names.py +37 -0
  68. llama_cloud/types/configurable_data_source_names.py +41 -0
  69. llama_cloud/types/configurable_transformation_definition.py +45 -0
  70. llama_cloud/types/configurable_transformation_names.py +73 -0
  71. llama_cloud/types/configured_transformation_item.py +43 -0
  72. llama_cloud/types/configured_transformation_item_component.py +9 -0
  73. llama_cloud/types/configured_transformation_item_component_one.py +35 -0
  74. llama_cloud/types/data_sink.py +40 -0
  75. llama_cloud/types/data_sink_component.py +7 -0
  76. llama_cloud/types/data_sink_component_one.py +17 -0
  77. llama_cloud/types/data_sink_create.py +36 -0
  78. llama_cloud/types/data_sink_create_component.py +7 -0
  79. llama_cloud/types/data_sink_create_component_one.py +17 -0
  80. llama_cloud/types/data_sink_definition.py +41 -0
  81. llama_cloud/types/data_source.py +44 -0
  82. llama_cloud/types/data_source_component.py +7 -0
  83. llama_cloud/types/data_source_component_one.py +19 -0
  84. llama_cloud/types/data_source_create.py +40 -0
  85. llama_cloud/types/data_source_create_component.py +7 -0
  86. llama_cloud/types/data_source_create_component_one.py +19 -0
  87. llama_cloud/types/data_source_create_custom_metadata_value.py +7 -0
  88. llama_cloud/types/data_source_custom_metadata_value.py +7 -0
  89. llama_cloud/types/data_source_definition.py +41 -0
  90. llama_cloud/types/eval_dataset.py +37 -0
  91. llama_cloud/types/eval_dataset_job_params.py +36 -0
  92. llama_cloud/types/eval_dataset_job_record.py +59 -0
  93. llama_cloud/types/eval_execution_params.py +38 -0
  94. llama_cloud/types/eval_execution_params_override.py +38 -0
  95. llama_cloud/types/eval_llm_model_data.py +33 -0
  96. llama_cloud/types/eval_question.py +39 -0
  97. llama_cloud/types/eval_question_create.py +28 -0
  98. llama_cloud/types/eval_question_result.py +49 -0
  99. llama_cloud/types/file.py +46 -0
  100. llama_cloud/types/file_resource_info_value.py +5 -0
  101. llama_cloud/types/filter_condition.py +21 -0
  102. llama_cloud/types/filter_operator.py +65 -0
  103. llama_cloud/types/gemini_embedding.py +51 -0
  104. llama_cloud/types/html_node_parser.py +44 -0
  105. llama_cloud/types/http_validation_error.py +29 -0
  106. llama_cloud/types/hugging_face_inference_api_embedding.py +68 -0
  107. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  108. llama_cloud/types/json_node_parser.py +43 -0
  109. llama_cloud/types/llama_parse_supported_file_extensions.py +161 -0
  110. llama_cloud/types/llm.py +55 -0
  111. llama_cloud/types/local_eval.py +46 -0
  112. llama_cloud/types/local_eval_results.py +37 -0
  113. llama_cloud/types/local_eval_sets.py +30 -0
  114. llama_cloud/types/managed_ingestion_status.py +37 -0
  115. llama_cloud/types/markdown_element_node_parser.py +49 -0
  116. llama_cloud/types/markdown_node_parser.py +43 -0
  117. llama_cloud/types/message_role.py +45 -0
  118. llama_cloud/types/metadata_filter.py +41 -0
  119. llama_cloud/types/metadata_filter_value.py +5 -0
  120. llama_cloud/types/metadata_filters.py +41 -0
  121. llama_cloud/types/metadata_filters_filters_item.py +8 -0
  122. llama_cloud/types/metric_result.py +30 -0
  123. llama_cloud/types/node_parser.py +37 -0
  124. llama_cloud/types/object_type.py +33 -0
  125. llama_cloud/types/open_ai_embedding.py +73 -0
  126. llama_cloud/types/parser_languages.py +361 -0
  127. llama_cloud/types/parsing_history_item.py +36 -0
  128. llama_cloud/types/parsing_job.py +30 -0
  129. llama_cloud/types/parsing_job_json_result.py +29 -0
  130. llama_cloud/types/parsing_job_markdown_result.py +29 -0
  131. llama_cloud/types/parsing_job_text_result.py +29 -0
  132. llama_cloud/types/parsing_usage.py +29 -0
  133. llama_cloud/types/pipeline.py +64 -0
  134. llama_cloud/types/pipeline_create.py +61 -0
  135. llama_cloud/types/pipeline_data_source.py +46 -0
  136. llama_cloud/types/pipeline_data_source_component.py +7 -0
  137. llama_cloud/types/pipeline_data_source_component_one.py +19 -0
  138. llama_cloud/types/pipeline_data_source_create.py +32 -0
  139. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +7 -0
  140. llama_cloud/types/pipeline_deployment.py +38 -0
  141. llama_cloud/types/pipeline_file.py +52 -0
  142. llama_cloud/types/pipeline_file_create.py +36 -0
  143. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +7 -0
  144. llama_cloud/types/pipeline_file_custom_metadata_value.py +7 -0
  145. llama_cloud/types/pipeline_file_resource_info_value.py +7 -0
  146. llama_cloud/types/pipeline_file_status_response.py +35 -0
  147. llama_cloud/types/pipeline_type.py +21 -0
  148. llama_cloud/types/pooling.py +29 -0
  149. llama_cloud/types/preset_retrieval_params.py +40 -0
  150. llama_cloud/types/presigned_url.py +36 -0
  151. llama_cloud/types/project.py +42 -0
  152. llama_cloud/types/project_create.py +32 -0
  153. llama_cloud/types/prompt_mixin_prompts.py +36 -0
  154. llama_cloud/types/prompt_spec.py +35 -0
  155. llama_cloud/types/pydantic_program_mode.py +41 -0
  156. llama_cloud/types/related_node_info.py +37 -0
  157. llama_cloud/types/retrieve_results.py +40 -0
  158. llama_cloud/types/sentence_splitter.py +48 -0
  159. llama_cloud/types/simple_file_node_parser.py +44 -0
  160. llama_cloud/types/status_enum.py +33 -0
  161. llama_cloud/types/supported_eval_llm_model.py +35 -0
  162. llama_cloud/types/supported_eval_llm_model_names.py +29 -0
  163. llama_cloud/types/text_node.py +62 -0
  164. llama_cloud/types/text_node_relationships_value.py +7 -0
  165. llama_cloud/types/text_node_with_score.py +36 -0
  166. llama_cloud/types/token_text_splitter.py +43 -0
  167. llama_cloud/types/transformation_category_names.py +21 -0
  168. llama_cloud/types/validation_error.py +31 -0
  169. llama_cloud/types/validation_error_loc_item.py +5 -0
  170. llama_cloud-0.0.1.dist-info/LICENSE +21 -0
  171. llama_cloud-0.0.1.dist-info/METADATA +25 -0
  172. llama_cloud-0.0.1.dist-info/RECORD +173 -0
  173. llama_cloud-0.0.1.dist-info/WHEEL +4 -0

llama_cloud/types/prompt_mixin_prompts.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .prompt_spec import PromptSpec
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PromptMixinPrompts(pydantic.BaseModel):
+    """
+    Schema for the prompts derived from the PromptMixin.
+    """
+
+    project_id: str = pydantic.Field(description="The ID of the project.")
+    id: typing.Optional[str] = pydantic.Field(description="The ID of the prompt set.")
+    name: str = pydantic.Field(description="The name of the prompt set.")
+    prompts: typing.List[PromptSpec] = pydantic.Field(description="The prompts.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/prompt_spec.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .chat_message import ChatMessage
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PromptSpec(pydantic.BaseModel):
+    prompt_key: str = pydantic.Field(description="The key of the prompt in the PromptMixin.")
+    prompt_class: str = pydantic.Field(description="The class of the prompt (PromptTemplate or ChatPromptTemplate).")
+    prompt_type: str = pydantic.Field(description="The type of prompt.")
+    template: typing.Optional[str] = pydantic.Field(description="The template of the prompt.")
+    message_templates: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(
+        description="The chat message templates of the prompt."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
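
All of the generated models above share the same serialization pattern: json() and dict() are overridden to default to by_alias=True and exclude_unset=True. A minimal usage sketch against the PromptSpec file shown above; the field values are hypothetical, only the import path and field names come from the generated code:

from llama_cloud.types.prompt_spec import PromptSpec

# Hypothetical values: the three required string fields plus the optional
# template are set; message_templates is deliberately left unset.
spec = PromptSpec(
    prompt_key="summary",
    prompt_class="PromptTemplate",
    prompt_type="summary",
    template="Summarize the following text: {context_str}",
)

# The overridden json() injects by_alias=True and exclude_unset=True, so the
# unset message_templates field is omitted from the serialized payload.
print(spec.json())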

llama_cloud/types/pydantic_program_mode.py
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class PydanticProgramMode(str, enum.Enum):
+    """
+    Pydantic program mode.
+    """
+
+    DEFAULT = "default"
+    OPENAI = "openai"
+    LLM = "llm"
+    FUNCTION = "function"
+    GUIDANCE = "guidance"
+    LM_FORMAT_ENFORCER = "lm-format-enforcer"
+
+    def visit(
+        self,
+        default: typing.Callable[[], T_Result],
+        openai: typing.Callable[[], T_Result],
+        llm: typing.Callable[[], T_Result],
+        function: typing.Callable[[], T_Result],
+        guidance: typing.Callable[[], T_Result],
+        lm_format_enforcer: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is PydanticProgramMode.DEFAULT:
+            return default()
+        if self is PydanticProgramMode.OPENAI:
+            return openai()
+        if self is PydanticProgramMode.LLM:
+            return llm()
+        if self is PydanticProgramMode.FUNCTION:
+            return function()
+        if self is PydanticProgramMode.GUIDANCE:
+            return guidance()
+        if self is PydanticProgramMode.LM_FORMAT_ENFORCER:
+            return lm_format_enforcer()
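
The Fern-generated enums pair their members with a visit() helper that takes one callback per member, giving exhaustive matching without an if/elif chain at the call site. A small sketch against the enum above; the labels are made up for illustration:

from llama_cloud.types.pydantic_program_mode import PydanticProgramMode

mode = PydanticProgramMode.LM_FORMAT_ENFORCER

# visit() requires a callback for every member, so a newly added member shows
# up as a missing-argument TypeError rather than a silently unhandled case.
label = mode.visit(
    default=lambda: "framework default",
    openai=lambda: "OpenAI function calling",
    llm=lambda: "plain LLM completion",
    function=lambda: "function calling",
    guidance=lambda: "guidance-constrained output",
    lm_format_enforcer=lambda: "lm-format-enforcer-constrained output",
)
print(label)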

llama_cloud/types/related_node_info.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .object_type import ObjectType
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class RelatedNodeInfo(pydantic.BaseModel):
+    """
+    Base component object to capture class names.
+    """
+
+    node_id: str
+    node_type: typing.Optional[ObjectType]
+    metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    hash: typing.Optional[str]
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/retrieve_results.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .text_node_with_score import TextNodeWithScore
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class RetrieveResults(pydantic.BaseModel):
+    """
+    Schema for the result of an retrieval execution.
+    """
+
+    pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the query was retrieved against.")
+    retrieval_nodes: typing.List[TextNodeWithScore] = pydantic.Field(
+        description="The nodes retrieved by the pipeline for the given query."
+    )
+    retrieval_latency: typing.Dict[str, float] = pydantic.Field(
+        description="The end-to-end latency for retrieval and reranking."
+    )
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/sentence_splitter.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class SentenceSplitter(pydantic.BaseModel):
+    """
+    Parse text with a preference for complete sentences.
+
+    In general, this class tries to keep sentences and paragraphs together. Therefore
+    compared to the original TokenTextSplitter, there are less likely to be
+    hanging sentences or parts of sentences at the end of the node chunk.
+    """
+
+    include_metadata: typing.Optional[bool] = pydantic.Field(
+        description="Whether or not to consider metadata when splitting."
+    )
+    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
+    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+    chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
+    chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
+    separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
+    paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
+    secondary_chunking_regex: typing.Optional[str] = pydantic.Field(
+        description="Backup regex for splitting into sentences."
+    )
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
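
Every field on the splitter configs above is optional, so a partial configuration is valid; combined with the exclude_unset=True default, only the fields you actually set end up in the payload. A minimal sketch under the assumption that pydantic v1's implicit None default applies to these Optional fields:

from llama_cloud.types.sentence_splitter import SentenceSplitter

# Only the chunking parameters are set; everything else stays unset.
splitter = SentenceSplitter(chunk_size=1024, chunk_overlap=200)

# exclude_unset=True keeps the output down to the two fields set above.
print(splitter.dict())  # {'chunk_size': 1024, 'chunk_overlap': 200}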

llama_cloud/types/simple_file_node_parser.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class SimpleFileNodeParser(pydantic.BaseModel):
+    """
+    Simple file node parser.
+
+    Splits a document loaded from a file into Nodes using logic based on the file type
+    automatically detects the NodeParser to use based on file type
+
+    Args:
+        include_metadata (bool): whether to include metadata in nodes
+        include_prev_next_rel (bool): whether to include prev/next relationships
+    """
+
+    include_metadata: typing.Optional[bool] = pydantic.Field(
+        description="Whether or not to consider metadata when splitting."
+    )
+    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
+    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/status_enum.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class StatusEnum(str, enum.Enum):
+    """
+    Enum for representing the status of a job
+    """
+
+    PENDING = "PENDING"
+    SUCCESS = "SUCCESS"
+    ERROR = "ERROR"
+    PARTIAL_SUCCESS = "PARTIAL_SUCCESS"
+
+    def visit(
+        self,
+        pending: typing.Callable[[], T_Result],
+        success: typing.Callable[[], T_Result],
+        error: typing.Callable[[], T_Result],
+        partial_success: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is StatusEnum.PENDING:
+            return pending()
+        if self is StatusEnum.SUCCESS:
+            return success()
+        if self is StatusEnum.ERROR:
+            return error()
+        if self is StatusEnum.PARTIAL_SUCCESS:
+            return partial_success()

llama_cloud/types/supported_eval_llm_model.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .eval_llm_model_data import EvalLlmModelData
+from .supported_eval_llm_model_names import SupportedEvalLlmModelNames
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class SupportedEvalLlmModel(pydantic.BaseModel):
+    """
+    Response Schema for a supported eval LLM model.
+    """
+
+    name: SupportedEvalLlmModelNames = pydantic.Field(description="The name of the supported eval LLM model.")
+    details: EvalLlmModelData = pydantic.Field(description="The details of the supported eval LLM model.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/supported_eval_llm_model_names.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class SupportedEvalLlmModelNames(str, enum.Enum):
+    """
+    An enumeration.
+    """
+
+    GPT_3_5_TURBO = "GPT_3_5_TURBO"
+    GPT_4 = "GPT_4"
+    GPT_4_TURBO = "GPT_4_TURBO"
+
+    def visit(
+        self,
+        gpt_3_5_turbo: typing.Callable[[], T_Result],
+        gpt_4: typing.Callable[[], T_Result],
+        gpt_4_turbo: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is SupportedEvalLlmModelNames.GPT_3_5_TURBO:
+            return gpt_3_5_turbo()
+        if self is SupportedEvalLlmModelNames.GPT_4:
+            return gpt_4()
+        if self is SupportedEvalLlmModelNames.GPT_4_TURBO:
+            return gpt_4_turbo()

llama_cloud/types/text_node.py
@@ -0,0 +1,62 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .text_node_relationships_value import TextNodeRelationshipsValue
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class TextNode(pydantic.BaseModel):
+    """
+    Base node Object.
+
+    Generic abstract interface for retrievable nodes
+    """
+
+    id: typing.Optional[str] = pydantic.Field(alias="id_", description="Unique ID of the node.")
+    embedding: typing.Optional[typing.List[float]] = pydantic.Field(description="Embedding of the node.")
+    extra_info: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+        description="A flat dictionary of metadata fields"
+    )
+    excluded_embed_metadata_keys: typing.Optional[typing.List[str]] = pydantic.Field(
+        description="Metadata keys that are excluded from text for the embed model."
+    )
+    excluded_llm_metadata_keys: typing.Optional[typing.List[str]] = pydantic.Field(
+        description="Metadata keys that are excluded from text for the LLM."
+    )
+    relationships: typing.Optional[typing.Dict[str, TextNodeRelationshipsValue]] = pydantic.Field(
+        description="A mapping of relationships to other node information."
+    )
+    text: typing.Optional[str] = pydantic.Field(description="Text content of the node.")
+    start_char_idx: typing.Optional[int] = pydantic.Field(description="Start char index of the node.")
+    end_char_idx: typing.Optional[int] = pydantic.Field(description="End char index of the node.")
+    text_template: typing.Optional[str] = pydantic.Field(
+        description="Template for how text is formatted, with {content} and {metadata_str} placeholders."
+    )
+    metadata_template: typing.Optional[str] = pydantic.Field(
+        description="Template for how metadata is formatted, with {key} and {value} placeholders."
+    )
+    metadata_seperator: typing.Optional[str] = pydantic.Field(
+        description="Separator between metadata fields when converting to string."
+    )
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/text_node_relationships_value.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .related_node_info import RelatedNodeInfo
+
+TextNodeRelationshipsValue = typing.Union[RelatedNodeInfo, typing.List[RelatedNodeInfo]]

llama_cloud/types/text_node_with_score.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .text_node import TextNode
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class TextNodeWithScore(pydantic.BaseModel):
+    """
+    Same as NodeWithScore but type for node is a TextNode instead of BaseNode.
+    FastAPI doesn't accept abstract classes like BaseNode.
+    """
+
+    node: TextNode
+    score: typing.Optional[float]
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
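
TextNode is the one model here that combines a field alias (id serializes as "id_") with allow_population_by_field_name = True, so it can be constructed with either spelling. A short sketch with made-up content, using only the fields and classes defined in the two files above:

from llama_cloud.types.text_node import TextNode
from llama_cloud.types.text_node_with_score import TextNodeWithScore

# Either id="..." (field name) or id_="..." (alias) works for construction.
node = TextNode(id="node-1", text="LlamaIndex is a data framework for LLM applications.")
scored = TextNodeWithScore(node=node, score=0.87)

# by_alias=True in the overridden dict() means the node id comes back as "id_".
print(scored.dict())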

llama_cloud/types/token_text_splitter.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class TokenTextSplitter(pydantic.BaseModel):
+    """
+    Implementation of splitting text that looks at word tokens.
+    """
+
+    include_metadata: typing.Optional[bool] = pydantic.Field(
+        description="Whether or not to consider metadata when splitting."
+    )
+    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
+    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+    chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
+    chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
+    separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
+    backup_separators: typing.Optional[typing.List[typing.Any]] = pydantic.Field(
+        description="Additional separators for splitting."
+    )
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/transformation_category_names.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class TransformationCategoryNames(str, enum.Enum):
+    """
+    An enumeration.
+    """
+
+    NODE_PARSER = "NODE_PARSER"
+    EMBEDDING = "EMBEDDING"
+
+    def visit(self, node_parser: typing.Callable[[], T_Result], embedding: typing.Callable[[], T_Result]) -> T_Result:
+        if self is TransformationCategoryNames.NODE_PARSER:
+            return node_parser()
+        if self is TransformationCategoryNames.EMBEDDING:
+            return embedding()

llama_cloud/types/validation_error.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .validation_error_loc_item import ValidationErrorLocItem
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ValidationError(pydantic.BaseModel):
+    loc: typing.List[ValidationErrorLocItem]
+    msg: str
+    type: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/validation_error_loc_item.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ValidationErrorLocItem = typing.Union[str, int]

llama_cloud-0.0.1.dist-info/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 LlamaIndex
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

llama_cloud-0.0.1.dist-info/METADATA
@@ -0,0 +1,25 @@
+Metadata-Version: 2.1
+Name: llama-cloud
+Version: 0.0.1
+Summary:
+Author: Logan Markewich
+Author-email: logan@runllama.ai
+Requires-Python: >=3.8,<4
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Dist: httpx (>=0.20.0)
+Requires-Dist: pydantic (>=1.10)
+Description-Content-Type: text/markdown
+
+# LlamaIndex Python Client
+
+This client is auto-generated using [Fern](https://buildwithfern.com/docs/intro)
+
+To publish:
+- update the version in `pyproject.toml`
+- run `poetry publish --build`
+