llama-cloud 0.0.1 (llama_cloud-0.0.1-py3-none-any.whl)

This diff shows the content of publicly available package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in those registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (173)
  1. llama_cloud/__init__.py +295 -0
  2. llama_cloud/client.py +72 -0
  3. llama_cloud/core/__init__.py +17 -0
  4. llama_cloud/core/api_error.py +15 -0
  5. llama_cloud/core/client_wrapper.py +51 -0
  6. llama_cloud/core/datetime_utils.py +28 -0
  7. llama_cloud/core/jsonable_encoder.py +103 -0
  8. llama_cloud/core/remove_none_from_dict.py +11 -0
  9. llama_cloud/errors/__init__.py +5 -0
  10. llama_cloud/errors/unprocessable_entity_error.py +9 -0
  11. llama_cloud/resources/__init__.py +40 -0
  12. llama_cloud/resources/api_keys/__init__.py +2 -0
  13. llama_cloud/resources/api_keys/client.py +302 -0
  14. llama_cloud/resources/billing/__init__.py +2 -0
  15. llama_cloud/resources/billing/client.py +234 -0
  16. llama_cloud/resources/component_definitions/__init__.py +2 -0
  17. llama_cloud/resources/component_definitions/client.py +192 -0
  18. llama_cloud/resources/data_sinks/__init__.py +5 -0
  19. llama_cloud/resources/data_sinks/client.py +506 -0
  20. llama_cloud/resources/data_sinks/types/__init__.py +6 -0
  21. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +7 -0
  22. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +17 -0
  23. llama_cloud/resources/data_sources/__init__.py +5 -0
  24. llama_cloud/resources/data_sources/client.py +521 -0
  25. llama_cloud/resources/data_sources/types/__init__.py +7 -0
  26. llama_cloud/resources/data_sources/types/data_source_update_component.py +7 -0
  27. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +19 -0
  28. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +7 -0
  29. llama_cloud/resources/deprecated/__init__.py +2 -0
  30. llama_cloud/resources/deprecated/client.py +982 -0
  31. llama_cloud/resources/evals/__init__.py +2 -0
  32. llama_cloud/resources/evals/client.py +745 -0
  33. llama_cloud/resources/files/__init__.py +5 -0
  34. llama_cloud/resources/files/client.py +560 -0
  35. llama_cloud/resources/files/types/__init__.py +5 -0
  36. llama_cloud/resources/files/types/file_create_resource_info_value.py +5 -0
  37. llama_cloud/resources/parsing/__init__.py +2 -0
  38. llama_cloud/resources/parsing/client.py +982 -0
  39. llama_cloud/resources/pipelines/__init__.py +5 -0
  40. llama_cloud/resources/pipelines/client.py +2599 -0
  41. llama_cloud/resources/pipelines/types/__init__.py +5 -0
  42. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +7 -0
  43. llama_cloud/resources/projects/__init__.py +2 -0
  44. llama_cloud/resources/projects/client.py +1231 -0
  45. llama_cloud/types/__init__.py +253 -0
  46. llama_cloud/types/api_key.py +37 -0
  47. llama_cloud/types/azure_open_ai_embedding.py +75 -0
  48. llama_cloud/types/base.py +26 -0
  49. llama_cloud/types/base_prompt_template.py +44 -0
  50. llama_cloud/types/bedrock_embedding.py +56 -0
  51. llama_cloud/types/chat_message.py +35 -0
  52. llama_cloud/types/cloud_az_storage_blob_data_source.py +40 -0
  53. llama_cloud/types/cloud_chroma_vector_store.py +40 -0
  54. llama_cloud/types/cloud_document.py +36 -0
  55. llama_cloud/types/cloud_document_create.py +36 -0
  56. llama_cloud/types/cloud_gcs_data_source.py +37 -0
  57. llama_cloud/types/cloud_google_drive_data_source.py +36 -0
  58. llama_cloud/types/cloud_one_drive_data_source.py +38 -0
  59. llama_cloud/types/cloud_pinecone_vector_store.py +46 -0
  60. llama_cloud/types/cloud_postgres_vector_store.py +44 -0
  61. llama_cloud/types/cloud_qdrant_vector_store.py +48 -0
  62. llama_cloud/types/cloud_s_3_data_source.py +42 -0
  63. llama_cloud/types/cloud_sharepoint_data_source.py +38 -0
  64. llama_cloud/types/cloud_weaviate_vector_store.py +38 -0
  65. llama_cloud/types/code_splitter.py +46 -0
  66. llama_cloud/types/cohere_embedding.py +46 -0
  67. llama_cloud/types/configurable_data_sink_names.py +37 -0
  68. llama_cloud/types/configurable_data_source_names.py +41 -0
  69. llama_cloud/types/configurable_transformation_definition.py +45 -0
  70. llama_cloud/types/configurable_transformation_names.py +73 -0
  71. llama_cloud/types/configured_transformation_item.py +43 -0
  72. llama_cloud/types/configured_transformation_item_component.py +9 -0
  73. llama_cloud/types/configured_transformation_item_component_one.py +35 -0
  74. llama_cloud/types/data_sink.py +40 -0
  75. llama_cloud/types/data_sink_component.py +7 -0
  76. llama_cloud/types/data_sink_component_one.py +17 -0
  77. llama_cloud/types/data_sink_create.py +36 -0
  78. llama_cloud/types/data_sink_create_component.py +7 -0
  79. llama_cloud/types/data_sink_create_component_one.py +17 -0
  80. llama_cloud/types/data_sink_definition.py +41 -0
  81. llama_cloud/types/data_source.py +44 -0
  82. llama_cloud/types/data_source_component.py +7 -0
  83. llama_cloud/types/data_source_component_one.py +19 -0
  84. llama_cloud/types/data_source_create.py +40 -0
  85. llama_cloud/types/data_source_create_component.py +7 -0
  86. llama_cloud/types/data_source_create_component_one.py +19 -0
  87. llama_cloud/types/data_source_create_custom_metadata_value.py +7 -0
  88. llama_cloud/types/data_source_custom_metadata_value.py +7 -0
  89. llama_cloud/types/data_source_definition.py +41 -0
  90. llama_cloud/types/eval_dataset.py +37 -0
  91. llama_cloud/types/eval_dataset_job_params.py +36 -0
  92. llama_cloud/types/eval_dataset_job_record.py +59 -0
  93. llama_cloud/types/eval_execution_params.py +38 -0
  94. llama_cloud/types/eval_execution_params_override.py +38 -0
  95. llama_cloud/types/eval_llm_model_data.py +33 -0
  96. llama_cloud/types/eval_question.py +39 -0
  97. llama_cloud/types/eval_question_create.py +28 -0
  98. llama_cloud/types/eval_question_result.py +49 -0
  99. llama_cloud/types/file.py +46 -0
  100. llama_cloud/types/file_resource_info_value.py +5 -0
  101. llama_cloud/types/filter_condition.py +21 -0
  102. llama_cloud/types/filter_operator.py +65 -0
  103. llama_cloud/types/gemini_embedding.py +51 -0
  104. llama_cloud/types/html_node_parser.py +44 -0
  105. llama_cloud/types/http_validation_error.py +29 -0
  106. llama_cloud/types/hugging_face_inference_api_embedding.py +68 -0
  107. llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
  108. llama_cloud/types/json_node_parser.py +43 -0
  109. llama_cloud/types/llama_parse_supported_file_extensions.py +161 -0
  110. llama_cloud/types/llm.py +55 -0
  111. llama_cloud/types/local_eval.py +46 -0
  112. llama_cloud/types/local_eval_results.py +37 -0
  113. llama_cloud/types/local_eval_sets.py +30 -0
  114. llama_cloud/types/managed_ingestion_status.py +37 -0
  115. llama_cloud/types/markdown_element_node_parser.py +49 -0
  116. llama_cloud/types/markdown_node_parser.py +43 -0
  117. llama_cloud/types/message_role.py +45 -0
  118. llama_cloud/types/metadata_filter.py +41 -0
  119. llama_cloud/types/metadata_filter_value.py +5 -0
  120. llama_cloud/types/metadata_filters.py +41 -0
  121. llama_cloud/types/metadata_filters_filters_item.py +8 -0
  122. llama_cloud/types/metric_result.py +30 -0
  123. llama_cloud/types/node_parser.py +37 -0
  124. llama_cloud/types/object_type.py +33 -0
  125. llama_cloud/types/open_ai_embedding.py +73 -0
  126. llama_cloud/types/parser_languages.py +361 -0
  127. llama_cloud/types/parsing_history_item.py +36 -0
  128. llama_cloud/types/parsing_job.py +30 -0
  129. llama_cloud/types/parsing_job_json_result.py +29 -0
  130. llama_cloud/types/parsing_job_markdown_result.py +29 -0
  131. llama_cloud/types/parsing_job_text_result.py +29 -0
  132. llama_cloud/types/parsing_usage.py +29 -0
  133. llama_cloud/types/pipeline.py +64 -0
  134. llama_cloud/types/pipeline_create.py +61 -0
  135. llama_cloud/types/pipeline_data_source.py +46 -0
  136. llama_cloud/types/pipeline_data_source_component.py +7 -0
  137. llama_cloud/types/pipeline_data_source_component_one.py +19 -0
  138. llama_cloud/types/pipeline_data_source_create.py +32 -0
  139. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +7 -0
  140. llama_cloud/types/pipeline_deployment.py +38 -0
  141. llama_cloud/types/pipeline_file.py +52 -0
  142. llama_cloud/types/pipeline_file_create.py +36 -0
  143. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +7 -0
  144. llama_cloud/types/pipeline_file_custom_metadata_value.py +7 -0
  145. llama_cloud/types/pipeline_file_resource_info_value.py +7 -0
  146. llama_cloud/types/pipeline_file_status_response.py +35 -0
  147. llama_cloud/types/pipeline_type.py +21 -0
  148. llama_cloud/types/pooling.py +29 -0
  149. llama_cloud/types/preset_retrieval_params.py +40 -0
  150. llama_cloud/types/presigned_url.py +36 -0
  151. llama_cloud/types/project.py +42 -0
  152. llama_cloud/types/project_create.py +32 -0
  153. llama_cloud/types/prompt_mixin_prompts.py +36 -0
  154. llama_cloud/types/prompt_spec.py +35 -0
  155. llama_cloud/types/pydantic_program_mode.py +41 -0
  156. llama_cloud/types/related_node_info.py +37 -0
  157. llama_cloud/types/retrieve_results.py +40 -0
  158. llama_cloud/types/sentence_splitter.py +48 -0
  159. llama_cloud/types/simple_file_node_parser.py +44 -0
  160. llama_cloud/types/status_enum.py +33 -0
  161. llama_cloud/types/supported_eval_llm_model.py +35 -0
  162. llama_cloud/types/supported_eval_llm_model_names.py +29 -0
  163. llama_cloud/types/text_node.py +62 -0
  164. llama_cloud/types/text_node_relationships_value.py +7 -0
  165. llama_cloud/types/text_node_with_score.py +36 -0
  166. llama_cloud/types/token_text_splitter.py +43 -0
  167. llama_cloud/types/transformation_category_names.py +21 -0
  168. llama_cloud/types/validation_error.py +31 -0
  169. llama_cloud/types/validation_error_loc_item.py +5 -0
  170. llama_cloud-0.0.1.dist-info/LICENSE +21 -0
  171. llama_cloud-0.0.1.dist-info/METADATA +25 -0
  172. llama_cloud-0.0.1.dist-info/RECORD +173 -0
  173. llama_cloud-0.0.1.dist-info/WHEEL +4 -0
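For orientation (not part of the diff itself): the layout above splits the wheel into core plumbing (llama_cloud/core), per-resource API clients (llama_cloud/resources), and plain Pydantic models (llama_cloud/types). A minimal, hypothetical sketch of importing one of those models by its listed module path, assuming the wheel is installed as llama-cloud==0.0.1:

    # Hypothetical sketch; the top-level re-exports in llama_cloud/__init__.py are
    # not shown here, so the fully qualified module path from the file list is used.
    from llama_cloud.types.cloud_gcs_data_source import CloudGcsDataSource

    gcs_source = CloudGcsDataSource(
        bucket="my-bucket",
        service_account_key={"type": "service_account"},  # placeholder, not a real key
    )
    print(gcs_source.json())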
llama_cloud/types/cloud_document_create.py +36 -0
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudDocumentCreate(pydantic.BaseModel):
+     """
+     Create a new cloud document.
+     """
+
+     text: str
+     metadata: typing.Dict[str, typing.Any]
+     excluded_embed_metadata_keys: typing.Optional[typing.List[str]]
+     excluded_llm_metadata_keys: typing.Optional[typing.List[str]]
+     id: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
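The json()/dict() overrides above are shared by the generated models in this diff: they default to by_alias=True and exclude_unset=True, and Config.frozen = True makes instances immutable. A small sketch of what that means in practice (assumes the package is installed; values are placeholders):

    from llama_cloud.types.cloud_document_create import CloudDocumentCreate

    doc = CloudDocumentCreate(text="hello", metadata={"source": "example"})
    # exclude_unset=True drops the optional fields that were never set:
    print(doc.dict())  # {'text': 'hello', 'metadata': {'source': 'example'}}
    # frozen=True (pydantic v1) rejects mutation after construction:
    # doc.text = "other"  # raises TypeError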
llama_cloud/types/cloud_gcs_data_source.py +37 -0
@@ -0,0 +1,37 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudGcsDataSource(pydantic.BaseModel):
+     """
+     Base component object to capture class names.
+     """
+
+     bucket: str = pydantic.Field(description="The name of the GCS bucket to read from.")
+     prefix: typing.Optional[str] = pydantic.Field(description="The prefix of the GCS objects to read from.")
+     service_account_key: typing.Dict[str, typing.Any] = pydantic.Field(
+         description="The service account key JSON to use for authentication."
+     )
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/cloud_google_drive_data_source.py +36 -0
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudGoogleDriveDataSource(pydantic.BaseModel):
+     """
+     Base component object to capture class names.
+     """
+
+     folder_id: str = pydantic.Field(description="The ID of the Google Drive folder to read from.")
+     service_account_key: typing.Dict[str, typing.Any] = pydantic.Field(
+         description="The service account key JSON to use for authentication."
+     )
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/cloud_one_drive_data_source.py +38 -0
@@ -0,0 +1,38 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudOneDriveDataSource(pydantic.BaseModel):
+     """
+     Base component object to capture class names.
+     """
+
+     user_principal_name: str = pydantic.Field(description="The user principal name to use for authentication.")
+     folder_path: typing.Optional[str] = pydantic.Field(description="The path of the OneDrive folder to read from.")
+     folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the OneDrive folder to read from.")
+     client_id: str = pydantic.Field(description="The client ID to use for authentication.")
+     client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
+     tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/cloud_pinecone_vector_store.py +46 -0
@@ -0,0 +1,46 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudPineconeVectorStore(pydantic.BaseModel):
+     """
+     Cloud Pinecone Vector Store.
+
+     This class is used to store the configuration for a Pinecone vector store, so that it can be
+     created and used in LlamaCloud.
+
+     Args:
+         api_key (str): API key for authenticating with Pinecone
+         index_name (str): name of the Pinecone index
+         namespace (optional[str]): namespace to use in the Pinecone index
+         insert_kwargs (optional[dict]): additional kwargs to pass during insertion
+     """
+
+     supports_nested_metadata_filters: typing.Optional[bool]
+     api_key: str
+     index_name: str
+     namespace: typing.Optional[str]
+     insert_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
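A short construction sketch for the Pinecone configuration described by the Args block above (all values are placeholders, not real credentials):

    from llama_cloud.types.cloud_pinecone_vector_store import CloudPineconeVectorStore

    pinecone_store = CloudPineconeVectorStore(
        api_key="pc-placeholder-key",
        index_name="my-index",
        namespace="staging",  # optional
    )
    print(pinecone_store.json())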
llama_cloud/types/cloud_postgres_vector_store.py +44 -0
@@ -0,0 +1,44 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudPostgresVectorStore(pydantic.BaseModel):
+     """
+     Base class for cloud vector stores.
+     """
+
+     supports_nested_metadata_filters: typing.Optional[bool]
+     connection_string: str
+     async_connection_string: str
+     table_name: str
+     schema_name: str
+     embed_dim: int
+     hybrid_search: bool
+     text_search_config: str
+     cache_ok: bool
+     perform_setup: bool
+     debug: bool
+     use_jsonb: bool
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/cloud_qdrant_vector_store.py +48 -0
@@ -0,0 +1,48 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudQdrantVectorStore(pydantic.BaseModel):
+     """
+     Cloud Qdrant Vector Store.
+
+     This class is used to store the configuration for a Qdrant vector store, so that it can be
+     created and used in LlamaCloud.
+
+     Args:
+         collection_name (str): name of the Qdrant collection
+         url (str): url of the Qdrant instance
+         api_key (str): API key for authenticating with Qdrant
+         max_retries (int): maximum number of retries in case of a failure. Defaults to 3
+         client_kwargs (dict): additional kwargs to pass to the Qdrant client
+     """
+
+     supports_nested_metadata_filters: typing.Optional[bool]
+     collection_name: str
+     url: str
+     api_key: str
+     max_retries: typing.Optional[int]
+     client_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/cloud_s_3_data_source.py +42 -0
@@ -0,0 +1,42 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudS3DataSource(pydantic.BaseModel):
+     """
+     Base component object to capture class names.
+     """
+
+     bucket: str = pydantic.Field(description="The name of the S3 bucket to read from.")
+     prefix: typing.Optional[str] = pydantic.Field(description="The prefix of the S3 objects to read from.")
+     aws_access_id: typing.Optional[str] = pydantic.Field(description="The AWS access ID to use for authentication.")
+     aws_access_secret: typing.Optional[str] = pydantic.Field(
+         description="The AWS access secret to use for authentication."
+     )
+     s_3_endpoint_url: typing.Optional[str] = pydantic.Field(
+         alias="s3_endpoint_url", description="The S3 endpoint URL to use for authentication."
+     )
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+         json_encoders = {dt.datetime: serialize_datetime}
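This model also uses a field alias: the attribute is declared as s_3_endpoint_url but aliased to "s3_endpoint_url", and allow_population_by_field_name = True accepts either spelling on input. A hedged sketch (placeholder values):

    from llama_cloud.types.cloud_s_3_data_source import CloudS3DataSource

    src = CloudS3DataSource(bucket="my-bucket", s3_endpoint_url="https://s3.example.com")
    # by_alias=True means the alias is also what gets serialized:
    print(src.json())  # ... "s3_endpoint_url": "https://s3.example.com" ...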
llama_cloud/types/cloud_sharepoint_data_source.py +38 -0
@@ -0,0 +1,38 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudSharepointDataSource(pydantic.BaseModel):
+     """
+     Base component object to capture class names.
+     """
+
+     site_name: str = pydantic.Field(description="The name of the SharePoint site to download from.")
+     folder_path: typing.Optional[str] = pydantic.Field(description="The path of the Sharepoint folder to read from.")
+     folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Sharepoint folder to read from.")
+     client_id: str = pydantic.Field(description="The client ID to use for authentication.")
+     client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
+     tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/cloud_weaviate_vector_store.py +38 -0
@@ -0,0 +1,38 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CloudWeaviateVectorStore(pydantic.BaseModel):
+     """
+     Base class for cloud vector stores.
+     """
+
+     supports_nested_metadata_filters: typing.Optional[bool]
+     index_name: str
+     url: typing.Optional[str]
+     text_key: str
+     auth_config: typing.Optional[typing.Dict[str, typing.Any]]
+     client_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/code_splitter.py +46 -0
@@ -0,0 +1,46 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CodeSplitter(pydantic.BaseModel):
+     """
+     Split code using a AST parser.
+
+     Thank you to Kevin Lu / SweepAI for suggesting this elegant code splitting solution.
+     https://docs.sweep.dev/blogs/chunking-2m-files
+     """
+
+     include_metadata: typing.Optional[bool] = pydantic.Field(
+         description="Whether or not to consider metadata when splitting."
+     )
+     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     language: str = pydantic.Field(description="The programming language of the code being split.")
+     chunk_lines: typing.Optional[int] = pydantic.Field(description="The number of lines to include in each chunk.")
+     chunk_lines_overlap: typing.Optional[int] = pydantic.Field(
+         description="How many lines of code each chunk overlaps with."
+     )
+     max_chars: typing.Optional[int] = pydantic.Field(description="Maximum number of characters per chunk.")
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/cohere_embedding.py +46 -0
@@ -0,0 +1,46 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class CohereEmbedding(pydantic.BaseModel):
+     """
+     CohereEmbedding uses the Cohere API to generate embeddings for text.
+     """
+
+     model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
+     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
+     callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+     num_workers: typing.Optional[int] = pydantic.Field(
+         description="The number of workers to use for async embedding calls."
+     )
+     api_key: str = pydantic.Field(description="The Cohere API key.")
+     truncate: str = pydantic.Field(description="Truncation type - START/ END/ NONE")
+     input_type: typing.Optional[str] = pydantic.Field(
+         description="Model Input type. If not provided, search_document and search_query are used when needed."
+     )
+     embedding_type: str = pydantic.Field(
+         description="Embedding type. If not provided float embedding_type is used when needed."
+     )
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/configurable_data_sink_names.py +37 -0
@@ -0,0 +1,37 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class ConfigurableDataSinkNames(str, enum.Enum):
+     """
+     An enumeration.
+     """
+
+     CHROMA = "CHROMA"
+     PINECONE = "PINECONE"
+     POSTGRES = "POSTGRES"
+     QDRANT = "QDRANT"
+     WEAVIATE = "WEAVIATE"
+
+     def visit(
+         self,
+         chroma: typing.Callable[[], T_Result],
+         pinecone: typing.Callable[[], T_Result],
+         postgres: typing.Callable[[], T_Result],
+         qdrant: typing.Callable[[], T_Result],
+         weaviate: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is ConfigurableDataSinkNames.CHROMA:
+             return chroma()
+         if self is ConfigurableDataSinkNames.PINECONE:
+             return pinecone()
+         if self is ConfigurableDataSinkNames.POSTGRES:
+             return postgres()
+         if self is ConfigurableDataSinkNames.QDRANT:
+             return qdrant()
+         if self is ConfigurableDataSinkNames.WEAVIATE:
+             return weaviate()
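The visit() helper above, which also appears on the other generated enums in this diff, maps each enum member to a value without an if/elif chain at the call site. An illustrative sketch with arbitrary return values:

    from llama_cloud.types.configurable_data_sink_names import ConfigurableDataSinkNames

    sink = ConfigurableDataSinkNames.QDRANT
    label = sink.visit(
        chroma=lambda: "Chroma",
        pinecone=lambda: "Pinecone",
        postgres=lambda: "PostgreSQL",
        qdrant=lambda: "Qdrant",
        weaviate=lambda: "Weaviate",
    )
    print(label)  # "Qdrant"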
llama_cloud/types/configurable_data_source_names.py +41 -0
@@ -0,0 +1,41 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class ConfigurableDataSourceNames(str, enum.Enum):
+     """
+     An enumeration.
+     """
+
+     S_3 = "S3"
+     AZURE_STORAGE_BLOB = "AZURE_STORAGE_BLOB"
+     GCS = "GCS"
+     GOOGLE_DRIVE = "GOOGLE_DRIVE"
+     MICROSOFT_ONEDRIVE = "MICROSOFT_ONEDRIVE"
+     MICROSOFT_SHAREPOINT = "MICROSOFT_SHAREPOINT"
+
+     def visit(
+         self,
+         s_3: typing.Callable[[], T_Result],
+         azure_storage_blob: typing.Callable[[], T_Result],
+         gcs: typing.Callable[[], T_Result],
+         google_drive: typing.Callable[[], T_Result],
+         microsoft_onedrive: typing.Callable[[], T_Result],
+         microsoft_sharepoint: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is ConfigurableDataSourceNames.S_3:
+             return s_3()
+         if self is ConfigurableDataSourceNames.AZURE_STORAGE_BLOB:
+             return azure_storage_blob()
+         if self is ConfigurableDataSourceNames.GCS:
+             return gcs()
+         if self is ConfigurableDataSourceNames.GOOGLE_DRIVE:
+             return google_drive()
+         if self is ConfigurableDataSourceNames.MICROSOFT_ONEDRIVE:
+             return microsoft_onedrive()
+         if self is ConfigurableDataSourceNames.MICROSOFT_SHAREPOINT:
+             return microsoft_sharepoint()
llama_cloud/types/configurable_transformation_definition.py +45 -0
@@ -0,0 +1,45 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .configurable_transformation_names import ConfigurableTransformationNames
+ from .transformation_category_names import TransformationCategoryNames
+
+ try:
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class ConfigurableTransformationDefinition(pydantic.BaseModel):
+     """
+     Schema for a transformation definition.
+     """
+
+     label: str = pydantic.Field(
+         description="The label field will be used to display the name of the component in the UI"
+     )
+     json_schema: typing.Dict[str, typing.Any] = pydantic.Field(
+         description="The json_schema field can be used by clients to determine how to construct the component"
+     )
+     configurable_transformation_type: ConfigurableTransformationNames = pydantic.Field(
+         description="The name field will act as the unique identifier of TransformationDefinition objects"
+     )
+     transformation_category: TransformationCategoryNames = pydantic.Field(
+         description="The transformation_category field will be used to group transformations in the UI"
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}