llama-cloud 0.0.16__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of llama-cloud has been flagged as a potentially problematic release.
Files changed (133)
  1. llama_cloud/__init__.py +8 -30
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +2 -10
  4. llama_cloud/resources/data_sinks/__init__.py +2 -2
  5. llama_cloud/resources/data_sinks/client.py +2 -2
  6. llama_cloud/resources/data_sinks/types/__init__.py +1 -2
  7. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +19 -2
  8. llama_cloud/resources/data_sources/__init__.py +2 -2
  9. llama_cloud/resources/data_sources/client.py +6 -6
  10. llama_cloud/resources/data_sources/types/__init__.py +1 -2
  11. llama_cloud/resources/data_sources/types/data_source_update_component.py +23 -2
  12. llama_cloud/resources/extraction/client.py +14 -14
  13. llama_cloud/resources/files/client.py +10 -10
  14. llama_cloud/resources/organizations/client.py +2 -2
  15. llama_cloud/resources/parsing/client.py +68 -60
  16. llama_cloud/resources/pipelines/__init__.py +0 -4
  17. llama_cloud/resources/pipelines/client.py +50 -340
  18. llama_cloud/resources/pipelines/types/__init__.py +1 -7
  19. llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +15 -15
  20. llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py +1 -24
  21. llama_cloud/types/__init__.py +9 -23
  22. llama_cloud/types/azure_open_ai_embedding.py +7 -34
  23. llama_cloud/types/base_prompt_template.py +3 -14
  24. llama_cloud/types/bedrock_embedding.py +7 -17
  25. llama_cloud/types/box_auth_mechanism.py +0 -4
  26. llama_cloud/types/character_splitter.py +3 -4
  27. llama_cloud/types/chat_data.py +0 -5
  28. llama_cloud/types/chat_message.py +1 -6
  29. llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -18
  30. llama_cloud/types/cloud_box_data_source.py +6 -16
  31. llama_cloud/types/cloud_chroma_vector_store.py +1 -5
  32. llama_cloud/types/cloud_confluence_data_source.py +6 -10
  33. llama_cloud/types/cloud_document.py +1 -3
  34. llama_cloud/types/cloud_document_create.py +1 -3
  35. llama_cloud/types/{user.py → cloud_google_drive_data_source.py} +6 -6
  36. llama_cloud/types/cloud_jira_data_source.py +4 -6
  37. llama_cloud/types/cloud_notion_page_data_source.py +2 -6
  38. llama_cloud/types/cloud_one_drive_data_source.py +2 -6
  39. llama_cloud/types/cloud_postgres_vector_store.py +0 -4
  40. llama_cloud/types/cloud_s_3_data_source.py +4 -12
  41. llama_cloud/types/cloud_sharepoint_data_source.py +5 -9
  42. llama_cloud/types/cloud_slack_data_source.py +6 -10
  43. llama_cloud/types/cloud_weaviate_vector_store.py +0 -4
  44. llama_cloud/types/code_splitter.py +2 -1
  45. llama_cloud/types/cohere_embedding.py +3 -7
  46. llama_cloud/types/configurable_data_sink_names.py +0 -4
  47. llama_cloud/types/configurable_data_source_names.py +4 -4
  48. llama_cloud/types/configurable_transformation_names.py +0 -4
  49. llama_cloud/types/configured_transformation_item_component.py +29 -2
  50. llama_cloud/types/data_sink.py +2 -2
  51. llama_cloud/types/data_sink_component.py +19 -2
  52. llama_cloud/types/data_sink_create_component.py +19 -2
  53. llama_cloud/types/data_source.py +3 -5
  54. llama_cloud/types/data_source_component.py +23 -2
  55. llama_cloud/types/data_source_create.py +1 -3
  56. llama_cloud/types/data_source_create_component.py +23 -2
  57. llama_cloud/types/eval_dataset.py +2 -2
  58. llama_cloud/types/eval_dataset_job_record.py +7 -13
  59. llama_cloud/types/eval_execution_params_override.py +2 -6
  60. llama_cloud/types/eval_metric.py +17 -0
  61. llama_cloud/types/eval_question.py +2 -6
  62. llama_cloud/types/extend_vertex_text_embedding.py +6 -18
  63. llama_cloud/types/extraction_result.py +5 -3
  64. llama_cloud/types/extraction_schema.py +3 -5
  65. llama_cloud/types/file.py +7 -11
  66. llama_cloud/types/gemini_embedding.py +5 -9
  67. llama_cloud/types/hugging_face_inference_api_embedding.py +10 -26
  68. llama_cloud/types/input_message.py +2 -4
  69. llama_cloud/types/llama_parse_parameters.py +1 -0
  70. llama_cloud/types/llama_parse_supported_file_extensions.py +0 -4
  71. llama_cloud/types/llm.py +9 -8
  72. llama_cloud/types/llm_parameters.py +2 -7
  73. llama_cloud/types/local_eval.py +8 -10
  74. llama_cloud/types/local_eval_results.py +1 -1
  75. llama_cloud/types/managed_ingestion_status_response.py +3 -5
  76. llama_cloud/types/markdown_element_node_parser.py +4 -5
  77. llama_cloud/types/markdown_node_parser.py +2 -1
  78. llama_cloud/types/message_annotation.py +1 -6
  79. llama_cloud/types/metric_result.py +3 -3
  80. llama_cloud/types/node_parser.py +2 -1
  81. llama_cloud/types/node_relationship.py +44 -0
  82. llama_cloud/types/object_type.py +0 -4
  83. llama_cloud/types/open_ai_embedding.py +6 -12
  84. llama_cloud/types/organization.py +2 -2
  85. llama_cloud/types/page_splitter_node_parser.py +3 -2
  86. llama_cloud/types/parsing_job_json_result.py +2 -2
  87. llama_cloud/types/parsing_job_markdown_result.py +1 -1
  88. llama_cloud/types/parsing_job_text_result.py +1 -1
  89. llama_cloud/types/partition_names.py +45 -0
  90. llama_cloud/types/pipeline.py +7 -17
  91. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  92. llama_cloud/types/pipeline_create.py +6 -18
  93. llama_cloud/types/pipeline_create_embedding_config.py +15 -15
  94. llama_cloud/types/pipeline_create_transform_config.py +1 -24
  95. llama_cloud/types/pipeline_data_source.py +5 -11
  96. llama_cloud/types/pipeline_data_source_component.py +23 -2
  97. llama_cloud/types/pipeline_data_source_create.py +1 -3
  98. llama_cloud/types/pipeline_deployment.py +4 -8
  99. llama_cloud/types/pipeline_embedding_config.py +15 -15
  100. llama_cloud/types/pipeline_file.py +10 -18
  101. llama_cloud/types/pipeline_file_create.py +1 -3
  102. llama_cloud/types/playground_session.py +2 -2
  103. llama_cloud/types/preset_retrieval_params.py +8 -11
  104. llama_cloud/types/presigned_url.py +1 -3
  105. llama_cloud/types/project.py +2 -2
  106. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  107. llama_cloud/types/prompt_spec.py +2 -4
  108. llama_cloud/types/related_node_info.py +0 -4
  109. llama_cloud/types/retrieval_mode.py +0 -4
  110. llama_cloud/types/sentence_splitter.py +3 -4
  111. llama_cloud/types/supported_llm_model_names.py +0 -4
  112. llama_cloud/types/text_node.py +3 -9
  113. llama_cloud/types/token_text_splitter.py +2 -1
  114. llama_cloud/types/transformation_category_names.py +0 -4
  115. llama_cloud/types/user_organization.py +5 -9
  116. llama_cloud/types/user_organization_create.py +2 -2
  117. llama_cloud/types/user_organization_delete.py +2 -2
  118. {llama_cloud-0.0.16.dist-info → llama_cloud-0.1.0.dist-info}/METADATA +1 -1
  119. llama_cloud-0.1.0.dist-info/RECORD +226 -0
  120. llama_cloud/resources/auth/__init__.py +0 -2
  121. llama_cloud/resources/auth/client.py +0 -124
  122. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +0 -23
  123. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +0 -25
  124. llama_cloud/types/configured_transformation_item_component_one.py +0 -35
  125. llama_cloud/types/custom_claims.py +0 -58
  126. llama_cloud/types/data_sink_component_one.py +0 -23
  127. llama_cloud/types/data_sink_create_component_one.py +0 -23
  128. llama_cloud/types/data_source_component_one.py +0 -25
  129. llama_cloud/types/data_source_create_component_one.py +0 -25
  130. llama_cloud/types/pipeline_data_source_component_one.py +0 -25
  131. llama_cloud-0.0.16.dist-info/RECORD +0 -234
  132. {llama_cloud-0.0.16.dist-info → llama_cloud-0.1.0.dist-info}/LICENSE +0 -0
  133. {llama_cloud-0.0.16.dist-info → llama_cloud-0.1.0.dist-info}/WHEEL +0 -0

llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py
@@ -15,8 +15,8 @@ from ....types.open_ai_embedding_config import OpenAiEmbeddingConfig
 from ....types.vertex_ai_embedding_config import VertexAiEmbeddingConfig


-class PipelineUpdateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
-    type: typing_extensions.Literal["OPENAI_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["AZURE_EMBEDDING"]

     class Config:
         frozen = True
@@ -24,8 +24,8 @@ class PipelineUpdateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
-    type: typing_extensions.Literal["AZURE_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
+    type: typing_extensions.Literal["BEDROCK_EMBEDDING"]

     class Config:
         frozen = True
@@ -33,8 +33,8 @@ class PipelineUpdateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmbeddingConfig):
-    type: typing_extensions.Literal["HUGGINGFACE_API_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
+    type: typing_extensions.Literal["COHERE_EMBEDDING"]

     class Config:
         frozen = True
@@ -42,8 +42,8 @@ class PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInference
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
-    type: typing_extensions.Literal["BEDROCK_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
+    type: typing_extensions.Literal["GEMINI_EMBEDDING"]

     class Config:
         frozen = True
@@ -51,8 +51,8 @@ class PipelineUpdateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
-    type: typing_extensions.Literal["GEMINI_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmbeddingConfig):
+    type: typing_extensions.Literal["HUGGINGFACE_API_EMBEDDING"]

     class Config:
         frozen = True
@@ -60,8 +60,8 @@ class PipelineUpdateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
         allow_population_by_field_name = True


-class PipelineUpdateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
-    type: typing_extensions.Literal["COHERE_EMBEDDING"]
+class PipelineUpdateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["OPENAI_EMBEDDING"]

     class Config:
         frozen = True
@@ -79,11 +79,11 @@ class PipelineUpdateEmbeddingConfig_VertexaiEmbedding(VertexAiEmbeddingConfig):


 PipelineUpdateEmbeddingConfig = typing.Union[
-    PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_AzureEmbedding,
-    PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
     PipelineUpdateEmbeddingConfig_BedrockEmbedding,
-    PipelineUpdateEmbeddingConfig_GeminiEmbedding,
     PipelineUpdateEmbeddingConfig_CohereEmbedding,
+    PipelineUpdateEmbeddingConfig_GeminiEmbedding,
+    PipelineUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
 ]
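
The variant classes in this hunk are only reordered alphabetically; the union keeps the same members. A minimal sketch of selecting a variant when updating a pipeline's embedding config (only the type discriminator is confirmed by this diff; the remaining AzureOpenAiEmbeddingConfig fields are not shown and are assumed to be optional or supplied separately):

    # Hedged sketch: the base config's own fields are not shown in this diff.
    from llama_cloud.resources.pipelines.types.pipeline_update_embedding_config import (
        PipelineUpdateEmbeddingConfig_AzureEmbedding,
    )

    embedding_config = PipelineUpdateEmbeddingConfig_AzureEmbedding(
        type="AZURE_EMBEDDING",  # literal discriminator required by this variant
        # ...any other AzureOpenAiEmbeddingConfig fields would be passed here as well
    )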

llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py
@@ -1,31 +1,8 @@
 # This file was auto-generated by Fern from our API Definition.

-from __future__ import annotations
-
 import typing

-import typing_extensions
-
 from ....types.advanced_mode_transform_config import AdvancedModeTransformConfig
 from ....types.auto_transform_config import AutoTransformConfig

-
-class PipelineUpdateTransformConfig_Auto(AutoTransformConfig):
-    mode: typing_extensions.Literal["auto"]
-
-    class Config:
-        frozen = True
-        smart_union = True
-        allow_population_by_field_name = True
-
-
-class PipelineUpdateTransformConfig_Advanced(AdvancedModeTransformConfig):
-    mode: typing_extensions.Literal["advanced"]
-
-    class Config:
-        frozen = True
-        smart_union = True
-        allow_population_by_field_name = True
-
-
-PipelineUpdateTransformConfig = typing.Union[PipelineUpdateTransformConfig_Auto, PipelineUpdateTransformConfig_Advanced]
+PipelineUpdateTransformConfig = typing.Union[AutoTransformConfig, AdvancedModeTransformConfig]
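
This is a breaking simplification: the mode-discriminated wrapper classes are removed and PipelineUpdateTransformConfig becomes a plain union of the underlying config models. A minimal before/after sketch (constructor arguments for AutoTransformConfig are not shown in this diff, so the no-argument call below is an assumption):

    from llama_cloud.types.auto_transform_config import AutoTransformConfig

    # 0.0.16 (removed): a wrapper with a "mode" discriminator was required, e.g.
    #   transform_config = PipelineUpdateTransformConfig_Auto(mode="auto")
    # 0.1.0: pass the underlying config model directly.
    transform_config = AutoTransformConfig()  # assumes the model's fields all have defaults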

llama_cloud/types/__init__.py
@@ -34,6 +34,7 @@ from .cloud_chroma_vector_store import CloudChromaVectorStore
 from .cloud_confluence_data_source import CloudConfluenceDataSource
 from .cloud_document import CloudDocument
 from .cloud_document_create import CloudDocumentCreate
+from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_milvus_vector_store import CloudMilvusVectorStore
 from .cloud_mongo_db_atlas_vector_search import CloudMongoDbAtlasVectorSearch
@@ -55,21 +56,15 @@ from .configurable_transformation_definition import ConfigurableTransformationDe
 from .configurable_transformation_names import ConfigurableTransformationNames
 from .configured_transformation_item import ConfiguredTransformationItem
 from .configured_transformation_item_component import ConfiguredTransformationItemComponent
-from .configured_transformation_item_component_one import ConfiguredTransformationItemComponentOne
-from .custom_claims import CustomClaims
 from .data_sink import DataSink
 from .data_sink_component import DataSinkComponent
-from .data_sink_component_one import DataSinkComponentOne
 from .data_sink_create import DataSinkCreate
 from .data_sink_create_component import DataSinkCreateComponent
-from .data_sink_create_component_one import DataSinkCreateComponentOne
 from .data_sink_definition import DataSinkDefinition
 from .data_source import DataSource
 from .data_source_component import DataSourceComponent
-from .data_source_component_one import DataSourceComponentOne
 from .data_source_create import DataSourceCreate
 from .data_source_create_component import DataSourceCreateComponent
-from .data_source_create_component_one import DataSourceCreateComponentOne
 from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_definition import DataSourceDefinition
@@ -79,6 +74,7 @@ from .eval_dataset_job_params import EvalDatasetJobParams
 from .eval_dataset_job_record import EvalDatasetJobRecord
 from .eval_execution_params import EvalExecutionParams
 from .eval_execution_params_override import EvalExecutionParamsOverride
+from .eval_metric import EvalMetric
 from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
@@ -121,6 +117,7 @@ from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
 from .metric_result import MetricResult
 from .node_parser import NodeParser
+from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
 from .none_segmentation_config import NoneSegmentationConfig
 from .object_type import ObjectType
@@ -139,6 +136,7 @@ from .parsing_job_json_result import ParsingJobJsonResult
 from .parsing_job_markdown_result import ParsingJobMarkdownResult
 from .parsing_job_text_result import ParsingJobTextResult
 from .parsing_usage import ParsingUsage
+from .partition_names import PartitionNames
 from .pipeline import Pipeline
 from .pipeline_configuration_hashes import PipelineConfigurationHashes
 from .pipeline_create import PipelineCreate
@@ -152,14 +150,9 @@ from .pipeline_create_embedding_config import (
     PipelineCreateEmbeddingConfig_OpenaiEmbedding,
     PipelineCreateEmbeddingConfig_VertexaiEmbedding,
 )
-from .pipeline_create_transform_config import (
-    PipelineCreateTransformConfig,
-    PipelineCreateTransformConfig_Advanced,
-    PipelineCreateTransformConfig_Auto,
-)
+from .pipeline_create_transform_config import PipelineCreateTransformConfig
 from .pipeline_data_source import PipelineDataSource
 from .pipeline_data_source_component import PipelineDataSourceComponent
-from .pipeline_data_source_component_one import PipelineDataSourceComponentOne
 from .pipeline_data_source_create import PipelineDataSourceCreate
 from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 from .pipeline_deployment import PipelineDeployment
@@ -209,7 +202,6 @@ from .text_node_with_score import TextNodeWithScore
 from .token_chunking_config import TokenChunkingConfig
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
-from .user import User
 from .user_organization import UserOrganization
 from .user_organization_create import UserOrganizationCreate
 from .user_organization_delete import UserOrganizationDelete
@@ -249,6 +241,7 @@ __all__ = [
     "CloudConfluenceDataSource",
     "CloudDocument",
     "CloudDocumentCreate",
+    "CloudGoogleDriveDataSource",
     "CloudJiraDataSource",
     "CloudMilvusVectorStore",
     "CloudMongoDbAtlasVectorSearch",
@@ -270,21 +263,15 @@ __all__ = [
     "ConfigurableTransformationNames",
     "ConfiguredTransformationItem",
     "ConfiguredTransformationItemComponent",
-    "ConfiguredTransformationItemComponentOne",
-    "CustomClaims",
     "DataSink",
     "DataSinkComponent",
-    "DataSinkComponentOne",
     "DataSinkCreate",
     "DataSinkCreateComponent",
-    "DataSinkCreateComponentOne",
     "DataSinkDefinition",
     "DataSource",
     "DataSourceComponent",
-    "DataSourceComponentOne",
     "DataSourceCreate",
     "DataSourceCreateComponent",
-    "DataSourceCreateComponentOne",
     "DataSourceCreateCustomMetadataValue",
     "DataSourceCustomMetadataValue",
     "DataSourceDefinition",
@@ -294,6 +281,7 @@ __all__ = [
     "EvalDatasetJobRecord",
     "EvalExecutionParams",
     "EvalExecutionParamsOverride",
+    "EvalMetric",
     "EvalQuestion",
     "EvalQuestionCreate",
     "EvalQuestionResult",
@@ -336,6 +324,7 @@ __all__ = [
     "MetadataFiltersFiltersItem",
     "MetricResult",
     "NodeParser",
+    "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",
     "ObjectType",
@@ -354,6 +343,7 @@ __all__ = [
     "ParsingJobMarkdownResult",
     "ParsingJobTextResult",
     "ParsingUsage",
+    "PartitionNames",
     "Pipeline",
     "PipelineConfigurationHashes",
     "PipelineCreate",
@@ -366,11 +356,8 @@ __all__ = [
     "PipelineCreateEmbeddingConfig_OpenaiEmbedding",
     "PipelineCreateEmbeddingConfig_VertexaiEmbedding",
     "PipelineCreateTransformConfig",
-    "PipelineCreateTransformConfig_Advanced",
-    "PipelineCreateTransformConfig_Auto",
     "PipelineDataSource",
     "PipelineDataSourceComponent",
-    "PipelineDataSourceComponentOne",
     "PipelineDataSourceCreate",
     "PipelineDataSourceCustomMetadataValue",
     "PipelineDeployment",
@@ -416,7 +403,6 @@ __all__ = [
     "TokenChunkingConfig",
     "TokenTextSplitter",
     "TransformationCategoryNames",
-    "User",
     "UserOrganization",
     "UserOrganizationCreate",
     "UserOrganizationDelete",

llama_cloud/types/azure_open_ai_embedding.py
@@ -15,34 +15,10 @@ except ImportError:


 class AzureOpenAiEmbedding(pydantic.BaseModel):
-    """
-    OpenAI class for embeddings.
-
-    Args:
-        mode (str): Mode for embedding.
-            Defaults to OpenAIEmbeddingMode.TEXT_SEARCH_MODE.
-            Options are:
-
-            - OpenAIEmbeddingMode.SIMILARITY_MODE
-            - OpenAIEmbeddingMode.TEXT_SEARCH_MODE
-
-        model (str): Model for embedding.
-            Defaults to OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002.
-            Options are:
-
-            - OpenAIEmbeddingModelType.DAVINCI
-            - OpenAIEmbeddingModelType.CURIE
-            - OpenAIEmbeddingModelType.BABBAGE
-            - OpenAIEmbeddingModelType.ADA
-            - OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002
-    """
-
     model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
+    callback_manager: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="Additional kwargs for the OpenAI API."
     )
@@ -51,17 +27,14 @@ class AzureOpenAiEmbedding(pydantic.BaseModel):
     api_version: typing.Optional[str] = pydantic.Field(description="The version for Azure OpenAI API.")
     max_retries: typing.Optional[int] = pydantic.Field(description="Maximum number of retries.")
     timeout: typing.Optional[float] = pydantic.Field(description="Timeout for each request.")
-    default_headers: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
-        description="The default headers for API requests."
-    )
+    default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
     reuse_client: typing.Optional[bool] = pydantic.Field(
         description="Reuse the OpenAI client between requests. When doing anything with large volumes of async API calls, setting this to false can improve stability."
     )
-    dimensions: typing.Optional[int] = pydantic.Field(
-        description="The number of dimensions on the output embedding vectors. Works only with v3 embedding models."
-    )
-    azure_endpoint: typing.Optional[str] = pydantic.Field(description="The Azure endpoint to use.")
-    azure_deployment: typing.Optional[str] = pydantic.Field(description="The Azure deployment to use.")
+    dimensions: typing.Optional[int]
+    azure_endpoint: typing.Optional[str]
+    azure_deployment: typing.Optional[str]
+    azure_ad_token_provider: typing.Optional[str]
     use_azure_ad: bool = pydantic.Field(
         description="Indicates if Microsoft Entra ID (former Azure AD) is used for token authentication"
     )

llama_cloud/types/base_prompt_template.py
@@ -15,23 +15,12 @@ except ImportError:


 class BasePromptTemplate(pydantic.BaseModel):
-    """
-    Chainable mixin.
-
-    A module that can produce a `QueryComponent` from a set of inputs through
-    `as_query_component`.
-
-    If plugged in directly into a `QueryPipeline`, the `ChainableMixin` will be
-    converted into a `QueryComponent` with default parameters.
-    """
-
     metadata: typing.Dict[str, typing.Any]
     template_vars: typing.List[str]
     kwargs: typing.Dict[str, str]
-    output_parser: typing.Optional[typing.Dict[str, typing.Any]]
-    template_var_mappings: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
-        description="Template variable mappings (Optional)."
-    )
+    output_parser: typing.Any
+    template_var_mappings: typing.Optional[typing.Dict[str, typing.Any]]
+    function_mappings: typing.Optional[typing.Dict[str, typing.Optional[str]]]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/bedrock_embedding.py
@@ -15,25 +15,15 @@ except ImportError:


 class BedrockEmbedding(pydantic.BaseModel):
-    """
-    Base class for embeddings.
-    """
-
     model_name: str = pydantic.Field(description="The modelId of the Bedrock model to use.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
-    profile_name: typing.Optional[str] = pydantic.Field(
-        description="The name of aws profile to use. If not given, then the default profile is used."
-    )
-    aws_access_key_id: typing.Optional[str] = pydantic.Field(description="AWS Access Key ID to use")
-    aws_secret_access_key: typing.Optional[str] = pydantic.Field(description="AWS Secret Access Key to use")
-    aws_session_token: typing.Optional[str] = pydantic.Field(description="AWS Session Token to use")
-    region_name: typing.Optional[str] = pydantic.Field(
-        description="AWS region name to use. Uses region configured in AWS CLI if not passed"
-    )
+    callback_manager: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
+    profile_name: typing.Optional[str]
+    aws_access_key_id: typing.Optional[str]
+    aws_secret_access_key: typing.Optional[str]
+    aws_session_token: typing.Optional[str]
+    region_name: typing.Optional[str]
     botocore_session: typing.Optional[typing.Any]
     botocore_config: typing.Optional[typing.Any]
     max_retries: typing.Optional[int] = pydantic.Field(description="The maximum number of API retries.")

llama_cloud/types/box_auth_mechanism.py
@@ -7,10 +7,6 @@ T_Result = typing.TypeVar("T_Result")


 class BoxAuthMechanism(str, enum.Enum):
-    """
-    An enumeration.
-    """
-
     DEVELOPER_TOKEN = "developer_token"
     CCG = "ccg"


llama_cloud/types/character_splitter.py
@@ -23,14 +23,13 @@ class CharacterSplitter(pydantic.BaseModel):
         description="Whether or not to consider metadata when splitting."
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+    callback_manager: typing.Optional[typing.Any]
+    id_func: typing.Optional[str]
     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
     paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
-    secondary_chunking_regex: typing.Optional[str] = pydantic.Field(
-        description="Backup regex for splitting into sentences."
-    )
+    secondary_chunking_regex: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/chat_data.py
@@ -17,11 +17,6 @@ except ImportError:


 class ChatData(pydantic.BaseModel):
-    """
-    Base schema model for BaseComponent classes used in the platform.
-    Comes with special serialization logic for types used commonly in platform codebase.
-    """
-
     retrieval_parameters: typing.Optional[PresetRetrievalParams]
     llm_parameters: typing.Optional[LlmParameters]
     class_name: typing.Optional[str]

llama_cloud/types/chat_message.py
@@ -17,18 +17,13 @@ except ImportError:


 class ChatMessage(pydantic.BaseModel):
-    """
-    Base schema model for BaseComponent classes used in the platform.
-    Comes with special serialization logic for types used commonly in platform codebase.
-    """
-
     id: str
     index: int = pydantic.Field(description="The index of the message in the chat.")
     annotations: typing.Optional[typing.List[MessageAnnotation]] = pydantic.Field(
         description="Retrieval annotations for the message."
     )
     role: MessageRole
-    content: typing.Optional[str] = pydantic.Field(description="Text content of the generation")
+    content: typing.Optional[str]
     additional_kwargs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
         description="Additional arguments passed to the model"
     )

llama_cloud/types/cloud_az_storage_blob_data_source.py
@@ -15,26 +15,15 @@ except ImportError:


 class CloudAzStorageBlobDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     container_name: str = pydantic.Field(description="The name of the Azure Storage Blob container to read from.")
     account_url: str = pydantic.Field(description="The Azure Storage Blob account URL to use for authentication.")
-    prefix: typing.Optional[str] = pydantic.Field(
-        description="The prefix of the Azure Storage Blob objects to read from. Use this to filter files at the subdirectory level"
-    )
-    account_name: typing.Optional[str] = pydantic.Field(
-        description="The Azure Storage Blob account name to use for authentication."
-    )
-    account_key: typing.Optional[str] = pydantic.Field(
-        description="The Azure Storage Blob account key to use for authentication."
-    )
-    tenant_id: typing.Optional[str] = pydantic.Field(description="The Azure AD tenant ID to use for authentication.")
-    client_id: typing.Optional[str] = pydantic.Field(description="The Azure AD client ID to use for authentication.")
-    client_secret: typing.Optional[str] = pydantic.Field(
-        description="The Azure AD client secret to use for authentication."
-    )
+    blob: typing.Optional[str]
+    prefix: typing.Optional[str]
+    account_name: typing.Optional[str]
+    account_key: typing.Optional[str]
+    tenant_id: typing.Optional[str]
+    client_id: typing.Optional[str]
+    client_secret: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/cloud_box_data_source.py
@@ -16,25 +16,15 @@ except ImportError:


 class CloudBoxDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
-    folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Box folder to read from.")
+    folder_id: typing.Optional[str]
     authentication_mechanism: BoxAuthMechanism = pydantic.Field(
         description="The type of authentication to use (Developer Token or CCG)"
     )
-    developer_token: typing.Optional[str] = pydantic.Field(
-        description="Developer token for authentication if authentication_mechanism is 'developer_token'."
-    )
-    client_id: typing.Optional[str] = pydantic.Field(
-        description="Box API key used for identifying the application the user is authenticating with"
-    )
-    client_secret: typing.Optional[str] = pydantic.Field(description="Box API secret used for making auth requests.")
-    user_id: typing.Optional[str] = pydantic.Field(description="Box User ID, if provided authenticates as user.")
-    enterprise_id: typing.Optional[str] = pydantic.Field(
-        description="Box Enterprise ID, if provided authenticates as service."
-    )
+    developer_token: typing.Optional[str]
+    client_id: typing.Optional[str]
+    client_secret: typing.Optional[str]
+    user_id: typing.Optional[str]
+    enterprise_id: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/cloud_chroma_vector_store.py
@@ -15,16 +15,12 @@ except ImportError:


 class CloudChromaVectorStore(pydantic.BaseModel):
-    """
-    Base class for cloud vector stores.
-    """
-
     supports_nested_metadata_filters: typing.Optional[bool]
     collection_name: typing.Optional[str]
     host: typing.Optional[str]
     port: typing.Optional[str]
     ssl: bool
-    headers: typing.Optional[typing.Dict[str, str]]
+    headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
     persist_dir: typing.Optional[str]
     collection_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
     class_name: typing.Optional[str]

llama_cloud/types/cloud_confluence_data_source.py
@@ -15,20 +15,16 @@ except ImportError:


 class CloudConfluenceDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     server_url: str = pydantic.Field(description="The server URL of the Confluence instance.")
     authentication_mechanism: str = pydantic.Field(
         description="Type of Authentication for connecting to Confluence APIs."
     )
-    user_name: typing.Optional[str] = pydantic.Field(description="The username to use for authentication.")
-    api_token: typing.Optional[str] = pydantic.Field(description="The API token to use for authentication.")
-    space_key: typing.Optional[str] = pydantic.Field(description="The space key to read from.")
-    page_ids: typing.Optional[str] = pydantic.Field(description="The page IDs of the Confluence to read from.")
-    cql: typing.Optional[str] = pydantic.Field(description="The CQL query to use for fetching pages.")
-    label: typing.Optional[str] = pydantic.Field(description="The label to use for fetching pages.")
+    user_name: typing.Optional[str]
+    api_token: typing.Optional[str]
+    space_key: typing.Optional[str]
+    page_ids: typing.Optional[str]
+    cql: typing.Optional[str]
+    label: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/cloud_document.py
@@ -23,9 +23,7 @@ class CloudDocument(pydantic.BaseModel):
     metadata: typing.Dict[str, typing.Any]
     excluded_embed_metadata_keys: typing.Optional[typing.List[str]]
     excluded_llm_metadata_keys: typing.Optional[typing.List[str]]
-    page_positions: typing.Optional[typing.List[int]] = pydantic.Field(
-        description="indices in the CloudDocument.text where a new page begins. e.g. Second page starts at index specified by page_positions[1]."
-    )
+    page_positions: typing.Optional[typing.List[int]]
     id: str

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/cloud_document_create.py
@@ -23,9 +23,7 @@ class CloudDocumentCreate(pydantic.BaseModel):
     metadata: typing.Dict[str, typing.Any]
     excluded_embed_metadata_keys: typing.Optional[typing.List[str]]
     excluded_llm_metadata_keys: typing.Optional[typing.List[str]]
-    page_positions: typing.Optional[typing.List[int]] = pydantic.Field(
-        description="indices in the CloudDocument.text where a new page begins. e.g. Second page starts at index specified by page_positions[1]."
-    )
+    page_positions: typing.Optional[typing.List[int]]
     id: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/{user.py → cloud_google_drive_data_source.py}
@@ -4,7 +4,6 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .custom_claims import CustomClaims

 try:
     import pydantic
@@ -15,11 +14,12 @@ except ImportError:
     import pydantic  # type: ignore


-class User(pydantic.BaseModel):
-    id: str
-    email: str
-    name: typing.Optional[str] = pydantic.Field(description="The user's name.")
-    claims: typing.Optional[CustomClaims] = pydantic.Field(description="The user's custom claims.")
+class CloudGoogleDriveDataSource(pydantic.BaseModel):
+    folder_id: str = pydantic.Field(description="The ID of the Google Drive folder to read from.")
+    service_account_key: typing.Dict[str, typing.Any] = pydantic.Field(
+        description="The service account key JSON to use for authentication."
+    )
+    class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
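
The rename drops the old User model (and its CustomClaims dependency) and introduces a Google Drive data source in its place. A minimal construction sketch based only on the two fields visible in this hunk (the folder ID and service-account dict below are placeholders):

    from llama_cloud.types.cloud_google_drive_data_source import CloudGoogleDriveDataSource

    drive_source = CloudGoogleDriveDataSource(
        folder_id="YOUR_FOLDER_ID",  # placeholder Google Drive folder ID
        service_account_key={"type": "service_account", "project_id": "YOUR_PROJECT"},  # placeholder key JSON
    )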

llama_cloud/types/cloud_jira_data_source.py
@@ -19,12 +19,10 @@ class CloudJiraDataSource(pydantic.BaseModel):
     Cloud Jira Data Source integrating JiraReader.
     """

-    email: typing.Optional[str] = pydantic.Field(description="The email address to use for authentication.")
-    api_token: typing.Optional[str] = pydantic.Field(
-        description="The API/ Access Token used for Basic, PAT and OAuth2 authentication."
-    )
-    server_url: typing.Optional[str] = pydantic.Field(description="The server url for Jira Cloud.")
-    cloud_id: typing.Optional[str] = pydantic.Field(description="The cloud ID, used in case of OAuth2.")
+    email: typing.Optional[str]
+    api_token: typing.Optional[str]
+    server_url: typing.Optional[str]
+    cloud_id: typing.Optional[str]
     authentication_mechanism: str = pydantic.Field(description="Type of Authentication for connecting to Jira APIs.")
     query: str = pydantic.Field(description="JQL (Jira Query Language) query to search.")
     class_name: typing.Optional[str]

llama_cloud/types/cloud_notion_page_data_source.py
@@ -15,13 +15,9 @@ except ImportError:


 class CloudNotionPageDataSource(pydantic.BaseModel):
-    """
-    Base component object to capture class names.
-    """
-
     integration_token: str = pydantic.Field(description="The integration token to use for authentication.")
-    database_ids: typing.Optional[str] = pydantic.Field(description="The Notion Database Id to read content from.")
-    page_ids: typing.Optional[str] = pydantic.Field(description="The Page ID's of the Notion to read from.")
+    database_ids: typing.Optional[str]
+    page_ids: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str: