llama-cloud 0.0.13__py3-none-any.whl → 0.0.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (54)
  1. llama_cloud/__init__.py +76 -14
  2. llama_cloud/resources/__init__.py +14 -0
  3. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +2 -0
  4. llama_cloud/resources/evals/client.py +5 -5
  5. llama_cloud/resources/parsing/client.py +8 -0
  6. llama_cloud/resources/pipelines/__init__.py +14 -0
  7. llama_cloud/resources/pipelines/client.py +115 -66
  8. llama_cloud/resources/pipelines/types/__init__.py +16 -0
  9. llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +78 -0
  10. llama_cloud/types/__init__.py +68 -14
  11. llama_cloud/types/{embedding_config.py → azure_open_ai_embedding_config.py} +4 -6
  12. llama_cloud/types/bedrock_embedding_config.py +34 -0
  13. llama_cloud/types/box_auth_mechanism.py +21 -0
  14. llama_cloud/types/chat_data.py +1 -1
  15. llama_cloud/types/chat_message.py +14 -4
  16. llama_cloud/types/cloud_azure_ai_search_vector_store.py +3 -0
  17. llama_cloud/types/cloud_box_data_source.py +51 -0
  18. llama_cloud/types/cloud_document.py +3 -0
  19. llama_cloud/types/cloud_document_create.py +3 -0
  20. llama_cloud/types/cloud_sharepoint_data_source.py +2 -1
  21. llama_cloud/types/cohere_embedding_config.py +34 -0
  22. llama_cloud/types/configurable_data_source_names.py +4 -0
  23. llama_cloud/types/custom_claims.py +0 -3
  24. llama_cloud/types/data_source_component_one.py +2 -0
  25. llama_cloud/types/data_source_create_component_one.py +2 -0
  26. llama_cloud/types/eval_execution_params.py +2 -2
  27. llama_cloud/types/eval_execution_params_override.py +2 -2
  28. llama_cloud/types/filter_operator.py +4 -0
  29. llama_cloud/types/gemini_embedding_config.py +34 -0
  30. llama_cloud/types/hugging_face_inference_api_embedding_config.py +34 -0
  31. llama_cloud/types/input_message.py +42 -0
  32. llama_cloud/types/llama_parse_parameters.py +4 -1
  33. llama_cloud/types/{eval_llm_model_data.py → llm_model_data.py} +1 -1
  34. llama_cloud/types/llm_parameters.py +2 -2
  35. llama_cloud/types/{supported_eval_llm_model.py → message_annotation.py} +6 -6
  36. llama_cloud/types/metadata_filter.py +1 -1
  37. llama_cloud/types/open_ai_embedding_config.py +34 -0
  38. llama_cloud/types/page_segmentation_config.py +2 -0
  39. llama_cloud/types/pipeline.py +11 -1
  40. llama_cloud/types/pipeline_create.py +3 -3
  41. llama_cloud/types/pipeline_create_embedding_config.py +78 -0
  42. llama_cloud/types/pipeline_data_source_component_one.py +2 -0
  43. llama_cloud/types/pipeline_embedding_config.py +78 -0
  44. llama_cloud/types/pipeline_transform_config.py +31 -0
  45. llama_cloud/types/playground_session.py +51 -0
  46. llama_cloud/types/supported_llm_model.py +41 -0
  47. llama_cloud/types/{supported_eval_llm_model_names.py → supported_llm_model_names.py} +10 -6
  48. {llama_cloud-0.0.13.dist-info → llama_cloud-0.0.14.dist-info}/METADATA +1 -1
  49. {llama_cloud-0.0.13.dist-info → llama_cloud-0.0.14.dist-info}/RECORD +51 -40
  50. llama_cloud/types/embedding_config_component.py +0 -7
  51. llama_cloud/types/embedding_config_component_one.py +0 -19
  52. llama_cloud/types/embedding_config_type.py +0 -41
  53. {llama_cloud-0.0.13.dist-info → llama_cloud-0.0.14.dist-info}/LICENSE +0 -0
  54. {llama_cloud-0.0.13.dist-info → llama_cloud-0.0.14.dist-info}/WHEEL +0 -0
--- llama_cloud/types/__init__.py
+++ llama_cloud/types/__init__.py
@@ -17,15 +17,19 @@ from .advanced_mode_transform_config_segmentation_config import (
 )
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding
+from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
 from .base import Base
 from .base_prompt_template import BasePromptTemplate
 from .bedrock_embedding import BedrockEmbedding
+from .bedrock_embedding_config import BedrockEmbeddingConfig
+from .box_auth_mechanism import BoxAuthMechanism
 from .character_chunking_config import CharacterChunkingConfig
 from .character_splitter import CharacterSplitter
 from .chat_data import ChatData
 from .chat_message import ChatMessage
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
+from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_chroma_vector_store import CloudChromaVectorStore
 from .cloud_confluence_data_source import CloudConfluenceDataSource
 from .cloud_document import CloudDocument
@@ -44,6 +48,7 @@ from .cloud_slack_data_source import CloudSlackDataSource
 from .cloud_weaviate_vector_store import CloudWeaviateVectorStore
 from .code_splitter import CodeSplitter
 from .cohere_embedding import CohereEmbedding
+from .cohere_embedding_config import CohereEmbeddingConfig
 from .configurable_data_sink_names import ConfigurableDataSinkNames
 from .configurable_data_source_names import ConfigurableDataSourceNames
 from .configurable_transformation_definition import ConfigurableTransformationDefinition
@@ -69,16 +74,11 @@ from .data_source_create_custom_metadata_value import DataSourceCreateCustomMeta
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_definition import DataSourceDefinition
 from .element_segmentation_config import ElementSegmentationConfig
-from .embedding_config import EmbeddingConfig
-from .embedding_config_component import EmbeddingConfigComponent
-from .embedding_config_component_one import EmbeddingConfigComponentOne
-from .embedding_config_type import EmbeddingConfigType
 from .eval_dataset import EvalDataset
 from .eval_dataset_job_params import EvalDatasetJobParams
 from .eval_dataset_job_record import EvalDatasetJobRecord
 from .eval_execution_params import EvalExecutionParams
 from .eval_execution_params_override import EvalExecutionParamsOverride
-from .eval_llm_model_data import EvalLlmModelData
 from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
@@ -92,14 +92,18 @@ from .file_resource_info_value import FileResourceInfoValue
 from .filter_condition import FilterCondition
 from .filter_operator import FilterOperator
 from .gemini_embedding import GeminiEmbedding
+from .gemini_embedding_config import GeminiEmbeddingConfig
 from .http_validation_error import HttpValidationError
 from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
+from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
 from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
 from .ingestion_error_response import IngestionErrorResponse
+from .input_message import InputMessage
 from .job_name_mapping import JobNameMapping
 from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
 from .llm import Llm
+from .llm_model_data import LlmModelData
 from .llm_parameters import LlmParameters
 from .local_eval import LocalEval
 from .local_eval_results import LocalEvalResults
@@ -108,6 +112,7 @@ from .managed_ingestion_status import ManagedIngestionStatus
 from .managed_ingestion_status_response import ManagedIngestionStatusResponse
 from .markdown_element_node_parser import MarkdownElementNodeParser
 from .markdown_node_parser import MarkdownNodeParser
+from .message_annotation import MessageAnnotation
 from .message_role import MessageRole
 from .metadata_filter import MetadataFilter
 from .metadata_filter_value import MetadataFilterValue
@@ -119,6 +124,7 @@ from .none_chunking_config import NoneChunkingConfig
 from .none_segmentation_config import NoneSegmentationConfig
 from .object_type import ObjectType
 from .open_ai_embedding import OpenAiEmbedding
+from .open_ai_embedding_config import OpenAiEmbeddingConfig
 from .organization import Organization
 from .organization_create import OrganizationCreate
 from .page_segmentation_config import PageSegmentationConfig
@@ -132,6 +138,15 @@ from .parsing_job_text_result import ParsingJobTextResult
 from .parsing_usage import ParsingUsage
 from .pipeline import Pipeline
 from .pipeline_create import PipelineCreate
+from .pipeline_create_embedding_config import (
+    PipelineCreateEmbeddingConfig,
+    PipelineCreateEmbeddingConfig_AzureEmbedding,
+    PipelineCreateEmbeddingConfig_BedrockEmbedding,
+    PipelineCreateEmbeddingConfig_CohereEmbedding,
+    PipelineCreateEmbeddingConfig_GeminiEmbedding,
+    PipelineCreateEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineCreateEmbeddingConfig_OpenaiEmbedding,
+)
 from .pipeline_create_transform_config import (
     PipelineCreateTransformConfig,
     PipelineCreateTransformConfig_Advanced,
@@ -143,12 +158,27 @@ from .pipeline_data_source_component_one import PipelineDataSourceComponentOne
 from .pipeline_data_source_create import PipelineDataSourceCreate
 from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 from .pipeline_deployment import PipelineDeployment
+from .pipeline_embedding_config import (
+    PipelineEmbeddingConfig,
+    PipelineEmbeddingConfig_AzureEmbedding,
+    PipelineEmbeddingConfig_BedrockEmbedding,
+    PipelineEmbeddingConfig_CohereEmbedding,
+    PipelineEmbeddingConfig_GeminiEmbedding,
+    PipelineEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineEmbeddingConfig_OpenaiEmbedding,
+)
 from .pipeline_file import PipelineFile
 from .pipeline_file_create import PipelineFileCreate
 from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+from .pipeline_transform_config import (
+    PipelineTransformConfig,
+    PipelineTransformConfig_Advanced,
+    PipelineTransformConfig_Auto,
+)
 from .pipeline_type import PipelineType
+from .playground_session import PlaygroundSession
 from .pooling import Pooling
 from .preset_retrieval_params import PresetRetrievalParams
 from .presigned_url import PresignedUrl
@@ -164,8 +194,8 @@ from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
 from .sentence_splitter import SentenceSplitter
 from .status_enum import StatusEnum
-from .supported_eval_llm_model import SupportedEvalLlmModel
-from .supported_eval_llm_model_names import SupportedEvalLlmModelNames
+from .supported_llm_model import SupportedLlmModel
+from .supported_llm_model_names import SupportedLlmModelNames
 from .text_node import TextNode
 from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
@@ -193,15 +223,19 @@ __all__ = [
     "AdvancedModeTransformConfigSegmentationConfig_Page",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
+    "AzureOpenAiEmbeddingConfig",
     "Base",
     "BasePromptTemplate",
     "BedrockEmbedding",
+    "BedrockEmbeddingConfig",
+    "BoxAuthMechanism",
     "CharacterChunkingConfig",
     "CharacterSplitter",
     "ChatData",
     "ChatMessage",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
+    "CloudBoxDataSource",
     "CloudChromaVectorStore",
     "CloudConfluenceDataSource",
     "CloudDocument",
@@ -220,6 +254,7 @@ __all__ = [
     "CloudWeaviateVectorStore",
     "CodeSplitter",
     "CohereEmbedding",
+    "CohereEmbeddingConfig",
     "ConfigurableDataSinkNames",
     "ConfigurableDataSourceNames",
     "ConfigurableTransformationDefinition",
@@ -245,16 +280,11 @@ __all__ = [
     "DataSourceCustomMetadataValue",
     "DataSourceDefinition",
     "ElementSegmentationConfig",
-    "EmbeddingConfig",
-    "EmbeddingConfigComponent",
-    "EmbeddingConfigComponentOne",
-    "EmbeddingConfigType",
     "EvalDataset",
     "EvalDatasetJobParams",
     "EvalDatasetJobRecord",
     "EvalExecutionParams",
     "EvalExecutionParamsOverride",
-    "EvalLlmModelData",
     "EvalQuestion",
     "EvalQuestionCreate",
     "EvalQuestionResult",
@@ -268,14 +298,18 @@ __all__ = [
     "FilterCondition",
     "FilterOperator",
     "GeminiEmbedding",
+    "GeminiEmbeddingConfig",
     "HttpValidationError",
     "HuggingFaceInferenceApiEmbedding",
+    "HuggingFaceInferenceApiEmbeddingConfig",
     "HuggingFaceInferenceApiEmbeddingToken",
     "IngestionErrorResponse",
+    "InputMessage",
     "JobNameMapping",
     "LlamaParseParameters",
     "LlamaParseSupportedFileExtensions",
     "Llm",
+    "LlmModelData",
     "LlmParameters",
     "LocalEval",
     "LocalEvalResults",
@@ -284,6 +318,7 @@ __all__ = [
     "ManagedIngestionStatusResponse",
     "MarkdownElementNodeParser",
     "MarkdownNodeParser",
+    "MessageAnnotation",
     "MessageRole",
     "MetadataFilter",
     "MetadataFilterValue",
@@ -295,6 +330,7 @@ __all__ = [
     "NoneSegmentationConfig",
     "ObjectType",
     "OpenAiEmbedding",
+    "OpenAiEmbeddingConfig",
     "Organization",
     "OrganizationCreate",
     "PageSegmentationConfig",
@@ -308,6 +344,13 @@ __all__ = [
     "ParsingUsage",
     "Pipeline",
     "PipelineCreate",
+    "PipelineCreateEmbeddingConfig",
+    "PipelineCreateEmbeddingConfig_AzureEmbedding",
+    "PipelineCreateEmbeddingConfig_BedrockEmbedding",
+    "PipelineCreateEmbeddingConfig_CohereEmbedding",
+    "PipelineCreateEmbeddingConfig_GeminiEmbedding",
+    "PipelineCreateEmbeddingConfig_HuggingfaceApiEmbedding",
+    "PipelineCreateEmbeddingConfig_OpenaiEmbedding",
     "PipelineCreateTransformConfig",
     "PipelineCreateTransformConfig_Advanced",
     "PipelineCreateTransformConfig_Auto",
@@ -317,12 +360,23 @@ __all__ = [
     "PipelineDataSourceCreate",
     "PipelineDataSourceCustomMetadataValue",
     "PipelineDeployment",
+    "PipelineEmbeddingConfig",
+    "PipelineEmbeddingConfig_AzureEmbedding",
+    "PipelineEmbeddingConfig_BedrockEmbedding",
+    "PipelineEmbeddingConfig_CohereEmbedding",
+    "PipelineEmbeddingConfig_GeminiEmbedding",
+    "PipelineEmbeddingConfig_HuggingfaceApiEmbedding",
+    "PipelineEmbeddingConfig_OpenaiEmbedding",
     "PipelineFile",
     "PipelineFileCreate",
     "PipelineFileCreateCustomMetadataValue",
     "PipelineFileCustomMetadataValue",
     "PipelineFileResourceInfoValue",
+    "PipelineTransformConfig",
+    "PipelineTransformConfig_Advanced",
+    "PipelineTransformConfig_Auto",
     "PipelineType",
+    "PlaygroundSession",
     "Pooling",
     "PresetRetrievalParams",
     "PresignedUrl",
@@ -338,8 +392,8 @@ __all__ = [
     "SentenceChunkingConfig",
     "SentenceSplitter",
     "StatusEnum",
-    "SupportedEvalLlmModel",
-    "SupportedEvalLlmModelNames",
+    "SupportedLlmModel",
+    "SupportedLlmModelNames",
     "TextNode",
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
--- llama_cloud/types/embedding_config.py
+++ llama_cloud/types/azure_open_ai_embedding_config.py
@@ -4,8 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .embedding_config_component import EmbeddingConfigComponent
-from .embedding_config_type import EmbeddingConfigType
+from .azure_open_ai_embedding import AzureOpenAiEmbedding
 
 try:
     import pydantic
@@ -16,10 +15,9 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class EmbeddingConfig(pydantic.BaseModel):
-    type: typing.Optional[EmbeddingConfigType] = pydantic.Field(description="Type of the embedding model.")
-    component: typing.Optional[EmbeddingConfigComponent] = pydantic.Field(
-        description="Configuration for the transformation."
+class AzureOpenAiEmbeddingConfig(pydantic.BaseModel):
+    component: typing.Optional[AzureOpenAiEmbedding] = pydantic.Field(
+        description="Configuration for the Azure OpenAI embedding model."
     )
 
     def json(self, **kwargs: typing.Any) -> str:
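
The rename above splits the old one-size-fits-all EmbeddingConfig (a type discriminator plus an untyped component) into one strongly typed config model per provider. A small serialization sketch, assuming the class is re-exported from the package root; component is Optional, so it is left unset here:

from llama_cloud import AzureOpenAiEmbeddingConfig

config = AzureOpenAiEmbeddingConfig()
# json() injects by_alias=True and exclude_unset=True (see the override above),
# so a config with no component set serializes to an empty JSON object.
print(config.json())  # -> {}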
--- /dev/null
+++ llama_cloud/types/bedrock_embedding_config.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .bedrock_embedding import BedrockEmbedding
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class BedrockEmbeddingConfig(pydantic.BaseModel):
+    component: typing.Optional[BedrockEmbedding] = pydantic.Field(
+        description="Configuration for the Bedrock embedding model."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
--- /dev/null
+++ llama_cloud/types/box_auth_mechanism.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class BoxAuthMechanism(str, enum.Enum):
+    """
+    An enumeration.
+    """
+
+    DEVELOPER_TOKEN = "developer_token"
+    CCG = "ccg"
+
+    def visit(self, developer_token: typing.Callable[[], T_Result], ccg: typing.Callable[[], T_Result]) -> T_Result:
+        if self is BoxAuthMechanism.DEVELOPER_TOKEN:
+            return developer_token()
+        if self is BoxAuthMechanism.CCG:
+            return ccg()
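
Like the other Fern-generated enums in this package, BoxAuthMechanism ships a visit() method that dispatches exhaustively over its members. A usage sketch:

from llama_cloud import BoxAuthMechanism

mechanism = BoxAuthMechanism.CCG
description = mechanism.visit(
    developer_token=lambda: "short-lived developer token",
    ccg=lambda: "client credentials grant (server-to-server)",
)
print(description)  # -> client credentials grant (server-to-server)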
--- llama_cloud/types/chat_data.py
+++ llama_cloud/types/chat_data.py
@@ -22,7 +22,7 @@ class ChatData(pydantic.BaseModel):
     Comes with special serialization logic for types used commonly in platform codebase.
     """
 
-    retrieval_parameters: PresetRetrievalParams
+    retrieval_parameters: typing.Optional[PresetRetrievalParams]
     llm_parameters: typing.Optional[LlmParameters]
     class_name: typing.Optional[str]
 
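Relaxing retrieval_parameters to Optional means a ChatData no longer needs preset retrieval params to validate. A minimal sketch (the other two fields were already optional):

from llama_cloud import ChatData

data = ChatData()  # validates in 0.0.14; 0.0.13 required retrieval_parameters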
--- llama_cloud/types/chat_message.py
+++ llama_cloud/types/chat_message.py
@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .message_annotation import MessageAnnotation
 from .message_role import MessageRole
 
 try:
@@ -17,12 +18,21 @@ except ImportError:
 
 class ChatMessage(pydantic.BaseModel):
     """
-    Chat message.
+    Base schema model for BaseComponent classes used in the platform.
+    Comes with special serialization logic for types used commonly in platform codebase.
     """
 
-    role: typing.Optional[MessageRole]
-    content: typing.Optional[typing.Any]
-    additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
+    id: str
+    index: int = pydantic.Field(description="The index of the message in the chat.")
+    annotations: typing.Optional[typing.List[MessageAnnotation]] = pydantic.Field(
+        description="Retrieval annotations for the message."
+    )
+    role: MessageRole
+    content: typing.Optional[str] = pydantic.Field(description="Text content of the generation")
+    additional_kwargs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
+        description="Additional arguments passed to the model"
+    )
+    class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
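
This is a breaking reshape of ChatMessage: id, index, and role become required, and content narrows from Any to str. A construction sketch, assuming MessageRole exposes a USER member (its members are not shown in this diff):

from llama_cloud import ChatMessage, MessageRole

message = ChatMessage(
    id="msg-0",  # hypothetical message id
    index=0,
    role=MessageRole.USER,  # assumed member name
    content="Summarize the uploaded filing.",
)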
--- llama_cloud/types/cloud_azure_ai_search_vector_store.py
+++ llama_cloud/types/cloud_azure_ai_search_vector_store.py
@@ -26,6 +26,9 @@ class CloudAzureAiSearchVectorStore(pydantic.BaseModel):
     index_name: typing.Optional[str]
     filterable_metadata_field_keys: typing.Optional[typing.Dict[str, typing.Any]]
     embedding_dimension: typing.Optional[int]
+    client_id: typing.Optional[str]
+    client_secret: typing.Optional[str]
+    tenant_id: typing.Optional[str]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
--- /dev/null
+++ llama_cloud/types/cloud_box_data_source.py
@@ -0,0 +1,51 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .box_auth_mechanism import BoxAuthMechanism
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CloudBoxDataSource(pydantic.BaseModel):
+    """
+    Base component object to capture class names.
+    """
+
+    folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Box folder to read from.")
+    authentication_mechanism: BoxAuthMechanism = pydantic.Field(
+        description="The type of authentication to use (Developer Token or CCG)"
+    )
+    developer_token: typing.Optional[str] = pydantic.Field(
+        description="Developer token for authentication if authentication_mechanism is 'developer_token'."
+    )
+    client_id: typing.Optional[str] = pydantic.Field(
+        description="Box API key used for identifying the application the user is authenticating with"
+    )
+    client_secret: typing.Optional[str] = pydantic.Field(description="Box API secret used for making auth requests.")
+    user_id: typing.Optional[str] = pydantic.Field(description="Box User ID, if provided authenticates as user.")
+    enterprise_id: typing.Optional[str] = pydantic.Field(
+        description="Box Enterprise ID, if provided authenticates as service."
+    )
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
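
Only authentication_mechanism is required on CloudBoxDataSource; the credential fields are optional and interpreted according to its value. A sketch of a CCG-authenticated source with placeholder credentials:

from llama_cloud import BoxAuthMechanism, CloudBoxDataSource

source = CloudBoxDataSource(
    folder_id="123456789",  # placeholder Box folder id
    authentication_mechanism=BoxAuthMechanism.CCG,
    client_id="<box-client-id>",  # placeholder
    client_secret="<box-client-secret>",  # placeholder
    enterprise_id="<box-enterprise-id>",  # authenticate as the service account
)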
--- llama_cloud/types/cloud_document.py
+++ llama_cloud/types/cloud_document.py
@@ -23,6 +23,9 @@ class CloudDocument(pydantic.BaseModel):
     metadata: typing.Dict[str, typing.Any]
     excluded_embed_metadata_keys: typing.Optional[typing.List[str]]
     excluded_llm_metadata_keys: typing.Optional[typing.List[str]]
+    page_positions: typing.Optional[typing.List[int]] = pydantic.Field(
+        description="indices in the CloudDocument.text where a new page begins. e.g. Second page starts at index specified by page_positions[1]."
+    )
     id: str
 
     def json(self, **kwargs: typing.Any) -> str:
--- llama_cloud/types/cloud_document_create.py
+++ llama_cloud/types/cloud_document_create.py
@@ -23,6 +23,9 @@ class CloudDocumentCreate(pydantic.BaseModel):
     metadata: typing.Dict[str, typing.Any]
     excluded_embed_metadata_keys: typing.Optional[typing.List[str]]
     excluded_llm_metadata_keys: typing.Optional[typing.List[str]]
+    page_positions: typing.Optional[typing.List[int]] = pydantic.Field(
+        description="indices in the CloudDocument.text where a new page begins. e.g. Second page starts at index specified by page_positions[1]."
+    )
     id: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
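
Per the new field's docstring, page_positions holds the character offsets in the document text at which each page begins. A hypothetical helper (not part of the package) showing how per-page text can be recovered from those offsets:

from typing import List

def split_pages(text: str, page_positions: List[int]) -> List[str]:
    # Each offset marks where a page begins; the final page runs to the end.
    bounds = list(page_positions) + [len(text)]
    return [text[start:end] for start, end in zip(bounds, bounds[1:])]

assert split_pages("page one.page two.", [0, 9]) == ["page one.", "page two."]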
--- llama_cloud/types/cloud_sharepoint_data_source.py
+++ llama_cloud/types/cloud_sharepoint_data_source.py
@@ -19,7 +19,8 @@ class CloudSharepointDataSource(pydantic.BaseModel):
     Base component object to capture class names.
     """
 
-    site_name: str = pydantic.Field(description="The name of the SharePoint site to download from.")
+    site_name: typing.Optional[str] = pydantic.Field(description="The name of the SharePoint site to download from.")
+    site_id: typing.Optional[str] = pydantic.Field(description="The ID of the SharePoint site to download from.")
     folder_path: typing.Optional[str] = pydantic.Field(description="The path of the Sharepoint folder to read from.")
     folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Sharepoint folder to read from.")
     drive_name: typing.Optional[str] = pydantic.Field(description="The name of the Sharepoint drive to read from.")
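
With site_name now optional and site_id added, a SharePoint source can be addressed by stable id rather than display name. A sketch, assuming no required fields beyond those visible in this hunk (this source's credential fields are not shown here):

from llama_cloud import CloudSharepointDataSource

source = CloudSharepointDataSource(
    site_id="contoso.sharepoint.com,<site-guid>,<web-guid>",  # placeholder id
    folder_path="Shared Documents/reports",
)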
--- /dev/null
+++ llama_cloud/types/cohere_embedding_config.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .cohere_embedding import CohereEmbedding
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CohereEmbeddingConfig(pydantic.BaseModel):
+    component: typing.Optional[CohereEmbedding] = pydantic.Field(
+        description="Configuration for the Cohere embedding model."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
--- llama_cloud/types/configurable_data_source_names.py
+++ llama_cloud/types/configurable_data_source_names.py
@@ -19,6 +19,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
     NOTION_PAGE = "NOTION_PAGE"
     CONFLUENCE = "CONFLUENCE"
     JIRA = "JIRA"
+    BOX = "BOX"
 
     def visit(
         self,
@@ -30,6 +31,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
         notion_page: typing.Callable[[], T_Result],
         confluence: typing.Callable[[], T_Result],
         jira: typing.Callable[[], T_Result],
+        box: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ConfigurableDataSourceNames.S_3:
             return s_3()
@@ -47,3 +49,5 @@ class ConfigurableDataSourceNames(str, enum.Enum):
             return confluence()
         if self is ConfigurableDataSourceNames.JIRA:
             return jira()
+        if self is ConfigurableDataSourceNames.BOX:
+            return box()
--- llama_cloud/types/custom_claims.py
+++ llama_cloud/types/custom_claims.py
@@ -33,9 +33,6 @@ class CustomClaims(pydantic.BaseModel):
     usage_index_max_files_per_pipeline: typing.Optional[int] = pydantic.Field(
         description="The maximum number of files per pipeline the user can index without LlamaParse premium."
     )
-    max_jobs_in_execution: typing.Optional[int] = pydantic.Field(
-        description="The maximum number of jobs the user can have in execution."
-    )
     max_jobs_in_execution_per_job_type: typing.Optional[int] = pydantic.Field(
         description="The maximum number of jobs the user can have in execution per job type."
     )
--- llama_cloud/types/data_source_component_one.py
+++ llama_cloud/types/data_source_component_one.py
@@ -3,6 +3,7 @@
 import typing
 
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
@@ -20,4 +21,5 @@ DataSourceComponentOne = typing.Union[
     CloudNotionPageDataSource,
     CloudConfluenceDataSource,
     CloudJiraDataSource,
+    CloudBoxDataSource,
 ]
--- llama_cloud/types/data_source_create_component_one.py
+++ llama_cloud/types/data_source_create_component_one.py
@@ -3,6 +3,7 @@
 import typing
 
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
@@ -20,4 +21,5 @@ DataSourceCreateComponentOne = typing.Union[
     CloudNotionPageDataSource,
     CloudConfluenceDataSource,
     CloudJiraDataSource,
+    CloudBoxDataSource,
 ]
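
With CloudBoxDataSource appended to both unions, a Box source now type-checks anywhere a data source component is accepted. A typing sketch, assuming the union alias is imported from its defining module:

from llama_cloud import BoxAuthMechanism, CloudBoxDataSource
from llama_cloud.types.data_source_component_one import DataSourceComponentOne

component: DataSourceComponentOne = CloudBoxDataSource(
    authentication_mechanism=BoxAuthMechanism.DEVELOPER_TOKEN,
    developer_token="<box-developer-token>",  # placeholder
)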
--- llama_cloud/types/eval_execution_params.py
+++ llama_cloud/types/eval_execution_params.py
@@ -4,7 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .supported_eval_llm_model_names import SupportedEvalLlmModelNames
+from .supported_llm_model_names import SupportedLlmModelNames
 
 try:
     import pydantic
@@ -20,7 +20,7 @@ class EvalExecutionParams(pydantic.BaseModel):
     Schema for the params for an eval execution.
     """
 
-    llm_model: typing.Optional[SupportedEvalLlmModelNames] = pydantic.Field(
+    llm_model: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
         description="The LLM model to use within eval execution."
     )
     qa_prompt_tmpl: typing.Optional[str] = pydantic.Field(
--- llama_cloud/types/eval_execution_params_override.py
+++ llama_cloud/types/eval_execution_params_override.py
@@ -4,7 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .supported_eval_llm_model_names import SupportedEvalLlmModelNames
+from .supported_llm_model_names import SupportedLlmModelNames
 
 try:
     import pydantic
@@ -20,7 +20,7 @@ class EvalExecutionParamsOverride(pydantic.BaseModel):
     Schema for the params override for an eval execution.
     """
 
-    llm_model: typing.Optional[SupportedEvalLlmModelNames] = pydantic.Field(
+    llm_model: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
        description="The LLM model to use within eval execution."
     )
     qa_prompt_tmpl: typing.Optional[str] = pydantic.Field(
--- llama_cloud/types/filter_operator.py
+++ llama_cloud/types/filter_operator.py
@@ -23,6 +23,7 @@ class FilterOperator(str, enum.Enum):
     ALL = "all"
     TEXT_MATCH = "text_match"
     CONTAINS = "contains"
+    IS_EMPTY = "is_empty"
 
     def visit(
         self,
@@ -38,6 +39,7 @@ class FilterOperator(str, enum.Enum):
         all: typing.Callable[[], T_Result],
         text_match: typing.Callable[[], T_Result],
         contains: typing.Callable[[], T_Result],
+        is_empty: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is FilterOperator.EQUAL_TO:
             return equal_to()
@@ -63,3 +65,5 @@ class FilterOperator(str, enum.Enum):
             return text_match()
         if self is FilterOperator.CONTAINS:
             return contains()
+        if self is FilterOperator.IS_EMPTY:
+            return is_empty()
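
IS_EMPTY extends the metadata filter operators, and because visit() dispatches exhaustively, existing callers must add an is_empty handler. A round-trip sketch:

from llama_cloud import FilterOperator

op = FilterOperator("is_empty")
assert op is FilterOperator.IS_EMPTY  # matches keys whose values are absent/empty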